/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ip.h>
#include <linux/etherdevice.h>
#include <linux/iommu.h>
#include <net/ip.h>
#include <net/tso.h>

#include "nic_reg.h"
#include "nic.h"
#include "q_struct.h"
#include "nicvf_queues.h"

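/* Receive buffers are carved out of one shared compound page at a time:
 * on 4KB-page kernels an order-PAGE_ALLOC_COSTLY_ORDER allocation is used,
 * while on larger page sizes a single page is already big enough.
 */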
#define NICVF_PAGE_ORDER ((PAGE_SIZE <= 4096) ? PAGE_ALLOC_COSTLY_ORDER : 0)

static inline u64 nicvf_iova_to_phys(struct nicvf *nic, dma_addr_t dma_addr)
{
	/* Translation is installed only when IOMMU is present */
	if (nic->iommu_domain)
		return iommu_iova_to_phys(nic->iommu_domain, dma_addr);
	return dma_addr;
}

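/* Buffers handed out by nicvf_alloc_rcv_buffer() borrow references to the
 * current page; rb_pageref counts how many were handed out since the last
 * sync, and page_ref_add() takes them all in one go instead of a
 * get_page() per buffer.
 */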
static void nicvf_get_page(struct nicvf *nic)
{
	if (!nic->rb_pageref || !nic->rb_page)
		return;

	page_ref_add(nic->rb_page, nic->rb_pageref);
	nic->rb_pageref = 0;
}

/* Poll a register for a specific value */
static int nicvf_poll_reg(struct nicvf *nic, int qidx,
			  u64 reg, int bit_pos, int bits, int val)
{
	u64 bit_mask;
	u64 reg_val;
	int timeout = 10;

	bit_mask = (1ULL << bits) - 1;
	bit_mask = (bit_mask << bit_pos);

	while (timeout) {
		reg_val = nicvf_queue_reg_read(nic, reg, qidx);
		if (((reg_val & bit_mask) >> bit_pos) == val)
			return 0;
		usleep_range(1000, 2000);
		timeout--;
	}
	netdev_err(nic->netdev, "Poll on reg 0x%llx failed\n", reg);
	return 1;
}

/* Allocate memory for a queue's descriptors */
static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
				  int q_len, int desc_size, int align_bytes)
{
	dmem->q_len = q_len;
	dmem->size = (desc_size * q_len) + align_bytes;
	/* Save address, need it while freeing */
	dmem->unalign_base = dma_zalloc_coherent(&nic->pdev->dev, dmem->size,
						 &dmem->dma, GFP_KERNEL);
	if (!dmem->unalign_base)
		return -ENOMEM;

	/* Align memory address for 'align_bytes' */
	dmem->phys_base = NICVF_ALIGNED_ADDR((u64)dmem->dma, align_bytes);
	dmem->base = dmem->unalign_base + (dmem->phys_base - dmem->dma);
	return 0;
}

/* Free queue's descriptor memory */
static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
{
	if (!dmem)
		return;

	dma_free_coherent(&nic->pdev->dev, dmem->size,
			  dmem->unalign_base, dmem->dma);
	dmem->unalign_base = NULL;
	dmem->base = NULL;
}

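/* Note that the "pointer" returned via *rbuf below is not a kernel virtual
 * address but the DMA address of the mapped fragment; it is what gets
 * programmed into the RBDR entry, and nicvf_iova_to_phys() is used on the
 * way back when an IOMMU translation is installed.
 */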
/* Allocate buffer for packet reception
 * HW returns the memory address where the packet is DMA'ed, not a pointer
 * into the RBDR ring, so carve the buffer out of a page-backed fragment
 * and keep its start address cache-line aligned.
 */
static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
					 u32 buf_len, u64 **rbuf)
{
	int order = NICVF_PAGE_ORDER;

	/* Check if request can be accommodated in previously allocated page */
	if (nic->rb_page &&
	    ((nic->rb_page_offset + buf_len) < (PAGE_SIZE << order))) {
		nic->rb_pageref++;
		goto ret;
	}

	nicvf_get_page(nic);

	/* Allocate a new page */
	nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
				   order);
	if (!nic->rb_page) {
		this_cpu_inc(nic->pnicvf->drv_stats->rcv_buffer_alloc_failures);
		return -ENOMEM;
	}
	nic->rb_page_offset = 0;
ret:
	/* HW will ensure data coherency, CPU sync not required */
	*rbuf = (u64 *)((u64)dma_map_page_attrs(&nic->pdev->dev, nic->rb_page,
						nic->rb_page_offset, buf_len,
						DMA_FROM_DEVICE,
						DMA_ATTR_SKIP_CPU_SYNC));
	if (dma_mapping_error(&nic->pdev->dev, (dma_addr_t)*rbuf)) {
		if (!nic->rb_page_offset)
			__free_pages(nic->rb_page, order);
		nic->rb_page = NULL;
		return -ENOMEM;
	}
	nic->rb_page_offset += buf_len;

	return 0;
}

/* Build skb around receive buffer */
static struct sk_buff *nicvf_rb_ptr_to_skb(struct nicvf *nic,
					   u64 rb_ptr, int len)
{
	void *data;
	struct sk_buff *skb;

	data = phys_to_virt(rb_ptr);

	/* Now build an skb to give to stack */
	skb = build_skb(data, RCV_FRAG_LEN);
	if (!skb) {
		put_page(virt_to_page(data));
		return NULL;
	}

	prefetch(skb->data);
	return skb;
}

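/* RBDR entries hold the buffer's DMA address right-shifted by
 * NICVF_RCV_BUF_ALIGN; nicvf_free_rbdr() undoes the shift, so every
 * receive buffer must start on that alignment boundary.
 */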
/* Allocate RBDR ring and populate receive buffers */
static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
			   int ring_len, int buf_size)
{
	int idx;
	u64 *rbuf;
	struct rbdr_entry_t *desc;
	int err;

	err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
				     sizeof(struct rbdr_entry_t),
				     NICVF_RCV_BUF_ALIGN_BYTES);
	if (err)
		return err;

	rbdr->desc = rbdr->dmem.base;
	/* Buffer size has to be in multiples of 128 bytes */
	rbdr->dma_size = buf_size;
	rbdr->enable = true;
	rbdr->thresh = RBDR_THRESH;
	rbdr->head = 0;
	rbdr->tail = 0;

	nic->rb_page = NULL;
	for (idx = 0; idx < ring_len; idx++) {
		err = nicvf_alloc_rcv_buffer(nic, GFP_KERNEL, RCV_FRAG_LEN,
					     &rbuf);
		if (err) {
			/* To free already allocated and mapped ones */
			rbdr->tail = idx - 1;
			return err;
		}

		desc = GET_RBDR_DESC(rbdr, idx);
		desc->buf_addr = (u64)rbuf >> NICVF_RCV_BUF_ALIGN;
	}

	nicvf_get_page(nic);

	return 0;
}

/* Free RBDR ring and its receive buffers */
static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
{
	int head, tail;
	u64 buf_addr, phys_addr;
	struct rbdr_entry_t *desc;

	if (!rbdr)
		return;

	rbdr->enable = false;
	if (!rbdr->dmem.base)
		return;

	head = rbdr->head;
	tail = rbdr->tail;

	/* Release page references */
	while (head != tail) {
		desc = GET_RBDR_DESC(rbdr, head);
		buf_addr = ((u64)desc->buf_addr) << NICVF_RCV_BUF_ALIGN;
		phys_addr = nicvf_iova_to_phys(nic, buf_addr);
		dma_unmap_page_attrs(&nic->pdev->dev, buf_addr, RCV_FRAG_LEN,
				     DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
		if (phys_addr)
			put_page(virt_to_page(phys_to_virt(phys_addr)));
		head++;
		head &= (rbdr->dmem.q_len - 1);
	}
	/* Release buffer of tail desc */
	desc = GET_RBDR_DESC(rbdr, tail);
	buf_addr = ((u64)desc->buf_addr) << NICVF_RCV_BUF_ALIGN;
	phys_addr = nicvf_iova_to_phys(nic, buf_addr);
	dma_unmap_page_attrs(&nic->pdev->dev, buf_addr, RCV_FRAG_LEN,
			     DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
	if (phys_addr)
		put_page(virt_to_page(phys_to_virt(phys_addr)));

	/* Free RBDR ring */
	nicvf_free_q_desc_mem(nic, &rbdr->dmem);
}

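/* Called with GFP_KERNEL from the workqueue below and with GFP_ATOMIC
 * from tasklet (softirq) context, so the allocation mode is passed in.
 */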
/* Refill receive buffer descriptors with new buffers */
static void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp)
{
	struct queue_set *qs = nic->qs;
	int rbdr_idx = qs->rbdr_cnt;
	int tail, qcount;
	int refill_rb_cnt;
	struct rbdr *rbdr;
	struct rbdr_entry_t *desc;
	u64 *rbuf;
	int new_rb = 0;

refill:
	if (!rbdr_idx)
		return;
	rbdr_idx--;
	rbdr = &qs->rbdr[rbdr_idx];
	/* Check if it's enabled */
	if (!rbdr->enable)
		goto next_rbdr;

	/* Get the number of descriptors to be refilled */
	qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);
	qcount &= 0x7FFFF;
	/* Doorbell can be rung with at most 'ring size minus 1' descs */
	if (qcount >= (qs->rbdr_len - 1))
		goto next_rbdr;
	else
		refill_rb_cnt = qs->rbdr_len - qcount - 1;

	/* Start filling descs from tail */
	tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3;
	while (refill_rb_cnt) {
		tail++;
		tail &= (rbdr->dmem.q_len - 1);

		if (nicvf_alloc_rcv_buffer(nic, gfp, RCV_FRAG_LEN, &rbuf))
			break;

		desc = GET_RBDR_DESC(rbdr, tail);
		desc->buf_addr = (u64)rbuf >> NICVF_RCV_BUF_ALIGN;
		refill_rb_cnt--;
		new_rb++;
	}

	nicvf_get_page(nic);

	/* make sure all memory stores are done before ringing doorbell */
	smp_wmb();

	/* Check if buffer allocation failed */
	if (refill_rb_cnt)
		nic->rb_alloc_fail = true;
	else
		nic->rb_alloc_fail = false;

	/* Notify HW */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
			      rbdr_idx, new_rb);
next_rbdr:
	/* Re-enable RBDR interrupts only if buffer allocation succeeded */
	if (!nic->rb_alloc_fail && rbdr->enable &&
	    netif_running(nic->pnicvf->netdev))
		nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);

	if (rbdr_idx)
		goto refill;
}

/* Alloc rcv buffers in non-atomic mode for better success */
void nicvf_rbdr_work(struct work_struct *work)
{
	struct nicvf *nic = container_of(work, struct nicvf, rbdr_work.work);

	nicvf_refill_rbdr(nic, GFP_KERNEL);
	if (nic->rb_alloc_fail)
		schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
	else
		nic->rb_work_scheduled = false;
}

/* In softirq context, alloc rcv buffers in atomic mode */
void nicvf_rbdr_task(unsigned long data)
{
	struct nicvf *nic = (struct nicvf *)data;

	nicvf_refill_rbdr(nic, GFP_ATOMIC);
	if (nic->rb_alloc_fail) {
		nic->rb_work_scheduled = true;
		schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
	}
}

/* Initialize completion queue */
static int nicvf_init_cmp_queue(struct nicvf *nic,
				struct cmp_queue *cq, int q_len)
{
	int err;

	err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE,
				     NICVF_CQ_BASE_ALIGN_BYTES);
	if (err)
		return err;

	cq->desc = cq->dmem.base;
	cq->thresh = pass1_silicon(nic->pdev) ? 0 : CMP_QUEUE_CQE_THRESH;
	nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1;

	return 0;
}

static void nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
{
	if (!cq)
		return;
	if (!cq->dmem.base)
		return;

	nicvf_free_q_desc_mem(nic, &cq->dmem);
}

/* Initialize transmit queue */
static int nicvf_init_snd_queue(struct nicvf *nic,
				struct snd_queue *sq, int q_len)
{
	int err;

	err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE,
				     NICVF_SQ_BASE_ALIGN_BYTES);
	if (err)
		return err;

	sq->desc = sq->dmem.base;
	sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_KERNEL);
	if (!sq->skbuff)
		return -ENOMEM;
	sq->head = 0;
	sq->tail = 0;
	atomic_set(&sq->free_cnt, q_len - 1);
	sq->thresh = SND_QUEUE_THRESH;

	/* Preallocate memory for TSO segment's header */
	sq->tso_hdrs = dma_alloc_coherent(&nic->pdev->dev,
					  q_len * TSO_HEADER_SIZE,
					  &sq->tso_hdrs_phys, GFP_KERNEL);
	if (!sq->tso_hdrs)
		return -ENOMEM;

	return 0;
}

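/* A send descriptor is a HDR subdescriptor followed by its GATHER
 * subdescriptors, so unmapping starts at hdr_sqe + 1 and walks
 * subdesc_cnt entries, wrapping at the end of the ring.
 */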
void nicvf_unmap_sndq_buffers(struct nicvf *nic, struct snd_queue *sq,
			      int hdr_sqe, u8 subdesc_cnt)
{
	u8 idx;
	struct sq_gather_subdesc *gather;

	/* Unmap DMA mapped skb data buffers */
	for (idx = 0; idx < subdesc_cnt; idx++) {
		hdr_sqe++;
		hdr_sqe &= (sq->dmem.q_len - 1);
		gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, hdr_sqe);
		/* HW will ensure data coherency, CPU sync not required */
		dma_unmap_page_attrs(&nic->pdev->dev, gather->addr,
				     gather->size, DMA_TO_DEVICE,
				     DMA_ATTR_SKIP_CPU_SYNC);
	}
}

static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
{
	struct sk_buff *skb;
	struct sq_hdr_subdesc *hdr;
	struct sq_hdr_subdesc *tso_sqe;

	if (!sq)
		return;
	if (!sq->dmem.base)
		return;

	if (sq->tso_hdrs)
		dma_free_coherent(&nic->pdev->dev,
				  sq->dmem.q_len * TSO_HEADER_SIZE,
				  sq->tso_hdrs, sq->tso_hdrs_phys);

	/* Free pending skbs in the queue */
	smp_rmb();
	while (sq->head != sq->tail) {
		skb = (struct sk_buff *)sq->skbuff[sq->head];
		if (!skb)
			goto next;
		hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
		/* Check for dummy descriptor used for HW TSO offload on 88xx */
		if (hdr->dont_send) {
			/* Get actual TSO descriptors and unmap them */
			tso_sqe =
			 (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2);
			nicvf_unmap_sndq_buffers(nic, sq, hdr->rsvd2,
						 tso_sqe->subdesc_cnt);
		} else {
			nicvf_unmap_sndq_buffers(nic, sq, sq->head,
						 hdr->subdesc_cnt);
		}
		dev_kfree_skb_any(skb);
next:
		sq->head++;
		sq->head &= (sq->dmem.q_len - 1);
	}
	kfree(sq->skbuff);
	nicvf_free_q_desc_mem(nic, &sq->dmem);
}

static void nicvf_reclaim_snd_queue(struct nicvf *nic,
				    struct queue_set *qs, int qidx)
{
	/* Disable send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
	/* Check if SQ is stopped */
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
		return;
	/* Reset send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
}

static void nicvf_reclaim_rcv_queue(struct nicvf *nic,
				    struct queue_set *qs, int qidx)
{
	union nic_mbx mbx = {};

	/* Make sure all packets in the pipeline are written back into mem */
	mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
	nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_reclaim_cmp_queue(struct nicvf *nic,
				    struct queue_set *qs, int qidx)
{
	/* Disable timer threshold (doesn't get reset upon CQ reset) */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
	/* Disable completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
	/* Reset completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
}

static void nicvf_reclaim_rbdr(struct nicvf *nic,
			       struct rbdr *rbdr, int qidx)
{
	u64 tmp, fifo_state;
	int timeout = 10;

	/* Save head and tail pointers for freeing up buffers */
	rbdr->head = nicvf_queue_reg_read(nic,
					  NIC_QSET_RBDR_0_1_HEAD,
					  qidx) >> 3;
	rbdr->tail = nicvf_queue_reg_read(nic,
					  NIC_QSET_RBDR_0_1_TAIL,
					  qidx) >> 3;

	/* If RBDR FIFO is in 'FAIL' state then do a reset first
	 * before reclaiming.
	 */
	fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
	if (((fifo_state >> 62) & 0x03) == 0x3)
		nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
				      qidx, NICVF_RBDR_RESET);

	/* Disable RBDR */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
		return;
	while (1) {
		tmp = nicvf_queue_reg_read(nic,
					   NIC_QSET_RBDR_0_1_PREFETCH_STATUS,
					   qidx);
		if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF))
			break;
		usleep_range(1000, 2000);
		timeout--;
		if (!timeout) {
			netdev_err(nic->netdev,
				   "Failed polling on prefetch status\n");
			return;
		}
	}
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
			      qidx, NICVF_RBDR_RESET);

	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
		return;
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
		return;
}

void nicvf_config_vlan_stripping(struct nicvf *nic, netdev_features_t features)
{
	u64 rq_cfg;
	int sqs;

	rq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_RQ_GEN_CFG, 0);

	/* Enable first VLAN stripping */
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		rq_cfg |= (1ULL << 25);
	else
		rq_cfg &= ~(1ULL << 25);
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);

	/* Configure Secondary Qsets, if any */
	for (sqs = 0; sqs < nic->sqs_count; sqs++)
		if (nic->snicvf[sqs])
			nicvf_queue_reg_write(nic->snicvf[sqs],
					      NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);
}

static void nicvf_reset_rcv_queue_stats(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	/* Reset all RQ/SQ and VF stats */
	mbx.reset_stat.msg = NIC_MBOX_MSG_RESET_STAT_COUNTER;
	mbx.reset_stat.rx_stat_mask = 0x3FFF;
	mbx.reset_stat.tx_stat_mask = 0x1F;
	mbx.reset_stat.rq_stat_mask = 0xFFFF;
	mbx.reset_stat.sq_stat_mask = 0xFFFF;
	nicvf_send_msg_to_pf(nic, &mbx);
}

/* Configures receive queue */
static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
				   int qidx, bool enable)
{
	union nic_mbx mbx = {};
	struct rcv_queue *rq;
	struct rq_cfg rq_cfg;

	rq = &qs->rq[qidx];
	rq->enable = enable;

	/* Disable receive queue */
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);

	if (!rq->enable) {
		nicvf_reclaim_rcv_queue(nic, qs, qidx);
		return;
	}

	rq->cq_qs = qs->vnic_id;
	rq->cq_idx = qidx;
	rq->start_rbdr_qs = qs->vnic_id;
	rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
	rq->cont_rbdr_qs = qs->vnic_id;
	rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
	/* all writes of RBDR data to be loaded into L2 Cache as well */
	rq->caching = 1;

	/* Send a mailbox msg to PF to config RQ */
	mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
	mbx.rq.qs_num = qs->vnic_id;
	mbx.rq.rq_num = qidx;
	mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
		     (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
		     (rq->cont_qs_rbdr_idx << 8) |
		     (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx);
	nicvf_send_msg_to_pf(nic, &mbx);

	mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
	mbx.rq.cfg = BIT_ULL(63) | BIT_ULL(62) |
		     (RQ_PASS_RBDR_LVL << 16) | (RQ_PASS_CQ_LVL << 8) |
		     (qs->vnic_id << 0);
	nicvf_send_msg_to_pf(nic, &mbx);

	/* RQ drop config
	 * Enable CQ drop to reserve sufficient CQEs for all tx packets
	 */
	mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
	mbx.rq.cfg = BIT_ULL(63) | BIT_ULL(62) |
		     (RQ_PASS_RBDR_LVL << 40) | (RQ_DROP_RBDR_LVL << 32) |
		     (RQ_PASS_CQ_LVL << 16) | (RQ_DROP_CQ_LVL << 8);
	nicvf_send_msg_to_pf(nic, &mbx);

	if (!nic->sqs_mode && (qidx == 0)) {
		/* Enable checking L3/L4 length and TCP/UDP checksums
		 * Also allow IPv6 pkts with zero UDP checksum.
		 */
		nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0,
				      (BIT(24) | BIT(23) | BIT(21) | BIT(20)));
		nicvf_config_vlan_stripping(nic, nic->netdev->features);
	}

	/* Enable Receive queue */
	memset(&rq_cfg, 0, sizeof(struct rq_cfg));
	rq_cfg.ena = 1;
	rq_cfg.tcp_ena = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg);
}

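/* CFG.qsize encodes the ring length as log2(entries / 1024), hence the
 * ilog2(len >> 10) used below for both completion and send queues.
 */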
/* Configures completion queue */
void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
			    int qidx, bool enable)
{
	struct cmp_queue *cq;
	struct cq_cfg cq_cfg;

	cq = &qs->cq[qidx];
	cq->enable = enable;

	if (!cq->enable) {
		nicvf_reclaim_cmp_queue(nic, qs, qidx);
		return;
	}

	/* Reset completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);

	if (!cq->enable)
		return;

	spin_lock_init(&cq->lock);
	/* Set completion queue base address */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE,
			      qidx, (u64)(cq->dmem.phys_base));

	/* Enable Completion queue */
	memset(&cq_cfg, 0, sizeof(struct cq_cfg));
	cq_cfg.ena = 1;
	cq_cfg.reset = 0;
	cq_cfg.caching = 0;
	cq_cfg.qsize = ilog2(qs->cq_len >> 10);
	cq_cfg.avg_con = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(u64 *)&cq_cfg);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2,
			      qidx, CMP_QUEUE_TIMER_THRESH);
}

/* Configures transmit queue */
static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
				   int qidx, bool enable)
{
	union nic_mbx mbx = {};
	struct snd_queue *sq;
	struct sq_cfg sq_cfg;

	sq = &qs->sq[qidx];
	sq->enable = enable;

	if (!sq->enable) {
		nicvf_reclaim_snd_queue(nic, qs, qidx);
		return;
	}

	/* Reset send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);

	sq->cq_qs = qs->vnic_id;
	sq->cq_idx = qidx;

	/* Send a mailbox msg to PF to config SQ */
	mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
	mbx.sq.qs_num = qs->vnic_id;
	mbx.sq.sq_num = qidx;
	mbx.sq.sqs_mode = nic->sqs_mode;
	mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
	nicvf_send_msg_to_pf(nic, &mbx);

	/* Set queue base address */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE,
			      qidx, (u64)(sq->dmem.phys_base));

	/* Enable send queue & set queue size */
	memset(&sq_cfg, 0, sizeof(struct sq_cfg));
	sq_cfg.ena = 1;
	sq_cfg.reset = 0;
	sq_cfg.ldwb = 0;
	sq_cfg.qsize = ilog2(qs->sq_len >> 10);
	sq_cfg.tstmp_bgx_intf = 0;
	/* CQ's level at which HW will stop processing SQEs to avoid
	 * transmitting a pkt with no space in CQ to post CQE_TX.
	 */
	sq_cfg.cq_limit = (CMP_QUEUE_PIPELINE_RSVD * 256) / qs->cq_len;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);

	/* Set queue:cpu affinity for better load distribution */
	if (cpu_online(qidx)) {
		cpumask_set_cpu(qidx, &sq->affinity_mask);
		netif_set_xps_queue(nic->netdev,
				    &sq->affinity_mask, qidx);
	}
}

/* Configures receive buffer descriptor ring */
static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs,
			      int qidx, bool enable)
{
	struct rbdr *rbdr;
	struct rbdr_cfg rbdr_cfg;

	rbdr = &qs->rbdr[qidx];
	nicvf_reclaim_rbdr(nic, rbdr, qidx);
	if (!enable)
		return;

	/* Set descriptor base address */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE,
			      qidx, (u64)(rbdr->dmem.phys_base));

	/* Enable RBDR & set queue size */
	/* Buffer size should be in multiples of 128 bytes */
	memset(&rbdr_cfg, 0, sizeof(struct rbdr_cfg));
	rbdr_cfg.ena = 1;
	rbdr_cfg.reset = 0;
	rbdr_cfg.ldwb = 0;
	rbdr_cfg.qsize = RBDR_SIZE;
	rbdr_cfg.avg_con = 0;
	rbdr_cfg.lines = rbdr->dma_size / 128;
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
			      qidx, *(u64 *)&rbdr_cfg);

	/* Notify HW */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
			      qidx, qs->rbdr_len - 1);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH,
			      qidx, rbdr->thresh - 1);
}

/* Requests PF to assign and enable Qset */
void nicvf_qset_config(struct nicvf *nic, bool enable)
{
	union nic_mbx mbx = {};
	struct queue_set *qs = nic->qs;
	struct qs_cfg *qs_cfg;

	if (!qs) {
		netdev_warn(nic->netdev,
			    "Qset is still not allocated, don't init queues\n");
		return;
	}

	qs->enable = enable;
	qs->vnic_id = nic->vf_id;

	/* Send a mailbox msg to PF to config Qset */
	mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
	mbx.qs.num = qs->vnic_id;
	mbx.qs.sqs_count = nic->sqs_count;

	mbx.qs.cfg = 0;
	qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
	if (qs->enable) {
		qs_cfg->ena = 1;
#ifdef __BIG_ENDIAN
		qs_cfg->be = 1;
#endif
		qs_cfg->vnic = qs->vnic_id;
	}
	nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_free_resources(struct nicvf *nic)
{
	int qidx;
	struct queue_set *qs = nic->qs;

	/* Free receive buffer descriptor ring */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
		nicvf_free_rbdr(nic, &qs->rbdr[qidx]);

	/* Free completion queue */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++)
		nicvf_free_cmp_queue(nic, &qs->cq[qidx]);

	/* Free send queue */
	for (qidx = 0; qidx < qs->sq_cnt; qidx++)
		nicvf_free_snd_queue(nic, &qs->sq[qidx]);
}

static int nicvf_alloc_resources(struct nicvf *nic)
{
	int qidx;
	struct queue_set *qs = nic->qs;

	/* Alloc receive buffer descriptor ring */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
		if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
				    DMA_BUFFER_LEN))
			goto alloc_fail;
	}

	/* Alloc send queue */
	for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
		if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len))
			goto alloc_fail;
	}

	/* Alloc completion queue */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len))
			goto alloc_fail;
	}

	return 0;
alloc_fail:
	nicvf_free_resources(nic);
	return -ENOMEM;
}

int nicvf_set_qset_resources(struct nicvf *nic)
{
	struct queue_set *qs;

	qs = devm_kzalloc(&nic->pdev->dev, sizeof(*qs), GFP_KERNEL);
	if (!qs)
		return -ENOMEM;
	nic->qs = qs;

	/* Set count of each queue */
	qs->rbdr_cnt = DEFAULT_RBDR_CNT;
	qs->rq_cnt = min_t(u8, MAX_RCV_QUEUES_PER_QS, num_online_cpus());
	qs->sq_cnt = min_t(u8, MAX_SND_QUEUES_PER_QS, num_online_cpus());
	qs->cq_cnt = max_t(u8, qs->rq_cnt, qs->sq_cnt);

	/* Set queue lengths */
	qs->rbdr_len = RCV_BUF_COUNT;
	qs->sq_len = SND_QUEUE_LEN;
	qs->cq_len = CMP_QUEUE_LEN;

	nic->rx_queues = qs->rq_cnt;
	nic->tx_queues = qs->sq_cnt;

	return 0;
}

int nicvf_config_data_transfer(struct nicvf *nic, bool enable)
{
	bool disable = false;
	struct queue_set *qs = nic->qs;
	struct queue_set *pqs = nic->pnicvf->qs;
	int qidx;

	if (!qs)
		return 0;

	/* Take primary VF's queue lengths.
	 * This is needed to take queue lengths set from ethtool
	 * into consideration.
	 */
	if (nic->sqs_mode && pqs) {
		qs->cq_len = pqs->cq_len;
		qs->sq_len = pqs->sq_len;
	}

	if (enable) {
		if (nicvf_alloc_resources(nic))
			return -ENOMEM;

		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
			nicvf_snd_queue_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
			nicvf_cmp_queue_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
			nicvf_rbdr_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
			nicvf_rcv_queue_config(nic, qs, qidx, enable);
	} else {
		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
			nicvf_rcv_queue_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
			nicvf_rbdr_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
			nicvf_snd_queue_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
			nicvf_cmp_queue_config(nic, qs, qidx, disable);

		nicvf_free_resources(nic);
	}

	/* Reset RXQ's stats.
	 * SQ's stats will get reset automatically once SQ is reset.
	 */
	nicvf_reset_rcv_queue_stats(nic);

	return 0;
}

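/* All queue lengths are powers of two, so 'index &= (q_len - 1)' is used
 * throughout to wrap head/tail indices around the ring.
 */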
/* Get a free descriptor from SQ
 * returns the index of the first reserved descriptor
 */
static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
{
	int qentry;

	qentry = sq->tail;
	atomic_sub(desc_cnt, &sq->free_cnt);
	sq->tail += desc_cnt;
	sq->tail &= (sq->dmem.q_len - 1);

	return qentry;
}

/* Rollback to previous tail pointer when descriptors not used */
static inline void nicvf_rollback_sq_desc(struct snd_queue *sq,
					  int qentry, int desc_cnt)
{
	sq->tail = qentry;
	atomic_add(desc_cnt, &sq->free_cnt);
}

/* Free descriptor back to SQ for future use */
void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
{
	atomic_add(desc_cnt, &sq->free_cnt);
	sq->head += desc_cnt;
	sq->head &= (sq->dmem.q_len - 1);
}

static inline int nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry)
{
	qentry++;
	qentry &= (sq->dmem.q_len - 1);
	return qentry;
}

void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
{
	u64 sq_cfg;

	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
	sq_cfg |= NICVF_SQ_EN;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
	/* Ring doorbell so that H/W restarts processing SQEs */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
}

void nicvf_sq_disable(struct nicvf *nic, int qidx)
{
	u64 sq_cfg;

	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
	sq_cfg &= ~NICVF_SQ_EN;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
}

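/* The SQ HEAD/TAIL registers hold byte offsets into the ring; the '>> 4'
 * below converts them to descriptor indices, each subdescriptor being
 * 16 bytes (likewise '>> 3' is used for the 8-byte RBDR entries elsewhere
 * in this file).
 */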
void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq,
			      int qidx)
{
	u64 head, tail;
	struct sk_buff *skb;
	struct nicvf *nic = netdev_priv(netdev);
	struct sq_hdr_subdesc *hdr;

	head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
	tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
	while (sq->head != head) {
		hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
		if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
			nicvf_put_sq_desc(sq, 1);
			continue;
		}
		skb = (struct sk_buff *)sq->skbuff[sq->head];
		if (skb)
			dev_kfree_skb_any(skb);
		atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets);
		atomic64_add(hdr->tot_len,
			     (atomic64_t *)&netdev->stats.tx_bytes);
		nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
	}
}

/* Calculate no of SQ subdescriptors needed to transmit all
 * segments of this TSO packet.
 * Taken from 'Tilera network driver' with a minor modification.
 */
static int nicvf_tso_count_subdescs(struct sk_buff *skb)
{
	struct skb_shared_info *sh = skb_shinfo(skb);
	unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	unsigned int data_len = skb->len - sh_len;
	unsigned int p_len = sh->gso_size;
	long f_id = -1;	/* id of the current fragment */
	long f_size = skb_headlen(skb) - sh_len;  /* current fragment size */
	long f_used = 0;  /* bytes used from the current fragment */
	long n;  /* size of the current piece of payload */
	int num_edescs = 0;
	int segment;

	for (segment = 0; segment < sh->gso_segs; segment++) {
		unsigned int p_used = 0;

		/* One edesc for header and for each piece of the payload. */
		for (num_edescs++; p_used < p_len; num_edescs++) {
			/* Advance as needed. */
			while (f_used >= f_size) {
				f_id++;
				f_size = skb_frag_size(&sh->frags[f_id]);
				f_used = 0;
			}

			/* Use bytes from the current fragment. */
			n = p_len - p_used;
			if (n > f_size - f_used)
				n = f_size - f_used;
			f_used += n;
			p_used += n;
		}

		/* The last segment may be less than gso_size. */
		data_len -= p_len;
		if (data_len < p_len)
			p_len = data_len;
	}

	/* '+ gso_segs' for SQ_HDR_SUBDESCs for each segment */
	return num_edescs + sh->gso_segs;
}

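/* On 88xx with HW TSO, a dummy HDR subdescriptor (dont_send = 1) plus an
 * IMMEDIATE subdescriptor are appended after the real TSO descriptors so
 * that a single CQE is posted for the whole packet; that pair is what
 * POST_CQE_DESC_COUNT accounts for.
 */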
#define POST_CQE_DESC_COUNT 2

/* Get the number of SQ descriptors needed to xmit this skb */
static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
{
	int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT;

	if (skb_shinfo(skb)->gso_size && !nic->hw_tso) {
		subdesc_cnt = nicvf_tso_count_subdescs(skb);
		return subdesc_cnt;
	}

	/* Dummy descriptors to get TSO pkt completion notification */
	if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size)
		subdesc_cnt += POST_CQE_DESC_COUNT;

	if (skb_shinfo(skb)->nr_frags)
		subdesc_cnt += skb_shinfo(skb)->nr_frags;

	return subdesc_cnt;
}

/* Add SQ HEADER subdescriptor.
 * First subdescriptor for every send descriptor.
 */
static inline void
nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
			 int subdesc_cnt, struct sk_buff *skb, int len)
{
	int proto;
	struct sq_hdr_subdesc *hdr;

	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
	memset(hdr, 0, SND_QUEUE_DESC_SIZE);
	hdr->subdesc_type = SQ_DESC_TYPE_HEADER;

	if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size) {
		/* post_cqe = 0, to avoid HW posting a CQE for every TSO
		 * segment transmitted on 88xx.
		 */
		hdr->subdesc_cnt = subdesc_cnt - POST_CQE_DESC_COUNT;
	} else {
		sq->skbuff[qentry] = (u64)skb;
		/* Enable notification via CQE after processing SQE */
		hdr->post_cqe = 1;
		/* No of subdescriptors following this */
		hdr->subdesc_cnt = subdesc_cnt;
	}
	hdr->tot_len = len;

	/* Offload checksum calculation to HW */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		hdr->csum_l3 = 1; /* Enable IP csum calculation */
		hdr->l3_offset = skb_network_offset(skb);
		hdr->l4_offset = skb_transport_offset(skb);

		proto = ip_hdr(skb)->protocol;
		switch (proto) {
		case IPPROTO_TCP:
			hdr->csum_l4 = SEND_L4_CSUM_TCP;
			break;
		case IPPROTO_UDP:
			hdr->csum_l4 = SEND_L4_CSUM_UDP;
			break;
		case IPPROTO_SCTP:
			hdr->csum_l4 = SEND_L4_CSUM_SCTP;
			break;
		}
	}

	if (nic->hw_tso && skb_shinfo(skb)->gso_size) {
		hdr->tso = 1;
		hdr->tso_start = skb_transport_offset(skb) + tcp_hdrlen(skb);
		hdr->tso_max_paysize = skb_shinfo(skb)->gso_size;
		/* For non-tunneled pkts, point this to L2 ethertype */
		hdr->inner_l3_offset = skb_network_offset(skb) - 2;
		this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
	}
}

/* SQ GATHER subdescriptor
 * Must follow HDR descriptor
 */
static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
					       int size, u64 data)
{
	struct sq_gather_subdesc *gather;

	qentry &= (sq->dmem.q_len - 1);
	gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry);

	memset(gather, 0, SND_QUEUE_DESC_SIZE);
	gather->subdesc_type = SQ_DESC_TYPE_GATHER;
	gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
	gather->size = size;
	gather->addr = data;
}

/* Add HDR + IMMEDIATE subdescriptors right after descriptors of a TSO
 * packet so that a CQE is posted as a notification for transmission of
 * TSO packet.
 */
static inline void nicvf_sq_add_cqe_subdesc(struct snd_queue *sq, int qentry,
					    int tso_sqe, struct sk_buff *skb)
{
	struct sq_imm_subdesc *imm;
	struct sq_hdr_subdesc *hdr;

	sq->skbuff[qentry] = (u64)skb;

	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
	memset(hdr, 0, SND_QUEUE_DESC_SIZE);
	hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
	/* Enable notification via CQE after processing SQE */
	hdr->post_cqe = 1;
	/* There is no packet to transmit here */
	hdr->dont_send = 1;
	hdr->subdesc_cnt = POST_CQE_DESC_COUNT - 1;
	hdr->tot_len = 1;
	/* Actual TSO header SQE index, needed for cleanup */
	hdr->rsvd2 = tso_sqe;

	qentry = nicvf_get_nxt_sqentry(sq, qentry);
	imm = (struct sq_imm_subdesc *)GET_SQ_DESC(sq, qentry);
	memset(imm, 0, SND_QUEUE_DESC_SIZE);
	imm->subdesc_type = SQ_DESC_TYPE_IMMEDIATE;
	imm->len = 1;
}

static inline void nicvf_sq_doorbell(struct nicvf *nic, struct sk_buff *skb,
				     int sq_num, int desc_cnt)
{
	struct netdev_queue *txq;

	txq = netdev_get_tx_queue(nic->pnicvf->netdev,
				  skb_get_queue_mapping(skb));

	netdev_tx_sent_queue(txq, skb->len);

	/* make sure all memory stores are done before ringing doorbell */
	smp_wmb();

	/* Inform HW to xmit all TSO segments */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
			      sq_num, desc_cnt);
}

/* Segment a TSO packet into 'gso_size' segments and append
 * them to SQ for transfer
 */
static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
			       int sq_num, int qentry, struct sk_buff *skb)
{
	struct tso_t tso;
	int seg_subdescs = 0, desc_cnt = 0;
	int seg_len, total_len, data_left;
	int hdr_qentry = qentry;
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);

	tso_start(skb, &tso);
	total_len = skb->len - hdr_len;
	while (total_len > 0) {
		char *hdr;

		/* Save Qentry for adding HDR_SUBDESC at the end */
		hdr_qentry = qentry;

		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
		total_len -= data_left;

		/* Add segment's header */
		qentry = nicvf_get_nxt_sqentry(sq, qentry);
		hdr = sq->tso_hdrs + qentry * TSO_HEADER_SIZE;
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
		nicvf_sq_add_gather_subdesc(sq, qentry, hdr_len,
					    sq->tso_hdrs_phys +
					    qentry * TSO_HEADER_SIZE);
		/* HDR_SUBDESC + GATHER */
		seg_subdescs = 2;
		seg_len = hdr_len;

		/* Add segment's payload fragments */
		while (data_left > 0) {
			int size;

			size = min_t(int, tso.size, data_left);

			qentry = nicvf_get_nxt_sqentry(sq, qentry);
			nicvf_sq_add_gather_subdesc(sq, qentry, size,
						    virt_to_phys(tso.data));
			seg_subdescs++;
			seg_len += size;

			data_left -= size;
			tso_build_data(skb, &tso, size);
		}
		nicvf_sq_add_hdr_subdesc(nic, sq, hdr_qentry,
					 seg_subdescs - 1, skb, seg_len);
		sq->skbuff[hdr_qentry] = (u64)NULL;
		qentry = nicvf_get_nxt_sqentry(sq, qentry);

		desc_cnt += seg_subdescs;
	}
	/* Save SKB in the last segment for freeing */
	sq->skbuff[hdr_qentry] = (u64)skb;

	nicvf_sq_doorbell(nic, skb, sq_num, desc_cnt);

	this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
	return 1;
}

/* Append an skb to a SQ for packet transfer. */
int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
			struct sk_buff *skb, u8 sq_num)
{
	int i, size;
	int subdesc_cnt, hdr_sqe = 0;
	int qentry;
	u64 dma_addr;

	subdesc_cnt = nicvf_sq_subdesc_required(nic, skb);
	if (subdesc_cnt > atomic_read(&sq->free_cnt))
		goto append_fail;

	qentry = nicvf_get_sq_desc(sq, subdesc_cnt);

	/* Check if its a TSO packet */
	if (skb_shinfo(skb)->gso_size && !nic->hw_tso)
		return nicvf_sq_append_tso(nic, sq, sq_num, qentry, skb);

	/* Add SQ header subdesc */
	nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1,
				 skb, skb->len);
	hdr_sqe = qentry;

	/* Add SQ gather subdescs */
	qentry = nicvf_get_nxt_sqentry(sq, qentry);
	size = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
	/* HW will ensure data coherency, CPU sync not required */
	dma_addr = dma_map_page_attrs(&nic->pdev->dev, virt_to_page(skb->data),
				      offset_in_page(skb->data), size,
				      DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(&nic->pdev->dev, dma_addr)) {
		nicvf_rollback_sq_desc(sq, qentry, subdesc_cnt);
		return 0;
	}

	nicvf_sq_add_gather_subdesc(sq, qentry, size, dma_addr);

	/* Check for scattered buffer */
	if (!skb_is_nonlinear(skb))
		goto doorbell;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[i];

		qentry = nicvf_get_nxt_sqentry(sq, qentry);
		size = skb_frag_size(frag);
		dma_addr = dma_map_page_attrs(&nic->pdev->dev,
					      skb_frag_page(frag),
					      frag->page_offset, size,
					      DMA_TO_DEVICE,
					      DMA_ATTR_SKIP_CPU_SYNC);
		if (dma_mapping_error(&nic->pdev->dev, dma_addr)) {
			/* Free entire chain of mapped buffers
			 * here 'i' = frags mapped + above mapped skb->data
			 */
			nicvf_unmap_sndq_buffers(nic, sq, hdr_sqe, i);
			nicvf_rollback_sq_desc(sq, qentry, subdesc_cnt);
			return 0;
		}
		nicvf_sq_add_gather_subdesc(sq, qentry, size, dma_addr);
	}

doorbell:
	if (nic->t88 && skb_shinfo(skb)->gso_size) {
		qentry = nicvf_get_nxt_sqentry(sq, qentry);
		nicvf_sq_add_cqe_subdesc(sq, qentry, hdr_sqe, skb);
	}

	nicvf_sq_doorbell(nic, skb, sq_num, subdesc_cnt);

	return 1;

append_fail:
	/* Use original PCI dev for debug log */
	nic = nic->pnicvf;
	netdev_dbg(nic->netdev, "Not enough SQ descriptors to xmit pkt\n");
	return 0;
}

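/* CQE_RX packs the per-fragment lengths four u16s to a 64-bit word; on
 * big-endian hosts the u16 index within each word is reversed, which is
 * what frag_num() compensates for.
 */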
static inline unsigned frag_num(unsigned i)
{
#ifdef __BIG_ENDIAN
	return (i & ~3) + 3 - (i & 3);
#else
	return i;
#endif
}

/* Returns SKB for a received packet */
struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
{
	int frag;
	int payload_len = 0;
	struct sk_buff *skb = NULL;
	struct page *page;
	int offset;
	u16 *rb_lens = NULL;
	u64 *rb_ptrs = NULL;
	u64 phys_addr;

	rb_lens = (void *)cqe_rx + (3 * sizeof(u64));
	/* Except on 88xx pass1, all chips add CQE_RX2_S to CQE_RX at
	 * word6, hence the buffer pointers move by one word.
	 *
	 * Use the existing 'hw_tso' flag, which is set for all chips
	 * except 88xx pass1, instead of an additional cache line
	 * access (or miss) from reading the pci dev's revision.
	 */
	if (!nic->hw_tso)
		rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64));
	else
		rb_ptrs = (void *)cqe_rx + (7 * sizeof(u64));

	for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
		payload_len = rb_lens[frag_num(frag)];
		phys_addr = nicvf_iova_to_phys(nic, *rb_ptrs);
		if (!phys_addr) {
			if (skb)
				dev_kfree_skb_any(skb);
			return NULL;
		}

		if (!frag) {
			/* First fragment */
			dma_unmap_page_attrs(&nic->pdev->dev,
					     *rb_ptrs - cqe_rx->align_pad,
					     RCV_FRAG_LEN, DMA_FROM_DEVICE,
					     DMA_ATTR_SKIP_CPU_SYNC);
			skb = nicvf_rb_ptr_to_skb(nic,
						  phys_addr - cqe_rx->align_pad,
						  payload_len);
			if (!skb)
				return NULL;
			skb_reserve(skb, cqe_rx->align_pad);
			skb_put(skb, payload_len);
		} else {
			/* Add fragments */
			dma_unmap_page_attrs(&nic->pdev->dev, *rb_ptrs,
					     RCV_FRAG_LEN, DMA_FROM_DEVICE,
					     DMA_ATTR_SKIP_CPU_SYNC);
			page = virt_to_page(phys_to_virt(phys_addr));
			offset = phys_to_virt(phys_addr) - page_address(page);
			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
					offset, payload_len, RCV_FRAG_LEN);
		}
		/* Next buffer pointer */
		rb_ptrs++;
	}
	return skb;
}

static u64 nicvf_int_type_to_mask(int int_type, int q_idx)
{
	u64 reg_val;

	switch (int_type) {
	case NICVF_INTR_CQ:
		reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
		break;
	case NICVF_INTR_SQ:
		reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
		break;
	case NICVF_INTR_RBDR:
		reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
		break;
	case NICVF_INTR_PKT_DROP:
		reg_val = (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
		break;
	case NICVF_INTR_TCP_TIMER:
		reg_val = (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
		break;
	case NICVF_INTR_MBOX:
		reg_val = (1ULL << NICVF_INTR_MBOX_SHIFT);
		break;
	case NICVF_INTR_QS_ERR:
		reg_val = (1ULL << NICVF_INTR_QS_ERR_SHIFT);
		break;
	default:
		reg_val = 0;
	}

	return reg_val;
}

/* Enable interrupt */
void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)
{
	u64 mask = nicvf_int_type_to_mask(int_type, q_idx);

	if (!mask) {
		netdev_dbg(nic->netdev,
			   "Failed to enable interrupt: unknown type\n");
		return;
	}
	nicvf_reg_write(nic, NIC_VF_ENA_W1S,
			nicvf_reg_read(nic, NIC_VF_ENA_W1S) | mask);
}

/* Disable interrupt */
void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx)
{
	u64 mask = nicvf_int_type_to_mask(int_type, q_idx);

	if (!mask) {
		netdev_dbg(nic->netdev,
			   "Failed to disable interrupt: unknown type\n");
		return;
	}

	nicvf_reg_write(nic, NIC_VF_ENA_W1C, mask);
}

/* Clear interrupt */
void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
{
	u64 mask = nicvf_int_type_to_mask(int_type, q_idx);

	if (!mask) {
		netdev_dbg(nic->netdev,
			   "Failed to clear interrupt: unknown type\n");
		return;
	}

	nicvf_reg_write(nic, NIC_VF_INT, mask);
}

/* Check if interrupt is enabled */
int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx)
{
	u64 mask = nicvf_int_type_to_mask(int_type, q_idx);
	/* If interrupt type is unknown, we treat it as disabled. */
	if (!mask) {
		netdev_dbg(nic->netdev,
			   "Failed to check interrupt enable: unknown type\n");
		return 0;
	}

	return mask & nicvf_reg_read(nic, NIC_VF_ENA_W1S);
}

void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
{
	struct rcv_queue *rq;

#define GET_RQ_STATS(reg) \
	nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\
			    (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))

	rq = &nic->qs->rq[rq_idx];
	rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS);
	rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS);
}

void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
{
	struct snd_queue *sq;

#define GET_SQ_STATS(reg) \
	nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\
			    (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))

	sq = &nic->qs->sq[sq_idx];
	sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS);
	sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS);
}

/* Check for errors in the receive cmp.queue entry */
int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
{
	if (!cqe_rx->err_level && !cqe_rx->err_opcode)
		return 0;

	if (netif_msg_rx_err(nic))
		netdev_err(nic->netdev,
			   "%s: RX error CQE err_level 0x%x err_opcode 0x%x\n",
			   nic->netdev->name,
			   cqe_rx->err_level, cqe_rx->err_opcode);

	switch (cqe_rx->err_opcode) {
	case CQ_RX_ERROP_RE_PARTIAL:
		this_cpu_inc(nic->drv_stats->rx_bgx_truncated_pkts);
		break;
	case CQ_RX_ERROP_RE_JABBER:
		this_cpu_inc(nic->drv_stats->rx_jabber_errs);
		break;
	case CQ_RX_ERROP_RE_FCS:
		this_cpu_inc(nic->drv_stats->rx_fcs_errs);
		break;
	case CQ_RX_ERROP_RE_RX_CTL:
		this_cpu_inc(nic->drv_stats->rx_bgx_errs);
		break;
	case CQ_RX_ERROP_PREL2_ERR:
		this_cpu_inc(nic->drv_stats->rx_prel2_errs);
		break;
	case CQ_RX_ERROP_L2_MAL:
		this_cpu_inc(nic->drv_stats->rx_l2_hdr_malformed);
		break;
	case CQ_RX_ERROP_L2_OVERSIZE:
		this_cpu_inc(nic->drv_stats->rx_oversize);
		break;
	case CQ_RX_ERROP_L2_UNDERSIZE:
		this_cpu_inc(nic->drv_stats->rx_undersize);
		break;
	case CQ_RX_ERROP_L2_LENMISM:
		this_cpu_inc(nic->drv_stats->rx_l2_len_mismatch);
		break;
	case CQ_RX_ERROP_L2_PCLP:
		this_cpu_inc(nic->drv_stats->rx_l2_pclp);
		break;
	case CQ_RX_ERROP_IP_NOT:
		this_cpu_inc(nic->drv_stats->rx_ip_ver_errs);
		break;
	case CQ_RX_ERROP_IP_CSUM_ERR:
		this_cpu_inc(nic->drv_stats->rx_ip_csum_errs);
		break;
	case CQ_RX_ERROP_IP_MAL:
		this_cpu_inc(nic->drv_stats->rx_ip_hdr_malformed);
		break;
	case CQ_RX_ERROP_IP_MALD:
		this_cpu_inc(nic->drv_stats->rx_ip_payload_malformed);
		break;
	case CQ_RX_ERROP_IP_HOP:
		this_cpu_inc(nic->drv_stats->rx_ip_ttl_errs);
		break;
	case CQ_RX_ERROP_L3_PCLP:
		this_cpu_inc(nic->drv_stats->rx_l3_pclp);
		break;
	case CQ_RX_ERROP_L4_MAL:
		this_cpu_inc(nic->drv_stats->rx_l4_malformed);
		break;
	case CQ_RX_ERROP_L4_CHK:
		this_cpu_inc(nic->drv_stats->rx_l4_csum_errs);
		break;
	case CQ_RX_ERROP_UDP_LEN:
		this_cpu_inc(nic->drv_stats->rx_udp_len_errs);
		break;
	case CQ_RX_ERROP_L4_PORT:
		this_cpu_inc(nic->drv_stats->rx_l4_port_errs);
		break;
	case CQ_RX_ERROP_TCP_FLAG:
		this_cpu_inc(nic->drv_stats->rx_tcp_flag_errs);
		break;
	case CQ_RX_ERROP_TCP_OFFSET:
		this_cpu_inc(nic->drv_stats->rx_tcp_offset_errs);
		break;
	case CQ_RX_ERROP_L4_PCLP:
		this_cpu_inc(nic->drv_stats->rx_l4_pclp);
		break;
	case CQ_RX_ERROP_RBDR_TRUNC:
		this_cpu_inc(nic->drv_stats->rx_truncated_pkts);
		break;
	}

	return 1;
}

/* Check for errors in the send cmp.queue entry */
int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cqe_send_t *cqe_tx)
{
	switch (cqe_tx->send_status) {
	case CQ_TX_ERROP_GOOD:
		return 0;
	case CQ_TX_ERROP_DESC_FAULT:
		this_cpu_inc(nic->drv_stats->tx_desc_fault);
		break;
	case CQ_TX_ERROP_HDR_CONS_ERR:
		this_cpu_inc(nic->drv_stats->tx_hdr_cons_err);
		break;
	case CQ_TX_ERROP_SUBDC_ERR:
		this_cpu_inc(nic->drv_stats->tx_subdesc_err);
		break;
	case CQ_TX_ERROP_MAX_SIZE_VIOL:
		this_cpu_inc(nic->drv_stats->tx_max_size_exceeded);
		break;
	case CQ_TX_ERROP_IMM_SIZE_OFLOW:
		this_cpu_inc(nic->drv_stats->tx_imm_size_oflow);
		break;
	case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
		this_cpu_inc(nic->drv_stats->tx_data_seq_err);
		break;
	case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
		this_cpu_inc(nic->drv_stats->tx_mem_seq_err);
		break;
	case CQ_TX_ERROP_LOCK_VIOL:
		this_cpu_inc(nic->drv_stats->tx_lock_viol);
		break;
	case CQ_TX_ERROP_DATA_FAULT:
		this_cpu_inc(nic->drv_stats->tx_data_fault);
		break;
	case CQ_TX_ERROP_TSTMP_CONFLICT:
		this_cpu_inc(nic->drv_stats->tx_tstmp_conflict);
		break;
	case CQ_TX_ERROP_TSTMP_TIMEOUT:
		this_cpu_inc(nic->drv_stats->tx_tstmp_timeout);
		break;
	case CQ_TX_ERROP_MEM_FAULT:
		this_cpu_inc(nic->drv_stats->tx_mem_fault);
		break;
	case CQ_TX_ERROP_CK_OVERLAP:
		this_cpu_inc(nic->drv_stats->tx_csum_overlap);
		break;
	case CQ_TX_ERROP_CK_OFLOW:
		this_cpu_inc(nic->drv_stats->tx_csum_overflow);
		break;
	}

	return 1;
}