/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <rte_bitmap.h>
#include <rte_memzone.h>
#include <unistd.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"

#include "hsi_struct_def_dpdk.h"

/*
 * Generic ring handling
 */

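/*
 * Reset the host-side state of a ring: zero the vmem shadow area (the
 * software buffer-tracking array carved out of the queue's memzone) and
 * drop the memzone reference.  Note the memzone itself is not freed here;
 * it is owned by the queue and released when the queue is torn down.
 */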
void bnxt_free_ring(struct bnxt_ring *ring)
{
        if (!ring)
                return;

        if (ring->vmem_size && *ring->vmem) {
                memset((char *)*ring->vmem, 0, ring->vmem_size);
                *ring->vmem = NULL;
        }
        ring->mem_zone = NULL;
}

/*
 * Ring groups
 */

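/*
 * Mark every ring group slot as unused by filling it with
 * HWRM_NA_SIGNATURE bytes; firmware assigns the real ring and stats
 * context IDs later, when the rings are allocated over HWRM.
 */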
int bnxt_init_ring_grps(struct bnxt *bp)
{
        unsigned int i;

        for (i = 0; i < bp->max_ring_grps; i++)
                memset(&bp->grp_info[i], (uint8_t)HWRM_NA_SIGNATURE,
                       sizeof(struct bnxt_ring_grp_info));

        return 0;
}

/*
 * Allocates a completion ring with vmem and stats, optionally also
 * allocating a TX and/or RX ring.  Pass NULL as tx_ring_info and/or
 * rx_ring_info to skip allocating them.
 *
 * Order in the allocation is:
 * stats - Always non-zero length
 * cp vmem - Always zero-length, supported for the bnxt_ring abstraction
 * tx vmem - Only non-zero length if tx_ring_info is not NULL
 * rx vmem - Only non-zero length if rx_ring_info is not NULL
 * cp bd ring - Always non-zero length
 * tx bd ring - Only non-zero length if tx_ring_info is not NULL
 * rx bd ring - Only non-zero length if rx_ring_info is not NULL
 */
int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
                     struct bnxt_tx_queue *txq,
                     struct bnxt_rx_queue *rxq,
                     struct bnxt_cp_ring_info *cp_ring_info,
                     const char *suffix)
{
        struct bnxt_ring *cp_ring = cp_ring_info->cp_ring_struct;
        struct bnxt_rx_ring_info *rx_ring_info = rxq ? rxq->rx_ring : NULL;
        struct bnxt_tx_ring_info *tx_ring_info = txq ? txq->tx_ring : NULL;
        struct bnxt_ring *tx_ring;
        struct bnxt_ring *rx_ring;
        struct rte_pci_device *pdev = bp->pdev;
        uint64_t rx_offloads = bp->eth_dev->data->dev_conf.rxmode.offloads;
        const struct rte_memzone *mz = NULL;
        char mz_name[RTE_MEMZONE_NAMESIZE];
        rte_iova_t mz_phys_addr;
        int sz;

        int stats_len = (tx_ring_info || rx_ring_info) ?
            RTE_CACHE_LINE_ROUNDUP(sizeof(struct hwrm_stat_ctx_query_output) -
                                   sizeof(struct hwrm_resp_hdr)) : 0;

        int cp_vmem_start = stats_len;
        int cp_vmem_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->vmem_size);

        int tx_vmem_start = cp_vmem_start + cp_vmem_len;
        int tx_vmem_len = tx_ring_info ?
            RTE_CACHE_LINE_ROUNDUP(tx_ring_info->tx_ring_struct->vmem_size) :
            0;

        int rx_vmem_start = tx_vmem_start + tx_vmem_len;
        int rx_vmem_len = rx_ring_info ?
            RTE_CACHE_LINE_ROUNDUP(rx_ring_info->rx_ring_struct->vmem_size) :
            0;

        int ag_vmem_start = rx_vmem_start + rx_vmem_len;
        int ag_vmem_len = rx_ring_info ?
            RTE_CACHE_LINE_ROUNDUP(rx_ring_info->ag_ring_struct->vmem_size) :
            0;

        int cp_ring_start = ag_vmem_start + ag_vmem_len;
        int cp_ring_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->ring_size *
                                                 sizeof(struct cmpl_base));

        int tx_ring_start = cp_ring_start + cp_ring_len;
        int tx_ring_len = tx_ring_info ?
            RTE_CACHE_LINE_ROUNDUP(tx_ring_info->tx_ring_struct->ring_size *
                                   sizeof(struct tx_bd_long)) : 0;

        int rx_ring_start = tx_ring_start + tx_ring_len;
        int rx_ring_len = rx_ring_info ?
            RTE_CACHE_LINE_ROUNDUP(rx_ring_info->rx_ring_struct->ring_size *
                                   sizeof(struct rx_prod_pkt_bd)) : 0;

        int ag_ring_start = rx_ring_start + rx_ring_len;
        int ag_ring_len = rx_ring_len * AGG_RING_SIZE_FACTOR;

        int ag_bitmap_start = ag_ring_start + ag_ring_len;
        int ag_bitmap_len = rx_ring_info ?
            RTE_CACHE_LINE_ROUNDUP(rte_bitmap_get_memory_footprint(
                rx_ring_info->rx_ring_struct->ring_size *
                AGG_RING_SIZE_FACTOR)) : 0;

        int tpa_info_start = ag_bitmap_start + ag_bitmap_len;
        int tpa_info_len = rx_ring_info ?
            RTE_CACHE_LINE_ROUNDUP(BNXT_TPA_MAX *
                                   sizeof(struct bnxt_tpa_info)) : 0;

        int total_alloc_len = tpa_info_start;

        if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
                total_alloc_len += tpa_info_len;

        snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
                 "bnxt_%04x:%02x:%02x:%02x-%04x_%s", pdev->addr.domain,
                 pdev->addr.bus, pdev->addr.devid, pdev->addr.function, qidx,
                 suffix);
        mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
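
        /*
         * Reuse an existing memzone of the same name if one was reserved by
         * an earlier (re)configuration of this queue; otherwise reserve a
         * fresh IOVA-contiguous zone, preferring 2MB pages (size hint only).
         */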
        mz = rte_memzone_lookup(mz_name);
        if (!mz) {
                mz = rte_memzone_reserve_aligned(mz_name, total_alloc_len,
                                                 SOCKET_ID_ANY,
                                                 RTE_MEMZONE_2MB |
                                                 RTE_MEMZONE_SIZE_HINT_ONLY |
                                                 RTE_MEMZONE_IOVA_CONTIG,
                                                 getpagesize());
                if (mz == NULL)
                        return -ENOMEM;
        }
        memset(mz->addr, 0, mz->len);
        mz_phys_addr = mz->iova;
        if ((unsigned long)mz->addr == mz_phys_addr) {
                PMD_DRV_LOG(WARNING,
                            "Memzone physical address same as virtual.\n");
                PMD_DRV_LOG(WARNING,
                            "Using rte_mem_virt2iova()\n");
                for (sz = 0; sz < total_alloc_len; sz += getpagesize())
                        rte_mem_lock_page(((char *)mz->addr) + sz);
                mz_phys_addr = rte_mem_virt2iova(mz->addr);
                if (mz_phys_addr == 0) {
                        PMD_DRV_LOG(ERR,
                                    "unable to map ring address to physical memory\n");
                        return -ENOMEM;
                }
        }

        if (tx_ring_info) {
                txq->mz = mz;
                tx_ring = tx_ring_info->tx_ring_struct;

                tx_ring->bd = ((char *)mz->addr + tx_ring_start);
                tx_ring_info->tx_desc_ring = (struct tx_bd_long *)tx_ring->bd;
                tx_ring->bd_dma = mz_phys_addr + tx_ring_start;
                tx_ring_info->tx_desc_mapping = tx_ring->bd_dma;
                tx_ring->mem_zone = (const void *)mz;

                if (!tx_ring->bd)
                        return -ENOMEM;
                if (tx_ring->vmem_size) {
                        tx_ring->vmem =
                            (void **)((char *)mz->addr + tx_vmem_start);
                        tx_ring_info->tx_buf_ring =
                            (struct bnxt_sw_tx_bd *)tx_ring->vmem;
                }
        }

        if (rx_ring_info) {
                rxq->mz = mz;
                rx_ring = rx_ring_info->rx_ring_struct;

                rx_ring->bd = ((char *)mz->addr + rx_ring_start);
                rx_ring_info->rx_desc_ring =
                    (struct rx_prod_pkt_bd *)rx_ring->bd;
                rx_ring->bd_dma = mz_phys_addr + rx_ring_start;
                rx_ring_info->rx_desc_mapping = rx_ring->bd_dma;
                rx_ring->mem_zone = (const void *)mz;

                if (!rx_ring->bd)
                        return -ENOMEM;
                if (rx_ring->vmem_size) {
                        rx_ring->vmem =
                            (void **)((char *)mz->addr + rx_vmem_start);
                        rx_ring_info->rx_buf_ring =
                            (struct bnxt_sw_rx_bd *)rx_ring->vmem;
                }

                rx_ring = rx_ring_info->ag_ring_struct;

                rx_ring->bd = ((char *)mz->addr + ag_ring_start);
                rx_ring_info->ag_desc_ring =
                    (struct rx_prod_pkt_bd *)rx_ring->bd;
                rx_ring->bd_dma = mz_phys_addr + ag_ring_start;
                rx_ring_info->ag_desc_mapping = rx_ring->bd_dma;
                rx_ring->mem_zone = (const void *)mz;

                if (!rx_ring->bd)
                        return -ENOMEM;
                if (rx_ring->vmem_size) {
                        rx_ring->vmem =
                            (void **)((char *)mz->addr + ag_vmem_start);
                        rx_ring_info->ag_buf_ring =
                            (struct bnxt_sw_rx_bd *)rx_ring->vmem;
                }

                rx_ring_info->ag_bitmap =
                    rte_bitmap_init(rx_ring_info->rx_ring_struct->ring_size *
                                    AGG_RING_SIZE_FACTOR, (uint8_t *)mz->addr +
                                    ag_bitmap_start, ag_bitmap_len);

                /* TPA info */
                if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
                        rx_ring_info->tpa_info =
                            ((struct bnxt_tpa_info *)((char *)mz->addr +
                                                      tpa_info_start));
        }

        cp_ring->bd = ((char *)mz->addr + cp_ring_start);
        cp_ring->bd_dma = mz_phys_addr + cp_ring_start;
        cp_ring_info->cp_desc_ring = cp_ring->bd;
        cp_ring_info->cp_desc_mapping = cp_ring->bd_dma;
        cp_ring->mem_zone = (const void *)mz;

        if (!cp_ring->bd)
                return -ENOMEM;
        if (cp_ring->vmem_size)
                *cp_ring->vmem = ((char *)mz->addr + stats_len);
        if (stats_len) {
                cp_ring_info->hw_stats = mz->addr;
                cp_ring_info->hw_stats_map = mz_phys_addr;
        }
        cp_ring_info->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
        return 0;
}
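
/*
 * For illustration only: the memzone above is carved into back-to-back
 * regions, each rounded up to a cache line.  Taking a hypothetical TX-only
 * queue with a 1024-entry completion ring and a 256-entry TX ring, the
 * layout would be roughly:
 *
 *   stats      @ 0               (one hwrm_stat_ctx_query_output body)
 *   tx vmem    @ stats_len       (256 software TX buffer slots)
 *   cp bd ring @ tx vmem end     (1024 * sizeof(struct cmpl_base))
 *   tx bd ring @ cp bd ring end  (256 * sizeof(struct tx_bd_long))
 *
 * Exact sizes depend on the HSI structure definitions; treat the numbers
 * as a sketch of the scheme, not the real layout.
 */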

static void bnxt_init_dflt_coal(struct bnxt_coal *coal)
{
        /* Tick values in microseconds.
         * 1 coal_buf x bufs_per_record = 1 completion record.
         */
        coal->num_cmpl_aggr_int = BNXT_NUM_CMPL_AGGR_INT;
        /* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs. */
        coal->num_cmpl_dma_aggr = BNXT_NUM_CMPL_DMA_AGGR;
        /* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs. */
        coal->num_cmpl_dma_aggr_during_int = BNXT_NUM_CMPL_DMA_AGGR_DURING_INT;
        coal->int_lat_tmr_max = BNXT_INT_LAT_TMR_MAX;
        /* min timer set to 1/2 of interrupt timer */
        coal->int_lat_tmr_min = BNXT_INT_LAT_TMR_MIN;
        /* buf timer set to 1/4 of interrupt timer */
        coal->cmpl_aggr_dma_tmr = BNXT_CMPL_AGGR_DMA_TMR;
        coal->cmpl_aggr_dma_tmr_during_int = BNXT_CMPL_AGGR_DMA_TMR_DURING_INT;
}
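
/*
 * These defaults trade interrupt rate against completion latency: larger
 * aggregation counts and timers mean fewer interrupts but later completion
 * processing.  The values are pushed to firmware per completion ring via
 * bnxt_hwrm_set_ring_coal() in bnxt_alloc_hwrm_rings() below.
 */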

int bnxt_alloc_hwrm_rx_ring(struct bnxt *bp, int queue_index)
{
        struct rte_pci_device *pci_dev = bp->pdev;
        struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
        struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
        struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
        struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
        struct bnxt_ring *ring = rxr->rx_ring_struct;
        unsigned int map_idx = queue_index + bp->rx_cp_nr_rings;
        int rc = 0;

        bp->grp_info[queue_index].fw_stats_ctx = cpr->hw_stats_ctx_id;

        /* Rx cmpl */
        rc = bnxt_hwrm_ring_alloc(bp, cp_ring,
                                  HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
                                  queue_index, HWRM_NA_SIGNATURE,
                                  HWRM_NA_SIGNATURE);
        if (rc)
                goto err_out;

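        /*
         * Doorbell registers live in PCI BAR 2; each ring gets a
         * BNXT_DB_SIZE-byte doorbell window, selected by its logical index.
         */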
        cpr->cp_doorbell = (char *)pci_dev->mem_resource[2].addr +
            queue_index * BNXT_DB_SIZE;
        bp->grp_info[queue_index].cp_fw_ring_id = cp_ring->fw_ring_id;
        B_CP_DIS_DB(cpr, cpr->cp_raw_cons);

        if (!queue_index) {
                /*
                 * In order to save completion resources, use the first
                 * completion ring from PF or VF as the default completion
                 * ring for async event and HWRM forward response handling.
                 */
                bp->def_cp_ring = cpr;
                rc = bnxt_hwrm_set_async_event_cr(bp);
                if (rc)
                        goto err_out;
        }

        /* Rx ring */
        rc = bnxt_hwrm_ring_alloc(bp, ring, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
                                  queue_index, cpr->hw_stats_ctx_id,
                                  cp_ring->fw_ring_id);
        if (rc)
                goto err_out;

        rxr->rx_prod = 0;
        rxr->rx_doorbell = (char *)pci_dev->mem_resource[2].addr +
            queue_index * BNXT_DB_SIZE;
        bp->grp_info[queue_index].rx_fw_ring_id = ring->fw_ring_id;
        B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);

        /* Agg ring */
        ring = rxr->ag_ring_struct;
        if (ring == NULL) {
                PMD_DRV_LOG(ERR, "Alloc AGG Ring is NULL!\n");
                rc = -ENOMEM;
                goto err_out;
        }

        rc = bnxt_hwrm_ring_alloc(bp, ring, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
                                  map_idx, HWRM_NA_SIGNATURE,
                                  cp_ring->fw_ring_id);
        if (rc)
                goto err_out;

        PMD_DRV_LOG(DEBUG, "Alloc AGG Done!\n");
        rxr->ag_prod = 0;
        rxr->ag_doorbell = (char *)pci_dev->mem_resource[2].addr +
            map_idx * BNXT_DB_SIZE;
        bp->grp_info[queue_index].ag_fw_ring_id = ring->fw_ring_id;
        B_RX_DB(rxr->ag_doorbell, rxr->ag_prod);

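        /*
         * Receive buffers must be able to hold a maximally sized frame:
         * max MTU plus Ethernet header, CRC, and two VLAN tags (QinQ).
         */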
        rxq->rx_buf_use_size = BNXT_MAX_MTU + ETHER_HDR_LEN +
            ETHER_CRC_LEN + (2 * VLAN_TAG_SIZE);

        if (bp->eth_dev->data->rx_queue_state[queue_index] ==
            RTE_ETH_QUEUE_STATE_STARTED) {
                if (bnxt_init_one_rx_ring(rxq)) {
                        PMD_DRV_LOG(ERR, "bnxt_init_one_rx_ring failed!\n");
                        bnxt_rx_queue_release_op(rxq);
                        rc = -ENOMEM;
                        goto err_out;
                }
                B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
                B_RX_DB(rxr->ag_doorbell, rxr->ag_prod);
        }
        rxq->index = queue_index;
        PMD_DRV_LOG(INFO,
                    "queue %d, rx_deferred_start %d, state %d!\n",
                    queue_index, rxq->rx_deferred_start,
                    bp->eth_dev->data->rx_queue_state[queue_index]);

err_out:
        return rc;
}

/* ring_grp usage:
 * [0] = default completion ring
 * [1 -> +rx_cp_nr_rings] = rx_cp, rx rings
 * [1+rx_cp_nr_rings + 1 -> +tx_cp_nr_rings] = tx_cp, tx rings
 */
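/*
 * For example, with rx_cp_nr_rings = 2 and tx_cp_nr_rings = 2, the scheme
 * above puts the default completion ring in slot [0], the RX
 * completion/RX ring pairs in slots [1]-[2], and the TX completion/TX
 * ring pairs in the slots that follow.
 */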
int bnxt_alloc_hwrm_rings(struct bnxt *bp)
{
        struct bnxt_coal coal;
        unsigned int i;
        int rc = 0;

        bnxt_init_dflt_coal(&coal);

        for (i = 0; i < bp->rx_cp_nr_rings; i++) {
                struct bnxt_rx_queue *rxq = bp->rx_queues[i];
                struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
                struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
                struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
                struct bnxt_ring *ring = rxr->rx_ring_struct;
                unsigned int map_idx = i + bp->rx_cp_nr_rings;

                bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;

                /* Rx cmpl */
                rc = bnxt_hwrm_ring_alloc(bp, cp_ring,
                                          HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
                                          i, HWRM_NA_SIGNATURE,
                                          HWRM_NA_SIGNATURE);
                if (rc)
                        goto err_out;
                cpr->cp_doorbell = (char *)bp->doorbell_base + i * 0x80;
                bp->grp_info[i].cp_fw_ring_id = cp_ring->fw_ring_id;
                B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
                bnxt_hwrm_set_ring_coal(bp, &coal, cp_ring->fw_ring_id);

                if (!i) {
                        /*
                         * In order to save completion resources, use the first
                         * completion ring from PF or VF as the default
                         * completion ring for async event & HWRM
                         * forward response handling.
                         */
                        bp->def_cp_ring = cpr;
                        rc = bnxt_hwrm_set_async_event_cr(bp);
                        if (rc)
                                goto err_out;
                }

                /* Rx ring */
                rc = bnxt_hwrm_ring_alloc(bp, ring,
                                          HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
                                          i, cpr->hw_stats_ctx_id,
                                          cp_ring->fw_ring_id);
                if (rc)
                        goto err_out;
                rxr->rx_prod = 0;
                rxr->rx_doorbell = (char *)bp->doorbell_base + i * 0x80;
                bp->grp_info[i].rx_fw_ring_id = ring->fw_ring_id;
                B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);

                /* Agg ring */
                ring = rxr->ag_ring_struct;
                if (ring == NULL) {
                        PMD_DRV_LOG(ERR, "Alloc AGG Ring is NULL!\n");
                        rc = -ENOMEM;
                        goto err_out;
                }

                rc = bnxt_hwrm_ring_alloc(bp, ring,
                                          HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
                                          map_idx, HWRM_NA_SIGNATURE,
                                          cp_ring->fw_ring_id);
                if (rc)
                        goto err_out;
                PMD_DRV_LOG(DEBUG, "Alloc AGG Done!\n");
                rxr->ag_prod = 0;
                rxr->ag_doorbell = (char *)bp->doorbell_base + map_idx * 0x80;
                bp->grp_info[i].ag_fw_ring_id = ring->fw_ring_id;
                B_RX_DB(rxr->ag_doorbell, rxr->ag_prod);

                rxq->rx_buf_use_size = BNXT_MAX_MTU + ETHER_HDR_LEN +
                    ETHER_CRC_LEN + (2 * VLAN_TAG_SIZE);
                if (bnxt_init_one_rx_ring(rxq)) {
                        PMD_DRV_LOG(ERR, "bnxt_init_one_rx_ring failed!\n");
                        bnxt_rx_queue_release_op(rxq);
                        return -ENOMEM;
                }
                B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
                B_RX_DB(rxr->ag_doorbell, rxr->ag_prod);
                rxq->index = i;
        }

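        /*
         * TX completion/TX rings are indexed after the RX completion rings
         * (idx = i + rx_cp_nr_rings below), which also selects their
         * doorbell offset.
         */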
        for (i = 0; i < bp->tx_cp_nr_rings; i++) {
                struct bnxt_tx_queue *txq = bp->tx_queues[i];
                struct bnxt_cp_ring_info *cpr = txq->cp_ring;
                struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
                struct bnxt_tx_ring_info *txr = txq->tx_ring;
                struct bnxt_ring *ring = txr->tx_ring_struct;
                unsigned int idx = i + bp->rx_cp_nr_rings;

                /* Tx cmpl */
                rc = bnxt_hwrm_ring_alloc(bp, cp_ring,
                                          HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
                                          idx, HWRM_NA_SIGNATURE,
                                          HWRM_NA_SIGNATURE);
                if (rc)
                        goto err_out;

                cpr->cp_doorbell = (char *)bp->doorbell_base + idx * 0x80;
                B_CP_DIS_DB(cpr, cpr->cp_raw_cons);

                /* Tx ring */
                rc = bnxt_hwrm_ring_alloc(bp, ring,
                                          HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
                                          idx, cpr->hw_stats_ctx_id,
                                          cp_ring->fw_ring_id);
                if (rc)
                        goto err_out;

                txr->tx_doorbell = (char *)bp->doorbell_base + idx * 0x80;
                txq->index = idx;
                bnxt_hwrm_set_ring_coal(bp, &coal, cp_ring->fw_ring_id);
        }

err_out:
        return rc;
}