]> git.proxmox.com Git - ceph.git/blob - ceph/src/dpdk/drivers/net/bnxt/bnxt_ring.c
add subtree-ish sources for 12.0.3
[ceph.git] / ceph / src / dpdk / drivers / net / bnxt / bnxt_ring.c
1 /*-
2 * BSD LICENSE
3 *
4 * Copyright(c) Broadcom Limited.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * * Neither the name of Broadcom Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 #include <rte_memzone.h>
35
36 #include "bnxt.h"
37 #include "bnxt_cpr.h"
38 #include "bnxt_hwrm.h"
39 #include "bnxt_ring.h"
40 #include "bnxt_rxq.h"
41 #include "bnxt_rxr.h"
42 #include "bnxt_txq.h"
43 #include "bnxt_txr.h"
44
45 #include "hsi_struct_def_dpdk.h"
46
47 /*
48 * Generic ring handling
49 */
50
51 void bnxt_free_ring(struct bnxt_ring *ring)
52 {
53 if (ring->vmem_size && *ring->vmem) {
54 memset((char *)*ring->vmem, 0, ring->vmem_size);
55 *ring->vmem = NULL;
56 }
57 rte_memzone_free((const struct rte_memzone *)ring->mem_zone);
58 }
59
60 /*
61 * Ring groups
62 */
63
64 void bnxt_init_ring_grps(struct bnxt *bp)
65 {
66 unsigned int i;
67
68 for (i = 0; i < bp->max_ring_grps; i++)
69 memset(&bp->grp_info[i], (uint8_t)HWRM_NA_SIGNATURE,
70 sizeof(struct bnxt_ring_grp_info));
71 }
72
73 /*
74 * Allocates a completion ring with vmem and stats optionally also allocating
75 * a TX and/or RX ring. Passing NULL as tx_ring_info and/or rx_ring_info
76 * to not allocate them.
77 *
78 * Order in the allocation is:
79 * stats - Always non-zero length
80 * cp vmem - Always zero-length, supported for the bnxt_ring abstraction
81 * tx vmem - Only non-zero length if tx_ring_info is not NULL
82 * rx vmem - Only non-zero length if rx_ring_info is not NULL
83 * cp bd ring - Always non-zero length
84 * tx bd ring - Only non-zero length if tx_ring_info is not NULL
85 * rx bd ring - Only non-zero length if rx_ring_info is not NULL
86 */
int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
		     struct bnxt_tx_ring_info *tx_ring_info,
		     struct bnxt_rx_ring_info *rx_ring_info,
		     struct bnxt_cp_ring_info *cp_ring_info,
		     const char *suffix)
{
	struct bnxt_ring *cp_ring = cp_ring_info->cp_ring_struct;
	struct bnxt_ring *tx_ring;
	struct bnxt_ring *rx_ring;
	struct rte_pci_device *pdev = bp->pdev;
	const struct rte_memzone *mz = NULL;
	char mz_name[RTE_MEMZONE_NAMESIZE];

	/* Stats block is only carved out when a TX or RX ring shares the
	 * memzone; a bare completion ring gets no stats area.
	 */
	int stats_len = (tx_ring_info || rx_ring_info) ?
	    RTE_CACHE_LINE_ROUNDUP(sizeof(struct ctx_hw_stats64)) : 0;

	/* All sub-areas are laid out back to back in one memzone, each
	 * cache-line aligned: *_start is the byte offset of an area and
	 * *_len its rounded size (zero when that ring was not requested),
	 * in the order documented in the comment above this function.
	 */
	int cp_vmem_start = stats_len;
	int cp_vmem_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->vmem_size);

	int tx_vmem_start = cp_vmem_start + cp_vmem_len;
	int tx_vmem_len =
	    tx_ring_info ? RTE_CACHE_LINE_ROUNDUP(tx_ring_info->
						  tx_ring_struct->vmem_size) : 0;

	int rx_vmem_start = tx_vmem_start + tx_vmem_len;
	int rx_vmem_len = rx_ring_info ?
		RTE_CACHE_LINE_ROUNDUP(rx_ring_info->
				       rx_ring_struct->vmem_size) : 0;

	int cp_ring_start = rx_vmem_start + rx_vmem_len;
	int cp_ring_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->ring_size *
						 sizeof(struct cmpl_base));

	int tx_ring_start = cp_ring_start + cp_ring_len;
	int tx_ring_len = tx_ring_info ?
	    RTE_CACHE_LINE_ROUNDUP(tx_ring_info->tx_ring_struct->ring_size *
				   sizeof(struct tx_bd_long)) : 0;

	int rx_ring_start = tx_ring_start + tx_ring_len;
	int rx_ring_len = rx_ring_info ?
		RTE_CACHE_LINE_ROUNDUP(rx_ring_info->rx_ring_struct->ring_size *
				       sizeof(struct rx_prod_pkt_bd)) : 0;

	int total_alloc_len = rx_ring_start + rx_ring_len;

	/* Name is unique per PCI address, queue index and suffix so a zone
	 * surviving from a previous run can be looked up and reused instead
	 * of reserved again.
	 */
	snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
		 "bnxt_%04x:%02x:%02x:%02x-%04x_%s", pdev->addr.domain,
		 pdev->addr.bus, pdev->addr.devid, pdev->addr.function, qidx,
		 suffix);
	mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
	mz = rte_memzone_lookup(mz_name);
	if (!mz) {
		mz = rte_memzone_reserve(mz_name, total_alloc_len,
					 SOCKET_ID_ANY,
					 RTE_MEMZONE_2MB |
					 RTE_MEMZONE_SIZE_HINT_ONLY);
		if (mz == NULL)
			return -ENOMEM;
	}
	/* Zero the whole zone (reused or fresh) before handing pieces out. */
	memset(mz->addr, 0, mz->len);

	if (tx_ring_info) {
		tx_ring = tx_ring_info->tx_ring_struct;

		/* Point the TX BD ring at its slice; keep both the virtual
		 * address and the physical (DMA) address of the same offset.
		 */
		tx_ring->bd = ((char *)mz->addr + tx_ring_start);
		tx_ring_info->tx_desc_ring = (struct tx_bd_long *)tx_ring->bd;
		tx_ring->bd_dma = mz->phys_addr + tx_ring_start;
		tx_ring_info->tx_desc_mapping = tx_ring->bd_dma;
		tx_ring->mem_zone = (const void *)mz;

		if (!tx_ring->bd)
			return -ENOMEM;
		if (tx_ring->vmem_size) {
			tx_ring->vmem =
			    (void **)((char *)mz->addr + tx_vmem_start);
			tx_ring_info->tx_buf_ring =
			    (struct bnxt_sw_tx_bd *)tx_ring->vmem;
		}
	}

	if (rx_ring_info) {
		rx_ring = rx_ring_info->rx_ring_struct;

		/* Same scheme as TX: virtual + DMA address of the RX slice. */
		rx_ring->bd = ((char *)mz->addr + rx_ring_start);
		rx_ring_info->rx_desc_ring =
		    (struct rx_prod_pkt_bd *)rx_ring->bd;
		rx_ring->bd_dma = mz->phys_addr + rx_ring_start;
		rx_ring_info->rx_desc_mapping = rx_ring->bd_dma;
		rx_ring->mem_zone = (const void *)mz;

		if (!rx_ring->bd)
			return -ENOMEM;
		if (rx_ring->vmem_size) {
			rx_ring->vmem =
			    (void **)((char *)mz->addr + rx_vmem_start);
			rx_ring_info->rx_buf_ring =
			    (struct bnxt_sw_rx_bd *)rx_ring->vmem;
		}
	}

	cp_ring->bd = ((char *)mz->addr + cp_ring_start);
	cp_ring->bd_dma = mz->phys_addr + cp_ring_start;
	cp_ring_info->cp_desc_ring = cp_ring->bd;
	cp_ring_info->cp_desc_mapping = cp_ring->bd_dma;
	cp_ring->mem_zone = (const void *)mz;

	if (!cp_ring->bd)
		return -ENOMEM;
	/* cp vmem is documented as zero-length above; guard just in case. */
	if (cp_ring->vmem_size)
		*cp_ring->vmem = ((char *)mz->addr + stats_len);
	if (stats_len) {
		/* HW stats block sits at the very start of the memzone. */
		cp_ring_info->hw_stats = mz->addr;
		cp_ring_info->hw_stats_map = mz->phys_addr;
	}
	cp_ring_info->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
	return 0;
}
204
205 /* ring_grp usage:
206 * [0] = default completion ring
207 * [1 -> +rx_cp_nr_rings] = rx_cp, rx rings
208 * [1+rx_cp_nr_rings + 1 -> +tx_cp_nr_rings] = tx_cp, tx rings
209 */
210 int bnxt_alloc_hwrm_rings(struct bnxt *bp)
211 {
212 unsigned int i;
213 int rc = 0;
214
215 /* Default completion ring */
216 {
217 struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
218 struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
219
220 rc = bnxt_hwrm_ring_alloc(bp, cp_ring,
221 HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL,
222 0, HWRM_NA_SIGNATURE);
223 if (rc)
224 goto err_out;
225 cpr->cp_doorbell =
226 (char *)bp->eth_dev->pci_dev->mem_resource[2].addr;
227 B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
228 bp->grp_info[0].cp_fw_ring_id = cp_ring->fw_ring_id;
229 }
230
231 for (i = 0; i < bp->rx_cp_nr_rings; i++) {
232 struct bnxt_rx_queue *rxq = bp->rx_queues[i];
233 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
234 struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
235 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
236 struct bnxt_ring *ring = rxr->rx_ring_struct;
237 unsigned int idx = i + 1;
238
239 /* Rx cmpl */
240 rc = bnxt_hwrm_ring_alloc(bp, cp_ring,
241 HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL,
242 idx, HWRM_NA_SIGNATURE);
243 if (rc)
244 goto err_out;
245 cpr->cp_doorbell =
246 (char *)bp->eth_dev->pci_dev->mem_resource[2].addr +
247 idx * 0x80;
248 bp->grp_info[idx].cp_fw_ring_id = cp_ring->fw_ring_id;
249 B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
250
251 /* Rx ring */
252 rc = bnxt_hwrm_ring_alloc(bp, ring,
253 HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
254 idx, cpr->hw_stats_ctx_id);
255 if (rc)
256 goto err_out;
257 rxr->rx_prod = 0;
258 rxr->rx_doorbell =
259 (char *)bp->eth_dev->pci_dev->mem_resource[2].addr +
260 idx * 0x80;
261 bp->grp_info[idx].rx_fw_ring_id = ring->fw_ring_id;
262 B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
263 if (bnxt_init_one_rx_ring(rxq)) {
264 RTE_LOG(ERR, PMD, "bnxt_init_one_rx_ring failed!");
265 bnxt_rx_queue_release_op(rxq);
266 return -ENOMEM;
267 }
268 B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
269 }
270
271 for (i = 0; i < bp->tx_cp_nr_rings; i++) {
272 struct bnxt_tx_queue *txq = bp->tx_queues[i];
273 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
274 struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
275 struct bnxt_tx_ring_info *txr = txq->tx_ring;
276 struct bnxt_ring *ring = txr->tx_ring_struct;
277 unsigned int idx = 1 + bp->rx_cp_nr_rings + i;
278
279 /* Tx cmpl */
280 rc = bnxt_hwrm_ring_alloc(bp, cp_ring,
281 HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL,
282 idx, HWRM_NA_SIGNATURE);
283 if (rc)
284 goto err_out;
285
286 cpr->cp_doorbell =
287 (char *)bp->eth_dev->pci_dev->mem_resource[2].addr +
288 idx * 0x80;
289 bp->grp_info[idx].cp_fw_ring_id = cp_ring->fw_ring_id;
290 B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
291
292 /* Tx ring */
293 rc = bnxt_hwrm_ring_alloc(bp, ring,
294 HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
295 idx, cpr->hw_stats_ctx_id);
296 if (rc)
297 goto err_out;
298
299 txr->tx_doorbell =
300 (char *)bp->eth_dev->pci_dev->mem_resource[2].addr +
301 idx * 0x80;
302 }
303
304 err_out:
305 return rc;
306 }