/*-
 * BSD LICENSE
 *
 * Copyright(c) Broadcom Limited.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Broadcom Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

#include <rte_io.h>

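/*
 * HWRM_CMD_TIMEOUT is a poll count, not a time in microseconds: each
 * iteration of the valid-bit poll below sleeps 600us, so 2000 iterations
 * bound a command at roughly 1.2 seconds.
 */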
#define HWRM_CMD_TIMEOUT 2000

/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
 * fails (i.e. a timeout), or a positive non-zero HWRM error code if the
 * command is rejected by the ChiMP firmware.
 */

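/*
 * Write the request into the HWRM channel in BAR0, zero-pad it to the
 * maximum request length, ring the channel doorbell at offset 0x100, then
 * poll the last byte of the response for HWRM_RESP_VALID_KEY.  Callers
 * must hold bp->hwrm_lock; use bnxt_hwrm_send_message() otherwise.
 */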
static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg,
					 uint32_t msg_len)
{
	unsigned int i;
	struct input *req = msg;
	struct output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t *data = msg;
	uint8_t *bar;
	uint8_t *valid;

	/* Write request msg to hwrm channel */
	for (i = 0; i < msg_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + i;
		rte_write32(*data, bar);
		data++;
	}

	/* Zero the rest of the request space */
	for (; i < bp->max_req_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + i;
		rte_write32(0, bar);
	}

	/* Ring channel doorbell */
	bar = (uint8_t *)bp->bar0 + 0x100;
	rte_write32(1, bar);

	/* Poll for the valid bit */
	for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
		/* Sanity check on the resp->resp_len */
		rte_rmb();
		if (resp->resp_len && resp->resp_len <=
		    bp->max_resp_len) {
			/* Last byte of resp contains the valid key */
			valid = (uint8_t *)resp + resp->resp_len - 1;
			if (*valid == HWRM_RESP_VALID_KEY)
				break;
		}
		rte_delay_us(600);
	}

	if (i >= HWRM_CMD_TIMEOUT) {
		RTE_LOG(ERR, PMD, "Error sending msg %x\n",
			req->req_type);
		goto err_ret;
	}
	return 0;

err_ret:
	return -1;
}

static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, uint32_t msg_len)
{
	int rc;

	rte_spinlock_lock(&bp->hwrm_lock);
	rc = bnxt_hwrm_send_message_locked(bp, msg, msg_len);
	rte_spinlock_unlock(&bp->hwrm_lock);
	return rc;
}

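/*
 * HWRM_PREP() clears the shared response buffer and fills the common
 * request header: request type, completion ring (-1 for none), a
 * monotonically increasing sequence id, the broadcast target id, and the
 * DMA address firmware should write the response to.
 */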
#define HWRM_PREP(req, type, cr, resp) \
	memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
	req.req_type = rte_cpu_to_le_16(HWRM_##type); \
	req.cmpl_ring = rte_cpu_to_le_16(cr); \
	req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
	req.target_id = rte_cpu_to_le_16(0xffff); \
	req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr)

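/*
 * HWRM_CHECK_RESULT propagates a transport failure (non-zero rc from the
 * send) first, then a firmware-reported error_code from the response;
 * both paths return from the calling function.
 */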
#define HWRM_CHECK_RESULT \
	{ \
		if (rc) { \
			RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
				__func__, rc); \
			return rc; \
		} \
		if (resp->error_code) { \
			rc = rte_le_to_cpu_16(resp->error_code); \
			RTE_LOG(ERR, PMD, "%s error %d\n", __func__, rc); \
			return rc; \
		} \
	}

int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.mask = 0;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t mask = 0;

	HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	/* FIXME add multicast flag, when multicast adding options is supported
	 * by ethtool.
	 */
	if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
	if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
	req.mask = rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
				    mask);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

int bnxt_hwrm_clear_filter(struct bnxt *bp,
			   struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, CFA_L2_FILTER_FREE, -1, resp);

	req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	filter->fw_l2_filter_id = -1;

	return 0;
}

int bnxt_hwrm_set_filter(struct bnxt *bp,
			 struct bnxt_vnic_info *vnic,
			 struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;

	HWRM_PREP(req, CFA_L2_FILTER_ALLOC, -1, resp);

	req.flags = rte_cpu_to_le_32(filter->flags);

	enables = filter->enables |
		HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
	req.dst_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
		memcpy(req.l2_addr, filter->l2_addr,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
		memcpy(req.l2_addr_mask, filter->l2_addr_mask,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
		req.l2_ovlan = filter->l2_ovlan;
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
		req.l2_ovlan_mask = filter->l2_ovlan_mask;

	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);

	return rc;
}

int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, void *fwd_cmd)
{
	int rc;
	struct hwrm_exec_fwd_resp_input req = {.req_type = 0 };
	struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, EXEC_FWD_RESP, -1, resp);

	memcpy(req.encap_request, fwd_cmd,
	       sizeof(req.encap_request));

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_qcaps_input req = {.req_type = 0 };
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_QCAPS, -1, resp);

	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
	if (BNXT_PF(bp)) {
		struct bnxt_pf_info *pf = &bp->pf;

		pf->fw_fid = rte_le_to_cpu_32(resp->fid);
		pf->port_id = resp->port_id;
		memcpy(pf->mac_addr, resp->mac_address, ETHER_ADDR_LEN);
		pf->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
		pf->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
		pf->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
		pf->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
		pf->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
		pf->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
		pf->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
		pf->max_vfs = rte_le_to_cpu_16(resp->max_vfs);
	} else {
		struct bnxt_vf_info *vf = &bp->vf;

		vf->fw_fid = rte_le_to_cpu_32(resp->fid);
		memcpy(vf->mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
		vf->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
		vf->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
		vf->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
		vf->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
		vf->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
		vf->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
	}

	return rc;
}

int bnxt_hwrm_func_reset(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_reset_input req = {.req_type = 0 };
	struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_RESET, -1, resp);

	req.enables = rte_cpu_to_le_32(0);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

int bnxt_hwrm_func_driver_register(struct bnxt *bp, uint32_t flags,
				   uint32_t *vf_req_fwd)
{
	int rc;
	struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (bp->flags & BNXT_FLAG_REGISTERED)
		return 0;

	HWRM_PREP(req, FUNC_DRV_RGTR, -1, resp);
	req.flags = flags;
	req.enables = HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
		      HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD;
	req.ver_maj = RTE_VER_YEAR;
	req.ver_min = RTE_VER_MONTH;
	req.ver_upd = RTE_VER_MINOR;

	memcpy(req.vf_req_fwd, vf_req_fwd, sizeof(req.vf_req_fwd));

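	/*
	 * Bit 0 of async_event_fwd[0] requests forwarding of async event
	 * id 0, which the HWRM definitions describe as the link status
	 * change completion (hence the TODO to use the named macro).
	 */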
	req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1); /* TODO: Use MACRO */

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	bp->flags |= BNXT_FLAG_REGISTERED;

	return rc;
}

int bnxt_hwrm_ver_get(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_ver_get_input req = {.req_type = 0 };
	struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t my_version;
	uint32_t fw_version;
	uint16_t max_resp_len;
	char type[RTE_MEMZONE_NAMESIZE];

	HWRM_PREP(req, VER_GET, -1, resp);

	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

	/*
	 * Hold the lock since we may be adjusting the response pointers.
	 * HWRM_CHECK_RESULT would return with the lock held, so check the
	 * result inline and release the lock on the error path.
	 */
	rte_spinlock_lock(&bp->hwrm_lock);
	rc = bnxt_hwrm_send_message_locked(bp, &req, sizeof(req));
	if (rc) {
		RTE_LOG(ERR, PMD, "%s failed rc:%d\n", __func__, rc);
		goto error;
	}
	if (resp->error_code) {
		rc = rte_le_to_cpu_16(resp->error_code);
		RTE_LOG(ERR, PMD, "%s error %d\n", __func__, rc);
		goto error;
	}

	RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
		resp->hwrm_intf_maj, resp->hwrm_intf_min,
		resp->hwrm_intf_upd,
		resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
	RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
		HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

	my_version = HWRM_VERSION_MAJOR << 16;
	my_version |= HWRM_VERSION_MINOR << 8;
	my_version |= HWRM_VERSION_UPDATE;

	fw_version = resp->hwrm_intf_maj << 16;
	fw_version |= resp->hwrm_intf_min << 8;
	fw_version |= resp->hwrm_intf_upd;

	if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
		RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
		rc = -EINVAL;
		goto error;
	}

	if (my_version != fw_version) {
		RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
		if (my_version < fw_version) {
			RTE_LOG(INFO, PMD,
				"Firmware API version is newer than driver.\n");
			RTE_LOG(INFO, PMD,
				"The driver may be missing features.\n");
		} else {
			RTE_LOG(INFO, PMD,
				"Firmware API version is older than driver.\n");
			RTE_LOG(INFO, PMD,
				"Not all driver features may be functional.\n");
		}
	}

	if (bp->max_req_len > resp->max_req_win_len) {
		RTE_LOG(ERR, PMD, "Unsupported request length\n");
		rc = -EINVAL;
	}
	bp->max_req_len = resp->max_req_win_len;
	max_resp_len = resp->max_resp_len;
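	/*
	 * Grow the DMA-able response buffer if the firmware can return a
	 * larger response than was allocated at init time, and republish
	 * its physical address.
	 */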
	if (bp->max_resp_len != max_resp_len) {
		sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
			bp->pdev->addr.domain, bp->pdev->addr.bus,
			bp->pdev->addr.devid, bp->pdev->addr.function);

		rte_free(bp->hwrm_cmd_resp_addr);

		bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
		if (bp->hwrm_cmd_resp_addr == NULL) {
			rc = -ENOMEM;
			goto error;
		}
		bp->hwrm_cmd_resp_dma_addr =
			rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);
		bp->max_resp_len = max_resp_len;
	}

error:
	rte_spinlock_unlock(&bp->hwrm_lock);
	return rc;
}

int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
	int rc;
	struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (!(bp->flags & BNXT_FLAG_REGISTERED))
		return 0;

	HWRM_PREP(req, FUNC_DRV_UNRGTR, -1, resp);
	req.flags = flags;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	bp->flags &= ~BNXT_FLAG_REGISTERED;

	return rc;
}

static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
	int rc = 0;
	struct hwrm_port_phy_cfg_input req = {0};
	struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;

	HWRM_PREP(req, PORT_PHY_CFG, -1, resp);

	if (conf->link_up) {
		req.flags = rte_cpu_to_le_32(conf->phy_flags);
		req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
		/*
		 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
		 * any auto mode, even "none".
		 */
		if (!conf->link_speed) {
			req.auto_mode |= conf->auto_mode;
			enables = HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
			req.auto_link_speed_mask = conf->auto_link_speed_mask;
			enables |=
			    HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
			req.auto_link_speed = bp->link_info.auto_link_speed;
			enables |=
			    HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
		}
		req.auto_duplex = conf->duplex;
		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
		req.auto_pause = conf->auto_pause;
		req.force_pause = conf->force_pause;
		/* Set force_pause if there is no auto or if there is a force */
		if (req.auto_pause && !req.force_pause)
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
		else
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

		req.enables = rte_cpu_to_le_32(enables);
	} else {
		req.flags =
		rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DOWN);
		RTE_LOG(INFO, PMD, "Force Link Down\n");
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
				   struct bnxt_link_info *link_info)
{
	int rc = 0;
	struct hwrm_port_phy_qcfg_input req = {0};
	struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, PORT_PHY_QCFG, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	link_info->phy_link_status = resp->link;
	if (link_info->phy_link_status !=
	    HWRM_PORT_PHY_QCFG_OUTPUT_LINK_NO_LINK) {
		link_info->link_up = 1;
		link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
	} else {
		link_info->link_up = 0;
		link_info->link_speed = 0;
	}
	link_info->duplex = resp->duplex;
	link_info->pause = resp->pause;
	link_info->auto_pause = resp->auto_pause;
	link_info->force_pause = resp->force_pause;
	link_info->auto_mode = resp->auto_mode;

	link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
	link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
	link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
	link_info->phy_ver[0] = resp->phy_maj;
	link_info->phy_ver[1] = resp->phy_min;
	link_info->phy_ver[2] = resp->phy_bld;

	return rc;
}

int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
	struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, QUEUE_QPORTCFG, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

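/*
 * GET_QUEUE_INFO(x) pastes x into the response field names, copying
 * resp->queue_id<x> and resp->queue_id<x>_service_profile into
 * bp->cos_queue[x] for each of the eight CoS queues.
 */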
#define GET_QUEUE_INFO(x) \
	bp->cos_queue[x].id = resp->queue_id##x; \
	bp->cos_queue[x].profile = resp->queue_id##x##_service_profile

	GET_QUEUE_INFO(0);
	GET_QUEUE_INFO(1);
	GET_QUEUE_INFO(2);
	GET_QUEUE_INFO(3);
	GET_QUEUE_INFO(4);
	GET_QUEUE_INFO(5);
	GET_QUEUE_INFO(6);
	GET_QUEUE_INFO(7);

	return rc;
}

int bnxt_hwrm_ring_alloc(struct bnxt *bp,
			 struct bnxt_ring *ring,
			 uint32_t ring_type, uint32_t map_index,
			 uint32_t stats_ctx_id)
{
	int rc = 0;
	struct hwrm_ring_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_ALLOC, -1, resp);

	req.enables = rte_cpu_to_le_32(0);

	req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
	req.fbo = rte_cpu_to_le_32(0);
	/* Association of ring index with doorbell index */
	req.logical_id = rte_cpu_to_le_16(map_index);

	switch (ring_type) {
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
		req.queue_id = bp->cos_queue[0].id;
		/* FALLTHROUGH */
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
		req.ring_type = ring_type;
		req.cmpl_ring_id =
		    rte_cpu_to_le_16(bp->grp_info[map_index].cp_fw_ring_id);
		req.length = rte_cpu_to_le_32(ring->ring_size);
		req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
		req.enables = rte_cpu_to_le_32(rte_le_to_cpu_32(req.enables) |
			HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
		break;
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL:
		req.ring_type = ring_type;
		/*
		 * TODO: Some HWRM versions crash with
		 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
		 */
		req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
		req.length = rte_cpu_to_le_32(ring->ring_size);
		break;
	default:
		RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
			ring_type);
		return -1;
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);
		switch (ring_type) {
		case HWRM_RING_FREE_INPUT_RING_TYPE_CMPL:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc cp failed. rc:%d\n", rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc rx failed. rc:%d\n", rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc tx failed. rc:%d\n", rc);
			return rc;
		default:
			RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);
			return rc;
		}
	}

	ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
	return rc;
}

int bnxt_hwrm_ring_free(struct bnxt *bp,
			struct bnxt_ring *ring, uint32_t ring_type)
{
	int rc;
	struct hwrm_ring_free_input req = {.req_type = 0 };
	struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_FREE, -1, resp);

	req.ring_type = ring_type;
	req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);

		switch (ring_type) {
		case HWRM_RING_FREE_INPUT_RING_TYPE_CMPL:
			RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
				rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
				rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
				rc);
			return rc;
		default:
			RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);
			return rc;
		}
	}
	return 0;
}

int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
{
	int rc = 0;
	struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_GRP_ALLOC, -1, resp);

	req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
	req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
	req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
	req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	bp->grp_info[idx].fw_grp_id =
	    rte_le_to_cpu_16(resp->ring_group_id);

	return rc;
}

int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
{
	int rc;
	struct hwrm_ring_grp_free_input req = {.req_type = 0 };
	struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_GRP_FREE, -1, resp);

	req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
	return rc;
}

int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
	int rc = 0;
	struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

	if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
		return rc;

	HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp);

	req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp,
			     struct bnxt_cp_ring_info *cpr, unsigned int idx)
{
	int rc;
	struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_ALLOC, -1, resp);

	req.update_period_ms = rte_cpu_to_le_32(1000);

	req.stats_dma_addr =
	    rte_cpu_to_le_64(cpr->hw_stats_map);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
	bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;

	return rc;
}

int bnxt_hwrm_stat_ctx_free(struct bnxt *bp,
			    struct bnxt_cp_ring_info *cpr, unsigned int idx)
{
	int rc;
	struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_FREE, -1, resp);

	req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
	bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;

	return rc;
}

int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0, i, j;
	struct hwrm_vnic_alloc_input req = { 0 };
	struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	/* map ring groups to this vnic */
	for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++) {
		if (bp->grp_info[i].fw_grp_id == (uint16_t)HWRM_NA_SIGNATURE) {
			RTE_LOG(ERR, PMD,
				"Not enough ring groups avail:%x req:%x\n", j,
				(vnic->end_grp_id - vnic->start_grp_id) + 1);
			break;
		}
		vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
	}

	vnic->fw_rss_cos_lb_ctx = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->ctx_is_rss_cos_lb = HW_CONTEXT_NONE;

	HWRM_PREP(req, VNIC_ALLOC, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
	return rc;
}

int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_CFG, -1, resp);

	/* Only RSS support for now TBD: COS & LB */
	req.enables =
	    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
			     HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE |
			     HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.dflt_ring_grp =
		rte_cpu_to_le_16(bp->grp_info[vnic->start_grp_id].fw_grp_id);
	req.rss_rule = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);
	req.cos_rule = rte_cpu_to_le_16(0xffff);
	req.lb_rule = rte_cpu_to_le_16(0xffff);
	req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
				   ETHER_CRC_LEN + VLAN_TAG_SIZE);
	if (vnic->func_default)
		req.flags = rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
	if (vnic->vlan_strip)
		req.flags |=
		    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
						bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	vnic->fw_rss_cos_lb_ctx = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);

	return rc;
}

int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
						bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, -1, resp);

	req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	vnic->fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;

	return rc;
}

int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_free_input req = {.req_type = 0 };
	struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
		return rc;

	HWRM_PREP(req, VNIC_FREE, -1, resp);

	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	vnic->fw_vnic_id = INVALID_HW_RING_ID;
	return rc;
}

int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
			   struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_RSS_CFG, -1, resp);

	req.hash_type = rte_cpu_to_le_32(vnic->hash_type);

	req.ring_grp_tbl_addr =
	    rte_cpu_to_le_64(vnic->rss_table_dma_addr);
	req.hash_key_tbl_addr =
	    rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
	req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

/*
 * HWRM utility functions
 */

int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
{
	unsigned int i;
	int rc = 0;

	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq;
		struct bnxt_rx_queue *rxq;
		struct bnxt_cp_ring_info *cpr;

		if (i >= bp->rx_cp_nr_rings) {
			txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
			cpr = txq->cp_ring;
		} else {
			rxq = bp->rx_queues[i];
			cpr = rxq->cp_ring;
		}

		rc = bnxt_hwrm_stat_clear(bp, cpr);
		if (rc)
			return rc;
	}
	return 0;
}

int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
{
	int rc;
	unsigned int i;
	struct bnxt_cp_ring_info *cpr;

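	/*
	 * grp_info[0] tracks the default completion ring (freed with idx 0
	 * in bnxt_free_all_hwrm_rings()), so per-queue stats contexts and
	 * ring groups start at idx = i + 1.
	 */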
	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
		unsigned int idx = i + 1;

		if (i >= bp->rx_cp_nr_rings)
			cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
		else
			cpr = bp->rx_queues[i]->cp_ring;
		if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
			rc = bnxt_hwrm_stat_ctx_free(bp, cpr, idx);
			if (rc)
				return rc;
		}
	}
	return 0;
}

int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
{
	unsigned int i;
	int rc = 0;

	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq;
		struct bnxt_rx_queue *rxq;
		struct bnxt_cp_ring_info *cpr;
		unsigned int idx = i + 1;

		if (i >= bp->rx_cp_nr_rings) {
			txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
			cpr = txq->cp_ring;
		} else {
			rxq = bp->rx_queues[i];
			cpr = rxq->cp_ring;
		}

		rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, idx);

		if (rc)
			return rc;
	}
	return rc;
}

int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
{
	uint16_t i;
	uint32_t rc = 0;

	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		unsigned int idx = i + 1;

		if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID) {
			RTE_LOG(ERR, PMD,
				"Attempt to free invalid ring group %d\n",
				idx);
			continue;
		}

		rc = bnxt_hwrm_ring_grp_free(bp, idx);

		if (rc)
			return rc;
	}
	return rc;
}

static void bnxt_free_cp_ring(struct bnxt *bp,
			      struct bnxt_cp_ring_info *cpr, unsigned int idx)
{
	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;

	bnxt_hwrm_ring_free(bp, cp_ring,
			    HWRM_RING_FREE_INPUT_RING_TYPE_CMPL);
	cp_ring->fw_ring_id = INVALID_HW_RING_ID;
	bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
	memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
			sizeof(*cpr->cp_desc_ring));
	cpr->cp_raw_cons = 0;
}

int bnxt_free_all_hwrm_rings(struct bnxt *bp)
{
	unsigned int i;
	int rc = 0;

	for (i = 0; i < bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq = bp->tx_queues[i];
		struct bnxt_tx_ring_info *txr = txq->tx_ring;
		struct bnxt_ring *ring = txr->tx_ring_struct;
		struct bnxt_cp_ring_info *cpr = txq->cp_ring;
		unsigned int idx = bp->rx_cp_nr_rings + i + 1;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_hwrm_ring_free(bp, ring,
					HWRM_RING_FREE_INPUT_RING_TYPE_TX);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			memset(txr->tx_desc_ring, 0,
					txr->tx_ring_struct->ring_size *
					sizeof(*txr->tx_desc_ring));
			memset(txr->tx_buf_ring, 0,
					txr->tx_ring_struct->ring_size *
					sizeof(*txr->tx_buf_ring));
			txr->tx_prod = 0;
			txr->tx_cons = 0;
		}
		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
			bnxt_free_cp_ring(bp, cpr, idx);
	}

	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		struct bnxt_rx_queue *rxq = bp->rx_queues[i];
		struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
		struct bnxt_ring *ring = rxr->rx_ring_struct;
		struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
		unsigned int idx = i + 1;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_hwrm_ring_free(bp, ring,
					HWRM_RING_FREE_INPUT_RING_TYPE_RX);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
			memset(rxr->rx_desc_ring, 0,
					rxr->rx_ring_struct->ring_size *
					sizeof(*rxr->rx_desc_ring));
			memset(rxr->rx_buf_ring, 0,
					rxr->rx_ring_struct->ring_size *
					sizeof(*rxr->rx_buf_ring));
			rxr->rx_prod = 0;
		}
		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
			bnxt_free_cp_ring(bp, cpr, idx);
	}

	/* Default completion ring */
	{
		struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;

		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
			bnxt_free_cp_ring(bp, cpr, 0);
	}

	return rc;
}

int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
{
	uint16_t i;
	uint32_t rc = 0;

	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		unsigned int idx = i + 1;

		if (bp->grp_info[idx].cp_fw_ring_id == INVALID_HW_RING_ID ||
		    bp->grp_info[idx].rx_fw_ring_id == INVALID_HW_RING_ID)
			continue;

		rc = bnxt_hwrm_ring_grp_alloc(bp, idx);

		if (rc)
			return rc;
	}
	return rc;
}

void bnxt_free_hwrm_resources(struct bnxt *bp)
{
	/* Release the rte_malloc'd response buffer */
	rte_free(bp->hwrm_cmd_resp_addr);
	bp->hwrm_cmd_resp_addr = NULL;
	bp->hwrm_cmd_resp_dma_addr = 0;
}

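/*
 * The response buffer is handed to firmware by physical address, so it is
 * allocated with rte_malloc() and translated via rte_malloc_virt2phy()
 * into hwrm_cmd_resp_dma_addr.
 */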
int bnxt_alloc_hwrm_resources(struct bnxt *bp)
{
	struct rte_pci_device *pdev = bp->pdev;
	char type[RTE_MEMZONE_NAMESIZE];

	sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
	bp->max_req_len = HWRM_MAX_REQ_LEN;
	bp->max_resp_len = HWRM_MAX_RESP_LEN;
	bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
	if (bp->hwrm_cmd_resp_addr == NULL)
		return -ENOMEM;
	bp->hwrm_cmd_resp_dma_addr =
		rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);
	rte_spinlock_init(&bp->hwrm_lock);

	return 0;
}

int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	struct bnxt_filter_info *filter;
	int rc = 0;

	STAILQ_FOREACH(filter, &vnic->filter, next) {
		rc = bnxt_hwrm_clear_filter(bp, filter);
		if (rc)
			break;
	}
	return rc;
}

int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	struct bnxt_filter_info *filter;
	int rc = 0;

	STAILQ_FOREACH(filter, &vnic->filter, next) {
		rc = bnxt_hwrm_set_filter(bp, vnic, filter);
		if (rc)
			break;
	}
	return rc;
}

void bnxt_free_all_hwrm_resources(struct bnxt *bp)
{
	struct bnxt_vnic_info *vnic;
	unsigned int i;

	if (bp->vnic_info == NULL)
		return;

	vnic = &bp->vnic_info[0];
	bnxt_hwrm_cfa_l2_clear_rx_mask(bp, vnic);

	/* VNIC resources */
	for (i = 0; i < bp->nr_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		bnxt_clear_hwrm_vnic_filters(bp, vnic);

		bnxt_hwrm_vnic_ctx_free(bp, vnic);
		bnxt_hwrm_vnic_free(bp, vnic);
	}
	/* Ring resources */
	bnxt_free_all_hwrm_rings(bp);
	bnxt_free_all_hwrm_ring_grps(bp);
	bnxt_free_all_hwrm_stat_ctxs(bp);
}

static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
{
	uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;

	if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;

	switch (conf_link_speed) {
	case ETH_LINK_SPEED_10M_HD:
	case ETH_LINK_SPEED_100M_HD:
		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
	}
	return hw_link_duplex;
}

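/*
 * Map a DPDK ETH_LINK_SPEED_* request onto the HWRM speed encoding.
 * Returning 0 (== ETH_LINK_SPEED_AUTONEG) signals autonegotiation, which
 * bnxt_set_hwrm_link_config() keys off below.
 */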
static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
{
	uint16_t eth_link_speed = 0;

	if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
		return ETH_LINK_SPEED_AUTONEG;

	switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
	case ETH_LINK_SPEED_100M:
	case ETH_LINK_SPEED_100M_HD:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
		break;
	case ETH_LINK_SPEED_1G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
		break;
	case ETH_LINK_SPEED_2_5G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
		break;
	case ETH_LINK_SPEED_10G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
		break;
	case ETH_LINK_SPEED_20G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
		break;
	case ETH_LINK_SPEED_25G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
		break;
	case ETH_LINK_SPEED_40G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
		break;
	case ETH_LINK_SPEED_50G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
		break;
	default:
		RTE_LOG(ERR, PMD,
			"Unsupported link speed %d; default to AUTO\n",
			conf_link_speed);
		break;
	}
	return eth_link_speed;
}

#define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
		ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
		ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
		ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)

static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id)
{
	uint32_t one_speed;

	if (link_speed == ETH_LINK_SPEED_AUTONEG)
		return 0;

	if (link_speed & ETH_LINK_SPEED_FIXED) {
		one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;

		if (one_speed & (one_speed - 1)) {
			RTE_LOG(ERR, PMD,
				"Invalid advertised speeds (%u) for port %u\n",
				link_speed, port_id);
			return -EINVAL;
		}
		if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
			RTE_LOG(ERR, PMD,
				"Unsupported advertised speed (%u) for port %u\n",
				link_speed, port_id);
			return -EINVAL;
		}
	} else {
		if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
			RTE_LOG(ERR, PMD,
				"Unsupported advertised speeds (%u) for port %u\n",
				link_speed, port_id);
			return -EINVAL;
		}
	}
	return 0;
}

static uint16_t bnxt_parse_eth_link_speed_mask(uint32_t link_speed)
{
	uint16_t ret = 0;

	if (link_speed == ETH_LINK_SPEED_AUTONEG)
		link_speed = BNXT_SUPPORTED_SPEEDS;

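	/*
	 * HWRM exposes one 100Mb autoneg mask bit, so the full- and
	 * half-duplex 100M requests both map onto it.
	 */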
	if (link_speed & ETH_LINK_SPEED_100M)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
	if (link_speed & ETH_LINK_SPEED_100M_HD)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
	if (link_speed & ETH_LINK_SPEED_1G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
	if (link_speed & ETH_LINK_SPEED_2_5G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
	if (link_speed & ETH_LINK_SPEED_10G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
	if (link_speed & ETH_LINK_SPEED_20G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
	if (link_speed & ETH_LINK_SPEED_25G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
	if (link_speed & ETH_LINK_SPEED_40G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
	if (link_speed & ETH_LINK_SPEED_50G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
	return ret;
}

static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
{
	uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;

	switch (hw_link_speed) {
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
		eth_link_speed = ETH_SPEED_NUM_100M;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
		eth_link_speed = ETH_SPEED_NUM_1G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
		eth_link_speed = ETH_SPEED_NUM_2_5G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
		eth_link_speed = ETH_SPEED_NUM_10G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
		eth_link_speed = ETH_SPEED_NUM_20G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
		eth_link_speed = ETH_SPEED_NUM_25G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
		eth_link_speed = ETH_SPEED_NUM_40G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
		eth_link_speed = ETH_SPEED_NUM_50G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
	default:
		RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",
			hw_link_speed);
		break;
	}
	return eth_link_speed;
}

static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
{
	uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;

	switch (hw_link_duplex) {
	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
		eth_link_duplex = ETH_LINK_FULL_DUPLEX;
		break;
	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
		eth_link_duplex = ETH_LINK_HALF_DUPLEX;
		break;
	default:
		RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
			hw_link_duplex);
		break;
	}
	return eth_link_duplex;
}

int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
{
	int rc = 0;
	struct bnxt_link_info *link_info = &bp->link_info;

	rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"Get link config failed with rc %d\n", rc);
		goto exit;
	}
	if (link_info->link_up)
		link->link_speed =
			bnxt_parse_hw_link_speed(link_info->link_speed);
	else
		link->link_speed = ETH_LINK_SPEED_10M;
	link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
	link->link_status = link_info->link_up;
	link->link_autoneg = link_info->auto_mode ==
		HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
		ETH_LINK_SPEED_FIXED : ETH_LINK_SPEED_AUTONEG;
exit:
	return rc;
}

int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
{
	int rc = 0;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_link_info link_req;
	uint16_t speed;

	if (BNXT_NPAR_PF(bp) || BNXT_VF(bp))
		return 0;

	rc = bnxt_valid_link_speed(dev_conf->link_speeds,
				   bp->eth_dev->data->port_id);
	if (rc)
		goto error;

	memset(&link_req, 0, sizeof(link_req));
	link_req.link_up = link_up;
	if (!link_up)
		goto port_phy_cfg;

	speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
	link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
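	/*
	 * A parsed speed of 0 means the application requested autoneg:
	 * advertise the full supported mask and restart autonegotiation.
	 * Otherwise force the single requested speed.
	 */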
	if (speed == 0) {
		link_req.phy_flags |=
			HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
		link_req.auto_mode =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
		link_req.auto_link_speed_mask =
			bnxt_parse_eth_link_speed_mask(dev_conf->link_speeds);
	} else {
		link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
		link_req.link_speed = speed;
		RTE_LOG(INFO, PMD, "Set Link Speed %x\n", speed);
	}
	link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
	link_req.auto_pause = bp->link_info.auto_pause;
	link_req.force_pause = bp->link_info.force_pause;

port_phy_cfg:
	rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"Set link config failed with rc %d\n", rc);
	}

	rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
error:
	return rc;
}

/* JIRA 22088 */
int bnxt_hwrm_func_qcfg(struct bnxt *bp)
{
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc = 0;

	HWRM_PREP(req, FUNC_QCFG, -1, resp);
	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	if (BNXT_VF(bp)) {
		struct bnxt_vf_info *vf = &bp->vf;

		/* Hard-coded 0xfff VLAN ID mask */
		vf->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
	}

	switch (resp->port_partition_type) {
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
		bp->port_partition_type = resp->port_partition_type;
		break;
	default:
		bp->port_partition_type = 0;
		break;
	}

	return rc;
}