/*-
 *   BSD LICENSE
 *
 *   Copyright(c) Broadcom Limited.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Broadcom Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

#define HWRM_CMD_TIMEOUT		2000

/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
 * fails (i.e. on a timeout), or a positive non-zero HWRM error code if the
 * command is rejected by the ChiMP firmware.
 */

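/*
 * Low-level HWRM transport: the request is written one 32-bit word at a
 * time into the command channel at the start of BAR0, the remainder of
 * the channel is zeroed, and the doorbell at offset 0x100 is rung.
 * Completion is detected by polling the last byte of the response buffer
 * for HWRM_RESP_VALID_KEY, which firmware writes only after the rest of
 * the response has landed (hence the rte_rmb() before each check).
 * Callers must hold bp->hwrm_lock; bnxt_hwrm_send_message() below is the
 * locked wrapper.
 */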
static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg,
					 uint32_t msg_len)
{
	unsigned int i;
	struct input *req = msg;
	struct output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t *data = msg;
	uint8_t *bar;
	uint8_t *valid;

	/* Write request msg to hwrm channel */
	for (i = 0; i < msg_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + i;
		*(volatile uint32_t *)bar = *data;
		data++;
	}

	/* Zero the rest of the request space */
	for (; i < bp->max_req_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + i;
		*(volatile uint32_t *)bar = 0;
	}

	/* Ring channel doorbell */
	bar = (uint8_t *)bp->bar0 + 0x100;
	*(volatile uint32_t *)bar = 1;

	/* Poll for the valid bit */
	for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
		/* Sanity check on the resp->resp_len */
		rte_rmb();
		if (resp->resp_len && resp->resp_len <= bp->max_resp_len) {
			/* Last byte of resp contains the valid key */
			valid = (uint8_t *)resp + resp->resp_len - 1;
			if (*valid == HWRM_RESP_VALID_KEY)
				break;
		}
		rte_delay_us(600);
	}

	if (i >= HWRM_CMD_TIMEOUT) {
		RTE_LOG(ERR, PMD, "Error sending msg %x\n", req->req_type);
		goto err_ret;
	}
	return 0;

err_ret:
	return -1;
}

static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, uint32_t msg_len)
{
	int rc;

	rte_spinlock_lock(&bp->hwrm_lock);
	rc = bnxt_hwrm_send_message_locked(bp, msg, msg_len);
	rte_spinlock_unlock(&bp->hwrm_lock);
	return rc;
}

#define HWRM_PREP(req, type, cr, resp) \
	memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
	req.req_type = rte_cpu_to_le_16(HWRM_##type); \
	req.cmpl_ring = rte_cpu_to_le_16(cr); \
	req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
	req.target_id = rte_cpu_to_le_16(0xffff); \
	req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr)

#define HWRM_CHECK_RESULT \
	{ \
		if (rc) { \
			RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
				__func__, rc); \
			return rc; \
		} \
		if (resp->error_code) { \
			rc = rte_le_to_cpu_16(resp->error_code); \
			RTE_LOG(ERR, PMD, "%s error %d\n", __func__, rc); \
			return rc; \
		} \
	}

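/*
 * Every command wrapper below follows the same shape; an illustrative
 * sketch (FOO is a hypothetical command, not one defined by this driver):
 *
 *	struct hwrm_foo_input req = {.req_type = 0 };
 *	struct hwrm_foo_output *resp = bp->hwrm_cmd_resp_addr;
 *
 *	HWRM_PREP(req, FOO, -1, resp);       fill common header fields
 *	req.bar = rte_cpu_to_le_16(val);     command fields, little-endian
 *	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 *	HWRM_CHECK_RESULT;                   returns on rc or firmware error
 *
 * Note that HWRM_CHECK_RESULT expands to code containing return
 * statements, so it may only appear in functions returning int.
 */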
int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.mask = 0;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t mask = 0;

	HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	/* FIXME: add the multicast flag once adding multicast addresses
	 * is supported.
	 */
	if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
	if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
	req.mask = rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
				    mask);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

int bnxt_hwrm_clear_filter(struct bnxt *bp,
			   struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, CFA_L2_FILTER_FREE, -1, resp);

	req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	filter->fw_l2_filter_id = -1;

	return 0;
}

int bnxt_hwrm_set_filter(struct bnxt *bp,
			 struct bnxt_vnic_info *vnic,
			 struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;

	HWRM_PREP(req, CFA_L2_FILTER_ALLOC, -1, resp);

	req.flags = rte_cpu_to_le_32(filter->flags);

	enables = filter->enables |
		HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
	req.dst_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
		memcpy(req.l2_addr, filter->l2_addr, ETHER_ADDR_LEN);
	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
		memcpy(req.l2_addr_mask, filter->l2_addr_mask, ETHER_ADDR_LEN);
	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
		req.l2_ovlan = filter->l2_ovlan;
	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
		req.l2_ovlan_mask = filter->l2_ovlan_mask;

	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);

	return rc;
}

int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, void *fwd_cmd)
{
	int rc;
	struct hwrm_exec_fwd_resp_input req = {.req_type = 0 };
	struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, EXEC_FWD_RESP, -1, resp);

	memcpy(req.encap_request, fwd_cmd, sizeof(req.encap_request));

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

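/*
 * Query function capabilities and cache the resource limits (rings,
 * VNICs, L2/RSS contexts, MAC address) in bp->pf or bp->vf depending on
 * whether this function is a PF or a VF.
 */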
int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_qcaps_input req = {.req_type = 0 };
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_QCAPS, -1, resp);

	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
	if (BNXT_PF(bp)) {
		struct bnxt_pf_info *pf = &bp->pf;

		pf->fw_fid = rte_le_to_cpu_32(resp->fid);
		pf->port_id = resp->port_id;
		memcpy(pf->mac_addr, resp->mac_address, ETHER_ADDR_LEN);
		pf->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
		pf->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
		pf->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
		pf->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
		pf->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
		pf->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
		pf->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
		pf->max_vfs = rte_le_to_cpu_16(resp->max_vfs);
	} else {
		struct bnxt_vf_info *vf = &bp->vf;

		vf->fw_fid = rte_le_to_cpu_32(resp->fid);
		memcpy(vf->mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
		vf->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
		vf->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
		vf->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
		vf->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
		vf->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
		vf->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
	}

	return rc;
}

int bnxt_hwrm_func_reset(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_reset_input req = {.req_type = 0 };
	struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_RESET, -1, resp);

	req.enables = rte_cpu_to_le_32(0);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

int bnxt_hwrm_func_driver_register(struct bnxt *bp, uint32_t flags,
				   uint32_t *vf_req_fwd)
{
	int rc;
	struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (bp->flags & BNXT_FLAG_REGISTERED)
		return 0;

	HWRM_PREP(req, FUNC_DRV_RGTR, -1, resp);
	req.flags = flags;
	req.enables = HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD;
	req.ver_maj = RTE_VER_YEAR;
	req.ver_min = RTE_VER_MONTH;
	req.ver_upd = RTE_VER_MINOR;

	memcpy(req.vf_req_fwd, vf_req_fwd, sizeof(req.vf_req_fwd));

	req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1); /* TODO: Use MACRO */

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	bp->flags |= BNXT_FLAG_REGISTERED;

	return rc;
}

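/*
 * Negotiate the HWRM interface version with firmware.  A major-version
 * mismatch is fatal; a minor/update mismatch is only logged.  If firmware
 * reports a larger maximum response size than was allocated at probe
 * time, the response buffer is reallocated under bp->hwrm_lock, which is
 * why this function uses bnxt_hwrm_send_message_locked() directly.
 */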
int bnxt_hwrm_ver_get(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_ver_get_input req = {.req_type = 0 };
	struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t my_version;
	uint32_t fw_version;
	uint16_t max_resp_len;
	char type[RTE_MEMZONE_NAMESIZE];

	HWRM_PREP(req, VER_GET, -1, resp);

	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

	/*
	 * Hold the lock since we may be adjusting the response pointers.
	 */
	rte_spinlock_lock(&bp->hwrm_lock);
	rc = bnxt_hwrm_send_message_locked(bp, &req, sizeof(req));

	/*
	 * HWRM_CHECK_RESULT would return while still holding hwrm_lock, so
	 * the checks are open-coded here to unlock on the error path.
	 */
	if (rc) {
		RTE_LOG(ERR, PMD, "%s failed rc:%d\n", __func__, rc);
		goto error;
	}
	if (resp->error_code) {
		rc = rte_le_to_cpu_16(resp->error_code);
		RTE_LOG(ERR, PMD, "%s error %d\n", __func__, rc);
		goto error;
	}

	RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
		resp->hwrm_intf_maj, resp->hwrm_intf_min,
		resp->hwrm_intf_upd,
		resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
	RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
		HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

	my_version = HWRM_VERSION_MAJOR << 16;
	my_version |= HWRM_VERSION_MINOR << 8;
	my_version |= HWRM_VERSION_UPDATE;

	fw_version = resp->hwrm_intf_maj << 16;
	fw_version |= resp->hwrm_intf_min << 8;
	fw_version |= resp->hwrm_intf_upd;

	if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
		RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
		rc = -EINVAL;
		goto error;
	}

	if (my_version != fw_version) {
		RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
		if (my_version < fw_version) {
			RTE_LOG(INFO, PMD,
				"Firmware API version is newer than driver.\n");
			RTE_LOG(INFO, PMD,
				"The driver may be missing features.\n");
		} else {
			RTE_LOG(INFO, PMD,
				"Firmware API version is older than driver.\n");
			RTE_LOG(INFO, PMD,
				"Not all driver features may be functional.\n");
		}
	}

	if (bp->max_req_len > resp->max_req_win_len) {
		RTE_LOG(ERR, PMD, "Unsupported request length\n");
		rc = -EINVAL;
	}
	bp->max_req_len = resp->max_req_win_len;
	max_resp_len = resp->max_resp_len;
	if (bp->max_resp_len != max_resp_len) {
		sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
			bp->pdev->addr.domain, bp->pdev->addr.bus,
			bp->pdev->addr.devid, bp->pdev->addr.function);

		rte_free(bp->hwrm_cmd_resp_addr);

		bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
		if (bp->hwrm_cmd_resp_addr == NULL) {
			rc = -ENOMEM;
			goto error;
		}
		bp->hwrm_cmd_resp_dma_addr =
			rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);
		bp->max_resp_len = max_resp_len;
	}

error:
	rte_spinlock_unlock(&bp->hwrm_lock);
	return rc;
}

int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
	int rc;
	struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (!(bp->flags & BNXT_FLAG_REGISTERED))
		return 0;

	HWRM_PREP(req, FUNC_DRV_UNRGTR, -1, resp);
	req.flags = flags;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	bp->flags &= ~BNXT_FLAG_REGISTERED;

	return rc;
}

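/*
 * Push PHY configuration to firmware.  With conf->link_up set, a zero
 * link_speed selects autonegotiation (auto_mode plus a speed mask) and a
 * non-zero value forces that speed; each optional field must also be
 * flagged in req.enables or firmware ignores it.  With link_up clear, the
 * port is simply forced down.
 */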
static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
	int rc = 0;
	struct hwrm_port_phy_cfg_input req = {0};
	struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;

	HWRM_PREP(req, PORT_PHY_CFG, -1, resp);

	if (conf->link_up) {
		req.flags = rte_cpu_to_le_32(conf->phy_flags);
		req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
		/*
		 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
		 * any auto mode, even "none".
		 */
		if (!conf->link_speed) {
			req.auto_mode |= conf->auto_mode;
			enables = HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
			req.auto_link_speed_mask = conf->auto_link_speed_mask;
			enables |=
			   HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
			req.auto_link_speed = bp->link_info.auto_link_speed;
			enables |=
				HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
		}
		req.auto_duplex = conf->duplex;
		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
		req.auto_pause = conf->auto_pause;
		req.force_pause = conf->force_pause;
		/* Set force_pause if there is no auto or if there is a force */
		if (req.auto_pause && !req.force_pause)
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
		else
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

		req.enables = rte_cpu_to_le_32(enables);
	} else {
		req.flags =
		rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DOWN);
		RTE_LOG(INFO, PMD, "Force Link Down\n");
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
				   struct bnxt_link_info *link_info)
{
	int rc = 0;
	struct hwrm_port_phy_qcfg_input req = {0};
	struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, PORT_PHY_QCFG, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	link_info->phy_link_status = resp->link;
	if (link_info->phy_link_status !=
	    HWRM_PORT_PHY_QCFG_OUTPUT_LINK_NO_LINK) {
		link_info->link_up = 1;
		link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
	} else {
		link_info->link_up = 0;
		link_info->link_speed = 0;
	}
	link_info->duplex = resp->duplex;
	link_info->pause = resp->pause;
	link_info->auto_pause = resp->auto_pause;
	link_info->force_pause = resp->force_pause;
	link_info->auto_mode = resp->auto_mode;

	link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
	link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
	link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
	link_info->phy_ver[0] = resp->phy_maj;
	link_info->phy_ver[1] = resp->phy_min;
	link_info->phy_ver[2] = resp->phy_bld;

	return rc;
}

int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
	struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, QUEUE_QPORTCFG, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

#define GET_QUEUE_INFO(x) \
	bp->cos_queue[x].id = resp->queue_id##x; \
	bp->cos_queue[x].profile = resp->queue_id##x##_service_profile

	GET_QUEUE_INFO(0);
	GET_QUEUE_INFO(1);
	GET_QUEUE_INFO(2);
	GET_QUEUE_INFO(3);
	GET_QUEUE_INFO(4);
	GET_QUEUE_INFO(5);
	GET_QUEUE_INFO(6);
	GET_QUEUE_INFO(7);

	return rc;
}

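/*
 * Allocate a TX, RX, or completion ring in firmware.  TX rings also carry
 * a CoS queue id, which is why the TX case deliberately falls through
 * into the RX case below.  On success the firmware ring id is stored in
 * ring->fw_ring_id.
 */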
int bnxt_hwrm_ring_alloc(struct bnxt *bp,
			 struct bnxt_ring *ring,
			 uint32_t ring_type, uint32_t map_index,
			 uint32_t stats_ctx_id)
{
	int rc = 0;
	struct hwrm_ring_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_ALLOC, -1, resp);

	req.enables = rte_cpu_to_le_32(0);

	req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
	req.fbo = rte_cpu_to_le_32(0);
	/* Association of ring index with doorbell index */
	req.logical_id = rte_cpu_to_le_16(map_index);

	switch (ring_type) {
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
		req.queue_id = bp->cos_queue[0].id;
		/* FALLTHROUGH */
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
		req.ring_type = ring_type;
		req.cmpl_ring_id =
		    rte_cpu_to_le_16(bp->grp_info[map_index].cp_fw_ring_id);
		req.length = rte_cpu_to_le_32(ring->ring_size);
		req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
		req.enables = rte_cpu_to_le_32(rte_le_to_cpu_32(req.enables) |
			HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
		break;
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL:
		req.ring_type = ring_type;
		/*
		 * TODO: Some HWRM versions crash with
		 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
		 */
		req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
		req.length = rte_cpu_to_le_32(ring->ring_size);
		break;
	default:
		RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
			ring_type);
		return -1;
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);
		switch (ring_type) {
		case HWRM_RING_FREE_INPUT_RING_TYPE_CMPL:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc cp failed. rc:%d\n", rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc rx failed. rc:%d\n", rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc tx failed. rc:%d\n", rc);
			return rc;
		default:
			RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);
			return rc;
		}
	}

	ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
	return rc;
}

int bnxt_hwrm_ring_free(struct bnxt *bp,
			struct bnxt_ring *ring, uint32_t ring_type)
{
	int rc;
	struct hwrm_ring_free_input req = {.req_type = 0 };
	struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_FREE, -1, resp);

	req.ring_type = ring_type;
	req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);

		switch (ring_type) {
		case HWRM_RING_FREE_INPUT_RING_TYPE_CMPL:
			RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
				rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
				rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
				rc);
			return rc;
		default:
			RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);
			return rc;
		}
	}
	return 0;
}

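/*
 * A ring group bundles the completion ring (cr), RX ring (rr),
 * aggregation ring (ar), and statistics context (sc) that together
 * service one RX queue; the firmware-assigned group id is what
 * bnxt_hwrm_vnic_cfg() later refers to.
 */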
int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
{
	int rc = 0;
	struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_GRP_ALLOC, -1, resp);

	req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
	req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
	req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
	req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	bp->grp_info[idx].fw_grp_id =
	    rte_le_to_cpu_16(resp->ring_group_id);

	return rc;
}

int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
{
	int rc;
	struct hwrm_ring_grp_free_input req = {.req_type = 0 };
	struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_GRP_FREE, -1, resp);

	req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
	return rc;
}

int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
	int rc = 0;
	struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

	/* Check before HWRM_PREP so we do not burn a sequence number */
	if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
		return rc;

	HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp);

	req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp,
			     struct bnxt_cp_ring_info *cpr, unsigned int idx)
{
	int rc;
	struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_ALLOC, -1, resp);

	req.update_period_ms = rte_cpu_to_le_32(1000);

	/* seq_id is already assigned by HWRM_PREP */
	req.stats_dma_addr = rte_cpu_to_le_64(cpr->hw_stats_map);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
	bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;

	return rc;
}

int bnxt_hwrm_stat_ctx_free(struct bnxt *bp,
			    struct bnxt_cp_ring_info *cpr, unsigned int idx)
{
	int rc;
	struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_FREE, -1, resp);

	req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
	bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;

	return rc;
}

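/*
 * Allocate a VNIC in firmware and record which ring groups it will use.
 * The loop below copies the firmware group ids for start_grp_id through
 * end_grp_id into vnic->fw_grp_ids, bailing out early if a group was
 * never allocated.
 */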
int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0, i, j;
	struct hwrm_vnic_alloc_input req = { 0 };
	struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	/* map ring groups to this vnic */
	for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++) {
		if (bp->grp_info[i].fw_grp_id == (uint16_t)HWRM_NA_SIGNATURE) {
			RTE_LOG(ERR, PMD,
				"Not enough ring groups avail:%x req:%x\n", j,
				(vnic->end_grp_id - vnic->start_grp_id) + 1);
			break;
		}
		vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
	}

	vnic->fw_rss_cos_lb_ctx = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->ctx_is_rss_cos_lb = HW_CONTEXT_NONE;

	HWRM_PREP(req, VNIC_ALLOC, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
	return rc;
}

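/*
 * The MRU programmed below is the MTU plus L2 overhead.  With the usual
 * DPDK constants (ETHER_HDR_LEN = 14, ETHER_CRC_LEN = 4, VLAN_TAG_SIZE = 4),
 * a default MTU of 1500 yields an MRU of 1500 + 14 + 4 + 4 = 1522 bytes.
 */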
int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_CFG, -1, resp);

	/* Only RSS support for now TBD: COS & LB */
	req.enables =
	    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
			     HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE |
			     HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.dflt_ring_grp =
		rte_cpu_to_le_16(bp->grp_info[vnic->start_grp_id].fw_grp_id);
	req.rss_rule = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);
	req.cos_rule = rte_cpu_to_le_16(0xffff);
	req.lb_rule = rte_cpu_to_le_16(0xffff);
	req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
				   ETHER_CRC_LEN + VLAN_TAG_SIZE);
	if (vnic->func_default)
		req.flags = 1;
	if (vnic->vlan_strip)
		req.flags |=
		    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
						bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	vnic->fw_rss_cos_lb_ctx = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);

	return rc;
}

int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
						bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, -1, resp);

	req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	vnic->fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;

	return rc;
}

int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_free_input req = {.req_type = 0 };
	struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
		return rc;

	HWRM_PREP(req, VNIC_FREE, -1, resp);

	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	vnic->fw_vnic_id = INVALID_HW_RING_ID;
	return rc;
}

int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
			   struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_RSS_CFG, -1, resp);

	req.hash_type = rte_cpu_to_le_32(vnic->hash_type);

	req.ring_grp_tbl_addr =
	    rte_cpu_to_le_64(vnic->rss_table_dma_addr);
	req.hash_key_tbl_addr =
	    rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
	req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

/*
 * HWRM utility functions
 */

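/*
 * The helpers below iterate the completion rings in a fixed order: RX
 * queues first, then TX queues, with the group/stats index offset by one
 * because index 0 is reserved for the default completion ring (see
 * bnxt_free_all_hwrm_rings() below).
 */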
int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
{
	unsigned int i;
	int rc = 0;

	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq;
		struct bnxt_rx_queue *rxq;
		struct bnxt_cp_ring_info *cpr;

		if (i >= bp->rx_cp_nr_rings) {
			txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
			cpr = txq->cp_ring;
		} else {
			rxq = bp->rx_queues[i];
			cpr = rxq->cp_ring;
		}

		rc = bnxt_hwrm_stat_clear(bp, cpr);
		if (rc)
			return rc;
	}
	return 0;
}

int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
{
	int rc;
	unsigned int i;
	struct bnxt_cp_ring_info *cpr;

	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
		unsigned int idx = i + 1;

		if (i >= bp->rx_cp_nr_rings)
			cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
		else
			cpr = bp->rx_queues[i]->cp_ring;
		if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
			rc = bnxt_hwrm_stat_ctx_free(bp, cpr, idx);
			if (rc)
				return rc;
		}
	}
	return 0;
}

int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
{
	unsigned int i;
	int rc = 0;

	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq;
		struct bnxt_rx_queue *rxq;
		struct bnxt_cp_ring_info *cpr;
		unsigned int idx = i + 1;

		if (i >= bp->rx_cp_nr_rings) {
			txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
			cpr = txq->cp_ring;
		} else {
			rxq = bp->rx_queues[i];
			cpr = rxq->cp_ring;
		}

		rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, idx);

		if (rc)
			return rc;
	}
	return rc;
}

int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
{
	uint16_t i;
	int rc = 0;

	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		unsigned int idx = i + 1;

		if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID) {
			RTE_LOG(ERR, PMD,
				"Attempt to free invalid ring group %d\n",
				idx);
			continue;
		}

		rc = bnxt_hwrm_ring_grp_free(bp, idx);

		if (rc)
			return rc;
	}
	return rc;
}

static void bnxt_free_cp_ring(struct bnxt *bp,
			      struct bnxt_cp_ring_info *cpr, unsigned int idx)
{
	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;

	bnxt_hwrm_ring_free(bp, cp_ring,
			    HWRM_RING_FREE_INPUT_RING_TYPE_CMPL);
	cp_ring->fw_ring_id = INVALID_HW_RING_ID;
	bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
	memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
			sizeof(*cpr->cp_desc_ring));
	cpr->cp_raw_cons = 0;
}

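/*
 * Teardown order matters here: each TX/RX ring is freed before its
 * completion ring, ring state is zeroed so the rings can be reused, and
 * the default completion ring (group index 0) is released last.
 */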
int bnxt_free_all_hwrm_rings(struct bnxt *bp)
{
	unsigned int i;
	int rc = 0;

	for (i = 0; i < bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq = bp->tx_queues[i];
		struct bnxt_tx_ring_info *txr = txq->tx_ring;
		struct bnxt_ring *ring = txr->tx_ring_struct;
		struct bnxt_cp_ring_info *cpr = txq->cp_ring;
		unsigned int idx = bp->rx_cp_nr_rings + i + 1;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_hwrm_ring_free(bp, ring,
					HWRM_RING_FREE_INPUT_RING_TYPE_TX);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			memset(txr->tx_desc_ring, 0,
					txr->tx_ring_struct->ring_size *
					sizeof(*txr->tx_desc_ring));
			memset(txr->tx_buf_ring, 0,
					txr->tx_ring_struct->ring_size *
					sizeof(*txr->tx_buf_ring));
			txr->tx_prod = 0;
			txr->tx_cons = 0;
		}
		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
			bnxt_free_cp_ring(bp, cpr, idx);
	}

	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		struct bnxt_rx_queue *rxq = bp->rx_queues[i];
		struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
		struct bnxt_ring *ring = rxr->rx_ring_struct;
		struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
		unsigned int idx = i + 1;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_hwrm_ring_free(bp, ring,
					HWRM_RING_FREE_INPUT_RING_TYPE_RX);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
			memset(rxr->rx_desc_ring, 0,
					rxr->rx_ring_struct->ring_size *
					sizeof(*rxr->rx_desc_ring));
			memset(rxr->rx_buf_ring, 0,
					rxr->rx_ring_struct->ring_size *
					sizeof(*rxr->rx_buf_ring));
			rxr->rx_prod = 0;
		}
		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
			bnxt_free_cp_ring(bp, cpr, idx);
	}

	/* Default completion ring */
	{
		struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;

		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
			bnxt_free_cp_ring(bp, cpr, 0);
	}

	return rc;
}

int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
{
	uint16_t i;
	int rc = 0;

	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		unsigned int idx = i + 1;

		if (bp->grp_info[idx].cp_fw_ring_id == INVALID_HW_RING_ID ||
		    bp->grp_info[idx].rx_fw_ring_id == INVALID_HW_RING_ID)
			continue;

		rc = bnxt_hwrm_ring_grp_alloc(bp, idx);

		if (rc)
			return rc;
	}
	return rc;
}

void bnxt_free_hwrm_resources(struct bnxt *bp)
{
	/* Release the response buffer */
	rte_free(bp->hwrm_cmd_resp_addr);
	bp->hwrm_cmd_resp_addr = NULL;
	bp->hwrm_cmd_resp_dma_addr = 0;
}

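/*
 * Allocate the HWRM response buffer.  It must be DMA-addressable by the
 * device, which is why it comes from rte_malloc() and its physical
 * address is resolved with rte_malloc_virt2phy() rather than using
 * ordinary heap memory.
 */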
int bnxt_alloc_hwrm_resources(struct bnxt *bp)
{
	struct rte_pci_device *pdev = bp->pdev;
	char type[RTE_MEMZONE_NAMESIZE];

	sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
	bp->max_req_len = HWRM_MAX_REQ_LEN;
	bp->max_resp_len = HWRM_MAX_RESP_LEN;
	bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
	if (bp->hwrm_cmd_resp_addr == NULL)
		return -ENOMEM;
	bp->hwrm_cmd_resp_dma_addr =
		rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);
	rte_spinlock_init(&bp->hwrm_lock);

	return 0;
}

int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	struct bnxt_filter_info *filter;
	int rc = 0;

	STAILQ_FOREACH(filter, &vnic->filter, next) {
		rc = bnxt_hwrm_clear_filter(bp, filter);
		if (rc)
			break;
	}
	return rc;
}

int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	struct bnxt_filter_info *filter;
	int rc = 0;

	STAILQ_FOREACH(filter, &vnic->filter, next) {
		rc = bnxt_hwrm_set_filter(bp, vnic, filter);
		if (rc)
			break;
	}
	return rc;
}

void bnxt_free_all_hwrm_resources(struct bnxt *bp)
{
	struct bnxt_vnic_info *vnic;
	unsigned int i;

	if (bp->vnic_info == NULL)
		return;

	vnic = &bp->vnic_info[0];
	bnxt_hwrm_cfa_l2_clear_rx_mask(bp, vnic);

	/* VNIC resources */
	for (i = 0; i < bp->nr_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		bnxt_clear_hwrm_vnic_filters(bp, vnic);

		bnxt_hwrm_vnic_ctx_free(bp, vnic);
		bnxt_hwrm_vnic_free(bp, vnic);
	}
	/* Ring resources */
	bnxt_free_all_hwrm_rings(bp);
	bnxt_free_all_hwrm_ring_grps(bp);
	bnxt_free_all_hwrm_stat_ctxs(bp);
}

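/*
 * Link configuration helpers: translate between the rte_ethdev link speed
 * flags (ETH_LINK_SPEED_*) and the HWRM encodings, in both directions.
 */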
static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
{
	uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;

	if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;

	switch (conf_link_speed) {
	case ETH_LINK_SPEED_10M_HD:
	case ETH_LINK_SPEED_100M_HD:
		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
	}
	return hw_link_duplex;
}

static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
{
	uint16_t eth_link_speed = 0;

	if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
		return ETH_LINK_SPEED_AUTONEG;

	switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
	case ETH_LINK_SPEED_100M:
	case ETH_LINK_SPEED_100M_HD:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
		break;
	case ETH_LINK_SPEED_1G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
		break;
	case ETH_LINK_SPEED_2_5G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
		break;
	case ETH_LINK_SPEED_10G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
		break;
	case ETH_LINK_SPEED_20G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
		break;
	case ETH_LINK_SPEED_25G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
		break;
	case ETH_LINK_SPEED_40G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
		break;
	case ETH_LINK_SPEED_50G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
		break;
	default:
		RTE_LOG(ERR, PMD,
			"Unsupported link speed %d; default to AUTO\n",
			conf_link_speed);
		break;
	}
	return eth_link_speed;
}

#define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
		ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
		ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
		ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)

static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id)
{
	uint32_t one_speed;

	if (link_speed == ETH_LINK_SPEED_AUTONEG)
		return 0;

	if (link_speed & ETH_LINK_SPEED_FIXED) {
		one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;

		if (one_speed & (one_speed - 1)) {
			RTE_LOG(ERR, PMD,
				"Invalid advertised speeds (%u) for port %u\n",
				link_speed, port_id);
			return -EINVAL;
		}
		if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
			RTE_LOG(ERR, PMD,
				"Unsupported advertised speed (%u) for port %u\n",
				link_speed, port_id);
			return -EINVAL;
		}
	} else {
		if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
			RTE_LOG(ERR, PMD,
				"Unsupported advertised speeds (%u) for port %u\n",
				link_speed, port_id);
			return -EINVAL;
		}
	}
	return 0;
}

static uint16_t bnxt_parse_eth_link_speed_mask(uint32_t link_speed)
{
	uint16_t ret = 0;

	if (link_speed == ETH_LINK_SPEED_AUTONEG)
		link_speed = BNXT_SUPPORTED_SPEEDS;

	if (link_speed & ETH_LINK_SPEED_100M)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
	if (link_speed & ETH_LINK_SPEED_100M_HD)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
	if (link_speed & ETH_LINK_SPEED_1G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
	if (link_speed & ETH_LINK_SPEED_2_5G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
	if (link_speed & ETH_LINK_SPEED_10G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
	if (link_speed & ETH_LINK_SPEED_20G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
	if (link_speed & ETH_LINK_SPEED_25G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
	if (link_speed & ETH_LINK_SPEED_40G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
	if (link_speed & ETH_LINK_SPEED_50G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
	return ret;
}

static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
{
	uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;

	switch (hw_link_speed) {
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
		eth_link_speed = ETH_SPEED_NUM_100M;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
		eth_link_speed = ETH_SPEED_NUM_1G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
		eth_link_speed = ETH_SPEED_NUM_2_5G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
		eth_link_speed = ETH_SPEED_NUM_10G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
		eth_link_speed = ETH_SPEED_NUM_20G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
		eth_link_speed = ETH_SPEED_NUM_25G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
		eth_link_speed = ETH_SPEED_NUM_40G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
		eth_link_speed = ETH_SPEED_NUM_50G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
	default:
		RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",
			hw_link_speed);
		break;
	}
	return eth_link_speed;
}

static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
{
	uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;

	switch (hw_link_duplex) {
	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
		eth_link_duplex = ETH_LINK_FULL_DUPLEX;
		break;
	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
		eth_link_duplex = ETH_LINK_HALF_DUPLEX;
		break;
	default:
		RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
			hw_link_duplex);
		break;
	}
	return eth_link_duplex;
}

int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
{
	int rc = 0;
	struct bnxt_link_info *link_info = &bp->link_info;

	rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"Get link config failed with rc %d\n", rc);
		goto exit;
	}
	if (link_info->link_up)
		link->link_speed =
			bnxt_parse_hw_link_speed(link_info->link_speed);
	else
		link->link_speed = ETH_LINK_SPEED_10M;
	link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
	link->link_status = link_info->link_up;
	link->link_autoneg = link_info->auto_mode ==
		HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
		ETH_LINK_SPEED_FIXED : ETH_LINK_SPEED_AUTONEG;
exit:
	return rc;
}

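/*
 * Apply the link configuration requested in dev_conf->link_speeds.  The
 * flow: skip entirely on NPAR/VF functions, validate the requested
 * speeds, then either restart autonegotiation with a speed mask (parsed
 * speed == 0) or force a single speed, and finally give the PHY
 * BNXT_LINK_WAIT_INTERVAL ms to settle.
 */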
int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
{
	int rc = 0;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_link_info link_req;
	uint16_t speed;

	if (BNXT_NPAR_PF(bp) || BNXT_VF(bp))
		return 0;

	rc = bnxt_valid_link_speed(dev_conf->link_speeds,
			bp->eth_dev->data->port_id);
	if (rc)
		goto error;

	memset(&link_req, 0, sizeof(link_req));
	link_req.link_up = link_up;
	if (!link_up)
		goto port_phy_cfg;

	speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
	link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
	if (speed == 0) {
		link_req.phy_flags |=
				HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
		link_req.auto_mode =
				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
		link_req.auto_link_speed_mask =
			bnxt_parse_eth_link_speed_mask(dev_conf->link_speeds);
	} else {
		link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
		link_req.link_speed = speed;
		RTE_LOG(INFO, PMD, "Set Link Speed %x\n", speed);
	}
	link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
	link_req.auto_pause = bp->link_info.auto_pause;
	link_req.force_pause = bp->link_info.force_pause;

port_phy_cfg:
	rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"Set link config failed with rc %d\n", rc);
	}

	rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
error:
	return rc;
}

/* JIRA 22088 */
int bnxt_hwrm_func_qcfg(struct bnxt *bp)
{
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc = 0;

	HWRM_PREP(req, FUNC_QCFG, -1, resp);
	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	if (BNXT_VF(bp)) {
		struct bnxt_vf_info *vf = &bp->vf;

		/* Hard-coded 12-bit (0xfff) VLAN ID mask */
		vf->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
	}

	switch (resp->port_partition_type) {
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
		bp->port_partition_type = resp->port_partition_type;
		break;
	default:
		bp->port_partition_type = 0;
		break;
	}

	return rc;
}