1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2018 Broadcom
3 * All rights reserved.
4 */
5
6 #include <unistd.h>
7
8 #include <rte_byteorder.h>
9 #include <rte_common.h>
10 #include <rte_cycles.h>
11 #include <rte_malloc.h>
12 #include <rte_memzone.h>
13 #include <rte_version.h>
14
15 #include "bnxt.h"
16 #include "bnxt_cpr.h"
17 #include "bnxt_filter.h"
18 #include "bnxt_hwrm.h"
19 #include "bnxt_rxq.h"
20 #include "bnxt_rxr.h"
21 #include "bnxt_ring.h"
22 #include "bnxt_txq.h"
23 #include "bnxt_txr.h"
24 #include "bnxt_vnic.h"
25 #include "hsi_struct_def_dpdk.h"
26
27 #include <rte_io.h>
28
29 #define HWRM_CMD_TIMEOUT 10000
30 #define HWRM_SPEC_CODE_1_8_3 0x10803
31 #define HWRM_VERSION_1_9_1 0x10901
32
33 struct bnxt_plcmodes_cfg {
34 uint32_t flags;
35 uint16_t jumbo_thresh;
36 uint16_t hds_offset;
37 uint16_t hds_threshold;
38 };
39
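/*
 * page_getenum() returns log2 of the smallest supported page size that can
 * hold "size" bytes, and page_roundup() rounds "size" up to that page size.
 * For example, page_getenum(3000) is 12 and page_roundup(3000) is 4096.
 */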
40 static int page_getenum(size_t size)
41 {
42 if (size <= 1 << 4)
43 return 4;
44 if (size <= 1 << 12)
45 return 12;
46 if (size <= 1 << 13)
47 return 13;
48 if (size <= 1 << 16)
49 return 16;
50 if (size <= 1 << 21)
51 return 21;
52 if (size <= 1 << 22)
53 return 22;
54 if (size <= 1 << 30)
55 return 30;
56 PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
57 return sizeof(void *) * 8 - 1;
58 }
59
60 static int page_roundup(size_t size)
61 {
62 return 1 << page_getenum(size);
63 }
64
65 /*
66 * HWRM Functions (sent to HWRM)
67 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
68 * fails (i.e. a timeout), and a positive non-zero HWRM error code if the
69 * HWRM command is rejected by the ChiMP.
70 */
71
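/*
 * Low-level transport for a single HWRM command: copy the request into
 * BAR0, zero-pad the remainder of the command window, ring the doorbell at
 * offset 0x100, then poll the DMA response buffer until the firmware writes
 * the "valid" byte at the end of the response (up to HWRM_CMD_TIMEOUT polls,
 * 600 us apart).
 */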
72 static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
73 uint32_t msg_len)
74 {
75 unsigned int i;
76 struct input *req = msg;
77 struct output *resp = bp->hwrm_cmd_resp_addr;
78 uint32_t *data = msg;
79 uint8_t *bar;
80 uint8_t *valid;
81 uint16_t max_req_len = bp->max_req_len;
82 struct hwrm_short_input short_input = { 0 };
83
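/*
 * Short command format: the full request is placed in a dedicated DMA
 * buffer and only a small hwrm_short_input descriptor pointing at that
 * buffer is written to BAR0 below.
 */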
84 if (bp->flags & BNXT_FLAG_SHORT_CMD) {
85 void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
86
87 memset(short_cmd_req, 0, bp->max_req_len);
88 memcpy(short_cmd_req, req, msg_len);
89
90 short_input.req_type = rte_cpu_to_le_16(req->req_type);
91 short_input.signature = rte_cpu_to_le_16(
92 HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD);
93 short_input.size = rte_cpu_to_le_16(msg_len);
94 short_input.req_addr =
95 rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);
96
97 data = (uint32_t *)&short_input;
98 msg_len = sizeof(short_input);
99
100 /* Sync memory write before updating doorbell */
101 rte_wmb();
102
103 max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
104 }
105
106 /* Write request msg to hwrm channel */
107 for (i = 0; i < msg_len; i += 4) {
108 bar = (uint8_t *)bp->bar0 + i;
109 rte_write32(*data, bar);
110 data++;
111 }
112
113 /* Zero the rest of the request space */
114 for (; i < max_req_len; i += 4) {
115 bar = (uint8_t *)bp->bar0 + i;
116 rte_write32(0, bar);
117 }
118
119 /* Ring channel doorbell */
120 bar = (uint8_t *)bp->bar0 + 0x100;
121 rte_write32(1, bar);
122
123 /* Poll for the valid bit */
124 for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
125 /* Sanity check on the resp->resp_len */
126 rte_rmb();
127 if (resp->resp_len && resp->resp_len <=
128 bp->max_resp_len) {
129 /* Last byte of resp contains the valid key */
130 valid = (uint8_t *)resp + resp->resp_len - 1;
131 if (*valid == HWRM_RESP_VALID_KEY)
132 break;
133 }
134 rte_delay_us(600);
135 }
136
137 if (i >= HWRM_CMD_TIMEOUT) {
138 PMD_DRV_LOG(ERR, "Error sending msg 0x%04x\n",
139 req->req_type);
140 goto err_ret;
141 }
142 return 0;
143
144 err_ret:
145 return -1;
146 }
147
148 /*
149 * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the
150 * spinlock and does the initial processing.
151 *
152 * HWRM_CHECK_RESULT() checks the send status and the HWRM error code; on
153 * failure it releases the spinlock and returns the error from the enclosing
154 * function. If the regular int return codes are not used by the function,
155 * HWRM_CHECK_RESULT() should not be used directly; copy and modify it to
156 * suit the function instead.
157 * HWRM_UNLOCK() must be called after all response processing is completed.
158 */
159 #define HWRM_PREP(req, type) do { \
160 rte_spinlock_lock(&bp->hwrm_lock); \
161 memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
162 req.req_type = rte_cpu_to_le_16(HWRM_##type); \
163 req.cmpl_ring = rte_cpu_to_le_16(-1); \
164 req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
165 req.target_id = rte_cpu_to_le_16(0xffff); \
166 req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
167 } while (0)
168
169 #define HWRM_CHECK_RESULT_SILENT() do {\
170 if (rc) { \
171 rte_spinlock_unlock(&bp->hwrm_lock); \
172 return rc; \
173 } \
174 if (resp->error_code) { \
175 rc = rte_le_to_cpu_16(resp->error_code); \
176 rte_spinlock_unlock(&bp->hwrm_lock); \
177 return rc; \
178 } \
179 } while (0)
180
181 #define HWRM_CHECK_RESULT() do {\
182 if (rc) { \
183 PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
184 rte_spinlock_unlock(&bp->hwrm_lock); \
185 if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
186 rc = -EACCES; \
187 else if (rc > 0) \
188 rc = -EINVAL; \
189 return rc; \
190 } \
191 if (resp->error_code) { \
192 rc = rte_le_to_cpu_16(resp->error_code); \
193 if (resp->resp_len >= 16) { \
194 struct hwrm_err_output *tmp_hwrm_err_op = \
195 (void *)resp; \
196 PMD_DRV_LOG(ERR, \
197 "error %d:%d:%08x:%04x\n", \
198 rc, tmp_hwrm_err_op->cmd_err, \
199 rte_le_to_cpu_32(\
200 tmp_hwrm_err_op->opaque_0), \
201 rte_le_to_cpu_16(\
202 tmp_hwrm_err_op->opaque_1)); \
203 } else { \
204 PMD_DRV_LOG(ERR, "error %d\n", rc); \
205 } \
206 rte_spinlock_unlock(&bp->hwrm_lock); \
207 if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
208 rc = -EACCES; \
209 else if (rc > 0) \
210 rc = -EINVAL; \
211 return rc; \
212 } \
213 } while (0)
214
215 #define HWRM_UNLOCK() rte_spinlock_unlock(&bp->hwrm_lock)
216
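/*
 * Typical shape of the bnxt_hwrm_*() helpers below (illustrative sketch
 * only; "xxx"/"XXX" is a placeholder command name):
 *
 *	struct hwrm_xxx_input req = {.req_type = 0 };
 *	struct hwrm_xxx_output *resp = bp->hwrm_cmd_resp_addr;
 *
 *	HWRM_PREP(req, XXX);         <- takes hwrm_lock, fills common header
 *	... fill request-specific fields ...
 *	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 *	HWRM_CHECK_RESULT();         <- on error: unlocks and returns
 *	... read fields from *resp while hwrm_lock is still held ...
 *	HWRM_UNLOCK();               <- always releases hwrm_lock
 *	return rc;
 */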
217 int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
218 {
219 int rc = 0;
220 struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
221 struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
222
223 HWRM_PREP(req, CFA_L2_SET_RX_MASK);
224 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
225 req.mask = 0;
226
227 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
228
229 HWRM_CHECK_RESULT();
230 HWRM_UNLOCK();
231
232 return rc;
233 }
234
235 int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
236 struct bnxt_vnic_info *vnic,
237 uint16_t vlan_count,
238 struct bnxt_vlan_table_entry *vlan_table)
239 {
240 int rc = 0;
241 struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
242 struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
243 uint32_t mask = 0;
244
245 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
246 return rc;
247
248 HWRM_PREP(req, CFA_L2_SET_RX_MASK);
249 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
250
251 /* FIXME: add the multicast flag once adding multicast addresses is
252 * supported by ethtool.
253 */
254 if (vnic->flags & BNXT_VNIC_INFO_BCAST)
255 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
256 if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
257 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
258 if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
259 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
260 if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
261 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
262 if (vnic->flags & BNXT_VNIC_INFO_MCAST)
263 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
264 if (vnic->mc_addr_cnt) {
265 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
266 req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
267 req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
268 }
269 if (vlan_table) {
270 if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
271 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
272 req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
273 rte_mem_virt2iova(vlan_table));
274 req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
275 }
276 req.mask = rte_cpu_to_le_32(mask);
277
278 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
279
280 HWRM_CHECK_RESULT();
281 HWRM_UNLOCK();
282
283 return rc;
284 }
285
286 int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
287 uint16_t vlan_count,
288 struct bnxt_vlan_antispoof_table_entry *vlan_table)
289 {
290 int rc = 0;
291 struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
292 struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
293 bp->hwrm_cmd_resp_addr;
294
295 /*
296 * Older HWRM versions did not support this command, and the set_rx_mask
297 * list was used for anti-spoof. In 1.8.0, the TX path configuration was
298 * removed from the set_rx_mask call and this command was added.
299 *
300 * This command is also present in 1.7.8.11 and higher,
301 * as well as in 1.7.8.0.
302 */
303 if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
304 if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
305 if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
306 (11)))
307 return 0;
308 }
309 }
310 HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG);
311 req.fid = rte_cpu_to_le_16(fid);
312
313 req.vlan_tag_mask_tbl_addr =
314 rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
315 req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);
316
317 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
318
319 HWRM_CHECK_RESULT();
320 HWRM_UNLOCK();
321
322 return rc;
323 }
324
325 int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
326 struct bnxt_filter_info *filter)
327 {
328 int rc = 0;
329 struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
330 struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;
331
332 if (filter->fw_l2_filter_id == UINT64_MAX)
333 return 0;
334
335 HWRM_PREP(req, CFA_L2_FILTER_FREE);
336
337 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
338
339 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
340
341 HWRM_CHECK_RESULT();
342 HWRM_UNLOCK();
343
344 filter->fw_l2_filter_id = UINT64_MAX;
345
346 return 0;
347 }
348
349 int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
350 uint16_t dst_id,
351 struct bnxt_filter_info *filter)
352 {
353 int rc = 0;
354 struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
355 struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
356 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
357 const struct rte_eth_vmdq_rx_conf *conf =
358 &dev_conf->rx_adv_conf.vmdq_rx_conf;
359 uint32_t enables = 0;
360 uint16_t j = dst_id - 1;
361
362 /* TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ? */
363 if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
364 conf->pool_map[j].pools & (1UL << j)) {
365 PMD_DRV_LOG(DEBUG,
366 "Add vlan %u to vmdq pool %u\n",
367 conf->pool_map[j].vlan_id, j);
368
369 filter->l2_ivlan = conf->pool_map[j].vlan_id;
370 filter->enables |=
371 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
372 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
373 }
374
375 if (filter->fw_l2_filter_id != UINT64_MAX)
376 bnxt_hwrm_clear_l2_filter(bp, filter);
377
378 HWRM_PREP(req, CFA_L2_FILTER_ALLOC);
379
380 req.flags = rte_cpu_to_le_32(filter->flags);
381
382 enables = filter->enables |
383 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
384 req.dst_id = rte_cpu_to_le_16(dst_id);
385
386 if (enables &
387 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
388 memcpy(req.l2_addr, filter->l2_addr,
389 ETHER_ADDR_LEN);
390 if (enables &
391 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
392 memcpy(req.l2_addr_mask, filter->l2_addr_mask,
393 ETHER_ADDR_LEN);
394 if (enables &
395 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
396 req.l2_ovlan = filter->l2_ovlan;
397 if (enables &
398 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
399 req.l2_ivlan = filter->l2_ivlan;
400 if (enables &
401 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
402 req.l2_ovlan_mask = filter->l2_ovlan_mask;
403 if (enables &
404 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
405 req.l2_ivlan_mask = filter->l2_ivlan_mask;
406 if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
407 req.src_id = rte_cpu_to_le_32(filter->src_id);
408 if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
409 req.src_type = filter->src_type;
410
411 req.enables = rte_cpu_to_le_32(enables);
412
413 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
414
415 HWRM_CHECK_RESULT();
416
417 filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
418 HWRM_UNLOCK();
419
420 return rc;
421 }
422
423 int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
424 {
425 struct hwrm_port_mac_cfg_input req = {.req_type = 0};
426 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
427 uint32_t flags = 0;
428 int rc;
429
430 if (!ptp)
431 return 0;
432
433 HWRM_PREP(req, PORT_MAC_CFG);
434
435 if (ptp->rx_filter)
436 flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
437 else
438 flags |=
439 HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
440 if (ptp->tx_tstamp_en)
441 flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
442 else
443 flags |=
444 HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
445 req.flags = rte_cpu_to_le_32(flags);
446 req.enables = rte_cpu_to_le_32
447 (HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
448 req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);
449
450 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
451 HWRM_UNLOCK();
452
453 return rc;
454 }
455
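/*
 * Query the PTP timestamp register offsets from the firmware. When the
 * direct-access flag is reported, allocate and fill bp->ptp_cfg with the
 * Rx/Tx timestamp register offsets so they can be read from the BAR later.
 */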
456 static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
457 {
458 int rc = 0;
459 struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0};
460 struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
461 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
462
463 /* if (bp->hwrm_spec_code < 0x10801 || ptp) TBD */
464 if (ptp)
465 return 0;
466
467 HWRM_PREP(req, PORT_MAC_PTP_QCFG);
468
469 req.port_id = rte_cpu_to_le_16(bp->pf.port_id);
470
471 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
472
473 HWRM_CHECK_RESULT();
474
475 if (!(resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS))
476 return 0;
477
478 ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
479 if (!ptp)
480 return -ENOMEM;
481
482 ptp->rx_regs[BNXT_PTP_RX_TS_L] =
483 rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
484 ptp->rx_regs[BNXT_PTP_RX_TS_H] =
485 rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
486 ptp->rx_regs[BNXT_PTP_RX_SEQ] =
487 rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
488 ptp->rx_regs[BNXT_PTP_RX_FIFO] =
489 rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
490 ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
491 rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
492 ptp->tx_regs[BNXT_PTP_TX_TS_L] =
493 rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
494 ptp->tx_regs[BNXT_PTP_TX_TS_H] =
495 rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
496 ptp->tx_regs[BNXT_PTP_TX_SEQ] =
497 rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
498 ptp->tx_regs[BNXT_PTP_TX_FIFO] =
499 rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);
500
501 ptp->bp = bp;
502 bp->ptp_cfg = ptp;
503
504 return 0;
505 }
506
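/*
 * Query function capabilities: ring/context/VNIC limits, default MAC
 * address and PTP support. On a PF this also (re)allocates the per-VF info
 * array and per-VF VLAN tables whenever the number of VFs has changed.
 */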
507 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
508 {
509 int rc = 0;
510 struct hwrm_func_qcaps_input req = {.req_type = 0 };
511 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
512 uint16_t new_max_vfs;
513 uint32_t flags;
514 int i;
515
516 HWRM_PREP(req, FUNC_QCAPS);
517
518 req.fid = rte_cpu_to_le_16(0xffff);
519
520 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
521
522 HWRM_CHECK_RESULT();
523
524 bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
525 flags = rte_le_to_cpu_32(resp->flags);
526 if (BNXT_PF(bp)) {
527 bp->pf.port_id = resp->port_id;
528 bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
529 bp->pf.total_vfs = rte_le_to_cpu_16(resp->max_vfs);
530 new_max_vfs = bp->pdev->max_vfs;
531 if (new_max_vfs != bp->pf.max_vfs) {
532 if (bp->pf.vf_info)
533 rte_free(bp->pf.vf_info);
534 bp->pf.vf_info = rte_malloc("bnxt_vf_info",
535 sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
536 bp->pf.max_vfs = new_max_vfs;
537 for (i = 0; i < new_max_vfs; i++) {
538 bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
539 bp->pf.vf_info[i].vlan_table =
540 rte_zmalloc("VF VLAN table",
541 getpagesize(),
542 getpagesize());
543 if (bp->pf.vf_info[i].vlan_table == NULL)
544 PMD_DRV_LOG(ERR,
545 "Fail to alloc VLAN table for VF %d\n",
546 i);
547 else
548 rte_mem_lock_page(
549 bp->pf.vf_info[i].vlan_table);
550 bp->pf.vf_info[i].vlan_as_table =
551 rte_zmalloc("VF VLAN AS table",
552 getpagesize(),
553 getpagesize());
554 if (bp->pf.vf_info[i].vlan_as_table == NULL)
555 PMD_DRV_LOG(ERR,
556 "Alloc VLAN AS table for VF %d fail\n",
557 i);
558 else
559 rte_mem_lock_page(
560 bp->pf.vf_info[i].vlan_as_table);
561 STAILQ_INIT(&bp->pf.vf_info[i].filter);
562 }
563 }
564 }
565
566 bp->fw_fid = rte_le_to_cpu_32(resp->fid);
567 memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
568 bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
569 bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
570 bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
571 bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
572 bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
573 /* TODO: For now, do not support VMDq/RFS on VFs. */
574 if (BNXT_PF(bp)) {
575 if (bp->pf.max_vfs)
576 bp->max_vnics = 1;
577 else
578 bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
579 } else {
580 bp->max_vnics = 1;
581 }
582 bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
583 if (BNXT_PF(bp)) {
584 bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
585 if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
586 bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
587 PMD_DRV_LOG(INFO, "PTP SUPPORTED\n");
588 HWRM_UNLOCK();
589 bnxt_hwrm_ptp_qcfg(bp);
590 }
591 }
592
593 HWRM_UNLOCK();
594
595 return rc;
596 }
597
598 int bnxt_hwrm_func_qcaps(struct bnxt *bp)
599 {
600 int rc;
601
602 rc = __bnxt_hwrm_func_qcaps(bp);
603 if (!rc && bp->hwrm_spec_code >= HWRM_SPEC_CODE_1_8_3) {
604 rc = bnxt_hwrm_func_resc_qcaps(bp);
605 if (!rc)
606 bp->flags |= BNXT_FLAG_NEW_RM;
607 }
608
609 return rc;
610 }
611
612 int bnxt_hwrm_func_reset(struct bnxt *bp)
613 {
614 int rc = 0;
615 struct hwrm_func_reset_input req = {.req_type = 0 };
616 struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
617
618 HWRM_PREP(req, FUNC_RESET);
619
620 req.enables = rte_cpu_to_le_32(0);
621
622 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
623
624 HWRM_CHECK_RESULT();
625 HWRM_UNLOCK();
626
627 return rc;
628 }
629
630 int bnxt_hwrm_func_driver_register(struct bnxt *bp)
631 {
632 int rc;
633 struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
634 struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
635
636 if (bp->flags & BNXT_FLAG_REGISTERED)
637 return 0;
638
639 HWRM_PREP(req, FUNC_DRV_RGTR);
640 req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
641 HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
642 req.ver_maj = RTE_VER_YEAR;
643 req.ver_min = RTE_VER_MONTH;
644 req.ver_upd = RTE_VER_MINOR;
645
646 if (BNXT_PF(bp)) {
647 req.enables |= rte_cpu_to_le_32(
648 HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
649 memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
650 RTE_MIN(sizeof(req.vf_req_fwd),
651 sizeof(bp->pf.vf_req_fwd)));
652
653 /*
654 * The PF can sniff HWRM API calls issued by a VF. This can be set
655 * up by the Linux driver and inherited by the DPDK PF driver.
656 * Clear this HWRM sniffer list in FW because the DPDK PF driver
657 * does not support it.
658 */
659 req.flags =
660 rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE);
661 }
662
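/*
 * Ask the firmware to forward the async completion events this PMD
 * handles: link status/speed-config changes, "port connect not allowed",
 * PF driver unload and VF configuration change.
 */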
663 req.async_event_fwd[0] |=
664 rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
665 ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
666 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE);
667 req.async_event_fwd[1] |=
668 rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
669 ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);
670
671 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
672
673 HWRM_CHECK_RESULT();
674 HWRM_UNLOCK();
675
676 bp->flags |= BNXT_FLAG_REGISTERED;
677
678 return rc;
679 }
680
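/*
 * With the new resource manager a VF checks its ring requirements up
 * front: FUNC_VF_CFG is issued with the *_ASSETS_TEST flags so the
 * firmware validates the requested counts without committing them.
 */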
681 int bnxt_hwrm_check_vf_rings(struct bnxt *bp)
682 {
683 if (!(BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)))
684 return 0;
685
686 return bnxt_hwrm_func_reserve_vf_resc(bp, true);
687 }
688
689 int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)
690 {
691 int rc;
692 uint32_t flags = 0;
693 uint32_t enables;
694 struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
695 struct hwrm_func_vf_cfg_input req = {0};
696
697 HWRM_PREP(req, FUNC_VF_CFG);
698
699 req.enables = rte_cpu_to_le_32
700 (HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS |
701 HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS |
702 HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
703 HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
704 HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS |
705 HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS);
706
707 req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings);
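/* Each Rx ring is paired with an aggregation ring, hence the multiplier */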
708 req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings *
709 AGG_RING_MULTIPLIER);
710 req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings + bp->tx_nr_rings);
711 req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings +
712 bp->tx_nr_rings);
713 req.num_hw_ring_grps = rte_cpu_to_le_16(bp->rx_nr_rings);
714 req.num_vnics = rte_cpu_to_le_16(bp->rx_nr_rings);
715 if (bp->vf_resv_strategy ==
716 HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
717 enables = HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS |
718 HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_L2_CTXS |
719 HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
720 req.enables |= rte_cpu_to_le_32(enables);
721 req.num_rsscos_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_RSS_CTX);
722 req.num_l2_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_L2_CTX);
723 req.num_vnics = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_VNIC);
724 }
725
726 if (test)
727 flags = HWRM_FUNC_VF_CFG_INPUT_FLAGS_TX_ASSETS_TEST |
728 HWRM_FUNC_VF_CFG_INPUT_FLAGS_RX_ASSETS_TEST |
729 HWRM_FUNC_VF_CFG_INPUT_FLAGS_CMPL_ASSETS_TEST |
730 HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST |
731 HWRM_FUNC_VF_CFG_INPUT_FLAGS_STAT_CTX_ASSETS_TEST |
732 HWRM_FUNC_VF_CFG_INPUT_FLAGS_VNIC_ASSETS_TEST;
733
734 req.flags = rte_cpu_to_le_32(flags);
735
736 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
737
738 if (test)
739 HWRM_CHECK_RESULT_SILENT();
740 else
741 HWRM_CHECK_RESULT();
742
743 HWRM_UNLOCK();
744 return rc;
745 }
746
747 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
748 {
749 int rc;
750 struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
751 struct hwrm_func_resource_qcaps_input req = {0};
752
753 HWRM_PREP(req, FUNC_RESOURCE_QCAPS);
754 req.fid = rte_cpu_to_le_16(0xffff);
755
756 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
757
758 HWRM_CHECK_RESULT();
759
760 if (BNXT_VF(bp)) {
761 bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
762 bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
763 bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
764 bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
765 bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
766 bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
767 bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
768 bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
769 }
770 bp->vf_resv_strategy = rte_le_to_cpu_16(resp->vf_reservation_strategy);
771 if (bp->vf_resv_strategy >
772 HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC)
773 bp->vf_resv_strategy =
774 HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MAXIMAL;
775
776 HWRM_UNLOCK();
777 return rc;
778 }
779
780 int bnxt_hwrm_ver_get(struct bnxt *bp)
781 {
782 int rc = 0;
783 struct hwrm_ver_get_input req = {.req_type = 0 };
784 struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
785 uint32_t my_version;
786 uint32_t fw_version;
787 uint16_t max_resp_len;
788 char type[RTE_MEMZONE_NAMESIZE];
789 uint32_t dev_caps_cfg;
790
791 bp->max_req_len = HWRM_MAX_REQ_LEN;
792 HWRM_PREP(req, VER_GET);
793
794 req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
795 req.hwrm_intf_min = HWRM_VERSION_MINOR;
796 req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
797
798 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
799
800 HWRM_CHECK_RESULT();
801
802 PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
803 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
804 resp->hwrm_intf_upd_8b, resp->hwrm_fw_maj_8b,
805 resp->hwrm_fw_min_8b, resp->hwrm_fw_bld_8b);
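/*
 * e.g. firmware 20.6.143.0 is packed as 0x14068f00; this is the form the
 * anti-spoof version checks above compare against.
 */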
806 bp->fw_ver = (resp->hwrm_fw_maj_8b << 24) |
807 (resp->hwrm_fw_min_8b << 16) |
808 (resp->hwrm_fw_bld_8b << 8) |
809 resp->hwrm_fw_rsvd_8b;
810 PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
811 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);
812
813 my_version = HWRM_VERSION_MAJOR << 16;
814 my_version |= HWRM_VERSION_MINOR << 8;
815 my_version |= HWRM_VERSION_UPDATE;
816
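/* e.g. interface spec 1.9.1 is stored as 0x10901 (HWRM_VERSION_1_9_1) */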
817 fw_version = resp->hwrm_intf_maj_8b << 16;
818 fw_version |= resp->hwrm_intf_min_8b << 8;
819 fw_version |= resp->hwrm_intf_upd_8b;
820 bp->hwrm_spec_code = fw_version;
821
822 if (resp->hwrm_intf_maj_8b != HWRM_VERSION_MAJOR) {
823 PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
824 rc = -EINVAL;
825 goto error;
826 }
827
828 if (my_version != fw_version) {
829 PMD_DRV_LOG(INFO, "BNXT Driver/HWRM API mismatch.\n");
830 if (my_version < fw_version) {
831 PMD_DRV_LOG(INFO,
832 "Firmware API version is newer than driver.\n");
833 PMD_DRV_LOG(INFO,
834 "The driver may be missing features.\n");
835 } else {
836 PMD_DRV_LOG(INFO,
837 "Firmware API version is older than driver.\n");
838 PMD_DRV_LOG(INFO,
839 "Not all driver features may be functional.\n");
840 }
841 }
842
843 if (bp->max_req_len > resp->max_req_win_len) {
844 PMD_DRV_LOG(ERR, "Unsupported request length\n");
845 rc = -EINVAL;
846 }
847 bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
848 max_resp_len = resp->max_resp_len;
849 dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);
850
851 if (bp->max_resp_len != max_resp_len) {
852 sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
853 bp->pdev->addr.domain, bp->pdev->addr.bus,
854 bp->pdev->addr.devid, bp->pdev->addr.function);
855
856 rte_free(bp->hwrm_cmd_resp_addr);
857
858 bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
859 if (bp->hwrm_cmd_resp_addr == NULL) {
860 rc = -ENOMEM;
861 goto error;
862 }
863 rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
864 bp->hwrm_cmd_resp_dma_addr =
865 rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
866 if (bp->hwrm_cmd_resp_dma_addr == 0) {
867 PMD_DRV_LOG(ERR,
868 "Unable to map response buffer to physical memory.\n");
869 rc = -ENOMEM;
870 goto error;
871 }
872 bp->max_resp_len = max_resp_len;
873 }
874
875 if ((dev_caps_cfg &
876 HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
877 (dev_caps_cfg &
878 HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
879 PMD_DRV_LOG(DEBUG, "Short command supported\n");
880
881 rte_free(bp->hwrm_short_cmd_req_addr);
882
883 bp->hwrm_short_cmd_req_addr = rte_malloc(type,
884 bp->max_req_len, 0);
885 if (bp->hwrm_short_cmd_req_addr == NULL) {
886 rc = -ENOMEM;
887 goto error;
888 }
889 rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
890 bp->hwrm_short_cmd_req_dma_addr =
891 rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
892 if (bp->hwrm_short_cmd_req_dma_addr == 0) {
893 rte_free(bp->hwrm_short_cmd_req_addr);
894 PMD_DRV_LOG(ERR,
895 "Unable to map buffer to physical memory.\n");
896 rc = -ENOMEM;
897 goto error;
898 }
899
900 bp->flags |= BNXT_FLAG_SHORT_CMD;
901 }
902
903 error:
904 HWRM_UNLOCK();
905 return rc;
906 }
907
908 int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
909 {
910 int rc;
911 struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
912 struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
913
914 if (!(bp->flags & BNXT_FLAG_REGISTERED))
915 return 0;
916
917 HWRM_PREP(req, FUNC_DRV_UNRGTR);
918 req.flags = flags;
919
920 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
921
922 HWRM_CHECK_RESULT();
923 HWRM_UNLOCK();
924
925 bp->flags &= ~BNXT_FLAG_REGISTERED;
926
927 return rc;
928 }
929
930 static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
931 {
932 int rc = 0;
933 struct hwrm_port_phy_cfg_input req = {0};
934 struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
935 uint32_t enables = 0;
936
937 HWRM_PREP(req, PORT_PHY_CFG);
938
939 if (conf->link_up) {
940 /* Setting fixed speed, but autoneg is on, so disable autoneg */
941 if (bp->link_info.auto_mode && conf->link_speed) {
942 req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
943 PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");
944 }
945
946 req.flags = rte_cpu_to_le_32(conf->phy_flags);
947 req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
948 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
949 /*
950 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
951 * any auto mode, even "none".
952 */
953 if (!conf->link_speed) {
954 /* No speeds specified. Enable AutoNeg - all speeds */
955 req.auto_mode =
956 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
957 }
958 /* AutoNeg - Advertise speeds specified. */
959 if (conf->auto_link_speed_mask &&
960 !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
961 req.auto_mode =
962 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
963 req.auto_link_speed_mask =
964 conf->auto_link_speed_mask;
965 enables |=
966 HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
967 }
968
969 req.auto_duplex = conf->duplex;
970 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
971 req.auto_pause = conf->auto_pause;
972 req.force_pause = conf->force_pause;
973 /* Set force_pause if there is no auto or if there is a force */
974 if (req.auto_pause && !req.force_pause)
975 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
976 else
977 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;
978
979 req.enables = rte_cpu_to_le_32(enables);
980 } else {
981 req.flags =
982 rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
983 PMD_DRV_LOG(INFO, "Force Link Down\n");
984 }
985
986 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
987
988 HWRM_CHECK_RESULT();
989 HWRM_UNLOCK();
990
991 return rc;
992 }
993
994 static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
995 struct bnxt_link_info *link_info)
996 {
997 int rc = 0;
998 struct hwrm_port_phy_qcfg_input req = {0};
999 struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1000
1001 HWRM_PREP(req, PORT_PHY_QCFG);
1002
1003 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1004
1005 HWRM_CHECK_RESULT();
1006
1007 link_info->phy_link_status = resp->link;
1008 link_info->link_up =
1009 (link_info->phy_link_status ==
1010 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
1011 link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
1012 link_info->duplex = resp->duplex_cfg;
1013 link_info->pause = resp->pause;
1014 link_info->auto_pause = resp->auto_pause;
1015 link_info->force_pause = resp->force_pause;
1016 link_info->auto_mode = resp->auto_mode;
1017 link_info->phy_type = resp->phy_type;
1018 link_info->media_type = resp->media_type;
1019
1020 link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
1021 link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
1022 link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
1023 link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
1024 link_info->phy_ver[0] = resp->phy_maj;
1025 link_info->phy_ver[1] = resp->phy_min;
1026 link_info->phy_ver[2] = resp->phy_bld;
1027
1028 HWRM_UNLOCK();
1029
1030 PMD_DRV_LOG(DEBUG, "Link Speed %d\n", link_info->link_speed);
1031 PMD_DRV_LOG(DEBUG, "Auto Mode %d\n", link_info->auto_mode);
1032 PMD_DRV_LOG(DEBUG, "Support Speeds %x\n", link_info->support_speeds);
1033 PMD_DRV_LOG(DEBUG, "Auto Link Speed %x\n", link_info->auto_link_speed);
1034 PMD_DRV_LOG(DEBUG, "Auto Link Speed Mask %x\n",
1035 link_info->auto_link_speed_mask);
1036 PMD_DRV_LOG(DEBUG, "Forced Link Speed %x\n",
1037 link_info->force_link_speed);
1038
1039 return rc;
1040 }
1041
1042 int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
1043 {
1044 int rc = 0;
1045 struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
1046 struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
1047 int i;
1048
1049 HWRM_PREP(req, QUEUE_QPORTCFG);
1050
1051 req.flags = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;
1052 /* HWRM Version >= 1.9.1 */
1053 if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1)
1054 req.drv_qmap_cap =
1055 HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED;
1056 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1057
1058 HWRM_CHECK_RESULT();
1059
1060 #define GET_QUEUE_INFO(x) \
1061 bp->cos_queue[x].id = resp->queue_id##x; \
1062 bp->cos_queue[x].profile = resp->queue_id##x##_service_profile
1063
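/*
 * e.g. GET_QUEUE_INFO(0) expands to:
 *	bp->cos_queue[0].id = resp->queue_id0;
 *	bp->cos_queue[0].profile = resp->queue_id0_service_profile;
 */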
1064 GET_QUEUE_INFO(0);
1065 GET_QUEUE_INFO(1);
1066 GET_QUEUE_INFO(2);
1067 GET_QUEUE_INFO(3);
1068 GET_QUEUE_INFO(4);
1069 GET_QUEUE_INFO(5);
1070 GET_QUEUE_INFO(6);
1071 GET_QUEUE_INFO(7);
1072
1073 HWRM_UNLOCK();
1074
1075 if (bp->hwrm_spec_code < HWRM_VERSION_1_9_1) {
1076 bp->tx_cosq_id = bp->cos_queue[0].id;
1077 } else {
1078 /* iterate and find the COSq profile to use for Tx */
1079 for (i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
1080 if (bp->cos_queue[i].profile ==
1081 HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
1082 bp->tx_cosq_id = bp->cos_queue[i].id;
1083 break;
1084 }
1085 }
1086 }
1087 PMD_DRV_LOG(DEBUG, "Tx Cos Queue to use: %d\n", bp->tx_cosq_id);
1088
1089 return rc;
1090 }
1091
1092 int bnxt_hwrm_ring_alloc(struct bnxt *bp,
1093 struct bnxt_ring *ring,
1094 uint32_t ring_type, uint32_t map_index,
1095 uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
1096 {
1097 int rc = 0;
1098 uint32_t enables = 0;
1099 struct hwrm_ring_alloc_input req = {.req_type = 0 };
1100 struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1101
1102 HWRM_PREP(req, RING_ALLOC);
1103
1104 req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
1105 req.fbo = rte_cpu_to_le_32(0);
1106 /* Association of ring index with doorbell index */
1107 req.logical_id = rte_cpu_to_le_16(map_index);
1108 req.length = rte_cpu_to_le_32(ring->ring_size);
1109
1110 switch (ring_type) {
1111 case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
1112 req.queue_id = rte_cpu_to_le_16(bp->tx_cosq_id);
1113 /* FALLTHROUGH */
1114 case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
1115 req.ring_type = ring_type;
1116 req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
1117 req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
1118 if (stats_ctx_id != INVALID_STATS_CTX_ID)
1119 enables |=
1120 HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
1121 break;
1122 case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
1123 req.ring_type = ring_type;
1124 /*
1125 * TODO: Some HWRM versions crash with
1126 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
1127 */
1128 req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
1129 break;
1130 default:
1131 PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
1132 ring_type);
1133 HWRM_UNLOCK();
1134 return -1;
1135 }
1136 req.enables = rte_cpu_to_le_32(enables);
1137
1138 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1139
1140 if (rc || resp->error_code) {
1141 if (rc == 0 && resp->error_code)
1142 rc = rte_le_to_cpu_16(resp->error_code);
1143 switch (ring_type) {
1144 case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
1145 PMD_DRV_LOG(ERR,
1146 "hwrm_ring_alloc cp failed. rc:%d\n", rc);
1147 HWRM_UNLOCK();
1148 return rc;
1149 case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
1150 PMD_DRV_LOG(ERR,
1151 "hwrm_ring_alloc rx failed. rc:%d\n", rc);
1152 HWRM_UNLOCK();
1153 return rc;
1154 case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
1155 PMD_DRV_LOG(ERR,
1156 "hwrm_ring_alloc tx failed. rc:%d\n", rc);
1157 HWRM_UNLOCK();
1158 return rc;
1159 default:
1160 PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc);
1161 HWRM_UNLOCK();
1162 return rc;
1163 }
1164 }
1165
1166 ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
1167 HWRM_UNLOCK();
1168 return rc;
1169 }
1170
1171 int bnxt_hwrm_ring_free(struct bnxt *bp,
1172 struct bnxt_ring *ring, uint32_t ring_type)
1173 {
1174 int rc;
1175 struct hwrm_ring_free_input req = {.req_type = 0 };
1176 struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
1177
1178 HWRM_PREP(req, RING_FREE);
1179
1180 req.ring_type = ring_type;
1181 req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);
1182
1183 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1184
1185 if (rc || resp->error_code) {
1186 if (rc == 0 && resp->error_code)
1187 rc = rte_le_to_cpu_16(resp->error_code);
1188 HWRM_UNLOCK();
1189
1190 switch (ring_type) {
1191 case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
1192 PMD_DRV_LOG(ERR, "hwrm_ring_free cp failed. rc:%d\n",
1193 rc);
1194 return rc;
1195 case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
1196 PMD_DRV_LOG(ERR, "hwrm_ring_free rx failed. rc:%d\n",
1197 rc);
1198 return rc;
1199 case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
1200 PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",
1201 rc);
1202 return rc;
1203 default:
1204 PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc);
1205 return rc;
1206 }
1207 }
1208 HWRM_UNLOCK();
1209 return 0;
1210 }
1211
1212 int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
1213 {
1214 int rc = 0;
1215 struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
1216 struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1217
1218 HWRM_PREP(req, RING_GRP_ALLOC);
1219
1220 req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
1221 req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
1222 req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
1223 req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);
1224
1225 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1226
1227 HWRM_CHECK_RESULT();
1228
1229 bp->grp_info[idx].fw_grp_id =
1230 rte_le_to_cpu_16(resp->ring_group_id);
1231
1232 HWRM_UNLOCK();
1233
1234 return rc;
1235 }
1236
1237 int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
1238 {
1239 int rc;
1240 struct hwrm_ring_grp_free_input req = {.req_type = 0 };
1241 struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;
1242
1243 HWRM_PREP(req, RING_GRP_FREE);
1244
1245 req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);
1246
1247 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1248
1249 HWRM_CHECK_RESULT();
1250 HWRM_UNLOCK();
1251
1252 bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
1253 return rc;
1254 }
1255
1256 int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1257 {
1258 int rc = 0;
1259 struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
1260 struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1261
1262 if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
1263 return rc;
1264
1265 HWRM_PREP(req, STAT_CTX_CLR_STATS);
1266
1267 req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
1268
1269 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1270
1271 HWRM_CHECK_RESULT();
1272 HWRM_UNLOCK();
1273
1274 return rc;
1275 }
1276
1277 int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1278 unsigned int idx __rte_unused)
1279 {
1280 int rc;
1281 struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
1282 struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1283
1284 HWRM_PREP(req, STAT_CTX_ALLOC);
1285
1286 req.update_period_ms = rte_cpu_to_le_32(0);
1287
1288 req.stats_dma_addr =
1289 rte_cpu_to_le_64(cpr->hw_stats_map);
1290
1291 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1292
1293 HWRM_CHECK_RESULT();
1294
1295 cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
1296
1297 HWRM_UNLOCK();
1298
1299 return rc;
1300 }
1301
1302 int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1303 unsigned int idx __rte_unused)
1304 {
1305 int rc;
1306 struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
1307 struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
1308
1309 HWRM_PREP(req, STAT_CTX_FREE);
1310
1311 req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
1312
1313 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1314
1315 HWRM_CHECK_RESULT();
1316 HWRM_UNLOCK();
1317
1318 return rc;
1319 }
1320
1321 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1322 {
1323 int rc = 0, i, j;
1324 struct hwrm_vnic_alloc_input req = { 0 };
1325 struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1326
1327 /* map ring groups to this vnic */
1328 PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
1329 vnic->start_grp_id, vnic->end_grp_id);
1330 for (i = vnic->start_grp_id, j = 0; i < vnic->end_grp_id; i++, j++)
1331 vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
1332
1333 vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
1334 vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
1335 vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
1336 vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
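/*
 * MRU is the largest frame the VNIC accepts: the configured MTU plus
 * Ethernet header, CRC and one VLAN tag (e.g. 1500 + 14 + 4 + 4 = 1522).
 */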
1337 vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
1338 ETHER_CRC_LEN + VLAN_TAG_SIZE;
1339 HWRM_PREP(req, VNIC_ALLOC);
1340
1341 if (vnic->func_default)
1342 req.flags =
1343 rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
1344 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1345
1346 HWRM_CHECK_RESULT();
1347
1348 vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
1349 HWRM_UNLOCK();
1350 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1351 return rc;
1352 }
1353
1354 static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
1355 struct bnxt_vnic_info *vnic,
1356 struct bnxt_plcmodes_cfg *pmode)
1357 {
1358 int rc = 0;
1359 struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
1360 struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1361
1362 HWRM_PREP(req, VNIC_PLCMODES_QCFG);
1363
1364 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1365
1366 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1367
1368 HWRM_CHECK_RESULT();
1369
1370 pmode->flags = rte_le_to_cpu_32(resp->flags);
1371 /* dflt_vnic bit doesn't exist in the _cfg command */
1372 pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
1373 pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
1374 pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
1375 pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);
1376
1377 HWRM_UNLOCK();
1378
1379 return rc;
1380 }
1381
1382 static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
1383 struct bnxt_vnic_info *vnic,
1384 struct bnxt_plcmodes_cfg *pmode)
1385 {
1386 int rc = 0;
1387 struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1388 struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1389
1390 HWRM_PREP(req, VNIC_PLCMODES_CFG);
1391
1392 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1393 req.flags = rte_cpu_to_le_32(pmode->flags);
1394 req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
1395 req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
1396 req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
1397 req.enables = rte_cpu_to_le_32(
1398 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
1399 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
1400 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
1401 );
1402
1403 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1404
1405 HWRM_CHECK_RESULT();
1406 HWRM_UNLOCK();
1407
1408 return rc;
1409 }
1410
1411 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1412 {
1413 int rc = 0;
1414 struct hwrm_vnic_cfg_input req = {.req_type = 0 };
1415 struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1416 uint32_t ctx_enable_flag = 0;
1417 struct bnxt_plcmodes_cfg pmodes;
1418
1419 if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1420 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1421 return rc;
1422 }
1423
1424 rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
1425 if (rc)
1426 return rc;
1427
1428 HWRM_PREP(req, VNIC_CFG);
1429
1430 /* Only RSS is supported for now; TBD: COS & LB */
1431 req.enables =
1432 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP);
1433 if (vnic->lb_rule != 0xffff)
1434 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
1435 if (vnic->cos_rule != 0xffff)
1436 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
1437 if (vnic->rss_rule != 0xffff) {
1438 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
1439 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
1440 }
1441 req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
1442 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1443 req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
1444 req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
1445 req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
1446 req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
1447 req.mru = rte_cpu_to_le_16(vnic->mru);
1448 if (vnic->func_default)
1449 req.flags |=
1450 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
1451 if (vnic->vlan_strip)
1452 req.flags |=
1453 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
1454 if (vnic->bd_stall)
1455 req.flags |=
1456 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
1457 if (vnic->roce_dual)
1458 req.flags |= rte_cpu_to_le_32(
1459 HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
1460 if (vnic->roce_only)
1461 req.flags |= rte_cpu_to_le_32(
1462 HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
1463 if (vnic->rss_dflt_cr)
1464 req.flags |= rte_cpu_to_le_32(
1465 HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);
1466
1467 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1468
1469 HWRM_CHECK_RESULT();
1470 HWRM_UNLOCK();
1471
1472 rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
1473
1474 return rc;
1475 }
1476
1477 int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
1478 int16_t fw_vf_id)
1479 {
1480 int rc = 0;
1481 struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
1482 struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1483
1484 if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1485 PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
1486 return rc;
1487 }
1488 HWRM_PREP(req, VNIC_QCFG);
1489
1490 req.enables =
1491 rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
1492 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1493 req.vf_id = rte_cpu_to_le_16(fw_vf_id);
1494
1495 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1496
1497 HWRM_CHECK_RESULT();
1498
1499 vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
1500 vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
1501 vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
1502 vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
1503 vnic->mru = rte_le_to_cpu_16(resp->mru);
1504 vnic->func_default = rte_le_to_cpu_32(
1505 resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
1506 vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
1507 HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
1508 vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
1509 HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
1510 vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
1511 HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
1512 vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
1513 HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
1514 vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
1515 HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
1516
1517 HWRM_UNLOCK();
1518
1519 return rc;
1520 }
1521
1522 int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1523 {
1524 int rc = 0;
1525 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
1526 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
1527 bp->hwrm_cmd_resp_addr;
1528
1529 HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC);
1530
1531 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1532
1533 HWRM_CHECK_RESULT();
1534
1535 vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
1536 HWRM_UNLOCK();
1537 PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
1538
1539 return rc;
1540 }
1541
1542 int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1543 {
1544 int rc = 0;
1545 struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
1546 struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
1547 bp->hwrm_cmd_resp_addr;
1548
1549 if (vnic->rss_rule == 0xffff) {
1550 PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
1551 return rc;
1552 }
1553 HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE);
1554
1555 req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);
1556
1557 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1558
1559 HWRM_CHECK_RESULT();
1560 HWRM_UNLOCK();
1561
1562 vnic->rss_rule = INVALID_HW_RING_ID;
1563
1564 return rc;
1565 }
1566
1567 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1568 {
1569 int rc = 0;
1570 struct hwrm_vnic_free_input req = {.req_type = 0 };
1571 struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
1572
1573 if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1574 PMD_DRV_LOG(DEBUG, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
1575 return rc;
1576 }
1577
1578 HWRM_PREP(req, VNIC_FREE);
1579
1580 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1581
1582 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1583
1584 HWRM_CHECK_RESULT();
1585 HWRM_UNLOCK();
1586
1587 vnic->fw_vnic_id = INVALID_HW_RING_ID;
1588 return rc;
1589 }
1590
1591 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
1592 struct bnxt_vnic_info *vnic)
1593 {
1594 int rc = 0;
1595 struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
1596 struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1597
1598 HWRM_PREP(req, VNIC_RSS_CFG);
1599
1600 req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
1601 req.hash_mode_flags = vnic->hash_mode;
1602
1603 req.ring_grp_tbl_addr =
1604 rte_cpu_to_le_64(vnic->rss_table_dma_addr);
1605 req.hash_key_tbl_addr =
1606 rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
1607 req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
1608
1609 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1610
1611 HWRM_CHECK_RESULT();
1612 HWRM_UNLOCK();
1613
1614 return rc;
1615 }
1616
1617 int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
1618 struct bnxt_vnic_info *vnic)
1619 {
1620 int rc = 0;
1621 struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1622 struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1623 uint16_t size;
1624
1625 if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1626 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1627 return rc;
1628 }
1629
1630 HWRM_PREP(req, VNIC_PLCMODES_CFG);
1631
1632 req.flags = rte_cpu_to_le_32(
1633 HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
1634
1635 req.enables = rte_cpu_to_le_32(
1636 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
1637
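/*
 * Use the data area of one Rx mbuf (minus headroom) as the jumbo
 * threshold, so frames that do not fit in a single mbuf are placed with
 * the jumbo/aggregation scheme.
 */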
1638 size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
1639 size -= RTE_PKTMBUF_HEADROOM;
1640
1641 req.jumbo_thresh = rte_cpu_to_le_16(size);
1642 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1643
1644 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1645
1646 HWRM_CHECK_RESULT();
1647 HWRM_UNLOCK();
1648
1649 return rc;
1650 }
1651
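/*
 * Enable or disable TPA (transparent packet aggregation, the hardware LRO
 * used behind DPDK's LRO offload) on a VNIC. The aggregation limits below
 * are fixed by the PMD.
 */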
1652 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
1653 struct bnxt_vnic_info *vnic, bool enable)
1654 {
1655 int rc = 0;
1656 struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
1657 struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1658
1659 HWRM_PREP(req, VNIC_TPA_CFG);
1660
1661 if (enable) {
1662 req.enables = rte_cpu_to_le_32(
1663 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
1664 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
1665 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
1666 req.flags = rte_cpu_to_le_32(
1667 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
1668 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
1669 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
1670 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
1671 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
1672 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
1673 req.max_agg_segs = rte_cpu_to_le_16(5);
1674 req.max_aggs =
1675 rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
1676 req.min_agg_len = rte_cpu_to_le_32(512);
1677 }
1678 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1679
1680 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1681
1682 HWRM_CHECK_RESULT();
1683 HWRM_UNLOCK();
1684
1685 return rc;
1686 }
1687
1688 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
1689 {
1690 struct hwrm_func_cfg_input req = {0};
1691 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1692 int rc;
1693
1694 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
1695 req.enables = rte_cpu_to_le_32(
1696 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
1697 memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
1698 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
1699
1700 HWRM_PREP(req, FUNC_CFG);
1701
1702 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1703 HWRM_CHECK_RESULT();
1704 HWRM_UNLOCK();
1705
1706 bp->pf.vf_info[vf].random_mac = false;
1707
1708 return rc;
1709 }
1710
1711 int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
1712 uint64_t *dropped)
1713 {
1714 int rc = 0;
1715 struct hwrm_func_qstats_input req = {.req_type = 0};
1716 struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1717
1718 HWRM_PREP(req, FUNC_QSTATS);
1719
1720 req.fid = rte_cpu_to_le_16(fid);
1721
1722 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1723
1724 HWRM_CHECK_RESULT();
1725
1726 if (dropped)
1727 *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
1728
1729 HWRM_UNLOCK();
1730
1731 return rc;
1732 }
1733
1734 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
1735 struct rte_eth_stats *stats)
1736 {
1737 int rc = 0;
1738 struct hwrm_func_qstats_input req = {.req_type = 0};
1739 struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1740
1741 HWRM_PREP(req, FUNC_QSTATS);
1742
1743 req.fid = rte_cpu_to_le_16(fid);
1744
1745 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1746
1747 HWRM_CHECK_RESULT();
1748
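/*
 * The firmware reports unicast, multicast and broadcast counters
 * separately; rte_eth_stats only has aggregate packet/byte counters, so
 * sum them here.
 */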
1749 stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
1750 stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
1751 stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
1752 stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
1753 stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
1754 stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
1755
1756 stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
1757 stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
1758 stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
1759 stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
1760 stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
1761 stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
1762
1763 stats->imissed = rte_le_to_cpu_64(resp->rx_discard_pkts);
1764 stats->ierrors = rte_le_to_cpu_64(resp->rx_drop_pkts);
1765 stats->oerrors = rte_le_to_cpu_64(resp->tx_discard_pkts);
1766
1767 HWRM_UNLOCK();
1768
1769 return rc;
1770 }
1771
1772 int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
1773 {
1774 int rc = 0;
1775 struct hwrm_func_clr_stats_input req = {.req_type = 0};
1776 struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1777
1778 HWRM_PREP(req, FUNC_CLR_STATS);
1779
1780 req.fid = rte_cpu_to_le_16(fid);
1781
1782 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1783
1784 HWRM_CHECK_RESULT();
1785 HWRM_UNLOCK();
1786
1787 return rc;
1788 }
1789
1790 /*
1791 * HWRM utility functions
1792 */
1793
1794 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
1795 {
1796 unsigned int i;
1797 int rc = 0;
1798
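/* Completion rings are indexed with all Rx queues first, then all Tx queues */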
1799 for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1800 struct bnxt_tx_queue *txq;
1801 struct bnxt_rx_queue *rxq;
1802 struct bnxt_cp_ring_info *cpr;
1803
1804 if (i >= bp->rx_cp_nr_rings) {
1805 txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1806 cpr = txq->cp_ring;
1807 } else {
1808 rxq = bp->rx_queues[i];
1809 cpr = rxq->cp_ring;
1810 }
1811
1812 rc = bnxt_hwrm_stat_clear(bp, cpr);
1813 if (rc)
1814 return rc;
1815 }
1816 return 0;
1817 }
1818
1819 int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
1820 {
1821 int rc;
1822 unsigned int i;
1823 struct bnxt_cp_ring_info *cpr;
1824
1825 for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1826
1827 if (i >= bp->rx_cp_nr_rings) {
1828 cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
1829 } else {
1830 cpr = bp->rx_queues[i]->cp_ring;
1831 bp->grp_info[i].fw_stats_ctx = -1;
1832 }
1833 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
1834 rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
1835 cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
1836 if (rc)
1837 return rc;
1838 }
1839 }
1840 return 0;
1841 }
1842
1843 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
1844 {
1845 unsigned int i;
1846 int rc = 0;
1847
1848 for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1849 struct bnxt_tx_queue *txq;
1850 struct bnxt_rx_queue *rxq;
1851 struct bnxt_cp_ring_info *cpr;
1852
1853 if (i >= bp->rx_cp_nr_rings) {
1854 txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1855 cpr = txq->cp_ring;
1856 } else {
1857 rxq = bp->rx_queues[i];
1858 cpr = rxq->cp_ring;
1859 }
1860
1861 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
1862
1863 if (rc)
1864 return rc;
1865 }
1866 return rc;
1867 }
1868
1869 int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
1870 {
1871 uint16_t idx;
1872 uint32_t rc = 0;
1873
1874 for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
1875
1876 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
1877 continue;
1878
1879 rc = bnxt_hwrm_ring_grp_free(bp, idx);
1880
1881 if (rc)
1882 return rc;
1883 }
1884 return rc;
1885 }
1886
1887 static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1888 {
1889 struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
1890
1891 bnxt_hwrm_ring_free(bp, cp_ring,
1892 HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
1893 cp_ring->fw_ring_id = INVALID_HW_RING_ID;
1894 memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
1895 sizeof(*cpr->cp_desc_ring));
1896 cpr->cp_raw_cons = 0;
1897 }
1898
1899 void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
1900 {
1901 struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
1902 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
1903 struct bnxt_ring *ring = rxr->rx_ring_struct;
1904 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
1905
1906 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1907 bnxt_hwrm_ring_free(bp, ring,
1908 HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1909 ring->fw_ring_id = INVALID_HW_RING_ID;
1910 bp->grp_info[queue_index].rx_fw_ring_id = INVALID_HW_RING_ID;
1911 memset(rxr->rx_desc_ring, 0,
1912 rxr->rx_ring_struct->ring_size *
1913 sizeof(*rxr->rx_desc_ring));
1914 memset(rxr->rx_buf_ring, 0,
1915 rxr->rx_ring_struct->ring_size *
1916 sizeof(*rxr->rx_buf_ring));
1917 rxr->rx_prod = 0;
1918 }
1919 ring = rxr->ag_ring_struct;
1920 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1921 bnxt_hwrm_ring_free(bp, ring,
1922 HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1923 ring->fw_ring_id = INVALID_HW_RING_ID;
1924 memset(rxr->ag_buf_ring, 0,
1925 rxr->ag_ring_struct->ring_size *
1926 sizeof(*rxr->ag_buf_ring));
1927 rxr->ag_prod = 0;
1928 bp->grp_info[queue_index].ag_fw_ring_id = INVALID_HW_RING_ID;
1929 }
1930 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
1931 bnxt_free_cp_ring(bp, cpr);
1932
1933 bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
1934 }
1935
1936 int bnxt_free_all_hwrm_rings(struct bnxt *bp)
1937 {
1938 unsigned int i;
1939
1940 for (i = 0; i < bp->tx_cp_nr_rings; i++) {
1941 struct bnxt_tx_queue *txq = bp->tx_queues[i];
1942 struct bnxt_tx_ring_info *txr = txq->tx_ring;
1943 struct bnxt_ring *ring = txr->tx_ring_struct;
1944 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
1945
1946 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1947 bnxt_hwrm_ring_free(bp, ring,
1948 HWRM_RING_FREE_INPUT_RING_TYPE_TX);
1949 ring->fw_ring_id = INVALID_HW_RING_ID;
1950 memset(txr->tx_desc_ring, 0,
1951 txr->tx_ring_struct->ring_size *
1952 sizeof(*txr->tx_desc_ring));
1953 memset(txr->tx_buf_ring, 0,
1954 txr->tx_ring_struct->ring_size *
1955 sizeof(*txr->tx_buf_ring));
1956 txr->tx_prod = 0;
1957 txr->tx_cons = 0;
1958 }
1959 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1960 bnxt_free_cp_ring(bp, cpr);
1961 cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1962 }
1963 }
1964
1965 for (i = 0; i < bp->rx_cp_nr_rings; i++)
1966 bnxt_free_hwrm_rx_ring(bp, i);
1967
1968 return 0;
1969 }
1970
1971 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
1972 {
1973 uint16_t i;
1974 uint32_t rc = 0;
1975
1976 for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1977 rc = bnxt_hwrm_ring_grp_alloc(bp, i);
1978 if (rc)
1979 return rc;
1980 }
1981 return rc;
1982 }
1983
1984 void bnxt_free_hwrm_resources(struct bnxt *bp)
1985 {
1986 /* Release memzone */
1987 rte_free(bp->hwrm_cmd_resp_addr);
1988 rte_free(bp->hwrm_short_cmd_req_addr);
1989 bp->hwrm_cmd_resp_addr = NULL;
1990 bp->hwrm_short_cmd_req_addr = NULL;
1991 bp->hwrm_cmd_resp_dma_addr = 0;
1992 bp->hwrm_short_cmd_req_dma_addr = 0;
1993 }
1994
1995 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
1996 {
1997 struct rte_pci_device *pdev = bp->pdev;
1998 char type[RTE_MEMZONE_NAMESIZE];
1999
2000 sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
2001 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
2002 bp->max_resp_len = HWRM_MAX_RESP_LEN;
2003 bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
2004 rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
2005 if (bp->hwrm_cmd_resp_addr == NULL)
2006 return -ENOMEM;
2007 bp->hwrm_cmd_resp_dma_addr =
2008 rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
2009 if (bp->hwrm_cmd_resp_dma_addr == 0) {
2010 PMD_DRV_LOG(ERR,
2011 "unable to map response address to physical memory\n");
2012 return -ENOMEM;
2013 }
2014 rte_spinlock_init(&bp->hwrm_lock);
2015
2016 return 0;
2017 }
2018
2019 int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2020 {
2021 struct bnxt_filter_info *filter;
2022 int rc = 0;
2023
2024 STAILQ_FOREACH(filter, &vnic->filter, next) {
2025 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2026 rc = bnxt_hwrm_clear_em_filter(bp, filter);
2027 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2028 rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
2029 else
2030 rc = bnxt_hwrm_clear_l2_filter(bp, filter);
2031 STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
2032 //if (rc)
2033 //break;
2034 }
2035 return rc;
2036 }
2037
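/*
 * Remove all rte_flow entries attached to a VNIC: clear the matching
 * EM/ntuple/L2 filter in firmware, then unlink and free each flow.
 */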
2038 static int
2039 bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2040 {
2041 struct bnxt_filter_info *filter;
2042 struct rte_flow *flow;
2043 int rc = 0;
2044
2045 STAILQ_FOREACH(flow, &vnic->flow_list, next) {
2046 filter = flow->filter;
2047 PMD_DRV_LOG(DEBUG, "filter type %d\n", filter->filter_type);
2048 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2049 rc = bnxt_hwrm_clear_em_filter(bp, filter);
2050 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2051 rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
2052 else
2053 rc = bnxt_hwrm_clear_l2_filter(bp, filter);
2054
2055 STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
2056 rte_free(flow);
2057 //if (rc)
2058 //break;
2059 }
2060 return rc;
2061 }
2062
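/*
 * Re-program every filter on the VNIC's filter list into firmware,
 * stopping at the first failure.
 */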
2063 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2064 {
2065 struct bnxt_filter_info *filter;
2066 int rc = 0;
2067
2068 STAILQ_FOREACH(filter, &vnic->filter, next) {
2069 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2070 rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
2071 filter);
2072 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2073 rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
2074 filter);
2075 else
2076 rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
2077 filter);
2078 if (rc)
2079 break;
2080 }
2081 return rc;
2082 }
2083
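/*
 * Release any VXLAN/Geneve destination UDP ports previously allocated
 * in firmware and reset the cached port values.
 */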
2084 void bnxt_free_tunnel_ports(struct bnxt *bp)
2085 {
2086 if (bp->vxlan_port_cnt)
2087 bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
2088 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
2089 bp->vxlan_port = 0;
2090 if (bp->geneve_port_cnt)
2091 bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
2092 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
2093 bp->geneve_port = 0;
2094 }
2095
2096 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
2097 {
2098 int i;
2099
2100 if (bp->vnic_info == NULL)
2101 return;
2102
2103 /*
2104 * Cleanup VNICs in reverse order, to make sure the L2 filter
2105 * from vnic0 is last to be cleaned up.
2106 */
2107 for (i = bp->nr_vnics - 1; i >= 0; i--) {
2108 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2109
2110 bnxt_clear_hwrm_vnic_flows(bp, vnic);
2111
2112 bnxt_clear_hwrm_vnic_filters(bp, vnic);
2113
2114 bnxt_hwrm_vnic_ctx_free(bp, vnic);
2115
2116 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
2117
2118 bnxt_hwrm_vnic_free(bp, vnic);
2119
2120 rte_free(vnic->fw_grp_ids);
2121 }
2122 /* Ring resources */
2123 bnxt_free_all_hwrm_rings(bp);
2124 bnxt_free_all_hwrm_ring_grps(bp);
2125 bnxt_free_all_hwrm_stat_ctxs(bp);
2126 bnxt_free_tunnel_ports(bp);
2127 }
2128
2129 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
2130 {
2131 uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2132
2133 if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
2134 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2135
2136 switch (conf_link_speed) {
2137 case ETH_LINK_SPEED_10M_HD:
2138 case ETH_LINK_SPEED_100M_HD:
2139 /* FALLTHROUGH */
2140 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
2141 }
2142 return hw_link_duplex;
2143 }
2144
2145 static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
2146 {
2147 return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;
2148 }
2149
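/*
 * Map a single DPDK ETH_LINK_SPEED_* request to the corresponding HWRM
 * link speed code used by HWRM_PORT_PHY_CFG.
 */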
2150 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
2151 {
2152 uint16_t eth_link_speed = 0;
2153
2154 if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
2155 return ETH_LINK_SPEED_AUTONEG;
2156
2157 switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
2158 case ETH_LINK_SPEED_100M:
2159 case ETH_LINK_SPEED_100M_HD:
2160 /* FALLTHROUGH */
2161 eth_link_speed =
2162 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
2163 break;
2164 case ETH_LINK_SPEED_1G:
2165 eth_link_speed =
2166 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
2167 break;
2168 case ETH_LINK_SPEED_2_5G:
2169 eth_link_speed =
2170 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
2171 break;
2172 case ETH_LINK_SPEED_10G:
2173 eth_link_speed =
2174 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
2175 break;
2176 case ETH_LINK_SPEED_20G:
2177 eth_link_speed =
2178 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
2179 break;
2180 case ETH_LINK_SPEED_25G:
2181 eth_link_speed =
2182 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
2183 break;
2184 case ETH_LINK_SPEED_40G:
2185 eth_link_speed =
2186 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
2187 break;
2188 case ETH_LINK_SPEED_50G:
2189 eth_link_speed =
2190 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
2191 break;
2192 case ETH_LINK_SPEED_100G:
2193 eth_link_speed =
2194 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
2195 break;
2196 default:
2197 PMD_DRV_LOG(ERR,
2198 "Unsupported link speed %d; default to AUTO\n",
2199 conf_link_speed);
2200 break;
2201 }
2202 return eth_link_speed;
2203 }
2204
2205 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
2206 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
2207 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
2208 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G)
2209
2210 static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
2211 {
2212 uint32_t one_speed;
2213
2214 if (link_speed == ETH_LINK_SPEED_AUTONEG)
2215 return 0;
2216
2217 if (link_speed & ETH_LINK_SPEED_FIXED) {
2218 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
2219
2220 if (one_speed & (one_speed - 1)) {
2221 PMD_DRV_LOG(ERR,
2222 "Invalid advertised speeds (%u) for port %u\n",
2223 link_speed, port_id);
2224 return -EINVAL;
2225 }
2226 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
2227 PMD_DRV_LOG(ERR,
2228 "Unsupported advertised speed (%u) for port %u\n",
2229 link_speed, port_id);
2230 return -EINVAL;
2231 }
2232 } else {
2233 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
2234 PMD_DRV_LOG(ERR,
2235 "Unsupported advertised speeds (%u) for port %u\n",
2236 link_speed, port_id);
2237 return -EINVAL;
2238 }
2239 }
2240 return 0;
2241 }
2242
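/*
 * Convert a DPDK link speed bitmap into the HWRM auto_link_speed_mask.
 * For autoneg, prefer the speeds the firmware reports as supported.
 */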
2243 static uint16_t
2244 bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
2245 {
2246 uint16_t ret = 0;
2247
2248 if (link_speed == ETH_LINK_SPEED_AUTONEG) {
2249 if (bp->link_info.support_speeds)
2250 return bp->link_info.support_speeds;
2251 link_speed = BNXT_SUPPORTED_SPEEDS;
2252 }
2253
2254 if (link_speed & ETH_LINK_SPEED_100M)
2255 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2256 if (link_speed & ETH_LINK_SPEED_100M_HD)
2257 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2258 if (link_speed & ETH_LINK_SPEED_1G)
2259 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
2260 if (link_speed & ETH_LINK_SPEED_2_5G)
2261 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
2262 if (link_speed & ETH_LINK_SPEED_10G)
2263 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
2264 if (link_speed & ETH_LINK_SPEED_20G)
2265 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
2266 if (link_speed & ETH_LINK_SPEED_25G)
2267 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
2268 if (link_speed & ETH_LINK_SPEED_40G)
2269 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
2270 if (link_speed & ETH_LINK_SPEED_50G)
2271 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
2272 if (link_speed & ETH_LINK_SPEED_100G)
2273 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
2274 return ret;
2275 }
2276
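/* Translate the HWRM-reported link speed into an ETH_SPEED_NUM_* value. */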
2277 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
2278 {
2279 uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
2280
2281 switch (hw_link_speed) {
2282 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
2283 eth_link_speed = ETH_SPEED_NUM_100M;
2284 break;
2285 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
2286 eth_link_speed = ETH_SPEED_NUM_1G;
2287 break;
2288 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
2289 eth_link_speed = ETH_SPEED_NUM_2_5G;
2290 break;
2291 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
2292 eth_link_speed = ETH_SPEED_NUM_10G;
2293 break;
2294 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
2295 eth_link_speed = ETH_SPEED_NUM_20G;
2296 break;
2297 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
2298 eth_link_speed = ETH_SPEED_NUM_25G;
2299 break;
2300 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
2301 eth_link_speed = ETH_SPEED_NUM_40G;
2302 break;
2303 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
2304 eth_link_speed = ETH_SPEED_NUM_50G;
2305 break;
2306 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
2307 eth_link_speed = ETH_SPEED_NUM_100G;
2308 break;
2309 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
2310 default:
2311 PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
2312 hw_link_speed);
2313 break;
2314 }
2315 return eth_link_speed;
2316 }
2317
2318 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
2319 {
2320 uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2321
2322 switch (hw_link_duplex) {
2323 case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
2324 case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
2325 /* FALLTHROUGH */
2326 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2327 break;
2328 case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
2329 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
2330 break;
2331 default:
2332 PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
2333 hw_link_duplex);
2334 break;
2335 }
2336 return eth_link_duplex;
2337 }
2338
2339 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
2340 {
2341 int rc = 0;
2342 struct bnxt_link_info *link_info = &bp->link_info;
2343
2344 rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
2345 if (rc) {
2346 PMD_DRV_LOG(ERR,
2347 "Get link config failed with rc %d\n", rc);
2348 goto exit;
2349 }
2350 if (link_info->link_speed)
2351 link->link_speed =
2352 bnxt_parse_hw_link_speed(link_info->link_speed);
2353 else
2354 link->link_speed = ETH_SPEED_NUM_NONE;
2355 link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
2356 link->link_status = link_info->link_up;
2357 link->link_autoneg = link_info->auto_mode ==
2358 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
2359 ETH_LINK_FIXED : ETH_LINK_AUTONEG;
2360 exit:
2361 return rc;
2362 }
2363
2364 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
2365 {
2366 int rc = 0;
2367 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
2368 struct bnxt_link_info link_req;
2369 uint16_t speed, autoneg;
2370
2371 if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp))
2372 return 0;
2373
2374 rc = bnxt_valid_link_speed(dev_conf->link_speeds,
2375 bp->eth_dev->data->port_id);
2376 if (rc)
2377 goto error;
2378
2379 memset(&link_req, 0, sizeof(link_req));
2380 link_req.link_up = link_up;
2381 if (!link_up)
2382 goto port_phy_cfg;
2383
2384 autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
2385 speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
2386 link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
2387 /* Autoneg can be done only when the FW allows */
2388 if (autoneg == 1 && !(bp->link_info.auto_link_speed ||
2389 bp->link_info.force_link_speed)) {
2390 link_req.phy_flags |=
2391 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
2392 link_req.auto_link_speed_mask =
2393 bnxt_parse_eth_link_speed_mask(bp,
2394 dev_conf->link_speeds);
2395 } else {
2396 if (bp->link_info.phy_type ==
2397 HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
2398 bp->link_info.phy_type ==
2399 HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
2400 bp->link_info.media_type ==
2401 HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
2402 PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
2403 return -EINVAL;
2404 }
2405
2406 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
2407 /* If user wants a particular speed try that first. */
2408 if (speed)
2409 link_req.link_speed = speed;
2410 else if (bp->link_info.force_link_speed)
2411 link_req.link_speed = bp->link_info.force_link_speed;
2412 else
2413 link_req.link_speed = bp->link_info.auto_link_speed;
2414 }
2415 link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
2416 link_req.auto_pause = bp->link_info.auto_pause;
2417 link_req.force_pause = bp->link_info.force_pause;
2418
2419 port_phy_cfg:
2420 rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
2421 if (rc) {
2422 PMD_DRV_LOG(ERR,
2423 "Set link config failed with rc %d\n", rc);
2424 }
2425
2426 error:
2427 return rc;
2428 }
2429
2430 /* JIRA 22088 */
2431 int bnxt_hwrm_func_qcfg(struct bnxt *bp)
2432 {
2433 struct hwrm_func_qcfg_input req = {0};
2434 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2435 uint16_t flags;
2436 int rc = 0;
2437
2438 HWRM_PREP(req, FUNC_QCFG);
2439 req.fid = rte_cpu_to_le_16(0xffff);
2440
2441 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2442
2443 HWRM_CHECK_RESULT();
2444
2445 /* Hardcoded 0xfff VLAN ID mask */
2446 bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
2447 flags = rte_le_to_cpu_16(resp->flags);
2448 if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
2449 bp->flags |= BNXT_FLAG_MULTI_HOST;
2450
2451 switch (resp->port_partition_type) {
2452 case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
2453 case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
2454 case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
2455 /* FALLTHROUGH */
2456 bp->port_partition_type = resp->port_partition_type;
2457 break;
2458 default:
2459 bp->port_partition_type = 0;
2460 break;
2461 }
2462
2463 HWRM_UNLOCK();
2464
2465 return rc;
2466 }
2467
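/*
 * Fallback used when querying a VF's capabilities fails: mirror the
 * resource counts we requested in func_cfg into the qcaps response so
 * the PF accounting that follows still has sane values.
 */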
2468 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
2469 struct hwrm_func_qcaps_output *qcaps)
2470 {
2471 qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
2472 memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
2473 sizeof(qcaps->mac_address));
2474 qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
2475 qcaps->max_rx_rings = fcfg->num_rx_rings;
2476 qcaps->max_tx_rings = fcfg->num_tx_rings;
2477 qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
2478 qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
2479 qcaps->max_vfs = 0;
2480 qcaps->first_vf_id = 0;
2481 qcaps->max_vnics = fcfg->num_vnics;
2482 qcaps->max_decap_records = 0;
2483 qcaps->max_encap_records = 0;
2484 qcaps->max_tx_wm_flows = 0;
2485 qcaps->max_tx_em_flows = 0;
2486 qcaps->max_rx_wm_flows = 0;
2487 qcaps->max_rx_em_flows = 0;
2488 qcaps->max_flow_id = 0;
2489 qcaps->max_mcast_filters = fcfg->num_mcast_filters;
2490 qcaps->max_sp_tx_rings = 0;
2491 qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
2492 }
2493
2494 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
2495 {
2496 struct hwrm_func_cfg_input req = {0};
2497 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2498 int rc;
2499
2500 req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2501 HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2502 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2503 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2504 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2505 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2506 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2507 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2508 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2509 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2510 req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2511 req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
2512 req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2513 ETHER_CRC_LEN + VLAN_TAG_SIZE *
2514 BNXT_NUM_VLANS);
2515 req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
2516 req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
2517 req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
2518 req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
2519 req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
2520 req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
2521 req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
2522 req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
2523 req.fid = rte_cpu_to_le_16(0xffff);
2524
2525 HWRM_PREP(req, FUNC_CFG);
2526
2527 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2528
2529 HWRM_CHECK_RESULT();
2530 HWRM_UNLOCK();
2531
2532 return rc;
2533 }
2534
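/*
 * Build a func_cfg request that splits the PF's resources evenly
 * between the PF and num_vfs VFs (one VNIC per VF for now).
 */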
2535 static void populate_vf_func_cfg_req(struct bnxt *bp,
2536 struct hwrm_func_cfg_input *req,
2537 int num_vfs)
2538 {
2539 req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2540 HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2541 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2542 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2543 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2544 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2545 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2546 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2547 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2548 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2549
2550 req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2551 ETHER_CRC_LEN + VLAN_TAG_SIZE *
2552 BNXT_NUM_VLANS);
2553 req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2554 ETHER_CRC_LEN + VLAN_TAG_SIZE *
2555 BNXT_NUM_VLANS);
2556 req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
2557 (num_vfs + 1));
2558 req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
2559 req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
2560 (num_vfs + 1));
2561 req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
2562 req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
2563 req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
2564 /* TODO: For now, do not support VMDq/RFS on VFs. */
2565 req->num_vnics = rte_cpu_to_le_16(1);
2566 req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
2567 (num_vfs + 1));
2568 }
2569
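/*
 * If the VF's default MAC address is all zeroes, generate a random one
 * and remember that it was randomly assigned.
 */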
2570 static void add_random_mac_if_needed(struct bnxt *bp,
2571 struct hwrm_func_cfg_input *cfg_req,
2572 int vf)
2573 {
2574 struct ether_addr mac;
2575
2576 if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
2577 return;
2578
2579 if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00\x00", 6) == 0) {
2580 cfg_req->enables |=
2581 rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2582 eth_random_addr(cfg_req->dflt_mac_addr);
2583 bp->pf.vf_info[vf].random_mac = true;
2584 } else {
2585 memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
2586 }
2587 }
2588
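/*
 * Query the resources firmware actually granted to a VF and subtract
 * them from the PF's running totals.
 */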
2589 static void reserve_resources_from_vf(struct bnxt *bp,
2590 struct hwrm_func_cfg_input *cfg_req,
2591 int vf)
2592 {
2593 struct hwrm_func_qcaps_input req = {0};
2594 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
2595 int rc;
2596
2597 /* Get the actual allocated values now */
2598 HWRM_PREP(req, FUNC_QCAPS);
2599 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2600 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2601
2602 if (rc) {
2603 PMD_DRV_LOG(ERR, "hwrm_func_qcaps failed rc:%d\n", rc);
2604 copy_func_cfg_to_qcaps(cfg_req, resp);
2605 } else if (resp->error_code) {
2606 rc = rte_le_to_cpu_16(resp->error_code);
2607 PMD_DRV_LOG(ERR, "hwrm_func_qcaps error %d\n", rc);
2608 copy_func_cfg_to_qcaps(cfg_req, resp);
2609 }
2610
2611 bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
2612 bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
2613 bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
2614 bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
2615 bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
2616 bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
2617 /*
2618 * TODO: While not supporting VMDq with VFs, max_vnics is always
2619 * forced to 1 in this case
2620 */
2621 //bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics);
2622 bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
2623
2624 HWRM_UNLOCK();
2625 }
2626
2627 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
2628 {
2629 struct hwrm_func_qcfg_input req = {0};
2630 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2631 int rc;
2632
2633 /* Query the VF's current default VLAN */
2634 HWRM_PREP(req, FUNC_QCFG);
2635 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2636 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2637 if (rc) {
2638 PMD_DRV_LOG(ERR, "hwrm_func_qcfg failed rc:%d\n", rc);
HWRM_UNLOCK();
2639 return -1;
2640 } else if (resp->error_code) {
2641 rc = rte_le_to_cpu_16(resp->error_code);
2642 PMD_DRV_LOG(ERR, "hwrm_func_qcfg error %d\n", rc);
HWRM_UNLOCK();
2643 return -1;
2644 }
2645 rc = rte_le_to_cpu_16(resp->vlan);
2646
2647 HWRM_UNLOCK();
2648
2649 return rc;
2650 }
2651
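/* Refresh the PF's resource limits from the current function config. */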
2652 static int update_pf_resource_max(struct bnxt *bp)
2653 {
2654 struct hwrm_func_qcfg_input req = {0};
2655 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2656 int rc;
2657
2658 /* And copy the allocated numbers into the pf struct */
2659 HWRM_PREP(req, FUNC_QCFG);
2660 req.fid = rte_cpu_to_le_16(0xffff);
2661 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2662 HWRM_CHECK_RESULT();
2663
2664 /* Only TX ring value reflects actual allocation? TODO */
2665 bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
2666 bp->pf.evb_mode = resp->evb_mode;
2667
2668 HWRM_UNLOCK();
2669
2670 return rc;
2671 }
2672
2673 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
2674 {
2675 int rc;
2676
2677 if (!BNXT_PF(bp)) {
2678 PMD_DRV_LOG(ERR, "Attempt to allcoate VFs on a VF!\n");
2679 return -1;
2680 }
2681
2682 rc = bnxt_hwrm_func_qcaps(bp);
2683 if (rc)
2684 return rc;
2685
2686 bp->pf.func_cfg_flags &=
2687 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2688 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2689 bp->pf.func_cfg_flags |=
2690 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
2691 rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2692 return rc;
2693 }
2694
2695 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
2696 {
2697 struct hwrm_func_cfg_input req = {0};
2698 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2699 int i;
2700 size_t sz;
2701 int rc = 0;
2702 size_t req_buf_sz;
2703
2704 if (!BNXT_PF(bp)) {
2705 PMD_DRV_LOG(ERR, "Attempt to allcoate VFs on a VF!\n");
2706 return -1;
2707 }
2708
2709 rc = bnxt_hwrm_func_qcaps(bp);
2710
2711 if (rc)
2712 return rc;
2713
2714 bp->pf.active_vfs = num_vfs;
2715
2716 /*
2717 * First, configure the PF to only use one TX ring. This ensures that
2718 * there are enough rings for all VFs.
2719 *
2720 * If we don't do this, when we call func_alloc() later, we will lock
2721 * extra rings to the PF that won't be available during func_cfg() of
2722 * the VFs.
2723 *
2724 * This has been fixed with firmware versions above 20.6.54
2725 */
2726 bp->pf.func_cfg_flags &=
2727 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2728 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2729 bp->pf.func_cfg_flags |=
2730 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
2731 rc = bnxt_hwrm_pf_func_cfg(bp, 1);
2732 if (rc)
2733 return rc;
2734
2735 /*
2736 * Now, create and register a buffer to hold forwarded VF requests
2737 */
2738 req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
2739 bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
2740 page_roundup(num_vfs * HWRM_MAX_REQ_LEN));
2741 if (bp->pf.vf_req_buf == NULL) {
2742 rc = -ENOMEM;
2743 goto error_free;
2744 }
2745 for (sz = 0; sz < req_buf_sz; sz += getpagesize())
2746 rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
2747 for (i = 0; i < num_vfs; i++)
2748 bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
2749 (i * HWRM_MAX_REQ_LEN);
2750
2751 rc = bnxt_hwrm_func_buf_rgtr(bp);
2752 if (rc)
2753 goto error_free;
2754
2755 populate_vf_func_cfg_req(bp, &req, num_vfs);
2756
2757 bp->pf.active_vfs = 0;
2758 for (i = 0; i < num_vfs; i++) {
2759 add_random_mac_if_needed(bp, &req, i);
2760
2761 HWRM_PREP(req, FUNC_CFG);
2762 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
2763 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
2764 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2765
2766 /* Clear enable flag for next pass */
2767 req.enables &= ~rte_cpu_to_le_32(
2768 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2769
2770 if (rc || resp->error_code) {
2771 PMD_DRV_LOG(ERR,
2772 "Failed to initizlie VF %d\n", i);
2773 PMD_DRV_LOG(ERR,
2774 "Not all VFs available. (%d, %d)\n",
2775 rc, resp->error_code);
2776 HWRM_UNLOCK();
2777 break;
2778 }
2779
2780 HWRM_UNLOCK();
2781
2782 reserve_resources_from_vf(bp, &req, i);
2783 bp->pf.active_vfs++;
2784 bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
2785 }
2786
2787 /*
2788 * Now configure the PF to use "the rest" of the resources
2789 * We use STD_TX_RING_MODE here, which limits the number of TX rings
2790 * but allows QoS to function properly. Without it, the PF rings
2791 * would break bandwidth settings.
2792 */
2793 rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2794 if (rc)
2795 goto error_free;
2796
2797 rc = update_pf_resource_max(bp);
2798 if (rc)
2799 goto error_free;
2800
2801 return rc;
2802
2803 error_free:
2804 bnxt_hwrm_func_buf_unrgtr(bp);
2805 return rc;
2806 }
2807
2808 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
2809 {
2810 struct hwrm_func_cfg_input req = {0};
2811 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2812 int rc;
2813
2814 HWRM_PREP(req, FUNC_CFG);
2815
2816 req.fid = rte_cpu_to_le_16(0xffff);
2817 req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
2818 req.evb_mode = bp->pf.evb_mode;
2819
2820 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2821 HWRM_CHECK_RESULT();
2822 HWRM_UNLOCK();
2823
2824 return rc;
2825 }
2826
2827 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
2828 uint8_t tunnel_type)
2829 {
2830 struct hwrm_tunnel_dst_port_alloc_input req = {0};
2831 struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2832 int rc = 0;
2833
2834 HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC);
2835 req.tunnel_type = tunnel_type;
2836 req.tunnel_dst_port_val = port;
2837 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2838 HWRM_CHECK_RESULT();
2839
2840 switch (tunnel_type) {
2841 case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
2842 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
2843 bp->vxlan_port = port;
2844 break;
2845 case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
2846 bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
2847 bp->geneve_port = port;
2848 break;
2849 default:
2850 break;
2851 }
2852
2853 HWRM_UNLOCK();
2854
2855 return rc;
2856 }
2857
2858 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
2859 uint8_t tunnel_type)
2860 {
2861 struct hwrm_tunnel_dst_port_free_input req = {0};
2862 struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
2863 int rc = 0;
2864
2865 HWRM_PREP(req, TUNNEL_DST_PORT_FREE);
2866
2867 req.tunnel_type = tunnel_type;
2868 req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
2869 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2870
2871 HWRM_CHECK_RESULT();
2872 HWRM_UNLOCK();
2873
2874 return rc;
2875 }
2876
2877 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
2878 uint32_t flags)
2879 {
2880 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2881 struct hwrm_func_cfg_input req = {0};
2882 int rc;
2883
2884 HWRM_PREP(req, FUNC_CFG);
2885
2886 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2887 req.flags = rte_cpu_to_le_32(flags);
2888 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2889
2890 HWRM_CHECK_RESULT();
2891 HWRM_UNLOCK();
2892
2893 return rc;
2894 }
2895
2896 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
2897 {
2898 uint32_t *flag = flagp;
2899
2900 vnic->flags = *flag;
2901 }
2902
2903 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2904 {
2905 return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
2906 }
2907
2908 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
2909 {
2910 int rc = 0;
2911 struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
2912 struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
2913
2914 HWRM_PREP(req, FUNC_BUF_RGTR);
2915
2916 req.req_buf_num_pages = rte_cpu_to_le_16(1);
2917 req.req_buf_page_size = rte_cpu_to_le_16(
2918 page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
2919 req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
2920 req.req_buf_page_addr0 =
2921 rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
2922 if (req.req_buf_page_addr0 == 0) {
2923 PMD_DRV_LOG(ERR,
2924 "unable to map buffer address to physical memory\n");
HWRM_UNLOCK();
2925 return -ENOMEM;
2926 }
2927
2928 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2929
2930 HWRM_CHECK_RESULT();
2931 HWRM_UNLOCK();
2932
2933 return rc;
2934 }
2935
2936 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
2937 {
2938 int rc = 0;
2939 struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
2940 struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
2941
2942 HWRM_PREP(req, FUNC_BUF_UNRGTR);
2943
2944 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2945
2946 HWRM_CHECK_RESULT();
2947 HWRM_UNLOCK();
2948
2949 return rc;
2950 }
2951
2952 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
2953 {
2954 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2955 struct hwrm_func_cfg_input req = {0};
2956 int rc;
2957
2958 HWRM_PREP(req, FUNC_CFG);
2959
2960 req.fid = rte_cpu_to_le_16(0xffff);
2961 req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2962 req.enables = rte_cpu_to_le_32(
2963 HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2964 req.async_event_cr = rte_cpu_to_le_16(
2965 bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2966 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2967
2968 HWRM_CHECK_RESULT();
2969 HWRM_UNLOCK();
2970
2971 return rc;
2972 }
2973
2974 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
2975 {
2976 struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2977 struct hwrm_func_vf_cfg_input req = {0};
2978 int rc;
2979
2980 HWRM_PREP(req, FUNC_VF_CFG);
2981
2982 req.enables = rte_cpu_to_le_32(
2983 HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2984 req.async_event_cr = rte_cpu_to_le_16(
2985 bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2986 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2987
2988 HWRM_CHECK_RESULT();
2989 HWRM_UNLOCK();
2990
2991 return rc;
2992 }
2993
2994 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
2995 {
2996 struct hwrm_func_cfg_input req = {0};
2997 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2998 uint16_t dflt_vlan, fid;
2999 uint32_t func_cfg_flags;
3000 int rc = 0;
3001
3002 HWRM_PREP(req, FUNC_CFG);
3003
3004 if (is_vf) {
3005 dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
3006 fid = bp->pf.vf_info[vf].fid;
3007 func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
3008 } else {
3009 fid = rte_cpu_to_le_16(0xffff);
3010 func_cfg_flags = bp->pf.func_cfg_flags;
3011 dflt_vlan = bp->vlan;
3012 }
3013
3014 req.flags = rte_cpu_to_le_32(func_cfg_flags);
3015 req.fid = rte_cpu_to_le_16(fid);
3016 req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
3017 req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
3018
3019 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3020
3021 HWRM_CHECK_RESULT();
3022 HWRM_UNLOCK();
3023
3024 return rc;
3025 }
3026
3027 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
3028 uint16_t max_bw, uint16_t enables)
3029 {
3030 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3031 struct hwrm_func_cfg_input req = {0};
3032 int rc;
3033
3034 HWRM_PREP(req, FUNC_CFG);
3035
3036 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3037 req.enables |= rte_cpu_to_le_32(enables);
3038 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
3039 req.max_bw = rte_cpu_to_le_32(max_bw);
3040 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3041
3042 HWRM_CHECK_RESULT();
3043 HWRM_UNLOCK();
3044
3045 return rc;
3046 }
3047
3048 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
3049 {
3050 struct hwrm_func_cfg_input req = {0};
3051 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3052 int rc = 0;
3053
3054 HWRM_PREP(req, FUNC_CFG);
3055
3056 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
3057 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3058 req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
3059 req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
3060
3061 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3062
3063 HWRM_CHECK_RESULT();
3064 HWRM_UNLOCK();
3065
3066 return rc;
3067 }
3068
3069 int bnxt_hwrm_set_async_event_cr(struct bnxt *bp)
3070 {
3071 int rc;
3072
3073 if (BNXT_PF(bp))
3074 rc = bnxt_hwrm_func_cfg_def_cp(bp);
3075 else
3076 rc = bnxt_hwrm_vf_func_cfg_def_cp(bp);
3077
3078 return rc;
3079 }
3080
3081 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
3082 void *encaped, size_t ec_size)
3083 {
3084 int rc = 0;
3085 struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
3086 struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
3087
3088 if (ec_size > sizeof(req.encap_request))
3089 return -1;
3090
3091 HWRM_PREP(req, REJECT_FWD_RESP);
3092
3093 req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
3094 memcpy(req.encap_request, encaped, ec_size);
3095
3096 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3097
3098 HWRM_CHECK_RESULT();
3099 HWRM_UNLOCK();
3100
3101 return rc;
3102 }
3103
3104 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
3105 struct ether_addr *mac)
3106 {
3107 struct hwrm_func_qcfg_input req = {0};
3108 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3109 int rc;
3110
3111 HWRM_PREP(req, FUNC_QCFG);
3112
3113 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3114 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3115
3116 HWRM_CHECK_RESULT();
3117
3118 memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);
3119
3120 HWRM_UNLOCK();
3121
3122 return rc;
3123 }
3124
3125 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
3126 void *encaped, size_t ec_size)
3127 {
3128 int rc = 0;
3129 struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
3130 struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
3131
3132 if (ec_size > sizeof(req.encap_request))
3133 return -1;
3134
3135 HWRM_PREP(req, EXEC_FWD_RESP);
3136
3137 req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
3138 memcpy(req.encap_request, encaped, ec_size);
3139
3140 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3141
3142 HWRM_CHECK_RESULT();
3143 HWRM_UNLOCK();
3144
3145 return rc;
3146 }
3147
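/*
 * Query a statistics context and fill the per-queue RX or TX counters
 * of the supplied rte_eth_stats structure.
 */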
3148 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
3149 struct rte_eth_stats *stats, uint8_t rx)
3150 {
3151 int rc = 0;
3152 struct hwrm_stat_ctx_query_input req = {.req_type = 0};
3153 struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
3154
3155 HWRM_PREP(req, STAT_CTX_QUERY);
3156
3157 req.stat_ctx_id = rte_cpu_to_le_32(cid);
3158
3159 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3160
3161 HWRM_CHECK_RESULT();
3162
3163 if (rx) {
3164 stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
3165 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
3166 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
3167 stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
3168 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
3169 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
3170 stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
3171 stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
3172 } else {
3173 stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
3174 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
3175 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
3176 stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
3177 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
3178 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
3179 stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
3180 }
3181
3183 HWRM_UNLOCK();
3184
3185 return rc;
3186 }
3187
3188 int bnxt_hwrm_port_qstats(struct bnxt *bp)
3189 {
3190 struct hwrm_port_qstats_input req = {0};
3191 struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
3192 struct bnxt_pf_info *pf = &bp->pf;
3193 int rc;
3194
3195 HWRM_PREP(req, PORT_QSTATS);
3196
3197 req.port_id = rte_cpu_to_le_16(pf->port_id);
3198 req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
3199 req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
3200 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3201
3202 HWRM_CHECK_RESULT();
3203 HWRM_UNLOCK();
3204
3205 return rc;
3206 }
3207
3208 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
3209 {
3210 struct hwrm_port_clr_stats_input req = {0};
3211 struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
3212 struct bnxt_pf_info *pf = &bp->pf;
3213 int rc;
3214
3215 /* Not allowed on NS2 device, NPAR, MultiHost, VF */
3216 if (!(bp->flags & BNXT_FLAG_PORT_STATS) || BNXT_VF(bp) ||
3217 BNXT_NPAR(bp) || BNXT_MH(bp) || BNXT_TOTAL_VFS(bp))
3218 return 0;
3219
3220 HWRM_PREP(req, PORT_CLR_STATS);
3221
3222 req.port_id = rte_cpu_to_le_16(pf->port_id);
3223 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3224
3225 HWRM_CHECK_RESULT();
3226 HWRM_UNLOCK();
3227
3228 return rc;
3229 }
3230
3231 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
3232 {
3233 struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
3234 struct hwrm_port_led_qcaps_input req = {0};
3235 int rc;
3236
3237 if (BNXT_VF(bp))
3238 return 0;
3239
3240 HWRM_PREP(req, PORT_LED_QCAPS);
3241 req.port_id = bp->pf.port_id;
3242 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3243
3244 HWRM_CHECK_RESULT();
3245
3246 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
3247 unsigned int i;
3248
3249 bp->num_leds = resp->num_leds;
3250 memcpy(bp->leds, &resp->led0_id,
3251 sizeof(bp->leds[0]) * bp->num_leds);
3252 for (i = 0; i < bp->num_leds; i++) {
3253 struct bnxt_led_info *led = &bp->leds[i];
3254
3255 uint16_t caps = led->led_state_caps;
3256
3257 if (!led->led_group_id ||
3258 !BNXT_LED_ALT_BLINK_CAP(caps)) {
3259 bp->num_leds = 0;
3260 break;
3261 }
3262 }
3263 }
3264
3265 HWRM_UNLOCK();
3266
3267 return rc;
3268 }
3269
3270 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
3271 {
3272 struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3273 struct hwrm_port_led_cfg_input req = {0};
3274 struct bnxt_led_cfg *led_cfg;
3275 uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
3276 uint16_t duration = 0;
3277 int rc, i;
3278
3279 if (!bp->num_leds || BNXT_VF(bp))
3280 return -EOPNOTSUPP;
3281
3282 HWRM_PREP(req, PORT_LED_CFG);
3283
3284 if (led_on) {
3285 led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
3286 duration = rte_cpu_to_le_16(500);
3287 }
3288 req.port_id = bp->pf.port_id;
3289 req.num_leds = bp->num_leds;
3290 led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
3291 for (i = 0; i < bp->num_leds; i++, led_cfg++) {
3292 req.enables |= BNXT_LED_DFLT_ENABLES(i);
3293 led_cfg->led_id = bp->leds[i].led_id;
3294 led_cfg->led_state = led_state;
3295 led_cfg->led_blink_on = duration;
3296 led_cfg->led_blink_off = duration;
3297 led_cfg->led_group_id = bp->leds[i].led_group_id;
3298 }
3299
3300 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3301
3302 HWRM_CHECK_RESULT();
3303 HWRM_UNLOCK();
3304
3305 return rc;
3306 }
3307
3308 int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
3309 uint32_t *length)
3310 {
3311 int rc;
3312 struct hwrm_nvm_get_dir_info_input req = {0};
3313 struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
3314
3315 HWRM_PREP(req, NVM_GET_DIR_INFO);
3316
3317 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3318
3319 HWRM_CHECK_RESULT();
3320 HWRM_UNLOCK();
3321
3322 if (!rc) {
3323 *entries = rte_le_to_cpu_32(resp->entries);
3324 *length = rte_le_to_cpu_32(resp->entry_length);
3325 }
3326 return rc;
3327 }
3328
3329 int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
3330 {
3331 int rc;
3332 uint32_t dir_entries;
3333 uint32_t entry_length;
3334 uint8_t *buf;
3335 size_t buflen;
3336 rte_iova_t dma_handle;
3337 struct hwrm_nvm_get_dir_entries_input req = {0};
3338 struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;
3339
3340 rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
3341 if (rc != 0)
3342 return rc;
3343
3344 *data++ = dir_entries;
3345 *data++ = entry_length;
3346 len -= 2;
3347 memset(data, 0xff, len);
3348
3349 buflen = dir_entries * entry_length;
3350 buf = rte_malloc("nvm_dir", buflen, 0);
3351 rte_mem_lock_page(buf);
3352 if (buf == NULL)
3353 return -ENOMEM;
3354 dma_handle = rte_mem_virt2iova(buf);
3355 if (dma_handle == 0) {
3356 PMD_DRV_LOG(ERR,
3357 "unable to map response address to physical memory\n");
rte_free(buf);
3358 return -ENOMEM;
3359 }
3360 HWRM_PREP(req, NVM_GET_DIR_ENTRIES);
3361 req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3362 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3363
3364 if (rc == 0)
3365 memcpy(data, buf, len > buflen ? buflen : len);
3366
3367 rte_free(buf);
3368 HWRM_CHECK_RESULT();
3369 HWRM_UNLOCK();
3370
3371 return rc;
3372 }
3373
3374 int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
3375 uint32_t offset, uint32_t length,
3376 uint8_t *data)
3377 {
3378 int rc;
3379 uint8_t *buf;
3380 rte_iova_t dma_handle;
3381 struct hwrm_nvm_read_input req = {0};
3382 struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
3383
3384 buf = rte_malloc("nvm_item", length, 0);
3385 rte_mem_lock_page(buf);
3386 if (!buf)
3387 return -ENOMEM;
3388
3389 dma_handle = rte_mem_virt2iova(buf);
3390 if (dma_handle == 0) {
3391 PMD_DRV_LOG(ERR,
3392 "unable to map response address to physical memory\n");
rte_free(buf);
3393 return -ENOMEM;
3394 }
3395 HWRM_PREP(req, NVM_READ);
3396 req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3397 req.dir_idx = rte_cpu_to_le_16(index);
3398 req.offset = rte_cpu_to_le_32(offset);
3399 req.len = rte_cpu_to_le_32(length);
3400 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3401 if (rc == 0)
3402 memcpy(data, buf, length);
3403
3404 rte_free(buf);
3405 HWRM_CHECK_RESULT();
3406 HWRM_UNLOCK();
3407
3408 return rc;
3409 }
3410
3411 int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
3412 {
3413 int rc;
3414 struct hwrm_nvm_erase_dir_entry_input req = {0};
3415 struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;
3416
3417 HWRM_PREP(req, NVM_ERASE_DIR_ENTRY);
3418 req.dir_idx = rte_cpu_to_le_16(index);
3419 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3420 HWRM_CHECK_RESULT();
3421 HWRM_UNLOCK();
3422
3423 return rc;
3424 }
3425
3426
3427 int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
3428 uint16_t dir_ordinal, uint16_t dir_ext,
3429 uint16_t dir_attr, const uint8_t *data,
3430 size_t data_len)
3431 {
3432 int rc;
3433 struct hwrm_nvm_write_input req = {0};
3434 struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
3435 rte_iova_t dma_handle;
3436 uint8_t *buf;
3437
3438 buf = rte_malloc("nvm_write", data_len, 0);
3439 rte_mem_lock_page(buf);
3440 if (!buf)
3441 return -ENOMEM;
3442
3443 dma_handle = rte_mem_virt2iova(buf);
3444 if (dma_handle == 0) {
3445 PMD_DRV_LOG(ERR,
3446 "unable to map response address to physical memory\n");
rte_free(buf);
3447 return -ENOMEM;
3448 }
3449 memcpy(buf, data, data_len);
3450
3451 HWRM_PREP(req, NVM_WRITE);
3452
3453 req.dir_type = rte_cpu_to_le_16(dir_type);
3454 req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
3455 req.dir_ext = rte_cpu_to_le_16(dir_ext);
3456 req.dir_attr = rte_cpu_to_le_16(dir_attr);
3457 req.dir_data_length = rte_cpu_to_le_32(data_len);
3458 req.host_src_addr = rte_cpu_to_le_64(dma_handle);
3459
3460 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3461
3462 rte_free(buf);
3463 HWRM_CHECK_RESULT();
3464 HWRM_UNLOCK();
3465
3466 return rc;
3467 }
3468
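/*
 * Callbacks used with bnxt_hwrm_func_vf_vnic_query_and_config() to
 * count a VF's active VNICs without changing their configuration.
 */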
3469 static void
3470 bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
3471 {
3472 uint32_t *count = cbdata;
3473
3474 *count = *count + 1;
3475 }
3476
3477 static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
3478 struct bnxt_vnic_info *vnic __rte_unused)
3479 {
3480 return 0;
3481 }
3482
3483 int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
3484 {
3485 uint32_t count = 0;
3486
3487 bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
3488 &count, bnxt_vnic_count_hwrm_stub);
3489
3490 return count;
3491 }
3492
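/*
 * Query the VNIC IDs owned by the given VF into vnic_ids (which must be
 * DMA-able). Returns the number of IDs on success, negative on failure.
 */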
3493 static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
3494 uint16_t *vnic_ids)
3495 {
3496 struct hwrm_func_vf_vnic_ids_query_input req = {0};
3497 struct hwrm_func_vf_vnic_ids_query_output *resp =
3498 bp->hwrm_cmd_resp_addr;
3499 int rc;
3500
3501 /* First query all VNIC ids */
3502 HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY);
3503
3504 req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
3505 req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
3506 req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));
3507
3508 if (req.vnic_id_tbl_addr == 0) {
3509 HWRM_UNLOCK();
3510 PMD_DRV_LOG(ERR,
3511 "unable to map VNIC ID table address to physical memory\n");
3512 return -ENOMEM;
3513 }
3514 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3515 if (rc) {
3516 HWRM_UNLOCK();
3517 PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
3518 return -1;
3519 } else if (resp->error_code) {
3520 rc = rte_le_to_cpu_16(resp->error_code);
3521 HWRM_UNLOCK();
3522 PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query error %d\n", rc);
3523 return -1;
3524 }
3525 rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
3526
3527 HWRM_UNLOCK();
3528
3529 return rc;
3530 }
3531
3532 /*
3533 * This function queries the VNIC IDs for a specified VF. It then calls
3534 * the vnic_cb to update the necessary field in vnic_info with cbdata.
3535 * Then it calls the hwrm_cb function to program this new vnic configuration.
3536 */
3537 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
3538 void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
3539 int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
3540 {
3541 struct bnxt_vnic_info vnic;
3542 int rc = 0;
3543 int i, num_vnic_ids;
3544 uint16_t *vnic_ids;
3545 size_t vnic_id_sz;
3546 size_t sz;
3547
3548 /* First query all VNIC ids */
3549 vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3550 vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3551 RTE_CACHE_LINE_SIZE);
3552 if (vnic_ids == NULL) {
3553 rc = -ENOMEM;
3554 return rc;
3555 }
3556 for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3557 rte_mem_lock_page(((char *)vnic_ids) + sz);
3558
3559 num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3560
3561 if (num_vnic_ids < 0) {
3562 rte_free(vnic_ids);
return num_vnic_ids;
}
3563
3564 /* Retrieve each VNIC, let vnic_cb modify it, then reconfigure via hwrm_cb */
3565
3566 for (i = 0; i < num_vnic_ids; i++) {
3567 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3568 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3569 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
3570 if (rc)
3571 break;
3572 if (vnic.mru <= 4) /* Indicates unallocated */
3573 continue;
3574
3575 vnic_cb(&vnic, cbdata);
3576
3577 rc = hwrm_cb(bp, &vnic);
3578 if (rc)
3579 break;
3580 }
3581
3582 rte_free(vnic_ids);
3583
3584 return rc;
3585 }
3586
3587 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
3588 bool on)
3589 {
3590 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3591 struct hwrm_func_cfg_input req = {0};
3592 int rc;
3593
3594 HWRM_PREP(req, FUNC_CFG);
3595
3596 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3597 req.enables |= rte_cpu_to_le_32(
3598 HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
3599 req.vlan_antispoof_mode = on ?
3600 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
3601 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
3602 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3603
3604 HWRM_CHECK_RESULT();
3605 HWRM_UNLOCK();
3606
3607 return rc;
3608 }
3609
3610 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
3611 {
3612 struct bnxt_vnic_info vnic;
3613 uint16_t *vnic_ids;
3614 size_t vnic_id_sz;
3615 int num_vnic_ids, i;
3616 size_t sz;
3617 int rc;
3618
3619 vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3620 vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3621 RTE_CACHE_LINE_SIZE);
3622 if (vnic_ids == NULL) {
3623 rc = -ENOMEM;
3624 return rc;
3625 }
3626
3627 for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3628 rte_mem_lock_page(((char *)vnic_ids) + sz);
3629
3630 rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3631 if (rc <= 0)
3632 goto exit;
3633 num_vnic_ids = rc;
3634
3635 /*
3636 * Loop through to find the default VNIC ID.
3637 * TODO: The easier way would be to obtain the resp->dflt_vnic_id
3638 * by sending the hwrm_func_qcfg command to the firmware.
3639 */
3640 for (i = 0; i < num_vnic_ids; i++) {
3641 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3642 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3643 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
3644 bp->pf.first_vf_id + vf);
3645 if (rc)
3646 goto exit;
3647 if (vnic.func_default) {
3648 rte_free(vnic_ids);
3649 return vnic.fw_vnic_id;
3650 }
3651 }
3652 /* Could not find a default VNIC. */
3653 PMD_DRV_LOG(ERR, "No default VNIC\n");
3654 exit:
3655 rte_free(vnic_ids);
3656 return -1;
3657 }
3658
3659 int bnxt_hwrm_set_em_filter(struct bnxt *bp,
3660 uint16_t dst_id,
3661 struct bnxt_filter_info *filter)
3662 {
3663 int rc = 0;
3664 struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
3665 struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3666 uint32_t enables = 0;
3667
3668 if (filter->fw_em_filter_id != UINT64_MAX)
3669 bnxt_hwrm_clear_em_filter(bp, filter);
3670
3671 HWRM_PREP(req, CFA_EM_FLOW_ALLOC);
3672
3673 req.flags = rte_cpu_to_le_32(filter->flags);
3674
3675 enables = filter->enables |
3676 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
3677 req.dst_id = rte_cpu_to_le_16(dst_id);
3678
3679 if (filter->ip_addr_type) {
3680 req.ip_addr_type = filter->ip_addr_type;
3681 enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3682 }
3683 if (enables &
3684 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3685 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3686 if (enables &
3687 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3688 memcpy(req.src_macaddr, filter->src_macaddr,
3689 ETHER_ADDR_LEN);
3690 if (enables &
3691 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
3692 memcpy(req.dst_macaddr, filter->dst_macaddr,
3693 ETHER_ADDR_LEN);
3694 if (enables &
3695 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
3696 req.ovlan_vid = filter->l2_ovlan;
3697 if (enables &
3698 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
3699 req.ivlan_vid = filter->l2_ivlan;
3700 if (enables &
3701 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
3702 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3703 if (enables &
3704 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3705 req.ip_protocol = filter->ip_protocol;
3706 if (enables &
3707 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3708 req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
3709 if (enables &
3710 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
3711 req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
3712 if (enables &
3713 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
3714 req.src_port = rte_cpu_to_be_16(filter->src_port);
3715 if (enables &
3716 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
3717 req.dst_port = rte_cpu_to_be_16(filter->dst_port);
3718 if (enables &
3719 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3720 req.mirror_vnic_id = filter->mirror_vnic_id;
3721
3722 req.enables = rte_cpu_to_le_32(enables);
3723
3724 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3725
3726 HWRM_CHECK_RESULT();
3727
3728 filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
3729 HWRM_UNLOCK();
3730
3731 return rc;
3732 }
3733
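/*
 * Free a previously allocated EM flow via HWRM_CFA_EM_FLOW_FREE and
 * invalidate the cached firmware filter IDs. A no-op if no EM filter is
 * currently programmed.
 */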
3734 int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
3735 {
3736 int rc = 0;
3737 struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
3738 struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;
3739
3740 if (filter->fw_em_filter_id == UINT64_MAX)
3741 return 0;
3742
3743 	PMD_DRV_LOG(DEBUG, "Clear EM filter\n");
3744 HWRM_PREP(req, CFA_EM_FLOW_FREE);
3745
3746 req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
3747
3748 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3749
3750 HWRM_CHECK_RESULT();
3751 HWRM_UNLOCK();
3752
3753 filter->fw_em_filter_id = UINT64_MAX;
3754 filter->fw_l2_filter_id = UINT64_MAX;
3755
3756 return 0;
3757 }
3758
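/*
 * Allocate an n-tuple filter via HWRM_CFA_NTUPLE_FILTER_ALLOC, steering
 * matching packets to dst_id. Any n-tuple filter previously programmed for
 * this bnxt_filter_info is freed first, and the firmware filter ID returned
 * by the command is cached in filter->fw_ntuple_filter_id.
 */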
3759 int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
3760 uint16_t dst_id,
3761 struct bnxt_filter_info *filter)
3762 {
3763 int rc = 0;
3764 struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
3765 struct hwrm_cfa_ntuple_filter_alloc_output *resp =
3766 bp->hwrm_cmd_resp_addr;
3767 uint32_t enables = 0;
3768
3769 if (filter->fw_ntuple_filter_id != UINT64_MAX)
3770 bnxt_hwrm_clear_ntuple_filter(bp, filter);
3771
3772 HWRM_PREP(req, CFA_NTUPLE_FILTER_ALLOC);
3773
3774 req.flags = rte_cpu_to_le_32(filter->flags);
3775
3776 enables = filter->enables |
3777 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
3778 req.dst_id = rte_cpu_to_le_16(dst_id);
3779
3781 if (filter->ip_addr_type) {
3782 req.ip_addr_type = filter->ip_addr_type;
3783 enables |=
3784 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3785 }
3786 if (enables &
3787 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3788 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3789 if (enables &
3790 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3791 memcpy(req.src_macaddr, filter->src_macaddr,
3792 ETHER_ADDR_LEN);
3793 	/* Destination MAC matching is disabled for n-tuple filters here:
3794 	 * if (enables & HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR)
3795 	 *	memcpy(req.dst_macaddr, filter->dst_macaddr, ETHER_ADDR_LEN);
3796 	 */
3797 if (enables &
3798 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
3799 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3800 if (enables &
3801 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3802 req.ip_protocol = filter->ip_protocol;
3803 if (enables &
3804 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3805 req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
3806 if (enables &
3807 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
3808 req.src_ipaddr_mask[0] =
3809 rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
3810 if (enables &
3811 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
3812 req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
3813 if (enables &
3814 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
3815 req.dst_ipaddr_mask[0] =
3816 			rte_cpu_to_le_32(filter->dst_ipaddr_mask[0]);
3817 if (enables &
3818 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
3819 req.src_port = rte_cpu_to_le_16(filter->src_port);
3820 if (enables &
3821 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
3822 req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
3823 if (enables &
3824 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
3825 req.dst_port = rte_cpu_to_le_16(filter->dst_port);
3826 if (enables &
3827 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
3828 req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
3829 if (enables &
3830 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3831 req.mirror_vnic_id = filter->mirror_vnic_id;
3832
3833 req.enables = rte_cpu_to_le_32(enables);
3834
3835 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3836
3837 HWRM_CHECK_RESULT();
3838
3839 filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
3840 HWRM_UNLOCK();
3841
3842 return rc;
3843 }
3844
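/*
 * Free a previously allocated n-tuple filter via HWRM_CFA_NTUPLE_FILTER_FREE
 * and invalidate the cached firmware filter ID. A no-op if no n-tuple filter
 * is currently programmed.
 */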
3845 int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
3846 struct bnxt_filter_info *filter)
3847 {
3848 int rc = 0;
3849 struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
3850 struct hwrm_cfa_ntuple_filter_free_output *resp =
3851 bp->hwrm_cmd_resp_addr;
3852
3853 if (filter->fw_ntuple_filter_id == UINT64_MAX)
3854 return 0;
3855
3856 HWRM_PREP(req, CFA_NTUPLE_FILTER_FREE);
3857
3858 req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);
3859
3860 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3861
3862 HWRM_CHECK_RESULT();
3863 HWRM_UNLOCK();
3864
3865 filter->fw_ntuple_filter_id = UINT64_MAX;
3866
3867 return 0;
3868 }
3869
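/*
 * Fill the VNIC's RSS indirection table with the ring group IDs of the
 * active Rx rings and push the configuration to the firmware via
 * bnxt_hwrm_vnic_rss_cfg(). Nothing is programmed if the VNIC has no RSS
 * table or no hash type configured.
 */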
3870 int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
3871 {
3872 unsigned int rss_idx, fw_idx, i;
3873
3874 if (vnic->rss_table && vnic->hash_type) {
3875 /*
3876 * Fill the RSS hash & redirection table with
3877 * ring group ids for all VNICs
3878 */
3879 for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
3880 rss_idx++, fw_idx++) {
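			/*
			 * Find the next ring group with a valid HW ring ID,
			 * wrapping fw_idx around the Rx completion rings;
			 * stop if none of the groups is valid.
			 */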
3881 for (i = 0; i < bp->rx_cp_nr_rings; i++) {
3882 fw_idx %= bp->rx_cp_nr_rings;
3883 if (vnic->fw_grp_ids[fw_idx] !=
3884 INVALID_HW_RING_ID)
3885 break;
3886 fw_idx++;
3887 }
3888 if (i == bp->rx_cp_nr_rings)
3889 return 0;
3890 vnic->rss_table[rss_idx] =
3891 vnic->fw_grp_ids[fw_idx];
3892 }
3893 return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
3894 }
3895 return 0;
3896 }
3897
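/*
 * Translate the driver's interrupt coalescing settings into the field
 * layout expected by the HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS request.
 */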
3898 static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
3899 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
3900 {
3901 uint16_t flags;
3902
3903 req->num_cmpl_aggr_int = rte_cpu_to_le_16(hw_coal->num_cmpl_aggr_int);
3904
3905 	/* This is a 6-bit value and must not be 0, or the IRQ will fire non-stop */
3906 req->num_cmpl_dma_aggr = rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr);
3907
3908 	/* This is a 6-bit value and must not be 0, or the IRQ will fire non-stop */
3909 req->num_cmpl_dma_aggr_during_int =
3910 rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr_during_int);
3911
3912 req->int_lat_tmr_max = rte_cpu_to_le_16(hw_coal->int_lat_tmr_max);
3913
3914 /* min timer set to 1/2 of interrupt timer */
3915 req->int_lat_tmr_min = rte_cpu_to_le_16(hw_coal->int_lat_tmr_min);
3916
3917 /* buf timer set to 1/4 of interrupt timer */
3918 req->cmpl_aggr_dma_tmr = rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr);
3919
3920 req->cmpl_aggr_dma_tmr_during_int =
3921 rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr_during_int);
3922
3923 flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
3924 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
3925 req->flags = rte_cpu_to_le_16(flags);
3926 }
3927
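/*
 * Program interrupt coalescing parameters for the given completion ring.
 * Skipped on all devices except the Stratus controller (see
 * bnxt_stratus_device()), the only one for which ring-level coalescing is
 * configured by this driver.
 */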
3928 int bnxt_hwrm_set_ring_coal(struct bnxt *bp,
3929 struct bnxt_coal *coal, uint16_t ring_id)
3930 {
3931 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
3932 struct hwrm_ring_cmpl_ring_cfg_aggint_params_output *resp =
3933 bp->hwrm_cmd_resp_addr;
3934 int rc;
3935
3936 /* Set ring coalesce parameters only for Stratus 100G NIC */
3937 if (!bnxt_stratus_device(bp))
3938 return 0;
3939
3940 HWRM_PREP(req, RING_CMPL_RING_CFG_AGGINT_PARAMS);
3941 bnxt_hwrm_set_coal_params(coal, &req);
3942 req.ring_id = rte_cpu_to_le_16(ring_id);
3943 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3944 HWRM_CHECK_RESULT();
3945 HWRM_UNLOCK();
3946 return 0;
3947 }