/*-
 *   BSD LICENSE
 *
 *   Copyright(c) Broadcom Limited.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Broadcom Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <inttypes.h>

#include <rte_malloc.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"
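/*
 * RX Queues
 */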
void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq)
{
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;

	/* Drop the queue's reference to its HW stats context, if any. */
	if (cpr->hw_stats)
		cpr->hw_stats = NULL;
}
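/*
 * Distribute the port's RX queues across VNICs and L2 filters according to
 * the configured mq_mode: single queue, VMDq pools, or RSS/DCB.
 */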
int bnxt_mq_rx_configure(struct bnxt *bp)
{
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	unsigned int i, j, nb_q_per_grp, ring_idx;
	int start_grp_id, end_grp_id, rc = 0;
	struct bnxt_vnic_info *vnic;
	struct bnxt_filter_info *filter;
	struct bnxt_rx_queue *rxq;
	/* Single queue mode */
	if (bp->rx_cp_nr_rings < 2) {
		vnic = bnxt_alloc_vnic(bp);
		if (!vnic) {
			RTE_LOG(ERR, PMD, "VNIC alloc failed\n");
			rc = -ENOMEM;
			goto err_out;
		}
		STAILQ_INSERT_TAIL(&bp->ff_pool[0], vnic, next);

		rxq = bp->eth_dev->data->rx_queues[0];
		rxq->vnic = vnic;

		vnic->func_default = true;
		vnic->ff_pool_idx = 0;
		vnic->start_grp_id = 1;
		vnic->end_grp_id = vnic->start_grp_id + bp->rx_cp_nr_rings - 1;
		filter = bnxt_alloc_filter(bp);
		if (!filter) {
			RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
			rc = -ENOMEM;
			goto err_out;
		}
		STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
		goto out;
	}
	/* Multi-queue mode */
	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
		/* VMDq ONLY, VMDq+RSS, VMDq+DCB, VMDq+DCB+RSS */
		enum rte_eth_nb_pools pools;

		switch (dev_conf->rxmode.mq_mode) {
		case ETH_MQ_RX_VMDQ_RSS:
		case ETH_MQ_RX_VMDQ_ONLY:
			{
				const struct rte_eth_vmdq_rx_conf *conf =
				    &dev_conf->rx_adv_conf.vmdq_rx_conf;

				pools = conf->nb_queue_pools;
				break;
			}
		default:
			RTE_LOG(ERR, PMD, "Unsupported mq_mod %d\n",
				dev_conf->rxmode.mq_mode);
			rc = -EINVAL;
			goto err_out;
		}
		/* For each pool, allocate MACVLAN CFA rule & VNIC */
		if (!pools) {
			RTE_LOG(ERR, PMD,
				"VMDq pool not set, defaulted to 64\n");
			pools = ETH_64_POOLS;
		}
		nb_q_per_grp = bp->rx_cp_nr_rings / pools;
		start_grp_id = 1;
		end_grp_id = start_grp_id + nb_q_per_grp - 1;

		ring_idx = 0;
		for (i = 0; i < pools; i++) {
			vnic = bnxt_alloc_vnic(bp);
			if (!vnic) {
				RTE_LOG(ERR, PMD,
					"VNIC alloc failed\n");
				rc = -ENOMEM;
				goto err_out;
			}
			STAILQ_INSERT_TAIL(&bp->ff_pool[i], vnic, next);

			for (j = 0; j < nb_q_per_grp; j++, ring_idx++) {
				rxq = bp->eth_dev->data->rx_queues[ring_idx];
				rxq->vnic = vnic;
			}
			if (i == 0)
				vnic->func_default = true;
			vnic->ff_pool_idx = i;
			vnic->start_grp_id = start_grp_id;
			vnic->end_grp_id = end_grp_id;

			filter = bnxt_alloc_filter(bp);
			if (!filter) {
				RTE_LOG(ERR, PMD,
					"L2 filter alloc failed\n");
				rc = -ENOMEM;
				goto err_out;
			}
			/*
			 * TODO: Configure & associate CFA rule for
			 * each VNIC for each VMDq with MACVLAN, MACVLAN+TC
			 */
			STAILQ_INSERT_TAIL(&vnic->filter, filter, next);

			start_grp_id = end_grp_id + 1;
			end_grp_id += nb_q_per_grp;
		}
		goto out;
	}
	/* Non-VMDq mode - RSS, DCB, RSS+DCB */
	/* Init default VNIC for RSS or DCB only */
	vnic = bnxt_alloc_vnic(bp);
	if (!vnic) {
		RTE_LOG(ERR, PMD, "VNIC alloc failed\n");
		rc = -ENOMEM;
		goto err_out;
	}
	/* Partition the rx queues for the single pool */
	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		rxq = bp->eth_dev->data->rx_queues[i];
		rxq->vnic = vnic;
	}
	STAILQ_INSERT_TAIL(&bp->ff_pool[0], vnic, next);

	vnic->func_default = true;
	vnic->ff_pool_idx = 0;
	vnic->start_grp_id = 1;
	vnic->end_grp_id = vnic->start_grp_id + bp->rx_cp_nr_rings - 1;
	filter = bnxt_alloc_filter(bp);
	if (!filter) {
		RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
		rc = -ENOMEM;
		goto err_out;
	}
	STAILQ_INSERT_TAIL(&vnic->filter, filter, next);

	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
		vnic->hash_type =
			HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 |
			HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;

out:
	return rc;

err_out:
	/* Free allocated vnic/filters */

	return rc;
}
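/* Return every mbuf still posted in the queue's RX ring to its mempool. */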
static void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq __rte_unused)
{
	struct bnxt_sw_rx_bd *sw_ring;
	uint16_t i;

	if (rxq) {
		sw_ring = rxq->rx_ring->rx_buf_ring;
		if (sw_ring) {
			for (i = 0; i < rxq->nb_rx_desc; i++) {
				if (sw_ring[i].mbuf) {
					rte_pktmbuf_free_seg(sw_ring[i].mbuf);
					sw_ring[i].mbuf = NULL;
				}
			}
		}
	}
}
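/* Free the mbufs of every RX queue on the device. */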
void bnxt_free_rx_mbufs(struct bnxt *bp)
{
	struct bnxt_rx_queue *rxq;
	int i;

	for (i = 0; i < (int)bp->rx_nr_rings; i++) {
		rxq = bp->rx_queues[i];
		bnxt_rx_queue_release_mbufs(rxq);
	}
}
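/*
 * rx_queue_release callback: free the queue's mbufs, its hardware rings and
 * stats context, then the queue structure itself.
 */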
void bnxt_rx_queue_release_op(void *rx_queue)
{
	struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;

	if (rxq) {
		bnxt_rx_queue_release_mbufs(rxq);

		/* Free RX ring hardware descriptors */
		bnxt_free_ring(rxq->rx_ring->rx_ring_struct);

		/* Free RX completion ring hardware descriptors */
		bnxt_free_ring(rxq->cp_ring->cp_ring_struct);

		bnxt_free_rxq_stats(rxq);

		rte_free(rxq);
	}
}
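/*
 * rx_queue_setup callback: allocate and initialize an RX queue and its
 * completion ring, then reserve the hardware descriptor rings.
 */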
int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
			       uint16_t queue_idx,
			       uint16_t nb_desc,
			       unsigned int socket_id,
			       const struct rte_eth_rxconf *rx_conf,
			       struct rte_mempool *mp)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_rx_queue *rxq;
	int rc = 0;

	if (!nb_desc || nb_desc > MAX_RX_DESC_CNT) {
		RTE_LOG(ERR, PMD, "nb_desc %d is invalid", nb_desc);
		rc = -EINVAL;
		goto out;
	}

	if (eth_dev->data->rx_queues) {
		rxq = eth_dev->data->rx_queues[queue_idx];
		if (rxq)
			bnxt_rx_queue_release_op(rxq);
	}
	rxq = rte_zmalloc_socket("bnxt_rx_queue", sizeof(struct bnxt_rx_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (!rxq) {
		RTE_LOG(ERR, PMD, "bnxt_rx_queue allocation failed!");
		rc = -ENOMEM;
		goto out;
	}
	rxq->bp = bp;
	rxq->mb_pool = mp;
	rxq->nb_rx_desc = nb_desc;
	rxq->rx_free_thresh = rx_conf->rx_free_thresh;

	rc = bnxt_init_rx_ring_struct(rxq, socket_id);
	if (rc)
		goto out;

	rxq->queue_id = queue_idx;
	rxq->port_id = eth_dev->data->port_id;
	rxq->crc_len = (uint8_t)((eth_dev->data->dev_conf.rxmode.hw_strip_crc) ?
				0 : ETHER_CRC_LEN);

	eth_dev->data->rx_queues[queue_idx] = rxq;
	/* Allocate RX ring hardware descriptors */
	if (bnxt_alloc_rings(bp, queue_idx, NULL, rxq->rx_ring, rxq->cp_ring,
			"rxr")) {
		RTE_LOG(ERR, PMD, "ring_dma_zone_reserve for rx_ring failed!");
		bnxt_rx_queue_release_op(rxq);
		rc = -ENOMEM;
		goto out;
	}

out:
	return rc;
}