/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/queue.h>

#include <rte_string_fns.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>

#include "i40e_logs.h"
#include "base/i40e_prototype.h"
#include "base/i40e_adminq_cmd.h"
#include "base/i40e_type.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
#define I40E_CFG_CRCSTRIP_DEFAULT 1
static int
i40e_pf_host_switch_queues(struct i40e_pf_vf *vf,
               struct i40e_virtchnl_queue_select *qsel,
               bool on);
/**
 * Bind PF queues with VSI and VF.
 **/
static int
i40e_pf_vf_queues_mapping(struct i40e_pf_vf *vf)
{
    struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
    uint16_t vsi_id = vf->vsi->vsi_id;
    uint16_t vf_id = vf->vf_idx;
    uint16_t nb_qps = vf->vsi->nb_qps;
    uint16_t qbase = vf->vsi->base_queue;
    uint16_t q1, q2;
    uint32_t val;
    int i;

    /*
     * The VF uses the scattered-queue mapping below, so QBASE need
     * not be set in this register.
     */
    i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vsi_id),
              I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

    /* Set to mark the VPLAN_QTABLE[] registers as valid */
    I40E_WRITE_REG(hw, I40E_VPLAN_MAPENA(vf_id),
               I40E_VPLAN_MAPENA_TXRX_ENA_MASK);

    /* map PF queues to VF */
    for (i = 0; i < nb_qps; i++) {
        val = ((qbase + i) & I40E_VPLAN_QTABLE_QINDEX_MASK);
        I40E_WRITE_REG(hw, I40E_VPLAN_QTABLE(i, vf_id), val);
    }

    /* map PF queues to VSI, two queue indexes per register */
    for (i = 0; i < I40E_MAX_QP_NUM_PER_VF / 2; i++) {
        if (2 * i > nb_qps - 1)
            q1 = I40E_VSILAN_QTABLE_QINDEX_0_MASK;
        else
            q1 = qbase + 2 * i;

        if (2 * i + 1 > nb_qps - 1)
            q2 = I40E_VSILAN_QTABLE_QINDEX_0_MASK;
        else
            q2 = qbase + 2 * i + 1;

        val = (q2 << I40E_VSILAN_QTABLE_QINDEX_1_SHIFT) + q1;
        i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(i, vsi_id), val);
    }
    I40E_WRITE_FLUSH(hw);

    return I40E_SUCCESS;
}
/**
 * Perform the VF reset operation.
 **/
int
i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset)
{
    uint32_t val, i;
    struct i40e_hw *hw;
    struct i40e_pf *pf;
    uint16_t vf_id, abs_vf_id, vf_msix_num;
    int ret;
    struct i40e_virtchnl_queue_select qsel;

    if (vf == NULL)
        return -EINVAL;

    pf = vf->pf;
    hw = I40E_PF_TO_HW(vf->pf);
    vf_id = vf->vf_idx;
    abs_vf_id = vf_id + hw->func_caps.vf_base_id;

    /* Notify VF that we are in VFR progress */
    I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_PF_VFR_INPROGRESS);

    /*
     * Requesting a SW VF reset generates a VFLR interrupt, which would
     * re-enter this function. To avoid that, disable the interrupt first.
     */
    if (do_hw_reset) {
        vf->state = I40E_VF_INRESET;
        val = I40E_READ_REG(hw, I40E_VPGEN_VFRTRIG(vf_id));
        val |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
        I40E_WRITE_REG(hw, I40E_VPGEN_VFRTRIG(vf_id), val);
        I40E_WRITE_FLUSH(hw);
    }

#define VFRESET_MAX_WAIT_CNT 100
    /* Wait until VF reset is done */
    for (i = 0; i < VFRESET_MAX_WAIT_CNT; i++) {
        rte_delay_us(10);
        val = I40E_READ_REG(hw, I40E_VPGEN_VFRSTAT(vf_id));
        if (val & I40E_VPGEN_VFRSTAT_VFRD_MASK)
            break;
    }

    if (i >= VFRESET_MAX_WAIT_CNT) {
        PMD_DRV_LOG(ERR, "VF reset timeout");
        return -ETIMEDOUT;
    }

    /* Not the first reset: clean up the previous configuration first */
    if (vf->vsi) {
        /* Disable queues */
        memset(&qsel, 0, sizeof(qsel));
        for (i = 0; i < vf->vsi->nb_qps; i++)
            qsel.rx_queues |= 1 << i;
        qsel.tx_queues = qsel.rx_queues;
        ret = i40e_pf_host_switch_queues(vf, &qsel, false);
        if (ret != I40E_SUCCESS) {
            PMD_DRV_LOG(ERR, "Disable VF queues failed");
            return -EFAULT;
        }

        /* Disable VF interrupt setting */
        vf_msix_num = hw->func_caps.num_msix_vectors_vf;
        for (i = 0; i < vf_msix_num; i++) {
            if (!i)
                val = I40E_VFINT_DYN_CTL0(vf_id);
            else
                val = I40E_VFINT_DYN_CTLN(((vf_msix_num - 1) *
                            (vf_id)) + (i - 1));
            I40E_WRITE_REG(hw, val,
                       I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
        }
        I40E_WRITE_FLUSH(hw);

        /* Remove the VSI */
        ret = i40e_vsi_release(vf->vsi);
        if (ret != I40E_SUCCESS) {
            PMD_DRV_LOG(ERR, "Release VSI failed");
            return -EFAULT;
        }
    }

#define I40E_VF_PCI_ADDR  0xAA
#define I40E_VF_PEND_MASK 0x20
    /* Check the pending transactions of this VF */
    /* Use absolute VF id, refer to datasheet for details */
    I40E_WRITE_REG(hw, I40E_PF_PCI_CIAA, I40E_VF_PCI_ADDR |
        (abs_vf_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
    for (i = 0; i < VFRESET_MAX_WAIT_CNT; i++) {
        rte_delay_us(1);
        val = I40E_READ_REG(hw, I40E_PF_PCI_CIAD);
        if ((val & I40E_VF_PEND_MASK) == 0)
            break;
    }

    if (i >= VFRESET_MAX_WAIT_CNT) {
        PMD_DRV_LOG(ERR, "Wait VF PCI transaction end timeout");
        return -ETIMEDOUT;
    }

    /* Reset done, set COMPLETE flag and clear the reset bit */
    I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_PF_VFR_COMPLETED);
    val = I40E_READ_REG(hw, I40E_VPGEN_VFRTRIG(vf_id));
    val &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
    I40E_WRITE_REG(hw, I40E_VPGEN_VFRTRIG(vf_id), val);
    vf->reset_cnt++;
    I40E_WRITE_FLUSH(hw);

    /* Allocate resources again */
    if (pf->floating_veb && pf->floating_veb_list[vf_id])
        vf->vsi = i40e_vsi_setup(vf->pf, I40E_VSI_SRIOV,
                     NULL, vf->vf_idx);
    else
        vf->vsi = i40e_vsi_setup(vf->pf, I40E_VSI_SRIOV,
                     vf->pf->main_vsi, vf->vf_idx);

    if (vf->vsi == NULL) {
        PMD_DRV_LOG(ERR, "Add vsi failed");
        return -EFAULT;
    }

    ret = i40e_pf_vf_queues_mapping(vf);
    if (ret != I40E_SUCCESS) {
        PMD_DRV_LOG(ERR, "queue mapping error");
        i40e_vsi_release(vf->vsi);
        return -EFAULT;
    }

    return ret;
}
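/*
 * Summary of the reset handshake implemented above: the PF reports
 * I40E_PF_VFR_INPROGRESS in VFGEN_RSTAT1, optionally triggers a SW reset
 * through VPGEN_VFRTRIG, polls VPGEN_VFRSTAT until the VFRD bit is set,
 * waits for the VF's pending PCI transactions to drain through the
 * PF_PCI_CIAA/CIAD window, and only then reports I40E_PF_VFR_COMPLETED
 * and rebuilds the VSI and queue mapping.
 */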
static int
i40e_pf_host_send_msg_to_vf(struct i40e_pf_vf *vf,
                uint32_t opcode,
                uint32_t retval,
                uint8_t *msg,
                uint16_t msglen)
{
    struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
    uint16_t abs_vf_id = hw->func_caps.vf_base_id + vf->vf_idx;
    int ret;

    ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, opcode, retval,
                     msg, msglen, NULL);
    if (ret) {
        PMD_INIT_LOG(ERR, "Failed to send message to VF, err %u",
                 hw->aq.asq_last_status);
    }

    return ret;
}
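/*
 * All virtchnl replies below funnel through the helper above; a typical
 * call supplies the opcode being acknowledged, the status code, and an
 * optional payload, e.g.:
 *
 *   i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_ADD_VLAN,
 *                               ret, NULL, 0);
 */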
static void
i40e_pf_host_process_cmd_version(struct i40e_pf_vf *vf)
{
    struct i40e_virtchnl_version_info info;

    info.major = I40E_DPDK_VERSION_MAJOR;
    info.minor = I40E_DPDK_VERSION_MINOR;
    i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
        I40E_SUCCESS, (uint8_t *)&info, sizeof(info));
}
static int
i40e_pf_host_process_cmd_reset_vf(struct i40e_pf_vf *vf)
{
    i40e_pf_host_vf_reset(vf, 1);

    /* No feedback will be sent to VF for VFLR */
    return I40E_SUCCESS;
}
static int
i40e_pf_host_process_cmd_get_vf_resource(struct i40e_pf_vf *vf)
{
    struct i40e_virtchnl_vf_resource *vf_res = NULL;
    struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
    uint32_t len = 0;
    int ret = I40E_SUCCESS;

    /* only have 1 VSI by default */
    len = sizeof(struct i40e_virtchnl_vf_resource) +
                I40E_DEFAULT_VF_VSI_NUM *
        sizeof(struct i40e_virtchnl_vsi_resource);

    vf_res = rte_zmalloc("i40e_vf_res", len, 0);
    if (vf_res == NULL) {
        PMD_DRV_LOG(ERR, "failed to allocate mem");
        ret = I40E_ERR_NO_MEMORY;
        len = 0;
        goto send_msg;
    }

    vf_res->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
                I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
    vf_res->max_vectors = hw->func_caps.num_msix_vectors_vf;
    vf_res->num_queue_pairs = vf->vsi->nb_qps;
    vf_res->num_vsis = I40E_DEFAULT_VF_VSI_NUM;

    /* Change the setting below if the PF host can support more VSIs per VF */
    vf_res->vsi_res[0].vsi_type = I40E_VSI_SRIOV;
    /* Assume the VF has a single VSI for now, so always return ID 0 */
    vf_res->vsi_res[0].vsi_id = 0;
    vf_res->vsi_res[0].num_queue_pairs = vf->vsi->nb_qps;
    ether_addr_copy(&vf->mac_addr,
        (struct ether_addr *)vf_res->vsi_res[0].default_mac_addr);

send_msg:
    i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
                    ret, (uint8_t *)vf_res, len);
    rte_free(vf_res);

    return ret;
}
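/*
 * Layout of the reply built above (one VSI by default): a single
 * i40e_virtchnl_vf_resource header followed immediately by
 * I40E_DEFAULT_VF_VSI_NUM i40e_virtchnl_vsi_resource entries, which is
 * why 'len' is computed as the sum of both sizes rather than
 * sizeof(*vf_res) alone.
 */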
static int
i40e_pf_host_hmc_config_rxq(struct i40e_hw *hw,
                struct i40e_pf_vf *vf,
                struct i40e_virtchnl_rxq_info *rxq,
                uint8_t crcstrip)
{
    int err = I40E_SUCCESS;
    struct i40e_hmc_obj_rxq rx_ctx;
    uint16_t abs_queue_id = vf->vsi->base_queue + rxq->queue_id;

    /* Clear the context structure first */
    memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
    rx_ctx.dbuff = rxq->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
    rx_ctx.hbuff = rxq->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
    rx_ctx.base = rxq->dma_ring_addr / I40E_QUEUE_BASE_ADDR_UNIT;
    rx_ctx.qlen = rxq->ring_len;
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
    rx_ctx.dsize = 1;
#endif

    if (rxq->splithdr_enabled) {
        rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_ALL;
        rx_ctx.dtype = i40e_header_split_enabled;
    } else {
        rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_NONE;
        rx_ctx.dtype = i40e_header_split_none;
    }
    rx_ctx.rxmax = rxq->max_pkt_size;
    rx_ctx.tphrdesc_ena = 1;
    rx_ctx.tphwdesc_ena = 1;
    rx_ctx.tphdata_ena = 1;
    rx_ctx.tphhead_ena = 1;
    rx_ctx.lrxqthresh = 2;
    rx_ctx.crcstrip = crcstrip;

    err = i40e_clear_lan_rx_queue_context(hw, abs_queue_id);
    if (err != I40E_SUCCESS)
        return err;
    err = i40e_set_lan_rx_queue_context(hw, abs_queue_id, &rx_ctx);

    return err;
}
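/*
 * Note on the unit conversions above: databuffer_size and hdr_size arrive
 * in bytes and are converted with I40E_RXQ_CTX_DBUFF_SHIFT and
 * I40E_RXQ_CTX_HBUFF_SHIFT into the coarser units the RX queue context
 * expects (on this hardware family the shifts are understood to give
 * 128-byte and 64-byte granularity, respectively), and the ring base
 * address is likewise expressed in I40E_QUEUE_BASE_ADDR_UNIT units.
 */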
static int
i40e_pf_host_hmc_config_txq(struct i40e_hw *hw,
                struct i40e_pf_vf *vf,
                struct i40e_virtchnl_txq_info *txq)
{
    int err = I40E_SUCCESS;
    struct i40e_hmc_obj_txq tx_ctx;
    uint32_t qtx_ctl;
    uint16_t abs_queue_id = vf->vsi->base_queue + txq->queue_id;

    /* clear the context structure first */
    memset(&tx_ctx, 0, sizeof(tx_ctx));
    tx_ctx.new_context = 1;
    tx_ctx.base = txq->dma_ring_addr / I40E_QUEUE_BASE_ADDR_UNIT;
    tx_ctx.qlen = txq->ring_len;
    tx_ctx.rdylist = rte_le_to_cpu_16(vf->vsi->info.qs_handle[0]);
    err = i40e_clear_lan_tx_queue_context(hw, abs_queue_id);
    if (err != I40E_SUCCESS)
        return err;

    err = i40e_set_lan_tx_queue_context(hw, abs_queue_id, &tx_ctx);
    if (err != I40E_SUCCESS)
        return err;

    /* Bind the queue to the VF function. TX/RX queues appear in pairs,
     * so only QTX_CTL needs to be set.
     */
    qtx_ctl = (I40E_QTX_CTL_VF_QUEUE << I40E_QTX_CTL_PFVF_Q_SHIFT) |
            ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
            I40E_QTX_CTL_PF_INDX_MASK) |
            (((vf->vf_idx + hw->func_caps.vf_base_id) <<
            I40E_QTX_CTL_VFVM_INDX_SHIFT) &
            I40E_QTX_CTL_VFVM_INDX_MASK);
    I40E_WRITE_REG(hw, I40E_QTX_CTL(abs_queue_id), qtx_ctl);
    I40E_WRITE_FLUSH(hw);

    return I40E_SUCCESS;
}
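/*
 * The QTX_CTL value built above packs three fields into one register:
 * the queue ownership type (I40E_QTX_CTL_VF_QUEUE), the PF index, and
 * the absolute VF id (vf_idx + vf_base_id), each shifted into place and
 * masked to its field width; writing it is what actually hands the TX
 * queue to the VF function.
 */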
static int
i40e_pf_host_process_cmd_config_vsi_queues(struct i40e_pf_vf *vf,
                       uint8_t *msg,
                       uint16_t msglen)
{
    struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
    struct i40e_vsi *vsi = vf->vsi;
    struct i40e_virtchnl_vsi_queue_config_info *vc_vqci =
        (struct i40e_virtchnl_vsi_queue_config_info *)msg;
    struct i40e_virtchnl_queue_pair_info *vc_qpi;
    int i, ret = I40E_SUCCESS;

    if (!msg || vc_vqci->num_queue_pairs > vsi->nb_qps ||
        vc_vqci->num_queue_pairs > I40E_MAX_VSI_QP ||
        msglen < I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqci,
                    vc_vqci->num_queue_pairs)) {
        PMD_DRV_LOG(ERR, "vsi_queue_config_info argument wrong");
        ret = I40E_ERR_PARAM;
        goto send_msg;
    }

    vc_qpi = vc_vqci->qpair;
    for (i = 0; i < vc_vqci->num_queue_pairs; i++) {
        if (vc_qpi[i].rxq.queue_id > vsi->nb_qps - 1 ||
            vc_qpi[i].txq.queue_id > vsi->nb_qps - 1) {
            ret = I40E_ERR_PARAM;
            goto send_msg;
        }

        /*
         * Apply VF RX queue setting to HMC.
         * Unlike I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT, this
         * opcode carries no per-queue extra information, so the
         * default CRC strip setting is used.
         */
        if (i40e_pf_host_hmc_config_rxq(hw, vf, &vc_qpi[i].rxq,
            I40E_CFG_CRCSTRIP_DEFAULT) != I40E_SUCCESS) {
            PMD_DRV_LOG(ERR, "Configure RX queue HMC failed");
            ret = I40E_ERR_PARAM;
            goto send_msg;
        }

        /* Apply VF TX queue setting to HMC */
        if (i40e_pf_host_hmc_config_txq(hw, vf,
            &vc_qpi[i].txq) != I40E_SUCCESS) {
            PMD_DRV_LOG(ERR, "Configure TX queue HMC failed");
            ret = I40E_ERR_PARAM;
            goto send_msg;
        }
    }

send_msg:
    i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
                    ret, NULL, 0);

    return ret;
}
static int
i40e_pf_host_process_cmd_config_vsi_queues_ext(struct i40e_pf_vf *vf,
                           uint8_t *msg,
                           uint16_t msglen)
{
    struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
    struct i40e_vsi *vsi = vf->vsi;
    struct i40e_virtchnl_vsi_queue_config_ext_info *vc_vqcei =
        (struct i40e_virtchnl_vsi_queue_config_ext_info *)msg;
    struct i40e_virtchnl_queue_pair_ext_info *vc_qpei;
    int i, ret = I40E_SUCCESS;

    if (!msg || vc_vqcei->num_queue_pairs > vsi->nb_qps ||
        vc_vqcei->num_queue_pairs > I40E_MAX_VSI_QP ||
        msglen < I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqcei,
                    vc_vqcei->num_queue_pairs)) {
        PMD_DRV_LOG(ERR, "vsi_queue_config_ext_info argument wrong");
        ret = I40E_ERR_PARAM;
        goto send_msg;
    }

    vc_qpei = vc_vqcei->qpair;
    for (i = 0; i < vc_vqcei->num_queue_pairs; i++) {
        if (vc_qpei[i].rxq.queue_id > vsi->nb_qps - 1 ||
            vc_qpei[i].txq.queue_id > vsi->nb_qps - 1) {
            ret = I40E_ERR_PARAM;
            goto send_msg;
        }

        /*
         * Apply VF RX queue setting to HMC.
         * The I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT opcode carries
         * extra information in
         * 'struct i40e_virtchnl_queue_pair_ext_info', whose CRC
         * strip flag is applied here.
         */
        if (i40e_pf_host_hmc_config_rxq(hw, vf, &vc_qpei[i].rxq,
            vc_qpei[i].rxq_ext.crcstrip) != I40E_SUCCESS) {
            PMD_DRV_LOG(ERR, "Configure RX queue HMC failed");
            ret = I40E_ERR_PARAM;
            goto send_msg;
        }

        /* Apply VF TX queue setting to HMC */
        if (i40e_pf_host_hmc_config_txq(hw, vf, &vc_qpei[i].txq) !=
                            I40E_SUCCESS) {
            PMD_DRV_LOG(ERR, "Configure TX queue HMC failed");
            ret = I40E_ERR_PARAM;
            goto send_msg;
        }
    }

send_msg:
    i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
                    ret, NULL, 0);

    return ret;
}
static int
i40e_pf_host_process_cmd_config_irq_map(struct i40e_pf_vf *vf,
                    uint8_t *msg, uint16_t msglen)
{
    int ret = I40E_SUCCESS;
    struct i40e_virtchnl_irq_map_info *irqmap =
        (struct i40e_virtchnl_irq_map_info *)msg;

    if (msg == NULL || msglen < sizeof(struct i40e_virtchnl_irq_map_info)) {
        PMD_DRV_LOG(ERR, "buffer too short");
        ret = I40E_ERR_PARAM;
        goto send_msg;
    }

    /* Assume the VF has only 1 vector to bind all queues */
    if (irqmap->num_vectors != 1) {
        PMD_DRV_LOG(ERR, "DPDK host only supports 1 vector");
        ret = I40E_ERR_PARAM;
        goto send_msg;
    }

    /* This MSI-X interrupt is stored in the VF range */
    vf->vsi->msix_intr = irqmap->vecmap[0].vector_id;
    vf->vsi->nb_msix = irqmap->num_vectors;
    vf->vsi->nb_used_qps = vf->vsi->nb_qps;

    /* It does not matter how the TX/RX queues map to this vector:
     * link all VF RX queues together and only do the mapping work.
     * The VF can enable/disable the interrupt by itself.
     */
    i40e_vsi_queues_bind_intr(vf->vsi);

send_msg:
    i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
                    ret, NULL, 0);

    return ret;
}
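/*
 * Sketch of the only irq_map_info layout accepted above (values
 * illustrative): num_vectors = 1, with vecmap[0].vector_id holding the
 * VF-range MSI-X vector; all of the VF's queues are then bound to that
 * single vector by i40e_vsi_queues_bind_intr().
 */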
static int
i40e_pf_host_switch_queues(struct i40e_pf_vf *vf,
               struct i40e_virtchnl_queue_select *qsel,
               bool on)
{
    int ret = I40E_SUCCESS;
    int i;
    struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
    uint16_t baseq = vf->vsi->base_queue;

    if (qsel->rx_queues + qsel->tx_queues == 0)
        return I40E_ERR_PARAM;

    /* always enable RX first and disable it last */
    /* When enabling, switch RX on first */
    if (on) {
        for (i = 0; i < I40E_MAX_QP_NUM_PER_VF; i++)
            if (qsel->rx_queues & (1 << i)) {
                ret = i40e_switch_rx_queue(hw, baseq + i, on);
                if (ret != I40E_SUCCESS)
                    return ret;
            }
    }

    /* Enable/Disable TX */
    for (i = 0; i < I40E_MAX_QP_NUM_PER_VF; i++)
        if (qsel->tx_queues & (1 << i)) {
            ret = i40e_switch_tx_queue(hw, baseq + i, on);
            if (ret != I40E_SUCCESS)
                return ret;
        }

    /* When disabling, switch RX off last */
    if (!on) {
        for (i = 0; i < I40E_MAX_QP_NUM_PER_VF; i++)
            if (qsel->rx_queues & (1 << i)) {
                ret = i40e_switch_rx_queue(hw, baseq + i, on);
                if (ret != I40E_SUCCESS)
                    return ret;
            }
    }

    return ret;
}
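/*
 * Example of the queue_select bitmaps consumed above: a VF with four
 * queue pairs passes rx_queues = tx_queues = 0xF, and each set bit i
 * switches absolute queue (baseq + i). The ordering enforced here
 * (RX enabled first, RX disabled last) keeps RX alive for the whole
 * time TX is being switched.
 */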
static int
i40e_pf_host_process_cmd_enable_queues(struct i40e_pf_vf *vf,
                       uint8_t *msg,
                       uint16_t msglen)
{
    int ret = I40E_SUCCESS;
    struct i40e_virtchnl_queue_select *q_sel =
        (struct i40e_virtchnl_queue_select *)msg;

    if (msg == NULL || msglen != sizeof(*q_sel)) {
        ret = I40E_ERR_PARAM;
        goto send_msg;
    }
    ret = i40e_pf_host_switch_queues(vf, q_sel, true);

send_msg:
    i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
                    ret, NULL, 0);

    return ret;
}
static int
i40e_pf_host_process_cmd_disable_queues(struct i40e_pf_vf *vf,
                    uint8_t *msg,
                    uint16_t msglen)
{
    int ret = I40E_SUCCESS;
    struct i40e_virtchnl_queue_select *q_sel =
        (struct i40e_virtchnl_queue_select *)msg;

    if (msg == NULL || msglen != sizeof(*q_sel)) {
        ret = I40E_ERR_PARAM;
        goto send_msg;
    }
    ret = i40e_pf_host_switch_queues(vf, q_sel, false);

send_msg:
    i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
                    ret, NULL, 0);

    return ret;
}
static int
i40e_pf_host_process_cmd_add_ether_address(struct i40e_pf_vf *vf,
                       uint8_t *msg,
                       uint16_t msglen)
{
    int ret = I40E_SUCCESS;
    struct i40e_virtchnl_ether_addr_list *addr_list =
        (struct i40e_virtchnl_ether_addr_list *)msg;
    struct i40e_mac_filter_info filter;
    int i;
    struct ether_addr *mac;

    memset(&filter, 0, sizeof(struct i40e_mac_filter_info));

    if (msg == NULL || msglen <= sizeof(*addr_list)) {
        PMD_DRV_LOG(ERR, "add_ether_address argument too short");
        ret = I40E_ERR_PARAM;
        goto send_msg;
    }

    for (i = 0; i < addr_list->num_elements; i++) {
        mac = (struct ether_addr *)(addr_list->list[i].addr);
        (void)rte_memcpy(&filter.mac_addr, mac, ETHER_ADDR_LEN);
        filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
        if (!is_valid_assigned_ether_addr(mac) ||
            i40e_vsi_add_mac(vf->vsi, &filter)) {
            ret = I40E_ERR_INVALID_MAC_ADDR;
            goto send_msg;
        }
    }

send_msg:
    i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
                    ret, NULL, 0);

    return ret;
}
static int
i40e_pf_host_process_cmd_del_ether_address(struct i40e_pf_vf *vf,
                       uint8_t *msg,
                       uint16_t msglen)
{
    int ret = I40E_SUCCESS;
    struct i40e_virtchnl_ether_addr_list *addr_list =
        (struct i40e_virtchnl_ether_addr_list *)msg;
    int i;
    struct ether_addr *mac;

    if (msg == NULL || msglen <= sizeof(*addr_list)) {
        PMD_DRV_LOG(ERR, "delete_ether_address argument too short");
        ret = I40E_ERR_PARAM;
        goto send_msg;
    }

    for (i = 0; i < addr_list->num_elements; i++) {
        mac = (struct ether_addr *)(addr_list->list[i].addr);
        if (!is_valid_assigned_ether_addr(mac) ||
            i40e_vsi_delete_mac(vf->vsi, mac)) {
            ret = I40E_ERR_INVALID_MAC_ADDR;
            goto send_msg;
        }
    }

send_msg:
    i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
                    ret, NULL, 0);

    return ret;
}
static int
i40e_pf_host_process_cmd_add_vlan(struct i40e_pf_vf *vf,
                  uint8_t *msg, uint16_t msglen)
{
    int ret = I40E_SUCCESS;
    struct i40e_virtchnl_vlan_filter_list *vlan_filter_list =
        (struct i40e_virtchnl_vlan_filter_list *)msg;
    int i;
    uint16_t *vid;

    if (msg == NULL || msglen <= sizeof(*vlan_filter_list)) {
        PMD_DRV_LOG(ERR, "add_vlan argument too short");
        ret = I40E_ERR_PARAM;
        goto send_msg;
    }

    vid = vlan_filter_list->vlan_id;

    for (i = 0; i < vlan_filter_list->num_elements; i++) {
        ret = i40e_vsi_add_vlan(vf->vsi, vid[i]);
        if (ret != I40E_SUCCESS)
            goto send_msg;
    }

send_msg:
    i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_ADD_VLAN,
                    ret, NULL, 0);

    return ret;
}
static int
i40e_pf_host_process_cmd_del_vlan(struct i40e_pf_vf *vf,
                  uint8_t *msg,
                  uint16_t msglen)
{
    int ret = I40E_SUCCESS;
    struct i40e_virtchnl_vlan_filter_list *vlan_filter_list =
        (struct i40e_virtchnl_vlan_filter_list *)msg;
    int i;
    uint16_t *vid;

    if (msg == NULL || msglen <= sizeof(*vlan_filter_list)) {
        PMD_DRV_LOG(ERR, "delete_vlan argument too short");
        ret = I40E_ERR_PARAM;
        goto send_msg;
    }

    vid = vlan_filter_list->vlan_id;
    for (i = 0; i < vlan_filter_list->num_elements; i++) {
        ret = i40e_vsi_delete_vlan(vf->vsi, vid[i]);
        if (ret != I40E_SUCCESS)
            goto send_msg;
    }

send_msg:
    i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN,
                    ret, NULL, 0);

    return ret;
}
static int
i40e_pf_host_process_cmd_config_promisc_mode(
                    struct i40e_pf_vf *vf,
                    uint8_t *msg,
                    uint16_t msglen)
{
    int ret = I40E_SUCCESS;
    struct i40e_virtchnl_promisc_info *promisc =
        (struct i40e_virtchnl_promisc_info *)msg;
    struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
    bool unicast = FALSE, multicast = FALSE;

    if (msg == NULL || msglen != sizeof(*promisc)) {
        ret = I40E_ERR_PARAM;
        goto send_msg;
    }

    if (promisc->flags & I40E_FLAG_VF_UNICAST_PROMISC)
        unicast = TRUE;
    ret = i40e_aq_set_vsi_unicast_promiscuous(hw,
            vf->vsi->seid, unicast, NULL, true);
    if (ret != I40E_SUCCESS)
        goto send_msg;

    if (promisc->flags & I40E_FLAG_VF_MULTICAST_PROMISC)
        multicast = TRUE;
    ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vf->vsi->seid,
                        multicast, NULL);

send_msg:
    i40e_pf_host_send_msg_to_vf(vf,
        I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, ret, NULL, 0);

    return ret;
}
static void
i40e_pf_host_process_cmd_get_stats(struct i40e_pf_vf *vf)
{
    i40e_update_vsi_stats(vf->vsi);

    i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS,
        I40E_SUCCESS, (uint8_t *)&vf->vsi->eth_stats,
                sizeof(vf->vsi->eth_stats));
}
static int
i40e_pf_host_process_cmd_cfg_vlan_offload(
                    struct i40e_pf_vf *vf,
                    uint8_t *msg,
                    uint16_t msglen)
{
    int ret = I40E_SUCCESS;
    struct i40e_virtchnl_vlan_offload_info *offload =
        (struct i40e_virtchnl_vlan_offload_info *)msg;

    if (msg == NULL || msglen != sizeof(*offload)) {
        ret = I40E_ERR_PARAM;
        goto send_msg;
    }

    ret = i40e_vsi_config_vlan_stripping(vf->vsi,
                    !!offload->enable_vlan_strip);
    if (ret)
        PMD_DRV_LOG(ERR, "Failed to configure vlan stripping");

send_msg:
    i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD,
                    ret, NULL, 0);

    return ret;
}
static int
i40e_pf_host_process_cmd_cfg_pvid(struct i40e_pf_vf *vf,
                  uint8_t *msg,
                  uint16_t msglen)
{
    int ret = I40E_SUCCESS;
    struct i40e_virtchnl_pvid_info *tpid_info =
        (struct i40e_virtchnl_pvid_info *)msg;

    if (msg == NULL || msglen != sizeof(*tpid_info)) {
        ret = I40E_ERR_PARAM;
        goto send_msg;
    }

    ret = i40e_vsi_vlan_pvid_set(vf->vsi, &tpid_info->info);

send_msg:
    i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CFG_VLAN_PVID,
                    ret, NULL, 0);

    return ret;
}
static void
i40e_notify_vf_link_status(struct rte_eth_dev *dev, struct i40e_pf_vf *vf)
{
    struct i40e_virtchnl_pf_event event;

    event.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
    event.event_data.link_event.link_status =
        dev->data->dev_link.link_status;
    event.event_data.link_event.link_speed =
        (enum i40e_aq_link_speed)dev->data->dev_link.link_speed;
    i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_EVENT,
        I40E_SUCCESS, (uint8_t *)&event, sizeof(event));
}
void
i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
               uint16_t abs_vf_id, uint32_t opcode,
               __rte_unused uint32_t retval,
               uint8_t *msg,
               uint16_t msglen)
{
    struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
    struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    struct i40e_pf_vf *vf;
    /* AdminQ will pass absolute VF id, transfer to internal vf id */
    uint16_t vf_id = abs_vf_id - hw->func_caps.vf_base_id;

    if (vf_id > pf->vf_num - 1 || !pf->vfs) {
        PMD_DRV_LOG(ERR, "invalid argument");
        return;
    }

    vf = &pf->vfs[vf_id];
    if (!vf->vsi) {
        PMD_DRV_LOG(ERR, "NO VSI associated with VF found");
        i40e_pf_host_send_msg_to_vf(vf, opcode,
            I40E_ERR_NO_AVAILABLE_VSI, NULL, 0);
        return;
    }

    switch (opcode) {
    case I40E_VIRTCHNL_OP_VERSION:
        PMD_DRV_LOG(INFO, "OP_VERSION received");
        i40e_pf_host_process_cmd_version(vf);
        break;
    case I40E_VIRTCHNL_OP_RESET_VF:
        PMD_DRV_LOG(INFO, "OP_RESET_VF received");
        i40e_pf_host_process_cmd_reset_vf(vf);
        break;
    case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
        PMD_DRV_LOG(INFO, "OP_GET_VF_RESOURCES received");
        i40e_pf_host_process_cmd_get_vf_resource(vf);
        break;
    case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
        PMD_DRV_LOG(INFO, "OP_CONFIG_VSI_QUEUES received");
        i40e_pf_host_process_cmd_config_vsi_queues(vf, msg, msglen);
        break;
    case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT:
        PMD_DRV_LOG(INFO, "OP_CONFIG_VSI_QUEUES_EXT received");
        i40e_pf_host_process_cmd_config_vsi_queues_ext(vf, msg,
                                   msglen);
        break;
    case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
        PMD_DRV_LOG(INFO, "OP_CONFIG_IRQ_MAP received");
        i40e_pf_host_process_cmd_config_irq_map(vf, msg, msglen);
        break;
    case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
        PMD_DRV_LOG(INFO, "OP_ENABLE_QUEUES received");
        i40e_pf_host_process_cmd_enable_queues(vf, msg, msglen);
        i40e_notify_vf_link_status(dev, vf);
        break;
    case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
        PMD_DRV_LOG(INFO, "OP_DISABLE_QUEUES received");
        i40e_pf_host_process_cmd_disable_queues(vf, msg, msglen);
        break;
    case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
        PMD_DRV_LOG(INFO, "OP_ADD_ETHER_ADDRESS received");
        i40e_pf_host_process_cmd_add_ether_address(vf, msg, msglen);
        break;
    case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
        PMD_DRV_LOG(INFO, "OP_DEL_ETHER_ADDRESS received");
        i40e_pf_host_process_cmd_del_ether_address(vf, msg, msglen);
        break;
    case I40E_VIRTCHNL_OP_ADD_VLAN:
        PMD_DRV_LOG(INFO, "OP_ADD_VLAN received");
        i40e_pf_host_process_cmd_add_vlan(vf, msg, msglen);
        break;
    case I40E_VIRTCHNL_OP_DEL_VLAN:
        PMD_DRV_LOG(INFO, "OP_DEL_VLAN received");
        i40e_pf_host_process_cmd_del_vlan(vf, msg, msglen);
        break;
    case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
        PMD_DRV_LOG(INFO, "OP_CONFIG_PROMISCUOUS_MODE received");
        i40e_pf_host_process_cmd_config_promisc_mode(vf, msg, msglen);
        break;
    case I40E_VIRTCHNL_OP_GET_STATS:
        PMD_DRV_LOG(INFO, "OP_GET_STATS received");
        i40e_pf_host_process_cmd_get_stats(vf);
        break;
    case I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD:
        PMD_DRV_LOG(INFO, "OP_CFG_VLAN_OFFLOAD received");
        i40e_pf_host_process_cmd_cfg_vlan_offload(vf, msg, msglen);
        break;
    case I40E_VIRTCHNL_OP_CFG_VLAN_PVID:
        PMD_DRV_LOG(INFO, "OP_CFG_VLAN_PVID received");
        i40e_pf_host_process_cmd_cfg_pvid(vf, msg, msglen);
        break;
    /* Any opcode not handled above is unsupported and
     * is answered with an error code.
     */
    default:
        PMD_DRV_LOG(ERR, "%u received, not supported", opcode);
        i40e_pf_host_send_msg_to_vf(vf, opcode, I40E_ERR_PARAM,
                        NULL, 0);
        break;
    }
}
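/*
 * Wiring note (based on the AdminQ comment above): this dispatcher is
 * invoked from the PF's AdminQ event handling with the absolute VF id,
 * opcode and payload of each VF message, and every handler replies
 * through i40e_pf_host_send_msg_to_vf(), except the VFLR-style reset,
 * which deliberately sends no feedback.
 */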
int
i40e_pf_host_init(struct rte_eth_dev *dev)
{
    struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
    struct i40e_hw *hw = I40E_PF_TO_HW(pf);
    int ret, i;
    uint32_t val;

    PMD_INIT_FUNC_TRACE();

    /**
     * return if SRIOV not enabled, VF number not configured or
     * no queue assigned.
     */
    if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
        pf->vf_nb_qps == 0)
        return I40E_SUCCESS;

    /* Allocate memory to store VF structure */
    pf->vfs = rte_zmalloc("i40e_pf_vf", sizeof(*pf->vfs) * pf->vf_num, 0);
    if (pf->vfs == NULL)
        return -ENOMEM;

    /* Disable irq0 for VFR event */
    i40e_pf_disable_irq0(hw);

    /* Disable VF link status interrupt */
    val = I40E_READ_REG(hw, I40E_PFGEN_PORTMDIO_NUM);
    val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
    I40E_WRITE_REG(hw, I40E_PFGEN_PORTMDIO_NUM, val);
    I40E_WRITE_FLUSH(hw);

    for (i = 0; i < pf->vf_num; i++) {
        pf->vfs[i].pf = pf;
        pf->vfs[i].state = I40E_VF_INACTIVE;
        pf->vfs[i].vf_idx = i;
        ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
        if (ret != I40E_SUCCESS)
            goto fail;
        eth_random_addr(pf->vfs[i].mac_addr.addr_bytes);
    }

    /* restore irq0 */
    i40e_pf_enable_irq0(hw);

    return I40E_SUCCESS;

fail:
    rte_free(pf->vfs);
    i40e_pf_enable_irq0(hw);

    return ret;
}
int
i40e_pf_host_uninit(struct rte_eth_dev *dev)
{
    struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
    struct i40e_hw *hw = I40E_PF_TO_HW(pf);
    uint32_t val;

    PMD_INIT_FUNC_TRACE();

    /**
     * return if SRIOV not enabled, VF number not configured or
     * no queue assigned.
     */
    if ((!hw->func_caps.sr_iov_1_1) ||
        (pf->vf_num == 0) ||
        (pf->vf_nb_qps == 0))
        return I40E_SUCCESS;

    /* free the memory storing the VF structures */
    rte_free(pf->vfs);
    pf->vfs = NULL;

    /* Disable irq0 for VFR event */
    i40e_pf_disable_irq0(hw);

    /* Disable VF link status interrupt */
    val = I40E_READ_REG(hw, I40E_PFGEN_PORTMDIO_NUM);
    val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
    I40E_WRITE_REG(hw, I40E_PFGEN_PORTMDIO_NUM, val);
    I40E_WRITE_FLUSH(hw);

    return I40E_SUCCESS;
}