4 * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <sys/queue.h>
42 #include <rte_byteorder.h>
43 #include <rte_common.h>
44 #include <rte_cycles.h>
46 #include <rte_interrupts.h>
48 #include <rte_debug.h>
50 #include <rte_atomic.h>
51 #include <rte_branch_prediction.h>
52 #include <rte_memory.h>
53 #include <rte_memzone.h>
55 #include <rte_alarm.h>
56 #include <rte_ether.h>
57 #include <rte_ethdev.h>
58 #include <rte_atomic.h>
59 #include <rte_malloc.h>
62 #include "i40e_logs.h"
63 #include "base/i40e_prototype.h"
64 #include "base/i40e_adminq_cmd.h"
65 #include "base/i40e_type.h"
67 #include "i40e_rxtx.h"
68 #include "i40e_ethdev.h"
70 #define I40EVF_VSI_DEFAULT_MSIX_INTR 1
71 #define I40EVF_VSI_DEFAULT_MSIX_INTR_LNX 0
73 /* busy wait delay in msec */
74 #define I40EVF_BUSY_WAIT_DELAY 10
75 #define I40EVF_BUSY_WAIT_COUNT 50
76 #define MAX_RESET_WAIT_CNT 20
78 struct i40evf_arq_msg_info
{
79 enum i40e_virtchnl_ops ops
;
80 enum i40e_status_code result
;
87 enum i40e_virtchnl_ops ops
;
89 uint32_t in_args_size
;
91 /* Input & output type. pass in buffer size and pass out
92 * actual return result
/* Classification of a single attempt to read the VF admin queue. */
enum i40evf_aq_result {
	I40EVF_MSG_ERR = -1, /* Meet error when accessing admin queue */
	I40EVF_MSG_NON,      /* Read nothing from admin queue */
	I40EVF_MSG_SYS,      /* Read system msg from admin queue */
	I40EVF_MSG_CMD,      /* Read async command result */
};
104 static int i40evf_dev_configure(struct rte_eth_dev
*dev
);
105 static int i40evf_dev_start(struct rte_eth_dev
*dev
);
106 static void i40evf_dev_stop(struct rte_eth_dev
*dev
);
107 static void i40evf_dev_info_get(struct rte_eth_dev
*dev
,
108 struct rte_eth_dev_info
*dev_info
);
109 static int i40evf_dev_link_update(struct rte_eth_dev
*dev
,
110 __rte_unused
int wait_to_complete
);
111 static void i40evf_dev_stats_get(struct rte_eth_dev
*dev
,
112 struct rte_eth_stats
*stats
);
113 static int i40evf_dev_xstats_get(struct rte_eth_dev
*dev
,
114 struct rte_eth_xstat
*xstats
, unsigned n
);
115 static int i40evf_dev_xstats_get_names(struct rte_eth_dev
*dev
,
116 struct rte_eth_xstat_name
*xstats_names
,
118 static void i40evf_dev_xstats_reset(struct rte_eth_dev
*dev
);
119 static int i40evf_vlan_filter_set(struct rte_eth_dev
*dev
,
120 uint16_t vlan_id
, int on
);
121 static void i40evf_vlan_offload_set(struct rte_eth_dev
*dev
, int mask
);
122 static int i40evf_vlan_pvid_set(struct rte_eth_dev
*dev
, uint16_t pvid
,
124 static void i40evf_dev_close(struct rte_eth_dev
*dev
);
125 static void i40evf_dev_promiscuous_enable(struct rte_eth_dev
*dev
);
126 static void i40evf_dev_promiscuous_disable(struct rte_eth_dev
*dev
);
127 static void i40evf_dev_allmulticast_enable(struct rte_eth_dev
*dev
);
128 static void i40evf_dev_allmulticast_disable(struct rte_eth_dev
*dev
);
129 static int i40evf_init_vlan(struct rte_eth_dev
*dev
);
130 static int i40evf_dev_rx_queue_start(struct rte_eth_dev
*dev
,
131 uint16_t rx_queue_id
);
132 static int i40evf_dev_rx_queue_stop(struct rte_eth_dev
*dev
,
133 uint16_t rx_queue_id
);
134 static int i40evf_dev_tx_queue_start(struct rte_eth_dev
*dev
,
135 uint16_t tx_queue_id
);
136 static int i40evf_dev_tx_queue_stop(struct rte_eth_dev
*dev
,
137 uint16_t tx_queue_id
);
138 static void i40evf_add_mac_addr(struct rte_eth_dev
*dev
,
139 struct ether_addr
*addr
,
142 static void i40evf_del_mac_addr(struct rte_eth_dev
*dev
, uint32_t index
);
143 static int i40evf_dev_rss_reta_update(struct rte_eth_dev
*dev
,
144 struct rte_eth_rss_reta_entry64
*reta_conf
,
146 static int i40evf_dev_rss_reta_query(struct rte_eth_dev
*dev
,
147 struct rte_eth_rss_reta_entry64
*reta_conf
,
149 static int i40evf_config_rss(struct i40e_vf
*vf
);
150 static int i40evf_dev_rss_hash_update(struct rte_eth_dev
*dev
,
151 struct rte_eth_rss_conf
*rss_conf
);
152 static int i40evf_dev_rss_hash_conf_get(struct rte_eth_dev
*dev
,
153 struct rte_eth_rss_conf
*rss_conf
);
155 i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev
*dev
, uint16_t queue_id
);
157 i40evf_dev_rx_queue_intr_disable(struct rte_eth_dev
*dev
, uint16_t queue_id
);
158 static void i40evf_handle_pf_event(__rte_unused
struct rte_eth_dev
*dev
,
162 /* Default hash key buffer for RSS */
163 static uint32_t rss_key_default
[I40E_VFQF_HKEY_MAX_INDEX
+ 1];
165 struct rte_i40evf_xstats_name_off
{
166 char name
[RTE_ETH_XSTATS_NAME_SIZE
];
170 static const struct rte_i40evf_xstats_name_off rte_i40evf_stats_strings
[] = {
171 {"rx_bytes", offsetof(struct i40e_eth_stats
, rx_bytes
)},
172 {"rx_unicast_packets", offsetof(struct i40e_eth_stats
, rx_unicast
)},
173 {"rx_multicast_packets", offsetof(struct i40e_eth_stats
, rx_multicast
)},
174 {"rx_broadcast_packets", offsetof(struct i40e_eth_stats
, rx_broadcast
)},
175 {"rx_dropped_packets", offsetof(struct i40e_eth_stats
, rx_discards
)},
176 {"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats
,
177 rx_unknown_protocol
)},
178 {"tx_bytes", offsetof(struct i40e_eth_stats
, tx_bytes
)},
179 {"tx_unicast_packets", offsetof(struct i40e_eth_stats
, tx_bytes
)},
180 {"tx_multicast_packets", offsetof(struct i40e_eth_stats
, tx_bytes
)},
181 {"tx_broadcast_packets", offsetof(struct i40e_eth_stats
, tx_bytes
)},
182 {"tx_dropped_packets", offsetof(struct i40e_eth_stats
, tx_bytes
)},
183 {"tx_error_packets", offsetof(struct i40e_eth_stats
, tx_bytes
)},
186 #define I40EVF_NB_XSTATS (sizeof(rte_i40evf_stats_strings) / \
187 sizeof(rte_i40evf_stats_strings[0]))
189 static const struct eth_dev_ops i40evf_eth_dev_ops
= {
190 .dev_configure
= i40evf_dev_configure
,
191 .dev_start
= i40evf_dev_start
,
192 .dev_stop
= i40evf_dev_stop
,
193 .promiscuous_enable
= i40evf_dev_promiscuous_enable
,
194 .promiscuous_disable
= i40evf_dev_promiscuous_disable
,
195 .allmulticast_enable
= i40evf_dev_allmulticast_enable
,
196 .allmulticast_disable
= i40evf_dev_allmulticast_disable
,
197 .link_update
= i40evf_dev_link_update
,
198 .stats_get
= i40evf_dev_stats_get
,
199 .xstats_get
= i40evf_dev_xstats_get
,
200 .xstats_get_names
= i40evf_dev_xstats_get_names
,
201 .xstats_reset
= i40evf_dev_xstats_reset
,
202 .dev_close
= i40evf_dev_close
,
203 .dev_infos_get
= i40evf_dev_info_get
,
204 .dev_supported_ptypes_get
= i40e_dev_supported_ptypes_get
,
205 .vlan_filter_set
= i40evf_vlan_filter_set
,
206 .vlan_offload_set
= i40evf_vlan_offload_set
,
207 .vlan_pvid_set
= i40evf_vlan_pvid_set
,
208 .rx_queue_start
= i40evf_dev_rx_queue_start
,
209 .rx_queue_stop
= i40evf_dev_rx_queue_stop
,
210 .tx_queue_start
= i40evf_dev_tx_queue_start
,
211 .tx_queue_stop
= i40evf_dev_tx_queue_stop
,
212 .rx_queue_setup
= i40e_dev_rx_queue_setup
,
213 .rx_queue_release
= i40e_dev_rx_queue_release
,
214 .rx_queue_intr_enable
= i40evf_dev_rx_queue_intr_enable
,
215 .rx_queue_intr_disable
= i40evf_dev_rx_queue_intr_disable
,
216 .rx_descriptor_done
= i40e_dev_rx_descriptor_done
,
217 .tx_queue_setup
= i40e_dev_tx_queue_setup
,
218 .tx_queue_release
= i40e_dev_tx_queue_release
,
219 .rx_queue_count
= i40e_dev_rx_queue_count
,
220 .rxq_info_get
= i40e_rxq_info_get
,
221 .txq_info_get
= i40e_txq_info_get
,
222 .mac_addr_add
= i40evf_add_mac_addr
,
223 .mac_addr_remove
= i40evf_del_mac_addr
,
224 .reta_update
= i40evf_dev_rss_reta_update
,
225 .reta_query
= i40evf_dev_rss_reta_query
,
226 .rss_hash_update
= i40evf_dev_rss_hash_update
,
227 .rss_hash_conf_get
= i40evf_dev_rss_hash_conf_get
,
231 * Read data in admin queue to get msg from pf driver
233 static enum i40evf_aq_result
234 i40evf_read_pfmsg(struct rte_eth_dev
*dev
, struct i40evf_arq_msg_info
*data
)
236 struct i40e_hw
*hw
= I40E_DEV_PRIVATE_TO_HW(dev
->data
->dev_private
);
237 struct i40e_vf
*vf
= I40EVF_DEV_PRIVATE_TO_VF(dev
->data
->dev_private
);
238 struct i40e_arq_event_info event
;
239 enum i40e_virtchnl_ops opcode
;
240 enum i40e_status_code retval
;
242 enum i40evf_aq_result result
= I40EVF_MSG_NON
;
244 event
.buf_len
= data
->buf_len
;
245 event
.msg_buf
= data
->msg
;
246 ret
= i40e_clean_arq_element(hw
, &event
, NULL
);
247 /* Can't read any msg from adminQ */
249 if (ret
!= I40E_ERR_ADMIN_QUEUE_NO_WORK
)
250 result
= I40EVF_MSG_ERR
;
254 opcode
= (enum i40e_virtchnl_ops
)rte_le_to_cpu_32(event
.desc
.cookie_high
);
255 retval
= (enum i40e_status_code
)rte_le_to_cpu_32(event
.desc
.cookie_low
);
257 if (opcode
== I40E_VIRTCHNL_OP_EVENT
) {
258 struct i40e_virtchnl_pf_event
*vpe
=
259 (struct i40e_virtchnl_pf_event
*)event
.msg_buf
;
261 result
= I40EVF_MSG_SYS
;
262 switch (vpe
->event
) {
263 case I40E_VIRTCHNL_EVENT_LINK_CHANGE
:
265 vpe
->event_data
.link_event
.link_status
;
267 vpe
->event_data
.link_event
.link_speed
;
268 vf
->pend_msg
|= PFMSG_LINK_CHANGE
;
269 PMD_DRV_LOG(INFO
, "Link status update:%s",
270 vf
->link_up
? "up" : "down");
272 case I40E_VIRTCHNL_EVENT_RESET_IMPENDING
:
274 vf
->pend_msg
|= PFMSG_RESET_IMPENDING
;
275 PMD_DRV_LOG(INFO
, "vf is reseting");
277 case I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE
:
278 vf
->dev_closed
= true;
279 vf
->pend_msg
|= PFMSG_DRIVER_CLOSE
;
280 PMD_DRV_LOG(INFO
, "PF driver closed");
283 PMD_DRV_LOG(ERR
, "%s: Unknown event %d from pf",
284 __func__
, vpe
->event
);
287 /* async reply msg on command issued by vf previously */
288 result
= I40EVF_MSG_CMD
;
289 /* Actual data length read from PF */
290 data
->msg_len
= event
.msg_len
;
293 data
->result
= retval
;
300 * clear current command. Only call in case execute
301 * _atomic_set_cmd successfully.
304 _clear_cmd(struct i40e_vf
*vf
)
307 vf
->pend_cmd
= I40E_VIRTCHNL_OP_UNKNOWN
;
311 * Check there is pending cmd in execution. If none, set new command.
314 _atomic_set_cmd(struct i40e_vf
*vf
, enum i40e_virtchnl_ops ops
)
316 int ret
= rte_atomic32_cmpset(&vf
->pend_cmd
,
317 I40E_VIRTCHNL_OP_UNKNOWN
, ops
);
320 PMD_DRV_LOG(ERR
, "There is incomplete cmd %d", vf
->pend_cmd
);
325 #define MAX_TRY_TIMES 200
326 #define ASQ_DELAY_MS 10
329 i40evf_execute_vf_cmd(struct rte_eth_dev
*dev
, struct vf_cmd_info
*args
)
331 struct i40e_hw
*hw
= I40E_DEV_PRIVATE_TO_HW(dev
->data
->dev_private
);
332 struct i40e_vf
*vf
= I40EVF_DEV_PRIVATE_TO_VF(dev
->data
->dev_private
);
333 struct i40evf_arq_msg_info info
;
334 enum i40evf_aq_result ret
;
337 if (_atomic_set_cmd(vf
, args
->ops
))
340 info
.msg
= args
->out_buffer
;
341 info
.buf_len
= args
->out_size
;
342 info
.ops
= I40E_VIRTCHNL_OP_UNKNOWN
;
343 info
.result
= I40E_SUCCESS
;
345 err
= i40e_aq_send_msg_to_pf(hw
, args
->ops
, I40E_SUCCESS
,
346 args
->in_args
, args
->in_args_size
, NULL
);
348 PMD_DRV_LOG(ERR
, "fail to send cmd %d", args
->ops
);
354 case I40E_VIRTCHNL_OP_RESET_VF
:
355 /*no need to process in this function */
358 case I40E_VIRTCHNL_OP_VERSION
:
359 case I40E_VIRTCHNL_OP_GET_VF_RESOURCES
:
360 /* for init adminq commands, need to poll the response */
363 ret
= i40evf_read_pfmsg(dev
, &info
);
364 if (ret
== I40EVF_MSG_CMD
) {
367 } else if (ret
== I40EVF_MSG_ERR
)
369 rte_delay_ms(ASQ_DELAY_MS
);
370 /* If don't read msg or read sys event, continue */
371 } while (i
++ < MAX_TRY_TIMES
);
376 /* for other adminq in running time, waiting the cmd done flag */
379 if (vf
->pend_cmd
== I40E_VIRTCHNL_OP_UNKNOWN
) {
383 rte_delay_ms(ASQ_DELAY_MS
);
384 /* If don't read msg or read sys event, continue */
385 } while (i
++ < MAX_TRY_TIMES
);
389 return err
| vf
->cmd_retval
;
393 * Check API version with sync wait until version read or fail from admin queue
396 i40evf_check_api_version(struct rte_eth_dev
*dev
)
398 struct i40e_virtchnl_version_info version
, *pver
;
400 struct vf_cmd_info args
;
401 struct i40e_vf
*vf
= I40EVF_DEV_PRIVATE_TO_VF(dev
->data
->dev_private
);
403 version
.major
= I40E_VIRTCHNL_VERSION_MAJOR
;
404 version
.minor
= I40E_VIRTCHNL_VERSION_MINOR
;
406 args
.ops
= I40E_VIRTCHNL_OP_VERSION
;
407 args
.in_args
= (uint8_t *)&version
;
408 args
.in_args_size
= sizeof(version
);
409 args
.out_buffer
= vf
->aq_resp
;
410 args
.out_size
= I40E_AQ_BUF_SZ
;
412 err
= i40evf_execute_vf_cmd(dev
, &args
);
414 PMD_INIT_LOG(ERR
, "fail to execute command OP_VERSION");
418 pver
= (struct i40e_virtchnl_version_info
*)args
.out_buffer
;
419 vf
->version_major
= pver
->major
;
420 vf
->version_minor
= pver
->minor
;
421 if (vf
->version_major
== I40E_DPDK_VERSION_MAJOR
)
422 PMD_DRV_LOG(INFO
, "Peer is DPDK PF host");
423 else if ((vf
->version_major
== I40E_VIRTCHNL_VERSION_MAJOR
) &&
424 (vf
->version_minor
<= I40E_VIRTCHNL_VERSION_MINOR
))
425 PMD_DRV_LOG(INFO
, "Peer is Linux PF host");
427 PMD_INIT_LOG(ERR
, "PF/VF API version mismatch:(%u.%u)-(%u.%u)",
428 vf
->version_major
, vf
->version_minor
,
429 I40E_VIRTCHNL_VERSION_MAJOR
,
430 I40E_VIRTCHNL_VERSION_MINOR
);
438 i40evf_get_vf_resource(struct rte_eth_dev
*dev
)
440 struct i40e_hw
*hw
= I40E_DEV_PRIVATE_TO_HW(dev
->data
->dev_private
);
441 struct i40e_vf
*vf
= I40EVF_DEV_PRIVATE_TO_VF(dev
->data
->dev_private
);
443 struct vf_cmd_info args
;
446 args
.ops
= I40E_VIRTCHNL_OP_GET_VF_RESOURCES
;
447 args
.out_buffer
= vf
->aq_resp
;
448 args
.out_size
= I40E_AQ_BUF_SZ
;
450 caps
= I40E_VIRTCHNL_VF_OFFLOAD_L2
|
451 I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ
|
452 I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG
|
453 I40E_VIRTCHNL_VF_OFFLOAD_VLAN
|
454 I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING
;
455 args
.in_args
= (uint8_t *)&caps
;
456 args
.in_args_size
= sizeof(caps
);
459 args
.in_args_size
= 0;
461 err
= i40evf_execute_vf_cmd(dev
, &args
);
464 PMD_DRV_LOG(ERR
, "fail to execute command OP_GET_VF_RESOURCE");
468 len
= sizeof(struct i40e_virtchnl_vf_resource
) +
469 I40E_MAX_VF_VSI
* sizeof(struct i40e_virtchnl_vsi_resource
);
471 (void)rte_memcpy(vf
->vf_res
, args
.out_buffer
,
472 RTE_MIN(args
.out_size
, len
));
473 i40e_vf_parse_hw_config(hw
, vf
->vf_res
);
479 i40evf_config_promisc(struct rte_eth_dev
*dev
,
481 bool enable_multicast
)
483 struct i40e_vf
*vf
= I40EVF_DEV_PRIVATE_TO_VF(dev
->data
->dev_private
);
485 struct vf_cmd_info args
;
486 struct i40e_virtchnl_promisc_info promisc
;
489 promisc
.vsi_id
= vf
->vsi_res
->vsi_id
;
492 promisc
.flags
|= I40E_FLAG_VF_UNICAST_PROMISC
;
494 if (enable_multicast
)
495 promisc
.flags
|= I40E_FLAG_VF_MULTICAST_PROMISC
;
497 args
.ops
= I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
;
498 args
.in_args
= (uint8_t *)&promisc
;
499 args
.in_args_size
= sizeof(promisc
);
500 args
.out_buffer
= vf
->aq_resp
;
501 args
.out_size
= I40E_AQ_BUF_SZ
;
503 err
= i40evf_execute_vf_cmd(dev
, &args
);
506 PMD_DRV_LOG(ERR
, "fail to execute command "
507 "CONFIG_PROMISCUOUS_MODE");
511 /* Configure vlan and double vlan offload. Use flag to specify which part to configure */
513 i40evf_config_vlan_offload(struct rte_eth_dev
*dev
,
514 bool enable_vlan_strip
)
516 struct i40e_vf
*vf
= I40EVF_DEV_PRIVATE_TO_VF(dev
->data
->dev_private
);
518 struct vf_cmd_info args
;
519 struct i40e_virtchnl_vlan_offload_info offload
;
521 offload
.vsi_id
= vf
->vsi_res
->vsi_id
;
522 offload
.enable_vlan_strip
= enable_vlan_strip
;
524 args
.ops
= (enum i40e_virtchnl_ops
)I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD
;
525 args
.in_args
= (uint8_t *)&offload
;
526 args
.in_args_size
= sizeof(offload
);
527 args
.out_buffer
= vf
->aq_resp
;
528 args
.out_size
= I40E_AQ_BUF_SZ
;
530 err
= i40evf_execute_vf_cmd(dev
, &args
);
532 PMD_DRV_LOG(ERR
, "fail to execute command CFG_VLAN_OFFLOAD");
538 i40evf_config_vlan_pvid(struct rte_eth_dev
*dev
,
539 struct i40e_vsi_vlan_pvid_info
*info
)
541 struct i40e_vf
*vf
= I40EVF_DEV_PRIVATE_TO_VF(dev
->data
->dev_private
);
543 struct vf_cmd_info args
;
544 struct i40e_virtchnl_pvid_info tpid_info
;
547 PMD_DRV_LOG(ERR
, "invalid parameters");
548 return I40E_ERR_PARAM
;
551 memset(&tpid_info
, 0, sizeof(tpid_info
));
552 tpid_info
.vsi_id
= vf
->vsi_res
->vsi_id
;
553 (void)rte_memcpy(&tpid_info
.info
, info
, sizeof(*info
));
555 args
.ops
= (enum i40e_virtchnl_ops
)I40E_VIRTCHNL_OP_CFG_VLAN_PVID
;
556 args
.in_args
= (uint8_t *)&tpid_info
;
557 args
.in_args_size
= sizeof(tpid_info
);
558 args
.out_buffer
= vf
->aq_resp
;
559 args
.out_size
= I40E_AQ_BUF_SZ
;
561 err
= i40evf_execute_vf_cmd(dev
, &args
);
563 PMD_DRV_LOG(ERR
, "fail to execute command CFG_VLAN_PVID");
569 i40evf_fill_virtchnl_vsi_txq_info(struct i40e_virtchnl_txq_info
*txq_info
,
573 struct i40e_tx_queue
*txq
)
575 txq_info
->vsi_id
= vsi_id
;
576 txq_info
->queue_id
= queue_id
;
577 if (queue_id
< nb_txq
) {
578 txq_info
->ring_len
= txq
->nb_tx_desc
;
579 txq_info
->dma_ring_addr
= txq
->tx_ring_phys_addr
;
584 i40evf_fill_virtchnl_vsi_rxq_info(struct i40e_virtchnl_rxq_info
*rxq_info
,
588 uint32_t max_pkt_size
,
589 struct i40e_rx_queue
*rxq
)
591 rxq_info
->vsi_id
= vsi_id
;
592 rxq_info
->queue_id
= queue_id
;
593 rxq_info
->max_pkt_size
= max_pkt_size
;
594 if (queue_id
< nb_rxq
) {
595 rxq_info
->ring_len
= rxq
->nb_rx_desc
;
596 rxq_info
->dma_ring_addr
= rxq
->rx_ring_phys_addr
;
597 rxq_info
->databuffer_size
=
598 (rte_pktmbuf_data_room_size(rxq
->mp
) -
599 RTE_PKTMBUF_HEADROOM
);
603 /* It configures VSI queues to co-work with Linux PF host */
605 i40evf_configure_vsi_queues(struct rte_eth_dev
*dev
)
607 struct i40e_vf
*vf
= I40EVF_DEV_PRIVATE_TO_VF(dev
->data
->dev_private
);
608 struct i40e_rx_queue
**rxq
=
609 (struct i40e_rx_queue
**)dev
->data
->rx_queues
;
610 struct i40e_tx_queue
**txq
=
611 (struct i40e_tx_queue
**)dev
->data
->tx_queues
;
612 struct i40e_virtchnl_vsi_queue_config_info
*vc_vqci
;
613 struct i40e_virtchnl_queue_pair_info
*vc_qpi
;
614 struct vf_cmd_info args
;
615 uint16_t i
, nb_qp
= vf
->num_queue_pairs
;
616 const uint32_t size
=
617 I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqci
, nb_qp
);
621 memset(buff
, 0, sizeof(buff
));
622 vc_vqci
= (struct i40e_virtchnl_vsi_queue_config_info
*)buff
;
623 vc_vqci
->vsi_id
= vf
->vsi_res
->vsi_id
;
624 vc_vqci
->num_queue_pairs
= nb_qp
;
626 for (i
= 0, vc_qpi
= vc_vqci
->qpair
; i
< nb_qp
; i
++, vc_qpi
++) {
627 i40evf_fill_virtchnl_vsi_txq_info(&vc_qpi
->txq
,
628 vc_vqci
->vsi_id
, i
, dev
->data
->nb_tx_queues
, txq
[i
]);
629 i40evf_fill_virtchnl_vsi_rxq_info(&vc_qpi
->rxq
,
630 vc_vqci
->vsi_id
, i
, dev
->data
->nb_rx_queues
,
631 vf
->max_pkt_len
, rxq
[i
]);
633 memset(&args
, 0, sizeof(args
));
634 args
.ops
= I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES
;
635 args
.in_args
= (uint8_t *)vc_vqci
;
636 args
.in_args_size
= size
;
637 args
.out_buffer
= vf
->aq_resp
;
638 args
.out_size
= I40E_AQ_BUF_SZ
;
639 ret
= i40evf_execute_vf_cmd(dev
, &args
);
641 PMD_DRV_LOG(ERR
, "Failed to execute command of "
642 "I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES\n");
647 /* It configures VSI queues to co-work with DPDK PF host */
649 i40evf_configure_vsi_queues_ext(struct rte_eth_dev
*dev
)
651 struct i40e_vf
*vf
= I40EVF_DEV_PRIVATE_TO_VF(dev
->data
->dev_private
);
652 struct i40e_rx_queue
**rxq
=
653 (struct i40e_rx_queue
**)dev
->data
->rx_queues
;
654 struct i40e_tx_queue
**txq
=
655 (struct i40e_tx_queue
**)dev
->data
->tx_queues
;
656 struct i40e_virtchnl_vsi_queue_config_ext_info
*vc_vqcei
;
657 struct i40e_virtchnl_queue_pair_ext_info
*vc_qpei
;
658 struct vf_cmd_info args
;
659 uint16_t i
, nb_qp
= vf
->num_queue_pairs
;
660 const uint32_t size
=
661 I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqcei
, nb_qp
);
665 memset(buff
, 0, sizeof(buff
));
666 vc_vqcei
= (struct i40e_virtchnl_vsi_queue_config_ext_info
*)buff
;
667 vc_vqcei
->vsi_id
= vf
->vsi_res
->vsi_id
;
668 vc_vqcei
->num_queue_pairs
= nb_qp
;
669 vc_qpei
= vc_vqcei
->qpair
;
670 for (i
= 0; i
< nb_qp
; i
++, vc_qpei
++) {
671 i40evf_fill_virtchnl_vsi_txq_info(&vc_qpei
->txq
,
672 vc_vqcei
->vsi_id
, i
, dev
->data
->nb_tx_queues
, txq
[i
]);
673 i40evf_fill_virtchnl_vsi_rxq_info(&vc_qpei
->rxq
,
674 vc_vqcei
->vsi_id
, i
, dev
->data
->nb_rx_queues
,
675 vf
->max_pkt_len
, rxq
[i
]);
676 if (i
< dev
->data
->nb_rx_queues
)
678 * It adds extra info for configuring VSI queues, which
679 * is needed to enable the configurable crc stripping
682 vc_qpei
->rxq_ext
.crcstrip
=
683 dev
->data
->dev_conf
.rxmode
.hw_strip_crc
;
685 memset(&args
, 0, sizeof(args
));
687 (enum i40e_virtchnl_ops
)I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT
;
688 args
.in_args
= (uint8_t *)vc_vqcei
;
689 args
.in_args_size
= size
;
690 args
.out_buffer
= vf
->aq_resp
;
691 args
.out_size
= I40E_AQ_BUF_SZ
;
692 ret
= i40evf_execute_vf_cmd(dev
, &args
);
694 PMD_DRV_LOG(ERR
, "Failed to execute command of "
695 "I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT\n");
701 i40evf_configure_queues(struct rte_eth_dev
*dev
)
703 struct i40e_vf
*vf
= I40EVF_DEV_PRIVATE_TO_VF(dev
->data
->dev_private
);
705 if (vf
->version_major
== I40E_DPDK_VERSION_MAJOR
)
706 /* To support DPDK PF host */
707 return i40evf_configure_vsi_queues_ext(dev
);
709 /* To support Linux PF host */
710 return i40evf_configure_vsi_queues(dev
);
714 i40evf_config_irq_map(struct rte_eth_dev
*dev
)
716 struct i40e_vf
*vf
= I40EVF_DEV_PRIVATE_TO_VF(dev
->data
->dev_private
);
717 struct vf_cmd_info args
;
718 uint8_t cmd_buffer
[sizeof(struct i40e_virtchnl_irq_map_info
) + \
719 sizeof(struct i40e_virtchnl_vector_map
)];
720 struct i40e_virtchnl_irq_map_info
*map_info
;
721 struct rte_intr_handle
*intr_handle
= &dev
->pci_dev
->intr_handle
;
725 if (rte_intr_allow_others(intr_handle
)) {
726 if (vf
->version_major
== I40E_DPDK_VERSION_MAJOR
)
727 vector_id
= I40EVF_VSI_DEFAULT_MSIX_INTR
;
729 vector_id
= I40EVF_VSI_DEFAULT_MSIX_INTR_LNX
;
731 vector_id
= I40E_MISC_VEC_ID
;
734 map_info
= (struct i40e_virtchnl_irq_map_info
*)cmd_buffer
;
735 map_info
->num_vectors
= 1;
736 map_info
->vecmap
[0].rxitr_idx
= I40E_ITR_INDEX_DEFAULT
;
737 map_info
->vecmap
[0].vsi_id
= vf
->vsi_res
->vsi_id
;
738 /* Alway use default dynamic MSIX interrupt */
739 map_info
->vecmap
[0].vector_id
= vector_id
;
740 /* Don't map any tx queue */
741 map_info
->vecmap
[0].txq_map
= 0;
742 map_info
->vecmap
[0].rxq_map
= 0;
743 for (i
= 0; i
< dev
->data
->nb_rx_queues
; i
++) {
744 map_info
->vecmap
[0].rxq_map
|= 1 << i
;
745 if (rte_intr_dp_is_en(intr_handle
))
746 intr_handle
->intr_vec
[i
] = vector_id
;
749 args
.ops
= I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP
;
750 args
.in_args
= (u8
*)cmd_buffer
;
751 args
.in_args_size
= sizeof(cmd_buffer
);
752 args
.out_buffer
= vf
->aq_resp
;
753 args
.out_size
= I40E_AQ_BUF_SZ
;
754 err
= i40evf_execute_vf_cmd(dev
, &args
);
756 PMD_DRV_LOG(ERR
, "fail to execute command OP_ENABLE_QUEUES");
762 i40evf_switch_queue(struct rte_eth_dev
*dev
, bool isrx
, uint16_t qid
,
765 struct i40e_vf
*vf
= I40EVF_DEV_PRIVATE_TO_VF(dev
->data
->dev_private
);
766 struct i40e_virtchnl_queue_select queue_select
;
768 struct vf_cmd_info args
;
769 memset(&queue_select
, 0, sizeof(queue_select
));
770 queue_select
.vsi_id
= vf
->vsi_res
->vsi_id
;
773 queue_select
.rx_queues
|= 1 << qid
;
775 queue_select
.tx_queues
|= 1 << qid
;
778 args
.ops
= I40E_VIRTCHNL_OP_ENABLE_QUEUES
;
780 args
.ops
= I40E_VIRTCHNL_OP_DISABLE_QUEUES
;
781 args
.in_args
= (u8
*)&queue_select
;
782 args
.in_args_size
= sizeof(queue_select
);
783 args
.out_buffer
= vf
->aq_resp
;
784 args
.out_size
= I40E_AQ_BUF_SZ
;
785 err
= i40evf_execute_vf_cmd(dev
, &args
);
787 PMD_DRV_LOG(ERR
, "fail to switch %s %u %s",
788 isrx
? "RX" : "TX", qid
, on
? "on" : "off");
794 i40evf_start_queues(struct rte_eth_dev
*dev
)
796 struct rte_eth_dev_data
*dev_data
= dev
->data
;
798 struct i40e_rx_queue
*rxq
;
799 struct i40e_tx_queue
*txq
;
801 for (i
= 0; i
< dev
->data
->nb_rx_queues
; i
++) {
802 rxq
= dev_data
->rx_queues
[i
];
803 if (rxq
->rx_deferred_start
)
805 if (i40evf_dev_rx_queue_start(dev
, i
) != 0) {
806 PMD_DRV_LOG(ERR
, "Fail to start queue %u", i
);
811 for (i
= 0; i
< dev
->data
->nb_tx_queues
; i
++) {
812 txq
= dev_data
->tx_queues
[i
];
813 if (txq
->tx_deferred_start
)
815 if (i40evf_dev_tx_queue_start(dev
, i
) != 0) {
816 PMD_DRV_LOG(ERR
, "Fail to start queue %u", i
);
825 i40evf_stop_queues(struct rte_eth_dev
*dev
)
829 /* Stop TX queues first */
830 for (i
= 0; i
< dev
->data
->nb_tx_queues
; i
++) {
831 if (i40evf_dev_tx_queue_stop(dev
, i
) != 0) {
832 PMD_DRV_LOG(ERR
, "Fail to stop queue %u", i
);
837 /* Then stop RX queues */
838 for (i
= 0; i
< dev
->data
->nb_rx_queues
; i
++) {
839 if (i40evf_dev_rx_queue_stop(dev
, i
) != 0) {
840 PMD_DRV_LOG(ERR
, "Fail to stop queue %u", i
);
849 i40evf_add_mac_addr(struct rte_eth_dev
*dev
,
850 struct ether_addr
*addr
,
851 __rte_unused
uint32_t index
,
852 __rte_unused
uint32_t pool
)
854 struct i40e_virtchnl_ether_addr_list
*list
;
855 struct i40e_vf
*vf
= I40EVF_DEV_PRIVATE_TO_VF(dev
->data
->dev_private
);
856 uint8_t cmd_buffer
[sizeof(struct i40e_virtchnl_ether_addr_list
) + \
857 sizeof(struct i40e_virtchnl_ether_addr
)];
859 struct vf_cmd_info args
;
861 if (i40e_validate_mac_addr(addr
->addr_bytes
) != I40E_SUCCESS
) {
862 PMD_DRV_LOG(ERR
, "Invalid mac:%x:%x:%x:%x:%x:%x",
863 addr
->addr_bytes
[0], addr
->addr_bytes
[1],
864 addr
->addr_bytes
[2], addr
->addr_bytes
[3],
865 addr
->addr_bytes
[4], addr
->addr_bytes
[5]);
869 list
= (struct i40e_virtchnl_ether_addr_list
*)cmd_buffer
;
870 list
->vsi_id
= vf
->vsi_res
->vsi_id
;
871 list
->num_elements
= 1;
872 (void)rte_memcpy(list
->list
[0].addr
, addr
->addr_bytes
,
873 sizeof(addr
->addr_bytes
));
875 args
.ops
= I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS
;
876 args
.in_args
= cmd_buffer
;
877 args
.in_args_size
= sizeof(cmd_buffer
);
878 args
.out_buffer
= vf
->aq_resp
;
879 args
.out_size
= I40E_AQ_BUF_SZ
;
880 err
= i40evf_execute_vf_cmd(dev
, &args
);
882 PMD_DRV_LOG(ERR
, "fail to execute command "
883 "OP_ADD_ETHER_ADDRESS");
889 i40evf_del_mac_addr(struct rte_eth_dev
*dev
, uint32_t index
)
891 struct i40e_virtchnl_ether_addr_list
*list
;
892 struct i40e_vf
*vf
= I40EVF_DEV_PRIVATE_TO_VF(dev
->data
->dev_private
);
893 struct rte_eth_dev_data
*data
= dev
->data
;
894 struct ether_addr
*addr
;
895 uint8_t cmd_buffer
[sizeof(struct i40e_virtchnl_ether_addr_list
) + \
896 sizeof(struct i40e_virtchnl_ether_addr
)];
898 struct vf_cmd_info args
;
900 addr
= &(data
->mac_addrs
[index
]);
902 if (i40e_validate_mac_addr(addr
->addr_bytes
) != I40E_SUCCESS
) {
903 PMD_DRV_LOG(ERR
, "Invalid mac:%x-%x-%x-%x-%x-%x",
904 addr
->addr_bytes
[0], addr
->addr_bytes
[1],
905 addr
->addr_bytes
[2], addr
->addr_bytes
[3],
906 addr
->addr_bytes
[4], addr
->addr_bytes
[5]);
910 list
= (struct i40e_virtchnl_ether_addr_list
*)cmd_buffer
;
911 list
->vsi_id
= vf
->vsi_res
->vsi_id
;
912 list
->num_elements
= 1;
913 (void)rte_memcpy(list
->list
[0].addr
, addr
->addr_bytes
,
914 sizeof(addr
->addr_bytes
));
916 args
.ops
= I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS
;
917 args
.in_args
= cmd_buffer
;
918 args
.in_args_size
= sizeof(cmd_buffer
);
919 args
.out_buffer
= vf
->aq_resp
;
920 args
.out_size
= I40E_AQ_BUF_SZ
;
921 err
= i40evf_execute_vf_cmd(dev
, &args
);
923 PMD_DRV_LOG(ERR
, "fail to execute command "
924 "OP_DEL_ETHER_ADDRESS");
929 i40evf_update_stats(struct rte_eth_dev
*dev
, struct i40e_eth_stats
**pstats
)
931 struct i40e_vf
*vf
= I40EVF_DEV_PRIVATE_TO_VF(dev
->data
->dev_private
);
932 struct i40e_virtchnl_queue_select q_stats
;
934 struct vf_cmd_info args
;
936 memset(&q_stats
, 0, sizeof(q_stats
));
937 q_stats
.vsi_id
= vf
->vsi_res
->vsi_id
;
938 args
.ops
= I40E_VIRTCHNL_OP_GET_STATS
;
939 args
.in_args
= (u8
*)&q_stats
;
940 args
.in_args_size
= sizeof(q_stats
);
941 args
.out_buffer
= vf
->aq_resp
;
942 args
.out_size
= I40E_AQ_BUF_SZ
;
944 err
= i40evf_execute_vf_cmd(dev
, &args
);
946 PMD_DRV_LOG(ERR
, "fail to execute command OP_GET_STATS");
950 *pstats
= (struct i40e_eth_stats
*)args
.out_buffer
;
955 i40evf_get_statics(struct rte_eth_dev
*dev
, struct rte_eth_stats
*stats
)
958 struct i40e_eth_stats
*pstats
= NULL
;
960 ret
= i40evf_update_stats(dev
, &pstats
);
964 stats
->ipackets
= pstats
->rx_unicast
+ pstats
->rx_multicast
+
965 pstats
->rx_broadcast
;
966 stats
->opackets
= pstats
->tx_broadcast
+ pstats
->tx_multicast
+
968 stats
->ierrors
= pstats
->rx_discards
;
969 stats
->oerrors
= pstats
->tx_errors
+ pstats
->tx_discards
;
970 stats
->ibytes
= pstats
->rx_bytes
;
971 stats
->obytes
= pstats
->tx_bytes
;
977 i40evf_dev_xstats_reset(struct rte_eth_dev
*dev
)
979 struct i40e_vf
*vf
= I40EVF_DEV_PRIVATE_TO_VF(dev
->data
->dev_private
);
980 struct i40e_eth_stats
*pstats
= NULL
;
982 /* read stat values to clear hardware registers */
983 i40evf_update_stats(dev
, &pstats
);
985 /* set stats offset base on current values */
986 vf
->vsi
.eth_stats_offset
= vf
->vsi
.eth_stats
;
989 static int i40evf_dev_xstats_get_names(__rte_unused
struct rte_eth_dev
*dev
,
990 struct rte_eth_xstat_name
*xstats_names
,
991 __rte_unused
unsigned limit
)
995 if (xstats_names
!= NULL
)
996 for (i
= 0; i
< I40EVF_NB_XSTATS
; i
++) {
997 snprintf(xstats_names
[i
].name
,
998 sizeof(xstats_names
[i
].name
),
999 "%s", rte_i40evf_stats_strings
[i
].name
);
1001 return I40EVF_NB_XSTATS
;
1004 static int i40evf_dev_xstats_get(struct rte_eth_dev
*dev
,
1005 struct rte_eth_xstat
*xstats
, unsigned n
)
1009 struct i40e_eth_stats
*pstats
= NULL
;
1011 if (n
< I40EVF_NB_XSTATS
)
1012 return I40EVF_NB_XSTATS
;
1014 ret
= i40evf_update_stats(dev
, &pstats
);
1021 /* loop over xstats array and values from pstats */
1022 for (i
= 0; i
< I40EVF_NB_XSTATS
; i
++) {
1024 xstats
[i
].value
= *(uint64_t *)(((char *)pstats
) +
1025 rte_i40evf_stats_strings
[i
].offset
);
1028 return I40EVF_NB_XSTATS
;
1032 i40evf_add_vlan(struct rte_eth_dev
*dev
, uint16_t vlanid
)
1034 struct i40e_vf
*vf
= I40EVF_DEV_PRIVATE_TO_VF(dev
->data
->dev_private
);
1035 struct i40e_virtchnl_vlan_filter_list
*vlan_list
;
1036 uint8_t cmd_buffer
[sizeof(struct i40e_virtchnl_vlan_filter_list
) +
1039 struct vf_cmd_info args
;
1041 vlan_list
= (struct i40e_virtchnl_vlan_filter_list
*)cmd_buffer
;
1042 vlan_list
->vsi_id
= vf
->vsi_res
->vsi_id
;
1043 vlan_list
->num_elements
= 1;
1044 vlan_list
->vlan_id
[0] = vlanid
;
1046 args
.ops
= I40E_VIRTCHNL_OP_ADD_VLAN
;
1047 args
.in_args
= (u8
*)&cmd_buffer
;
1048 args
.in_args_size
= sizeof(cmd_buffer
);
1049 args
.out_buffer
= vf
->aq_resp
;
1050 args
.out_size
= I40E_AQ_BUF_SZ
;
1051 err
= i40evf_execute_vf_cmd(dev
, &args
);
1053 PMD_DRV_LOG(ERR
, "fail to execute command OP_ADD_VLAN");
1059 i40evf_del_vlan(struct rte_eth_dev
*dev
, uint16_t vlanid
)
1061 struct i40e_vf
*vf
= I40EVF_DEV_PRIVATE_TO_VF(dev
->data
->dev_private
);
1062 struct i40e_virtchnl_vlan_filter_list
*vlan_list
;
1063 uint8_t cmd_buffer
[sizeof(struct i40e_virtchnl_vlan_filter_list
) +
1066 struct vf_cmd_info args
;
1068 vlan_list
= (struct i40e_virtchnl_vlan_filter_list
*)cmd_buffer
;
1069 vlan_list
->vsi_id
= vf
->vsi_res
->vsi_id
;
1070 vlan_list
->num_elements
= 1;
1071 vlan_list
->vlan_id
[0] = vlanid
;
1073 args
.ops
= I40E_VIRTCHNL_OP_DEL_VLAN
;
1074 args
.in_args
= (u8
*)&cmd_buffer
;
1075 args
.in_args_size
= sizeof(cmd_buffer
);
1076 args
.out_buffer
= vf
->aq_resp
;
1077 args
.out_size
= I40E_AQ_BUF_SZ
;
1078 err
= i40evf_execute_vf_cmd(dev
, &args
);
1080 PMD_DRV_LOG(ERR
, "fail to execute command OP_DEL_VLAN");
1085 static const struct rte_pci_id pci_id_i40evf_map
[] = {
1086 { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID
, I40E_DEV_ID_VF
) },
1087 { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID
, I40E_DEV_ID_VF_HV
) },
1088 { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID
, I40E_DEV_ID_X722_A0_VF
) },
1089 { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID
, I40E_DEV_ID_X722_VF
) },
1090 { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID
, I40E_DEV_ID_X722_VF_HV
) },
1091 { .vendor_id
= 0, /* sentinel */ },
1095 i40evf_dev_atomic_write_link_status(struct rte_eth_dev
*dev
,
1096 struct rte_eth_link
*link
)
1098 struct rte_eth_link
*dst
= &(dev
->data
->dev_link
);
1099 struct rte_eth_link
*src
= link
;
1101 if (rte_atomic64_cmpset((uint64_t *)dst
, *(uint64_t *)dst
,
1102 *(uint64_t *)src
) == 0)
1110 i40evf_disable_irq0(struct i40e_hw
*hw
)
1112 /* Disable all interrupt types */
1113 I40E_WRITE_REG(hw
, I40E_VFINT_ICR0_ENA1
, 0);
1114 I40E_WRITE_REG(hw
, I40E_VFINT_DYN_CTL01
,
1115 I40E_VFINT_DYN_CTL01_ITR_INDX_MASK
);
1116 I40EVF_WRITE_FLUSH(hw
);
1121 i40evf_enable_irq0(struct i40e_hw
*hw
)
1123 /* Enable admin queue interrupt trigger */
1126 i40evf_disable_irq0(hw
);
1127 val
= I40E_READ_REG(hw
, I40E_VFINT_ICR0_ENA1
);
1128 val
|= I40E_VFINT_ICR0_ENA1_ADMINQ_MASK
|
1129 I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK
;
1130 I40E_WRITE_REG(hw
, I40E_VFINT_ICR0_ENA1
, val
);
1132 I40E_WRITE_REG(hw
, I40E_VFINT_DYN_CTL01
,
1133 I40E_VFINT_DYN_CTL01_INTENA_MASK
|
1134 I40E_VFINT_DYN_CTL01_CLEARPBA_MASK
|
1135 I40E_VFINT_DYN_CTL01_ITR_INDX_MASK
);
1137 I40EVF_WRITE_FLUSH(hw
);
1141 i40evf_reset_vf(struct i40e_hw
*hw
)
1145 if (i40e_vf_reset(hw
) != I40E_SUCCESS
) {
1146 PMD_INIT_LOG(ERR
, "Reset VF NIC failed");
1150 * After issuing vf reset command to pf, pf won't necessarily
1151 * reset vf, it depends on what state it exactly is. If it's not
1152 * initialized yet, it won't have vf reset since it's in a certain
1153 * state. If not, it will try to reset. Even vf is reset, pf will
1154 * set I40E_VFGEN_RSTAT to COMPLETE first, then wait 10ms and set
1155 * it to ACTIVE. In this duration, vf may not catch the moment that
1156 * COMPLETE is set. So, for vf, we'll try to wait a long time.
1160 for (i
= 0; i
< MAX_RESET_WAIT_CNT
; i
++) {
1161 reset
= rd32(hw
, I40E_VFGEN_RSTAT
) &
1162 I40E_VFGEN_RSTAT_VFR_STATE_MASK
;
1163 reset
= reset
>> I40E_VFGEN_RSTAT_VFR_STATE_SHIFT
;
1164 if (I40E_VFR_COMPLETED
== reset
|| I40E_VFR_VFACTIVE
== reset
)
1170 if (i
>= MAX_RESET_WAIT_CNT
) {
1171 PMD_INIT_LOG(ERR
, "Reset VF NIC failed");
1179 i40evf_init_vf(struct rte_eth_dev
*dev
)
1182 struct i40e_hw
*hw
= I40E_DEV_PRIVATE_TO_HW(dev
->data
->dev_private
);
1183 struct i40e_vf
*vf
= I40EVF_DEV_PRIVATE_TO_VF(dev
->data
->dev_private
);
1184 struct ether_addr
*p_mac_addr
;
1186 i40e_calc_itr_interval(I40E_QUEUE_ITR_INTERVAL_MAX
);
1188 vf
->adapter
= I40E_DEV_PRIVATE_TO_ADAPTER(dev
->data
->dev_private
);
1189 vf
->dev_data
= dev
->data
;
1190 err
= i40e_set_mac_type(hw
);
1192 PMD_INIT_LOG(ERR
, "set_mac_type failed: %d", err
);
1196 i40e_init_adminq_parameter(hw
);
1197 err
= i40e_init_adminq(hw
);
1199 PMD_INIT_LOG(ERR
, "init_adminq failed: %d", err
);
1203 /* Reset VF and wait until it's complete */
1204 if (i40evf_reset_vf(hw
)) {
1205 PMD_INIT_LOG(ERR
, "reset NIC failed");
1209 /* VF reset, shutdown admin queue and initialize again */
1210 if (i40e_shutdown_adminq(hw
) != I40E_SUCCESS
) {
1211 PMD_INIT_LOG(ERR
, "i40e_shutdown_adminq failed");
1215 i40e_init_adminq_parameter(hw
);
1216 if (i40e_init_adminq(hw
) != I40E_SUCCESS
) {
1217 PMD_INIT_LOG(ERR
, "init_adminq failed");
1220 vf
->aq_resp
= rte_zmalloc("vf_aq_resp", I40E_AQ_BUF_SZ
, 0);
1222 PMD_INIT_LOG(ERR
, "unable to allocate vf_aq_resp memory");
1225 if (i40evf_check_api_version(dev
) != 0) {
1226 PMD_INIT_LOG(ERR
, "check_api version failed");
1229 bufsz
= sizeof(struct i40e_virtchnl_vf_resource
) +
1230 (I40E_MAX_VF_VSI
* sizeof(struct i40e_virtchnl_vsi_resource
));
1231 vf
->vf_res
= rte_zmalloc("vf_res", bufsz
, 0);
1233 PMD_INIT_LOG(ERR
, "unable to allocate vf_res memory");
1237 if (i40evf_get_vf_resource(dev
) != 0) {
1238 PMD_INIT_LOG(ERR
, "i40evf_get_vf_config failed");
1242 /* got VF config message back from PF, now we can parse it */
1243 for (i
= 0; i
< vf
->vf_res
->num_vsis
; i
++) {
1244 if (vf
->vf_res
->vsi_res
[i
].vsi_type
== I40E_VSI_SRIOV
)
1245 vf
->vsi_res
= &vf
->vf_res
->vsi_res
[i
];
1249 PMD_INIT_LOG(ERR
, "no LAN VSI found");
1253 if (hw
->mac
.type
== I40E_MAC_X722_VF
)
1254 vf
->flags
= I40E_FLAG_RSS_AQ_CAPABLE
;
1255 vf
->vsi
.vsi_id
= vf
->vsi_res
->vsi_id
;
1256 vf
->vsi
.type
= vf
->vsi_res
->vsi_type
;
1257 vf
->vsi
.nb_qps
= vf
->vsi_res
->num_queue_pairs
;
1258 vf
->vsi
.adapter
= I40E_DEV_PRIVATE_TO_ADAPTER(dev
->data
->dev_private
);
1260 /* Store the MAC address configured by host, or generate random one */
1261 p_mac_addr
= (struct ether_addr
*)(vf
->vsi_res
->default_mac_addr
);
1262 if (is_valid_assigned_ether_addr(p_mac_addr
)) /* Configured by host */
1263 ether_addr_copy(p_mac_addr
, (struct ether_addr
*)hw
->mac
.addr
);
1265 eth_random_addr(hw
->mac
.addr
); /* Generate a random one */
1267 /* If the PF host is not DPDK, set the interval of ITR0 to max*/
1268 if (vf
->version_major
!= I40E_DPDK_VERSION_MAJOR
) {
1269 I40E_WRITE_REG(hw
, I40E_VFINT_DYN_CTL01
,
1270 (I40E_ITR_INDEX_DEFAULT
<<
1271 I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT
) |
1273 I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT
));
1274 I40EVF_WRITE_FLUSH(hw
);
1280 rte_free(vf
->vf_res
);
1282 i40e_shutdown_adminq(hw
); /* ignore error */
1288 i40evf_uninit_vf(struct rte_eth_dev
*dev
)
1290 struct i40e_vf
*vf
= I40EVF_DEV_PRIVATE_TO_VF(dev
->data
->dev_private
);
1291 struct i40e_hw
*hw
= I40E_DEV_PRIVATE_TO_HW(dev
->data
->dev_private
);
1293 PMD_INIT_FUNC_TRACE();
1295 if (hw
->adapter_stopped
== 0)
1296 i40evf_dev_close(dev
);
1297 rte_free(vf
->vf_res
);
1299 rte_free(vf
->aq_resp
);
1306 i40evf_handle_pf_event(__rte_unused
struct rte_eth_dev
*dev
,
1308 __rte_unused
uint16_t msglen
)
1310 struct i40e_virtchnl_pf_event
*pf_msg
=
1311 (struct i40e_virtchnl_pf_event
*)msg
;
1312 struct i40e_vf
*vf
= I40EVF_DEV_PRIVATE_TO_VF(dev
->data
->dev_private
);
1314 switch (pf_msg
->event
) {
1315 case I40E_VIRTCHNL_EVENT_RESET_IMPENDING
:
1316 PMD_DRV_LOG(DEBUG
, "VIRTCHNL_EVENT_RESET_IMPENDING event\n");
1317 _rte_eth_dev_callback_process(dev
, RTE_ETH_EVENT_INTR_RESET
, NULL
);
1319 case I40E_VIRTCHNL_EVENT_LINK_CHANGE
:
1320 PMD_DRV_LOG(DEBUG
, "VIRTCHNL_EVENT_LINK_CHANGE event\n");
1321 vf
->link_up
= pf_msg
->event_data
.link_event
.link_status
;
1322 vf
->link_speed
= pf_msg
->event_data
.link_event
.link_speed
;
1324 case I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE
:
1325 PMD_DRV_LOG(DEBUG
, "VIRTCHNL_EVENT_PF_DRIVER_CLOSE event\n");
1328 PMD_DRV_LOG(ERR
, " unknown event received %u", pf_msg
->event
);
1334 i40evf_handle_aq_msg(struct rte_eth_dev
*dev
)
1336 struct i40e_hw
*hw
= I40E_DEV_PRIVATE_TO_HW(dev
->data
->dev_private
);
1337 struct i40e_vf
*vf
= I40EVF_DEV_PRIVATE_TO_VF(dev
->data
->dev_private
);
1338 struct i40e_arq_event_info info
;
1339 struct i40e_virtchnl_msg
*v_msg
;
1340 uint16_t pending
, opcode
;
1343 info
.buf_len
= I40E_AQ_BUF_SZ
;
1345 PMD_DRV_LOG(ERR
, "Buffer for adminq resp should not be NULL");
1348 info
.msg_buf
= vf
->aq_resp
;
1349 v_msg
= (struct i40e_virtchnl_msg
*)&info
.desc
;
1353 ret
= i40e_clean_arq_element(hw
, &info
, &pending
);
1355 if (ret
!= I40E_SUCCESS
) {
1356 PMD_DRV_LOG(INFO
, "Failed to read msg from AdminQ,"
1360 opcode
= rte_le_to_cpu_16(info
.desc
.opcode
);
1363 case i40e_aqc_opc_send_msg_to_vf
:
1364 if (v_msg
->v_opcode
== I40E_VIRTCHNL_OP_EVENT
)
1366 i40evf_handle_pf_event(dev
, info
.msg_buf
,
1369 /* read message and it's expected one */
1370 if (v_msg
->v_opcode
== vf
->pend_cmd
) {
1371 vf
->cmd_retval
= v_msg
->v_retval
;
1372 /* prevent compiler reordering */
1373 rte_compiler_barrier();
1376 PMD_DRV_LOG(ERR
, "command mismatch,"
1377 "expect %u, get %u",
1378 vf
->pend_cmd
, v_msg
->v_opcode
);
1379 PMD_DRV_LOG(DEBUG
, "adminq response is received,"
1380 " opcode = %d\n", v_msg
->v_opcode
);
1384 PMD_DRV_LOG(ERR
, "Request %u is not supported yet",
1392 * Interrupt handler triggered by NIC for handling
1393 * specific interrupt. Only adminq interrupt is processed in VF.
1396 * Pointer to interrupt handle.
1398 * The address of parameter (struct rte_eth_dev *) regsitered before.
1404 i40evf_dev_interrupt_handler(__rte_unused
struct rte_intr_handle
*handle
,
1407 struct rte_eth_dev
*dev
= (struct rte_eth_dev
*)param
;
1408 struct i40e_hw
*hw
= I40E_DEV_PRIVATE_TO_HW(dev
->data
->dev_private
);
1411 i40evf_disable_irq0(hw
);
1413 /* read out interrupt causes */
1414 icr0
= I40E_READ_REG(hw
, I40E_VFINT_ICR01
);
1416 /* No interrupt event indicated */
1417 if (!(icr0
& I40E_VFINT_ICR01_INTEVENT_MASK
)) {
1418 PMD_DRV_LOG(DEBUG
, "No interrupt event, nothing to do\n");
1422 if (icr0
& I40E_VFINT_ICR01_ADMINQ_MASK
) {
1423 PMD_DRV_LOG(DEBUG
, "ICR01_ADMINQ is reported\n");
1424 i40evf_handle_aq_msg(dev
);
1427 /* Link Status Change interrupt */
1428 if (icr0
& I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK
)
1429 PMD_DRV_LOG(DEBUG
, "LINK_STAT_CHANGE is reported,"
1433 i40evf_enable_irq0(hw
);
1434 rte_intr_enable(&dev
->pci_dev
->intr_handle
);
1438 i40evf_dev_init(struct rte_eth_dev
*eth_dev
)
1440 struct i40e_hw
*hw
= I40E_DEV_PRIVATE_TO_HW(\
1441 eth_dev
->data
->dev_private
);
1442 struct rte_pci_device
*pci_dev
= eth_dev
->pci_dev
;
1444 PMD_INIT_FUNC_TRACE();
1446 /* assign ops func pointer */
1447 eth_dev
->dev_ops
= &i40evf_eth_dev_ops
;
1448 eth_dev
->rx_pkt_burst
= &i40e_recv_pkts
;
1449 eth_dev
->tx_pkt_burst
= &i40e_xmit_pkts
;
1452 * For secondary processes, we don't initialise any further as primary
1453 * has already done this work.
1455 if (rte_eal_process_type() != RTE_PROC_PRIMARY
){
1456 i40e_set_rx_function(eth_dev
);
1457 i40e_set_tx_function(eth_dev
);
1461 rte_eth_copy_pci_info(eth_dev
, eth_dev
->pci_dev
);
1463 hw
->vendor_id
= eth_dev
->pci_dev
->id
.vendor_id
;
1464 hw
->device_id
= eth_dev
->pci_dev
->id
.device_id
;
1465 hw
->subsystem_vendor_id
= eth_dev
->pci_dev
->id
.subsystem_vendor_id
;
1466 hw
->subsystem_device_id
= eth_dev
->pci_dev
->id
.subsystem_device_id
;
1467 hw
->bus
.device
= eth_dev
->pci_dev
->addr
.devid
;
1468 hw
->bus
.func
= eth_dev
->pci_dev
->addr
.function
;
1469 hw
->hw_addr
= (void *)eth_dev
->pci_dev
->mem_resource
[0].addr
;
1470 hw
->adapter_stopped
= 0;
1472 if(i40evf_init_vf(eth_dev
) != 0) {
1473 PMD_INIT_LOG(ERR
, "Init vf failed");
1477 /* register callback func to eal lib */
1478 rte_intr_callback_register(&pci_dev
->intr_handle
,
1479 i40evf_dev_interrupt_handler
, (void *)eth_dev
);
1481 /* enable uio intr after callback register */
1482 rte_intr_enable(&pci_dev
->intr_handle
);
1484 /* configure and enable device interrupt */
1485 i40evf_enable_irq0(hw
);
1488 eth_dev
->data
->mac_addrs
= rte_zmalloc("i40evf_mac",
1489 ETHER_ADDR_LEN
* I40E_NUM_MACADDR_MAX
,
1491 if (eth_dev
->data
->mac_addrs
== NULL
) {
1492 PMD_INIT_LOG(ERR
, "Failed to allocate %d bytes needed to"
1493 " store MAC addresses",
1494 ETHER_ADDR_LEN
* I40E_NUM_MACADDR_MAX
);
1497 ether_addr_copy((struct ether_addr
*)hw
->mac
.addr
,
1498 ð_dev
->data
->mac_addrs
[0]);
1504 i40evf_dev_uninit(struct rte_eth_dev
*eth_dev
)
1506 PMD_INIT_FUNC_TRACE();
1508 if (rte_eal_process_type() != RTE_PROC_PRIMARY
)
1511 eth_dev
->dev_ops
= NULL
;
1512 eth_dev
->rx_pkt_burst
= NULL
;
1513 eth_dev
->tx_pkt_burst
= NULL
;
1515 if (i40evf_uninit_vf(eth_dev
) != 0) {
1516 PMD_INIT_LOG(ERR
, "i40evf_uninit_vf failed");
1520 rte_free(eth_dev
->data
->mac_addrs
);
1521 eth_dev
->data
->mac_addrs
= NULL
;
1526 * virtual function driver struct
1528 static struct eth_driver rte_i40evf_pmd
= {
1530 .id_table
= pci_id_i40evf_map
,
1531 .drv_flags
= RTE_PCI_DRV_NEED_MAPPING
| RTE_PCI_DRV_DETACHABLE
,
1532 .probe
= rte_eth_dev_pci_probe
,
1533 .remove
= rte_eth_dev_pci_remove
,
1535 .eth_dev_init
= i40evf_dev_init
,
1536 .eth_dev_uninit
= i40evf_dev_uninit
,
1537 .dev_private_size
= sizeof(struct i40e_adapter
),
1540 RTE_PMD_REGISTER_PCI(net_i40e_vf
, rte_i40evf_pmd
.pci_drv
);
1541 RTE_PMD_REGISTER_PCI_TABLE(net_i40e_vf
, pci_id_i40evf_map
);
1544 i40evf_dev_configure(struct rte_eth_dev
*dev
)
1546 struct i40e_adapter
*ad
=
1547 I40E_DEV_PRIVATE_TO_ADAPTER(dev
->data
->dev_private
);
1548 struct rte_eth_conf
*conf
= &dev
->data
->dev_conf
;
1551 /* Initialize to TRUE. If any of Rx queues doesn't meet the bulk
1552 * allocation or vector Rx preconditions we will reset it.
1554 ad
->rx_bulk_alloc_allowed
= true;
1555 ad
->rx_vec_allowed
= true;
1556 ad
->tx_simple_allowed
= true;
1557 ad
->tx_vec_allowed
= true;
1559 /* For non-DPDK PF drivers, VF has no ability to disable HW
1560 * CRC strip, and is implicitly enabled by the PF.
1562 if (!conf
->rxmode
.hw_strip_crc
) {
1563 vf
= I40EVF_DEV_PRIVATE_TO_VF(dev
->data
->dev_private
);
1564 if ((vf
->version_major
== I40E_VIRTCHNL_VERSION_MAJOR
) &&
1565 (vf
->version_minor
<= I40E_VIRTCHNL_VERSION_MINOR
)) {
1566 /* Peer is running non-DPDK PF driver. */
1567 PMD_INIT_LOG(ERR
, "VF can't disable HW CRC Strip");
1572 return i40evf_init_vlan(dev
);
1576 i40evf_init_vlan(struct rte_eth_dev
*dev
)
1578 struct rte_eth_dev_data
*data
= dev
->data
;
1581 /* Apply vlan offload setting */
1582 i40evf_vlan_offload_set(dev
, ETH_VLAN_STRIP_MASK
);
1584 /* Apply pvid setting */
1585 ret
= i40evf_vlan_pvid_set(dev
, data
->dev_conf
.txmode
.pvid
,
1586 data
->dev_conf
.txmode
.hw_vlan_insert_pvid
);
1591 i40evf_vlan_offload_set(struct rte_eth_dev
*dev
, int mask
)
1593 bool enable_vlan_strip
= 0;
1594 struct rte_eth_conf
*dev_conf
= &dev
->data
->dev_conf
;
1595 struct i40e_vf
*vf
= I40EVF_DEV_PRIVATE_TO_VF(dev
->data
->dev_private
);
1597 /* Linux pf host doesn't support vlan offload yet */
1598 if (vf
->version_major
== I40E_DPDK_VERSION_MAJOR
) {
1599 /* Vlan stripping setting */
1600 if (mask
& ETH_VLAN_STRIP_MASK
) {
1601 /* Enable or disable VLAN stripping */
1602 if (dev_conf
->rxmode
.hw_vlan_strip
)
1603 enable_vlan_strip
= 1;
1605 enable_vlan_strip
= 0;
1607 i40evf_config_vlan_offload(dev
, enable_vlan_strip
);
1613 i40evf_vlan_pvid_set(struct rte_eth_dev
*dev
, uint16_t pvid
, int on
)
1615 struct rte_eth_conf
*dev_conf
= &dev
->data
->dev_conf
;
1616 struct i40e_vsi_vlan_pvid_info info
;
1617 struct i40e_vf
*vf
= I40EVF_DEV_PRIVATE_TO_VF(dev
->data
->dev_private
);
1619 memset(&info
, 0, sizeof(info
));
1622 /* Linux pf host don't support vlan offload yet */
1623 if (vf
->version_major
== I40E_DPDK_VERSION_MAJOR
) {
1625 info
.config
.pvid
= pvid
;
1627 info
.config
.reject
.tagged
=
1628 dev_conf
->txmode
.hw_vlan_reject_tagged
;
1629 info
.config
.reject
.untagged
=
1630 dev_conf
->txmode
.hw_vlan_reject_untagged
;
1632 return i40evf_config_vlan_pvid(dev
, &info
);
1639 i40evf_dev_rx_queue_start(struct rte_eth_dev
*dev
, uint16_t rx_queue_id
)
1641 struct i40e_rx_queue
*rxq
;
1643 struct i40e_hw
*hw
= I40E_DEV_PRIVATE_TO_HW(dev
->data
->dev_private
);
1645 PMD_INIT_FUNC_TRACE();
1647 if (rx_queue_id
< dev
->data
->nb_rx_queues
) {
1648 rxq
= dev
->data
->rx_queues
[rx_queue_id
];
1650 err
= i40e_alloc_rx_queue_mbufs(rxq
);
1652 PMD_DRV_LOG(ERR
, "Failed to allocate RX queue mbuf");
1658 /* Init the RX tail register. */
1659 I40E_PCI_REG_WRITE(rxq
->qrx_tail
, rxq
->nb_rx_desc
- 1);
1660 I40EVF_WRITE_FLUSH(hw
);
1662 /* Ready to switch the queue on */
1663 err
= i40evf_switch_queue(dev
, TRUE
, rx_queue_id
, TRUE
);
1666 PMD_DRV_LOG(ERR
, "Failed to switch RX queue %u on",
1669 dev
->data
->rx_queue_state
[rx_queue_id
] = RTE_ETH_QUEUE_STATE_STARTED
;
1676 i40evf_dev_rx_queue_stop(struct rte_eth_dev
*dev
, uint16_t rx_queue_id
)
1678 struct i40e_rx_queue
*rxq
;
1681 if (rx_queue_id
< dev
->data
->nb_rx_queues
) {
1682 rxq
= dev
->data
->rx_queues
[rx_queue_id
];
1684 err
= i40evf_switch_queue(dev
, TRUE
, rx_queue_id
, FALSE
);
1687 PMD_DRV_LOG(ERR
, "Failed to switch RX queue %u off",
1692 i40e_rx_queue_release_mbufs(rxq
);
1693 i40e_reset_rx_queue(rxq
);
1694 dev
->data
->rx_queue_state
[rx_queue_id
] = RTE_ETH_QUEUE_STATE_STOPPED
;
1701 i40evf_dev_tx_queue_start(struct rte_eth_dev
*dev
, uint16_t tx_queue_id
)
1705 PMD_INIT_FUNC_TRACE();
1707 if (tx_queue_id
< dev
->data
->nb_tx_queues
) {
1709 /* Ready to switch the queue on */
1710 err
= i40evf_switch_queue(dev
, FALSE
, tx_queue_id
, TRUE
);
1713 PMD_DRV_LOG(ERR
, "Failed to switch TX queue %u on",
1716 dev
->data
->tx_queue_state
[tx_queue_id
] = RTE_ETH_QUEUE_STATE_STARTED
;
1723 i40evf_dev_tx_queue_stop(struct rte_eth_dev
*dev
, uint16_t tx_queue_id
)
1725 struct i40e_tx_queue
*txq
;
1728 if (tx_queue_id
< dev
->data
->nb_tx_queues
) {
1729 txq
= dev
->data
->tx_queues
[tx_queue_id
];
1731 err
= i40evf_switch_queue(dev
, FALSE
, tx_queue_id
, FALSE
);
1734 PMD_DRV_LOG(ERR
, "Failed to switch TX queue %u off",
1739 i40e_tx_queue_release_mbufs(txq
);
1740 i40e_reset_tx_queue(txq
);
1741 dev
->data
->tx_queue_state
[tx_queue_id
] = RTE_ETH_QUEUE_STATE_STOPPED
;
/* Add or remove a VLAN filter depending on 'on'. */
static int
i40evf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	int ret;

	if (on)
		ret = i40evf_add_vlan(dev, vlan_id);
	else
		ret = i40evf_del_vlan(dev,vlan_id);

	return ret;
}
1761 i40evf_rxq_init(struct rte_eth_dev
*dev
, struct i40e_rx_queue
*rxq
)
1763 struct i40e_hw
*hw
= I40E_DEV_PRIVATE_TO_HW(dev
->data
->dev_private
);
1764 struct rte_eth_dev_data
*dev_data
= dev
->data
;
1765 struct rte_pktmbuf_pool_private
*mbp_priv
;
1766 uint16_t buf_size
, len
;
1768 rxq
->qrx_tail
= hw
->hw_addr
+ I40E_QRX_TAIL1(rxq
->queue_id
);
1769 I40E_PCI_REG_WRITE(rxq
->qrx_tail
, rxq
->nb_rx_desc
- 1);
1770 I40EVF_WRITE_FLUSH(hw
);
1772 /* Calculate the maximum packet length allowed */
1773 mbp_priv
= rte_mempool_get_priv(rxq
->mp
);
1774 buf_size
= (uint16_t)(mbp_priv
->mbuf_data_room_size
-
1775 RTE_PKTMBUF_HEADROOM
);
1776 rxq
->hs_mode
= i40e_header_split_none
;
1777 rxq
->rx_hdr_len
= 0;
1778 rxq
->rx_buf_len
= RTE_ALIGN(buf_size
, (1 << I40E_RXQ_CTX_DBUFF_SHIFT
));
1779 len
= rxq
->rx_buf_len
* I40E_MAX_CHAINED_RX_BUFFERS
;
1780 rxq
->max_pkt_len
= RTE_MIN(len
,
1781 dev_data
->dev_conf
.rxmode
.max_rx_pkt_len
);
1784 * Check if the jumbo frame and maximum packet length are set correctly
1786 if (dev_data
->dev_conf
.rxmode
.jumbo_frame
== 1) {
1787 if (rxq
->max_pkt_len
<= ETHER_MAX_LEN
||
1788 rxq
->max_pkt_len
> I40E_FRAME_SIZE_MAX
) {
1789 PMD_DRV_LOG(ERR
, "maximum packet length must be "
1790 "larger than %u and smaller than %u, as jumbo "
1791 "frame is enabled", (uint32_t)ETHER_MAX_LEN
,
1792 (uint32_t)I40E_FRAME_SIZE_MAX
);
1793 return I40E_ERR_CONFIG
;
1796 if (rxq
->max_pkt_len
< ETHER_MIN_LEN
||
1797 rxq
->max_pkt_len
> ETHER_MAX_LEN
) {
1798 PMD_DRV_LOG(ERR
, "maximum packet length must be "
1799 "larger than %u and smaller than %u, as jumbo "
1800 "frame is disabled", (uint32_t)ETHER_MIN_LEN
,
1801 (uint32_t)ETHER_MAX_LEN
);
1802 return I40E_ERR_CONFIG
;
1806 if (dev_data
->dev_conf
.rxmode
.enable_scatter
||
1807 (rxq
->max_pkt_len
+ 2 * I40E_VLAN_TAG_SIZE
) > buf_size
) {
1808 dev_data
->scattered_rx
= 1;
1815 i40evf_rx_init(struct rte_eth_dev
*dev
)
1817 struct i40e_vf
*vf
= I40EVF_DEV_PRIVATE_TO_VF(dev
->data
->dev_private
);
1819 int ret
= I40E_SUCCESS
;
1820 struct i40e_rx_queue
**rxq
=
1821 (struct i40e_rx_queue
**)dev
->data
->rx_queues
;
1823 i40evf_config_rss(vf
);
1824 for (i
= 0; i
< dev
->data
->nb_rx_queues
; i
++) {
1825 if (!rxq
[i
] || !rxq
[i
]->q_set
)
1827 ret
= i40evf_rxq_init(dev
, rxq
[i
]);
1828 if (ret
!= I40E_SUCCESS
)
1831 if (ret
== I40E_SUCCESS
)
1832 i40e_set_rx_function(dev
);
1838 i40evf_tx_init(struct rte_eth_dev
*dev
)
1841 struct i40e_tx_queue
**txq
=
1842 (struct i40e_tx_queue
**)dev
->data
->tx_queues
;
1843 struct i40e_hw
*hw
= I40E_DEV_PRIVATE_TO_HW(dev
->data
->dev_private
);
1845 for (i
= 0; i
< dev
->data
->nb_tx_queues
; i
++)
1846 txq
[i
]->qtx_tail
= hw
->hw_addr
+ I40E_QTX_TAIL1(i
);
1848 i40e_set_tx_function(dev
);
1852 i40evf_enable_queues_intr(struct rte_eth_dev
*dev
)
1854 struct i40e_vf
*vf
= I40EVF_DEV_PRIVATE_TO_VF(dev
->data
->dev_private
);
1855 struct i40e_hw
*hw
= I40E_DEV_PRIVATE_TO_HW(dev
->data
->dev_private
);
1856 struct rte_intr_handle
*intr_handle
= &dev
->pci_dev
->intr_handle
;
1858 if (!rte_intr_allow_others(intr_handle
)) {
1860 I40E_VFINT_DYN_CTL01
,
1861 I40E_VFINT_DYN_CTL01_INTENA_MASK
|
1862 I40E_VFINT_DYN_CTL01_CLEARPBA_MASK
|
1863 I40E_VFINT_DYN_CTL01_ITR_INDX_MASK
);
1864 I40EVF_WRITE_FLUSH(hw
);
1868 if (vf
->version_major
== I40E_DPDK_VERSION_MAJOR
)
1869 /* To support DPDK PF host */
1871 I40E_VFINT_DYN_CTLN1(I40EVF_VSI_DEFAULT_MSIX_INTR
- 1),
1872 I40E_VFINT_DYN_CTLN1_INTENA_MASK
|
1873 I40E_VFINT_DYN_CTLN_CLEARPBA_MASK
);
1874 /* If host driver is kernel driver, do nothing.
1875 * Interrupt 0 is used for rx packets, but don't set
1876 * I40E_VFINT_DYN_CTL01,
1877 * because it is already done in i40evf_enable_irq0.
1880 I40EVF_WRITE_FLUSH(hw
);
1884 i40evf_disable_queues_intr(struct rte_eth_dev
*dev
)
1886 struct i40e_vf
*vf
= I40EVF_DEV_PRIVATE_TO_VF(dev
->data
->dev_private
);
1887 struct i40e_hw
*hw
= I40E_DEV_PRIVATE_TO_HW(dev
->data
->dev_private
);
1888 struct rte_intr_handle
*intr_handle
= &dev
->pci_dev
->intr_handle
;
1890 if (!rte_intr_allow_others(intr_handle
)) {
1891 I40E_WRITE_REG(hw
, I40E_VFINT_DYN_CTL01
,
1892 I40E_VFINT_DYN_CTL01_ITR_INDX_MASK
);
1893 I40EVF_WRITE_FLUSH(hw
);
1897 if (vf
->version_major
== I40E_DPDK_VERSION_MAJOR
)
1899 I40E_VFINT_DYN_CTLN1(I40EVF_VSI_DEFAULT_MSIX_INTR
1902 /* If host driver is kernel driver, do nothing.
1903 * Interrupt 0 is used for rx packets, but don't zero
1904 * I40E_VFINT_DYN_CTL01,
1905 * because interrupt 0 is also used for adminq processing.
1908 I40EVF_WRITE_FLUSH(hw
);
1912 i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev
*dev
, uint16_t queue_id
)
1914 struct rte_intr_handle
*intr_handle
= &dev
->pci_dev
->intr_handle
;
1915 struct i40e_hw
*hw
= I40E_DEV_PRIVATE_TO_HW(dev
->data
->dev_private
);
1917 i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL
);
1920 msix_intr
= intr_handle
->intr_vec
[queue_id
];
1921 if (msix_intr
== I40E_MISC_VEC_ID
)
1922 I40E_WRITE_REG(hw
, I40E_VFINT_DYN_CTL01
,
1923 I40E_VFINT_DYN_CTL01_INTENA_MASK
|
1924 I40E_VFINT_DYN_CTL01_CLEARPBA_MASK
|
1925 (0 << I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT
) |
1927 I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT
));
1930 I40E_VFINT_DYN_CTLN1(msix_intr
-
1932 I40E_VFINT_DYN_CTLN1_INTENA_MASK
|
1933 I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK
|
1934 (0 << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT
) |
1936 I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT
));
1938 I40EVF_WRITE_FLUSH(hw
);
1940 rte_intr_enable(&dev
->pci_dev
->intr_handle
);
1946 i40evf_dev_rx_queue_intr_disable(struct rte_eth_dev
*dev
, uint16_t queue_id
)
1948 struct rte_intr_handle
*intr_handle
= &dev
->pci_dev
->intr_handle
;
1949 struct i40e_hw
*hw
= I40E_DEV_PRIVATE_TO_HW(dev
->data
->dev_private
);
1952 msix_intr
= intr_handle
->intr_vec
[queue_id
];
1953 if (msix_intr
== I40E_MISC_VEC_ID
)
1954 I40E_WRITE_REG(hw
, I40E_VFINT_DYN_CTL01
, 0);
1957 I40E_VFINT_DYN_CTLN1(msix_intr
-
1961 I40EVF_WRITE_FLUSH(hw
);
1967 i40evf_add_del_all_mac_addr(struct rte_eth_dev
*dev
, bool add
)
1969 struct i40e_virtchnl_ether_addr_list
*list
;
1970 struct i40e_vf
*vf
= I40EVF_DEV_PRIVATE_TO_VF(dev
->data
->dev_private
);
1975 struct ether_addr
*addr
;
1976 struct vf_cmd_info args
;
1980 len
= sizeof(struct i40e_virtchnl_ether_addr_list
);
1981 for (i
= begin
; i
< I40E_NUM_MACADDR_MAX
; i
++, next_begin
++) {
1982 if (is_zero_ether_addr(&dev
->data
->mac_addrs
[i
]))
1984 len
+= sizeof(struct i40e_virtchnl_ether_addr
);
1985 if (len
>= I40E_AQ_BUF_SZ
) {
1991 list
= rte_zmalloc("i40evf_del_mac_buffer", len
, 0);
1993 for (i
= begin
; i
< next_begin
; i
++) {
1994 addr
= &dev
->data
->mac_addrs
[i
];
1995 if (is_zero_ether_addr(addr
))
1997 (void)rte_memcpy(list
->list
[j
].addr
, addr
->addr_bytes
,
1998 sizeof(addr
->addr_bytes
));
1999 PMD_DRV_LOG(DEBUG
, "add/rm mac:%x:%x:%x:%x:%x:%x",
2000 addr
->addr_bytes
[0], addr
->addr_bytes
[1],
2001 addr
->addr_bytes
[2], addr
->addr_bytes
[3],
2002 addr
->addr_bytes
[4], addr
->addr_bytes
[5]);
2005 list
->vsi_id
= vf
->vsi_res
->vsi_id
;
2006 list
->num_elements
= j
;
2007 args
.ops
= add
? I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS
:
2008 I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS
;
2009 args
.in_args
= (uint8_t *)list
;
2010 args
.in_args_size
= len
;
2011 args
.out_buffer
= vf
->aq_resp
;
2012 args
.out_size
= I40E_AQ_BUF_SZ
;
2013 err
= i40evf_execute_vf_cmd(dev
, &args
);
2015 PMD_DRV_LOG(ERR
, "fail to execute command %s",
2016 add
? "OP_ADD_ETHER_ADDRESS" :
2017 "OP_DEL_ETHER_ADDRESS");
2020 } while (begin
< I40E_NUM_MACADDR_MAX
);
2024 i40evf_dev_start(struct rte_eth_dev
*dev
)
2026 struct i40e_vf
*vf
= I40EVF_DEV_PRIVATE_TO_VF(dev
->data
->dev_private
);
2027 struct i40e_hw
*hw
= I40E_DEV_PRIVATE_TO_HW(dev
->data
->dev_private
);
2028 struct rte_intr_handle
*intr_handle
= &dev
->pci_dev
->intr_handle
;
2029 uint32_t intr_vector
= 0;
2031 PMD_INIT_FUNC_TRACE();
2033 hw
->adapter_stopped
= 0;
2035 vf
->max_pkt_len
= dev
->data
->dev_conf
.rxmode
.max_rx_pkt_len
;
2036 vf
->num_queue_pairs
= RTE_MAX(dev
->data
->nb_rx_queues
,
2037 dev
->data
->nb_tx_queues
);
2039 /* check and configure queue intr-vector mapping */
2040 if (dev
->data
->dev_conf
.intr_conf
.rxq
!= 0) {
2041 intr_vector
= dev
->data
->nb_rx_queues
;
2042 if (rte_intr_efd_enable(intr_handle
, intr_vector
))
2046 if (rte_intr_dp_is_en(intr_handle
) && !intr_handle
->intr_vec
) {
2047 intr_handle
->intr_vec
=
2048 rte_zmalloc("intr_vec",
2049 dev
->data
->nb_rx_queues
* sizeof(int), 0);
2050 if (!intr_handle
->intr_vec
) {
2051 PMD_INIT_LOG(ERR
, "Failed to allocate %d rx_queues"
2052 " intr_vec\n", dev
->data
->nb_rx_queues
);
2057 if (i40evf_rx_init(dev
) != 0){
2058 PMD_DRV_LOG(ERR
, "failed to do RX init");
2062 i40evf_tx_init(dev
);
2064 if (i40evf_configure_queues(dev
) != 0) {
2065 PMD_DRV_LOG(ERR
, "configure queues failed");
2068 if (i40evf_config_irq_map(dev
)) {
2069 PMD_DRV_LOG(ERR
, "config_irq_map failed");
2073 /* Set all mac addrs */
2074 i40evf_add_del_all_mac_addr(dev
, TRUE
);
2076 if (i40evf_start_queues(dev
) != 0) {
2077 PMD_DRV_LOG(ERR
, "enable queues failed");
2081 i40evf_enable_queues_intr(dev
);
2085 i40evf_add_del_all_mac_addr(dev
, FALSE
);
2091 i40evf_dev_stop(struct rte_eth_dev
*dev
)
2093 struct rte_intr_handle
*intr_handle
= &dev
->pci_dev
->intr_handle
;
2095 PMD_INIT_FUNC_TRACE();
2097 i40evf_stop_queues(dev
);
2098 i40evf_disable_queues_intr(dev
);
2099 i40e_dev_clear_queues(dev
);
2101 /* Clean datapath event and queue/vec mapping */
2102 rte_intr_efd_disable(intr_handle
);
2103 if (intr_handle
->intr_vec
) {
2104 rte_free(intr_handle
->intr_vec
);
2105 intr_handle
->intr_vec
= NULL
;
2107 /* remove all mac addrs */
2108 i40evf_add_del_all_mac_addr(dev
, FALSE
);
2113 i40evf_dev_link_update(struct rte_eth_dev
*dev
,
2114 __rte_unused
int wait_to_complete
)
2116 struct rte_eth_link new_link
;
2117 struct i40e_vf
*vf
= I40EVF_DEV_PRIVATE_TO_VF(dev
->data
->dev_private
);
2119 * DPDK pf host provide interfacet to acquire link status
2120 * while Linux driver does not
2123 /* Linux driver PF host */
2124 switch (vf
->link_speed
) {
2125 case I40E_LINK_SPEED_100MB
:
2126 new_link
.link_speed
= ETH_SPEED_NUM_100M
;
2128 case I40E_LINK_SPEED_1GB
:
2129 new_link
.link_speed
= ETH_SPEED_NUM_1G
;
2131 case I40E_LINK_SPEED_10GB
:
2132 new_link
.link_speed
= ETH_SPEED_NUM_10G
;
2134 case I40E_LINK_SPEED_20GB
:
2135 new_link
.link_speed
= ETH_SPEED_NUM_20G
;
2137 case I40E_LINK_SPEED_40GB
:
2138 new_link
.link_speed
= ETH_SPEED_NUM_40G
;
2141 new_link
.link_speed
= ETH_SPEED_NUM_100M
;
2144 /* full duplex only */
2145 new_link
.link_duplex
= ETH_LINK_FULL_DUPLEX
;
2146 new_link
.link_status
= vf
->link_up
? ETH_LINK_UP
:
2149 i40evf_dev_atomic_write_link_status(dev
, &new_link
);
2155 i40evf_dev_promiscuous_enable(struct rte_eth_dev
*dev
)
2157 struct i40e_vf
*vf
= I40EVF_DEV_PRIVATE_TO_VF(dev
->data
->dev_private
);
2160 /* If enabled, just return */
2161 if (vf
->promisc_unicast_enabled
)
2164 ret
= i40evf_config_promisc(dev
, 1, vf
->promisc_multicast_enabled
);
2166 vf
->promisc_unicast_enabled
= TRUE
;
2170 i40evf_dev_promiscuous_disable(struct rte_eth_dev
*dev
)
2172 struct i40e_vf
*vf
= I40EVF_DEV_PRIVATE_TO_VF(dev
->data
->dev_private
);
2175 /* If disabled, just return */
2176 if (!vf
->promisc_unicast_enabled
)
2179 ret
= i40evf_config_promisc(dev
, 0, vf
->promisc_multicast_enabled
);
2181 vf
->promisc_unicast_enabled
= FALSE
;
2185 i40evf_dev_allmulticast_enable(struct rte_eth_dev
*dev
)
2187 struct i40e_vf
*vf
= I40EVF_DEV_PRIVATE_TO_VF(dev
->data
->dev_private
);
2190 /* If enabled, just return */
2191 if (vf
->promisc_multicast_enabled
)
2194 ret
= i40evf_config_promisc(dev
, vf
->promisc_unicast_enabled
, 1);
2196 vf
->promisc_multicast_enabled
= TRUE
;
2200 i40evf_dev_allmulticast_disable(struct rte_eth_dev
*dev
)
2202 struct i40e_vf
*vf
= I40EVF_DEV_PRIVATE_TO_VF(dev
->data
->dev_private
);
2205 /* If enabled, just return */
2206 if (!vf
->promisc_multicast_enabled
)
2209 ret
= i40evf_config_promisc(dev
, vf
->promisc_unicast_enabled
, 0);
2211 vf
->promisc_multicast_enabled
= FALSE
;
2215 i40evf_dev_info_get(struct rte_eth_dev
*dev
, struct rte_eth_dev_info
*dev_info
)
2217 struct i40e_vf
*vf
= I40EVF_DEV_PRIVATE_TO_VF(dev
->data
->dev_private
);
2219 memset(dev_info
, 0, sizeof(*dev_info
));
2220 dev_info
->max_rx_queues
= vf
->vsi_res
->num_queue_pairs
;
2221 dev_info
->max_tx_queues
= vf
->vsi_res
->num_queue_pairs
;
2222 dev_info
->min_rx_bufsize
= I40E_BUF_SIZE_MIN
;
2223 dev_info
->max_rx_pktlen
= I40E_FRAME_SIZE_MAX
;
2224 dev_info
->hash_key_size
= (I40E_VFQF_HKEY_MAX_INDEX
+ 1) * sizeof(uint32_t);
2225 dev_info
->reta_size
= ETH_RSS_RETA_SIZE_64
;
2226 dev_info
->flow_type_rss_offloads
= I40E_RSS_OFFLOAD_ALL
;
2227 dev_info
->max_mac_addrs
= I40E_NUM_MACADDR_MAX
;
2228 dev_info
->rx_offload_capa
=
2229 DEV_RX_OFFLOAD_VLAN_STRIP
|
2230 DEV_RX_OFFLOAD_QINQ_STRIP
|
2231 DEV_RX_OFFLOAD_IPV4_CKSUM
|
2232 DEV_RX_OFFLOAD_UDP_CKSUM
|
2233 DEV_RX_OFFLOAD_TCP_CKSUM
;
2234 dev_info
->tx_offload_capa
=
2235 DEV_TX_OFFLOAD_VLAN_INSERT
|
2236 DEV_TX_OFFLOAD_QINQ_INSERT
|
2237 DEV_TX_OFFLOAD_IPV4_CKSUM
|
2238 DEV_TX_OFFLOAD_UDP_CKSUM
|
2239 DEV_TX_OFFLOAD_TCP_CKSUM
|
2240 DEV_TX_OFFLOAD_SCTP_CKSUM
;
2242 dev_info
->default_rxconf
= (struct rte_eth_rxconf
) {
2244 .pthresh
= I40E_DEFAULT_RX_PTHRESH
,
2245 .hthresh
= I40E_DEFAULT_RX_HTHRESH
,
2246 .wthresh
= I40E_DEFAULT_RX_WTHRESH
,
2248 .rx_free_thresh
= I40E_DEFAULT_RX_FREE_THRESH
,
2252 dev_info
->default_txconf
= (struct rte_eth_txconf
) {
2254 .pthresh
= I40E_DEFAULT_TX_PTHRESH
,
2255 .hthresh
= I40E_DEFAULT_TX_HTHRESH
,
2256 .wthresh
= I40E_DEFAULT_TX_WTHRESH
,
2258 .tx_free_thresh
= I40E_DEFAULT_TX_FREE_THRESH
,
2259 .tx_rs_thresh
= I40E_DEFAULT_TX_RSBIT_THRESH
,
2260 .txq_flags
= ETH_TXQ_FLAGS_NOMULTSEGS
|
2261 ETH_TXQ_FLAGS_NOOFFLOADS
,
2264 dev_info
->rx_desc_lim
= (struct rte_eth_desc_lim
) {
2265 .nb_max
= I40E_MAX_RING_DESC
,
2266 .nb_min
= I40E_MIN_RING_DESC
,
2267 .nb_align
= I40E_ALIGN_RING_DESC
,
2270 dev_info
->tx_desc_lim
= (struct rte_eth_desc_lim
) {
2271 .nb_max
= I40E_MAX_RING_DESC
,
2272 .nb_min
= I40E_MIN_RING_DESC
,
2273 .nb_align
= I40E_ALIGN_RING_DESC
,
2278 i40evf_dev_stats_get(struct rte_eth_dev
*dev
, struct rte_eth_stats
*stats
)
2280 if (i40evf_get_statics(dev
, stats
))
2281 PMD_DRV_LOG(ERR
, "Get statics failed");
2285 i40evf_dev_close(struct rte_eth_dev
*dev
)
2287 struct i40e_hw
*hw
= I40E_DEV_PRIVATE_TO_HW(dev
->data
->dev_private
);
2288 struct rte_pci_device
*pci_dev
= dev
->pci_dev
;
2290 i40evf_dev_stop(dev
);
2291 hw
->adapter_stopped
= 1;
2292 i40e_dev_free_queues(dev
);
2293 i40evf_reset_vf(hw
);
2294 i40e_shutdown_adminq(hw
);
2295 /* disable uio intr before callback unregister */
2296 rte_intr_disable(&pci_dev
->intr_handle
);
2298 /* unregister callback func from eal lib */
2299 rte_intr_callback_unregister(&pci_dev
->intr_handle
,
2300 i40evf_dev_interrupt_handler
, (void *)dev
);
2301 i40evf_disable_irq0(hw
);
2305 i40evf_get_rss_lut(struct i40e_vsi
*vsi
, uint8_t *lut
, uint16_t lut_size
)
2307 struct i40e_vf
*vf
= I40E_VSI_TO_VF(vsi
);
2308 struct i40e_hw
*hw
= I40E_VSI_TO_HW(vsi
);
2314 if (vf
->flags
& I40E_FLAG_RSS_AQ_CAPABLE
) {
2315 ret
= i40e_aq_get_rss_lut(hw
, vsi
->vsi_id
, FALSE
,
2318 PMD_DRV_LOG(ERR
, "Failed to get RSS lookup table");
2322 uint32_t *lut_dw
= (uint32_t *)lut
;
2323 uint16_t i
, lut_size_dw
= lut_size
/ 4;
2325 for (i
= 0; i
< lut_size_dw
; i
++)
2326 lut_dw
[i
] = I40E_READ_REG(hw
, I40E_VFQF_HLUT(i
));
2333 i40evf_set_rss_lut(struct i40e_vsi
*vsi
, uint8_t *lut
, uint16_t lut_size
)
2342 vf
= I40E_VSI_TO_VF(vsi
);
2343 hw
= I40E_VSI_TO_HW(vsi
);
2345 if (vf
->flags
& I40E_FLAG_RSS_AQ_CAPABLE
) {
2346 ret
= i40e_aq_set_rss_lut(hw
, vsi
->vsi_id
, FALSE
,
2349 PMD_DRV_LOG(ERR
, "Failed to set RSS lookup table");
2353 uint32_t *lut_dw
= (uint32_t *)lut
;
2354 uint16_t i
, lut_size_dw
= lut_size
/ 4;
2356 for (i
= 0; i
< lut_size_dw
; i
++)
2357 I40E_WRITE_REG(hw
, I40E_VFQF_HLUT(i
), lut_dw
[i
]);
2358 I40EVF_WRITE_FLUSH(hw
);
2365 i40evf_dev_rss_reta_update(struct rte_eth_dev
*dev
,
2366 struct rte_eth_rss_reta_entry64
*reta_conf
,
2369 struct i40e_vf
*vf
= I40EVF_DEV_PRIVATE_TO_VF(dev
->data
->dev_private
);
2371 uint16_t i
, idx
, shift
;
2374 if (reta_size
!= ETH_RSS_RETA_SIZE_64
) {
2375 PMD_DRV_LOG(ERR
, "The size of hash lookup table configured "
2376 "(%d) doesn't match the number of hardware can "
2377 "support (%d)\n", reta_size
, ETH_RSS_RETA_SIZE_64
);
2381 lut
= rte_zmalloc("i40e_rss_lut", reta_size
, 0);
2383 PMD_DRV_LOG(ERR
, "No memory can be allocated");
2386 ret
= i40evf_get_rss_lut(&vf
->vsi
, lut
, reta_size
);
2389 for (i
= 0; i
< reta_size
; i
++) {
2390 idx
= i
/ RTE_RETA_GROUP_SIZE
;
2391 shift
= i
% RTE_RETA_GROUP_SIZE
;
2392 if (reta_conf
[idx
].mask
& (1ULL << shift
))
2393 lut
[i
] = reta_conf
[idx
].reta
[shift
];
2395 ret
= i40evf_set_rss_lut(&vf
->vsi
, lut
, reta_size
);
2404 i40evf_dev_rss_reta_query(struct rte_eth_dev
*dev
,
2405 struct rte_eth_rss_reta_entry64
*reta_conf
,
2408 struct i40e_vf
*vf
= I40EVF_DEV_PRIVATE_TO_VF(dev
->data
->dev_private
);
2409 uint16_t i
, idx
, shift
;
2413 if (reta_size
!= ETH_RSS_RETA_SIZE_64
) {
2414 PMD_DRV_LOG(ERR
, "The size of hash lookup table configured "
2415 "(%d) doesn't match the number of hardware can "
2416 "support (%d)\n", reta_size
, ETH_RSS_RETA_SIZE_64
);
2420 lut
= rte_zmalloc("i40e_rss_lut", reta_size
, 0);
2422 PMD_DRV_LOG(ERR
, "No memory can be allocated");
2426 ret
= i40evf_get_rss_lut(&vf
->vsi
, lut
, reta_size
);
2429 for (i
= 0; i
< reta_size
; i
++) {
2430 idx
= i
/ RTE_RETA_GROUP_SIZE
;
2431 shift
= i
% RTE_RETA_GROUP_SIZE
;
2432 if (reta_conf
[idx
].mask
& (1ULL << shift
))
2433 reta_conf
[idx
].reta
[shift
] = lut
[i
];
2443 i40evf_set_rss_key(struct i40e_vsi
*vsi
, uint8_t *key
, uint8_t key_len
)
2445 struct i40e_vf
*vf
= I40E_VSI_TO_VF(vsi
);
2446 struct i40e_hw
*hw
= I40E_VSI_TO_HW(vsi
);
2449 if (!key
|| key_len
== 0) {
2450 PMD_DRV_LOG(DEBUG
, "No key to be configured");
2452 } else if (key_len
!= (I40E_VFQF_HKEY_MAX_INDEX
+ 1) *
2454 PMD_DRV_LOG(ERR
, "Invalid key length %u", key_len
);
2458 if (vf
->flags
& I40E_FLAG_RSS_AQ_CAPABLE
) {
2459 struct i40e_aqc_get_set_rss_key_data
*key_dw
=
2460 (struct i40e_aqc_get_set_rss_key_data
*)key
;
2462 ret
= i40e_aq_set_rss_key(hw
, vsi
->vsi_id
, key_dw
);
2464 PMD_INIT_LOG(ERR
, "Failed to configure RSS key "
2467 uint32_t *hash_key
= (uint32_t *)key
;
2470 for (i
= 0; i
<= I40E_VFQF_HKEY_MAX_INDEX
; i
++)
2471 i40e_write_rx_ctl(hw
, I40E_VFQF_HKEY(i
), hash_key
[i
]);
2472 I40EVF_WRITE_FLUSH(hw
);
2479 i40evf_get_rss_key(struct i40e_vsi
*vsi
, uint8_t *key
, uint8_t *key_len
)
2481 struct i40e_vf
*vf
= I40E_VSI_TO_VF(vsi
);
2482 struct i40e_hw
*hw
= I40E_VSI_TO_HW(vsi
);
2485 if (!key
|| !key_len
)
2488 if (vf
->flags
& I40E_FLAG_RSS_AQ_CAPABLE
) {
2489 ret
= i40e_aq_get_rss_key(hw
, vsi
->vsi_id
,
2490 (struct i40e_aqc_get_set_rss_key_data
*)key
);
2492 PMD_INIT_LOG(ERR
, "Failed to get RSS key via AQ");
2496 uint32_t *key_dw
= (uint32_t *)key
;
2499 for (i
= 0; i
<= I40E_VFQF_HKEY_MAX_INDEX
; i
++)
2500 key_dw
[i
] = i40e_read_rx_ctl(hw
, I40E_VFQF_HKEY(i
));
2502 *key_len
= (I40E_VFQF_HKEY_MAX_INDEX
+ 1) * sizeof(uint32_t);
2508 i40evf_hw_rss_hash_set(struct i40e_vf
*vf
, struct rte_eth_rss_conf
*rss_conf
)
2510 struct i40e_hw
*hw
= I40E_VF_TO_HW(vf
);
2511 uint64_t rss_hf
, hena
;
2514 ret
= i40evf_set_rss_key(&vf
->vsi
, rss_conf
->rss_key
,
2515 rss_conf
->rss_key_len
);
2519 rss_hf
= rss_conf
->rss_hf
;
2520 hena
= (uint64_t)i40e_read_rx_ctl(hw
, I40E_VFQF_HENA(0));
2521 hena
|= ((uint64_t)i40e_read_rx_ctl(hw
, I40E_VFQF_HENA(1))) << 32;
2522 if (hw
->mac
.type
== I40E_MAC_X722
)
2523 hena
&= ~I40E_RSS_HENA_ALL_X722
;
2525 hena
&= ~I40E_RSS_HENA_ALL
;
2526 hena
|= i40e_config_hena(rss_hf
, hw
->mac
.type
);
2527 i40e_write_rx_ctl(hw
, I40E_VFQF_HENA(0), (uint32_t)hena
);
2528 i40e_write_rx_ctl(hw
, I40E_VFQF_HENA(1), (uint32_t)(hena
>> 32));
2529 I40EVF_WRITE_FLUSH(hw
);
2535 i40evf_disable_rss(struct i40e_vf
*vf
)
2537 struct i40e_hw
*hw
= I40E_VF_TO_HW(vf
);
2540 hena
= (uint64_t)i40e_read_rx_ctl(hw
, I40E_VFQF_HENA(0));
2541 hena
|= ((uint64_t)i40e_read_rx_ctl(hw
, I40E_VFQF_HENA(1))) << 32;
2542 if (hw
->mac
.type
== I40E_MAC_X722
)
2543 hena
&= ~I40E_RSS_HENA_ALL_X722
;
2545 hena
&= ~I40E_RSS_HENA_ALL
;
2546 i40e_write_rx_ctl(hw
, I40E_VFQF_HENA(0), (uint32_t)hena
);
2547 i40e_write_rx_ctl(hw
, I40E_VFQF_HENA(1), (uint32_t)(hena
>> 32));
2548 I40EVF_WRITE_FLUSH(hw
);
2552 i40evf_config_rss(struct i40e_vf
*vf
)
2554 struct i40e_hw
*hw
= I40E_VF_TO_HW(vf
);
2555 struct rte_eth_rss_conf rss_conf
;
2556 uint32_t i
, j
, lut
= 0, nb_q
= (I40E_VFQF_HLUT_MAX_INDEX
+ 1) * 4;
2559 if (vf
->dev_data
->dev_conf
.rxmode
.mq_mode
!= ETH_MQ_RX_RSS
) {
2560 i40evf_disable_rss(vf
);
2561 PMD_DRV_LOG(DEBUG
, "RSS not configured\n");
2565 num
= RTE_MIN(vf
->dev_data
->nb_rx_queues
, I40E_MAX_QP_NUM_PER_VF
);
2566 /* Fill out the look up table */
2567 for (i
= 0, j
= 0; i
< nb_q
; i
++, j
++) {
2570 lut
= (lut
<< 8) | j
;
2572 I40E_WRITE_REG(hw
, I40E_VFQF_HLUT(i
>> 2), lut
);
2575 rss_conf
= vf
->dev_data
->dev_conf
.rx_adv_conf
.rss_conf
;
2576 if ((rss_conf
.rss_hf
& I40E_RSS_OFFLOAD_ALL
) == 0) {
2577 i40evf_disable_rss(vf
);
2578 PMD_DRV_LOG(DEBUG
, "No hash flag is set\n");
2582 if (rss_conf
.rss_key
== NULL
|| rss_conf
.rss_key_len
<
2583 (I40E_VFQF_HKEY_MAX_INDEX
+ 1) * sizeof(uint32_t)) {
2584 /* Calculate the default hash key */
2585 for (i
= 0; i
<= I40E_VFQF_HKEY_MAX_INDEX
; i
++)
2586 rss_key_default
[i
] = (uint32_t)rte_rand();
2587 rss_conf
.rss_key
= (uint8_t *)rss_key_default
;
2588 rss_conf
.rss_key_len
= (I40E_VFQF_HKEY_MAX_INDEX
+ 1) *
2592 return i40evf_hw_rss_hash_set(vf
, &rss_conf
);
2596 i40evf_dev_rss_hash_update(struct rte_eth_dev
*dev
,
2597 struct rte_eth_rss_conf
*rss_conf
)
2599 struct i40e_vf
*vf
= I40EVF_DEV_PRIVATE_TO_VF(dev
->data
->dev_private
);
2600 struct i40e_hw
*hw
= I40E_DEV_PRIVATE_TO_HW(dev
->data
->dev_private
);
2601 uint64_t rss_hf
= rss_conf
->rss_hf
& I40E_RSS_OFFLOAD_ALL
;
2604 hena
= (uint64_t)i40e_read_rx_ctl(hw
, I40E_VFQF_HENA(0));
2605 hena
|= ((uint64_t)i40e_read_rx_ctl(hw
, I40E_VFQF_HENA(1))) << 32;
2606 if (!(hena
& ((hw
->mac
.type
== I40E_MAC_X722
)
2607 ? I40E_RSS_HENA_ALL_X722
2608 : I40E_RSS_HENA_ALL
))) { /* RSS disabled */
2609 if (rss_hf
!= 0) /* Enable RSS */
2615 if (rss_hf
== 0) /* Disable RSS */
2618 return i40evf_hw_rss_hash_set(vf
, rss_conf
);
2622 i40evf_dev_rss_hash_conf_get(struct rte_eth_dev
*dev
,
2623 struct rte_eth_rss_conf
*rss_conf
)
2625 struct i40e_vf
*vf
= I40EVF_DEV_PRIVATE_TO_VF(dev
->data
->dev_private
);
2626 struct i40e_hw
*hw
= I40E_DEV_PRIVATE_TO_HW(dev
->data
->dev_private
);
2629 i40evf_get_rss_key(&vf
->vsi
, rss_conf
->rss_key
,
2630 &rss_conf
->rss_key_len
);
2632 hena
= (uint64_t)i40e_read_rx_ctl(hw
, I40E_VFQF_HENA(0));
2633 hena
|= ((uint64_t)i40e_read_rx_ctl(hw
, I40E_VFQF_HENA(1))) << 32;
2634 rss_conf
->rss_hf
= i40e_parse_hena(hena
);