1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2020 Intel Corporation
11 #include <rte_interrupts.h>
12 #include <rte_debug.h>
14 #include <rte_atomic.h>
16 #include <rte_ether.h>
17 #include <rte_ethdev_pci.h>
18 #include <rte_kvargs.h>
19 #include <rte_malloc.h>
20 #include <rte_memzone.h>
23 #include <iavf_devids.h>
25 #include "ice_generic_flow.h"
26 #include "ice_dcf_ethdev.h"
/*
 * Rx burst stub for the DCF port. All three parameters are marked
 * __rte_unused, so no packet reception is performed through this path.
 * NOTE(review): the return-type line and function body are elided from this
 * extract (original line numbers jump 31 -> 37); presumably returns 0
 * received packets -- confirm against the full file.
 */
29 ice_dcf_recv_pkts(__rte_unused
void *rx_queue
,
30 __rte_unused
struct rte_mbuf
**bufs
,
31 __rte_unused
uint16_t nb_pkts
)
/*
 * Tx burst stub for the DCF port. All three parameters are marked
 * __rte_unused, so no packet transmission is performed through this path.
 * NOTE(review): return-type line and body are elided from this extract;
 * presumably returns 0 transmitted packets -- confirm against the full file.
 */
37 ice_dcf_xmit_pkts(__rte_unused
void *tx_queue
,
38 __rte_unused
struct rte_mbuf
**bufs
,
39 __rte_unused
uint16_t nb_pkts
)
/*
 * dev_start ethdev op: the only visible action is forcing the software
 * link status to ETH_LINK_UP; no hardware programming is done here.
 * NOTE(review): return-type line, braces, and any return statement are
 * elided from this extract.
 */
45 ice_dcf_dev_start(struct rte_eth_dev
*dev
)
/* Mark the port's cached link state as up. */
47 dev
->data
->dev_link
.link_status
= ETH_LINK_UP
;
/*
 * dev_stop ethdev op: mirror of ice_dcf_dev_start -- the only visible
 * action is forcing the software link status to ETH_LINK_DOWN.
 * NOTE(review): return-type line and braces are elided from this extract.
 */
53 ice_dcf_dev_stop(struct rte_eth_dev
*dev
)
/* Mark the port's cached link state as down. */
55 dev
->data
->dev_link
.link_status
= ETH_LINK_DOWN
;
/*
 * dev_configure ethdev op: the device pointer is __rte_unused, so this is
 * a no-op stub accepting any configuration. NOTE(review): body elided from
 * this extract; presumably returns 0 -- confirm against the full file.
 */
59 ice_dcf_dev_configure(__rte_unused
struct rte_eth_dev
*dev
)
/*
 * dev_infos_get ethdev op: report the DCF port's capabilities.
 * Visible limits: a single MAC address, an effectively unlimited max Rx
 * packet length ((uint32_t)-1), and Rx/Tx queue counts taken from the
 * sizes of the adapter's rxqs/txqs arrays via RTE_DIM.
 * NOTE(review): return-type line, braces, and return statement are elided
 * from this extract.
 */
65 ice_dcf_dev_info_get(struct rte_eth_dev
*dev
,
66 struct rte_eth_dev_info
*dev_info
)
/* Per-port private data holds the fixed queue arrays. */
68 struct ice_dcf_adapter
*adapter
= dev
->data
->dev_private
;
70 dev_info
->max_mac_addrs
= 1;
/* (uint32_t)-1 == UINT32_MAX: no Rx length limit advertised. */
71 dev_info
->max_rx_pktlen
= (uint32_t)-1;
72 dev_info
->max_rx_queues
= RTE_DIM(adapter
->rxqs
);
73 dev_info
->max_tx_queues
= RTE_DIM(adapter
->txqs
);
/*
 * stats_get ethdev op: both parameters are __rte_unused, so no statistics
 * are collected or reported through this stub. NOTE(review): body elided
 * from this extract; presumably returns 0 -- confirm against the full file.
 */
79 ice_dcf_stats_get(__rte_unused
struct rte_eth_dev
*dev
,
80 __rte_unused
struct rte_eth_stats
*igb_stats
)
/*
 * stats_reset ethdev op: no-op stub (device pointer unused).
 * NOTE(review): body elided from this extract.
 */
86 ice_dcf_stats_reset(__rte_unused
struct rte_eth_dev
*dev
)
/*
 * promiscuous_enable ethdev op: no-op stub (device pointer unused).
 * NOTE(review): body elided from this extract.
 */
92 ice_dcf_dev_promiscuous_enable(__rte_unused
struct rte_eth_dev
*dev
)
/*
 * promiscuous_disable ethdev op: no-op stub (device pointer unused).
 * NOTE(review): body elided from this extract.
 */
98 ice_dcf_dev_promiscuous_disable(__rte_unused
struct rte_eth_dev
*dev
)
/*
 * allmulticast_enable ethdev op: no-op stub (device pointer unused).
 * NOTE(review): body elided from this extract.
 */
104 ice_dcf_dev_allmulticast_enable(__rte_unused
struct rte_eth_dev
*dev
)
/*
 * allmulticast_disable ethdev op: no-op stub (device pointer unused).
 * NOTE(review): body elided from this extract.
 */
110 ice_dcf_dev_allmulticast_disable(__rte_unused
struct rte_eth_dev
*dev
)
/*
 * filter_ctrl ethdev op: the only supported filter type is
 * RTE_ETH_FILTER_GENERIC with the RTE_ETH_FILTER_GET operation, which
 * hands back a pointer to the driver's rte_flow ops table (ice_flow_ops).
 * Any other filter type logs a warning. NOTE(review): several interior
 * lines are elided from this extract (the 'arg' parameter declaration,
 * error returns, 'break's, and the default label are not visible) --
 * confirm the exact error handling against the full file.
 */
116 ice_dcf_dev_filter_ctrl(struct rte_eth_dev
*dev
,
117 enum rte_filter_type filter_type
,
118 enum rte_filter_op filter_op
,
126 switch (filter_type
) {
127 case RTE_ETH_FILTER_GENERIC
:
/* Only the GET op is meaningful for the generic (rte_flow) type. */
128 if (filter_op
!= RTE_ETH_FILTER_GET
)
/* Export the driver's flow-ops table through the out argument. */
130 *(const void **)arg
= &ice_flow_ops
;
/* Unsupported filter types are reported, not silently ignored. */
134 PMD_DRV_LOG(WARNING
, "Filter type (%d) not supported",
/*
 * dev_close ethdev op: tear the DCF port down. Only the primary process
 * performs teardown (secondary processes bail out -- the body of the
 * process-type check is elided from this extract). The burst callbacks are
 * cleared, then the parent adapter and the DCF hardware are uninitialized.
 * NOTE(review): return-type line, braces, and the statement between the
 * process check and the rx_pkt_burst clear (original line numbers jump
 * 148 -> 152) are elided.
 */
144 ice_dcf_dev_close(struct rte_eth_dev
*dev
)
146 struct ice_dcf_adapter
*adapter
= dev
->data
->dev_private
;
/* Teardown is a primary-process-only operation. */
148 if (rte_eal_process_type() != RTE_PROC_PRIMARY
)
/* Detach the (stub) burst callbacks before releasing resources. */
152 dev
->rx_pkt_burst
= NULL
;
153 dev
->tx_pkt_burst
= NULL
;
/* Reverse order of ice_dcf_dev_init: parent adapter first, then HW. */
155 ice_dcf_uninit_parent_adapter(dev
);
156 ice_dcf_uninit_hw(dev
, &adapter
->real_hw
);
/*
 * Shared rx/tx queue_release ethdev op: no-op, since queues live inside
 * the adapter's fixed arrays and own no separately allocated memory
 * (queue pointer is __rte_unused). NOTE(review): body elided.
 */
160 ice_dcf_queue_release(__rte_unused
void *q
)
/*
 * link_update ethdev op: no-op stub -- both parameters are __rte_unused,
 * so no link query happens; link state is driven solely by
 * dev_start/dev_stop. NOTE(review): body elided from this extract;
 * presumably returns 0 -- confirm against the full file.
 */
165 ice_dcf_link_update(__rte_unused
struct rte_eth_dev
*dev
,
166 __rte_unused
int wait_to_complete
)
/*
 * rx_queue_setup ethdev op: no descriptors or mempools are programmed
 * (nb_rx_desc, socket_id, rx_conf and mb_pool are all __rte_unused); the
 * op merely points the ethdev's rx_queues[] slot at the matching
 * preallocated entry in the adapter's rxqs array. NOTE(review):
 * return-type line, braces, and return statement are elided from this
 * extract; rx_queue_id is presumably already range-checked by the ethdev
 * layer against max_rx_queues -- confirm.
 */
172 ice_dcf_rx_queue_setup(struct rte_eth_dev
*dev
,
173 uint16_t rx_queue_id
,
174 __rte_unused
uint16_t nb_rx_desc
,
175 __rte_unused
unsigned int socket_id
,
176 __rte_unused
const struct rte_eth_rxconf
*rx_conf
,
177 __rte_unused
struct rte_mempool
*mb_pool
)
179 struct ice_dcf_adapter
*adapter
= dev
->data
->dev_private
;
/* Wire queue slot N directly to the adapter's Nth static Rx queue. */
181 dev
->data
->rx_queues
[rx_queue_id
] = &adapter
->rxqs
[rx_queue_id
];
/*
 * tx_queue_setup ethdev op: Tx mirror of ice_dcf_rx_queue_setup -- the
 * configuration parameters are all __rte_unused; the op only points the
 * ethdev's tx_queues[] slot at the matching preallocated entry in the
 * adapter's txqs array. NOTE(review): return-type line, braces, and
 * return statement are elided from this extract.
 */
187 ice_dcf_tx_queue_setup(struct rte_eth_dev
*dev
,
188 uint16_t tx_queue_id
,
189 __rte_unused
uint16_t nb_tx_desc
,
190 __rte_unused
unsigned int socket_id
,
191 __rte_unused
const struct rte_eth_txconf
*tx_conf
)
193 struct ice_dcf_adapter
*adapter
= dev
->data
->dev_private
;
/* Wire queue slot N directly to the adapter's Nth static Tx queue. */
195 dev
->data
->tx_queues
[tx_queue_id
] = &adapter
->txqs
[tx_queue_id
];
/*
 * ethdev ops table for the DCF port. Most callbacks are stubs (the DCF
 * port exists for flow control/configuration, not datapath); the
 * substantive ops are dev_infos_get, the queue setup hooks, dev_close,
 * and filter_ctrl (which exports ice_flow_ops). NOTE(review): the table's
 * closing brace is elided from this extract.
 */
200 static const struct eth_dev_ops ice_dcf_eth_dev_ops
= {
201 .dev_start
= ice_dcf_dev_start
,
202 .dev_stop
= ice_dcf_dev_stop
,
203 .dev_close
= ice_dcf_dev_close
,
204 .dev_configure
= ice_dcf_dev_configure
,
205 .dev_infos_get
= ice_dcf_dev_info_get
,
206 .rx_queue_setup
= ice_dcf_rx_queue_setup
,
207 .tx_queue_setup
= ice_dcf_tx_queue_setup
,
/* Rx and Tx release share the same no-op handler. */
208 .rx_queue_release
= ice_dcf_queue_release
,
209 .tx_queue_release
= ice_dcf_queue_release
,
210 .link_update
= ice_dcf_link_update
,
211 .stats_get
= ice_dcf_stats_get
,
212 .stats_reset
= ice_dcf_stats_reset
,
213 .promiscuous_enable
= ice_dcf_dev_promiscuous_enable
,
214 .promiscuous_disable
= ice_dcf_dev_promiscuous_disable
,
215 .allmulticast_enable
= ice_dcf_dev_allmulticast_enable
,
216 .allmulticast_disable
= ice_dcf_dev_allmulticast_disable
,
217 .filter_ctrl
= ice_dcf_dev_filter_ctrl
,
/*
 * Per-device init callback (invoked via rte_eth_dev_pci_generic_probe):
 * installs the ops table and stub burst callbacks, and -- in the primary
 * process only -- marks the device close-on-remove, registers the PF
 * event-message callback, then initializes the DCF hardware and the
 * parent adapter. On parent-adapter failure the HW init is rolled back.
 * NOTE(review): return-type line, braces, and the return statements
 * (including the early return for secondary processes and the error
 * returns after each PMD_INIT_LOG) are elided from this extract.
 */
221 ice_dcf_dev_init(struct rte_eth_dev
*eth_dev
)
223 struct ice_dcf_adapter
*adapter
= eth_dev
->data
->dev_private
;
225 eth_dev
->dev_ops
= &ice_dcf_eth_dev_ops
;
/* Stub burst handlers: the DCF port carries no traffic itself. */
226 eth_dev
->rx_pkt_burst
= ice_dcf_recv_pkts
;
227 eth_dev
->tx_pkt_burst
= ice_dcf_xmit_pkts
;
/* Secondary processes only attach the callbacks above. */
229 if (rte_eal_process_type() != RTE_PROC_PRIMARY
)
232 eth_dev
->data
->dev_flags
|= RTE_ETH_DEV_CLOSE_REMOVE
;
/* Route PF-originated virtchnl events to the DCF handler. */
234 adapter
->real_hw
.vc_event_msg_cb
= ice_dcf_handle_pf_event_msg
;
235 if (ice_dcf_init_hw(eth_dev
, &adapter
->real_hw
) != 0) {
236 PMD_INIT_LOG(ERR
, "Failed to init DCF hardware");
240 if (ice_dcf_init_parent_adapter(eth_dev
) != 0) {
241 PMD_INIT_LOG(ERR
, "Failed to init DCF parent adapter");
/* Roll back the HW init performed just above. */
242 ice_dcf_uninit_hw(eth_dev
, &adapter
->real_hw
);
/*
 * Per-device uninit callback (invoked via rte_eth_dev_pci_generic_remove):
 * delegates all teardown to ice_dcf_dev_close. NOTE(review): return-type
 * line, braces, and return statement are elided from this extract.
 */
250 ice_dcf_dev_uninit(struct rte_eth_dev
*eth_dev
)
252 ice_dcf_dev_close(eth_dev
);
/*
 * rte_kvargs_process callback for the "cap" devarg: accepts only the
 * exact value "dcf" (strcmp != 0 means mismatch). NOTE(review): the
 * return statements are elided from this extract; presumably returns a
 * negative value on mismatch and 0 on match, per the kvargs handler
 * contract -- confirm against the full file.
 */
258 ice_dcf_cap_check_handler(__rte_unused
const char *key
,
259 const char *value
, __rte_unused
void *opaque
)
261 if (strcmp(value
, "dcf"))
/*
 * Decide whether this PCI device was asked to run in DCF mode: parse the
 * device arguments, look for a "cap" key, and validate its value via
 * ice_dcf_cap_check_handler (i.e. select DCF only for "cap=dcf"). The
 * kvargs list is freed before returning. NOTE(review): return-type line,
 * braces, the NULL checks / early returns, and the success-path return
 * value are elided from this extract -- presumably returns nonzero when
 * cap=dcf was given; confirm against the full file.
 */
268 ice_dcf_cap_selected(struct rte_devargs
*devargs
)
270 struct rte_kvargs
*kvlist
;
271 const char *key
= "cap";
277 kvlist
= rte_kvargs_parse(devargs
->args
, NULL
);
/* No "cap" key at all: DCF mode was not requested. */
281 if (!rte_kvargs_count(kvlist
, key
))
284 /* dcf capability selected when there's a key-value pair: cap=dcf */
285 if (rte_kvargs_process(kvlist
, key
,
286 ice_dcf_cap_check_handler
, NULL
) < 0)
/* Parsed list is owned here; release it on every path. */
292 rte_kvargs_free(kvlist
);
/*
 * PCI probe: only claim the device when the user explicitly passed the
 * cap=dcf devarg (otherwise the regular iavf/ice drivers should bind it);
 * then defer to the generic ethdev PCI probe, allocating an
 * ice_dcf_adapter as private data. NOTE(review): braces, the non-DCF
 * early-return value, and the final ice_dcf_dev_init argument to
 * rte_eth_dev_pci_generic_probe are elided from this extract -- confirm
 * the init callback against the full file.
 */
296 static int eth_ice_dcf_pci_probe(__rte_unused
struct rte_pci_driver
*pci_drv
,
297 struct rte_pci_device
*pci_dev
)
299 if (!ice_dcf_cap_selected(pci_dev
->device
.devargs
))
302 return rte_eth_dev_pci_generic_probe(pci_dev
,
303 sizeof(struct ice_dcf_adapter
),
/*
 * PCI remove: defer to the generic ethdev PCI remove, which invokes
 * ice_dcf_dev_uninit for the device. NOTE(review): braces are elided
 * from this extract.
 */
307 static int eth_ice_dcf_pci_remove(struct rte_pci_device
*pci_dev
)
309 return rte_eth_dev_pci_generic_remove(pci_dev
, ice_dcf_dev_uninit
);
/*
 * PCI ID table: the DCF PMD binds to the Intel adaptive VF device ID
 * (the same ID as iavf; selection between them happens via the cap=dcf
 * devarg in eth_ice_dcf_pci_probe). Terminated by a zeroed sentinel.
 * NOTE(review): the table's closing brace is elided from this extract.
 */
312 static const struct rte_pci_id pci_id_ice_dcf_map
[] = {
313 { RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID
, IAVF_DEV_ID_ADAPTIVE_VF
) },
314 { .vendor_id
= 0, /* sentinel */ },
/*
 * PCI driver definition for the DCF PMD: needs BAR mapping
 * (RTE_PCI_DRV_NEED_MAPPING) and wires the probe/remove callbacks above.
 * NOTE(review): the struct's closing brace is elided from this extract.
 */
317 static struct rte_pci_driver rte_ice_dcf_pmd
= {
318 .id_table
= pci_id_ice_dcf_map
,
319 .drv_flags
= RTE_PCI_DRV_NEED_MAPPING
,
320 .probe
= eth_ice_dcf_pci_probe
,
321 .remove
= eth_ice_dcf_pci_remove
,
/*
 * Driver registration: register the PMD as "net_ice_dcf" with its PCI ID
 * table, declare the kernel-module dependency (igb_uio or vfio-pci), and
 * advertise the "cap=dcf" devarg that gates probing.
 */
324 RTE_PMD_REGISTER_PCI(net_ice_dcf
, rte_ice_dcf_pmd
);
325 RTE_PMD_REGISTER_PCI_TABLE(net_ice_dcf
, pci_id_ice_dcf_map
);
326 RTE_PMD_REGISTER_KMOD_DEP(net_ice_dcf
, "* igb_uio | vfio-pci");
327 RTE_PMD_REGISTER_PARAM_STRING(net_ice_dcf
, "cap=dcf");