/*
 * Source: ceph.git mirror (import 15.2.0 Octopus) of DPDK —
 * ceph/src/spdk/dpdk/drivers/net/ipn3ke/ipn3ke_representor.c
 */
1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019 Intel Corporation
3 */
4
5 #include <stdint.h>
6
7 #include <rte_bus_pci.h>
8 #include <rte_ethdev.h>
9 #include <rte_pci.h>
10 #include <rte_malloc.h>
11
12 #include <rte_mbuf.h>
13 #include <rte_sched.h>
14 #include <rte_ethdev_driver.h>
15 #include <rte_spinlock.h>
16
17 #include <rte_io.h>
18 #include <rte_rawdev.h>
19 #include <rte_rawdev_pmd.h>
20 #include <rte_bus_ifpga.h>
21 #include <ifpga_logs.h>
22
23 #include "ipn3ke_rawdev_api.h"
24 #include "ipn3ke_flow.h"
25 #include "ipn3ke_logs.h"
26 #include "ipn3ke_ethdev.h"
27
/* Number of representor ports currently registered for link scanning. */
static int ipn3ke_rpst_scan_num;
/* Background thread that polls link status for all representors. */
static pthread_t ipn3ke_rpst_scan_thread;

/** Double linked list of representor port. */
TAILQ_HEAD(ipn3ke_rpst_list, ipn3ke_rpst);

static struct ipn3ke_rpst_list ipn3ke_rpst_list =
		TAILQ_HEAD_INITIALIZER(ipn3ke_rpst_list);

/* Protects ipn3ke_rpst_list, ipn3ke_rpst_scan_num and the scan-thread
 * start/stop transitions (see ipn3ke_rpst_init/uninit).
 */
static rte_spinlock_t ipn3ke_link_notify_list_lk = RTE_SPINLOCK_INITIALIZER;

static int
ipn3ke_rpst_link_check(struct ipn3ke_rpst *rpst);
41
/* Report device capabilities for a representor port.
 *
 * Speed capability is derived from the retimer MAC type: a 10GE XFI
 * retimer advertises 10G, a 25GE 25GAUI retimer 25G, anything else
 * falls back to autonegotiation.
 */
static void
ipn3ke_rpst_dev_infos_get(struct rte_eth_dev *ethdev,
	struct rte_eth_dev_info *dev_info)
{
	struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
	struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(ethdev);

	dev_info->speed_capa =
		(hw->retimer.mac_type ==
			IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) ?
		ETH_LINK_SPEED_10G :
		((hw->retimer.mac_type ==
			IFPGA_RAWDEV_RETIMER_MAC_TYPE_25GE_25GAUI) ?
		ETH_LINK_SPEED_25G :
		ETH_LINK_SPEED_AUTONEG);

	/* Representors expose a single Rx and a single Tx queue; the
	 * burst handlers are stubs (no real datapath through this port).
	 */
	dev_info->max_rx_queues = 1;
	dev_info->max_tx_queues = 1;
	dev_info->min_rx_bufsize = IPN3KE_AFU_BUF_SIZE_MIN;
	dev_info->max_rx_pktlen = IPN3KE_AFU_FRAME_SIZE_MAX;
	dev_info->max_mac_addrs = hw->port_num;
	dev_info->max_vfs = 0;
	dev_info->default_txconf = (struct rte_eth_txconf) {
		.offloads = 0,
	};
	dev_info->rx_queue_offload_capa = 0;
	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_VLAN_EXTEND |
		DEV_RX_OFFLOAD_VLAN_FILTER |
		DEV_RX_OFFLOAD_JUMBO_FRAME;

	dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_QINQ_INSERT |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM |
		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_TX_OFFLOAD_TCP_TSO |
		DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO |
		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
		DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
		DEV_TX_OFFLOAD_MULTI_SEGS |
		dev_info->tx_queue_offload_capa;

	dev_info->dev_capa =
		RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
		RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;

	/* Switch info lets applications map this representor back to its
	 * switch domain and physical port.
	 */
	dev_info->switch_info.name = ethdev->device->name;
	dev_info->switch_info.domain_id = rpst->switch_domain_id;
	dev_info->switch_info.port_id = rpst->port_id;
}
104
105 static int
106 ipn3ke_rpst_dev_configure(__rte_unused struct rte_eth_dev *dev)
107 {
108 return 0;
109 }
110
/* Start a representor port.
 *
 * Derives this port's MAC address from the rawdev's line-side base MAC
 * (last byte replaced by port_id + 1), programs it into the XFI MAC if
 * present, enables the Tx/Rx paths, clears statistics, and refreshes
 * link state.  Always returns 0.
 */
static int
ipn3ke_rpst_dev_start(struct rte_eth_dev *dev)
{
	struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
	struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(dev);
	struct rte_rawdev *rawdev;
	uint64_t base_mac;
	uint32_t val;
	char attr_name[IPN3KE_RAWDEV_ATTR_LEN_MAX];

	rawdev = hw->rawdev;

	/* Fetch the line-side base MAC from the FPGA rawdev.
	 * NOTE(review): attr_get's return value is not checked; base_mac
	 * would be used uninitialized on failure — confirm intended.
	 */
	memset(attr_name, 0, sizeof(attr_name));
	snprintf(attr_name, IPN3KE_RAWDEV_ATTR_LEN_MAX, "%s",
			"LineSideBaseMAC");
	rawdev->dev_ops->attr_get(rawdev, attr_name, &base_mac);
	ether_addr_copy((struct ether_addr *)&base_mac, &rpst->mac_addr);

	/* Per-port address = base MAC with the last byte set to port+1. */
	ether_addr_copy(&rpst->mac_addr, &dev->data->mac_addrs[0]);
	dev->data->mac_addrs->addr_bytes[ETHER_ADDR_LEN - 1] =
		(uint8_t)rpst->port_id + 1;

	if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
		/* Set mac address: bytes 0-3 -> ADDR0, bytes 4-5 -> ADDR1 */
		rte_memcpy(((char *)(&val)),
			(char *)&dev->data->mac_addrs->addr_bytes[0],
			sizeof(uint32_t));
		(*hw->f_mac_write)(hw,
				val,
				IPN3KE_MAC_PRIMARY_MAC_ADDR0,
				rpst->port_id,
				0);
		rte_memcpy(((char *)(&val)),
			(char *)&dev->data->mac_addrs->addr_bytes[4],
			sizeof(uint16_t));
		(*hw->f_mac_write)(hw,
				val,
				IPN3KE_MAC_PRIMARY_MAC_ADDR1,
				rpst->port_id,
				0);

		/* Enable the TX path */
		ipn3ke_xmac_tx_enable(hw, rpst->port_id, 0);

		/* Disables source address override */
		ipn3ke_xmac_smac_ovd_dis(hw, rpst->port_id, 0);

		/* Enable the RX path */
		ipn3ke_xmac_rx_enable(hw, rpst->port_id, 0);

		/* Clear all TX statistics counters */
		ipn3ke_xmac_tx_clr_stcs(hw, rpst->port_id, 0);

		/* Clear all RX statistics counters */
		ipn3ke_xmac_rx_clr_stcs(hw, rpst->port_id, 0);
	}

	/* Publish the current link state (no wait). */
	ipn3ke_rpst_link_update(dev, 0);

	return 0;
}
172
173 static void
174 ipn3ke_rpst_dev_stop(struct rte_eth_dev *dev)
175 {
176 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
177 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(dev);
178
179 if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
180 /* Disable the TX path */
181 ipn3ke_xmac_tx_disable(hw, rpst->port_id, 0);
182
183 /* Disable the RX path */
184 ipn3ke_xmac_rx_disable(hw, rpst->port_id, 0);
185 }
186 }
187
188 static void
189 ipn3ke_rpst_dev_close(struct rte_eth_dev *dev)
190 {
191 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
192 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(dev);
193
194 if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
195 /* Disable the TX path */
196 ipn3ke_xmac_tx_disable(hw, rpst->port_id, 0);
197
198 /* Disable the RX path */
199 ipn3ke_xmac_rx_disable(hw, rpst->port_id, 0);
200 }
201 }
202
203 /*
204 * Reset PF device only to re-initialize resources in PMD layer
205 */
206 static int
207 ipn3ke_rpst_dev_reset(struct rte_eth_dev *dev)
208 {
209 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
210 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(dev);
211
212 if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
213 /* Disable the TX path */
214 ipn3ke_xmac_tx_disable(hw, rpst->port_id, 0);
215
216 /* Disable the RX path */
217 ipn3ke_xmac_rx_disable(hw, rpst->port_id, 0);
218 }
219
220 return 0;
221 }
222
/* Queue start/stop stubs: representors have no real queues, so these
 * accept the request and report success.
 */
static int
ipn3ke_rpst_rx_queue_start(__rte_unused struct rte_eth_dev *dev,
	__rte_unused uint16_t rx_queue_id)
{
	return 0;
}

static int
ipn3ke_rpst_rx_queue_stop(__rte_unused struct rte_eth_dev *dev,
	__rte_unused uint16_t rx_queue_id)
{
	return 0;
}

static int
ipn3ke_rpst_tx_queue_start(__rte_unused struct rte_eth_dev *dev,
	__rte_unused uint16_t tx_queue_id)
{
	return 0;
}

static int
ipn3ke_rpst_tx_queue_stop(__rte_unused struct rte_eth_dev *dev,
	__rte_unused uint16_t tx_queue_id)
{
	return 0;
}
250
/* Queue setup/release stubs: nothing is allocated for representor
 * queues, so setup succeeds trivially and release has nothing to free.
 */
static int
ipn3ke_rpst_rx_queue_setup(__rte_unused struct rte_eth_dev *dev,
	__rte_unused uint16_t queue_idx, __rte_unused uint16_t nb_desc,
	__rte_unused unsigned int socket_id,
	__rte_unused const struct rte_eth_rxconf *rx_conf,
	__rte_unused struct rte_mempool *mp)
{
	return 0;
}

static void
ipn3ke_rpst_rx_queue_release(__rte_unused void *rxq)
{
}

static int
ipn3ke_rpst_tx_queue_setup(__rte_unused struct rte_eth_dev *dev,
	__rte_unused uint16_t queue_idx, __rte_unused uint16_t nb_desc,
	__rte_unused unsigned int socket_id,
	__rte_unused const struct rte_eth_txconf *tx_conf)
{
	return 0;
}

static void
ipn3ke_rpst_tx_queue_release(__rte_unused void *txq)
{
}
279
/* Statistics stubs: no counters are collected for representor ports;
 * get/reset operations succeed without reporting any data.
 */
static int
ipn3ke_rpst_stats_get(__rte_unused struct rte_eth_dev *ethdev,
	__rte_unused struct rte_eth_stats *stats)
{
	return 0;
}

static int
ipn3ke_rpst_xstats_get(__rte_unused struct rte_eth_dev *dev,
	__rte_unused struct rte_eth_xstat *xstats, __rte_unused unsigned int n)
{
	return 0;
}

static int
ipn3ke_rpst_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
	__rte_unused struct rte_eth_xstat_name *xstats_names,
	__rte_unused unsigned int limit)
{
	return 0;
}

static void
ipn3ke_rpst_stats_reset(__rte_unused struct rte_eth_dev *ethdev)
{
}
306
307 static void
308 ipn3ke_update_link(struct rte_rawdev *rawdev,
309 uint16_t port, struct rte_eth_link *link)
310 {
311 uint64_t line_link_bitmap = 0;
312 enum ifpga_rawdev_link_speed link_speed;
313
314 rawdev->dev_ops->attr_get(rawdev,
315 "LineSideLinkStatus",
316 (uint64_t *)&line_link_bitmap);
317
318 /* Parse the link status */
319 if ((1 << port) & line_link_bitmap)
320 link->link_status = 1;
321 else
322 link->link_status = 0;
323
324 IPN3KE_AFU_PMD_DEBUG("port is %d\n", port);
325 IPN3KE_AFU_PMD_DEBUG("link->link_status is %d\n", link->link_status);
326
327 rawdev->dev_ops->attr_get(rawdev,
328 "LineSideLinkSpeed",
329 (uint64_t *)&link_speed);
330 switch (link_speed) {
331 case IFPGA_RAWDEV_LINK_SPEED_10GB:
332 link->link_speed = ETH_SPEED_NUM_10G;
333 break;
334 case IFPGA_RAWDEV_LINK_SPEED_25GB:
335 link->link_speed = ETH_SPEED_NUM_25G;
336 break;
337 default:
338 IPN3KE_AFU_PMD_ERR("Unknown link speed info %u", link_speed);
339 break;
340 }
341 }
342
343 /*
344 * Set device link up.
345 */
346 int
347 ipn3ke_rpst_dev_set_link_up(struct rte_eth_dev *dev)
348 {
349 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(dev);
350 struct rte_eth_dev *pf;
351 int ret = 0;
352
353 if (rpst->i40e_pf_eth) {
354 ret = rte_eth_dev_set_link_up(rpst->i40e_pf_eth_port_id);
355 pf = rpst->i40e_pf_eth;
356 (*rpst->i40e_pf_eth->dev_ops->link_update)(pf, 1);
357 }
358
359 return ret;
360 }
361
362 /*
363 * Set device link down.
364 */
365 int
366 ipn3ke_rpst_dev_set_link_down(struct rte_eth_dev *dev)
367 {
368 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(dev);
369 struct rte_eth_dev *pf;
370 int ret = 0;
371
372 if (rpst->i40e_pf_eth) {
373 ret = rte_eth_dev_set_link_down(rpst->i40e_pf_eth_port_id);
374 pf = rpst->i40e_pf_eth;
375 (*rpst->i40e_pf_eth->dev_ops->link_update)(pf, 1);
376 }
377
378 return ret;
379 }
380
/* eth_dev link_update callback for a representor port.
 *
 * Reads the line-side link from the rawdev and, only on an up->down or
 * down->up transition (tracked in rpst->ori_linfo), publishes the new
 * state to ethdev and mirrors it onto the paired i40e PF port, if any.
 * Always returns 0.
 */
int
ipn3ke_rpst_link_update(struct rte_eth_dev *ethdev,
	__rte_unused int wait_to_complete)
{
	struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(ethdev);
	struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
	struct rte_rawdev *rawdev;
	struct rte_eth_link link;
	struct rte_eth_dev *pf;

	memset(&link, 0, sizeof(link));

	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	/* Autoneg unless the application fixed the speed. */
	link.link_autoneg = !(ethdev->data->dev_conf.link_speeds &
				ETH_LINK_SPEED_FIXED);

	rawdev = hw->rawdev;
	ipn3ke_update_link(rawdev, rpst->port_id, &link);

	if (!rpst->ori_linfo.link_status &&
		link.link_status) {
		/* Down -> up transition. */
		IPN3KE_AFU_PMD_DEBUG("Update Rpst %d Up\n", rpst->port_id);
		rpst->ori_linfo.link_status = link.link_status;
		rpst->ori_linfo.link_speed = link.link_speed;

		rte_eth_linkstatus_set(ethdev, &link);

		if (rpst->i40e_pf_eth) {
			/* Propagate the event to the paired NIC PF port. */
			IPN3KE_AFU_PMD_DEBUG("Update FVL PF %d Up\n",
				rpst->i40e_pf_eth_port_id);
			rte_eth_dev_set_link_up(rpst->i40e_pf_eth_port_id);
			pf = rpst->i40e_pf_eth;
			(*rpst->i40e_pf_eth->dev_ops->link_update)(pf, 1);
		}
	} else if (rpst->ori_linfo.link_status &&
		!link.link_status) {
		/* Up -> down transition. */
		IPN3KE_AFU_PMD_DEBUG("Update Rpst %d Down\n",
			rpst->port_id);
		rpst->ori_linfo.link_status = link.link_status;
		rpst->ori_linfo.link_speed = link.link_speed;

		rte_eth_linkstatus_set(ethdev, &link);

		if (rpst->i40e_pf_eth) {
			IPN3KE_AFU_PMD_DEBUG("Update FVL PF %d Down\n",
				rpst->i40e_pf_eth_port_id);
			rte_eth_dev_set_link_down(rpst->i40e_pf_eth_port_id);
			pf = rpst->i40e_pf_eth;
			(*rpst->i40e_pf_eth->dev_ops->link_update)(pf, 1);
		}
	}

	return 0;
}
435
/* Poll one representor's line-side link and propagate state changes.
 *
 * Same transition logic as ipn3ke_rpst_link_update(), but driven from
 * the background scan thread using rpst->ethdev instead of a callback
 * argument.  Returns -1 on NULL input, 0 otherwise.
 */
static int
ipn3ke_rpst_link_check(struct ipn3ke_rpst *rpst)
{
	struct ipn3ke_hw *hw;
	struct rte_rawdev *rawdev;
	struct rte_eth_link link;
	struct rte_eth_dev *pf;

	if (rpst == NULL)
		return -1;

	hw = rpst->hw;

	memset(&link, 0, sizeof(link));

	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	/* Autoneg unless the application fixed the speed. */
	link.link_autoneg = !(rpst->ethdev->data->dev_conf.link_speeds &
				ETH_LINK_SPEED_FIXED);

	rawdev = hw->rawdev;
	ipn3ke_update_link(rawdev, rpst->port_id, &link);

	if (!rpst->ori_linfo.link_status &&
		link.link_status) {
		/* Down -> up transition. */
		IPN3KE_AFU_PMD_DEBUG("Check Rpst %d Up\n", rpst->port_id);
		rpst->ori_linfo.link_status = link.link_status;
		rpst->ori_linfo.link_speed = link.link_speed;

		rte_eth_linkstatus_set(rpst->ethdev, &link);

		if (rpst->i40e_pf_eth) {
			/* Propagate the event to the paired NIC PF port. */
			IPN3KE_AFU_PMD_DEBUG("Check FVL PF %d Up\n",
				rpst->i40e_pf_eth_port_id);
			rte_eth_dev_set_link_up(rpst->i40e_pf_eth_port_id);
			pf = rpst->i40e_pf_eth;
			(*rpst->i40e_pf_eth->dev_ops->link_update)(pf, 1);
		}
	} else if (rpst->ori_linfo.link_status &&
		!link.link_status) {
		/* Up -> down transition. */
		IPN3KE_AFU_PMD_DEBUG("Check Rpst %d Down\n", rpst->port_id);
		rpst->ori_linfo.link_status = link.link_status;
		rpst->ori_linfo.link_speed = link.link_speed;

		rte_eth_linkstatus_set(rpst->ethdev, &link);

		if (rpst->i40e_pf_eth) {
			IPN3KE_AFU_PMD_DEBUG("Check FVL PF %d Down\n",
				rpst->i40e_pf_eth_port_id);
			rte_eth_dev_set_link_down(rpst->i40e_pf_eth_port_id);
			pf = rpst->i40e_pf_eth;
			(*rpst->i40e_pf_eth->dev_ops->link_update)(pf, 1);
		}
	}

	return 0;
}
492
/* Scan-thread body: loop forever, polling link state for every
 * registered representor whose FPGA port and paired NIC PF are both
 * started, sleeping 50 ms between passes.
 *
 * NOTE(review): the list is traversed without taking
 * ipn3ke_link_notify_list_lk, so a concurrent init/uninit could race
 * with this walk — confirm whether pthread_cancel-before-modify in
 * ipn3ke_rpst_scan_check() is relied on to prevent that.
 */
static void *
ipn3ke_rpst_scan_handle_request(__rte_unused void *param)
{
	struct ipn3ke_rpst *rpst;
	int num = 0;
#define MS 1000
#define SCAN_NUM 32

	for (;;) {
		num = 0;
		TAILQ_FOREACH(rpst, &ipn3ke_rpst_list, next) {
			if (rpst->i40e_pf_eth &&
				rpst->ethdev->data->dev_started &&
				rpst->i40e_pf_eth->data->dev_started)
				ipn3ke_rpst_link_check(rpst);

			/* Back off on very long lists: pause 1 ms for each
			 * port past the first SCAN_NUM.
			 */
			if (++num > SCAN_NUM)
				rte_delay_us(1 * MS);
		}
		rte_delay_us(50 * MS);

		/* Practically unreachable (num restarts at 0 each pass);
		 * presumably kept to give the loop a formal exit path.
		 */
		if (num == 0xffffff)
			return NULL;
	}

	return NULL;
}
520
/* Start or stop the link-scan thread according to the representor
 * count.  Called (with ipn3ke_link_notify_list_lk held) right after
 * ipn3ke_rpst_scan_num is adjusted: registering the first port spawns
 * the thread, removing the last one cancels and joins it.  Returns 0
 * on success or when no transition is needed, negative/pthread error
 * otherwise.
 */
static int
ipn3ke_rpst_scan_check(void)
{
	int ret;

	if (ipn3ke_rpst_scan_num == 1) {
		ret = pthread_create(&ipn3ke_rpst_scan_thread,
			NULL,
			ipn3ke_rpst_scan_handle_request, NULL);
		if (ret) {
			IPN3KE_AFU_PMD_ERR("Fail to create ipn3ke rpst scan thread");
			return -1;
		}
	} else if (ipn3ke_rpst_scan_num == 0) {
		/* The scan loop never exits on its own; cancel it, then
		 * join to reclaim thread resources.
		 */
		ret = pthread_cancel(ipn3ke_rpst_scan_thread);
		if (ret)
			IPN3KE_AFU_PMD_ERR("Can't cancel the thread");

		ret = pthread_join(ipn3ke_rpst_scan_thread, NULL);
		if (ret)
			IPN3KE_AFU_PMD_ERR("Can't join the thread");

		return ret;
	}

	return 0;
}
548
/* Enable promiscuous mode: set the EN_ALLUCAST bit in the XFI MAC's
 * Rx frame control register via read-modify-write.  No-op for non-XFI
 * retimer MAC types.
 */
void
ipn3ke_rpst_promiscuous_enable(struct rte_eth_dev *ethdev)
{
	struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(ethdev);
	struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
	uint32_t rddata, val;

	if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
		/* Enable all unicast */
		(*hw->f_mac_read)(hw,
				&rddata,
				IPN3KE_MAC_RX_FRAME_CONTROL,
				rpst->port_id,
				0);
		/* Presumably the ALLUCAST mask covers bit 0, so 1 & mask
		 * yields the set bit — verify against the register layout.
		 */
		val = 1;
		val &= IPN3KE_MAC_RX_FRAME_CONTROL_EN_ALLUCAST_MASK;
		val |= rddata;
		(*hw->f_mac_write)(hw,
				val,
				IPN3KE_MAC_RX_FRAME_CONTROL,
				rpst->port_id,
				0);
	}
}
573
574 void
575 ipn3ke_rpst_promiscuous_disable(struct rte_eth_dev *ethdev)
576 {
577 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(ethdev);
578 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
579 uint32_t rddata, val;
580
581 if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
582 /* Disable all unicast */
583 (*hw->f_mac_read)(hw,
584 &rddata,
585 IPN3KE_MAC_RX_FRAME_CONTROL,
586 rpst->port_id,
587 0);
588 val = 0;
589 val &= IPN3KE_MAC_RX_FRAME_CONTROL_EN_ALLUCAST_MASK;
590 val |= rddata;
591 (*hw->f_mac_write)(hw,
592 val,
593 IPN3KE_MAC_RX_FRAME_CONTROL,
594 rpst->port_id,
595 0);
596 }
597 }
598
/* Enable all-multicast: set the EN_ALLMCAST bit in the XFI MAC's Rx
 * frame control register via read-modify-write.  No-op for non-XFI
 * retimer MAC types.
 */
void
ipn3ke_rpst_allmulticast_enable(struct rte_eth_dev *ethdev)
{
	struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(ethdev);
	struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
	uint32_t rddata, val;

	if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
		/* Enable all multicast */
		(*hw->f_mac_read)(hw,
				&rddata,
				IPN3KE_MAC_RX_FRAME_CONTROL,
				rpst->port_id,
				0);
		/* Shift the bit into field position, then mask it and OR
		 * with the current register contents.
		 */
		val = 1;
		val <<= IPN3KE_MAC_RX_FRAME_CONTROL_EN_ALLMCAST_SHIFT;
		val &= IPN3KE_MAC_RX_FRAME_CONTROL_EN_ALLMCAST_MASK;
		val |= rddata;
		(*hw->f_mac_write)(hw,
				val,
				IPN3KE_MAC_RX_FRAME_CONTROL,
				rpst->port_id,
				0);
	}
}
624
625 void
626 ipn3ke_rpst_allmulticast_disable(struct rte_eth_dev *ethdev)
627 {
628 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(ethdev);
629 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
630 uint32_t rddata, val;
631
632 if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
633 /* Disable all unicast */
634 (*hw->f_mac_read)(hw,
635 &rddata,
636 IPN3KE_MAC_RX_FRAME_CONTROL,
637 rpst->port_id,
638 0);
639 val = 0;
640 val <<= IPN3KE_MAC_RX_FRAME_CONTROL_EN_ALLMCAST_SHIFT;
641 val &= IPN3KE_MAC_RX_FRAME_CONTROL_EN_ALLMCAST_MASK;
642 val |= rddata;
643 (*hw->f_mac_write)(hw,
644 val,
645 IPN3KE_MAC_RX_FRAME_CONTROL,
646 rpst->port_id,
647 0);
648 }
649 }
650
651 int
652 ipn3ke_rpst_mac_addr_set(struct rte_eth_dev *ethdev,
653 struct ether_addr *mac_addr)
654 {
655 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(ethdev);
656 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
657 uint32_t val;
658
659 if (!is_valid_assigned_ether_addr(mac_addr)) {
660 IPN3KE_AFU_PMD_ERR("Tried to set invalid MAC address.");
661 return -EINVAL;
662 }
663
664 if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
665 ether_addr_copy(&mac_addr[0], &rpst->mac_addr);
666
667 /* Set mac address */
668 rte_memcpy(((char *)(&val)), &mac_addr[0], sizeof(uint32_t));
669 (*hw->f_mac_write)(hw,
670 val,
671 IPN3KE_MAC_PRIMARY_MAC_ADDR0,
672 rpst->port_id,
673 0);
674 rte_memcpy(((char *)(&val)), &mac_addr[4], sizeof(uint16_t));
675 (*hw->f_mac_write)(hw,
676 val,
677 IPN3KE_MAC_PRIMARY_MAC_ADDR0,
678 rpst->port_id,
679 0);
680 }
681
682 return 0;
683 }
684
/* Set the MTU for a representor port and mirror it onto the paired
 * i40e PF port.
 *
 * Returns -EINVAL if the MTU is out of range, -EBUSY if either the NIC
 * PF port or the FPGA port is started, otherwise the PF's mtu_set
 * result (0 when there is no PF).
 */
int
ipn3ke_rpst_mtu_set(struct rte_eth_dev *ethdev, uint16_t mtu)
{
	int ret = 0;
	struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
	struct rte_eth_dev_data *dev_data = ethdev->data;
	uint32_t frame_size = mtu + IPN3KE_ETH_OVERHEAD;

	/* check if mtu is within the allowed range */
	if (mtu < ETHER_MIN_MTU ||
		frame_size > IPN3KE_MAC_FRAME_SIZE_MAX)
		return -EINVAL;

	/* mtu setting is forbidden if port is start */
	/* make sure NIC port is stopped */
	if (rpst->i40e_pf_eth && rpst->i40e_pf_eth->data->dev_started) {
		IPN3KE_AFU_PMD_ERR("NIC port %d must "
			"be stopped before configuration",
			rpst->i40e_pf_eth->data->port_id);
		return -EBUSY;
	}
	/* mtu setting is forbidden if port is start */
	if (dev_data->dev_started) {
		IPN3KE_AFU_PMD_ERR("FPGA port %d must "
			"be stopped before configuration",
			dev_data->port_id);
		return -EBUSY;
	}

	/* Frames larger than standard Ethernet require the jumbo offload. */
	if (frame_size > ETHER_MAX_LEN)
		dev_data->dev_conf.rxmode.offloads |=
			(uint64_t)(DEV_RX_OFFLOAD_JUMBO_FRAME);
	else
		dev_data->dev_conf.rxmode.offloads &=
			(uint64_t)(~DEV_RX_OFFLOAD_JUMBO_FRAME);

	dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

	/* Apply the same MTU to the paired NIC PF, recording it only on
	 * success.
	 */
	if (rpst->i40e_pf_eth) {
		ret = rpst->i40e_pf_eth->dev_ops->mtu_set(rpst->i40e_pf_eth,
							mtu);
		if (!ret)
			rpst->i40e_pf_eth->data->mtu = mtu;
	}

	return ret;
}
732
733 static int
734 ipn3ke_afu_filter_ctrl(struct rte_eth_dev *ethdev,
735 enum rte_filter_type filter_type, enum rte_filter_op filter_op,
736 void *arg)
737 {
738 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(ethdev);
739 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
740 int ret = 0;
741
742 if (ethdev == NULL)
743 return -EINVAL;
744
745 if (hw->acc_flow)
746 switch (filter_type) {
747 case RTE_ETH_FILTER_GENERIC:
748 if (filter_op != RTE_ETH_FILTER_GET)
749 return -EINVAL;
750 *(const void **)arg = &ipn3ke_flow_ops;
751 break;
752 default:
753 IPN3KE_AFU_PMD_WARN("Filter type (%d) not supported",
754 filter_type);
755 ret = -EINVAL;
756 break;
757 }
758 else if (rpst->i40e_pf_eth)
759 (*rpst->i40e_pf_eth->dev_ops->filter_ctrl)(ethdev,
760 filter_type,
761 filter_op,
762 arg);
763 else
764 return -EINVAL;
765
766 return ret;
767 }
768
/* eth_dev operations table for ipn3ke representor ports.  Queue and
 * statistics handlers are stubs; link, MAC, mode and MTU operations
 * act on the FPGA MAC and/or the paired i40e PF.
 */
static const struct eth_dev_ops ipn3ke_rpst_dev_ops = {
	.dev_infos_get		= ipn3ke_rpst_dev_infos_get,

	.dev_configure		= ipn3ke_rpst_dev_configure,
	.dev_start		= ipn3ke_rpst_dev_start,
	.dev_stop		= ipn3ke_rpst_dev_stop,
	.dev_close		= ipn3ke_rpst_dev_close,
	.dev_reset		= ipn3ke_rpst_dev_reset,

	.stats_get		= ipn3ke_rpst_stats_get,
	.xstats_get		= ipn3ke_rpst_xstats_get,
	.xstats_get_names	= ipn3ke_rpst_xstats_get_names,
	.stats_reset		= ipn3ke_rpst_stats_reset,
	.xstats_reset		= ipn3ke_rpst_stats_reset,

	.filter_ctrl		= ipn3ke_afu_filter_ctrl,

	.rx_queue_start		= ipn3ke_rpst_rx_queue_start,
	.rx_queue_stop		= ipn3ke_rpst_rx_queue_stop,
	.tx_queue_start		= ipn3ke_rpst_tx_queue_start,
	.tx_queue_stop		= ipn3ke_rpst_tx_queue_stop,
	.rx_queue_setup		= ipn3ke_rpst_rx_queue_setup,
	.rx_queue_release	= ipn3ke_rpst_rx_queue_release,
	.tx_queue_setup		= ipn3ke_rpst_tx_queue_setup,
	.tx_queue_release	= ipn3ke_rpst_tx_queue_release,

	.dev_set_link_up	= ipn3ke_rpst_dev_set_link_up,
	.dev_set_link_down	= ipn3ke_rpst_dev_set_link_down,
	.link_update		= ipn3ke_rpst_link_update,

	.promiscuous_enable	= ipn3ke_rpst_promiscuous_enable,
	.promiscuous_disable	= ipn3ke_rpst_promiscuous_disable,
	.allmulticast_enable	= ipn3ke_rpst_allmulticast_enable,
	.allmulticast_disable	= ipn3ke_rpst_allmulticast_disable,
	.mac_addr_set		= ipn3ke_rpst_mac_addr_set,
	.mtu_set		= ipn3ke_rpst_mtu_set,

	.tm_ops_get		= ipn3ke_tm_ops_get,
};
808
/* Stub Rx burst: representors have no datapath; always returns 0
 * packets so applications (e.g. testpmd) can poll without crashing.
 */
static uint16_t ipn3ke_rpst_recv_pkts(__rte_unused void *rx_q,
	__rte_unused struct rte_mbuf **rx_pkts, __rte_unused uint16_t nb_pkts)
{
	return 0;
}

/* Stub Tx burst: accepts nothing, transmits nothing. */
static uint16_t
ipn3ke_rpst_xmit_pkts(__rte_unused void *tx_queue,
	__rte_unused struct rte_mbuf **tx_pkts, __rte_unused uint16_t nb_pkts)
{
	return 0;
}
821
822 int
823 ipn3ke_rpst_init(struct rte_eth_dev *ethdev, void *init_params)
824 {
825 struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);
826 struct ipn3ke_rpst *representor_param =
827 (struct ipn3ke_rpst *)init_params;
828
829 if (representor_param->port_id >= representor_param->hw->port_num)
830 return -ENODEV;
831
832 rpst->ethdev = ethdev;
833 rpst->switch_domain_id = representor_param->switch_domain_id;
834 rpst->port_id = representor_param->port_id;
835 rpst->hw = representor_param->hw;
836 rpst->i40e_pf_eth = NULL;
837 rpst->i40e_pf_eth_port_id = 0xFFFF;
838
839 ethdev->data->mac_addrs = rte_zmalloc("ipn3ke", ETHER_ADDR_LEN, 0);
840 if (!ethdev->data->mac_addrs) {
841 IPN3KE_AFU_PMD_ERR("Failed to "
842 "allocated memory for storing mac address");
843 return -ENODEV;
844 }
845
846 if (rpst->hw->tm_hw_enable)
847 ipn3ke_tm_init(rpst);
848
849 /* Set representor device ops */
850 ethdev->dev_ops = &ipn3ke_rpst_dev_ops;
851
852 /* No data-path, but need stub Rx/Tx functions to avoid crash
853 * when testing with the likes of testpmd.
854 */
855 ethdev->rx_pkt_burst = ipn3ke_rpst_recv_pkts;
856 ethdev->tx_pkt_burst = ipn3ke_rpst_xmit_pkts;
857
858 ethdev->data->nb_rx_queues = 1;
859 ethdev->data->nb_tx_queues = 1;
860
861 ethdev->data->mac_addrs = rte_zmalloc("ipn3ke_afu_representor",
862 ETHER_ADDR_LEN,
863 0);
864 if (!ethdev->data->mac_addrs) {
865 IPN3KE_AFU_PMD_ERR("Failed to "
866 "allocated memory for storing mac address");
867 return -ENODEV;
868 }
869
870 ethdev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
871
872 rte_spinlock_lock(&ipn3ke_link_notify_list_lk);
873 TAILQ_INSERT_TAIL(&ipn3ke_rpst_list, rpst, next);
874 ipn3ke_rpst_scan_num++;
875 ipn3ke_rpst_scan_check();
876 rte_spinlock_unlock(&ipn3ke_link_notify_list_lk);
877
878 return 0;
879 }
880
/* Tear down a representor: remove it from the scan list and, if it was
 * the last one, stop the link-scan thread.  Always returns 0.
 */
int
ipn3ke_rpst_uninit(struct rte_eth_dev *ethdev)
{
	struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(ethdev);

	rte_spinlock_lock(&ipn3ke_link_notify_list_lk);
	TAILQ_REMOVE(&ipn3ke_rpst_list, rpst, next);
	ipn3ke_rpst_scan_num--;
	ipn3ke_rpst_scan_check();
	rte_spinlock_unlock(&ipn3ke_link_notify_list_lk);

	return 0;
}