]> git.proxmox.com Git - ceph.git/blob - ceph/src/seastar/dpdk/drivers/net/ipn3ke/ipn3ke_ethdev.c
import 15.2.0 Octopus source
[ceph.git] / ceph / src / seastar / dpdk / drivers / net / ipn3ke / ipn3ke_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019 Intel Corporation
3 */
4
5 #include <stdint.h>
6
7 #include <rte_bus_pci.h>
8 #include <rte_ethdev.h>
9 #include <rte_pci.h>
10 #include <rte_malloc.h>
11
12 #include <rte_mbuf.h>
13 #include <rte_sched.h>
14 #include <rte_ethdev_driver.h>
15
16 #include <rte_io.h>
17 #include <rte_rawdev.h>
18 #include <rte_rawdev_pmd.h>
19 #include <rte_bus_ifpga.h>
20 #include <ifpga_common.h>
21 #include <ifpga_logs.h>
22
23 #include "ipn3ke_rawdev_api.h"
24 #include "ipn3ke_flow.h"
25 #include "ipn3ke_logs.h"
26 #include "ipn3ke_ethdev.h"
27
/* Log type id for this PMD; registered at init time via RTE_INIT below. */
int ipn3ke_afu_logtype;
29
/* AFU UUIDs this driver binds to; the { 0, 0 } entry terminates the table. */
static const struct rte_afu_uuid afu_uuid_ipn3ke_map[] = {
	{ MAP_UUID_10G_LOW, MAP_UUID_10G_HIGH },
	{ IPN3KE_UUID_10G_LOW, IPN3KE_UUID_10G_HIGH },
	{ IPN3KE_UUID_VBNG_LOW, IPN3KE_UUID_VBNG_HIGH},
	{ IPN3KE_UUID_25G_LOW, IPN3KE_UUID_25G_HIGH },
	{ 0, 0 /* sentinel */ },
};
37
38 static int
39 ipn3ke_indirect_read(struct ipn3ke_hw *hw, uint32_t *rd_data,
40 uint32_t addr, uint32_t dev_sel, uint32_t eth_group_sel)
41 {
42 uint32_t i, try_cnt;
43 uint64_t indirect_value;
44 volatile void *indirect_addrs;
45 uint64_t target_addr;
46 uint64_t read_data = 0;
47
48 if (eth_group_sel != 0 && eth_group_sel != 1)
49 return -1;
50
51 addr &= 0x3FF;
52 target_addr = addr | dev_sel << 17;
53
54 indirect_value = RCMD | target_addr << 32;
55 indirect_addrs = hw->eth_group_bar[eth_group_sel] + 0x10;
56
57 rte_delay_us(10);
58
59 rte_write64((rte_cpu_to_le_64(indirect_value)), indirect_addrs);
60
61 i = 0;
62 try_cnt = 10;
63 indirect_addrs = hw->eth_group_bar[eth_group_sel] +
64 0x18;
65 do {
66 read_data = rte_read64(indirect_addrs);
67 if ((read_data >> 32) == 1)
68 break;
69 i++;
70 } while (i <= try_cnt);
71 if (i > try_cnt)
72 return -1;
73
74 *rd_data = rte_le_to_cpu_32(read_data);
75 return 0;
76 }
77
78 static int
79 ipn3ke_indirect_write(struct ipn3ke_hw *hw, uint32_t wr_data,
80 uint32_t addr, uint32_t dev_sel, uint32_t eth_group_sel)
81 {
82 volatile void *indirect_addrs;
83 uint64_t indirect_value;
84 uint64_t target_addr;
85
86 if (eth_group_sel != 0 && eth_group_sel != 1)
87 return -1;
88
89 addr &= 0x3FF;
90 target_addr = addr | dev_sel << 17;
91
92 indirect_value = WCMD | target_addr << 32 | wr_data;
93 indirect_addrs = hw->eth_group_bar[eth_group_sel] + 0x10;
94
95 rte_write64((rte_cpu_to_le_64(indirect_value)), indirect_addrs);
96 return 0;
97 }
98
99 static int
100 ipn3ke_indirect_mac_read(struct ipn3ke_hw *hw, uint32_t *rd_data,
101 uint32_t addr, uint32_t mac_num, uint32_t eth_group_sel)
102 {
103 uint32_t dev_sel;
104
105 if (mac_num >= hw->port_num)
106 return -1;
107
108 mac_num &= 0x7;
109 dev_sel = mac_num * 2 + 3;
110
111 return ipn3ke_indirect_read(hw, rd_data, addr, dev_sel, eth_group_sel);
112 }
113
114 static int
115 ipn3ke_indirect_mac_write(struct ipn3ke_hw *hw, uint32_t wr_data,
116 uint32_t addr, uint32_t mac_num, uint32_t eth_group_sel)
117 {
118 uint32_t dev_sel;
119
120 if (mac_num >= hw->port_num)
121 return -1;
122
123 mac_num &= 0x7;
124 dev_sel = mac_num * 2 + 3;
125
126 return ipn3ke_indirect_write(hw, wr_data, addr, dev_sel, eth_group_sel);
127 }
128
/*
 * Populate hw->hw_cap by reading the capability header at IPN3KE_HW_BASE:
 * block offsets/sizes for the classifier, policer, RSS tables, DMAC map,
 * queue manager, CCB and QoS, followed by flow/table counts and the link
 * speed from the capability/status register blocks. Pure register reads;
 * no side effects on the device.
 */
static void
ipn3ke_hw_cap_init(struct ipn3ke_hw *hw)
{
	/* Header words: version and the three register-block base offsets. */
	hw->hw_cap.version_number = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0), 0, 0xFFFF);
	hw->hw_cap.capability_registers_block_offset = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x8), 0, 0xFFFFFFFF);
	hw->hw_cap.status_registers_block_offset = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x10), 0, 0xFFFFFFFF);
	hw->hw_cap.control_registers_block_offset = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x18), 0, 0xFFFFFFFF);
	/* Per-engine offset/size pairs. */
	hw->hw_cap.classify_offset = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x20), 0, 0xFFFFFFFF);
	hw->hw_cap.classy_size = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x24), 0, 0xFFFF);
	hw->hw_cap.policer_offset = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x28), 0, 0xFFFFFFFF);
	hw->hw_cap.policer_entry_size = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x2C), 0, 0xFFFF);
	hw->hw_cap.rss_key_array_offset = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x30), 0, 0xFFFFFFFF);
	hw->hw_cap.rss_key_entry_size = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x34), 0, 0xFFFF);
	hw->hw_cap.rss_indirection_table_array_offset = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x38), 0, 0xFFFFFFFF);
	hw->hw_cap.rss_indirection_table_entry_size = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x3C), 0, 0xFFFF);
	hw->hw_cap.dmac_map_offset = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x40), 0, 0xFFFFFFFF);
	hw->hw_cap.dmac_map_size = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x44), 0, 0xFFFF);
	hw->hw_cap.qm_offset = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x48), 0, 0xFFFFFFFF);
	hw->hw_cap.qm_size = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x4C), 0, 0xFFFF);
	hw->hw_cap.ccb_offset = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x50), 0, 0xFFFFFFFF);
	hw->hw_cap.ccb_entry_size = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x54), 0, 0xFFFF);
	hw->hw_cap.qos_offset = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x58), 0, 0xFFFFFFFF);
	hw->hw_cap.qos_size = IPN3KE_MASK_READ_REG(hw,
		(IPN3KE_HW_BASE + 0x5C), 0, 0xFFFF);

	/* Counts read from the capability register block. */
	hw->hw_cap.num_rx_flow = IPN3KE_MASK_READ_REG(hw,
		IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
		0, 0xFFFF);
	hw->hw_cap.num_rss_blocks = IPN3KE_MASK_READ_REG(hw,
		IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
		4, 0xFFFF);
	hw->hw_cap.num_dmac_map = IPN3KE_MASK_READ_REG(hw,
		IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
		8, 0xFFFF);
	hw->hw_cap.num_tx_flow = IPN3KE_MASK_READ_REG(hw,
		IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
		0xC, 0xFFFF);
	hw->hw_cap.num_smac_map = IPN3KE_MASK_READ_REG(hw,
		IPN3KE_CAPABILITY_REGISTERS_BLOCK_OFFSET,
		0x10, 0xFFFF);

	/* Link speed (Mbps) from the status register block. */
	hw->hw_cap.link_speed_mbps = IPN3KE_MASK_READ_REG(hw,
		IPN3KE_STATUS_REGISTERS_BLOCK_OFFSET,
		0, 0xFFFFF);
}
193
194 static int
195 ipn3ke_hw_init(struct rte_afu_device *afu_dev,
196 struct ipn3ke_hw *hw)
197 {
198 struct rte_rawdev *rawdev;
199 int ret;
200 int i;
201 uint64_t port_num, mac_type, index;
202
203 rawdev = afu_dev->rawdev;
204
205 hw->afu_id.uuid.uuid_low = afu_dev->id.uuid.uuid_low;
206 hw->afu_id.uuid.uuid_high = afu_dev->id.uuid.uuid_high;
207 hw->afu_id.port = afu_dev->id.port;
208 hw->hw_addr = (uint8_t *)(afu_dev->mem_resource[0].addr);
209 hw->f_mac_read = ipn3ke_indirect_mac_read;
210 hw->f_mac_write = ipn3ke_indirect_mac_write;
211 hw->rawdev = rawdev;
212 rawdev->dev_ops->attr_get(rawdev,
213 "LineSideBARIndex", &index);
214 hw->eth_group_bar[0] = (uint8_t *)(afu_dev->mem_resource[index].addr);
215 rawdev->dev_ops->attr_get(rawdev,
216 "NICSideBARIndex", &index);
217 hw->eth_group_bar[1] = (uint8_t *)(afu_dev->mem_resource[index].addr);
218 rawdev->dev_ops->attr_get(rawdev,
219 "LineSideLinkPortNum", &port_num);
220 hw->retimer.port_num = (int)port_num;
221 hw->port_num = hw->retimer.port_num;
222 rawdev->dev_ops->attr_get(rawdev,
223 "LineSideMACType", &mac_type);
224 hw->retimer.mac_type = (int)mac_type;
225
226 if (afu_dev->id.uuid.uuid_low == IPN3KE_UUID_VBNG_LOW &&
227 afu_dev->id.uuid.uuid_high == IPN3KE_UUID_VBNG_HIGH) {
228 ipn3ke_hw_cap_init(hw);
229 IPN3KE_AFU_PMD_DEBUG("UPL_version is 0x%x\n",
230 IPN3KE_READ_REG(hw, 0));
231
232 /* Reset FPGA IP */
233 IPN3KE_WRITE_REG(hw, IPN3KE_CTRL_RESET, 1);
234 IPN3KE_WRITE_REG(hw, IPN3KE_CTRL_RESET, 0);
235 }
236
237 if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
238 /* Enable inter connect channel */
239 for (i = 0; i < hw->port_num; i++) {
240 /* Enable the TX path */
241 ipn3ke_xmac_tx_enable(hw, i, 1);
242
243 /* Disables source address override */
244 ipn3ke_xmac_smac_ovd_dis(hw, i, 1);
245
246 /* Enable the RX path */
247 ipn3ke_xmac_rx_enable(hw, i, 1);
248
249 /* Clear all TX statistics counters */
250 ipn3ke_xmac_tx_clr_stcs(hw, i, 1);
251
252 /* Clear all RX statistics counters */
253 ipn3ke_xmac_rx_clr_stcs(hw, i, 1);
254 }
255 }
256
257 ret = rte_eth_switch_domain_alloc(&hw->switch_domain_id);
258 if (ret)
259 IPN3KE_AFU_PMD_WARN("failed to allocate switch domain for device %d",
260 ret);
261
262 hw->tm_hw_enable = 0;
263 hw->flow_hw_enable = 0;
264 if (afu_dev->id.uuid.uuid_low == IPN3KE_UUID_VBNG_LOW &&
265 afu_dev->id.uuid.uuid_high == IPN3KE_UUID_VBNG_HIGH) {
266 ret = ipn3ke_hw_tm_init(hw);
267 if (ret)
268 return ret;
269 hw->tm_hw_enable = 1;
270
271 ret = ipn3ke_flow_init(hw);
272 if (ret)
273 return ret;
274 hw->flow_hw_enable = 1;
275 }
276
277 hw->acc_tm = 0;
278 hw->acc_flow = 0;
279
280 return 0;
281 }
282
283 static void
284 ipn3ke_hw_uninit(struct ipn3ke_hw *hw)
285 {
286 int i;
287
288 if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) {
289 for (i = 0; i < hw->port_num; i++) {
290 /* Disable the TX path */
291 ipn3ke_xmac_tx_disable(hw, i, 1);
292
293 /* Disable the RX path */
294 ipn3ke_xmac_rx_disable(hw, i, 1);
295
296 /* Clear all TX statistics counters */
297 ipn3ke_xmac_tx_clr_stcs(hw, i, 1);
298
299 /* Clear all RX statistics counters */
300 ipn3ke_xmac_rx_clr_stcs(hw, i, 1);
301 }
302 }
303 }
304
305 static int ipn3ke_vswitch_probe(struct rte_afu_device *afu_dev)
306 {
307 char name[RTE_ETH_NAME_MAX_LEN];
308 struct ipn3ke_hw *hw;
309 int i, retval;
310
311 /* check if the AFU device has been probed already */
312 /* allocate shared mcp_vswitch structure */
313 if (!afu_dev->shared.data) {
314 snprintf(name, sizeof(name), "net_%s_hw",
315 afu_dev->device.name);
316 hw = rte_zmalloc_socket(name,
317 sizeof(struct ipn3ke_hw),
318 RTE_CACHE_LINE_SIZE,
319 afu_dev->device.numa_node);
320 if (!hw) {
321 IPN3KE_AFU_PMD_ERR("failed to allocate hardwart data");
322 retval = -ENOMEM;
323 return -ENOMEM;
324 }
325 afu_dev->shared.data = hw;
326
327 rte_spinlock_init(&afu_dev->shared.lock);
328 } else {
329 hw = afu_dev->shared.data;
330 }
331
332 retval = ipn3ke_hw_init(afu_dev, hw);
333 if (retval)
334 return retval;
335
336 /* probe representor ports */
337 for (i = 0; i < hw->port_num; i++) {
338 struct ipn3ke_rpst rpst = {
339 .port_id = i,
340 .switch_domain_id = hw->switch_domain_id,
341 .hw = hw
342 };
343
344 /* representor port net_bdf_port */
345 snprintf(name, sizeof(name), "net_%s_representor_%d",
346 afu_dev->device.name, i);
347
348 retval = rte_eth_dev_create(&afu_dev->device, name,
349 sizeof(struct ipn3ke_rpst), NULL, NULL,
350 ipn3ke_rpst_init, &rpst);
351
352 if (retval)
353 IPN3KE_AFU_PMD_ERR("failed to create ipn3ke representor %s.",
354 name);
355 }
356
357 return 0;
358 }
359
360 static int ipn3ke_vswitch_remove(struct rte_afu_device *afu_dev)
361 {
362 char name[RTE_ETH_NAME_MAX_LEN];
363 struct ipn3ke_hw *hw;
364 struct rte_eth_dev *ethdev;
365 int i, ret;
366
367 hw = afu_dev->shared.data;
368
369 /* remove representor ports */
370 for (i = 0; i < hw->port_num; i++) {
371 /* representor port net_bdf_port */
372 snprintf(name, sizeof(name), "net_%s_representor_%d",
373 afu_dev->device.name, i);
374
375 ethdev = rte_eth_dev_allocated(afu_dev->device.name);
376 if (!ethdev)
377 return -ENODEV;
378
379 rte_eth_dev_destroy(ethdev, ipn3ke_rpst_uninit);
380 }
381
382 ret = rte_eth_switch_domain_free(hw->switch_domain_id);
383 if (ret)
384 IPN3KE_AFU_PMD_WARN("failed to free switch domain: %d", ret);
385
386 /* hw uninit*/
387 ipn3ke_hw_uninit(hw);
388
389 return 0;
390 }
391
/* AFU driver registration: UUID match table plus probe/remove callbacks. */
static struct rte_afu_driver afu_ipn3ke_driver = {
	.id_table = afu_uuid_ipn3ke_map,
	.probe = ipn3ke_vswitch_probe,
	.remove = ipn3ke_vswitch_remove,
};

RTE_PMD_REGISTER_AFU(net_ipn3ke_afu, afu_ipn3ke_driver);
399
/* Devargs keys accepted by the "ipn3ke_cfg" vdev (NULL-terminated). */
static const char * const valid_args[] = {
#define IPN3KE_AFU_NAME "afu"
	IPN3KE_AFU_NAME,
#define IPN3KE_FPGA_ACCELERATION_LIST "fpga_acc"
	IPN3KE_FPGA_ACCELERATION_LIST,
#define IPN3KE_I40E_PF_LIST "i40e_pf"
	IPN3KE_I40E_PF_LIST,
	NULL
};
409
410 static int
411 ipn3ke_cfg_parse_acc_list(const char *afu_name,
412 const char *acc_list_name)
413 {
414 struct rte_afu_device *afu_dev;
415 struct ipn3ke_hw *hw;
416 const char *p_source;
417 char *p_start;
418 char name[RTE_ETH_NAME_MAX_LEN];
419
420 afu_dev = rte_ifpga_find_afu_by_name(afu_name);
421 if (!afu_dev)
422 return -1;
423 hw = afu_dev->shared.data;
424 if (!hw)
425 return -1;
426
427 p_source = acc_list_name;
428 while (*p_source) {
429 while ((*p_source == '{') || (*p_source == '|'))
430 p_source++;
431 p_start = name;
432 while ((*p_source != '|') && (*p_source != '}'))
433 *p_start++ = *p_source++;
434 *p_start = 0;
435 if (!strcmp(name, "tm") && hw->tm_hw_enable)
436 hw->acc_tm = 1;
437
438 if (!strcmp(name, "flow") && hw->flow_hw_enable)
439 hw->acc_flow = 1;
440
441 if (*p_source == '}')
442 return 0;
443 }
444
445 return 0;
446 }
447
448 static int
449 ipn3ke_cfg_parse_i40e_pf_ethdev(const char *afu_name,
450 const char *pf_name)
451 {
452 struct rte_eth_dev *i40e_eth, *rpst_eth;
453 struct rte_afu_device *afu_dev;
454 struct ipn3ke_rpst *rpst;
455 struct ipn3ke_hw *hw;
456 const char *p_source;
457 char *p_start;
458 char name[RTE_ETH_NAME_MAX_LEN];
459 uint16_t port_id;
460 int i;
461 int ret = -1;
462
463 afu_dev = rte_ifpga_find_afu_by_name(afu_name);
464 if (!afu_dev)
465 return -1;
466 hw = afu_dev->shared.data;
467 if (!hw)
468 return -1;
469
470 p_source = pf_name;
471 for (i = 0; i < hw->port_num; i++) {
472 snprintf(name, sizeof(name), "net_%s_representor_%d",
473 afu_name, i);
474 ret = rte_eth_dev_get_port_by_name(name, &port_id);
475 if (ret)
476 return -1;
477 rpst_eth = &rte_eth_devices[port_id];
478 rpst = IPN3KE_DEV_PRIVATE_TO_RPST(rpst_eth);
479
480 while ((*p_source == '{') || (*p_source == '|'))
481 p_source++;
482 p_start = name;
483 while ((*p_source != '|') && (*p_source != '}'))
484 *p_start++ = *p_source++;
485 *p_start = 0;
486
487 ret = rte_eth_dev_get_port_by_name(name, &port_id);
488 if (ret)
489 return -1;
490 i40e_eth = &rte_eth_devices[port_id];
491
492 rpst->i40e_pf_eth = i40e_eth;
493 rpst->i40e_pf_eth_port_id = port_id;
494
495 if ((*p_source == '}') || !(*p_source))
496 break;
497 }
498
499 return 0;
500 }
501
502 static int
503 ipn3ke_cfg_probe(struct rte_vdev_device *dev)
504 {
505 struct rte_devargs *devargs;
506 struct rte_kvargs *kvlist = NULL;
507 char *afu_name = NULL;
508 char *acc_name = NULL;
509 char *pf_name = NULL;
510 int afu_name_en = 0;
511 int acc_list_en = 0;
512 int pf_list_en = 0;
513 int ret = -1;
514
515 devargs = dev->device.devargs;
516
517 kvlist = rte_kvargs_parse(devargs->args, valid_args);
518 if (!kvlist) {
519 IPN3KE_AFU_PMD_ERR("error when parsing param");
520 goto end;
521 }
522
523 if (rte_kvargs_count(kvlist, IPN3KE_AFU_NAME) == 1) {
524 if (rte_kvargs_process(kvlist, IPN3KE_AFU_NAME,
525 &rte_ifpga_get_string_arg,
526 &afu_name) < 0) {
527 IPN3KE_AFU_PMD_ERR("error to parse %s",
528 IPN3KE_AFU_NAME);
529 goto end;
530 } else {
531 afu_name_en = 1;
532 }
533 }
534
535 if (rte_kvargs_count(kvlist, IPN3KE_FPGA_ACCELERATION_LIST) == 1) {
536 if (rte_kvargs_process(kvlist, IPN3KE_FPGA_ACCELERATION_LIST,
537 &rte_ifpga_get_string_arg,
538 &acc_name) < 0) {
539 IPN3KE_AFU_PMD_ERR("error to parse %s",
540 IPN3KE_FPGA_ACCELERATION_LIST);
541 goto end;
542 } else {
543 acc_list_en = 1;
544 }
545 }
546
547 if (rte_kvargs_count(kvlist, IPN3KE_I40E_PF_LIST) == 1) {
548 if (rte_kvargs_process(kvlist, IPN3KE_I40E_PF_LIST,
549 &rte_ifpga_get_string_arg,
550 &pf_name) < 0) {
551 IPN3KE_AFU_PMD_ERR("error to parse %s",
552 IPN3KE_I40E_PF_LIST);
553 goto end;
554 } else {
555 pf_list_en = 1;
556 }
557 }
558
559 if (!afu_name_en) {
560 IPN3KE_AFU_PMD_ERR("arg %s is mandatory for ipn3ke",
561 IPN3KE_AFU_NAME);
562 goto end;
563 }
564
565 if (!pf_list_en) {
566 IPN3KE_AFU_PMD_ERR("arg %s is mandatory for ipn3ke",
567 IPN3KE_I40E_PF_LIST);
568 goto end;
569 }
570
571 if (acc_list_en) {
572 ret = ipn3ke_cfg_parse_acc_list(afu_name, acc_name);
573 if (ret) {
574 IPN3KE_AFU_PMD_ERR("arg %s parse error for ipn3ke",
575 IPN3KE_FPGA_ACCELERATION_LIST);
576 goto end;
577 }
578 } else {
579 IPN3KE_AFU_PMD_INFO("arg %s is optional for ipn3ke, using i40e acc",
580 IPN3KE_FPGA_ACCELERATION_LIST);
581 }
582
583 ret = ipn3ke_cfg_parse_i40e_pf_ethdev(afu_name, pf_name);
584 if (ret)
585 goto end;
586 end:
587 if (kvlist)
588 rte_kvargs_free(kvlist);
589 if (afu_name)
590 free(afu_name);
591 if (acc_name)
592 free(acc_name);
593
594 return ret;
595 }
596
597 static int
598 ipn3ke_cfg_remove(struct rte_vdev_device *dev)
599 {
600 struct rte_devargs *devargs;
601 struct rte_kvargs *kvlist = NULL;
602 char *afu_name = NULL;
603 struct rte_afu_device *afu_dev;
604 int ret = -1;
605
606 devargs = dev->device.devargs;
607
608 kvlist = rte_kvargs_parse(devargs->args, valid_args);
609 if (!kvlist) {
610 IPN3KE_AFU_PMD_ERR("error when parsing param");
611 goto end;
612 }
613
614 if (rte_kvargs_count(kvlist, IPN3KE_AFU_NAME) == 1) {
615 if (rte_kvargs_process(kvlist, IPN3KE_AFU_NAME,
616 &rte_ifpga_get_string_arg,
617 &afu_name) < 0) {
618 IPN3KE_AFU_PMD_ERR("error to parse %s",
619 IPN3KE_AFU_NAME);
620 } else {
621 afu_dev = rte_ifpga_find_afu_by_name(afu_name);
622 if (!afu_dev)
623 goto end;
624 ret = ipn3ke_vswitch_remove(afu_dev);
625 }
626 } else {
627 IPN3KE_AFU_PMD_ERR("Remove ipn3ke_cfg %p error", dev);
628 }
629
630 end:
631 if (kvlist)
632 rte_kvargs_free(kvlist);
633
634 return ret;
635 }
636
/* Companion vdev that carries the afu/fpga_acc/i40e_pf configuration. */
static struct rte_vdev_driver ipn3ke_cfg_driver = {
	.probe = ipn3ke_cfg_probe,
	.remove = ipn3ke_cfg_remove,
};

RTE_PMD_REGISTER_VDEV(ipn3ke_cfg, ipn3ke_cfg_driver);
RTE_PMD_REGISTER_PARAM_STRING(ipn3ke_cfg,
	"afu=<string> "
	"fpga_acc=<string>"
	"i40e_pf=<string>");
647
/* Constructor: register this PMD's log type, defaulting to NOTICE level. */
RTE_INIT(ipn3ke_afu_init_log)
{
	ipn3ke_afu_logtype = rte_log_register("pmd.afu.ipn3ke");
	if (ipn3ke_afu_logtype >= 0)
		rte_log_set_level(ipn3ke_afu_logtype, RTE_LOG_NOTICE);
}