/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <fcntl.h>
#include <pthread.h>
#include <unistd.h>

#include <rte_string_fns.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_kni.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_bus_vdev.h>

/* Only single queue supported */
#define KNI_MAX_QUEUE_PER_PORT 1

#define MAX_KNI_PORTS 8

#define KNI_ETHER_MTU(mbuf_size) \
	((mbuf_size) - RTE_ETHER_HDR_LEN) /**< Ethernet MTU. */

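/*
 * Worked example (illustrative numbers, assuming default mempool
 * sizing): with a data room of RTE_MBUF_DEFAULT_BUF_SIZE
 * (2048 + RTE_PKTMBUF_HEADROOM), eth_kni_start() computes
 * mbuf_size = 2048, so the KNI MTU becomes
 * 2048 - RTE_ETHER_HDR_LEN (14) = 2034 bytes.
 */
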
#define ETH_KNI_NO_REQUEST_THREAD_ARG "no_request_thread"
static const char * const valid_arguments[] = {
	ETH_KNI_NO_REQUEST_THREAD_ARG,
	NULL
};

struct eth_kni_args {
	int no_request_thread;
};

struct pmd_queue_stats {
	uint64_t pkts;
	uint64_t bytes;
};

struct pmd_queue {
	struct pmd_internals *internals;
	struct rte_mempool *mb_pool;

	struct pmd_queue_stats rx;
	struct pmd_queue_stats tx;
};

struct pmd_internals {
	struct rte_kni *kni;
	int is_kni_started;

	pthread_t thread;
	int stop_thread;
	int no_request_thread;

	struct rte_ether_addr eth_addr;

	struct pmd_queue rx_queues[KNI_MAX_QUEUE_PER_PORT];
	struct pmd_queue tx_queues[KNI_MAX_QUEUE_PER_PORT];
};

static const struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_FIXED,
};
static int is_kni_initialized;

static int eth_kni_logtype;

#define PMD_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, eth_kni_logtype, \
		"%s(): " fmt "\n", __func__, ##args)

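/*
 * Burst receive/transmit: bursts are passed straight between the
 * ethdev queue and the KNI FIFOs. Note that only packet counts are
 * accumulated on this path; the rx.bytes/tx.bytes counters are never
 * updated, so the byte statistics reported further below always read
 * zero.
 */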
static uint16_t
eth_kni_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	struct pmd_queue *kni_q = q;
	struct rte_kni *kni = kni_q->internals->kni;
	uint16_t nb_pkts;

	nb_pkts = rte_kni_rx_burst(kni, bufs, nb_bufs);

	kni_q->rx.pkts += nb_pkts;

	return nb_pkts;
}

static uint16_t
eth_kni_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	struct pmd_queue *kni_q = q;
	struct rte_kni *kni = kni_q->internals->kni;
	uint16_t nb_pkts;

	nb_pkts = rte_kni_tx_burst(kni, bufs, nb_bufs);

	kni_q->tx.pkts += nb_pkts;

	return nb_pkts;
}

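/*
 * Body of the optional control thread: poll the kernel side for
 * interface requests (link up/down, MTU change, ...) every 500 ms
 * until eth_kni_dev_stop() asks it to stop.
 */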
static void *
kni_handle_request(void *param)
{
	struct pmd_internals *internals = param;
#define MS 1000

	while (!internals->stop_thread) {
		rte_kni_handle_request(internals->kni);
		usleep(500 * MS);
	}

	return param;
}

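/*
 * Create the kernel-side KNI interface for this port. The kernel
 * netdev inherits the vdev name minus its "net_" prefix, so vdev
 * "net_kni0" appears as interface "kni0".
 */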
static int
eth_kni_start(struct rte_eth_dev *dev)
{
	struct pmd_internals *internals = dev->data->dev_private;
	uint16_t port_id = dev->data->port_id;
	struct rte_mempool *mb_pool;
	struct rte_kni_conf conf;
	const char *name = dev->device->name + 4; /* remove net_ */

	/* Zero the config so fields this PMD never sets (core binding,
	 * MAC address) do not pass stack garbage to rte_kni_alloc().
	 */
	memset(&conf, 0, sizeof(conf));

	mb_pool = internals->rx_queues[0].mb_pool;
	strlcpy(conf.name, name, RTE_KNI_NAMESIZE);
	conf.force_bind = 0;
	conf.group_id = port_id;
	conf.mbuf_size =
		rte_pktmbuf_data_room_size(mb_pool) - RTE_PKTMBUF_HEADROOM;
	conf.mtu = KNI_ETHER_MTU(conf.mbuf_size);

	internals->kni = rte_kni_alloc(mb_pool, &conf, NULL);
	if (internals->kni == NULL) {
		PMD_LOG(ERR,
			"Failed to create kni interface for port: %d",
			port_id);
		return -1;
	}

	return 0;
}

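/*
 * dev_start: allocate the KNI interface on first start and, unless
 * disabled through the "no_request_thread" devarg, spawn the control
 * thread that services kernel requests.
 */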
static int
eth_kni_dev_start(struct rte_eth_dev *dev)
{
	struct pmd_internals *internals = dev->data->dev_private;
	int ret;

	if (internals->is_kni_started == 0) {
		ret = eth_kni_start(dev);
		if (ret)
			return -1;
		internals->is_kni_started = 1;
	}

	if (internals->no_request_thread == 0) {
		internals->stop_thread = 0;

		ret = rte_ctrl_thread_create(&internals->thread,
			"kni_handle_req", NULL,
			kni_handle_request, internals);
		if (ret) {
			PMD_LOG(ERR,
				"Failed to create kni request thread");
			return -1;
		}
	}

	dev->data->dev_link.link_status = 1;

	return 0;
}

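/*
 * dev_stop: tear down the request thread if it is still running.
 * pthread_cancel() interrupts the usleep() in kni_handle_request() so
 * the join does not have to wait out a full poll interval.
 */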
static void
eth_kni_dev_stop(struct rte_eth_dev *dev)
{
	struct pmd_internals *internals = dev->data->dev_private;
	int ret;

	if (internals->no_request_thread == 0 && internals->stop_thread == 0) {
		internals->stop_thread = 1;

		ret = pthread_cancel(internals->thread);
		if (ret)
			PMD_LOG(ERR, "Can't cancel the thread");

		ret = pthread_join(internals->thread, NULL);
		if (ret)
			PMD_LOG(ERR, "Can't join the thread");
	}

	dev->data->dev_link.link_status = 0;
}

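/*
 * dev_close: stop the port and hand the kernel interface back through
 * rte_kni_release().
 */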
static void
eth_kni_close(struct rte_eth_dev *eth_dev)
{
	struct pmd_internals *internals;
	int ret;

	eth_kni_dev_stop(eth_dev);

	/* mac_addrs must not be freed alone because it is part of dev_private */
	eth_dev->data->mac_addrs = NULL;

	internals = eth_dev->data->dev_private;
	ret = rte_kni_release(internals->kni);
	if (ret)
		PMD_LOG(WARNING, "Not able to release kni for %s",
			eth_dev->data->name);
}

static int
eth_kni_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
eth_kni_dev_info(struct rte_eth_dev *dev __rte_unused,
		struct rte_eth_dev_info *dev_info)
{
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = UINT32_MAX;
	dev_info->max_rx_queues = KNI_MAX_QUEUE_PER_PORT;
	dev_info->max_tx_queues = KNI_MAX_QUEUE_PER_PORT;
	dev_info->min_rx_bufsize = 0;

	return 0;
}

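/*
 * Queue setup: one RX and one TX queue per port. Only the RX queue
 * records a mempool reference; eth_kni_start() later hands that pool
 * to rte_kni_alloc().
 */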
static int
eth_kni_rx_queue_setup(struct rte_eth_dev *dev,
		uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct pmd_queue *q;

	q = &internals->rx_queues[rx_queue_id];
	q->internals = internals;
	q->mb_pool = mb_pool;

	dev->data->rx_queues[rx_queue_id] = q;

	return 0;
}

static int
eth_kni_tx_queue_setup(struct rte_eth_dev *dev,
		uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct pmd_queue *q;

	q = &internals->tx_queues[tx_queue_id];
	q->internals = internals;

	dev->data->tx_queues[tx_queue_id] = q;

	return 0;
}

static void
eth_kni_queue_release(void *q __rte_unused)
{
}

static int
eth_kni_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused)
{
	return 0;
}

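/*
 * Fold the per-queue counters into the port totals. Only the first
 * RTE_ETHDEV_QUEUE_STAT_CNTRS queues are reported, which never bites
 * here because the PMD supports a single queue pair.
 */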
static int
eth_kni_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	unsigned long rx_packets_total = 0, rx_bytes_total = 0;
	unsigned long tx_packets_total = 0, tx_bytes_total = 0;
	struct rte_eth_dev_data *data = dev->data;
	unsigned int i, num_stats;
	struct pmd_queue *q;

	num_stats = RTE_MIN((unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			data->nb_rx_queues);
	for (i = 0; i < num_stats; i++) {
		q = data->rx_queues[i];
		stats->q_ipackets[i] = q->rx.pkts;
		stats->q_ibytes[i] = q->rx.bytes;
		rx_packets_total += stats->q_ipackets[i];
		rx_bytes_total += stats->q_ibytes[i];
	}

	num_stats = RTE_MIN((unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			data->nb_tx_queues);
	for (i = 0; i < num_stats; i++) {
		q = data->tx_queues[i];
		stats->q_opackets[i] = q->tx.pkts;
		stats->q_obytes[i] = q->tx.bytes;
		tx_packets_total += stats->q_opackets[i];
		tx_bytes_total += stats->q_obytes[i];
	}

	stats->ipackets = rx_packets_total;
	stats->ibytes = rx_bytes_total;
	stats->opackets = tx_packets_total;
	stats->obytes = tx_bytes_total;

	return 0;
}

static int
eth_kni_stats_reset(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *data = dev->data;
	struct pmd_queue *q;
	unsigned int i;

	for (i = 0; i < data->nb_rx_queues; i++) {
		q = data->rx_queues[i];
		q->rx.pkts = 0;
		q->rx.bytes = 0;
	}
	for (i = 0; i < data->nb_tx_queues; i++) {
		q = data->tx_queues[i];
		q->tx.pkts = 0;
		q->tx.bytes = 0;
	}

	return 0;
}

static const struct eth_dev_ops eth_kni_ops = {
	.dev_start = eth_kni_dev_start,
	.dev_stop = eth_kni_dev_stop,
	.dev_close = eth_kni_close,
	.dev_configure = eth_kni_dev_configure,
	.dev_infos_get = eth_kni_dev_info,
	.rx_queue_setup = eth_kni_rx_queue_setup,
	.tx_queue_setup = eth_kni_tx_queue_setup,
	.rx_queue_release = eth_kni_queue_release,
	.tx_queue_release = eth_kni_queue_release,
	.link_update = eth_kni_link_update,
	.stats_get = eth_kni_stats_get,
	.stats_reset = eth_kni_stats_reset,
};

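/*
 * Allocate and populate the ethdev for a new KNI vdev: a single
 * RX/TX queue pair, a random MAC address stored inside dev_private,
 * and promiscuous/all-multicast reported as enabled.
 */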
static struct rte_eth_dev *
eth_kni_create(struct rte_vdev_device *vdev,
		struct eth_kni_args *args,
		unsigned int numa_node)
{
	struct pmd_internals *internals;
	struct rte_eth_dev_data *data;
	struct rte_eth_dev *eth_dev;

	PMD_LOG(INFO, "Creating kni ethdev on numa socket %u",
			numa_node);

	/* reserve an ethdev entry */
	eth_dev = rte_eth_vdev_allocate(vdev, sizeof(*internals));
	if (!eth_dev)
		return NULL;

	internals = eth_dev->data->dev_private;
	data = eth_dev->data;
	data->nb_rx_queues = 1;
	data->nb_tx_queues = 1;
	data->dev_link = pmd_link;
	data->mac_addrs = &internals->eth_addr;
	data->promiscuous = 1;
	data->all_multicast = 1;

	data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;

	rte_eth_random_addr(internals->eth_addr.addr_bytes);

	eth_dev->dev_ops = &eth_kni_ops;

	internals->no_request_thread = args->no_request_thread;

	return eth_dev;
}

static int
kni_init(void)
{
	if (is_kni_initialized == 0)
		rte_kni_init(MAX_KNI_PORTS);

	is_kni_initialized++;

	return 0;
}

static int
eth_kni_kvargs_process(struct eth_kni_args *args, const char *params)
{
	struct rte_kvargs *kvlist;

	kvlist = rte_kvargs_parse(params, valid_arguments);
	if (kvlist == NULL)
		return -1;

	memset(args, 0, sizeof(struct eth_kni_args));

	if (rte_kvargs_count(kvlist, ETH_KNI_NO_REQUEST_THREAD_ARG) == 1)
		args->no_request_thread = 1;

	rte_kvargs_free(kvlist);

	return 0;
}

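/*
 * Probe: a secondary process only attaches to the port created by the
 * primary. The primary parses the devargs, initializes the KNI
 * subsystem once, creates the ethdev and wires up the burst handlers.
 */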
static int
eth_kni_probe(struct rte_vdev_device *vdev)
{
	struct rte_eth_dev *eth_dev;
	struct eth_kni_args args;
	const char *name;
	const char *params;
	int ret;

	name = rte_vdev_device_name(vdev);
	params = rte_vdev_device_args(vdev);
	PMD_LOG(INFO, "Initializing eth_kni for %s", name);

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			PMD_LOG(ERR, "Failed to probe %s", name);
			return -1;
		}
		/* TODO: request info from primary to set up Rx and Tx */
		eth_dev->dev_ops = &eth_kni_ops;
		eth_dev->device = &vdev->device;
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	ret = eth_kni_kvargs_process(&args, params);
	if (ret < 0)
		return ret;

	ret = kni_init();
	if (ret < 0)
		return ret;

	eth_dev = eth_kni_create(vdev, &args, rte_socket_id());
	if (eth_dev == NULL)
		goto kni_uninit;

	eth_dev->rx_pkt_burst = eth_kni_rx;
	eth_dev->tx_pkt_burst = eth_kni_tx;

	rte_eth_dev_probing_finish(eth_dev);
	return 0;

kni_uninit:
	is_kni_initialized--;
	if (is_kni_initialized == 0)
		rte_kni_close();
	return -1;
}

static int
eth_kni_remove(struct rte_vdev_device *vdev)
{
	struct rte_eth_dev *eth_dev;
	const char *name;

	name = rte_vdev_device_name(vdev);
	PMD_LOG(INFO, "Un-initializing eth_kni for %s", name);

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(name);
	if (eth_dev == NULL)
		return -1;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		eth_kni_dev_stop(eth_dev);
		return rte_eth_dev_release_port(eth_dev);
	}

	eth_kni_close(eth_dev);
	rte_eth_dev_release_port(eth_dev);

	is_kni_initialized--;
	if (is_kni_initialized == 0)
		rte_kni_close();

	return 0;
}

static struct rte_vdev_driver eth_kni_drv = {
	.probe = eth_kni_probe,
	.remove = eth_kni_remove,
};

RTE_PMD_REGISTER_VDEV(net_kni, eth_kni_drv);
RTE_PMD_REGISTER_PARAM_STRING(net_kni, ETH_KNI_NO_REQUEST_THREAD_ARG "=<int>");

RTE_INIT(eth_kni_init_log)
{
	eth_kni_logtype = rte_log_register("pmd.net.kni");
	if (eth_kni_logtype >= 0)
		rte_log_set_level(eth_kni_logtype, RTE_LOG_NOTICE);
}
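
/*
 * Usage sketch (illustrative; assumes the rte_kni kernel module is
 * loaded and the vdev name "net_kni0" is free):
 *
 *   testpmd -l 0-1 -n 4 --vdev=net_kni0,no_request_thread=1 -- -i
 *
 * This exposes a DPDK port whose traffic is exchanged with a kernel
 * "kni0" netdevice that can then be managed with standard tools such
 * as ip(8) and ethtool(8).
 */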