/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 NXP
 */

#include <assert.h>
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <string.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_eal.h>
#include <rte_kvargs.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_lcore.h>
#include <rte_bus_vdev.h>

#include <rte_rawdev.h>
#include <rte_rawdev_pmd.h>

#include "skeleton_rawdev.h"

/* Dynamic log type identifier */
int skeleton_pmd_logtype;

/* Count of instances */
static uint16_t skeldev_init_once;

/**< Rawdev Skeleton dummy driver name */
#define SKELETON_PMD_RAWDEV_NAME rawdev_skeleton

/**< Skeleton rawdev driver object */
static struct rte_vdev_driver skeleton_pmd_drv;

struct queue_buffers {
	void *bufs[SKELETON_QUEUE_MAX_DEPTH];
};

static struct queue_buffers queue_buf[SKELETON_MAX_QUEUES] = {};
static void clear_queue_bufs(int queue_id);

static void skeleton_rawdev_info_get(struct rte_rawdev *dev,
				     rte_rawdev_obj_t dev_info)
{
	struct skeleton_rawdev *skeldev;
	struct skeleton_rawdev_conf *skeldev_conf;

	SKELETON_PMD_FUNC_TRACE();

	if (!dev_info) {
		SKELETON_PMD_ERR("Invalid request");
		return;
	}

	skeldev = skeleton_rawdev_get_priv(dev);

	skeldev_conf = dev_info;

	skeldev_conf->num_queues = skeldev->num_queues;
	skeldev_conf->capabilities = skeldev->capabilities;
	skeldev_conf->device_state = skeldev->device_state;
	skeldev_conf->firmware_state = skeldev->fw.firmware_state;
}

static int skeleton_rawdev_configure(const struct rte_rawdev *dev,
				     rte_rawdev_obj_t config)
{
	struct skeleton_rawdev *skeldev;
	struct skeleton_rawdev_conf *skeldev_conf;

	SKELETON_PMD_FUNC_TRACE();

	RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);

	if (!config) {
		SKELETON_PMD_ERR("Invalid configuration");
		return -EINVAL;
	}

	skeldev_conf = config;
	skeldev = skeleton_rawdev_get_priv(dev);

	if (skeldev_conf->num_queues <= SKELETON_MAX_QUEUES)
		skeldev->num_queues = skeldev_conf->num_queues;
	else
		return -EINVAL;

	skeldev->capabilities = skeldev_conf->capabilities;

	return 0;
}
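
/* Illustrative sketch (not part of the driver): an application reaches
 * skeleton_rawdev_info_get()/skeleton_rawdev_configure() through the public
 * rte_rawdev API, passing a struct skeleton_rawdev_conf as the opaque
 * dev_private payload of rte_rawdev_info. This mirrors the pattern used by
 * the rawdev selftest; exact rte_rawdev_* signatures may differ between DPDK
 * versions, and the dev_id lookup below is an assumption.
 *
 *	struct skeleton_rawdev_conf conf = {0};
 *	struct rte_rawdev_info info = { .dev_private = &conf };
 *	uint16_t dev_id = rte_rawdev_get_dev_id("rawdev_skeleton");
 *
 *	rte_rawdev_info_get(dev_id, &info);
 *	conf.num_queues = 1;
 *	rte_rawdev_configure(dev_id, &info);
 */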

static int skeleton_rawdev_start(struct rte_rawdev *dev)
{
	int ret = 0;
	struct skeleton_rawdev *skeldev;
	enum skeleton_firmware_state fw_state;
	enum skeleton_device_state device_state;

	SKELETON_PMD_FUNC_TRACE();

	RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);

	skeldev = skeleton_rawdev_get_priv(dev);

	fw_state = skeldev->fw.firmware_state;
	device_state = skeldev->device_state;

	if (fw_state == SKELETON_FW_LOADED &&
		device_state == SKELETON_DEV_STOPPED) {
		skeldev->device_state = SKELETON_DEV_RUNNING;
	} else {
		SKELETON_PMD_ERR("Device not ready for starting");
		ret = -EINVAL;
	}

	return ret;
}

static void skeleton_rawdev_stop(struct rte_rawdev *dev)
{
	struct skeleton_rawdev *skeldev;

	SKELETON_PMD_FUNC_TRACE();

	if (dev) {
		skeldev = skeleton_rawdev_get_priv(dev);
		skeldev->device_state = SKELETON_DEV_STOPPED;
	}
}

static void
reset_queues(struct skeleton_rawdev *skeldev)
{
	int i;

	for (i = 0; i < SKELETON_MAX_QUEUES; i++) {
		skeldev->queues[i].depth = SKELETON_QUEUE_DEF_DEPTH;
		skeldev->queues[i].state = SKELETON_QUEUE_DETACH;
	}
}

static void
reset_attribute_table(struct skeleton_rawdev *skeldev)
{
	int i;

	for (i = 0; i < SKELETON_MAX_ATTRIBUTES; i++) {
		if (skeldev->attr[i].name) {
			free(skeldev->attr[i].name);
			skeldev->attr[i].name = NULL;
		}
	}
}

static int skeleton_rawdev_close(struct rte_rawdev *dev)
{
	int ret = 0, i;
	struct skeleton_rawdev *skeldev;
	enum skeleton_firmware_state fw_state;
	enum skeleton_device_state device_state;

	SKELETON_PMD_FUNC_TRACE();

	RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);

	skeldev = skeleton_rawdev_get_priv(dev);

	fw_state = skeldev->fw.firmware_state;
	device_state = skeldev->device_state;

	reset_queues(skeldev);
	reset_attribute_table(skeldev);

	switch (fw_state) {
	case SKELETON_FW_LOADED:
		if (device_state == SKELETON_DEV_RUNNING) {
			SKELETON_PMD_ERR("Cannot close running device");
			ret = -EINVAL;
		} else {
			/* Probably call fw reset here */
			skeldev->fw.firmware_state = SKELETON_FW_READY;
		}
		break;
	case SKELETON_FW_READY:
	case SKELETON_FW_ERROR:
	default:
		SKELETON_PMD_DEBUG("Device already in stopped state");
		ret = -EINVAL;
		break;
	}

	/* Clear all allocated queues */
	for (i = 0; i < SKELETON_MAX_QUEUES; i++)
		clear_queue_bufs(i);

	return ret;
}

static int skeleton_rawdev_reset(struct rte_rawdev *dev)
{
	struct skeleton_rawdev *skeldev;

	SKELETON_PMD_FUNC_TRACE();

	RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);

	skeldev = skeleton_rawdev_get_priv(dev);

	SKELETON_PMD_DEBUG("Resetting device");
	skeldev->fw.firmware_state = SKELETON_FW_READY;

	return 0;
}

static void skeleton_rawdev_queue_def_conf(struct rte_rawdev *dev,
					   uint16_t queue_id,
					   rte_rawdev_obj_t queue_conf)
{
	struct skeleton_rawdev *skeldev;
	struct skeleton_rawdev_queue *skelq;

	SKELETON_PMD_FUNC_TRACE();

	if (!dev || !queue_conf)
		return;

	skeldev = skeleton_rawdev_get_priv(dev);
	skelq = &skeldev->queues[queue_id];

	if (queue_id < SKELETON_MAX_QUEUES)
		rte_memcpy(queue_conf, skelq,
			   sizeof(struct skeleton_rawdev_queue));
}

static void
clear_queue_bufs(int queue_id)
{
	int i;

	/* Clear buffers for queue_id */
	for (i = 0; i < SKELETON_QUEUE_MAX_DEPTH; i++)
		queue_buf[queue_id].bufs[i] = NULL;
}

static int skeleton_rawdev_queue_setup(struct rte_rawdev *dev,
				       uint16_t queue_id,
				       rte_rawdev_obj_t queue_conf)
{
	int ret = 0;
	struct skeleton_rawdev *skeldev;
	struct skeleton_rawdev_queue *q;

	SKELETON_PMD_FUNC_TRACE();

	if (!dev || !queue_conf)
		return -EINVAL;

	skeldev = skeleton_rawdev_get_priv(dev);
	q = &skeldev->queues[queue_id];

	if (skeldev->num_queues > queue_id &&
	    q->depth < SKELETON_QUEUE_MAX_DEPTH) {
		rte_memcpy(q, queue_conf,
			   sizeof(struct skeleton_rawdev_queue));
		clear_queue_bufs(queue_id);
	} else {
		SKELETON_PMD_ERR("Invalid queue configuration");
		ret = -EINVAL;
	}

	return ret;
}
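
/* Illustrative sketch (not part of the driver): queue configuration is
 * exchanged as an opaque struct skeleton_rawdev_queue. A caller typically
 * fetches the default configuration, adjusts it, and hands it back. dev_id is
 * assumed to be known, and SKELETON_QUEUE_ATTACH is assumed to be the
 * counterpart of SKELETON_QUEUE_DETACH in skeleton_rawdev.h.
 *
 *	struct skeleton_rawdev_queue q = {0};
 *
 *	rte_rawdev_queue_conf_get(dev_id, 0, &q);
 *	q.state = SKELETON_QUEUE_ATTACH;
 *	rte_rawdev_queue_setup(dev_id, 0, &q);
 */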

static int skeleton_rawdev_queue_release(struct rte_rawdev *dev,
					 uint16_t queue_id)
{
	int ret = 0;
	struct skeleton_rawdev *skeldev;

	SKELETON_PMD_FUNC_TRACE();

	RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);

	skeldev = skeleton_rawdev_get_priv(dev);

	if (skeldev->num_queues > queue_id) {
		skeldev->queues[queue_id].state = SKELETON_QUEUE_DETACH;
		skeldev->queues[queue_id].depth = SKELETON_QUEUE_DEF_DEPTH;
		clear_queue_bufs(queue_id);
	} else {
		SKELETON_PMD_ERR("Invalid queue configuration");
		ret = -EINVAL;
	}

	return ret;
}

static uint16_t skeleton_rawdev_queue_count(struct rte_rawdev *dev)
{
	struct skeleton_rawdev *skeldev;

	SKELETON_PMD_FUNC_TRACE();

	RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);

	skeldev = skeleton_rawdev_get_priv(dev);
	return skeldev->num_queues;
}

static int skeleton_rawdev_get_attr(struct rte_rawdev *dev,
				    const char *attr_name,
				    uint64_t *attr_value)
{
	int i;
	uint8_t done = 0;
	struct skeleton_rawdev *skeldev;

	SKELETON_PMD_FUNC_TRACE();

	if (!dev || !attr_name || !attr_value) {
		SKELETON_PMD_ERR("Invalid arguments for getting attributes");
		return -EINVAL;
	}

	skeldev = skeleton_rawdev_get_priv(dev);

	for (i = 0; i < SKELETON_MAX_ATTRIBUTES; i++) {
		if (!skeldev->attr[i].name)
			continue;

		if (!strncmp(skeldev->attr[i].name, attr_name,
			     SKELETON_ATTRIBUTE_NAME_MAX)) {
			*attr_value = skeldev->attr[i].value;
			done = 1;
			SKELETON_PMD_DEBUG("Attribute (%s) Value (%" PRIu64 ")",
					   attr_name, *attr_value);
			break;
		}
	}

	if (done)
		return 0;

	/* Attribute not found */
	return -EINVAL;
}

static int skeleton_rawdev_set_attr(struct rte_rawdev *dev,
				    const char *attr_name,
				    const uint64_t attr_value)
{
	int i;
	uint8_t done = 0;
	struct skeleton_rawdev *skeldev;

	SKELETON_PMD_FUNC_TRACE();

	if (!dev || !attr_name) {
		SKELETON_PMD_ERR("Invalid arguments for setting attributes");
		return -EINVAL;
	}

	skeldev = skeleton_rawdev_get_priv(dev);

	/* Check if attribute already exists */
	for (i = 0; i < SKELETON_MAX_ATTRIBUTES; i++) {
		if (!skeldev->attr[i].name)
			break;

		if (!strncmp(skeldev->attr[i].name, attr_name,
			     SKELETON_ATTRIBUTE_NAME_MAX)) {
			/* Update value */
			skeldev->attr[i].value = attr_value;
			done = 1;
			break;
		}
	}

	if (!done) {
		if (i < SKELETON_MAX_ATTRIBUTES) {
			/* There is still space to insert one more */
			skeldev->attr[i].name = strdup(attr_name);
			if (!skeldev->attr[i].name)
				return -ENOMEM;

			skeldev->attr[i].value = attr_value;
			return 0;
		}
	}

	return -EINVAL;
}
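
/* Illustrative sketch (not part of the driver): attributes are simple
 * name/value pairs kept in the private attribute table above, so a caller can
 * do round trips such as the following. dev_id and the attribute name are
 * arbitrary illustrations.
 *
 *	uint64_t value;
 *
 *	rte_rawdev_set_attr(dev_id, "Test1", 100);
 *	rte_rawdev_get_attr(dev_id, "Test1", &value);	// value == 100
 */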

static int skeleton_rawdev_enqueue_bufs(struct rte_rawdev *dev,
					struct rte_rawdev_buf **buffers,
					unsigned int count,
					rte_rawdev_obj_t context)
{
	unsigned int i;
	uint16_t q_id;
	RTE_SET_USED(dev);

	/* The context is essentially the queue_id, transferred as an opaque
	 * object through the library layer. This can help in complex
	 * implementations which require more information than just an
	 * integer - for example, a queue-pair.
	 */
	q_id = *((int *)context);

	for (i = 0; i < count; i++)
		queue_buf[q_id].bufs[i] = buffers[i]->buf_addr;

	return i;
}

static int skeleton_rawdev_dequeue_bufs(struct rte_rawdev *dev,
					struct rte_rawdev_buf **buffers,
					unsigned int count,
					rte_rawdev_obj_t context)
{
	unsigned int i;
	uint16_t q_id;
	RTE_SET_USED(dev);

	/* The context is essentially the queue_id, transferred as an opaque
	 * object through the library layer. This can help in complex
	 * implementations which require more information than just an
	 * integer - for example, a queue-pair.
	 */
	q_id = *((int *)context);

	for (i = 0; i < count; i++)
		buffers[i]->buf_addr = queue_buf[q_id].bufs[i];

	return i;
}
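
/* Illustrative sketch (not part of the driver): the rawdev library forwards
 * 'context' untouched, so caller and driver simply agree here that it points
 * to the queue id. dev_id and the payload pointer are assumed to come from
 * the application; exact rte_rawdev_* signatures may vary between versions.
 *
 *	struct rte_rawdev_buf buf = { .buf_addr = payload };
 *	struct rte_rawdev_buf *bufs[1] = { &buf };
 *	int queue_id = 0;
 *
 *	rte_rawdev_enqueue_buffers(dev_id, bufs, 1, &queue_id);
 *	rte_rawdev_dequeue_buffers(dev_id, bufs, 1, &queue_id);
 */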

static int skeleton_rawdev_dump(struct rte_rawdev *dev, FILE *f)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(f);

	return 0;
}

static int skeleton_rawdev_firmware_status_get(struct rte_rawdev *dev,
					       rte_rawdev_obj_t status_info)
{
	struct skeleton_rawdev *skeldev;

	SKELETON_PMD_FUNC_TRACE();

	RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);

	skeldev = skeleton_rawdev_get_priv(dev);

	if (status_info)
		memcpy(status_info, &skeldev->fw.firmware_state,
		       sizeof(enum skeleton_firmware_state));

	return 0;
}


static int skeleton_rawdev_firmware_version_get(
					struct rte_rawdev *dev,
					rte_rawdev_obj_t version_info)
{
	struct skeleton_rawdev *skeldev;
	struct skeleton_firmware_version_info *vi;

	SKELETON_PMD_FUNC_TRACE();

	skeldev = skeleton_rawdev_get_priv(dev);
	vi = version_info;

	vi->major = skeldev->fw.firmware_version.major;
	vi->minor = skeldev->fw.firmware_version.minor;
	vi->subrel = skeldev->fw.firmware_version.subrel;

	return 0;
}

static int skeleton_rawdev_firmware_load(struct rte_rawdev *dev,
					 rte_rawdev_obj_t firmware_buf)
{
	struct skeleton_rawdev *skeldev;

	SKELETON_PMD_FUNC_TRACE();

	skeldev = skeleton_rawdev_get_priv(dev);

	/* firmware_buf would be a mmap'd, possibly DMA-able, buffer area. As
	 * this is a dummy driver, all this does is check that firmware_buf is
	 * not NULL and set the firmware state accordingly.
	 */
	if (!firmware_buf)
		return -EINVAL;

	skeldev->fw.firmware_state = SKELETON_FW_LOADED;

	return 0;
}
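
/* Illustrative sketch (not part of the driver): the firmware handling is a
 * pure state machine, so any non-NULL pointer acts as a "firmware image".
 * A typical lifecycle through the public rte_rawdev calls might look like
 * this; dev_id is assumed to be known.
 *
 *	char dummy_fw[64];
 *
 *	rte_rawdev_firmware_load(dev_id, dummy_fw);	// FW_READY -> FW_LOADED
 *	rte_rawdev_start(dev_id);			// DEV_STOPPED -> DEV_RUNNING
 *	rte_rawdev_stop(dev_id);
 *	rte_rawdev_firmware_unload(dev_id);		// back to FW_READY
 */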

static int skeleton_rawdev_firmware_unload(struct rte_rawdev *dev)
{
	struct skeleton_rawdev *skeldev;

	SKELETON_PMD_FUNC_TRACE();

	skeldev = skeleton_rawdev_get_priv(dev);

	skeldev->fw.firmware_state = SKELETON_FW_READY;

	return 0;
}

static const struct rte_rawdev_ops skeleton_rawdev_ops = {
	.dev_info_get = skeleton_rawdev_info_get,
	.dev_configure = skeleton_rawdev_configure,
	.dev_start = skeleton_rawdev_start,
	.dev_stop = skeleton_rawdev_stop,
	.dev_close = skeleton_rawdev_close,
	.dev_reset = skeleton_rawdev_reset,

	.queue_def_conf = skeleton_rawdev_queue_def_conf,
	.queue_setup = skeleton_rawdev_queue_setup,
	.queue_release = skeleton_rawdev_queue_release,
	.queue_count = skeleton_rawdev_queue_count,

	.attr_get = skeleton_rawdev_get_attr,
	.attr_set = skeleton_rawdev_set_attr,

	.enqueue_bufs = skeleton_rawdev_enqueue_bufs,
	.dequeue_bufs = skeleton_rawdev_dequeue_bufs,

	.dump = skeleton_rawdev_dump,

	.xstats_get = NULL,
	.xstats_get_names = NULL,
	.xstats_get_by_name = NULL,
	.xstats_reset = NULL,

	.firmware_status_get = skeleton_rawdev_firmware_status_get,
	.firmware_version_get = skeleton_rawdev_firmware_version_get,
	.firmware_load = skeleton_rawdev_firmware_load,
	.firmware_unload = skeleton_rawdev_firmware_unload,

	.dev_selftest = test_rawdev_skeldev,
};

static int
skeleton_rawdev_create(const char *name,
		       struct rte_vdev_device *vdev,
		       int socket_id)
{
	int ret = 0, i;
	struct rte_rawdev *rawdev = NULL;
	struct skeleton_rawdev *skeldev = NULL;

	if (!name) {
		SKELETON_PMD_ERR("Invalid name of the device!");
		ret = -EINVAL;
		goto cleanup;
	}

	/* Allocate device structure */
	rawdev = rte_rawdev_pmd_allocate(name, sizeof(struct skeleton_rawdev),
					 socket_id);
	if (rawdev == NULL) {
		SKELETON_PMD_ERR("Unable to allocate rawdevice");
		ret = -EINVAL;
		goto cleanup;
	}

	rawdev->dev_ops = &skeleton_rawdev_ops;
	rawdev->device = &vdev->device;

	skeldev = skeleton_rawdev_get_priv(rawdev);

	skeldev->device_id = SKELETON_DEVICE_ID;
	skeldev->vendor_id = SKELETON_VENDOR_ID;
	skeldev->capabilities = SKELETON_DEFAULT_CAPA;

	memset(&skeldev->fw, 0, sizeof(struct skeleton_firmware));

	skeldev->fw.firmware_state = SKELETON_FW_READY;
	skeldev->fw.firmware_version.major = SKELETON_MAJOR_VER;
	skeldev->fw.firmware_version.minor = SKELETON_MINOR_VER;
	skeldev->fw.firmware_version.subrel = SKELETON_SUB_VER;

	skeldev->device_state = SKELETON_DEV_STOPPED;

	/* Reset/set to default queue configuration for this device */
	for (i = 0; i < SKELETON_MAX_QUEUES; i++) {
		skeldev->queues[i].state = SKELETON_QUEUE_DETACH;
		skeldev->queues[i].depth = SKELETON_QUEUE_DEF_DEPTH;
	}

	/* Clear all allocated queue buffers */
	for (i = 0; i < SKELETON_MAX_QUEUES; i++)
		clear_queue_bufs(i);

	return ret;

cleanup:
	if (rawdev)
		rte_rawdev_pmd_release(rawdev);

	return ret;
}

static int
skeleton_rawdev_destroy(const char *name)
{
	int ret;
	struct rte_rawdev *rdev;

	if (!name) {
		SKELETON_PMD_ERR("Invalid device name");
		return -EINVAL;
	}

	rdev = rte_rawdev_pmd_get_named_dev(name);
	if (!rdev) {
		SKELETON_PMD_ERR("Invalid device name (%s)", name);
		return -EINVAL;
	}

	/* rte_rawdev_close is called by pmd_release */
	ret = rte_rawdev_pmd_release(rdev);
	if (ret)
		SKELETON_PMD_DEBUG("Device cleanup failed");

	return 0;
}

static int
skeldev_get_selftest(const char *key __rte_unused,
		     const char *value,
		     void *opaque)
{
	int *flag = opaque;
	*flag = atoi(value);
	return 0;
}

static int
skeldev_parse_vdev_args(struct rte_vdev_device *vdev)
{
	int selftest = 0;
	const char *name;
	const char *params;

	static const char *const args[] = {
		SKELETON_SELFTEST_ARG,
		NULL
	};

	name = rte_vdev_device_name(vdev);

	params = rte_vdev_device_args(vdev);
	if (params != NULL && params[0] != '\0') {
		struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);

		if (!kvlist) {
			SKELETON_PMD_INFO(
				"Ignoring unsupported params supplied '%s'",
				name);
		} else {
			int ret = rte_kvargs_process(kvlist,
					SKELETON_SELFTEST_ARG,
					skeldev_get_selftest, &selftest);
			if (ret != 0 || (selftest < 0 || selftest > 1)) {
				SKELETON_PMD_ERR("%s: Error in parsing args",
						 name);
				rte_kvargs_free(kvlist);
				/* An out-of-range selftest value is also
				 * treated as a parsing error.
				 */
				return -1;
			}
		}

		rte_kvargs_free(kvlist);
	}

	return selftest;
}
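
/* Illustrative sketch (not part of the driver): the device can be created at
 * run time through the vdev bus and the selftest requested through the kvarg
 * parsed above, either via the EAL --vdev command line option or
 * programmatically. This assumes SKELETON_SELFTEST_ARG expands to a string
 * literal in skeleton_rawdev.h.
 *
 *	rte_vdev_init("rawdev_skeleton", SKELETON_SELFTEST_ARG "=1");
 */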

static int
skeleton_rawdev_probe(struct rte_vdev_device *vdev)
{
	const char *name;
	int selftest = 0, ret = 0;


	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;

	/* More than one instance is not supported */
	if (skeldev_init_once) {
		SKELETON_PMD_ERR("Multiple instances not supported for %s",
				 name);
		return -EINVAL;
	}

	SKELETON_PMD_INFO("Init %s on NUMA node %d", name, rte_socket_id());

	selftest = skeldev_parse_vdev_args(vdev);
	/* In case of an invalid argument, selftest != 1; other values are
	 * ignored.
	 */

	ret = skeleton_rawdev_create(name, vdev, rte_socket_id());
	if (!ret) {
		/* If a valid 'selftest' command line argument was passed,
		 * run the selftest; with invalid arguments, execution
		 * continues but without selftest.
		 */
		if (selftest == 1)
			test_rawdev_skeldev();

		/* Device instance created; a second instance is not possible */
		skeldev_init_once = 1;
	}

	return ret;
}

static int
skeleton_rawdev_remove(struct rte_vdev_device *vdev)
{
	const char *name;
	int ret;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -1;

	SKELETON_PMD_INFO("Closing %s on NUMA node %d", name, rte_socket_id());

	ret = skeleton_rawdev_destroy(name);
	if (!ret)
		skeldev_init_once = 0;

	return ret;
}

static struct rte_vdev_driver skeleton_pmd_drv = {
	.probe = skeleton_rawdev_probe,
	.remove = skeleton_rawdev_remove
};

RTE_PMD_REGISTER_VDEV(SKELETON_PMD_RAWDEV_NAME, skeleton_pmd_drv);

RTE_INIT(skeleton_pmd_init_log)
{
	skeleton_pmd_logtype = rte_log_register("rawdev.skeleton");
	if (skeleton_pmd_logtype >= 0)
		rte_log_set_level(skeleton_pmd_logtype, RTE_LOG_INFO);
}