/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2018 Intel Corporation
 */

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/event.h>
#include <sys/queue.h>
#include <unistd.h>

#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_spinlock.h>
#include <rte_common.h>
#include <rte_interrupts.h>

#include "eal_private.h"
#include "eal_alarm_private.h"

#define MAX_INTR_EVENTS 16

/**
 * union buffer for reading on different devices
 */
union rte_intr_read_buffer {
	char charbuf[16];	/* for others */
};

TAILQ_HEAD(rte_intr_cb_list, rte_intr_callback);
TAILQ_HEAD(rte_intr_source_list, rte_intr_source);

struct rte_intr_callback {
	TAILQ_ENTRY(rte_intr_callback) next;
	rte_intr_callback_fn cb_fn;	/**< callback address */
	void *cb_arg;			/**< parameter for callback */
	uint8_t pending_delete;		/**< delete after callback is called */
	rte_intr_unregister_callback_fn ucb_fn;	/**< fn to call before cb is deleted */
};

struct rte_intr_source {
	TAILQ_ENTRY(rte_intr_source) next;
	struct rte_intr_handle intr_handle;	/**< interrupt handle */
	struct rte_intr_cb_list callbacks;	/**< user callbacks */
	uint32_t active;
};

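/*
 * Bookkeeping layout: each file descriptor with at least one registered
 * callback is represented by a single rte_intr_source, which keeps its
 * user callbacks in a per-source TAILQ. All sources hang off the global
 * intr_sources list declared below, protected by intr_lock.
 */
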
/* global spinlock for interrupt data operation */
static rte_spinlock_t intr_lock = RTE_SPINLOCK_INITIALIZER;

/* interrupt sources list */
static struct rte_intr_source_list intr_sources;

/* interrupt handling thread */
static pthread_t intr_thread;

static volatile int kq = -1;

static int
intr_source_to_kevent(const struct rte_intr_handle *ih, struct kevent *ke)
{
	/* alarm callbacks are a special case */
	if (ih->type == RTE_INTR_HANDLE_ALARM) {
		uint64_t timeout_ns;

		/* get soonest alarm timeout */
		if (eal_alarm_get_timeout_ns(&timeout_ns) < 0)
			return -1;

		ke->filter = EVFILT_TIMER;
		/* timers are one shot */
		ke->flags |= EV_ONESHOT;
		ke->fflags = NOTE_NSECONDS;
		ke->data = timeout_ns;
	} else {
		ke->filter = EVFILT_READ;
	}
	ke->ident = ih->fd;

	return 0;
}

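/*
 * For reference, a sketch of the two kevent shapes the helper above
 * produces (field values are illustrative, derived from the code):
 *
 *	alarm handle:	ident = fd, filter = EVFILT_TIMER,
 *			flags |= EV_ONESHOT, fflags = NOTE_NSECONDS,
 *			data = nanoseconds until the soonest alarm
 *	other handles:	ident = fd, filter = EVFILT_READ
 *
 * Callers pre-set EV_ADD or EV_DELETE in ke->flags before calling,
 * which is why the alarm path only ORs EV_ONESHOT into the flags.
 */
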
int
rte_intr_callback_register(const struct rte_intr_handle *intr_handle,
		rte_intr_callback_fn cb, void *cb_arg)
{
	struct rte_intr_callback *callback = NULL;
	struct rte_intr_source *src = NULL;
	int ret, add_event = 0; /* only add a kevent when the source needs one */

	/* first do parameter checking */
	if (intr_handle == NULL || intr_handle->fd < 0 || cb == NULL) {
		RTE_LOG(ERR, EAL,
			"Registering with invalid input parameter\n");
		return -EINVAL;
	}
	if (kq < 0) {
		RTE_LOG(ERR, EAL, "Kqueue is not active: %d\n", kq);
		return -ENODEV;
	}

	/* allocate a new interrupt callback entity */
	callback = calloc(1, sizeof(*callback));
	if (callback == NULL) {
		RTE_LOG(ERR, EAL, "Cannot allocate memory\n");
		return -ENOMEM;
	}
	callback->cb_fn = cb;
	callback->cb_arg = cb_arg;
	callback->pending_delete = 0;
	callback->ucb_fn = NULL;

	rte_spinlock_lock(&intr_lock);

	/* check if there is at least one callback registered for the fd */
	TAILQ_FOREACH(src, &intr_sources, next) {
		if (src->intr_handle.fd == intr_handle->fd) {
			/* we had no interrupts for this */
			if (TAILQ_EMPTY(&src->callbacks))
				add_event = 1;

			TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
			ret = 0;
			break;
		}
	}

	/* no existing callbacks for this - add new source */
	if (src == NULL) {
		src = calloc(1, sizeof(*src));
		if (src == NULL) {
			RTE_LOG(ERR, EAL, "Cannot allocate memory\n");
			ret = -ENOMEM;
			goto fail;
		} else {
			src->intr_handle = *intr_handle;
			TAILQ_INIT(&src->callbacks);
			TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
			TAILQ_INSERT_TAIL(&intr_sources, src, next);
			add_event = 1;
			ret = 0;
		}
	}

	/* add events to the queue. timer events are special as we need to
	 * re-set the timer.
	 */
	if (add_event || src->intr_handle.type == RTE_INTR_HANDLE_ALARM) {
		struct kevent ke;

		memset(&ke, 0, sizeof(ke));
		ke.flags = EV_ADD; /* mark for addition to the queue */

		if (intr_source_to_kevent(intr_handle, &ke) < 0) {
			RTE_LOG(ERR, EAL, "Cannot convert interrupt handle to kevent\n");
			ret = -ENODEV;
			goto fail;
		}

		/**
		 * add the intr file descriptor into wait list.
		 */
		if (kevent(kq, &ke, 1, NULL, 0, NULL) < 0) {
			/* currently, nic_uio does not support interrupts, so
			 * this error will always be triggered and output to the
			 * user. so, don't output it unless debug log level set.
			 */
			if (errno == ENODEV)
				RTE_LOG(DEBUG, EAL, "Interrupt handle %d not supported\n",
					src->intr_handle.fd);
			else
				RTE_LOG(ERR, EAL, "Error adding fd %d kevent, %s\n",
					src->intr_handle.fd, strerror(errno));
			ret = -errno;
			goto fail;
		}
	}
	rte_spinlock_unlock(&intr_lock);

	return ret;
fail:
	/* clean up */
	if (src != NULL) {
		TAILQ_REMOVE(&(src->callbacks), callback, next);
		if (TAILQ_EMPTY(&(src->callbacks))) {
			TAILQ_REMOVE(&intr_sources, src, next);
			free(src);
		}
	}
	free(callback);
	rte_spinlock_unlock(&intr_lock);
	return ret;
}

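/*
 * Minimal usage sketch (illustrative only; my_handler and my_dev are
 * hypothetical names, not part of this file):
 *
 *	static void
 *	my_handler(void *arg)
 *	{
 *		struct my_dev *dev = arg;
 *		... acknowledge and service the device ...
 *	}
 *
 *	ret = rte_intr_callback_register(&dev->intr_handle,
 *					 my_handler, dev);
 *
 * As implemented above, this returns 0 on success or a negative
 * errno-style value (-EINVAL, -ENOMEM, -ENODEV) on failure.
 */
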
int __rte_experimental
rte_intr_callback_unregister_pending(const struct rte_intr_handle *intr_handle,
				rte_intr_callback_fn cb_fn, void *cb_arg,
				rte_intr_unregister_callback_fn ucb_fn)
{
	int ret;
	struct rte_intr_source *src;
	struct rte_intr_callback *cb, *next;

	/* do parameter checking first */
	if (intr_handle == NULL || intr_handle->fd < 0) {
		RTE_LOG(ERR, EAL,
			"Unregistering with invalid input parameter\n");
		return -EINVAL;
	}

	if (kq < 0) {
		RTE_LOG(ERR, EAL, "Kqueue is not active\n");
		return -ENODEV;
	}

	rte_spinlock_lock(&intr_lock);

	/* check if the interrupt source for the fd exists */
	TAILQ_FOREACH(src, &intr_sources, next)
		if (src->intr_handle.fd == intr_handle->fd)
			break;

	/* No interrupt source registered for the fd */
	if (src == NULL) {
		ret = -ENOENT;

	/* only usable if the source is active */
	} else if (src->active == 0) {
		ret = -EAGAIN;

	} else {
		ret = 0;

		/* walk through the callbacks and mark all that match. */
		for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {
			next = TAILQ_NEXT(cb, next);
			if (cb->cb_fn == cb_fn && (cb_arg == (void *)-1 ||
					cb->cb_arg == cb_arg)) {
				cb->pending_delete = 1;
				cb->ucb_fn = ucb_fn;
				ret++;
			}
		}
	}

	rte_spinlock_unlock(&intr_lock);

	return ret;
}

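/*
 * Intended-use sketch (illustrative; my_handler and dev are hypothetical
 * names): a callback cannot remove itself with
 * rte_intr_callback_unregister() while its source is marked active
 * (that returns -EAGAIN), so it marks itself for deferred deletion
 * instead, optionally passing a destructor for its argument:
 *
 *	static void
 *	release_cb(struct rte_intr_handle *h, void *arg)
 *	{
 *		free(arg);
 *	}
 *
 *	static void
 *	my_handler(void *arg)
 *	{
 *		...
 *		rte_intr_callback_unregister_pending(&dev->intr_handle,
 *				my_handler, arg, release_cb);
 *	}
 *
 * The actual removal is performed by the event loop once the source
 * goes inactive; see eal_intr_process_interrupts() below.
 */
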
int
rte_intr_callback_unregister(const struct rte_intr_handle *intr_handle,
			rte_intr_callback_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_intr_source *src;
	struct rte_intr_callback *cb, *next;

	/* do parameter checking first */
	if (intr_handle == NULL || intr_handle->fd < 0) {
		RTE_LOG(ERR, EAL,
			"Unregistering with invalid input parameter\n");
		return -EINVAL;
	}
	if (kq < 0) {
		RTE_LOG(ERR, EAL, "Kqueue is not active\n");
		return -ENODEV;
	}

	rte_spinlock_lock(&intr_lock);

	/* check if the interrupt source for the fd exists */
	TAILQ_FOREACH(src, &intr_sources, next)
		if (src->intr_handle.fd == intr_handle->fd)
			break;

	/* No interrupt source registered for the fd */
	if (src == NULL) {
		ret = -ENOENT;

	/* interrupt source has some active callbacks right now. */
	} else if (src->active != 0) {
		ret = -EAGAIN;

	/* ok to remove. */
	} else {
		struct kevent ke;

		ret = 0;

		/* remove it from the kqueue */
		memset(&ke, 0, sizeof(ke));
		ke.flags = EV_DELETE; /* mark for deletion from the queue */

		if (intr_source_to_kevent(intr_handle, &ke) < 0) {
			RTE_LOG(ERR, EAL, "Cannot convert to kevent\n");
			ret = -ENODEV;
			goto out;
		}

		/**
		 * remove intr file descriptor from wait list.
		 */
		if (kevent(kq, &ke, 1, NULL, 0, NULL) < 0) {
			RTE_LOG(ERR, EAL, "Error removing fd %d kevent, %s\n",
				src->intr_handle.fd, strerror(errno));
			/* removing a non-existent event is an expected
			 * condition in some circumstances
			 * (e.g. oneshot events).
			 */
		}

		/* walk through the callbacks and remove all that match. */
		for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {
			next = TAILQ_NEXT(cb, next);
			if (cb->cb_fn == cb_fn && (cb_arg == (void *)-1 ||
					cb->cb_arg == cb_arg)) {
				TAILQ_REMOVE(&src->callbacks, cb, next);
				free(cb);
				ret++;
			}
		}

		/* all callbacks for that source are removed. */
		if (TAILQ_EMPTY(&src->callbacks)) {
			TAILQ_REMOVE(&intr_sources, src, next);
			free(src);
		}
	}
out:
	rte_spinlock_unlock(&intr_lock);

	return ret;
}

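/*
 * Note on matching, visible in the removal loop above: passing
 * cb_arg == (void *)-1 acts as a wildcard and removes every callback
 * registered with cb_fn, regardless of argument. For example
 * (illustrative, with a hypothetical dev/my_handler):
 *
 *	rte_intr_callback_unregister(&dev->intr_handle, my_handler,
 *				     (void *)-1);
 *
 * The return value is the number of callbacks removed, or a negative
 * error code.
 */
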
int
rte_intr_enable(const struct rte_intr_handle *intr_handle)
{
	if (intr_handle && intr_handle->type == RTE_INTR_HANDLE_VDEV)
		return 0;

	if (!intr_handle || intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0)
		return -1;

	switch (intr_handle->type) {
	/* not used at this moment */
	case RTE_INTR_HANDLE_ALARM:
		return -1;
	/* not used at this moment */
	case RTE_INTR_HANDLE_DEV_EVENT:
		return -1;
	/* unknown handle type */
	default:
		RTE_LOG(ERR, EAL,
			"Unknown handle type of fd %d\n",
			intr_handle->fd);
		return -1;
	}

	return 0;
}

int
rte_intr_disable(const struct rte_intr_handle *intr_handle)
{
	if (intr_handle && intr_handle->type == RTE_INTR_HANDLE_VDEV)
		return 0;

	if (!intr_handle || intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0)
		return -1;

	switch (intr_handle->type) {
	/* not used at this moment */
	case RTE_INTR_HANDLE_ALARM:
		return -1;
	/* not used at this moment */
	case RTE_INTR_HANDLE_DEV_EVENT:
		return -1;
	/* unknown handle type */
	default:
		RTE_LOG(ERR, EAL,
			"Unknown handle type of fd %d\n",
			intr_handle->fd);
		return -1;
	}

	return 0;
}

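/*
 * Note: as written, both functions above succeed only for
 * RTE_INTR_HANDLE_VDEV handles and reject every other type. This is
 * presumably because nic_uio does not expose an interrupt control
 * interface on FreeBSD (see the kevent registration comment earlier
 * in this file).
 */
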
static void
eal_intr_process_interrupts(struct kevent *events, int nfds)
{
	struct rte_intr_callback active_cb;
	union rte_intr_read_buffer buf;
	struct rte_intr_callback *cb, *next;
	struct rte_intr_source *src;
	bool call = false;
	int n, bytes_read;
	struct kevent ke;

	for (n = 0; n < nfds; n++) {
		int event_fd = events[n].ident;

		/* reset the flag for each event so a previous iteration
		 * cannot spuriously trigger this one's callbacks.
		 */
		call = false;

		rte_spinlock_lock(&intr_lock);
		TAILQ_FOREACH(src, &intr_sources, next)
			if (src->intr_handle.fd == event_fd)
				break;
		if (src == NULL) {
			rte_spinlock_unlock(&intr_lock);
			continue;
		}

		/* mark this interrupt source as active and release the lock. */
		src->active = 1;
		rte_spinlock_unlock(&intr_lock);

		/* set the length to be read for different handle types */
		switch (src->intr_handle.type) {
		case RTE_INTR_HANDLE_ALARM:
			bytes_read = 0;
			call = true;
			break;
		case RTE_INTR_HANDLE_VDEV:
		case RTE_INTR_HANDLE_EXT:
			bytes_read = 0;
			call = true;
			break;
		case RTE_INTR_HANDLE_DEV_EVENT:
			bytes_read = 0;
			call = true;
			break;
		default:
			bytes_read = 1;
			break;
		}

		if (bytes_read > 0) {
			/**
			 * read out to clear the ready-to-be-read flag
			 * before the next kevent wait.
			 */
			bytes_read = read(event_fd, &buf, bytes_read);
			if (bytes_read < 0) {
				if (errno == EINTR || errno == EWOULDBLOCK)
					continue;

				RTE_LOG(ERR, EAL, "Error reading from file "
					"descriptor %d: %s\n",
					event_fd,
					strerror(errno));
			} else if (bytes_read == 0)
				RTE_LOG(ERR, EAL, "Read nothing from file "
					"descriptor %d\n", event_fd);
			else
				call = true;
		}

		/* grab a lock again to call callbacks and update status. */
		rte_spinlock_lock(&intr_lock);

		if (call) {
			/* Finally, call all callbacks. */
			TAILQ_FOREACH(cb, &src->callbacks, next) {

				/* make a copy and unlock. */
				active_cb = *cb;
				rte_spinlock_unlock(&intr_lock);

				/* call the actual callback */
				active_cb.cb_fn(active_cb.cb_arg);

				/* get the lock back. */
				rte_spinlock_lock(&intr_lock);
			}
		}

		/* we are done with that interrupt source, release it. */
		src->active = 0;

		/* check if any callbacks are supposed to be removed */
		for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {
			next = TAILQ_NEXT(cb, next);
			if (cb->pending_delete) {
				/* remove it from the kqueue */
				memset(&ke, 0, sizeof(ke));
				/* mark for deletion from the queue */
				ke.flags = EV_DELETE;

				if (intr_source_to_kevent(&src->intr_handle, &ke) < 0) {
					RTE_LOG(ERR, EAL, "Cannot convert to kevent\n");
					rte_spinlock_unlock(&intr_lock);
					return;
				}

				/**
				 * remove intr file descriptor from wait list.
				 */
				if (kevent(kq, &ke, 1, NULL, 0, NULL) < 0) {
					RTE_LOG(ERR, EAL, "Error removing fd %d kevent, "
						"%s\n", src->intr_handle.fd,
						strerror(errno));
					/* removing a non-existent event is an
					 * expected condition in some
					 * circumstances (e.g. oneshot events).
					 */
				}

				TAILQ_REMOVE(&src->callbacks, cb, next);
				if (cb->ucb_fn)
					cb->ucb_fn(&src->intr_handle, cb->cb_arg);
				free(cb);
			}
		}

		/* all callbacks for that source are removed. */
		if (TAILQ_EMPTY(&src->callbacks)) {
			TAILQ_REMOVE(&intr_sources, src, next);
			free(src);
		}

		rte_spinlock_unlock(&intr_lock);
	}
}

static void *
eal_intr_thread_main(void *arg __rte_unused)
{
	struct kevent events[MAX_INTR_EVENTS];
	int nfds;

	/* host thread, never break out */
	for (;;) {
		/* do not change anything, just wait */
		nfds = kevent(kq, NULL, 0, events, MAX_INTR_EVENTS, NULL);

		/* kevent failed */
		if (nfds < 0) {
			if (errno == EINTR)
				continue;
			RTE_LOG(ERR, EAL,
				"kevent wait failed: %s\n", strerror(errno));
			break;
		}
		/* kevent timeout, will never happen here */
		else if (nfds == 0)
			continue;

		/* kevent has at least one fd ready to read */
		eal_intr_process_interrupts(events, nfds);
	}
	close(kq);
	kq = -1;
	return NULL;
}

int
rte_eal_intr_init(void)
{
	int ret = 0;

	/* init the global interrupt source head */
	TAILQ_INIT(&intr_sources);

	kq = kqueue();
	if (kq < 0) {
		RTE_LOG(ERR, EAL, "Cannot create kqueue instance\n");
		return -1;
	}

	/* create the host thread to wait/handle the interrupt */
	ret = rte_ctrl_thread_create(&intr_thread, "eal-intr-thread", NULL,
			eal_intr_thread_main, NULL);
	if (ret != 0) {
		rte_errno = -ret;
		RTE_LOG(ERR, EAL,
			"Failed to create thread for interrupt handling\n");
	}

	return ret;
}

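/*
 * The remaining functions satisfy the common EAL interrupt API but are
 * stubs on FreeBSD, since the underlying mechanisms (epoll, eventfd)
 * are Linux-specific; they return -ENOTSUP or a harmless default.
 */
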
int
rte_intr_rx_ctl(struct rte_intr_handle *intr_handle,
		int epfd, int op, unsigned int vec, void *data)
{
	RTE_SET_USED(intr_handle);
	RTE_SET_USED(epfd);
	RTE_SET_USED(op);
	RTE_SET_USED(vec);
	RTE_SET_USED(data);

	return -ENOTSUP;
}

int
rte_intr_efd_enable(struct rte_intr_handle *intr_handle, uint32_t nb_efd)
{
	RTE_SET_USED(intr_handle);
	RTE_SET_USED(nb_efd);

	return 0;
}

void
rte_intr_efd_disable(struct rte_intr_handle *intr_handle)
{
	RTE_SET_USED(intr_handle);
}

int
rte_intr_dp_is_en(struct rte_intr_handle *intr_handle)
{
	RTE_SET_USED(intr_handle);
	return 0;
}

int
rte_intr_allow_others(struct rte_intr_handle *intr_handle)
{
	RTE_SET_USED(intr_handle);
	return 1;
}

int
rte_intr_cap_multiple(struct rte_intr_handle *intr_handle)
{
	RTE_SET_USED(intr_handle);
	return 0;
}

int
rte_epoll_wait(int epfd, struct rte_epoll_event *events,
		int maxevents, int timeout)
{
	RTE_SET_USED(epfd);
	RTE_SET_USED(events);
	RTE_SET_USED(maxevents);
	RTE_SET_USED(timeout);

	return -ENOTSUP;
}

int
rte_epoll_ctl(int epfd, int op, int fd, struct rte_epoll_event *event)
{
	RTE_SET_USED(epfd);
	RTE_SET_USED(op);
	RTE_SET_USED(fd);
	RTE_SET_USED(event);

	return -ENOTSUP;
}

int
rte_intr_tls_epfd(void)
{
	return -ENOTSUP;
}

void
rte_intr_free_epoll_fd(struct rte_intr_handle *intr_handle)
{
	RTE_SET_USED(intr_handle);
}