/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <pthread.h>
#include <sys/queue.h>
#include <stdarg.h>
#include <unistd.h>
#include <string.h>
#include <errno.h>
#include <inttypes.h>
#include <sys/epoll.h>
#include <sys/signalfd.h>
#include <sys/ioctl.h>
#include <sys/eventfd.h>
#include <assert.h>
#include <stdbool.h>

#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_debug.h>
#include <rte_log.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_pause.h>

#include "eal_private.h"
#include "eal_vfio.h"
#include "eal_thread.h"

#define EAL_INTR_EPOLL_WAIT_FOREVER (-1)
#define NB_OTHER_INTR 1

static RTE_DEFINE_PER_LCORE(int, _epfd) = -1; /**< epoll fd per thread */

/**
 * union for pipe fds.
 */
union intr_pipefds{
	struct {
		int pipefd[2];
	};
	struct {
		int readfd;
		int writefd;
	};
};

/**
 * union buffer for reading on different devices
 */
union rte_intr_read_buffer {
	int uio_intr_count;       /* for uio device */
#ifdef VFIO_PRESENT
	uint64_t vfio_intr_count; /* for vfio device */
#endif
	uint64_t timerfd_num;     /* for timerfd */
	char charbuf[16];         /* for others */
};

TAILQ_HEAD(rte_intr_cb_list, rte_intr_callback);
TAILQ_HEAD(rte_intr_source_list, rte_intr_source);

struct rte_intr_callback {
	TAILQ_ENTRY(rte_intr_callback) next;
	rte_intr_callback_fn cb_fn; /**< callback address */
	void *cb_arg;               /**< parameter for callback */
};

struct rte_intr_source {
	TAILQ_ENTRY(rte_intr_source) next;
	struct rte_intr_handle intr_handle; /**< interrupt handle */
	struct rte_intr_cb_list callbacks;  /**< user callbacks */
	uint32_t active;
};

/* global spinlock for interrupt data operation */
static rte_spinlock_t intr_lock = RTE_SPINLOCK_INITIALIZER;

/* union buffer for pipe read/write */
static union intr_pipefds intr_pipe;

/* interrupt sources list */
static struct rte_intr_source_list intr_sources;

/* interrupt handling thread */
static pthread_t intr_thread;

/* VFIO interrupts */
#ifdef VFIO_PRESENT

#define IRQ_SET_BUF_LEN  (sizeof(struct vfio_irq_set) + sizeof(int))
/* irq set buffer length for queue interrupts and LSC interrupt */
#define MSIX_IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + \
			      sizeof(int) * (RTE_MAX_RXTX_INTR_VEC_ID + 1))
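
/*
 * Layout note (editorial): VFIO_DEVICE_SET_IRQS takes a variable-length
 * argument, a struct vfio_irq_set header directly followed by per-vector
 * payload, which is why the buffers above are sized as header plus an array
 * of eventfds. A minimal sketch of how the functions below fill it:
 *
 *	char buf[MSIX_IRQ_SET_BUF_LEN];
 *	struct vfio_irq_set *irq_set = (struct vfio_irq_set *)buf;
 *	int *fd_ptr = (int *)&irq_set->data;	// eventfds start here
 *	// fd_ptr[0] .. fd_ptr[irq_set->count - 1], one eventfd per vector
 *
 * with irq_set->argsz covering the whole buffer.
 */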

/* enable legacy (INTx) interrupts */
static int
vfio_enable_intx(const struct rte_intr_handle *intr_handle) {
	struct vfio_irq_set *irq_set;
	char irq_set_buf[IRQ_SET_BUF_LEN];
	int len, ret;
	int *fd_ptr;

	len = sizeof(irq_set_buf);

	/* enable INTx */
	irq_set = (struct vfio_irq_set *) irq_set_buf;
	irq_set->argsz = len;
	irq_set->count = 1;
	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
	irq_set->start = 0;
	fd_ptr = (int *) &irq_set->data;
	*fd_ptr = intr_handle->fd;

	ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);

	if (ret) {
		RTE_LOG(ERR, EAL, "Error enabling INTx interrupts for fd %d\n",
			intr_handle->fd);
		return -1;
	}

	/* unmask INTx after enabling */
	memset(irq_set, 0, len);
	len = sizeof(struct vfio_irq_set);
	irq_set->argsz = len;
	irq_set->count = 1;
	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK;
	irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
	irq_set->start = 0;

	ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);

	if (ret) {
		RTE_LOG(ERR, EAL, "Error unmasking INTx interrupts for fd %d\n",
			intr_handle->fd);
		return -1;
	}
	return 0;
}

/* disable legacy (INTx) interrupts */
static int
vfio_disable_intx(const struct rte_intr_handle *intr_handle) {
	struct vfio_irq_set *irq_set;
	char irq_set_buf[IRQ_SET_BUF_LEN];
	int len, ret;

	len = sizeof(struct vfio_irq_set);

	/* mask interrupts before disabling */
	irq_set = (struct vfio_irq_set *) irq_set_buf;
	irq_set->argsz = len;
	irq_set->count = 1;
	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK;
	irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
	irq_set->start = 0;

	ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);

	if (ret) {
		RTE_LOG(ERR, EAL, "Error masking INTx interrupts for fd %d\n",
			intr_handle->fd);
		return -1;
	}

	/* disable INTx */
	memset(irq_set, 0, len);
	irq_set->argsz = len;
	irq_set->count = 0;
	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
	irq_set->start = 0;

	ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);

	if (ret) {
		RTE_LOG(ERR, EAL,
			"Error disabling INTx interrupts for fd %d\n", intr_handle->fd);
		return -1;
	}
	return 0;
}

/* enable MSI interrupts */
static int
vfio_enable_msi(const struct rte_intr_handle *intr_handle) {
	int len, ret;
	char irq_set_buf[IRQ_SET_BUF_LEN];
	struct vfio_irq_set *irq_set;
	int *fd_ptr;

	len = sizeof(irq_set_buf);

	irq_set = (struct vfio_irq_set *) irq_set_buf;
	irq_set->argsz = len;
	irq_set->count = 1;
	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_MSI_IRQ_INDEX;
	irq_set->start = 0;
	fd_ptr = (int *) &irq_set->data;
	*fd_ptr = intr_handle->fd;

	ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);

	if (ret) {
		RTE_LOG(ERR, EAL, "Error enabling MSI interrupts for fd %d\n",
			intr_handle->fd);
		return -1;
	}
	return 0;
}

/* disable MSI interrupts */
static int
vfio_disable_msi(const struct rte_intr_handle *intr_handle) {
	struct vfio_irq_set *irq_set;
	char irq_set_buf[IRQ_SET_BUF_LEN];
	int len, ret;

	len = sizeof(struct vfio_irq_set);

	irq_set = (struct vfio_irq_set *) irq_set_buf;
	irq_set->argsz = len;
	irq_set->count = 0;
	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_MSI_IRQ_INDEX;
	irq_set->start = 0;

	ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);

	if (ret)
		RTE_LOG(ERR, EAL,
			"Error disabling MSI interrupts for fd %d\n", intr_handle->fd);

	return ret;
}

/* enable MSI-X interrupts */
static int
vfio_enable_msix(const struct rte_intr_handle *intr_handle) {
	int len, ret;
	char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
	struct vfio_irq_set *irq_set;
	int *fd_ptr;

	len = sizeof(irq_set_buf);

	irq_set = (struct vfio_irq_set *) irq_set_buf;
	irq_set->argsz = len;
	/* 0 < irq_set->count < RTE_MAX_RXTX_INTR_VEC_ID + 1 */
	irq_set->count = intr_handle->max_intr ?
		(intr_handle->max_intr > RTE_MAX_RXTX_INTR_VEC_ID + 1 ?
		RTE_MAX_RXTX_INTR_VEC_ID + 1 : intr_handle->max_intr) : 1;
	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
	irq_set->start = 0;
	fd_ptr = (int *) &irq_set->data;
	/* interrupt vector offset 0 is reserved for the non-efd (e.g. LSC) mapping */
	fd_ptr[RTE_INTR_VEC_ZERO_OFFSET] = intr_handle->fd;
	memcpy(&fd_ptr[RTE_INTR_VEC_RXTX_OFFSET], intr_handle->efds,
		sizeof(*intr_handle->efds) * intr_handle->nb_efd);

	ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);

	if (ret) {
		RTE_LOG(ERR, EAL, "Error enabling MSI-X interrupts for fd %d\n",
			intr_handle->fd);
		return -1;
	}

	return 0;
}
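
/*
 * Worked example (editorial, assuming RTE_MAX_RXTX_INTR_VEC_ID == 32): with
 * max_intr == 5, i.e. NB_OTHER_INTR plus four queue vectors, the clamp above
 * gives irq_set->count == 5, so fd_ptr[0] carries intr_handle->fd and
 * fd_ptr[1..4] the four queue eventfds; max_intr == 0 falls back to a single
 * vector, and anything above 33 is clamped to 33.
 */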

/* disable MSI-X interrupts */
static int
vfio_disable_msix(const struct rte_intr_handle *intr_handle) {
	struct vfio_irq_set *irq_set;
	char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
	int len, ret;

	len = sizeof(struct vfio_irq_set);

	irq_set = (struct vfio_irq_set *) irq_set_buf;
	irq_set->argsz = len;
	irq_set->count = 0;
	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
	irq_set->start = 0;

	ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);

	if (ret)
		RTE_LOG(ERR, EAL,
			"Error disabling MSI-X interrupts for fd %d\n", intr_handle->fd);

	return ret;
}
#endif

static int
uio_intx_intr_disable(const struct rte_intr_handle *intr_handle)
{
	unsigned char command_high;

	/* use UIO config file descriptor for uio_pci_generic;
	 * offset 5 is the upper byte of the 16-bit PCI command register
	 */
	if (pread(intr_handle->uio_cfg_fd, &command_high, 1, 5) != 1) {
		RTE_LOG(ERR, EAL,
			"Error reading interrupts status for fd %d\n",
			intr_handle->uio_cfg_fd);
		return -1;
	}
	/* disable interrupts: set the INTx Disable bit (bit 10 of command) */
	command_high |= 0x4;
	if (pwrite(intr_handle->uio_cfg_fd, &command_high, 1, 5) != 1) {
		RTE_LOG(ERR, EAL,
			"Error disabling interrupts for fd %d\n",
			intr_handle->uio_cfg_fd);
		return -1;
	}

	return 0;
}

static int
uio_intx_intr_enable(const struct rte_intr_handle *intr_handle)
{
	unsigned char command_high;

	/* use UIO config file descriptor for uio_pci_generic */
	if (pread(intr_handle->uio_cfg_fd, &command_high, 1, 5) != 1) {
		RTE_LOG(ERR, EAL,
			"Error reading interrupts status for fd %d\n",
			intr_handle->uio_cfg_fd);
		return -1;
	}
	/* enable interrupts: clear the INTx Disable bit */
	command_high &= ~0x4;
	if (pwrite(intr_handle->uio_cfg_fd, &command_high, 1, 5) != 1) {
		RTE_LOG(ERR, EAL,
			"Error enabling interrupts for fd %d\n",
			intr_handle->uio_cfg_fd);
		return -1;
	}

	return 0;
}

static int
uio_intr_disable(const struct rte_intr_handle *intr_handle)
{
	const int value = 0;

	if (write(intr_handle->fd, &value, sizeof(value)) < 0) {
		RTE_LOG(ERR, EAL,
			"Error disabling interrupts for fd %d (%s)\n",
			intr_handle->fd, strerror(errno));
		return -1;
	}
	return 0;
}

static int
uio_intr_enable(const struct rte_intr_handle *intr_handle)
{
	const int value = 1;

	if (write(intr_handle->fd, &value, sizeof(value)) < 0) {
		RTE_LOG(ERR, EAL,
			"Error enabling interrupts for fd %d (%s)\n",
			intr_handle->fd, strerror(errno));
		return -1;
	}
	return 0;
}

int
rte_intr_callback_register(const struct rte_intr_handle *intr_handle,
			rte_intr_callback_fn cb, void *cb_arg)
{
	int ret, wake_thread;
	struct rte_intr_source *src;
	struct rte_intr_callback *callback;

	wake_thread = 0;

	/* first do parameter checking */
	if (intr_handle == NULL || intr_handle->fd < 0 || cb == NULL) {
		RTE_LOG(ERR, EAL,
			"Registering with invalid input parameter\n");
		return -EINVAL;
	}

	/* allocate a new interrupt callback entity */
	callback = calloc(1, sizeof(*callback));
	if (callback == NULL) {
		RTE_LOG(ERR, EAL, "Can not allocate memory\n");
		return -ENOMEM;
	}
	callback->cb_fn = cb;
	callback->cb_arg = cb_arg;

	rte_spinlock_lock(&intr_lock);

	/* check if there is at least one callback registered for the fd */
	TAILQ_FOREACH(src, &intr_sources, next) {
		if (src->intr_handle.fd == intr_handle->fd) {
			/* we had no interrupts for this */
			if (TAILQ_EMPTY(&src->callbacks))
				wake_thread = 1;

			TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
			ret = 0;
			break;
		}
	}

	/* no existing callbacks for this - add new source */
	if (src == NULL) {
		src = calloc(1, sizeof(*src));
		if (src == NULL) {
			RTE_LOG(ERR, EAL, "Can not allocate memory\n");
			free(callback);
			ret = -ENOMEM;
		} else {
			src->intr_handle = *intr_handle;
			TAILQ_INIT(&src->callbacks);
			TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
			TAILQ_INSERT_TAIL(&intr_sources, src, next);
			wake_thread = 1;
			ret = 0;
		}
	}

	rte_spinlock_unlock(&intr_lock);

	/**
	 * check if we need to notify the pipe fd waited on by epoll_wait
	 * to rebuild the wait list.
	 */
	if (wake_thread)
		if (write(intr_pipe.writefd, "1", 1) < 0)
			return -EPIPE;

	return ret;
}
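
/*
 * Usage sketch (illustrative; the handler and device type are hypothetical):
 * a driver registers a callback on its interrupt handle and then enables the
 * interrupt source, e.g.
 *
 *	static void
 *	lsc_handler(void *cb_arg)
 *	{
 *		struct my_dev *dev = cb_arg;	// hypothetical driver type
 *		// read the cause register and schedule recovery work
 *	}
 *
 *	rte_intr_callback_register(&dev->intr_handle, lsc_handler, dev);
 *	rte_intr_enable(&dev->intr_handle);
 *
 * The callback runs in the context of the eal-intr-thread created by
 * rte_eal_intr_init(), never on a data-path lcore.
 */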

int
rte_intr_callback_unregister(const struct rte_intr_handle *intr_handle,
			rte_intr_callback_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_intr_source *src;
	struct rte_intr_callback *cb, *next;

	/* do parameter checking first */
	if (intr_handle == NULL || intr_handle->fd < 0) {
		RTE_LOG(ERR, EAL,
			"Unregistering with invalid input parameter\n");
		return -EINVAL;
	}

	rte_spinlock_lock(&intr_lock);

	/* check if the interrupt source for the fd exists */
	TAILQ_FOREACH(src, &intr_sources, next)
		if (src->intr_handle.fd == intr_handle->fd)
			break;

	/* No interrupt source registered for the fd */
	if (src == NULL) {
		ret = -ENOENT;

	/* interrupt source has some active callbacks right now. */
	} else if (src->active != 0) {
		ret = -EAGAIN;

	/* ok to remove. */
	} else {
		ret = 0;

		/* walk through the callbacks and remove all that match. */
		for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {

			next = TAILQ_NEXT(cb, next);

			if (cb->cb_fn == cb_fn && (cb_arg == (void *)-1 ||
					cb->cb_arg == cb_arg)) {
				TAILQ_REMOVE(&src->callbacks, cb, next);
				free(cb);
				ret++;
			}
		}

		/* all callbacks for that source are removed. */
		if (TAILQ_EMPTY(&src->callbacks)) {
			TAILQ_REMOVE(&intr_sources, src, next);
			free(src);
		}
	}

	rte_spinlock_unlock(&intr_lock);

	/* notify the pipe fd waited on by epoll_wait to rebuild the wait list */
	if (ret >= 0 && write(intr_pipe.writefd, "1", 1) < 0) {
		ret = -EPIPE;
	}

	return ret;
}

int
rte_intr_enable(const struct rte_intr_handle *intr_handle)
{
	if (intr_handle && intr_handle->type == RTE_INTR_HANDLE_VDEV)
		return 0;

	if (!intr_handle || intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0)
		return -1;

	switch (intr_handle->type){
	/* write to the uio fd to enable the interrupt */
	case RTE_INTR_HANDLE_UIO:
		if (uio_intr_enable(intr_handle))
			return -1;
		break;
	case RTE_INTR_HANDLE_UIO_INTX:
		if (uio_intx_intr_enable(intr_handle))
			return -1;
		break;
	/* not used at this moment */
	case RTE_INTR_HANDLE_ALARM:
		return -1;
#ifdef VFIO_PRESENT
	case RTE_INTR_HANDLE_VFIO_MSIX:
		if (vfio_enable_msix(intr_handle))
			return -1;
		break;
	case RTE_INTR_HANDLE_VFIO_MSI:
		if (vfio_enable_msi(intr_handle))
			return -1;
		break;
	case RTE_INTR_HANDLE_VFIO_LEGACY:
		if (vfio_enable_intx(intr_handle))
			return -1;
		break;
#endif
	/* not used at this moment */
	case RTE_INTR_HANDLE_DEV_EVENT:
		return -1;
	/* unknown handle type */
	default:
		RTE_LOG(ERR, EAL,
			"Unknown handle type of fd %d\n",
			intr_handle->fd);
		return -1;
	}

	return 0;
}

int
rte_intr_disable(const struct rte_intr_handle *intr_handle)
{
	if (intr_handle && intr_handle->type == RTE_INTR_HANDLE_VDEV)
		return 0;

	if (!intr_handle || intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0)
		return -1;

	switch (intr_handle->type){
	/* write to the uio fd to disable the interrupt */
	case RTE_INTR_HANDLE_UIO:
		if (uio_intr_disable(intr_handle))
			return -1;
		break;
	case RTE_INTR_HANDLE_UIO_INTX:
		if (uio_intx_intr_disable(intr_handle))
			return -1;
		break;
	/* not used at this moment */
	case RTE_INTR_HANDLE_ALARM:
		return -1;
#ifdef VFIO_PRESENT
	case RTE_INTR_HANDLE_VFIO_MSIX:
		if (vfio_disable_msix(intr_handle))
			return -1;
		break;
	case RTE_INTR_HANDLE_VFIO_MSI:
		if (vfio_disable_msi(intr_handle))
			return -1;
		break;
	case RTE_INTR_HANDLE_VFIO_LEGACY:
		if (vfio_disable_intx(intr_handle))
			return -1;
		break;
#endif
	/* not used at this moment */
	case RTE_INTR_HANDLE_DEV_EVENT:
		return -1;
	/* unknown handle type */
	default:
		RTE_LOG(ERR, EAL,
			"Unknown handle type of fd %d\n",
			intr_handle->fd);
		return -1;
	}

	return 0;
}

static int
eal_intr_process_interrupts(struct epoll_event *events, int nfds)
{
	bool call = false;
	int n, bytes_read;
	struct rte_intr_source *src;
	struct rte_intr_callback *cb;
	union rte_intr_read_buffer buf;
	struct rte_intr_callback active_cb;

	for (n = 0; n < nfds; n++) {

		/**
		 * if the pipe fd is ready to read, return out to
		 * rebuild the wait list.
		 */
		if (events[n].data.fd == intr_pipe.readfd){
			int r = read(intr_pipe.readfd, buf.charbuf,
					sizeof(buf.charbuf));
			RTE_SET_USED(r);
			return -1;
		}
		rte_spinlock_lock(&intr_lock);
		TAILQ_FOREACH(src, &intr_sources, next)
			if (src->intr_handle.fd ==
					events[n].data.fd)
				break;
		if (src == NULL){
			rte_spinlock_unlock(&intr_lock);
			continue;
		}

		/* mark this interrupt source as active and release the lock. */
		src->active = 1;
		rte_spinlock_unlock(&intr_lock);

		/* set the length to be read for different handle types */
		switch (src->intr_handle.type) {
		case RTE_INTR_HANDLE_UIO:
		case RTE_INTR_HANDLE_UIO_INTX:
			bytes_read = sizeof(buf.uio_intr_count);
			break;
		case RTE_INTR_HANDLE_ALARM:
			bytes_read = sizeof(buf.timerfd_num);
			break;
#ifdef VFIO_PRESENT
		case RTE_INTR_HANDLE_VFIO_MSIX:
		case RTE_INTR_HANDLE_VFIO_MSI:
		case RTE_INTR_HANDLE_VFIO_LEGACY:
			bytes_read = sizeof(buf.vfio_intr_count);
			break;
#endif
		case RTE_INTR_HANDLE_VDEV:
		case RTE_INTR_HANDLE_EXT:
			bytes_read = 0;
			call = true;
			break;
		case RTE_INTR_HANDLE_DEV_EVENT:
			bytes_read = 0;
			call = true;
			break;
		default:
			bytes_read = 1;
			break;
		}

		if (bytes_read > 0) {
			/**
			 * read out to clear the ready-to-be-read flag
			 * for epoll_wait.
			 */
			bytes_read = read(events[n].data.fd, &buf, bytes_read);
			if (bytes_read < 0) {
				if (errno == EINTR || errno == EWOULDBLOCK)
					continue;

				RTE_LOG(ERR, EAL, "Error reading from file "
					"descriptor %d: %s\n",
					events[n].data.fd,
					strerror(errno));
			} else if (bytes_read == 0)
				RTE_LOG(ERR, EAL, "Read nothing from file "
					"descriptor %d\n", events[n].data.fd);
			else
				call = true;
		}

		/* grab a lock again to call callbacks and update status. */
		rte_spinlock_lock(&intr_lock);

		if (call) {

			/* Finally, call all callbacks. */
			TAILQ_FOREACH(cb, &src->callbacks, next) {

				/* make a copy and unlock. */
				active_cb = *cb;
				rte_spinlock_unlock(&intr_lock);

				/* call the actual callback */
				active_cb.cb_fn(active_cb.cb_arg);

				/* get the lock back. */
				rte_spinlock_lock(&intr_lock);
			}
		}

		/* we are done with that interrupt source, release it. */
		src->active = 0;
		rte_spinlock_unlock(&intr_lock);
	}

	return 0;
}
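
/*
 * Concurrency note (editorial): each callback is invoked from a stack copy
 * with intr_lock dropped, so a callback may register further callbacks on
 * the same source. Unregistering a source whose callback is currently
 * running fails with -EAGAIN (src->active != 0); a caller on another thread
 * typically retries, e.g.
 *
 *	while (rte_intr_callback_unregister(handle, cb_fn, cb_arg) == -EAGAIN)
 *		usleep(100);	// back off until the callback returns
 */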

/**
 * It handles all the interrupts.
 *
 * @param pfd
 *  epoll file descriptor.
 * @param totalfds
 *  The number of file descriptors added in epoll.
 *
 * @return
 *  void
 */
static void
eal_intr_handle_interrupts(int pfd, unsigned totalfds)
{
	struct epoll_event events[totalfds];
	int nfds = 0;

	for(;;) {
		nfds = epoll_wait(pfd, events, totalfds,
			EAL_INTR_EPOLL_WAIT_FOREVER);
		/* epoll_wait fail */
		if (nfds < 0) {
			if (errno == EINTR)
				continue;
			RTE_LOG(ERR, EAL,
				"epoll_wait returns with fail\n");
			return;
		}
		/* epoll_wait timeout, will never happen here */
		else if (nfds == 0)
			continue;
		/* epoll_wait has at least one fd ready to read */
		if (eal_intr_process_interrupts(events, nfds) < 0)
			return;
	}
}

/**
 * It builds/rebuilds up the epoll file descriptor with all the
 * file descriptors being waited on. Then handles the interrupts.
 *
 * @param arg
 *  pointer. (unused)
 *
 * @return
 *  never returns;
 */
static __attribute__((noreturn)) void *
eal_intr_thread_main(__rte_unused void *arg)
{
	struct epoll_event ev;

	/* host thread, never break out */
	for (;;) {
		/* build up the epoll fd with all descriptors we are to
		 * wait on then pass it to the handle_interrupts function
		 */
		static struct epoll_event pipe_event = {
			.events = EPOLLIN | EPOLLPRI,
		};
		struct rte_intr_source *src;
		unsigned numfds = 0;

		/* create epoll fd */
		int pfd = epoll_create(1);
		if (pfd < 0)
			rte_panic("Cannot create epoll instance\n");

		pipe_event.data.fd = intr_pipe.readfd;
		/**
		 * add pipe fd into wait list, this pipe is used to
		 * rebuild the wait list.
		 */
		if (epoll_ctl(pfd, EPOLL_CTL_ADD, intr_pipe.readfd,
						&pipe_event) < 0) {
			rte_panic("Error adding fd to %d epoll_ctl, %s\n",
					intr_pipe.readfd, strerror(errno));
		}
		numfds++;

		rte_spinlock_lock(&intr_lock);

		TAILQ_FOREACH(src, &intr_sources, next) {
			if (src->callbacks.tqh_first == NULL)
				continue; /* skip those with no callbacks */
			ev.events = EPOLLIN | EPOLLPRI | EPOLLRDHUP | EPOLLHUP;
			ev.data.fd = src->intr_handle.fd;

			/**
			 * add all the device file descriptors
			 * into the wait list.
			 */
			if (epoll_ctl(pfd, EPOLL_CTL_ADD,
					src->intr_handle.fd, &ev) < 0){
				rte_panic("Error adding fd %d epoll_ctl, %s\n",
					src->intr_handle.fd, strerror(errno));
			}
			else
				numfds++;
		}
		rte_spinlock_unlock(&intr_lock);
		/* serve the interrupt */
		eal_intr_handle_interrupts(pfd, numfds);

		/**
		 * when we return, we need to rebuild the
		 * list of fds to monitor.
		 */
		close(pfd);
	}
}

int
rte_eal_intr_init(void)
{
	int ret = 0;

	/* init the global interrupt source head */
	TAILQ_INIT(&intr_sources);

	/**
	 * create a pipe which will be waited on by epoll and written to
	 * in order to rebuild the wait list of epoll.
	 */
	if (pipe(intr_pipe.pipefd) < 0) {
		rte_errno = errno;
		return -1;
	}

	/* create the host thread to wait/handle the interrupt */
	ret = rte_ctrl_thread_create(&intr_thread, "eal-intr-thread", NULL,
			eal_intr_thread_main, NULL);
	if (ret != 0) {
		rte_errno = -ret;
		RTE_LOG(ERR, EAL,
			"Failed to create thread for interrupt handling\n");
	}

	return ret;
}

static void
eal_intr_proc_rxtx_intr(int fd, const struct rte_intr_handle *intr_handle)
{
	union rte_intr_read_buffer buf;
	int bytes_read = 0;
	int nbytes;

	switch (intr_handle->type) {
	case RTE_INTR_HANDLE_UIO:
	case RTE_INTR_HANDLE_UIO_INTX:
		bytes_read = sizeof(buf.uio_intr_count);
		break;
#ifdef VFIO_PRESENT
	case RTE_INTR_HANDLE_VFIO_MSIX:
	case RTE_INTR_HANDLE_VFIO_MSI:
	case RTE_INTR_HANDLE_VFIO_LEGACY:
		bytes_read = sizeof(buf.vfio_intr_count);
		break;
#endif
	case RTE_INTR_HANDLE_VDEV:
		bytes_read = intr_handle->efd_counter_size;
		/* For vdev, number of bytes to read is set by driver */
		break;
	case RTE_INTR_HANDLE_EXT:
		return;
	default:
		bytes_read = 1;
		RTE_LOG(INFO, EAL, "unexpected intr type\n");
		break;
	}

	/**
	 * read out to clear the ready-to-be-read flag
	 * for epoll_wait.
	 */
	if (bytes_read == 0)
		return;
	do {
		nbytes = read(fd, &buf, bytes_read);
		if (nbytes < 0) {
			if (errno == EINTR || errno == EWOULDBLOCK ||
			    errno == EAGAIN)
				continue;
			RTE_LOG(ERR, EAL,
				"Error reading from fd %d: %s\n",
				fd, strerror(errno));
		} else if (nbytes == 0)
			RTE_LOG(ERR, EAL, "Read nothing from fd %d\n", fd);
		return;
	} while (1);
}

static int
eal_epoll_process_event(struct epoll_event *evs, unsigned int n,
			struct rte_epoll_event *events)
{
	unsigned int i, count = 0;
	struct rte_epoll_event *rev;

	for (i = 0; i < n; i++) {
		rev = evs[i].data.ptr;
		if (!rev || !rte_atomic32_cmpset(&rev->status, RTE_EPOLL_VALID,
						 RTE_EPOLL_EXEC))
			continue;

		events[count].status = RTE_EPOLL_VALID;
		events[count].fd = rev->fd;
		events[count].epfd = rev->epfd;
		events[count].epdata.event = rev->epdata.event;
		events[count].epdata.data = rev->epdata.data;
		if (rev->epdata.cb_fun)
			rev->epdata.cb_fun(rev->fd,
					   rev->epdata.cb_arg);

		rte_compiler_barrier();
		rev->status = RTE_EPOLL_VALID;
		count++;
	}
	return count;
}

static inline int
eal_init_tls_epfd(void)
{
	int pfd = epoll_create(255);

	if (pfd < 0) {
		RTE_LOG(ERR, EAL,
			"Cannot create epoll instance\n");
		return -1;
	}
	return pfd;
}

int
rte_intr_tls_epfd(void)
{
	if (RTE_PER_LCORE(_epfd) == -1)
		RTE_PER_LCORE(_epfd) = eal_init_tls_epfd();

	return RTE_PER_LCORE(_epfd);
}

int
rte_epoll_wait(int epfd, struct rte_epoll_event *events,
	       int maxevents, int timeout)
{
	struct epoll_event evs[maxevents];
	int rc;

	if (!events) {
		RTE_LOG(ERR, EAL, "rte_epoll_event can't be NULL\n");
		return -1;
	}

	/* using per thread epoll fd */
	if (epfd == RTE_EPOLL_PER_THREAD)
		epfd = rte_intr_tls_epfd();

	while (1) {
		rc = epoll_wait(epfd, evs, maxevents, timeout);
		if (likely(rc > 0)) {
			/* epoll_wait has at least one fd ready to read */
			rc = eal_epoll_process_event(evs, rc, events);
			break;
		} else if (rc < 0) {
			if (errno == EINTR)
				continue;
			/* epoll_wait fail */
			RTE_LOG(ERR, EAL, "epoll_wait returns with fail %s\n",
				strerror(errno));
			rc = -1;
			break;
		} else {
			/* rc == 0, epoll_wait timed out */
			break;
		}
	}

	return rc;
}

static inline void
eal_epoll_data_safe_free(struct rte_epoll_event *ev)
{
	/* spin until no rte_epoll_wait() caller is executing this event,
	 * then flip it to invalid so nobody picks it up again.
	 */
	while (!rte_atomic32_cmpset(&ev->status, RTE_EPOLL_VALID,
				    RTE_EPOLL_INVALID))
		while (ev->status != RTE_EPOLL_VALID)
			rte_pause();
	memset(&ev->epdata, 0, sizeof(ev->epdata));
	ev->fd = -1;
	ev->epfd = -1;
}

int
rte_epoll_ctl(int epfd, int op, int fd,
	      struct rte_epoll_event *event)
{
	struct epoll_event ev;

	if (!event) {
		RTE_LOG(ERR, EAL, "rte_epoll_event can't be NULL\n");
		return -1;
	}

	/* using per thread epoll fd */
	if (epfd == RTE_EPOLL_PER_THREAD)
		epfd = rte_intr_tls_epfd();

	if (op == EPOLL_CTL_ADD) {
		event->status = RTE_EPOLL_VALID;
		event->fd = fd;	/* ignore fd in event */
		event->epfd = epfd;
		ev.data.ptr = (void *)event;
	}

	ev.events = event->epdata.event;
	if (epoll_ctl(epfd, op, fd, &ev) < 0) {
		RTE_LOG(ERR, EAL, "Error op %d fd %d epoll_ctl, %s\n",
			op, fd, strerror(errno));
		if (op == EPOLL_CTL_ADD)
			/* rollback status when CTL_ADD fail */
			event->status = RTE_EPOLL_INVALID;
		return -1;
	}

	if (op == EPOLL_CTL_DEL && event->status != RTE_EPOLL_INVALID)
		eal_epoll_data_safe_free(event);

	return 0;
}
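
/*
 * Usage sketch (illustrative; `efd` stands for any readable descriptor such
 * as an eventfd, and the timeout is in milliseconds): events are armed with
 * rte_epoll_ctl() and harvested with rte_epoll_wait() on the per-thread
 * epoll instance:
 *
 *	static struct rte_epoll_event ev = {
 *		.epdata.event = EPOLLIN | EPOLLET,
 *	};
 *	struct rte_epoll_event out[8];
 *
 *	rte_epoll_ctl(RTE_EPOLL_PER_THREAD, EPOLL_CTL_ADD, efd, &ev);
 *	int n = rte_epoll_wait(RTE_EPOLL_PER_THREAD, out, 8, 500);
 *
 * The rte_epoll_event must stay alive while registered, since its address is
 * stored in the kernel event's data.ptr (see rte_epoll_ctl() above).
 */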

int
rte_intr_rx_ctl(struct rte_intr_handle *intr_handle, int epfd,
		int op, unsigned int vec, void *data)
{
	struct rte_epoll_event *rev;
	struct rte_epoll_data *epdata;
	int epfd_op;
	unsigned int efd_idx;
	int rc = 0;

	efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
		(vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;

	if (!intr_handle || intr_handle->nb_efd == 0 ||
	    efd_idx >= intr_handle->nb_efd) {
		RTE_LOG(ERR, EAL, "Wrong intr vector number.\n");
		return -EPERM;
	}

	switch (op) {
	case RTE_INTR_EVENT_ADD:
		epfd_op = EPOLL_CTL_ADD;
		rev = &intr_handle->elist[efd_idx];
		if (rev->status != RTE_EPOLL_INVALID) {
			RTE_LOG(INFO, EAL, "Event already been added.\n");
			return -EEXIST;
		}

		/* attach to intr vector fd */
		epdata = &rev->epdata;
		epdata->event = EPOLLIN | EPOLLPRI | EPOLLET;
		epdata->data = data;
		epdata->cb_fun = (rte_intr_event_cb_t)eal_intr_proc_rxtx_intr;
		epdata->cb_arg = (void *)intr_handle;
		rc = rte_epoll_ctl(epfd, epfd_op,
				   intr_handle->efds[efd_idx], rev);
		if (!rc)
			RTE_LOG(DEBUG, EAL,
				"efd %d associated with vec %d added on epfd %d"
				"\n", rev->fd, vec, epfd);
		else
			rc = -EPERM;
		break;
	case RTE_INTR_EVENT_DEL:
		epfd_op = EPOLL_CTL_DEL;
		rev = &intr_handle->elist[efd_idx];
		if (rev->status == RTE_EPOLL_INVALID) {
			RTE_LOG(INFO, EAL, "Event does not exist.\n");
			return -EPERM;
		}

		rc = rte_epoll_ctl(rev->epfd, epfd_op, rev->fd, rev);
		if (rc)
			rc = -EPERM;
		break;
	default:
		RTE_LOG(ERR, EAL, "event op type mismatch\n");
		rc = -EPERM;
	}

	return rc;
}
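
/*
 * Numbering note (editorial): queue vectors are offset by
 * RTE_INTR_VEC_RXTX_OFFSET because vector 0 (RTE_INTR_VEC_ZERO_OFFSET) is
 * reserved for the non-queue interrupt, so callers typically pass
 *
 *	vec = RTE_INTR_VEC_RXTX_OFFSET + rx_queue_id;
 *
 * which the efd_idx computation above maps back to efds[rx_queue_id].
 */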

void
rte_intr_free_epoll_fd(struct rte_intr_handle *intr_handle)
{
	uint32_t i;
	struct rte_epoll_event *rev;

	for (i = 0; i < intr_handle->nb_efd; i++) {
		rev = &intr_handle->elist[i];
		if (rev->status == RTE_EPOLL_INVALID)
			continue;
		if (rte_epoll_ctl(rev->epfd, EPOLL_CTL_DEL, rev->fd, rev)) {
			/* force free if the entry is still valid */
			eal_epoll_data_safe_free(rev);
			rev->status = RTE_EPOLL_INVALID;
		}
	}
}

int
rte_intr_efd_enable(struct rte_intr_handle *intr_handle, uint32_t nb_efd)
{
	uint32_t i;
	int fd;
	uint32_t n = RTE_MIN(nb_efd, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);

	assert(nb_efd != 0);

	if (intr_handle->type == RTE_INTR_HANDLE_VFIO_MSIX) {
		for (i = 0; i < n; i++) {
			fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
			if (fd < 0) {
				RTE_LOG(ERR, EAL,
					"can't setup eventfd, error %i (%s)\n",
					errno, strerror(errno));
				return -errno;
			}
			intr_handle->efds[i] = fd;
		}
		intr_handle->nb_efd = n;
		intr_handle->max_intr = NB_OTHER_INTR + n;
	} else if (intr_handle->type == RTE_INTR_HANDLE_VDEV) {
		/* only check here; initialization is done in the vdev driver */
		if (intr_handle->efd_counter_size >
		    sizeof(union rte_intr_read_buffer)) {
			RTE_LOG(ERR, EAL, "the efd_counter_size is oversized");
			return -EINVAL;
		}
	} else {
		intr_handle->efds[0] = intr_handle->fd;
		intr_handle->nb_efd = RTE_MIN(nb_efd, 1U);
		intr_handle->max_intr = NB_OTHER_INTR;
	}

	return 0;
}
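
/*
 * Usage sketch (illustrative; `dev` and the queue count are hypothetical):
 * a PMD enabling Rx queue interrupts would roughly do
 *
 *	uint32_t q, nb_rxq = 4;
 *
 *	if (rte_intr_efd_enable(&dev->intr_handle, nb_rxq) == 0) {
 *		for (q = 0; q < nb_rxq; q++)
 *			rte_intr_rx_ctl(&dev->intr_handle,
 *					RTE_EPOLL_PER_THREAD,
 *					RTE_INTR_EVENT_ADD,
 *					RTE_INTR_VEC_RXTX_OFFSET + q, NULL);
 *		rte_intr_enable(&dev->intr_handle);
 *	}
 *
 * Teardown runs in reverse: rte_intr_disable(), then rte_intr_efd_disable(),
 * which also drops the epoll registrations via rte_intr_free_epoll_fd().
 */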

void
rte_intr_efd_disable(struct rte_intr_handle *intr_handle)
{
	uint32_t i;

	rte_intr_free_epoll_fd(intr_handle);
	if (intr_handle->max_intr > intr_handle->nb_efd) {
		for (i = 0; i < intr_handle->nb_efd; i++)
			close(intr_handle->efds[i]);
	}
	intr_handle->nb_efd = 0;
	intr_handle->max_intr = 0;
}

int
rte_intr_dp_is_en(struct rte_intr_handle *intr_handle)
{
	return !(!intr_handle->nb_efd);
}

int
rte_intr_allow_others(struct rte_intr_handle *intr_handle)
{
	if (!rte_intr_dp_is_en(intr_handle))
		return 1;
	else
		return !!(intr_handle->max_intr - intr_handle->nb_efd);
}

int
rte_intr_cap_multiple(struct rte_intr_handle *intr_handle)
{
	if (intr_handle->type == RTE_INTR_HANDLE_VFIO_MSIX)
		return 1;

	if (intr_handle->type == RTE_INTR_HANDLE_VDEV)
		return 1;

	return 0;
}