/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <pthread.h>
#include <sys/queue.h>
#include <stdarg.h>
#include <unistd.h>
#include <string.h>
#include <errno.h>
#include <inttypes.h>
#include <sys/epoll.h>
#include <sys/signalfd.h>
#include <sys/ioctl.h>
#include <sys/eventfd.h>
#include <assert.h>

#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_debug.h>
#include <rte_log.h>
#include <rte_pci.h>
#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_spinlock.h>

#include "eal_private.h"
#include "eal_vfio.h"
#include "eal_thread.h"

#define EAL_INTR_EPOLL_WAIT_FOREVER (-1)
#define NB_OTHER_INTR 1

static RTE_DEFINE_PER_LCORE(int, _epfd) = -1; /**< epoll fd per thread */

/**
 * union for pipe fds.
 */
union intr_pipefds {
	struct {
		int pipefd[2];
	};
	struct {
		int readfd;
		int writefd;
	};
};
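
/*
 * Note: the two anonymous structs above overlay the same storage, so
 * pipefd[0]/pipefd[1] (as filled in by pipe(2)) can also be accessed as
 * readfd/writefd without any extra bookkeeping.
 */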

/**
 * union buffer for reading on different devices
 */
union rte_intr_read_buffer {
	int uio_intr_count;       /* for uio device */
#ifdef VFIO_PRESENT
	uint64_t vfio_intr_count; /* for vfio device */
#endif
	uint64_t timerfd_num;     /* for timerfd */
	char charbuf[16];         /* for others */
};
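
/*
 * Sizing note: a read() on a uio interrupt fd yields a 4-byte interrupt
 * count, while vfio eventfds and timerfds yield an 8-byte counter; the
 * handlers below pick the matching member (and size) before reading so
 * the fd's ready state is cleared correctly for epoll_wait().
 */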

TAILQ_HEAD(rte_intr_cb_list, rte_intr_callback);
TAILQ_HEAD(rte_intr_source_list, rte_intr_source);

struct rte_intr_callback {
	TAILQ_ENTRY(rte_intr_callback) next;
	rte_intr_callback_fn cb_fn; /**< callback address */
	void *cb_arg;               /**< parameter for callback */
};

struct rte_intr_source {
	TAILQ_ENTRY(rte_intr_source) next;
	struct rte_intr_handle intr_handle; /**< interrupt handle */
	struct rte_intr_cb_list callbacks;  /**< user callbacks */
	uint32_t active;
};

/* global spinlock for interrupt data operation */
static rte_spinlock_t intr_lock = RTE_SPINLOCK_INITIALIZER;

/* union buffer for pipe read/write */
static union intr_pipefds intr_pipe;

/* interrupt sources list */
static struct rte_intr_source_list intr_sources;

/* interrupt handling thread */
static pthread_t intr_thread;

/* VFIO interrupts */
#ifdef VFIO_PRESENT

#define IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + sizeof(int))
/* irq set buffer length for queue interrupts and LSC interrupt */
#define MSIX_IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + \
			      sizeof(int) * (RTE_MAX_RXTX_INTR_VEC_ID + 1))
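
/*
 * struct vfio_irq_set is a variable-length ioctl argument: the fixed
 * header is followed by an array of eventfd file descriptors when
 * VFIO_IRQ_SET_DATA_EVENTFD is used. IRQ_SET_BUF_LEN therefore leaves
 * room for a single fd, and MSIX_IRQ_SET_BUF_LEN for one fd per Rx/Tx
 * vector plus the extra vector reserved for non-queue interrupts.
 */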

/* enable legacy (INTx) interrupts */
static int
vfio_enable_intx(struct rte_intr_handle *intr_handle) {
	struct vfio_irq_set *irq_set;
	char irq_set_buf[IRQ_SET_BUF_LEN];
	int len, ret;
	int *fd_ptr;

	len = sizeof(irq_set_buf);

	/* enable INTx */
	irq_set = (struct vfio_irq_set *) irq_set_buf;
	irq_set->argsz = len;
	irq_set->count = 1;
	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
	irq_set->start = 0;
	fd_ptr = (int *) &irq_set->data;
	*fd_ptr = intr_handle->fd;

	ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);

	if (ret) {
		RTE_LOG(ERR, EAL, "Error enabling INTx interrupts for fd %d\n",
			intr_handle->fd);
		return -1;
	}

	/* unmask INTx after enabling */
	memset(irq_set, 0, len);
	len = sizeof(struct vfio_irq_set);
	irq_set->argsz = len;
	irq_set->count = 1;
	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK;
	irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
	irq_set->start = 0;

	ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);

	if (ret) {
		RTE_LOG(ERR, EAL, "Error unmasking INTx interrupts for fd %d\n",
			intr_handle->fd);
		return -1;
	}
	return 0;
}
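
/*
 * INTx is level-triggered, so the kernel masks the line after each
 * trigger; the explicit UNMASK above arms it again after the eventfd
 * has been attached with DATA_EVENTFD | ACTION_TRIGGER.
 */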

/* disable legacy (INTx) interrupts */
static int
vfio_disable_intx(struct rte_intr_handle *intr_handle) {
	struct vfio_irq_set *irq_set;
	char irq_set_buf[IRQ_SET_BUF_LEN];
	int len, ret;

	len = sizeof(struct vfio_irq_set);

	/* mask interrupts before disabling */
	irq_set = (struct vfio_irq_set *) irq_set_buf;
	irq_set->argsz = len;
	irq_set->count = 1;
	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK;
	irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
	irq_set->start = 0;

	ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);

	if (ret) {
		RTE_LOG(ERR, EAL, "Error masking INTx interrupts for fd %d\n",
			intr_handle->fd);
		return -1;
	}

	/* disable INTx */
	memset(irq_set, 0, len);
	irq_set->argsz = len;
	irq_set->count = 0;
	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
	irq_set->start = 0;

	ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);

	if (ret) {
		RTE_LOG(ERR, EAL,
			"Error disabling INTx interrupts for fd %d\n", intr_handle->fd);
		return -1;
	}
	return 0;
}
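
/*
 * For all the disable paths below, count == 0 together with
 * ACTION_TRIGGER tells the kernel's VFIO driver to tear down the
 * eventfd association entirely rather than merely masking the vector.
 */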

/* enable MSI interrupts */
static int
vfio_enable_msi(struct rte_intr_handle *intr_handle) {
	int len, ret;
	char irq_set_buf[IRQ_SET_BUF_LEN];
	struct vfio_irq_set *irq_set;
	int *fd_ptr;

	len = sizeof(irq_set_buf);

	irq_set = (struct vfio_irq_set *) irq_set_buf;
	irq_set->argsz = len;
	irq_set->count = 1;
	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_MSI_IRQ_INDEX;
	irq_set->start = 0;
	fd_ptr = (int *) &irq_set->data;
	*fd_ptr = intr_handle->fd;

	ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);

	if (ret) {
		RTE_LOG(ERR, EAL, "Error enabling MSI interrupts for fd %d\n",
			intr_handle->fd);
		return -1;
	}
	return 0;
}

/* disable MSI interrupts */
static int
vfio_disable_msi(struct rte_intr_handle *intr_handle) {
	struct vfio_irq_set *irq_set;
	char irq_set_buf[IRQ_SET_BUF_LEN];
	int len, ret;

	len = sizeof(struct vfio_irq_set);

	irq_set = (struct vfio_irq_set *) irq_set_buf;
	irq_set->argsz = len;
	irq_set->count = 0;
	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_MSI_IRQ_INDEX;
	irq_set->start = 0;

	ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);

	if (ret)
		RTE_LOG(ERR, EAL,
			"Error disabling MSI interrupts for fd %d\n", intr_handle->fd);

	return ret;
}

/* enable MSI-X interrupts */
static int
vfio_enable_msix(struct rte_intr_handle *intr_handle) {
	int len, ret;
	char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
	struct vfio_irq_set *irq_set;
	int *fd_ptr;

	len = sizeof(irq_set_buf);

	irq_set = (struct vfio_irq_set *) irq_set_buf;
	irq_set->argsz = len;
	if (!intr_handle->max_intr)
		intr_handle->max_intr = 1;
	else if (intr_handle->max_intr > RTE_MAX_RXTX_INTR_VEC_ID)
		intr_handle->max_intr = RTE_MAX_RXTX_INTR_VEC_ID + 1;

	irq_set->count = intr_handle->max_intr;
	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
	irq_set->start = 0;
	fd_ptr = (int *) &irq_set->data;
	/* INTR vector offset 0 is reserved for non-efd mappings */
	fd_ptr[RTE_INTR_VEC_ZERO_OFFSET] = intr_handle->fd;
	memcpy(&fd_ptr[RTE_INTR_VEC_RXTX_OFFSET], intr_handle->efds,
		sizeof(*intr_handle->efds) * intr_handle->nb_efd);

	ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);

	if (ret) {
		RTE_LOG(ERR, EAL, "Error enabling MSI-X interrupts for fd %d\n",
			intr_handle->fd);
		return -1;
	}

	return 0;
}
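
/*
 * Resulting MSI-X layout: vector 0 carries the "miscellaneous"
 * interrupts (e.g. link status change) through intr_handle->fd, while
 * vectors 1..nb_efd carry per-queue Rx events through the eventfds in
 * intr_handle->efds[], as set up by rte_intr_efd_enable() below.
 */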

/* disable MSI-X interrupts */
static int
vfio_disable_msix(struct rte_intr_handle *intr_handle) {
	struct vfio_irq_set *irq_set;
	char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
	int len, ret;

	len = sizeof(struct vfio_irq_set);

	irq_set = (struct vfio_irq_set *) irq_set_buf;
	irq_set->argsz = len;
	irq_set->count = 0;
	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
	irq_set->start = 0;

	ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);

	if (ret)
		RTE_LOG(ERR, EAL,
			"Error disabling MSI-X interrupts for fd %d\n", intr_handle->fd);

	return ret;
}
#endif

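/*
 * The two uio_intx_* helpers below toggle the INTx Disable bit (bit 10)
 * of the PCI command register. The command register is a 16-bit field
 * at config-space offset 4, so its high byte lives at offset 5 and the
 * bit appears there as 0x4; that is what the pread()/pwrite() at
 * offset 5 manipulate via the uio_pci_generic config node.
 */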
static int
uio_intx_intr_disable(struct rte_intr_handle *intr_handle)
{
	unsigned char command_high;

	/* use UIO config file descriptor for uio_pci_generic */
	if (pread(intr_handle->uio_cfg_fd, &command_high, 1, 5) != 1) {
		RTE_LOG(ERR, EAL,
			"Error reading interrupts status for fd %d\n",
			intr_handle->uio_cfg_fd);
		return -1;
	}
	/* disable interrupts */
	command_high |= 0x4;
	if (pwrite(intr_handle->uio_cfg_fd, &command_high, 1, 5) != 1) {
		RTE_LOG(ERR, EAL,
			"Error disabling interrupts for fd %d\n",
			intr_handle->uio_cfg_fd);
		return -1;
	}

	return 0;
}

static int
uio_intx_intr_enable(struct rte_intr_handle *intr_handle)
{
	unsigned char command_high;

	/* use UIO config file descriptor for uio_pci_generic */
	if (pread(intr_handle->uio_cfg_fd, &command_high, 1, 5) != 1) {
		RTE_LOG(ERR, EAL,
			"Error reading interrupts status for fd %d\n",
			intr_handle->uio_cfg_fd);
		return -1;
	}
	/* enable interrupts */
	command_high &= ~0x4;
	if (pwrite(intr_handle->uio_cfg_fd, &command_high, 1, 5) != 1) {
		RTE_LOG(ERR, EAL,
			"Error enabling interrupts for fd %d\n",
			intr_handle->uio_cfg_fd);
		return -1;
	}

	return 0;
}

static int
uio_intr_disable(struct rte_intr_handle *intr_handle)
{
	const int value = 0;

	if (write(intr_handle->fd, &value, sizeof(value)) < 0) {
		RTE_LOG(ERR, EAL,
			"Error disabling interrupts for fd %d (%s)\n",
			intr_handle->fd, strerror(errno));
		return -1;
	}
	return 0;
}

static int
uio_intr_enable(struct rte_intr_handle *intr_handle)
{
	const int value = 1;

	if (write(intr_handle->fd, &value, sizeof(value)) < 0) {
		RTE_LOG(ERR, EAL,
			"Error enabling interrupts for fd %d (%s)\n",
			intr_handle->fd, strerror(errno));
		return -1;
	}
	return 0;
}
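
/*
 * For igb_uio, interrupt control goes through the /dev/uioX fd itself:
 * the UIO framework interprets a 4-byte write of 1/0 as an irqcontrol
 * request to enable/disable the device interrupt, which is what
 * uio_intr_enable()/uio_intr_disable() rely on above.
 */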

int
rte_intr_callback_register(struct rte_intr_handle *intr_handle,
			rte_intr_callback_fn cb, void *cb_arg)
{
	int ret, wake_thread;
	struct rte_intr_source *src;
	struct rte_intr_callback *callback;

	wake_thread = 0;

	/* first do parameter checking */
	if (intr_handle == NULL || intr_handle->fd < 0 || cb == NULL) {
		RTE_LOG(ERR, EAL,
			"Registering with invalid input parameter\n");
		return -EINVAL;
	}

	/* allocate a new interrupt callback entity */
	callback = rte_zmalloc("interrupt callback list",
				sizeof(*callback), 0);
	if (callback == NULL) {
		RTE_LOG(ERR, EAL, "Can not allocate memory\n");
		return -ENOMEM;
	}
	callback->cb_fn = cb;
	callback->cb_arg = cb_arg;

	rte_spinlock_lock(&intr_lock);

	/* check if there is at least one callback registered for the fd */
	TAILQ_FOREACH(src, &intr_sources, next) {
		if (src->intr_handle.fd == intr_handle->fd) {
			/* we had no interrupts for this */
			if (TAILQ_EMPTY(&src->callbacks))
				wake_thread = 1;

			TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
			ret = 0;
			break;
		}
	}

	/* no existing callbacks for this fd - add a new source */
	if (src == NULL) {
		if ((src = rte_zmalloc("interrupt source list",
				sizeof(*src), 0)) == NULL) {
			RTE_LOG(ERR, EAL, "Can not allocate memory\n");
			rte_free(callback);
			ret = -ENOMEM;
		} else {
			src->intr_handle = *intr_handle;
			TAILQ_INIT(&src->callbacks);
			TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
			TAILQ_INSERT_TAIL(&intr_sources, src, next);
			wake_thread = 1;
			ret = 0;
		}
	}

	rte_spinlock_unlock(&intr_lock);

	/**
	 * check whether we need to notify the pipe fd waited on by
	 * epoll_wait so that it rebuilds the wait list.
	 */
	if (wake_thread)
		if (write(intr_pipe.writefd, "1", 1) < 0)
			return -EPIPE;

	return ret;
}
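
/*
 * Typical driver usage (a minimal sketch; "my_dev" and "lsc_handler"
 * are hypothetical names, not part of this file):
 *
 *	static void lsc_handler(struct rte_intr_handle *h, void *arg)
 *	{
 *		struct my_dev *dev = arg;
 *		// handle the link status change, then re-arm
 *		rte_intr_enable(h);
 *	}
 *
 *	rte_intr_callback_register(&dev->intr_handle, lsc_handler, dev);
 */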

int
rte_intr_callback_unregister(struct rte_intr_handle *intr_handle,
			rte_intr_callback_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_intr_source *src;
	struct rte_intr_callback *cb, *next;

	/* do parameter checking first */
	if (intr_handle == NULL || intr_handle->fd < 0) {
		RTE_LOG(ERR, EAL,
			"Unregistering with invalid input parameter\n");
		return -EINVAL;
	}

	rte_spinlock_lock(&intr_lock);

	/* check if an interrupt source exists for the fd */
	TAILQ_FOREACH(src, &intr_sources, next)
		if (src->intr_handle.fd == intr_handle->fd)
			break;

	/* No interrupt source registered for the fd */
	if (src == NULL) {
		ret = -ENOENT;

	/* interrupt source has some active callbacks right now. */
	} else if (src->active != 0) {
		ret = -EAGAIN;

	/* ok to remove. */
	} else {
		ret = 0;

		/* walk through the callbacks and remove all that match. */
		for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {

			next = TAILQ_NEXT(cb, next);

			if (cb->cb_fn == cb_fn && (cb_arg == (void *)-1 ||
					cb->cb_arg == cb_arg)) {
				TAILQ_REMOVE(&src->callbacks, cb, next);
				rte_free(cb);
				ret++;
			}
		}

		/* all callbacks for that source are removed. */
		if (TAILQ_EMPTY(&src->callbacks)) {
			TAILQ_REMOVE(&intr_sources, src, next);
			rte_free(src);
		}
	}

	rte_spinlock_unlock(&intr_lock);

	/* notify the pipe fd waited on by epoll_wait to rebuild the wait list */
	if (ret >= 0 && write(intr_pipe.writefd, "1", 1) < 0) {
		ret = -EPIPE;
	}

	return ret;
}
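
/*
 * Note: passing cb_arg == (void *)-1 acts as a wildcard, removing every
 * callback registered with cb_fn regardless of its argument; on success
 * the return value is the number of callbacks removed.
 */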

int
rte_intr_enable(struct rte_intr_handle *intr_handle)
{
	if (!intr_handle || intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0)
		return -1;

	switch (intr_handle->type) {
	/* write to the uio fd to enable the interrupt */
	case RTE_INTR_HANDLE_UIO:
		if (uio_intr_enable(intr_handle))
			return -1;
		break;
	case RTE_INTR_HANDLE_UIO_INTX:
		if (uio_intx_intr_enable(intr_handle))
			return -1;
		break;
	/* not used at this moment */
	case RTE_INTR_HANDLE_ALARM:
		return -1;
#ifdef VFIO_PRESENT
	case RTE_INTR_HANDLE_VFIO_MSIX:
		if (vfio_enable_msix(intr_handle))
			return -1;
		break;
	case RTE_INTR_HANDLE_VFIO_MSI:
		if (vfio_enable_msi(intr_handle))
			return -1;
		break;
	case RTE_INTR_HANDLE_VFIO_LEGACY:
		if (vfio_enable_intx(intr_handle))
			return -1;
		break;
#endif
	/* unknown handle type */
	default:
		RTE_LOG(ERR, EAL,
			"Unknown handle type of fd %d\n",
			intr_handle->fd);
		return -1;
	}

	return 0;
}

int
rte_intr_disable(struct rte_intr_handle *intr_handle)
{
	if (!intr_handle || intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0)
		return -1;

	switch (intr_handle->type) {
	/* write to the uio fd to disable the interrupt */
	case RTE_INTR_HANDLE_UIO:
		if (uio_intr_disable(intr_handle))
			return -1;
		break;
	case RTE_INTR_HANDLE_UIO_INTX:
		if (uio_intx_intr_disable(intr_handle))
			return -1;
		break;
	/* not used at this moment */
	case RTE_INTR_HANDLE_ALARM:
		return -1;
#ifdef VFIO_PRESENT
	case RTE_INTR_HANDLE_VFIO_MSIX:
		if (vfio_disable_msix(intr_handle))
			return -1;
		break;
	case RTE_INTR_HANDLE_VFIO_MSI:
		if (vfio_disable_msi(intr_handle))
			return -1;
		break;
	case RTE_INTR_HANDLE_VFIO_LEGACY:
		if (vfio_disable_intx(intr_handle))
			return -1;
		break;
#endif
	/* unknown handle type */
	default:
		RTE_LOG(ERR, EAL,
			"Unknown handle type of fd %d\n",
			intr_handle->fd);
		return -1;
	}

	return 0;
}

static int
eal_intr_process_interrupts(struct epoll_event *events, int nfds)
{
	int n, bytes_read;
	struct rte_intr_source *src;
	struct rte_intr_callback *cb;
	union rte_intr_read_buffer buf;
	struct rte_intr_callback active_cb;

	for (n = 0; n < nfds; n++) {

		/**
		 * if the pipe fd is ready to read, return out to
		 * rebuild the wait list.
		 */
		if (events[n].data.fd == intr_pipe.readfd) {
			int r = read(intr_pipe.readfd, buf.charbuf,
					sizeof(buf.charbuf));
			RTE_SET_USED(r);
			return -1;
		}
		rte_spinlock_lock(&intr_lock);
		TAILQ_FOREACH(src, &intr_sources, next)
			if (src->intr_handle.fd ==
					events[n].data.fd)
				break;
		if (src == NULL) {
			rte_spinlock_unlock(&intr_lock);
			continue;
		}

		/* mark this interrupt source as active and release the lock. */
		src->active = 1;
		rte_spinlock_unlock(&intr_lock);

		/* set the length to be read for the different handle types */
		switch (src->intr_handle.type) {
		case RTE_INTR_HANDLE_UIO:
		case RTE_INTR_HANDLE_UIO_INTX:
			bytes_read = sizeof(buf.uio_intr_count);
			break;
		case RTE_INTR_HANDLE_ALARM:
			bytes_read = sizeof(buf.timerfd_num);
			break;
#ifdef VFIO_PRESENT
		case RTE_INTR_HANDLE_VFIO_MSIX:
		case RTE_INTR_HANDLE_VFIO_MSI:
		case RTE_INTR_HANDLE_VFIO_LEGACY:
			bytes_read = sizeof(buf.vfio_intr_count);
			break;
#endif
		case RTE_INTR_HANDLE_EXT:
		default:
			bytes_read = 1;
			break;
		}

		if (src->intr_handle.type != RTE_INTR_HANDLE_EXT) {
			/**
			 * read out to clear the ready-to-be-read flag
			 * for epoll_wait.
			 */
			bytes_read = read(events[n].data.fd, &buf, bytes_read);
			if (bytes_read < 0) {
				if (errno == EINTR || errno == EWOULDBLOCK) {
					/* skip this fd, but do not leave the
					 * source marked active forever */
					rte_spinlock_lock(&intr_lock);
					src->active = 0;
					rte_spinlock_unlock(&intr_lock);
					continue;
				}

				RTE_LOG(ERR, EAL, "Error reading from file "
					"descriptor %d: %s\n",
					events[n].data.fd,
					strerror(errno));
			} else if (bytes_read == 0)
				RTE_LOG(ERR, EAL, "Read nothing from file "
					"descriptor %d\n", events[n].data.fd);
		}

		/* take the lock again to call callbacks and update status. */
		rte_spinlock_lock(&intr_lock);

		if (bytes_read > 0) {

			/* Finally, call all callbacks. */
			TAILQ_FOREACH(cb, &src->callbacks, next) {

				/* make a copy and unlock. */
				active_cb = *cb;
				rte_spinlock_unlock(&intr_lock);

				/* call the actual callback */
				active_cb.cb_fn(&src->intr_handle,
					active_cb.cb_arg);

				/* get the lock back. */
				rte_spinlock_lock(&intr_lock);
			}
		}

		/* we are done with that interrupt source, release it. */
		src->active = 0;
		rte_spinlock_unlock(&intr_lock);
	}

	return 0;
}
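
/*
 * The active flag plus the copy-then-unlock dance above lets callbacks
 * run without holding intr_lock (so they may themselves register or
 * unregister callbacks), while rte_intr_callback_unregister() refuses
 * to free a source whose callbacks are currently executing (-EAGAIN).
 */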

/**
 * It handles all the interrupts.
 *
 * @param pfd
 *  epoll file descriptor.
 * @param totalfds
 *  The number of file descriptors added in epoll.
 *
 * @return
 *  void
 */
static void
eal_intr_handle_interrupts(int pfd, unsigned totalfds)
{
	struct epoll_event events[totalfds];
	int nfds = 0;

	for (;;) {
		nfds = epoll_wait(pfd, events, totalfds,
			EAL_INTR_EPOLL_WAIT_FOREVER);
		/* epoll_wait fail */
		if (nfds < 0) {
			if (errno == EINTR)
				continue;
			RTE_LOG(ERR, EAL,
				"epoll_wait returns with fail\n");
			return;
		}
		/* epoll_wait timeout, will never happen here */
		else if (nfds == 0)
			continue;
		/* epoll_wait has at least one fd ready to read */
		if (eal_intr_process_interrupts(events, nfds) < 0)
			return;
	}
}

/**
 * It builds/rebuilds up the epoll file descriptor with all the
 * file descriptors being waited on. Then handles the interrupts.
 *
 * @param arg
 *  pointer. (unused)
 *
 * @return
 *  never return;
 */
static __attribute__((noreturn)) void *
eal_intr_thread_main(__rte_unused void *arg)
{
	struct epoll_event ev;

	/* host thread, never break out */
	for (;;) {
		/* build up the epoll fd with all descriptors we are to
		 * wait on then pass it to the handle_interrupts function
		 */
		static struct epoll_event pipe_event = {
			.events = EPOLLIN | EPOLLPRI,
		};
		struct rte_intr_source *src;
		unsigned numfds = 0;

		/* create epoll fd */
		int pfd = epoll_create(1);
		if (pfd < 0)
			rte_panic("Cannot create epoll instance\n");

		pipe_event.data.fd = intr_pipe.readfd;
		/**
		 * add pipe fd into wait list, this pipe is used to
		 * rebuild the wait list.
		 */
		if (epoll_ctl(pfd, EPOLL_CTL_ADD, intr_pipe.readfd,
						&pipe_event) < 0) {
			rte_panic("Error adding fd to %d epoll_ctl, %s\n",
					intr_pipe.readfd, strerror(errno));
		}
		numfds++;

		rte_spinlock_lock(&intr_lock);

		TAILQ_FOREACH(src, &intr_sources, next) {
			if (src->callbacks.tqh_first == NULL)
				continue; /* skip those with no callbacks */
			ev.events = EPOLLIN | EPOLLPRI;
			ev.data.fd = src->intr_handle.fd;

			/**
			 * add all the device file descriptors
			 * into the wait list.
			 */
			if (epoll_ctl(pfd, EPOLL_CTL_ADD,
					src->intr_handle.fd, &ev) < 0) {
				rte_panic("Error adding fd %d epoll_ctl, %s\n",
					src->intr_handle.fd, strerror(errno));
			}
			else
				numfds++;
		}
		rte_spinlock_unlock(&intr_lock);
		/* serve the interrupt */
		eal_intr_handle_interrupts(pfd, numfds);

		/**
		 * when we return, we need to rebuild the
		 * list of fds to monitor.
		 */
		close(pfd);
	}
}

int
rte_eal_intr_init(void)
{
	int ret = 0, ret_1 = 0;
	char thread_name[RTE_MAX_THREAD_NAME_LEN];

	/* init the global interrupt source head */
	TAILQ_INIT(&intr_sources);

	/**
	 * create a pipe which will be waited on by epoll and used to
	 * notify the thread to rebuild its epoll wait list.
	 */
	if (pipe(intr_pipe.pipefd) < 0)
		return -1;

	/* create the host thread to wait/handle the interrupt */
	ret = pthread_create(&intr_thread, NULL,
			eal_intr_thread_main, NULL);
	if (ret != 0) {
		RTE_LOG(ERR, EAL,
			"Failed to create thread for interrupt handling\n");
	} else {
		/* Set thread_name for aid in debugging. */
		snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN,
			"eal-intr-thread");
		ret_1 = rte_thread_setname(intr_thread, thread_name);
		if (ret_1 != 0)
			RTE_LOG(DEBUG, EAL,
				"Failed to set thread name for interrupt handling\n");
	}

	return -ret;
}

static void
eal_intr_proc_rxtx_intr(int fd, const struct rte_intr_handle *intr_handle)
{
	union rte_intr_read_buffer buf;
	int bytes_read = 1;
	int nbytes;

	switch (intr_handle->type) {
	case RTE_INTR_HANDLE_UIO:
	case RTE_INTR_HANDLE_UIO_INTX:
		bytes_read = sizeof(buf.uio_intr_count);
		break;
#ifdef VFIO_PRESENT
	case RTE_INTR_HANDLE_VFIO_MSIX:
	case RTE_INTR_HANDLE_VFIO_MSI:
	case RTE_INTR_HANDLE_VFIO_LEGACY:
		bytes_read = sizeof(buf.vfio_intr_count);
		break;
#endif
	default:
		bytes_read = 1;
		RTE_LOG(INFO, EAL, "unexpected intr type\n");
		break;
	}

	/**
	 * read out to clear the ready-to-be-read flag
	 * for epoll_wait.
	 */
	do {
		nbytes = read(fd, &buf, bytes_read);
		if (nbytes < 0) {
			if (errno == EINTR || errno == EWOULDBLOCK ||
			    errno == EAGAIN)
				continue;
			RTE_LOG(ERR, EAL,
				"Error reading from fd %d: %s\n",
				fd, strerror(errno));
		} else if (nbytes == 0)
			RTE_LOG(ERR, EAL, "Read nothing from fd %d\n", fd);
		return;
	} while (1);
}

static int
eal_epoll_process_event(struct epoll_event *evs, unsigned int n,
			struct rte_epoll_event *events)
{
	unsigned int i, count = 0;
	struct rte_epoll_event *rev;

	for (i = 0; i < n; i++) {
		rev = evs[i].data.ptr;
		if (!rev || !rte_atomic32_cmpset(&rev->status, RTE_EPOLL_VALID,
						 RTE_EPOLL_EXEC))
			continue;

		events[count].status = RTE_EPOLL_VALID;
		events[count].fd = rev->fd;
		events[count].epfd = rev->epfd;
		events[count].epdata.event = rev->epdata.event;
		events[count].epdata.data = rev->epdata.data;
		if (rev->epdata.cb_fun)
			rev->epdata.cb_fun(rev->fd,
					   rev->epdata.cb_arg);

		rte_compiler_barrier();
		rev->status = RTE_EPOLL_VALID;
		count++;
	}
	return count;
}
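
/*
 * The VALID -> EXEC compare-and-set above is what makes concurrent
 * rte_epoll_ctl(EPOLL_CTL_DEL) safe: eal_epoll_data_safe_free() spins
 * until the event is back in the VALID state before invalidating it,
 * so an event is never freed while its callback is running.
 */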

static inline int
eal_init_tls_epfd(void)
{
	int pfd = epoll_create(255);

	if (pfd < 0) {
		RTE_LOG(ERR, EAL,
			"Cannot create epoll instance\n");
		return -1;
	}
	return pfd;
}

int
rte_intr_tls_epfd(void)
{
	if (RTE_PER_LCORE(_epfd) == -1)
		RTE_PER_LCORE(_epfd) = eal_init_tls_epfd();

	return RTE_PER_LCORE(_epfd);
}

int
rte_epoll_wait(int epfd, struct rte_epoll_event *events,
	       int maxevents, int timeout)
{
	struct epoll_event evs[maxevents];
	int rc;

	if (!events) {
		RTE_LOG(ERR, EAL, "rte_epoll_event can't be NULL\n");
		return -1;
	}

	/* using per thread epoll fd */
	if (epfd == RTE_EPOLL_PER_THREAD)
		epfd = rte_intr_tls_epfd();

	while (1) {
		rc = epoll_wait(epfd, evs, maxevents, timeout);
		if (likely(rc > 0)) {
			/* epoll_wait has at least one fd ready to read */
			rc = eal_epoll_process_event(evs, rc, events);
			break;
		} else if (rc < 0) {
			if (errno == EINTR)
				continue;
			/* epoll_wait fail */
			RTE_LOG(ERR, EAL, "epoll_wait returns with fail %s\n",
				strerror(errno));
			rc = -1;
			break;
		} else {
			/* rc == 0, epoll_wait timed out */
			break;
		}
	}

	return rc;
}

static inline void
eal_epoll_data_safe_free(struct rte_epoll_event *ev)
{
	while (!rte_atomic32_cmpset(&ev->status, RTE_EPOLL_VALID,
				    RTE_EPOLL_INVALID))
		while (ev->status != RTE_EPOLL_VALID)
			rte_pause();
	memset(&ev->epdata, 0, sizeof(ev->epdata));
	ev->fd = -1;
	ev->epfd = -1;
}

int
rte_epoll_ctl(int epfd, int op, int fd,
	      struct rte_epoll_event *event)
{
	struct epoll_event ev;

	if (!event) {
		RTE_LOG(ERR, EAL, "rte_epoll_event can't be NULL\n");
		return -1;
	}

	/* using per thread epoll fd */
	if (epfd == RTE_EPOLL_PER_THREAD)
		epfd = rte_intr_tls_epfd();

	if (op == EPOLL_CTL_ADD) {
		event->status = RTE_EPOLL_VALID;
		event->fd = fd; /* ignore fd in event */
		event->epfd = epfd;
		ev.data.ptr = (void *)event;
	}

	ev.events = event->epdata.event;
	if (epoll_ctl(epfd, op, fd, &ev) < 0) {
		RTE_LOG(ERR, EAL, "Error op %d fd %d epoll_ctl, %s\n",
			op, fd, strerror(errno));
		if (op == EPOLL_CTL_ADD)
			/* rollback status when CTL_ADD fail */
			event->status = RTE_EPOLL_INVALID;
		return -1;
	}

	if (op == EPOLL_CTL_DEL && event->status != RTE_EPOLL_INVALID)
		eal_epoll_data_safe_free(event);

	return 0;
}
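
/*
 * Minimal usage sketch for the rte_epoll_* wrappers (names such as
 * "my_ev" and "some_fd" are illustrative only):
 *
 *	struct rte_epoll_event my_ev = {
 *		.epdata = { .event = EPOLLIN | EPOLLET },
 *	};
 *	struct rte_epoll_event out[1];
 *
 *	rte_epoll_ctl(RTE_EPOLL_PER_THREAD, EPOLL_CTL_ADD, some_fd, &my_ev);
 *	rte_epoll_wait(RTE_EPOLL_PER_THREAD, out, 1, -1);
 *
 * Most applications go through rte_intr_rx_ctl() below instead, which
 * fills in epdata for a queue vector and then calls rte_epoll_ctl().
 */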

int
rte_intr_rx_ctl(struct rte_intr_handle *intr_handle, int epfd,
		int op, unsigned int vec, void *data)
{
	struct rte_epoll_event *rev;
	struct rte_epoll_data *epdata;
	int epfd_op;
	unsigned int efd_idx;
	int rc = 0;

	efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
		(vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;

	if (!intr_handle || intr_handle->nb_efd == 0 ||
	    efd_idx >= intr_handle->nb_efd) {
		RTE_LOG(ERR, EAL, "Wrong intr vector number.\n");
		return -EPERM;
	}

	switch (op) {
	case RTE_INTR_EVENT_ADD:
		epfd_op = EPOLL_CTL_ADD;
		rev = &intr_handle->elist[efd_idx];
		if (rev->status != RTE_EPOLL_INVALID) {
			RTE_LOG(INFO, EAL, "Event already been added.\n");
			return -EEXIST;
		}

		/* attach to intr vector fd */
		epdata = &rev->epdata;
		epdata->event = EPOLLIN | EPOLLPRI | EPOLLET;
		epdata->data = data;
		epdata->cb_fun = (rte_intr_event_cb_t)eal_intr_proc_rxtx_intr;
		epdata->cb_arg = (void *)intr_handle;
		rc = rte_epoll_ctl(epfd, epfd_op,
				   intr_handle->efds[efd_idx], rev);
		if (!rc)
			RTE_LOG(DEBUG, EAL,
				"efd %d associated with vec %d added on epfd %d\n",
				rev->fd, vec, epfd);
		else
			rc = -EPERM;
		break;
	case RTE_INTR_EVENT_DEL:
		epfd_op = EPOLL_CTL_DEL;
		rev = &intr_handle->elist[efd_idx];
		if (rev->status == RTE_EPOLL_INVALID) {
			RTE_LOG(INFO, EAL, "Event does not exist.\n");
			return -EPERM;
		}

		rc = rte_epoll_ctl(rev->epfd, epfd_op, rev->fd, rev);
		if (rc)
			rc = -EPERM;
		break;
	default:
		RTE_LOG(ERR, EAL, "event op type mismatch\n");
		rc = -EPERM;
	}

	return rc;
}

int
rte_intr_efd_enable(struct rte_intr_handle *intr_handle, uint32_t nb_efd)
{
	uint32_t i;
	int fd;
	uint32_t n = RTE_MIN(nb_efd, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);

	assert(nb_efd != 0);

	if (intr_handle->type == RTE_INTR_HANDLE_VFIO_MSIX) {
		for (i = 0; i < n; i++) {
			fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
			if (fd < 0) {
				RTE_LOG(ERR, EAL,
					"can't setup eventfd, error %i (%s)\n",
					errno, strerror(errno));
				return -1;
			}
			intr_handle->efds[i] = fd;
		}
		intr_handle->nb_efd = n;
		intr_handle->max_intr = NB_OTHER_INTR + n;
	} else {
		intr_handle->efds[0] = intr_handle->fd;
		intr_handle->nb_efd = RTE_MIN(nb_efd, 1U);
		intr_handle->max_intr = NB_OTHER_INTR;
	}

	return 0;
}
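
/*
 * With MSI-X, each of the n queue vectors gets its own non-blocking
 * eventfd, and max_intr additionally accounts for the NB_OTHER_INTR
 * vector reserved for link status and other non-queue interrupts; on
 * UIO/MSI only one vector exists, so efds[0] simply aliases the main fd.
 */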

void
rte_intr_efd_disable(struct rte_intr_handle *intr_handle)
{
	uint32_t i;
	struct rte_epoll_event *rev;

	for (i = 0; i < intr_handle->nb_efd; i++) {
		rev = &intr_handle->elist[i];
		if (rev->status == RTE_EPOLL_INVALID)
			continue;
		if (rte_epoll_ctl(rev->epfd, EPOLL_CTL_DEL, rev->fd, rev)) {
			/* force free if the entry is still valid */
			eal_epoll_data_safe_free(rev);
			rev->status = RTE_EPOLL_INVALID;
		}
	}

	if (intr_handle->max_intr > intr_handle->nb_efd) {
		for (i = 0; i < intr_handle->nb_efd; i++)
			close(intr_handle->efds[i]);
	}
	intr_handle->nb_efd = 0;
	intr_handle->max_intr = 0;
}

int
rte_intr_dp_is_en(struct rte_intr_handle *intr_handle)
{
	return !!intr_handle->nb_efd;
}

int
rte_intr_allow_others(struct rte_intr_handle *intr_handle)
{
	if (!rte_intr_dp_is_en(intr_handle))
		return 1;
	else
		return !!(intr_handle->max_intr - intr_handle->nb_efd);
}

int
rte_intr_cap_multiple(struct rte_intr_handle *intr_handle)
{
	if (intr_handle->type == RTE_INTR_HANDLE_VFIO_MSIX)
		return 1;

	return 0;
}