// SPDX-License-Identifier: GPL-2.0-only
/*
 *  fs/eventfd.c
 *
 *  Copyright (C) 2007  Davide Libenzi <davidel@xmailserver.org>
 *
 */

#include <linux/file.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/anon_inodes.h>
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/kref.h>
#include <linux/eventfd.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/idr.h>
#include <linux/uio.h>

DEFINE_PER_CPU(int, eventfd_wake_count);

static DEFINE_IDA(eventfd_ida);

struct eventfd_ctx {
	struct kref kref;
	wait_queue_head_t wqh;
	/*
	 * Every time that a write(2) is performed on an eventfd, the
	 * value of the __u64 being written is added to "count" and a
	 * wakeup is performed on "wqh". A read(2) will return the "count"
	 * value to userspace, and will reset "count" to zero. The kernel
	 * side eventfd_signal() also adds to the "count" counter and
	 * issues a wakeup.
	 */
	__u64 count;
	unsigned int flags;
	int id;
};

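/*
 * For illustration only (added; not part of the original file): the
 * userspace view of the semantics described above, as a hedged sketch
 * with error handling omitted:
 *
 *	#include <sys/eventfd.h>
 *	#include <stdint.h>
 *	#include <unistd.h>
 *
 *	int efd = eventfd(0, 0);	// "count" starts at 0
 *	uint64_t v = 3;
 *	write(efd, &v, sizeof(v));	// count += 3, readers are woken
 *	read(efd, &v, sizeof(v));	// v == 3, count reset to 0
 *	close(efd);
 */
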
/**
 * eventfd_signal - Adds @n to the eventfd counter.
 * @ctx: [in] Pointer to the eventfd context.
 * @n: [in] Value to be added to the eventfd internal counter.
 *          The value cannot be negative.
 *
 * This function is supposed to be called by the kernel in paths that do not
 * allow sleeping. In this function we allow the counter to reach the ULLONG_MAX
 * value, and we signal this as an overflow condition by returning EPOLLERR
 * to poll(2).
 *
 * Returns the amount by which the counter was incremented. This will be less
 * than @n if the counter has overflowed.
 */
__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
{
	unsigned long flags;

	/*
	 * Deadlock or stack overflow issues can happen if we recurse here
	 * through waitqueue wakeup handlers. If the caller uses potentially
	 * nested waitqueues with custom wakeup handlers, then it should
	 * check eventfd_signal_count() before calling this function. If
	 * it returns true, the eventfd_signal() call should be deferred to a
	 * safe context.
	 */
	if (WARN_ON_ONCE(this_cpu_read(eventfd_wake_count)))
		return 0;

	spin_lock_irqsave(&ctx->wqh.lock, flags);
	this_cpu_inc(eventfd_wake_count);
	if (ULLONG_MAX - ctx->count < n)
		n = ULLONG_MAX - ctx->count;
	ctx->count += n;
	if (waitqueue_active(&ctx->wqh))
		wake_up_locked_poll(&ctx->wqh, EPOLLIN);
	this_cpu_dec(eventfd_wake_count);
	spin_unlock_irqrestore(&ctx->wqh.lock, flags);

	return n;
}
EXPORT_SYMBOL_GPL(eventfd_signal);

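/*
 * A hedged usage sketch (added for illustration): kernel code that may run
 * inside another waitqueue's wakeup handler should test
 * eventfd_signal_count() first, as the comment above describes, and defer
 * the signal to a safe context ("deferred_work" is an assumed work item):
 *
 *	if (!eventfd_signal_count())
 *		eventfd_signal(ctx, 1);
 *	else
 *		schedule_work(&deferred_work);
 */
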
static void eventfd_free_ctx(struct eventfd_ctx *ctx)
{
	if (ctx->id >= 0)
		ida_simple_remove(&eventfd_ida, ctx->id);
	kfree(ctx);
}

static void eventfd_free(struct kref *kref)
{
	struct eventfd_ctx *ctx = container_of(kref, struct eventfd_ctx, kref);

	eventfd_free_ctx(ctx);
}

/**
 * eventfd_ctx_put - Releases a reference to the internal eventfd context.
 * @ctx: [in] Pointer to eventfd context.
 *
 * The eventfd context reference must have been previously acquired either
 * with eventfd_ctx_fdget() or eventfd_ctx_fileget().
 */
void eventfd_ctx_put(struct eventfd_ctx *ctx)
{
	kref_put(&ctx->kref, eventfd_free);
}
EXPORT_SYMBOL_GPL(eventfd_ctx_put);

static int eventfd_release(struct inode *inode, struct file *file)
{
	struct eventfd_ctx *ctx = file->private_data;

	wake_up_poll(&ctx->wqh, EPOLLHUP);
	eventfd_ctx_put(ctx);
	return 0;
}

static __poll_t eventfd_poll(struct file *file, poll_table *wait)
{
	struct eventfd_ctx *ctx = file->private_data;
	__poll_t events = 0;
	u64 count;

	poll_wait(file, &ctx->wqh, wait);

	/*
	 * All writes to ctx->count occur within ctx->wqh.lock. This read
	 * can be done outside ctx->wqh.lock because we know that poll_wait
	 * takes that lock (through add_wait_queue) if our caller will sleep.
	 *
	 * The read _can_ therefore seep into add_wait_queue's critical
	 * section, but cannot move above it! add_wait_queue's spin_lock acts
	 * as an acquire barrier and ensures that the read be ordered properly
	 * against the writes. The following CAN happen and is safe:
	 *
	 *     poll                               write
	 *     -----------------                  ------------
	 *     lock ctx->wqh.lock (in poll_wait)
	 *     count = ctx->count
	 *     __add_wait_queue
	 *     unlock ctx->wqh.lock
	 *                                        lock ctx->wqh.lock
	 *                                        ctx->count += n
	 *                                        if (waitqueue_active)
	 *                                          wake_up_locked_poll
	 *                                        unlock ctx->wqh.lock
	 *     eventfd_poll returns 0
	 *
	 * but the following, which would miss a wakeup, cannot happen:
	 *
	 *     poll                               write
	 *     -----------------                  ------------
	 *     count = ctx->count (INVALID!)
	 *                                        lock ctx->wqh.lock
	 *                                        ctx->count += n
	 *                                        **waitqueue_active is false**
	 *                                        **no wake_up_locked_poll!**
	 *                                        unlock ctx->wqh.lock
	 *     lock ctx->wqh.lock (in poll_wait)
	 *     __add_wait_queue
	 *     unlock ctx->wqh.lock
	 *     eventfd_poll returns 0
	 */
	count = READ_ONCE(ctx->count);

	if (count > 0)
		events |= EPOLLIN;
	if (count == ULLONG_MAX)
		events |= EPOLLERR;
	if (ULLONG_MAX - 1 > count)
		events |= EPOLLOUT;

	return events;
}

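/*
 * Userspace-facing summary (added for illustration): the readiness bits
 * computed above surface through epoll(7); a hedged sketch of watching an
 * eventfd "efd" on an existing epoll instance "epfd":
 *
 *	struct epoll_event ev = { .events = EPOLLIN, .data.fd = efd };
 *
 *	epoll_ctl(epfd, EPOLL_CTL_ADD, efd, &ev);
 *	// EPOLLIN:  count > 0
 *	// EPOLLOUT: count < ULLONG_MAX - 1 (a write of 1 won't block)
 *	// EPOLLERR: count == ULLONG_MAX (overflow)
 */
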
static void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
{
	*cnt = (ctx->flags & EFD_SEMAPHORE) ? 1 : ctx->count;
	ctx->count -= *cnt;
}

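/*
 * Illustration (added, hedged): with EFD_SEMAPHORE a read dequeues exactly
 * one "token"; without it a read drains the whole counter. E.g., after a
 * write(2) of 3:
 *
 *	read(efd, &v, 8);	// plain:      v == 3, count -> 0
 *	read(efd, &v, 8);	// semaphore:  v == 1, count -> 2
 */
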
/**
 * eventfd_ctx_remove_wait_queue - Reads the current counter and removes the wait queue entry.
 * @ctx: [in] Pointer to eventfd context.
 * @wait: [in] Wait queue entry to be removed.
 * @cnt: [out] Pointer to the 64-bit counter value.
 *
 * Returns %0 if successful, or the following error code:
 *
 * -EAGAIN : The operation would have blocked.
 *
 * This is used to atomically remove a wait queue entry from the eventfd wait
 * queue head, and read/reset the counter value.
 */
int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
				  __u64 *cnt)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->wqh.lock, flags);
	eventfd_ctx_do_read(ctx, cnt);
	__remove_wait_queue(&ctx->wqh, wait);
	if (*cnt != 0 && waitqueue_active(&ctx->wqh))
		wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
	spin_unlock_irqrestore(&ctx->wqh.lock, flags);

	return *cnt != 0 ? 0 : -EAGAIN;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_remove_wait_queue);

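/*
 * A hedged usage sketch (added for illustration): consumers such as KVM's
 * irqfd poll the eventfd with a custom wait queue entry and, on teardown,
 * detach while collecting any pending count in one atomic step:
 *
 *	__u64 cnt;
 *
 *	if (eventfd_ctx_remove_wait_queue(ctx, &my_wait, &cnt) == 0)
 *		handle_pending_events(cnt);	// assumed helper
 */
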
static ssize_t eventfd_read(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct eventfd_ctx *ctx = file->private_data;
	__u64 ucnt = 0;
	DECLARE_WAITQUEUE(wait, current);

	if (iov_iter_count(to) < sizeof(ucnt))
		return -EINVAL;
	spin_lock_irq(&ctx->wqh.lock);
	if (!ctx->count) {
		if ((file->f_flags & O_NONBLOCK) ||
		    (iocb->ki_flags & IOCB_NOWAIT)) {
			spin_unlock_irq(&ctx->wqh.lock);
			return -EAGAIN;
		}
		__add_wait_queue(&ctx->wqh, &wait);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (ctx->count)
				break;
			if (signal_pending(current)) {
				__remove_wait_queue(&ctx->wqh, &wait);
				__set_current_state(TASK_RUNNING);
				spin_unlock_irq(&ctx->wqh.lock);
				return -ERESTARTSYS;
			}
			spin_unlock_irq(&ctx->wqh.lock);
			schedule();
			spin_lock_irq(&ctx->wqh.lock);
		}
		__remove_wait_queue(&ctx->wqh, &wait);
		__set_current_state(TASK_RUNNING);
	}
	eventfd_ctx_do_read(ctx, &ucnt);
	if (waitqueue_active(&ctx->wqh))
		wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
	spin_unlock_irq(&ctx->wqh.lock);
	if (unlikely(copy_to_iter(&ucnt, sizeof(ucnt), to) != sizeof(ucnt)))
		return -EFAULT;

	return sizeof(ucnt);
}

static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t count,
			     loff_t *ppos)
{
	struct eventfd_ctx *ctx = file->private_data;
	ssize_t res;
	__u64 ucnt;
	DECLARE_WAITQUEUE(wait, current);

	if (count < sizeof(ucnt))
		return -EINVAL;
	if (copy_from_user(&ucnt, buf, sizeof(ucnt)))
		return -EFAULT;
	if (ucnt == ULLONG_MAX)
		return -EINVAL;
	spin_lock_irq(&ctx->wqh.lock);
	res = -EAGAIN;
	if (ULLONG_MAX - ctx->count > ucnt)
		res = sizeof(ucnt);
	else if (!(file->f_flags & O_NONBLOCK)) {
		__add_wait_queue(&ctx->wqh, &wait);
		for (res = 0;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (ULLONG_MAX - ctx->count > ucnt) {
				res = sizeof(ucnt);
				break;
			}
			if (signal_pending(current)) {
				res = -ERESTARTSYS;
				break;
			}
			spin_unlock_irq(&ctx->wqh.lock);
			schedule();
			spin_lock_irq(&ctx->wqh.lock);
		}
		__remove_wait_queue(&ctx->wqh, &wait);
		__set_current_state(TASK_RUNNING);
	}
	if (likely(res > 0)) {
		ctx->count += ucnt;
		if (waitqueue_active(&ctx->wqh))
			wake_up_locked_poll(&ctx->wqh, EPOLLIN);
	}
	spin_unlock_irq(&ctx->wqh.lock);

	return res;
}

#ifdef CONFIG_PROC_FS
static void eventfd_show_fdinfo(struct seq_file *m, struct file *f)
{
	struct eventfd_ctx *ctx = f->private_data;

	spin_lock_irq(&ctx->wqh.lock);
	seq_printf(m, "eventfd-count: %16llx\n",
		   (unsigned long long)ctx->count);
	spin_unlock_irq(&ctx->wqh.lock);
	seq_printf(m, "eventfd-id: %d\n", ctx->id);
}
#endif

static const struct file_operations eventfd_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= eventfd_show_fdinfo,
#endif
	.release	= eventfd_release,
	.poll		= eventfd_poll,
	.read_iter	= eventfd_read,
	.write		= eventfd_write,
	.llseek		= noop_llseek,
};

/**
 * eventfd_fget - Acquires a reference to an eventfd file.
 * @fd: [in] Eventfd file descriptor.
 *
 * Returns a pointer to the eventfd file structure in case of success, or one
 * of the following error pointers:
 *
 * -EBADF  : Invalid @fd file descriptor.
 * -EINVAL : The @fd file descriptor is not an eventfd file.
 */
struct file *eventfd_fget(int fd)
{
	struct file *file;

	file = fget(fd);
	if (!file)
		return ERR_PTR(-EBADF);
	if (file->f_op != &eventfd_fops) {
		fput(file);
		return ERR_PTR(-EINVAL);
	}

	return file;
}
EXPORT_SYMBOL_GPL(eventfd_fget);

/**
 * eventfd_ctx_fdget - Acquires a reference to the internal eventfd context.
 * @fd: [in] Eventfd file descriptor.
 *
 * Returns a pointer to the internal eventfd context, otherwise one of the
 * following error pointers:
 *
 * -EBADF  : Invalid @fd file descriptor.
 * -EINVAL : The @fd file descriptor is not an eventfd file.
 */
struct eventfd_ctx *eventfd_ctx_fdget(int fd)
{
	struct eventfd_ctx *ctx;
	struct fd f = fdget(fd);

	if (!f.file)
		return ERR_PTR(-EBADF);
	ctx = eventfd_ctx_fileget(f.file);
	fdput(f);
	return ctx;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_fdget);

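/*
 * A hedged usage sketch (added for illustration): a driver receiving an
 * eventfd file descriptor from userspace, e.g. through an ioctl argument
 * ("args->fd" is assumed), would typically do:
 *
 *	struct eventfd_ctx *ctx = eventfd_ctx_fdget(args->fd);
 *
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *	...
 *	eventfd_ctx_put(ctx);	// drop the reference on teardown
 */
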
/**
 * eventfd_ctx_fileget - Acquires a reference to the internal eventfd context.
 * @file: [in] Eventfd file pointer.
 *
 * Returns a pointer to the internal eventfd context, otherwise the error
 * pointer:
 *
 * -EINVAL : The @file is not an eventfd file.
 */
struct eventfd_ctx *eventfd_ctx_fileget(struct file *file)
{
	struct eventfd_ctx *ctx;

	if (file->f_op != &eventfd_fops)
		return ERR_PTR(-EINVAL);

	ctx = file->private_data;
	kref_get(&ctx->kref);
	return ctx;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_fileget);

static int do_eventfd(unsigned int count, int flags)
{
	struct eventfd_ctx *ctx;
	struct file *file;
	int fd;

	/* Check the EFD_* constants for consistency. */
	BUILD_BUG_ON(EFD_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(EFD_NONBLOCK != O_NONBLOCK);

	if (flags & ~EFD_FLAGS_SET)
		return -EINVAL;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	kref_init(&ctx->kref);
	init_waitqueue_head(&ctx->wqh);
	ctx->count = count;
	ctx->flags = flags;
	ctx->id = ida_simple_get(&eventfd_ida, 0, 0, GFP_KERNEL);

	flags &= EFD_SHARED_FCNTL_FLAGS;
	flags |= O_RDWR;
	fd = get_unused_fd_flags(flags);
	if (fd < 0)
		goto err;

	file = anon_inode_getfile("[eventfd]", &eventfd_fops, ctx, flags);
	if (IS_ERR(file)) {
		put_unused_fd(fd);
		fd = PTR_ERR(file);
		goto err;
	}

	file->f_mode |= FMODE_NOWAIT;
	fd_install(fd, file);
	return fd;
err:
	eventfd_free_ctx(ctx);
	return fd;
}

SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags)
{
	return do_eventfd(count, flags);
}

SYSCALL_DEFINE1(eventfd, unsigned int, count)
{
	return do_eventfd(count, 0);
}
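
/*
 * End-to-end illustration (added, hedged): exercising the syscalls above
 * from userspace via the glibc eventfd(2) wrapper. The EFD_* flags map
 * directly onto the O_* flags, as the BUILD_BUG_ON()s in do_eventfd()
 * assert:
 *
 *	#include <sys/eventfd.h>
 *	#include <stdint.h>
 *	#include <unistd.h>
 *
 *	int efd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
 *	uint64_t v;
 *	read(efd, &v, sizeof(v));	// counter is 0: returns -1 / EAGAIN
 *					// instead of blocking (EFD_NONBLOCK)
 */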