// SPDX-License-Identifier: GPL-2.0
/* Watch queue and general notification mechanism, built on pipes
 *
 * Copyright (C) 2020 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * See Documentation/watch_queue.rst
 */

#define pr_fmt(fmt) "watchq: " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/printk.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/file.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/sched/signal.h>
#include <linux/watch_queue.h>
#include <linux/pipe_fs_i.h>

MODULE_DESCRIPTION("Watch queue");
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");

#define WATCH_QUEUE_NOTE_SIZE 128
#define WATCH_QUEUE_NOTES_PER_PAGE (PAGE_SIZE / WATCH_QUEUE_NOTE_SIZE)

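/* Note-size arithmetic (illustrative only; PAGE_SIZE is architecture
 * dependent): with the common 4 KiB PAGE_SIZE, each page holds
 * 4096 / 128 = 32 notes, so the 512-note hard limit applied in
 * watch_queue_set_size() corresponds to 16 preallocated pages.
 */
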
static void watch_queue_pipe_buf_release(struct pipe_inode_info *pipe,
					 struct pipe_buffer *buf)
{
	struct watch_queue *wqueue = (struct watch_queue *)buf->private;
	struct page *page;
	unsigned int bit;

	/* We need to work out which note within the page this refers to, but
	 * the note might have been maximum size, so merely ANDing the offset
	 * off doesn't work.  OTOH, the note must've been more than zero size.
	 */
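	/* Worked example with 128-byte notes: a full-size note at offset 256
	 * has buf->offset + buf->len == 384, a multiple of the note size, so
	 * we step back to 256 and divide to get note 2 within the page.
	 */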
	bit = buf->offset + buf->len;
	if ((bit & (WATCH_QUEUE_NOTE_SIZE - 1)) == 0)
		bit -= WATCH_QUEUE_NOTE_SIZE;
	bit /= WATCH_QUEUE_NOTE_SIZE;

	page = buf->page;
	bit += page->index;

	set_bit(bit, wqueue->notes_bitmap);
	generic_pipe_buf_release(pipe, buf);
}

// No try_steal function => no stealing
#define watch_queue_pipe_buf_try_steal NULL
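/* (Rationale, inferred from the release op above: a stolen page would
 *  still belong to the queue's preallocated note store but would never
 *  be returned to notes_bitmap, so stealing is simply disallowed.)
 */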

/* New data written to a pipe may be appended to a buffer with this type. */
static const struct pipe_buf_operations watch_queue_pipe_buf_ops = {
	.release	= watch_queue_pipe_buf_release,
	.try_steal	= watch_queue_pipe_buf_try_steal,
	.get		= generic_pipe_buf_get,
};

/*
 * Post a notification to a watch queue.
 */
static bool post_one_notification(struct watch_queue *wqueue,
				  struct watch_notification *n)
{
	void *p;
	struct pipe_inode_info *pipe = wqueue->pipe;
	struct pipe_buffer *buf;
	struct page *page;
	unsigned int head, tail, mask, note, offset, len;
	bool done = false;

	if (!pipe)
		return false;

	spin_lock_irq(&pipe->rd_wait.lock);

	if (wqueue->defunct)
		goto out;

	mask = pipe->ring_size - 1;
	head = pipe->head;
	tail = pipe->tail;
	if (pipe_full(head, tail, pipe->ring_size))
		goto lost;

	note = find_first_bit(wqueue->notes_bitmap, wqueue->nr_notes);
	if (note >= wqueue->nr_notes)
		goto lost;

	page = wqueue->notes[note / WATCH_QUEUE_NOTES_PER_PAGE];
	offset = note % WATCH_QUEUE_NOTES_PER_PAGE * WATCH_QUEUE_NOTE_SIZE;
	get_page(page);
	len = n->info & WATCH_INFO_LENGTH;
	p = kmap_atomic(page);
	memcpy(p + offset, n, len);
	kunmap_atomic(p);

	buf = &pipe->bufs[head & mask];
	buf->page = page;
	buf->private = (unsigned long)wqueue;
	buf->ops = &watch_queue_pipe_buf_ops;
	buf->offset = offset;
	buf->len = len;
	buf->flags = PIPE_BUF_FLAG_WHOLE;
	smp_store_release(&pipe->head, head + 1); /* vs pipe_read() */

	if (!test_and_clear_bit(note, wqueue->notes_bitmap)) {
		spin_unlock_irq(&pipe->rd_wait.lock);
		BUG();
	}
	wake_up_interruptible_sync_poll_locked(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
	done = true;

out:
	spin_unlock_irq(&pipe->rd_wait.lock);
	if (done)
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	return done;

lost:
	buf = &pipe->bufs[(head - 1) & mask];
	buf->flags |= PIPE_BUF_FLAG_LOSS;
	goto out;
}
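
/* Consumer-side sketch (hypothetical userspace, after
 * Documentation/watch_queue.rst): since every buffer is posted with
 * PIPE_BUF_FLAG_WHOLE, read() returns only whole records, which can be
 * walked using the byte length encoded in n->info:
 *
 *	char buf[4096];
 *	ssize_t len = read(watch_fd, buf, sizeof(buf));
 *	for (char *p = buf; p < buf + len; ) {
 *		struct watch_notification *n = (void *)p;
 *		handle(n);			// hypothetical handler
 *		p += n->info & WATCH_INFO_LENGTH;
 *	}
 */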

/*
 * Apply filter rules to a notification.
 */
static bool filter_watch_notification(const struct watch_filter *wf,
				      const struct watch_notification *n)
{
	const struct watch_type_filter *wt;
	unsigned int st_bits = sizeof(wt->subtype_filter[0]) * 8;
	unsigned int st_index = n->subtype / st_bits;
	unsigned int st_bit = 1U << (n->subtype % st_bits);
	int i;

	if (!test_bit(n->type, wf->type_filter))
		return false;

	for (i = 0; i < wf->nr_filters; i++) {
		wt = &wf->filters[i];
		if (n->type == wt->type &&
		    (wt->subtype_filter[st_index] & st_bit) &&
		    (n->info & wt->info_mask) == wt->info_filter)
			return true;
	}

	return false; /* If there is a filter, the default is to reject. */
}
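
/* Indexing example: the filter words are 32 bits wide here, so subtype 9
 * maps to bit 1 << 9 of subtype_filter[0], and subtype 40 would map to
 * bit 8 of subtype_filter[1].  Note that watch_queue_set_filter() below
 * only populates subtype_filter[0].
 */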

/**
 * __post_watch_notification - Post an event notification
 * @wlist: The watch list to post the event to.
 * @n: The notification record to post.
 * @cred: The creds of the process that triggered the notification.
 * @id: The ID to match on the watch.
 *
 * Post a notification of an event into a set of watch queues and let the users
 * know.
 *
 * The size of the notification should be set in n->info & WATCH_INFO_LENGTH,
 * in bytes; this is the length that post_one_notification() copies into the
 * preallocated note (see watch_sizeof()).
 */
void __post_watch_notification(struct watch_list *wlist,
			       struct watch_notification *n,
			       const struct cred *cred,
			       u64 id)
{
	const struct watch_filter *wf;
	struct watch_queue *wqueue;
	struct watch *watch;

	if (((n->info & WATCH_INFO_LENGTH) >> WATCH_INFO_LENGTH__SHIFT) == 0) {
		WARN_ON(1);
		return;
	}

	rcu_read_lock();

	hlist_for_each_entry_rcu(watch, &wlist->watchers, list_node) {
		if (watch->id != id)
			continue;
		n->info &= ~WATCH_INFO_ID;
		n->info |= watch->info_id;

		wqueue = rcu_dereference(watch->queue);
		wf = rcu_dereference(wqueue->filter);
		if (wf && !filter_watch_notification(wf, n))
			continue;

		if (security_post_notification(watch->cred, cred, n) < 0)
			continue;

		post_one_notification(wqueue, n);
	}

	rcu_read_unlock();
}
EXPORT_SYMBOL(__post_watch_notification);
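
/* Producer-side sketch (hypothetical): a watched object posts a minimal
 * record to every watch whose id matches.  The type/subtype here are
 * placeholders; real users define theirs in <uapi/linux/watch_queue.h>:
 *
 *	struct watch_notification n = {
 *		.type	 = WATCH_TYPE_META,	// placeholder
 *		.subtype = 0,			// placeholder
 *		.info	 = watch_sizeof(n),	// length, in bytes
 *	};
 *	__post_watch_notification(wlist, &n, current_cred(), object_id);
 */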

/*
 * Allocate sufficient pages to preallocate buffers for the requested
 * number of notifications.
 */
long watch_queue_set_size(struct pipe_inode_info *pipe, unsigned int nr_notes)
{
	struct watch_queue *wqueue = pipe->watch_queue;
	struct page **pages;
	unsigned long *bitmap;
	unsigned long user_bufs;
	unsigned int bmsize;
	int ret, i, nr_pages;

	if (!wqueue)
		return -ENODEV;
	if (wqueue->notes)
		return -EBUSY;

	if (nr_notes < 1 ||
	    nr_notes > 512) /* TODO: choose a better hard limit */
		return -EINVAL;

	nr_pages = (nr_notes + WATCH_QUEUE_NOTES_PER_PAGE - 1);
	nr_pages /= WATCH_QUEUE_NOTES_PER_PAGE;
	user_bufs = account_pipe_buffers(pipe->user, pipe->nr_accounted, nr_pages);

	if (nr_pages > pipe->max_usage &&
	    (too_many_pipe_buffers_hard(user_bufs) ||
	     too_many_pipe_buffers_soft(user_bufs)) &&
	    pipe_is_unprivileged_user()) {
		ret = -EPERM;
		goto error;
	}

	nr_notes = nr_pages * WATCH_QUEUE_NOTES_PER_PAGE;
	ret = pipe_resize_ring(pipe, roundup_pow_of_two(nr_notes));
	if (ret < 0)
		goto error;

	ret = -ENOMEM;
	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		goto error;

	for (i = 0; i < nr_pages; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			goto error_p;
		pages[i]->index = i * WATCH_QUEUE_NOTES_PER_PAGE;
	}

	bmsize = (nr_notes + BITS_PER_LONG - 1) / BITS_PER_LONG;
	bmsize *= sizeof(unsigned long);
	bitmap = kmalloc(bmsize, GFP_KERNEL);
	if (!bitmap)
		goto error_p;

	memset(bitmap, 0xff, bmsize);
	wqueue->notes = pages;
	wqueue->notes_bitmap = bitmap;
	wqueue->nr_pages = nr_pages;
	wqueue->nr_notes = nr_notes;
	return 0;

error_p:
	while (--i >= 0)
		__free_page(pages[i]);
	kfree(pages);
error:
	(void) account_pipe_buffers(pipe->user, nr_pages, pipe->nr_accounted);
	return ret;
}
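
/* Userspace setup sketch (hypothetical, after
 * Documentation/watch_queue.rst): the size set here comes from an ioctl
 * on a pipe opened with O_NOTIFICATION_PIPE:
 *
 *	int fds[2];
 *	if (pipe2(fds, O_NOTIFICATION_PIPE) < 0)
 *		err(1, "pipe2");
 *	if (ioctl(fds[0], IOC_WATCH_QUEUE_SET_SIZE, 256) < 0)
 *		err(1, "ioctl");
 */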

/*
 * Set the filter on a watch queue.
 */
long watch_queue_set_filter(struct pipe_inode_info *pipe,
			    struct watch_notification_filter __user *_filter)
{
	struct watch_notification_type_filter *tf;
	struct watch_notification_filter filter;
	struct watch_type_filter *q;
	struct watch_filter *wfilter;
	struct watch_queue *wqueue = pipe->watch_queue;
	int ret, nr_filter = 0, i;

	if (!wqueue)
		return -ENODEV;

	if (!_filter) {
		/* Remove the old filter */
		wfilter = NULL;
		goto set;
	}

	/* Grab the user's filter specification */
	if (copy_from_user(&filter, _filter, sizeof(filter)) != 0)
		return -EFAULT;
	if (filter.nr_filters == 0 ||
	    filter.nr_filters > 16 ||
	    filter.__reserved != 0)
		return -EINVAL;

	tf = memdup_user(_filter->filters, filter.nr_filters * sizeof(*tf));
	if (IS_ERR(tf))
		return PTR_ERR(tf);

	ret = -EINVAL;
	for (i = 0; i < filter.nr_filters; i++) {
		if ((tf[i].info_filter & ~tf[i].info_mask) ||
		    tf[i].info_mask & WATCH_INFO_LENGTH)
			goto err_filter;
		/* Ignore any unknown types */
		if (tf[i].type >= WATCH_TYPE__NR)
			continue;
		nr_filter++;
	}

	/* Now we need to build the internal filter from only the relevant
	 * user-specified filters.
	 */
	ret = -ENOMEM;
	wfilter = kzalloc(struct_size(wfilter, filters, nr_filter), GFP_KERNEL);
	if (!wfilter)
		goto err_filter;
	wfilter->nr_filters = nr_filter;

	q = wfilter->filters;
	for (i = 0; i < filter.nr_filters; i++) {
		if (tf[i].type >= WATCH_TYPE__NR)
			continue;

		q->type			= tf[i].type;
		q->info_filter		= tf[i].info_filter;
		q->info_mask		= tf[i].info_mask;
		q->subtype_filter[0]	= tf[i].subtype_filter[0];
		__set_bit(q->type, wfilter->type_filter);
		q++;
	}

	kfree(tf);
set:
	pipe_lock(pipe);
	wfilter = rcu_replace_pointer(wqueue->filter, wfilter,
				      lockdep_is_held(&pipe->mutex));
	pipe_unlock(pipe);
	if (wfilter)
		kfree_rcu(wfilter, rcu);
	return 0;

err_filter:
	kfree(tf);
	return ret;
}
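
/* Userspace filter sketch (hypothetical): pass one type filter through
 * IOC_WATCH_QUEUE_SET_FILTER on the notification pipe.  WATCH_TYPE_META
 * stands in for whatever type the watcher cares about:
 *
 *	struct watch_notification_filter *f =
 *		calloc(1, sizeof(*f) + sizeof(f->filters[0]));
 *	f->nr_filters = 1;
 *	f->filters[0].type = WATCH_TYPE_META;	// placeholder type
 *	f->filters[0].subtype_filter[0] = ~0U;	// accept subtypes 0-31
 *	ioctl(fds[0], IOC_WATCH_QUEUE_SET_FILTER, f);
 */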

static void __put_watch_queue(struct kref *kref)
{
	struct watch_queue *wqueue =
		container_of(kref, struct watch_queue, usage);
	struct watch_filter *wfilter;
	int i;

	for (i = 0; i < wqueue->nr_pages; i++)
		__free_page(wqueue->notes[i]);
	kfree(wqueue->notes);
	bitmap_free(wqueue->notes_bitmap);

	wfilter = rcu_access_pointer(wqueue->filter);
	if (wfilter)
		kfree_rcu(wfilter, rcu);
	kfree_rcu(wqueue, rcu);
}

/**
 * put_watch_queue - Dispose of a ref on a watch queue.
 * @wqueue: The watch queue to unref.
 */
void put_watch_queue(struct watch_queue *wqueue)
{
	kref_put(&wqueue->usage, __put_watch_queue);
}
EXPORT_SYMBOL(put_watch_queue);

static void free_watch(struct rcu_head *rcu)
{
	struct watch *watch = container_of(rcu, struct watch, rcu);

	put_watch_queue(rcu_access_pointer(watch->queue));
	atomic_dec(&watch->cred->user->nr_watches);
	put_cred(watch->cred);
	kfree(watch);
}

static void __put_watch(struct kref *kref)
{
	struct watch *watch = container_of(kref, struct watch, usage);

	call_rcu(&watch->rcu, free_watch);
}

/*
 * Discard a watch.
 */
static void put_watch(struct watch *watch)
{
	kref_put(&watch->usage, __put_watch);
}

/**
 * init_watch - Initialise a watch
 * @watch: The watch to initialise.
 * @wqueue: The queue to assign.
 *
 * Initialise a watch and set the watch queue.
 */
void init_watch(struct watch *watch, struct watch_queue *wqueue)
{
	kref_init(&watch->usage);
	INIT_HLIST_NODE(&watch->list_node);
	INIT_HLIST_NODE(&watch->queue_node);
	rcu_assign_pointer(watch->queue, wqueue);
}

/**
 * add_watch_to_object - Add a watch on an object to a watch list
 * @watch: The watch to add
 * @wlist: The watch list to add to
 *
 * @watch->queue must have been set to point to the queue to post
 * notifications to; the watch list of the object to be watched and the
 * watch's credentials are set by this function, with a ref taken on the
 * creds.
 *
 * The caller must pin both the queue and the list and must hold the list
 * locked against racing watch additions/removals.
 */
int add_watch_to_object(struct watch *watch, struct watch_list *wlist)
{
	struct watch_queue *wqueue = rcu_access_pointer(watch->queue);
	struct watch *w;

	hlist_for_each_entry(w, &wlist->watchers, list_node) {
		struct watch_queue *wq = rcu_access_pointer(w->queue);
		if (wqueue == wq && watch->id == w->id)
			return -EBUSY;
	}

	watch->cred = get_current_cred();
	rcu_assign_pointer(watch->watch_list, wlist);

	if (atomic_inc_return(&watch->cred->user->nr_watches) >
	    task_rlimit(current, RLIMIT_NOFILE)) {
		atomic_dec(&watch->cred->user->nr_watches);
		put_cred(watch->cred);
		return -EAGAIN;
	}

	spin_lock_bh(&wqueue->lock);
	kref_get(&wqueue->usage);
	kref_get(&watch->usage);
	hlist_add_head(&watch->queue_node, &wqueue->watches);
	spin_unlock_bh(&wqueue->lock);

	hlist_add_head(&watch->list_node, &wlist->watchers);
	return 0;
}
EXPORT_SYMBOL(add_watch_to_object);
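
/* Kernel-side wiring sketch (hypothetical caller; error handling elided).
 * Per the kernel-doc above, the queue and list must be pinned and the
 * list lock held across the add:
 *
 *	struct watch_queue *wqueue = get_watch_queue(watch_fd);
 *	struct watch *watch = kzalloc(sizeof(*watch), GFP_KERNEL);
 *
 *	init_watch(watch, wqueue);
 *	watch->id = object_id;		// caller-chosen ID to match on
 *	spin_lock(&wlist->lock);
 *	ret = add_watch_to_object(watch, wlist);
 *	spin_unlock(&wlist->lock);
 *	put_watch_queue(wqueue);	// add_watch_to_object took its own ref
 */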

/**
 * remove_watch_from_object - Remove a watch or all watches from an object.
 * @wlist: The watch list to remove from
 * @wq: The watch queue of interest (ignored if @all is true)
 * @id: The ID of the watch to remove (ignored if @all is true)
 * @all: True to remove all watches
 *
 * Remove a specific watch or all watches from an object. A notification is
 * sent to the watcher to tell them that this happened.
 */
int remove_watch_from_object(struct watch_list *wlist, struct watch_queue *wq,
			     u64 id, bool all)
{
	struct watch_notification_removal n;
	struct watch_queue *wqueue;
	struct watch *watch;
	int ret = -EBADSLT;

	rcu_read_lock();

again:
	spin_lock(&wlist->lock);
	hlist_for_each_entry(watch, &wlist->watchers, list_node) {
		if (all ||
		    (watch->id == id && rcu_access_pointer(watch->queue) == wq))
			goto found;
	}
	spin_unlock(&wlist->lock);
	goto out;

found:
	ret = 0;
	hlist_del_init_rcu(&watch->list_node);
	rcu_assign_pointer(watch->watch_list, NULL);
	spin_unlock(&wlist->lock);

	/* We now own the reference on watch that used to belong to wlist. */

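	/* The removal record is posted as just the header when no watch ID
	 * is being reported, but grows to cover the 64-bit ID field (the
	 * whole watch_notification_removal) when @id is non-zero, hence the
	 * two watch_sizeof() values below.
	 */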
	n.watch.type = WATCH_TYPE_META;
	n.watch.subtype = WATCH_META_REMOVAL_NOTIFICATION;
	n.watch.info = watch->info_id | watch_sizeof(n.watch);
	n.id = id;
	if (id != 0)
		n.watch.info = watch->info_id | watch_sizeof(n);

	wqueue = rcu_dereference(watch->queue);

	/* We don't need the watch list lock for the next bit as RCU is
	 * protecting *wqueue from deallocation.
	 */
	if (wqueue) {
		post_one_notification(wqueue, &n.watch);

		spin_lock_bh(&wqueue->lock);

		if (!hlist_unhashed(&watch->queue_node)) {
			hlist_del_init_rcu(&watch->queue_node);
			put_watch(watch);
		}

		spin_unlock_bh(&wqueue->lock);
	}

	if (wlist->release_watch) {
		void (*release_watch)(struct watch *);

		release_watch = wlist->release_watch;
		rcu_read_unlock();
		(*release_watch)(watch);
		rcu_read_lock();
	}
	put_watch(watch);

	if (all && !hlist_empty(&wlist->watchers))
		goto again;
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(remove_watch_from_object);

/*
 * Remove all the watches that are contributory to a queue. This has the
 * potential to race with removal of the watches by the destruction of the
 * objects being watched or with the distribution of notifications.
 */
void watch_queue_clear(struct watch_queue *wqueue)
{
	struct watch_list *wlist;
	struct watch *watch;
	bool release;

	rcu_read_lock();
	spin_lock_bh(&wqueue->lock);

	/* Prevent new notifications from being stored. */
	wqueue->defunct = true;

	while (!hlist_empty(&wqueue->watches)) {
		watch = hlist_entry(wqueue->watches.first, struct watch, queue_node);
		hlist_del_init_rcu(&watch->queue_node);
		/* We now own a ref on the watch. */
		spin_unlock_bh(&wqueue->lock);

		/* We can't do the next bit under the queue lock as we need to
		 * get the list lock - which would cause a deadlock if someone
		 * was removing from the opposite direction at the same time or
		 * posting a notification.
		 */
		wlist = rcu_dereference(watch->watch_list);
		if (wlist) {
			void (*release_watch)(struct watch *);

			spin_lock(&wlist->lock);

			release = !hlist_unhashed(&watch->list_node);
			if (release) {
				hlist_del_init_rcu(&watch->list_node);
				rcu_assign_pointer(watch->watch_list, NULL);

				/* We now own a second ref on the watch. */
			}

			release_watch = wlist->release_watch;
			spin_unlock(&wlist->lock);

			if (release) {
				if (release_watch) {
					rcu_read_unlock();
					/* This might need to call dput(), so
					 * we have to drop all the locks.
					 */
					(*release_watch)(watch);
					rcu_read_lock();
				}
				put_watch(watch);
			}
		}

		put_watch(watch);
		spin_lock_bh(&wqueue->lock);
	}

	spin_unlock_bh(&wqueue->lock);
	rcu_read_unlock();
}

/**
 * get_watch_queue - Get a watch queue from its file descriptor.
 * @fd: The fd to query.
 */
struct watch_queue *get_watch_queue(int fd)
{
	struct pipe_inode_info *pipe;
	struct watch_queue *wqueue = ERR_PTR(-EINVAL);
	struct fd f;

	f = fdget(fd);
	if (f.file) {
		pipe = get_pipe_info(f.file, false);
		if (pipe && pipe->watch_queue) {
			wqueue = pipe->watch_queue;
			kref_get(&wqueue->usage);
		}
		fdput(f);
	}

	return wqueue;
}
EXPORT_SYMBOL(get_watch_queue);

/*
 * Initialise a watch queue
 */
int watch_queue_init(struct pipe_inode_info *pipe)
{
	struct watch_queue *wqueue;

	wqueue = kzalloc(sizeof(*wqueue), GFP_KERNEL);
	if (!wqueue)
		return -ENOMEM;

	wqueue->pipe = pipe;
	kref_init(&wqueue->usage);
	spin_lock_init(&wqueue->lock);
	INIT_HLIST_HEAD(&wqueue->watches);

	pipe->watch_queue = wqueue;
	return 0;
}