ipc/mqueue.c
/*
 * POSIX message queues filesystem for Linux.
 *
 * Copyright (C) 2003,2004  Krzysztof Benedyczak (golbi@mat.uni.torun.pl)
 *                          Michal Wronski       (michal.wronski@gmail.com)
 *
 * Spinlocks:               Mohamed Abbas        (abbas.mohamed@intel.com)
 * Lockless receive & send, fd based notify:
 *                          Manfred Spraul       (manfred@colorfullife.com)
 *
 * Audit:                   George Wilson        (ltcgcw@us.ibm.com)
 *
 * This file is released under the GPL.
 */

#include <linux/capability.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/fs_context.h>
#include <linux/namei.h>
#include <linux/sysctl.h>
#include <linux/poll.h>
#include <linux/mqueue.h>
#include <linux/msg.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/netlink.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/pid.h>
#include <linux/ipc_namespace.h>
#include <linux/user_namespace.h>
#include <linux/slab.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/signal.h>
#include <linux/sched/user.h>

#include <net/sock.h>
#include "util.h"

struct mqueue_fs_context {
	struct ipc_namespace	*ipc_ns;
};

#define MQUEUE_MAGIC	0x19800202
#define DIRENT_SIZE	20
#define FILENT_SIZE	80

#define SEND		0
#define RECV		1

#define STATE_NONE	0
#define STATE_READY	1
struct posix_msg_tree_node {
	struct rb_node		rb_node;
	struct list_head	msg_list;
	int			priority;
};

struct ext_wait_queue {		/* queue of sleeping tasks */
	struct task_struct *task;
	struct list_head list;
	struct msg_msg *msg;	/* ptr of loaded message */
	int state;		/* one of STATE_* values */
};

struct mqueue_inode_info {
	spinlock_t lock;
	struct inode vfs_inode;
	wait_queue_head_t wait_q;

	struct rb_root msg_tree;
	struct posix_msg_tree_node *node_cache;
	struct mq_attr attr;

	struct sigevent notify;
	struct pid *notify_owner;
	struct user_namespace *notify_user_ns;
	struct user_struct *user;	/* user who created, for accounting */
	struct sock *notify_sock;
	struct sk_buff *notify_cookie;

	/* for tasks waiting for free space and messages, respectively */
	struct ext_wait_queue e_wait_q[2];

	unsigned long qsize; /* size of queue in memory (sum of all msgs) */
};

static struct file_system_type mqueue_fs_type;
static const struct inode_operations mqueue_dir_inode_operations;
static const struct file_operations mqueue_file_operations;
static const struct super_operations mqueue_super_ops;
static const struct fs_context_operations mqueue_fs_context_ops;
static void remove_notification(struct mqueue_inode_info *info);

static struct kmem_cache *mqueue_inode_cachep;

static struct ctl_table_header *mq_sysctl_table;

static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
{
	return container_of(inode, struct mqueue_inode_info, vfs_inode);
}

/*
 * This routine should be called with the mq_lock held.
 */
static inline struct ipc_namespace *__get_ns_from_inode(struct inode *inode)
{
	return get_ipc_ns(inode->i_sb->s_fs_info);
}

static struct ipc_namespace *get_ns_from_inode(struct inode *inode)
{
	struct ipc_namespace *ns;

	spin_lock(&mq_lock);
	ns = __get_ns_from_inode(inode);
	spin_unlock(&mq_lock);
	return ns;
}

/* Auxiliary functions to manipulate messages' list */
static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
{
	struct rb_node **p, *parent = NULL;
	struct posix_msg_tree_node *leaf;

	p = &info->msg_tree.rb_node;
	while (*p) {
		parent = *p;
		leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);

		if (likely(leaf->priority == msg->m_type))
			goto insert_msg;
		else if (msg->m_type < leaf->priority)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	if (info->node_cache) {
		leaf = info->node_cache;
		info->node_cache = NULL;
	} else {
		leaf = kmalloc(sizeof(*leaf), GFP_ATOMIC);
		if (!leaf)
			return -ENOMEM;
		INIT_LIST_HEAD(&leaf->msg_list);
	}
	leaf->priority = msg->m_type;
	rb_link_node(&leaf->rb_node, parent, p);
	rb_insert_color(&leaf->rb_node, &info->msg_tree);
insert_msg:
	info->attr.mq_curmsgs++;
	info->qsize += msg->m_ts;
	list_add_tail(&msg->m_list, &leaf->msg_list);
	return 0;
}

static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
{
	struct rb_node **p, *parent = NULL;
	struct posix_msg_tree_node *leaf;
	struct msg_msg *msg;

try_again:
	p = &info->msg_tree.rb_node;
	while (*p) {
		parent = *p;
		/*
		 * During insert, low priorities go to the left and high to the
		 * right. On receive, we want the highest priorities first, so
		 * walk all the way to the right.
		 */
		p = &(*p)->rb_right;
	}
	if (!parent) {
		if (info->attr.mq_curmsgs) {
			pr_warn_once("Inconsistency in POSIX message queue, "
				     "no tree element, but supposedly messages "
				     "should exist!\n");
			info->attr.mq_curmsgs = 0;
		}
		return NULL;
	}
	leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);
	if (unlikely(list_empty(&leaf->msg_list))) {
		pr_warn_once("Inconsistency in POSIX message queue, "
			     "empty leaf node but we haven't implemented "
			     "lazy leaf delete!\n");
		rb_erase(&leaf->rb_node, &info->msg_tree);
		if (info->node_cache) {
			kfree(leaf);
		} else {
			info->node_cache = leaf;
		}
		goto try_again;
	} else {
		msg = list_first_entry(&leaf->msg_list,
				       struct msg_msg, m_list);
		list_del(&msg->m_list);
		if (list_empty(&leaf->msg_list)) {
			rb_erase(&leaf->rb_node, &info->msg_tree);
			if (info->node_cache) {
				kfree(leaf);
			} else {
				info->node_cache = leaf;
			}
		}
	}
	info->attr.mq_curmsgs--;
	info->qsize -= msg->m_ts;
	return msg;
}

static struct inode *mqueue_get_inode(struct super_block *sb,
		struct ipc_namespace *ipc_ns, umode_t mode,
		struct mq_attr *attr)
{
	struct user_struct *u = current_user();
	struct inode *inode;
	int ret = -ENOMEM;

	inode = new_inode(sb);
	if (!inode)
		goto err;

	inode->i_ino = get_next_ino();
	inode->i_mode = mode;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_mtime = inode->i_ctime = inode->i_atime = current_time(inode);

	if (S_ISREG(mode)) {
		struct mqueue_inode_info *info;
		unsigned long mq_bytes, mq_treesize;

		inode->i_fop = &mqueue_file_operations;
		inode->i_size = FILENT_SIZE;
		/* mqueue specific info */
		info = MQUEUE_I(inode);
		spin_lock_init(&info->lock);
		init_waitqueue_head(&info->wait_q);
		INIT_LIST_HEAD(&info->e_wait_q[0].list);
		INIT_LIST_HEAD(&info->e_wait_q[1].list);
		info->notify_owner = NULL;
		info->notify_user_ns = NULL;
		info->qsize = 0;
		info->user = NULL;	/* set when all is ok */
		info->msg_tree = RB_ROOT;
		info->node_cache = NULL;
		memset(&info->attr, 0, sizeof(info->attr));
		info->attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
					   ipc_ns->mq_msg_default);
		info->attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
					    ipc_ns->mq_msgsize_default);
		if (attr) {
			info->attr.mq_maxmsg = attr->mq_maxmsg;
			info->attr.mq_msgsize = attr->mq_msgsize;
		}
		/*
		 * We used to allocate a static array of pointers and account
		 * the size of that array as well as one msg_msg struct per
		 * possible message into the queue size. That's no longer
		 * accurate as the queue is now an rbtree and will grow and
		 * shrink depending on usage patterns. We can, however, still
		 * account one msg_msg struct per message, but the nodes are
		 * allocated depending on priority usage, and most programs
		 * only use one, or a handful, of priorities. However, since
		 * this is pinned memory, we need to assume worst case, so
		 * that means the min(mq_maxmsg, max_priorities) * struct
		 * posix_msg_tree_node.
		 */

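		/*
		 * Illustrative worked example of the accounting below
		 * (struct sizes are made up; real ones vary by arch and
		 * config): with mq_maxmsg = 10, mq_msgsize = 8192,
		 * sizeof(struct msg_msg) == 48 and
		 * sizeof(struct posix_msg_tree_node) == 40:
		 *
		 *   mq_treesize = 10 * 48 + min(10, MQ_PRIO_MAX) * 40
		 *               = 880 bytes
		 *   mq_bytes    = 10 * 8192 + 880 = 82800 bytes
		 *
		 * and it is this worst-case total that is charged against
		 * the user's RLIMIT_MSGQUEUE.
		 */
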
		ret = -EINVAL;
		if (info->attr.mq_maxmsg <= 0 || info->attr.mq_msgsize <= 0)
			goto out_inode;
		if (capable(CAP_SYS_RESOURCE)) {
			if (info->attr.mq_maxmsg > HARD_MSGMAX ||
			    info->attr.mq_msgsize > HARD_MSGSIZEMAX)
				goto out_inode;
		} else {
			if (info->attr.mq_maxmsg > ipc_ns->mq_msg_max ||
			    info->attr.mq_msgsize > ipc_ns->mq_msgsize_max)
				goto out_inode;
		}
		ret = -EOVERFLOW;
		/* check for overflow */
		if (info->attr.mq_msgsize > ULONG_MAX/info->attr.mq_maxmsg)
			goto out_inode;
		mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
			min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
			sizeof(struct posix_msg_tree_node);
		mq_bytes = info->attr.mq_maxmsg * info->attr.mq_msgsize;
		if (mq_bytes + mq_treesize < mq_bytes)
			goto out_inode;
		mq_bytes += mq_treesize;
		spin_lock(&mq_lock);
		if (u->mq_bytes + mq_bytes < u->mq_bytes ||
		    u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
			spin_unlock(&mq_lock);
			/* mqueue_evict_inode() releases info->messages */
			ret = -EMFILE;
			goto out_inode;
		}
		u->mq_bytes += mq_bytes;
		spin_unlock(&mq_lock);

		/* all is ok */
		info->user = get_uid(u);
	} else if (S_ISDIR(mode)) {
		inc_nlink(inode);
		/* Some things misbehave if size == 0 on a directory */
		inode->i_size = 2 * DIRENT_SIZE;
		inode->i_op = &mqueue_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
	}

	return inode;
out_inode:
	iput(inode);
err:
	return ERR_PTR(ret);
}

static int mqueue_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct inode *inode;
	struct ipc_namespace *ns = sb->s_fs_info;

	sb->s_iflags |= SB_I_NOEXEC | SB_I_NODEV;
	sb->s_blocksize = PAGE_SIZE;
	sb->s_blocksize_bits = PAGE_SHIFT;
	sb->s_magic = MQUEUE_MAGIC;
	sb->s_op = &mqueue_super_ops;

	inode = mqueue_get_inode(sb, ns, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		return -ENOMEM;
	return 0;
}

static int mqueue_get_tree(struct fs_context *fc)
{
	struct mqueue_fs_context *ctx = fc->fs_private;

	put_user_ns(fc->user_ns);
	fc->user_ns = get_user_ns(ctx->ipc_ns->user_ns);
	fc->s_fs_info = ctx->ipc_ns;
	return vfs_get_super(fc, vfs_get_keyed_super, mqueue_fill_super);
}

static void mqueue_fs_context_free(struct fs_context *fc)
{
	struct mqueue_fs_context *ctx = fc->fs_private;

	if (ctx->ipc_ns)
		put_ipc_ns(ctx->ipc_ns);
	kfree(ctx);
}

static int mqueue_init_fs_context(struct fs_context *fc)
{
	struct mqueue_fs_context *ctx;

	ctx = kzalloc(sizeof(struct mqueue_fs_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->ipc_ns = get_ipc_ns(current->nsproxy->ipc_ns);
	fc->fs_private = ctx;
	fc->ops = &mqueue_fs_context_ops;
	return 0;
}

static struct vfsmount *mq_create_mount(struct ipc_namespace *ns)
{
	struct mqueue_fs_context *ctx;
	struct fs_context *fc;
	struct vfsmount *mnt;

	fc = fs_context_for_mount(&mqueue_fs_type, SB_KERNMOUNT);
	if (IS_ERR(fc))
		return ERR_CAST(fc);

	ctx = fc->fs_private;
	put_ipc_ns(ctx->ipc_ns);
	ctx->ipc_ns = get_ipc_ns(ns);

	mnt = fc_mount(fc);
	put_fs_context(fc);
	return mnt;
}

static void init_once(void *foo)
{
	struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;

	inode_init_once(&p->vfs_inode);
}

static struct inode *mqueue_alloc_inode(struct super_block *sb)
{
	struct mqueue_inode_info *ei;

	ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}

static void mqueue_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
}

static void mqueue_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, mqueue_i_callback);
}

static void mqueue_evict_inode(struct inode *inode)
{
	struct mqueue_inode_info *info;
	struct user_struct *user;
	unsigned long mq_bytes, mq_treesize;
	struct ipc_namespace *ipc_ns;
	struct msg_msg *msg;

	clear_inode(inode);

	if (S_ISDIR(inode->i_mode))
		return;

	ipc_ns = get_ns_from_inode(inode);
	info = MQUEUE_I(inode);
	spin_lock(&info->lock);
	while ((msg = msg_get(info)) != NULL)
		free_msg(msg);
	kfree(info->node_cache);
	spin_unlock(&info->lock);

	/* Total amount of bytes accounted for the mqueue */
	mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
		min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
		sizeof(struct posix_msg_tree_node);

	mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
				  info->attr.mq_msgsize);

	user = info->user;
	if (user) {
		spin_lock(&mq_lock);
		user->mq_bytes -= mq_bytes;
		/*
		 * get_ns_from_inode() ensures that the
		 * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns
		 * to which we now hold a reference, or it is NULL.
		 * We can't put it here under mq_lock, though.
		 */
		if (ipc_ns)
			ipc_ns->mq_queues_count--;
		spin_unlock(&mq_lock);
		free_uid(user);
	}
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
}

static int mqueue_create_attr(struct dentry *dentry, umode_t mode, void *arg)
{
	struct inode *dir = dentry->d_parent->d_inode;
	struct inode *inode;
	struct mq_attr *attr = arg;
	int error;
	struct ipc_namespace *ipc_ns;

	spin_lock(&mq_lock);
	ipc_ns = __get_ns_from_inode(dir);
	if (!ipc_ns) {
		error = -EACCES;
		goto out_unlock;
	}

	if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
	    !capable(CAP_SYS_RESOURCE)) {
		error = -ENOSPC;
		goto out_unlock;
	}
	ipc_ns->mq_queues_count++;
	spin_unlock(&mq_lock);

	inode = mqueue_get_inode(dir->i_sb, ipc_ns, mode, attr);
	if (IS_ERR(inode)) {
		error = PTR_ERR(inode);
		spin_lock(&mq_lock);
		ipc_ns->mq_queues_count--;
		goto out_unlock;
	}

	put_ipc_ns(ipc_ns);
	dir->i_size += DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);

	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
out_unlock:
	spin_unlock(&mq_lock);
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
	return error;
}

static int mqueue_create(struct inode *dir, struct dentry *dentry,
			 umode_t mode, bool excl)
{
	return mqueue_create_attr(dentry, mode, NULL);
}

static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);

	dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);
	dir->i_size -= DIRENT_SIZE;
	drop_nlink(inode);
	dput(dentry);
	return 0;
}

/*
 * This routine handles a read(2) on a queue file. Rather than implement
 * some flavour of mq_receive here, we only expose the queue size and
 * notification info: the only values that are interesting from a user's
 * point of view yet aren't accessible through the standard routines.
 */
static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
				size_t count, loff_t *off)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
	char buffer[FILENT_SIZE];
	ssize_t ret;

	spin_lock(&info->lock);
	snprintf(buffer, sizeof(buffer),
		 "QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n",
		 info->qsize,
		 info->notify_owner ? info->notify.sigev_notify : 0,
		 (info->notify_owner &&
		  info->notify.sigev_notify == SIGEV_SIGNAL) ?
			info->notify.sigev_signo : 0,
		 pid_vnr(info->notify_owner));
	spin_unlock(&info->lock);
	buffer[sizeof(buffer)-1] = '\0';

	ret = simple_read_from_buffer(u_data, count, off, buffer,
				      strlen(buffer));
	if (ret <= 0)
		return ret;

	file_inode(filp)->i_atime = file_inode(filp)->i_ctime = current_time(file_inode(filp));
	return ret;
}
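
/*
 * Reading the queue file from userspace, e.g. "cat /dev/mqueue/foo",
 * therefore returns a single line built from the format above. An
 * illustrative (made-up) example for a queue holding 80 bytes of
 * messages, with a SIGEV_SIGNAL/SIGUSR1 notification registered by a
 * task whose pid in the reader's namespace is 1234:
 *
 *	QSIZE:80         NOTIFY:0     SIGNO:10    NOTIFY_PID:1234
 *
 * (On Linux, SIGEV_SIGNAL is 0 and, on x86, SIGUSR1 is 10.)
 */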

static int mqueue_flush_file(struct file *filp, fl_owner_t id)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));

	spin_lock(&info->lock);
	if (task_tgid(current) == info->notify_owner)
		remove_notification(info);

	spin_unlock(&info->lock);
	return 0;
}

static __poll_t mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
	__poll_t retval = 0;

	poll_wait(filp, &info->wait_q, poll_tab);

	spin_lock(&info->lock);
	if (info->attr.mq_curmsgs)
		retval = EPOLLIN | EPOLLRDNORM;

	if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
		retval |= EPOLLOUT | EPOLLWRNORM;
	spin_unlock(&info->lock);

	return retval;
}

/* Adds current to info->e_wait_q[sr] before element with smaller prio */
static void wq_add(struct mqueue_inode_info *info, int sr,
		   struct ext_wait_queue *ewp)
{
	struct ext_wait_queue *walk;

	ewp->task = current;

	list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
		if (walk->task->prio <= current->prio) {
			list_add_tail(&ewp->list, &walk->list);
			return;
		}
	}
	list_add_tail(&ewp->list, &info->e_wait_q[sr].list);
}

/*
 * Puts the current task to sleep. The caller must hold the queue lock;
 * the lock is no longer held on return.
 * sr: SEND or RECV
 */
static int wq_sleep(struct mqueue_inode_info *info, int sr,
		    ktime_t *timeout, struct ext_wait_queue *ewp)
	__releases(&info->lock)
{
	int retval;
	signed long time;

	wq_add(info, sr, ewp);

	for (;;) {
		__set_current_state(TASK_INTERRUPTIBLE);

		spin_unlock(&info->lock);
		time = schedule_hrtimeout_range_clock(timeout, 0,
			HRTIMER_MODE_ABS, CLOCK_REALTIME);

		if (ewp->state == STATE_READY) {
			retval = 0;
			goto out;
		}
		spin_lock(&info->lock);
		if (ewp->state == STATE_READY) {
			retval = 0;
			goto out_unlock;
		}
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		if (time == 0) {
			retval = -ETIMEDOUT;
			break;
		}
	}
	list_del(&ewp->list);
out_unlock:
	spin_unlock(&info->lock);
out:
	return retval;
}

/*
 * Returns waiting task that should be serviced first or NULL if none exists
 */
static struct ext_wait_queue *wq_get_first_waiter(
		struct mqueue_inode_info *info, int sr)
{
	struct list_head *ptr;

	ptr = info->e_wait_q[sr].list.prev;
	if (ptr == &info->e_wait_q[sr].list)
		return NULL;
	return list_entry(ptr, struct ext_wait_queue, list);
}


static inline void set_cookie(struct sk_buff *skb, char code)
{
	((char *)skb->data)[NOTIFY_COOKIE_LEN-1] = code;
}

/*
 * This function exists only to keep sys_mq_timedsend() from growing
 * too long.
 */
static void __do_notify(struct mqueue_inode_info *info)
{
	/*
	 * Notification is invoked when a process is registered, no process
	 * is waiting synchronously for a message, AND the queue has just
	 * changed from empty to non-empty. At this point we are sure that
	 * nobody is waiting synchronously.
	 */
	if (info->notify_owner &&
	    info->attr.mq_curmsgs == 1) {
		struct kernel_siginfo sig_i;
		switch (info->notify.sigev_notify) {
		case SIGEV_NONE:
			break;
		case SIGEV_SIGNAL:
			/* sends signal */

			clear_siginfo(&sig_i);
			sig_i.si_signo = info->notify.sigev_signo;
			sig_i.si_errno = 0;
			sig_i.si_code = SI_MESGQ;
			sig_i.si_value = info->notify.sigev_value;
			/* map current pid/uid into info->owner's namespaces */
			rcu_read_lock();
			sig_i.si_pid = task_tgid_nr_ns(current,
						ns_of_pid(info->notify_owner));
			sig_i.si_uid = from_kuid_munged(info->notify_user_ns, current_uid());
			rcu_read_unlock();

			kill_pid_info(info->notify.sigev_signo,
				      &sig_i, info->notify_owner);
			break;
		case SIGEV_THREAD:
			set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
			netlink_sendskb(info->notify_sock, info->notify_cookie);
			break;
		}
		/* after notification unregisters process */
		put_pid(info->notify_owner);
		put_user_ns(info->notify_user_ns);
		info->notify_owner = NULL;
		info->notify_user_ns = NULL;
	}
	wake_up(&info->wait_q);
}

static int prepare_timeout(const struct __kernel_timespec __user *u_abs_timeout,
			   struct timespec64 *ts)
{
	if (get_timespec64(ts, u_abs_timeout))
		return -EFAULT;
	if (!timespec64_valid(ts))
		return -EINVAL;
	return 0;
}

static void remove_notification(struct mqueue_inode_info *info)
{
	if (info->notify_owner != NULL &&
	    info->notify.sigev_notify == SIGEV_THREAD) {
		set_cookie(info->notify_cookie, NOTIFY_REMOVED);
		netlink_sendskb(info->notify_sock, info->notify_cookie);
	}
	put_pid(info->notify_owner);
	put_user_ns(info->notify_user_ns);
	info->notify_owner = NULL;
	info->notify_user_ns = NULL;
}

static int prepare_open(struct dentry *dentry, int oflag, int ro,
			umode_t mode, struct filename *name,
			struct mq_attr *attr)
{
	static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
						  MAY_READ | MAY_WRITE };
	int acc;

	if (d_really_is_negative(dentry)) {
		if (!(oflag & O_CREAT))
			return -ENOENT;
		if (ro)
			return ro;
		audit_inode_parent_hidden(name, dentry->d_parent);
		return vfs_mkobj(dentry, mode & ~current_umask(),
				 mqueue_create_attr, attr);
	}
	/* it already existed */
	audit_inode(name, dentry, 0);
	if ((oflag & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL))
		return -EEXIST;
	if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY))
		return -EINVAL;
	acc = oflag2acc[oflag & O_ACCMODE];
	return inode_permission(d_inode(dentry), acc);
}

static int do_mq_open(const char __user *u_name, int oflag, umode_t mode,
		      struct mq_attr *attr)
{
	struct vfsmount *mnt = current->nsproxy->ipc_ns->mq_mnt;
	struct dentry *root = mnt->mnt_root;
	struct filename *name;
	struct path path;
	int fd, error;
	int ro;

	audit_mq_open(oflag, mode, attr);

	if (IS_ERR(name = getname(u_name)))
		return PTR_ERR(name);

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		goto out_putname;

	ro = mnt_want_write(mnt);	/* we'll drop it in any case */
	inode_lock(d_inode(root));
	path.dentry = lookup_one_len(name->name, root, strlen(name->name));
	if (IS_ERR(path.dentry)) {
		error = PTR_ERR(path.dentry);
		goto out_putfd;
	}
	path.mnt = mntget(mnt);
	error = prepare_open(path.dentry, oflag, ro, mode, name, attr);
	if (!error) {
		struct file *file = dentry_open(&path, oflag, current_cred());
		if (!IS_ERR(file))
			fd_install(fd, file);
		else
			error = PTR_ERR(file);
	}
	path_put(&path);
out_putfd:
	if (error) {
		put_unused_fd(fd);
		fd = error;
	}
	inode_unlock(d_inode(root));
	if (!ro)
		mnt_drop_write(mnt);
out_putname:
	putname(name);
	return fd;
}

SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
		struct mq_attr __user *, u_attr)
{
	struct mq_attr attr;
	if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr)))
		return -EFAULT;

	return do_mq_open(u_name, oflag, mode, u_attr ? &attr : NULL);
}
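
/*
 * Userspace normally reaches the syscall above through the mq_open(3)
 * wrapper in librt. A minimal illustrative sketch (the queue name and
 * attribute values are made up):
 *
 *	#include <mqueue.h>
 *	#include <fcntl.h>
 *
 *	struct mq_attr attr = {
 *		.mq_maxmsg  = 10,	// capacity, in messages
 *		.mq_msgsize = 256,	// largest single message, in bytes
 *	};
 *	mqd_t mqd = mq_open("/myq", O_CREAT | O_RDWR, 0600, &attr);
 *	if (mqd == (mqd_t)-1)
 *		perror("mq_open");
 *
 * Passing a NULL attr instead selects the per-namespace defaults that
 * mqueue_get_inode() fills in above.
 */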

SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
{
	int err;
	struct filename *name;
	struct dentry *dentry;
	struct inode *inode = NULL;
	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
	struct vfsmount *mnt = ipc_ns->mq_mnt;

	name = getname(u_name);
	if (IS_ERR(name))
		return PTR_ERR(name);

	audit_inode_parent_hidden(name, mnt->mnt_root);
	err = mnt_want_write(mnt);
	if (err)
		goto out_name;
	inode_lock_nested(d_inode(mnt->mnt_root), I_MUTEX_PARENT);
	dentry = lookup_one_len(name->name, mnt->mnt_root,
				strlen(name->name));
	if (IS_ERR(dentry)) {
		err = PTR_ERR(dentry);
		goto out_unlock;
	}

	inode = d_inode(dentry);
	if (!inode) {
		err = -ENOENT;
	} else {
		ihold(inode);
		err = vfs_unlink(d_inode(dentry->d_parent), dentry, NULL);
	}
	dput(dentry);

out_unlock:
	inode_unlock(d_inode(mnt->mnt_root));
	if (inode)
		iput(inode);
	mnt_drop_write(mnt);
out_name:
	putname(name);

	return err;
}

/* Pipelined send and receive functions.
 *
 * If a receiver finds no waiting message, then it registers itself in the
 * list of waiting receivers. A sender checks that list before adding the new
 * message into the message array. If there is a waiting receiver, then it
 * bypasses the message array and directly hands the message over to the
 * receiver. The receiver accepts the message and returns without grabbing the
 * queue spinlock:
 *
 * - Set pointer to message.
 * - Queue the receiver task for later wakeup (without the info->lock).
 * - Update its state to STATE_READY. Now the receiver can continue.
 * - Wake up the process after the lock is dropped. Should the process wake up
 *   before this wakeup (due to a timeout or a signal) it will either see
 *   STATE_READY and continue or acquire the lock to check the state again.
 *
 * The same algorithm is used for senders.
 */

/* pipelined_send() - send a message directly to the task waiting in
 * sys_mq_timedreceive() (without inserting message into a queue).
 */
static inline void pipelined_send(struct wake_q_head *wake_q,
				  struct mqueue_inode_info *info,
				  struct msg_msg *message,
				  struct ext_wait_queue *receiver)
{
	receiver->msg = message;
	list_del(&receiver->list);
	wake_q_add(wake_q, receiver->task);
	/*
	 * Rely on the implicit cmpxchg barrier from wake_q_add such
	 * that we can ensure that updating receiver->state is the last
	 * write operation: As once set, the receiver can continue,
	 * and if we don't have the reference count from the wake_q,
	 * yet, at that point we can later have a use-after-free
	 * condition and bogus wakeup.
	 */
	receiver->state = STATE_READY;
}

/* pipelined_receive() - if a task is waiting in sys_mq_timedsend(), take
 * its message and insert it into the queue (there is guaranteed to be room).
 */
static inline void pipelined_receive(struct wake_q_head *wake_q,
				     struct mqueue_inode_info *info)
{
	struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);

	if (!sender) {
		/* for poll */
		wake_up_interruptible(&info->wait_q);
		return;
	}
	if (msg_insert(sender->msg, info))
		return;

	list_del(&sender->list);
	wake_q_add(wake_q, sender->task);
	sender->state = STATE_READY;
}

static int do_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr,
		size_t msg_len, unsigned int msg_prio,
		struct timespec64 *ts)
{
	struct fd f;
	struct inode *inode;
	struct ext_wait_queue wait;
	struct ext_wait_queue *receiver;
	struct msg_msg *msg_ptr;
	struct mqueue_inode_info *info;
	ktime_t expires, *timeout = NULL;
	struct posix_msg_tree_node *new_leaf = NULL;
	int ret = 0;
	DEFINE_WAKE_Q(wake_q);

	if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX))
		return -EINVAL;

	if (ts) {
		expires = timespec64_to_ktime(*ts);
		timeout = &expires;
	}

	audit_mq_sendrecv(mqdes, msg_len, msg_prio, ts);

	f = fdget(mqdes);
	if (unlikely(!f.file)) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);
	audit_file(f.file);

	if (unlikely(!(f.file->f_mode & FMODE_WRITE))) {
		ret = -EBADF;
		goto out_fput;
	}

	if (unlikely(msg_len > info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/* First try to allocate memory, before doing anything with
	 * existing queues. */
	msg_ptr = load_msg(u_msg_ptr, msg_len);
	if (IS_ERR(msg_ptr)) {
		ret = PTR_ERR(msg_ptr);
		goto out_fput;
	}
	msg_ptr->m_ts = msg_len;
	msg_ptr->m_type = msg_prio;

	/*
	 * msg_insert really wants us to have a valid, spare node struct so
	 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
	 * fall back to that if necessary.
	 */
	if (!info->node_cache)
		new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

	spin_lock(&info->lock);

	if (!info->node_cache && new_leaf) {
		/* Save our speculative allocation into the cache */
		INIT_LIST_HEAD(&new_leaf->msg_list);
		info->node_cache = new_leaf;
		new_leaf = NULL;
	} else {
		kfree(new_leaf);
	}

	if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
		if (f.file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
		} else {
			wait.task = current;
			wait.msg = (void *) msg_ptr;
			wait.state = STATE_NONE;
			ret = wq_sleep(info, SEND, timeout, &wait);
			/*
			 * wq_sleep must be called with info->lock held, and
			 * returns with the lock released
			 */
			goto out_free;
		}
	} else {
		receiver = wq_get_first_waiter(info, RECV);
		if (receiver) {
			pipelined_send(&wake_q, info, msg_ptr, receiver);
		} else {
			/* adds message to the queue */
			ret = msg_insert(msg_ptr, info);
			if (ret)
				goto out_unlock;
			__do_notify(info);
		}
		inode->i_atime = inode->i_mtime = inode->i_ctime =
				current_time(inode);
	}
out_unlock:
	spin_unlock(&info->lock);
	wake_up_q(&wake_q);
out_free:
	if (ret)
		free_msg(msg_ptr);
out_fput:
	fdput(f);
out:
	return ret;
}

static int do_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr,
		size_t msg_len, unsigned int __user *u_msg_prio,
		struct timespec64 *ts)
{
	ssize_t ret;
	struct msg_msg *msg_ptr;
	struct fd f;
	struct inode *inode;
	struct mqueue_inode_info *info;
	struct ext_wait_queue wait;
	ktime_t expires, *timeout = NULL;
	struct posix_msg_tree_node *new_leaf = NULL;

	if (ts) {
		expires = timespec64_to_ktime(*ts);
		timeout = &expires;
	}

	audit_mq_sendrecv(mqdes, msg_len, 0, ts);

	f = fdget(mqdes);
	if (unlikely(!f.file)) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);
	audit_file(f.file);

	if (unlikely(!(f.file->f_mode & FMODE_READ))) {
		ret = -EBADF;
		goto out_fput;
	}

	/* checks if buffer is big enough */
	if (unlikely(msg_len < info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/*
	 * msg_insert really wants us to have a valid, spare node struct so
	 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
	 * fall back to that if necessary.
	 */
	if (!info->node_cache)
		new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

	spin_lock(&info->lock);

	if (!info->node_cache && new_leaf) {
		/* Save our speculative allocation into the cache */
		INIT_LIST_HEAD(&new_leaf->msg_list);
		info->node_cache = new_leaf;
	} else {
		kfree(new_leaf);
	}

	if (info->attr.mq_curmsgs == 0) {
		if (f.file->f_flags & O_NONBLOCK) {
			spin_unlock(&info->lock);
			ret = -EAGAIN;
		} else {
			wait.task = current;
			wait.state = STATE_NONE;
			ret = wq_sleep(info, RECV, timeout, &wait);
			msg_ptr = wait.msg;
		}
	} else {
		DEFINE_WAKE_Q(wake_q);

		msg_ptr = msg_get(info);

		inode->i_atime = inode->i_mtime = inode->i_ctime =
				current_time(inode);

		/* There is now free space in queue. */
		pipelined_receive(&wake_q, info);
		spin_unlock(&info->lock);
		wake_up_q(&wake_q);
		ret = 0;
	}
	if (ret == 0) {
		ret = msg_ptr->m_ts;

		if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) ||
			store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) {
			ret = -EFAULT;
		}
		free_msg(msg_ptr);
	}
out_fput:
	fdput(f);
out:
	return ret;
}

SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int, msg_prio,
		const struct __kernel_timespec __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p);
}

SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int __user *, u_msg_prio,
		const struct __kernel_timespec __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p);
}
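
/*
 * Illustrative userspace sketch for the two syscalls above, via the
 * librt wrappers (identifiers are made up; error handling elided):
 *
 *	struct timespec ts;
 *	clock_gettime(CLOCK_REALTIME, &ts);	// timeouts are absolute
 *	ts.tv_sec += 5;				// and CLOCK_REALTIME based
 *
 *	mq_timedsend(mqd, "hi", 2, 1, &ts);	// priority 1
 *
 *	char buf[8192];				// must be >= mq_msgsize
 *	unsigned int prio;
 *	ssize_t n = mq_timedreceive(mqd, buf, sizeof(buf), &prio, &ts);
 *
 * Note the asymmetry enforced above: a send may be at most mq_msgsize
 * bytes, while the receive buffer must be at least mq_msgsize bytes, or
 * the calls fail with -EMSGSIZE.
 */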

/*
 * Note: if a user asks us to deregister (by passing a NULL pointer) but
 * isn't the current owner of the notification, the request is silently
 * ignored. This case isn't explicitly defined in POSIX.
 */
static int do_mq_notify(mqd_t mqdes, const struct sigevent *notification)
{
	int ret;
	struct fd f;
	struct sock *sock;
	struct inode *inode;
	struct mqueue_inode_info *info;
	struct sk_buff *nc;

	audit_mq_notify(mqdes, notification);

	nc = NULL;
	sock = NULL;
	if (notification != NULL) {
		if (unlikely(notification->sigev_notify != SIGEV_NONE &&
			     notification->sigev_notify != SIGEV_SIGNAL &&
			     notification->sigev_notify != SIGEV_THREAD))
			return -EINVAL;
		if (notification->sigev_notify == SIGEV_SIGNAL &&
			!valid_signal(notification->sigev_signo)) {
			return -EINVAL;
		}
		if (notification->sigev_notify == SIGEV_THREAD) {
			long timeo;

			/* create the notify skb */
			nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
			if (!nc) {
				ret = -ENOMEM;
				goto out;
			}
			if (copy_from_user(nc->data,
					notification->sigev_value.sival_ptr,
					NOTIFY_COOKIE_LEN)) {
				ret = -EFAULT;
				goto out;
			}

			/* TODO: add a header? */
			skb_put(nc, NOTIFY_COOKIE_LEN);
			/* and attach it to the socket */
retry:
			f = fdget(notification->sigev_signo);
			if (!f.file) {
				ret = -EBADF;
				goto out;
			}
			sock = netlink_getsockbyfilp(f.file);
			fdput(f);
			if (IS_ERR(sock)) {
				ret = PTR_ERR(sock);
				sock = NULL;
				goto out;
			}

			timeo = MAX_SCHEDULE_TIMEOUT;
			ret = netlink_attachskb(sock, nc, &timeo, NULL);
			if (ret == 1) {
				sock = NULL;
				goto retry;
			}
			if (ret) {
				sock = NULL;
				nc = NULL;
				goto out;
			}
		}
	}

	f = fdget(mqdes);
	if (!f.file) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);

	ret = 0;
	spin_lock(&info->lock);
	if (notification == NULL) {
		if (info->notify_owner == task_tgid(current)) {
			remove_notification(info);
			inode->i_atime = inode->i_ctime = current_time(inode);
		}
	} else if (info->notify_owner != NULL) {
		ret = -EBUSY;
	} else {
		switch (notification->sigev_notify) {
		case SIGEV_NONE:
			info->notify.sigev_notify = SIGEV_NONE;
			break;
		case SIGEV_THREAD:
			info->notify_sock = sock;
			info->notify_cookie = nc;
			sock = NULL;
			nc = NULL;
			info->notify.sigev_notify = SIGEV_THREAD;
			break;
		case SIGEV_SIGNAL:
			info->notify.sigev_signo = notification->sigev_signo;
			info->notify.sigev_value = notification->sigev_value;
			info->notify.sigev_notify = SIGEV_SIGNAL;
			break;
		}

		info->notify_owner = get_pid(task_tgid(current));
		info->notify_user_ns = get_user_ns(current_user_ns());
		inode->i_atime = inode->i_ctime = current_time(inode);
	}
	spin_unlock(&info->lock);
out_fput:
	fdput(f);
out:
	if (sock)
		netlink_detachskb(sock, nc);
	else if (nc)
		dev_kfree_skb(nc);

	return ret;
}

SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
		const struct sigevent __user *, u_notification)
{
	struct sigevent n, *p = NULL;
	if (u_notification) {
		if (copy_from_user(&n, u_notification, sizeof(struct sigevent)))
			return -EFAULT;
		p = &n;
	}
	return do_mq_notify(mqdes, p);
}
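
/*
 * Illustrative sketch of registering a notification from userspace.
 * The SIGEV_THREAD case is more involved: as handled above, the C
 * library passes a netlink socket fd in sigev_signo and a cookie in
 * sigev_value, and spawns the thread itself. The SIGEV_SIGNAL case
 * is simply:
 *
 *	struct sigevent sev = {
 *		.sigev_notify = SIGEV_SIGNAL,
 *		.sigev_signo  = SIGUSR1,
 *	};
 *	mq_notify(mqd, &sev);	// fires once, when the queue goes
 *				// from empty to non-empty
 *	mq_notify(mqd, NULL);	// deregister
 *
 * Only one process may be registered per queue at a time; a second
 * registration fails with -EBUSY, as implemented above.
 */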

static int do_mq_getsetattr(int mqdes, struct mq_attr *new, struct mq_attr *old)
{
	struct fd f;
	struct inode *inode;
	struct mqueue_inode_info *info;

	if (new && (new->mq_flags & (~O_NONBLOCK)))
		return -EINVAL;

	f = fdget(mqdes);
	if (!f.file)
		return -EBADF;

	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		fdput(f);
		return -EBADF;
	}

	inode = file_inode(f.file);
	info = MQUEUE_I(inode);

	spin_lock(&info->lock);

	if (old) {
		*old = info->attr;
		old->mq_flags = f.file->f_flags & O_NONBLOCK;
	}
	if (new) {
		audit_mq_getsetattr(mqdes, new);
		spin_lock(&f.file->f_lock);
		if (new->mq_flags & O_NONBLOCK)
			f.file->f_flags |= O_NONBLOCK;
		else
			f.file->f_flags &= ~O_NONBLOCK;
		spin_unlock(&f.file->f_lock);

		inode->i_atime = inode->i_ctime = current_time(inode);
	}

	spin_unlock(&info->lock);
	fdput(f);
	return 0;
}

SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
		const struct mq_attr __user *, u_mqstat,
		struct mq_attr __user *, u_omqstat)
{
	int ret;
	struct mq_attr mqstat, omqstat;
	struct mq_attr *new = NULL, *old = NULL;

	if (u_mqstat) {
		new = &mqstat;
		if (copy_from_user(new, u_mqstat, sizeof(struct mq_attr)))
			return -EFAULT;
	}
	if (u_omqstat)
		old = &omqstat;

	ret = do_mq_getsetattr(mqdes, new, old);
	if (ret || !old)
		return ret;

	if (copy_to_user(u_omqstat, old, sizeof(struct mq_attr)))
		return -EFAULT;
	return 0;
}
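
/*
 * Illustrative sketch: the mq_getattr(3) and mq_setattr(3) wrappers both
 * funnel into the syscall above. Toggling O_NONBLOCK, the only mutable
 * flag, looks like:
 *
 *	struct mq_attr attr;
 *	mq_getattr(mqd, &attr);		// attr.mq_curmsgs = current depth
 *	attr.mq_flags |= O_NONBLOCK;
 *	mq_setattr(mqd, &attr, NULL);	// only mq_flags is consulted
 *
 * Any other bit set in mq_flags makes do_mq_getsetattr() return -EINVAL.
 */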

#ifdef CONFIG_COMPAT

struct compat_mq_attr {
	compat_long_t mq_flags;      /* message queue flags */
	compat_long_t mq_maxmsg;     /* maximum number of messages */
	compat_long_t mq_msgsize;    /* maximum message size */
	compat_long_t mq_curmsgs;    /* number of messages currently queued */
	compat_long_t __reserved[4]; /* ignored for input, zeroed for output */
};

static inline int get_compat_mq_attr(struct mq_attr *attr,
			const struct compat_mq_attr __user *uattr)
{
	struct compat_mq_attr v;

	if (copy_from_user(&v, uattr, sizeof(*uattr)))
		return -EFAULT;

	memset(attr, 0, sizeof(*attr));
	attr->mq_flags = v.mq_flags;
	attr->mq_maxmsg = v.mq_maxmsg;
	attr->mq_msgsize = v.mq_msgsize;
	attr->mq_curmsgs = v.mq_curmsgs;
	return 0;
}

static inline int put_compat_mq_attr(const struct mq_attr *attr,
			struct compat_mq_attr __user *uattr)
{
	struct compat_mq_attr v;

	memset(&v, 0, sizeof(v));
	v.mq_flags = attr->mq_flags;
	v.mq_maxmsg = attr->mq_maxmsg;
	v.mq_msgsize = attr->mq_msgsize;
	v.mq_curmsgs = attr->mq_curmsgs;
	if (copy_to_user(uattr, &v, sizeof(*uattr)))
		return -EFAULT;
	return 0;
}

COMPAT_SYSCALL_DEFINE4(mq_open, const char __user *, u_name,
		       int, oflag, compat_mode_t, mode,
		       struct compat_mq_attr __user *, u_attr)
{
	struct mq_attr attr, *p = NULL;
	if (u_attr && oflag & O_CREAT) {
		p = &attr;
		if (get_compat_mq_attr(&attr, u_attr))
			return -EFAULT;
	}
	return do_mq_open(u_name, oflag, mode, p);
}

COMPAT_SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
		       const struct compat_sigevent __user *, u_notification)
{
	struct sigevent n, *p = NULL;
	if (u_notification) {
		if (get_compat_sigevent(&n, u_notification))
			return -EFAULT;
		if (n.sigev_notify == SIGEV_THREAD)
			n.sigev_value.sival_ptr = compat_ptr(n.sigev_value.sival_int);
		p = &n;
	}
	return do_mq_notify(mqdes, p);
}

COMPAT_SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
		       const struct compat_mq_attr __user *, u_mqstat,
		       struct compat_mq_attr __user *, u_omqstat)
{
	int ret;
	struct mq_attr mqstat, omqstat;
	struct mq_attr *new = NULL, *old = NULL;

	if (u_mqstat) {
		new = &mqstat;
		if (get_compat_mq_attr(new, u_mqstat))
			return -EFAULT;
	}
	if (u_omqstat)
		old = &omqstat;

	ret = do_mq_getsetattr(mqdes, new, old);
	if (ret || !old)
		return ret;

	if (put_compat_mq_attr(old, u_omqstat))
		return -EFAULT;
	return 0;
}
#endif

#ifdef CONFIG_COMPAT_32BIT_TIME
static int compat_prepare_timeout(const struct old_timespec32 __user *p,
				  struct timespec64 *ts)
{
	if (get_old_timespec32(ts, p))
		return -EFAULT;
	if (!timespec64_valid(ts))
		return -EINVAL;
	return 0;
}

SYSCALL_DEFINE5(mq_timedsend_time32, mqd_t, mqdes,
		const char __user *, u_msg_ptr,
		unsigned int, msg_len, unsigned int, msg_prio,
		const struct old_timespec32 __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = compat_prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p);
}

SYSCALL_DEFINE5(mq_timedreceive_time32, mqd_t, mqdes,
		char __user *, u_msg_ptr,
		unsigned int, msg_len, unsigned int __user *, u_msg_prio,
		const struct old_timespec32 __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = compat_prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p);
}
#endif

static const struct inode_operations mqueue_dir_inode_operations = {
	.lookup = simple_lookup,
	.create = mqueue_create,
	.unlink = mqueue_unlink,
};

static const struct file_operations mqueue_file_operations = {
	.flush = mqueue_flush_file,
	.poll = mqueue_poll_file,
	.read = mqueue_read_file,
	.llseek = default_llseek,
};

static const struct super_operations mqueue_super_ops = {
	.alloc_inode = mqueue_alloc_inode,
	.destroy_inode = mqueue_destroy_inode,
	.evict_inode = mqueue_evict_inode,
	.statfs = simple_statfs,
};

static const struct fs_context_operations mqueue_fs_context_ops = {
	.free		= mqueue_fs_context_free,
	.get_tree	= mqueue_get_tree,
};

static struct file_system_type mqueue_fs_type = {
	.name			= "mqueue",
	.init_fs_context	= mqueue_init_fs_context,
	.kill_sb		= kill_litter_super,
	.fs_flags		= FS_USERNS_MOUNT,
};

int mq_init_ns(struct ipc_namespace *ns)
{
	struct vfsmount *m;

	ns->mq_queues_count  = 0;
	ns->mq_queues_max    = DFLT_QUEUESMAX;
	ns->mq_msg_max       = DFLT_MSGMAX;
	ns->mq_msgsize_max   = DFLT_MSGSIZEMAX;
	ns->mq_msg_default   = DFLT_MSG;
	ns->mq_msgsize_default = DFLT_MSGSIZE;

	m = mq_create_mount(ns);
	if (IS_ERR(m))
		return PTR_ERR(m);
	ns->mq_mnt = m;
	return 0;
}

void mq_clear_sbinfo(struct ipc_namespace *ns)
{
	ns->mq_mnt->mnt_sb->s_fs_info = NULL;
}

void mq_put_mnt(struct ipc_namespace *ns)
{
	kern_unmount(ns->mq_mnt);
}

static int __init init_mqueue_fs(void)
{
	int error;

	mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
				sizeof(struct mqueue_inode_info), 0,
				SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT, init_once);
	if (mqueue_inode_cachep == NULL)
		return -ENOMEM;

	/* ignore failures - they are not fatal */
	mq_sysctl_table = mq_register_sysctl_table();

	error = register_filesystem(&mqueue_fs_type);
	if (error)
		goto out_sysctl;

	spin_lock_init(&mq_lock);

	error = mq_init_ns(&init_ipc_ns);
	if (error)
		goto out_filesystem;

	return 0;

out_filesystem:
	unregister_filesystem(&mqueue_fs_type);
out_sysctl:
	if (mq_sysctl_table)
		unregister_sysctl_table(mq_sysctl_table);
	kmem_cache_destroy(mqueue_inode_cachep);
	return error;
}

device_initcall(init_mqueue_fs);