/*
 * POSIX message queues filesystem for Linux.
 *
 * Copyright (C) 2003,2004  Krzysztof Benedyczak  (golbi@mat.uni.torun.pl)
 *                          Michal Wronski        (michal.wronski@gmail.com)
 *
 * Spinlocks:               Mohamed Abbas         (abbas.mohamed@intel.com)
 * Lockless receive & send, fd based notify:
 *                          Manfred Spraul        (manfred@colorfullife.com)
 *
 * Audit:                   George Wilson         (ltcgcw@us.ibm.com)
 *
 * This file is released under the GPL.
 */

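/*
 * For orientation, a minimal user-space sketch of the API this filesystem
 * backs, assuming the usual glibc/librt mq_* wrappers (queue name and sizes
 * below are illustrative only):
 *
 *	struct mq_attr attr = { .mq_maxmsg = 10, .mq_msgsize = 128 };
 *	mqd_t q = mq_open("/example", O_CREAT | O_RDWR, 0600, &attr);
 *	mq_send(q, "hi", 3, 1);			// reaches do_mq_timedsend()
 *	char buf[128];
 *	mq_receive(q, buf, sizeof(buf), NULL);	// reaches do_mq_timedreceive()
 *	mq_close(q);
 *	mq_unlink("/example");			// reaches sys_mq_unlink()
 */
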
#include <linux/capability.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/fs_context.h>
#include <linux/namei.h>
#include <linux/sysctl.h>
#include <linux/poll.h>
#include <linux/mqueue.h>
#include <linux/msg.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/netlink.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/pid.h>
#include <linux/ipc_namespace.h>
#include <linux/user_namespace.h>
#include <linux/slab.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/signal.h>
#include <linux/sched/user.h>

#include <net/sock.h>
#include "util.h"

struct mqueue_fs_context {
	struct ipc_namespace	*ipc_ns;
};

#define MQUEUE_MAGIC	0x19800202
#define DIRENT_SIZE	20
#define FILENT_SIZE	80

#define SEND		0
#define RECV		1

#define STATE_NONE	0
#define STATE_READY	1

struct posix_msg_tree_node {
	struct rb_node		rb_node;
	struct list_head	msg_list;
	int			priority;
};

/*
 * Locking:
 *
 * Accesses to a message queue are synchronized by acquiring info->lock.
 *
 * There are two notable exceptions:
 * - The actual wakeup of a sleeping task is performed using the wake_q
 *   framework. info->lock is already released when wake_up_q is called.
 * - The exit codepaths after sleeping check ext_wait_queue->state without
 *   any locks. If it is STATE_READY, then the syscall is completed without
 *   acquiring info->lock.
 *
 * MQ_BARRIER:
 * To achieve proper release/acquire memory barrier pairing, the state is set
 * to STATE_READY with smp_store_release(), and it is read with READ_ONCE
 * followed by smp_acquire__after_ctrl_dep(). In addition, wake_q_add_safe()
 * is used.
 *
 * This prevents the following races:
 *
 * 1) With the simple wake_q_add(), the task could be gone already before
 *    the increase of the reference happens
 * Thread A
 *				Thread B
 * WRITE_ONCE(wait.state, STATE_NONE);
 * schedule_hrtimeout()
 *				wake_q_add(A)
 *				if (cmpxchg()) // success
 *				   ->state = STATE_READY (reordered)
 * <timeout returns>
 * if (wait.state == STATE_READY) return;
 * sysret to user space
 * sys_exit()
 *				get_task_struct() // UaF
 *
 * Solution: Use wake_q_add_safe() and perform the get_task_struct() before
 * the smp_store_release() that does ->state = STATE_READY.
 *
 * 2) Without proper _release/_acquire barriers, the woken up task
 *    could read stale data
 *
 * Thread A
 *				Thread B
 * do_mq_timedreceive
 * WRITE_ONCE(wait.state, STATE_NONE);
 * schedule_hrtimeout()
 *				state = STATE_READY;
 * <timeout returns>
 * if (wait.state == STATE_READY) return;
 * msg_ptr = wait.msg;		// Access to stale data!
 *				receiver->msg = message; (reordered)
 *
 * Solution: use _release and _acquire barriers.
 *
 * 3) There is intentionally no barrier when setting current->state
 *    to TASK_INTERRUPTIBLE: spin_unlock(&info->lock) provides the
 *    release memory barrier, and the wakeup is triggered when holding
 *    info->lock, i.e. spin_lock(&info->lock) provided a pairing
 *    acquire memory barrier.
 */

struct ext_wait_queue {		/* queue of sleeping tasks */
	struct task_struct *task;
	struct list_head list;
	struct msg_msg *msg;	/* ptr of loaded message */
	int state;		/* one of STATE_* values */
};

struct mqueue_inode_info {
	spinlock_t lock;
	struct inode vfs_inode;
	wait_queue_head_t wait_q;

	struct rb_root msg_tree;
	struct rb_node *msg_tree_rightmost;
	struct posix_msg_tree_node *node_cache;
	struct mq_attr attr;

	struct sigevent notify;
	struct pid *notify_owner;
	u32 notify_self_exec_id;
	struct user_namespace *notify_user_ns;
	struct user_struct *user;	/* user who created, for accounting */
	struct sock *notify_sock;
	struct sk_buff *notify_cookie;

	/* for tasks waiting for free space and messages, respectively */
	struct ext_wait_queue e_wait_q[2];

	unsigned long qsize; /* size of queue in memory (sum of all msgs) */
};

static struct file_system_type mqueue_fs_type;
static const struct inode_operations mqueue_dir_inode_operations;
static const struct file_operations mqueue_file_operations;
static const struct super_operations mqueue_super_ops;
static const struct fs_context_operations mqueue_fs_context_ops;
static void remove_notification(struct mqueue_inode_info *info);

static struct kmem_cache *mqueue_inode_cachep;

static struct ctl_table_header *mq_sysctl_table;

static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
{
	return container_of(inode, struct mqueue_inode_info, vfs_inode);
}

/*
 * This routine should be called with the mq_lock held.
 */
static inline struct ipc_namespace *__get_ns_from_inode(struct inode *inode)
{
	return get_ipc_ns(inode->i_sb->s_fs_info);
}

static struct ipc_namespace *get_ns_from_inode(struct inode *inode)
{
	struct ipc_namespace *ns;

	spin_lock(&mq_lock);
	ns = __get_ns_from_inode(inode);
	spin_unlock(&mq_lock);
	return ns;
}

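/*
 * Messages are kept in an rbtree of posix_msg_tree_node, keyed by priority;
 * each node carries a FIFO list of the messages queued at that priority.
 * info->node_cache holds one spare node so that insertion under info->lock
 * can usually avoid a GFP_ATOMIC allocation.
 */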
/* Auxiliary functions to manipulate messages' list */
static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
{
	struct rb_node **p, *parent = NULL;
	struct posix_msg_tree_node *leaf;
	bool rightmost = true;

	p = &info->msg_tree.rb_node;
	while (*p) {
		parent = *p;
		leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);

		if (likely(leaf->priority == msg->m_type))
			goto insert_msg;
		else if (msg->m_type < leaf->priority) {
			p = &(*p)->rb_left;
			rightmost = false;
		} else
			p = &(*p)->rb_right;
	}
	if (info->node_cache) {
		leaf = info->node_cache;
		info->node_cache = NULL;
	} else {
		leaf = kmalloc(sizeof(*leaf), GFP_ATOMIC);
		if (!leaf)
			return -ENOMEM;
		INIT_LIST_HEAD(&leaf->msg_list);
	}
	leaf->priority = msg->m_type;

	if (rightmost)
		info->msg_tree_rightmost = &leaf->rb_node;

	rb_link_node(&leaf->rb_node, parent, p);
	rb_insert_color(&leaf->rb_node, &info->msg_tree);
insert_msg:
	info->attr.mq_curmsgs++;
	info->qsize += msg->m_ts;
	list_add_tail(&msg->m_list, &leaf->msg_list);
	return 0;
}

static inline void msg_tree_erase(struct posix_msg_tree_node *leaf,
				  struct mqueue_inode_info *info)
{
	struct rb_node *node = &leaf->rb_node;

	if (info->msg_tree_rightmost == node)
		info->msg_tree_rightmost = rb_prev(node);

	rb_erase(node, &info->msg_tree);
	if (info->node_cache)
		kfree(leaf);
	else
		info->node_cache = leaf;
}

static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
{
	struct rb_node *parent = NULL;
	struct posix_msg_tree_node *leaf;
	struct msg_msg *msg;

try_again:
	/*
	 * During insert, low priorities go to the left and high to the
	 * right.  On receive, we want the highest priorities first, so
	 * walk all the way to the right.
	 */
	parent = info->msg_tree_rightmost;
	if (!parent) {
		if (info->attr.mq_curmsgs) {
			pr_warn_once("Inconsistency in POSIX message queue, "
				     "no tree element, but supposedly messages "
				     "should exist!\n");
			info->attr.mq_curmsgs = 0;
		}
		return NULL;
	}
	leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);
	if (unlikely(list_empty(&leaf->msg_list))) {
		pr_warn_once("Inconsistency in POSIX message queue, "
			     "empty leaf node but we haven't implemented "
			     "lazy leaf delete!\n");
		msg_tree_erase(leaf, info);
		goto try_again;
	} else {
		msg = list_first_entry(&leaf->msg_list,
				       struct msg_msg, m_list);
		list_del(&msg->m_list);
		if (list_empty(&leaf->msg_list)) {
			msg_tree_erase(leaf, info);
		}
	}
	info->attr.mq_curmsgs--;
	info->qsize -= msg->m_ts;
	return msg;
}

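/*
 * Allocate and set up an inode for the mqueue filesystem.  For regular files
 * (the queues themselves) this clamps the requested attributes against the
 * namespace limits, charges the worst-case queue size against the creating
 * user's RLIMIT_MSGQUEUE, and initialises the per-queue bookkeeping.
 */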
static struct inode *mqueue_get_inode(struct super_block *sb,
		struct ipc_namespace *ipc_ns, umode_t mode,
		struct mq_attr *attr)
{
	struct user_struct *u = current_user();
	struct inode *inode;
	int ret = -ENOMEM;

	inode = new_inode(sb);
	if (!inode)
		goto err;

	inode->i_ino = get_next_ino();
	inode->i_mode = mode;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_mtime = inode->i_ctime = inode->i_atime = current_time(inode);

	if (S_ISREG(mode)) {
		struct mqueue_inode_info *info;
		unsigned long mq_bytes, mq_treesize;

		inode->i_fop = &mqueue_file_operations;
		inode->i_size = FILENT_SIZE;
		/* mqueue specific info */
		info = MQUEUE_I(inode);
		spin_lock_init(&info->lock);
		init_waitqueue_head(&info->wait_q);
		INIT_LIST_HEAD(&info->e_wait_q[0].list);
		INIT_LIST_HEAD(&info->e_wait_q[1].list);
		info->notify_owner = NULL;
		info->notify_user_ns = NULL;
		info->qsize = 0;
		info->user = NULL;	/* set when all is ok */
		info->msg_tree = RB_ROOT;
		info->msg_tree_rightmost = NULL;
		info->node_cache = NULL;
		memset(&info->attr, 0, sizeof(info->attr));
		info->attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
					   ipc_ns->mq_msg_default);
		info->attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
					    ipc_ns->mq_msgsize_default);
		if (attr) {
			info->attr.mq_maxmsg = attr->mq_maxmsg;
			info->attr.mq_msgsize = attr->mq_msgsize;
		}
		/*
		 * We used to allocate a static array of pointers and account
		 * the size of that array as well as one msg_msg struct per
		 * possible message into the queue size. That's no longer
		 * accurate as the queue is now an rbtree and will grow and
		 * shrink depending on usage patterns.  We can, however, still
		 * account one msg_msg struct per message, but the nodes are
		 * allocated depending on priority usage, and most programs
		 * only use one, or a handful, of priorities.  However, since
		 * this is pinned memory, we need to assume worst case, so
		 * that means the min(mq_maxmsg, max_priorities) * struct
		 * posix_msg_tree_node.
		 */

		ret = -EINVAL;
		if (info->attr.mq_maxmsg <= 0 || info->attr.mq_msgsize <= 0)
			goto out_inode;
		if (capable(CAP_SYS_RESOURCE)) {
			if (info->attr.mq_maxmsg > HARD_MSGMAX ||
			    info->attr.mq_msgsize > HARD_MSGSIZEMAX)
				goto out_inode;
		} else {
			if (info->attr.mq_maxmsg > ipc_ns->mq_msg_max ||
			    info->attr.mq_msgsize > ipc_ns->mq_msgsize_max)
				goto out_inode;
		}
		ret = -EOVERFLOW;
		/* check for overflow */
		if (info->attr.mq_msgsize > ULONG_MAX/info->attr.mq_maxmsg)
			goto out_inode;
		mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
			min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
			sizeof(struct posix_msg_tree_node);
		mq_bytes = info->attr.mq_maxmsg * info->attr.mq_msgsize;
		if (mq_bytes + mq_treesize < mq_bytes)
			goto out_inode;
		mq_bytes += mq_treesize;
		spin_lock(&mq_lock);
		if (u->mq_bytes + mq_bytes < u->mq_bytes ||
		    u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
			spin_unlock(&mq_lock);
			/* mqueue_evict_inode() releases info->messages */
			ret = -EMFILE;
			goto out_inode;
		}
		u->mq_bytes += mq_bytes;
		spin_unlock(&mq_lock);

		/* all is ok */
		info->user = get_uid(u);
	} else if (S_ISDIR(mode)) {
		inc_nlink(inode);
		/* Some things misbehave if size == 0 on a directory */
		inode->i_size = 2 * DIRENT_SIZE;
		inode->i_op = &mqueue_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
	}

	return inode;
out_inode:
	iput(inode);
err:
	return ERR_PTR(ret);
}

static int mqueue_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct inode *inode;
	struct ipc_namespace *ns = sb->s_fs_info;

	sb->s_iflags |= SB_I_NOEXEC | SB_I_NODEV;
	sb->s_blocksize = PAGE_SIZE;
	sb->s_blocksize_bits = PAGE_SHIFT;
	sb->s_magic = MQUEUE_MAGIC;
	sb->s_op = &mqueue_super_ops;

	inode = mqueue_get_inode(sb, ns, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		return -ENOMEM;
	return 0;
}

static int mqueue_get_tree(struct fs_context *fc)
{
	struct mqueue_fs_context *ctx = fc->fs_private;

	return get_tree_keyed(fc, mqueue_fill_super, ctx->ipc_ns);
}

static void mqueue_fs_context_free(struct fs_context *fc)
{
	struct mqueue_fs_context *ctx = fc->fs_private;

	put_ipc_ns(ctx->ipc_ns);
	kfree(ctx);
}

static int mqueue_init_fs_context(struct fs_context *fc)
{
	struct mqueue_fs_context *ctx;

	ctx = kzalloc(sizeof(struct mqueue_fs_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->ipc_ns = get_ipc_ns(current->nsproxy->ipc_ns);
	put_user_ns(fc->user_ns);
	fc->user_ns = get_user_ns(ctx->ipc_ns->user_ns);
	fc->fs_private = ctx;
	fc->ops = &mqueue_fs_context_ops;
	return 0;
}

static struct vfsmount *mq_create_mount(struct ipc_namespace *ns)
{
	struct mqueue_fs_context *ctx;
	struct fs_context *fc;
	struct vfsmount *mnt;

	fc = fs_context_for_mount(&mqueue_fs_type, SB_KERNMOUNT);
	if (IS_ERR(fc))
		return ERR_CAST(fc);

	ctx = fc->fs_private;
	put_ipc_ns(ctx->ipc_ns);
	ctx->ipc_ns = get_ipc_ns(ns);
	put_user_ns(fc->user_ns);
	fc->user_ns = get_user_ns(ctx->ipc_ns->user_ns);

	mnt = fc_mount(fc);
	put_fs_context(fc);
	return mnt;
}

static void init_once(void *foo)
{
	struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;

	inode_init_once(&p->vfs_inode);
}

static struct inode *mqueue_alloc_inode(struct super_block *sb)
{
	struct mqueue_inode_info *ei;

	ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}

static void mqueue_free_inode(struct inode *inode)
{
	kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
}

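/*
 * Tear-down counterpart of mqueue_get_inode(): free every queued message and
 * the cached tree node, then return the accounted bytes to the owning user
 * and drop the namespace's queue count.
 */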
static void mqueue_evict_inode(struct inode *inode)
{
	struct mqueue_inode_info *info;
	struct user_struct *user;
	struct ipc_namespace *ipc_ns;
	struct msg_msg *msg, *nmsg;
	LIST_HEAD(tmp_msg);

	clear_inode(inode);

	if (S_ISDIR(inode->i_mode))
		return;

	ipc_ns = get_ns_from_inode(inode);
	info = MQUEUE_I(inode);
	spin_lock(&info->lock);
	while ((msg = msg_get(info)) != NULL)
		list_add_tail(&msg->m_list, &tmp_msg);
	kfree(info->node_cache);
	spin_unlock(&info->lock);

	list_for_each_entry_safe(msg, nmsg, &tmp_msg, m_list) {
		list_del(&msg->m_list);
		free_msg(msg);
	}

	user = info->user;
	if (user) {
		unsigned long mq_bytes, mq_treesize;

		/* Total amount of bytes accounted for the mqueue */
		mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
			min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
			sizeof(struct posix_msg_tree_node);

		mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
					  info->attr.mq_msgsize);

		spin_lock(&mq_lock);
		user->mq_bytes -= mq_bytes;
		/*
		 * get_ns_from_inode() ensures that the
		 * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns
		 * to which we now hold a reference, or it is NULL.
		 * We can't put it here under mq_lock, though.
		 */
		if (ipc_ns)
			ipc_ns->mq_queues_count--;
		spin_unlock(&mq_lock);
		free_uid(user);
	}
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
}

static int mqueue_create_attr(struct dentry *dentry, umode_t mode, void *arg)
{
	struct inode *dir = dentry->d_parent->d_inode;
	struct inode *inode;
	struct mq_attr *attr = arg;
	int error;
	struct ipc_namespace *ipc_ns;

	spin_lock(&mq_lock);
	ipc_ns = __get_ns_from_inode(dir);
	if (!ipc_ns) {
		error = -EACCES;
		goto out_unlock;
	}

	if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
	    !capable(CAP_SYS_RESOURCE)) {
		error = -ENOSPC;
		goto out_unlock;
	}
	ipc_ns->mq_queues_count++;
	spin_unlock(&mq_lock);

	inode = mqueue_get_inode(dir->i_sb, ipc_ns, mode, attr);
	if (IS_ERR(inode)) {
		error = PTR_ERR(inode);
		spin_lock(&mq_lock);
		ipc_ns->mq_queues_count--;
		goto out_unlock;
	}

	put_ipc_ns(ipc_ns);
	dir->i_size += DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);

	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
out_unlock:
	spin_unlock(&mq_lock);
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
	return error;
}

static int mqueue_create(struct user_namespace *mnt_userns, struct inode *dir,
			 struct dentry *dentry, umode_t mode, bool excl)
{
	return mqueue_create_attr(dentry, mode, NULL);
}

static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);

	dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);
	dir->i_size -= DIRENT_SIZE;
	drop_nlink(inode);
	dput(dentry);
	return 0;
}

/*
 * This is a routine for system reads from the queue file.
 * To avoid a mess with doing some sort of mq_receive here, we allow
 * reading only the queue size & notification info (the only values
 * that are interesting from the user's point of view and aren't
 * accessible through the standard routines).
 */
static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
				size_t count, loff_t *off)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
	char buffer[FILENT_SIZE];
	ssize_t ret;

	spin_lock(&info->lock);
	snprintf(buffer, sizeof(buffer),
			"QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n",
			info->qsize,
			info->notify_owner ? info->notify.sigev_notify : 0,
			(info->notify_owner &&
			 info->notify.sigev_notify == SIGEV_SIGNAL) ?
				info->notify.sigev_signo : 0,
			pid_vnr(info->notify_owner));
	spin_unlock(&info->lock);
	buffer[sizeof(buffer)-1] = '\0';

	ret = simple_read_from_buffer(u_data, count, off, buffer,
				strlen(buffer));
	if (ret <= 0)
		return ret;

	file_inode(filp)->i_atime = file_inode(filp)->i_ctime = current_time(file_inode(filp));
	return ret;
}

static int mqueue_flush_file(struct file *filp, fl_owner_t id)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));

	spin_lock(&info->lock);
	if (task_tgid(current) == info->notify_owner)
		remove_notification(info);

	spin_unlock(&info->lock);
	return 0;
}

static __poll_t mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
	__poll_t retval = 0;

	poll_wait(filp, &info->wait_q, poll_tab);

	spin_lock(&info->lock);
	if (info->attr.mq_curmsgs)
		retval = EPOLLIN | EPOLLRDNORM;

	if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
		retval |= EPOLLOUT | EPOLLWRNORM;
	spin_unlock(&info->lock);

	return retval;
}

/* Adds current to info->e_wait_q[sr] before element with smaller prio */
static void wq_add(struct mqueue_inode_info *info, int sr,
			struct ext_wait_queue *ewp)
{
	struct ext_wait_queue *walk;

	list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
		if (walk->task->prio <= current->prio) {
			list_add_tail(&ewp->list, &walk->list);
			return;
		}
	}
	list_add_tail(&ewp->list, &info->e_wait_q[sr].list);
}

/*
 * Puts current task to sleep. Caller must hold queue lock. After return
 * lock isn't held.
 * sr: SEND or RECV
 */
static int wq_sleep(struct mqueue_inode_info *info, int sr,
		    ktime_t *timeout, struct ext_wait_queue *ewp)
	__releases(&info->lock)
{
	int retval;
	signed long time;

	wq_add(info, sr, ewp);

	for (;;) {
		/* memory barrier not required, we hold info->lock */
		__set_current_state(TASK_INTERRUPTIBLE);

		spin_unlock(&info->lock);
		time = schedule_hrtimeout_range_clock(timeout, 0,
			HRTIMER_MODE_ABS, CLOCK_REALTIME);

		if (READ_ONCE(ewp->state) == STATE_READY) {
			/* see MQ_BARRIER for purpose/pairing */
			smp_acquire__after_ctrl_dep();
			retval = 0;
			goto out;
		}
		spin_lock(&info->lock);

		/* we hold info->lock, so no memory barrier required */
		if (READ_ONCE(ewp->state) == STATE_READY) {
			retval = 0;
			goto out_unlock;
		}
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		if (time == 0) {
			retval = -ETIMEDOUT;
			break;
		}
	}
	list_del(&ewp->list);
out_unlock:
	spin_unlock(&info->lock);
out:
	return retval;
}

/*
 * Returns waiting task that should be serviced first or NULL if none exists
 */
static struct ext_wait_queue *wq_get_first_waiter(
		struct mqueue_inode_info *info, int sr)
{
	struct list_head *ptr;

	ptr = info->e_wait_q[sr].list.prev;
	if (ptr == &info->e_wait_q[sr].list)
		return NULL;
	return list_entry(ptr, struct ext_wait_queue, list);
}


static inline void set_cookie(struct sk_buff *skb, char code)
{
	((char *)skb->data)[NOTIFY_COOKIE_LEN-1] = code;
}

/*
 * The next function is only to split too long sys_mq_timedsend
 */
static void __do_notify(struct mqueue_inode_info *info)
{
	/* notification
	 * invoked when there is registered process and there isn't process
	 * waiting synchronously for message AND state of queue changed from
	 * empty to not empty. Here we are sure that no one is waiting
	 * synchronously. */
	if (info->notify_owner &&
	    info->attr.mq_curmsgs == 1) {
		switch (info->notify.sigev_notify) {
		case SIGEV_NONE:
			break;
		case SIGEV_SIGNAL: {
			struct kernel_siginfo sig_i;
			struct task_struct *task;

			/* do_mq_notify() accepts sigev_signo == 0, why?? */
			if (!info->notify.sigev_signo)
				break;

			clear_siginfo(&sig_i);
			sig_i.si_signo = info->notify.sigev_signo;
			sig_i.si_errno = 0;
			sig_i.si_code = SI_MESGQ;
			sig_i.si_value = info->notify.sigev_value;
			rcu_read_lock();
			/* map current pid/uid into info->owner's namespaces */
			sig_i.si_pid = task_tgid_nr_ns(current,
						ns_of_pid(info->notify_owner));
			sig_i.si_uid = from_kuid_munged(info->notify_user_ns,
						current_uid());
			/*
			 * We can't use kill_pid_info(), this signal should
			 * bypass check_kill_permission(). It is from kernel
			 * but si_fromuser() can't know this.
			 * We do check the self_exec_id, to avoid sending
			 * signals to programs that don't expect them.
			 */
			task = pid_task(info->notify_owner, PIDTYPE_TGID);
			if (task && task->self_exec_id ==
						info->notify_self_exec_id) {
				do_send_sig_info(info->notify.sigev_signo,
						&sig_i, task, PIDTYPE_TGID);
			}
			rcu_read_unlock();
			break;
		}
		case SIGEV_THREAD:
			set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
			netlink_sendskb(info->notify_sock, info->notify_cookie);
			break;
		}
		/* after notification unregisters process */
		put_pid(info->notify_owner);
		put_user_ns(info->notify_user_ns);
		info->notify_owner = NULL;
		info->notify_user_ns = NULL;
	}
	wake_up(&info->wait_q);
}

static int prepare_timeout(const struct __kernel_timespec __user *u_abs_timeout,
			   struct timespec64 *ts)
{
	if (get_timespec64(ts, u_abs_timeout))
		return -EFAULT;
	if (!timespec64_valid(ts))
		return -EINVAL;
	return 0;
}

static void remove_notification(struct mqueue_inode_info *info)
{
	if (info->notify_owner != NULL &&
	    info->notify.sigev_notify == SIGEV_THREAD) {
		set_cookie(info->notify_cookie, NOTIFY_REMOVED);
		netlink_sendskb(info->notify_sock, info->notify_cookie);
	}
	put_pid(info->notify_owner);
	put_user_ns(info->notify_user_ns);
	info->notify_owner = NULL;
	info->notify_user_ns = NULL;
}

static int prepare_open(struct dentry *dentry, int oflag, int ro,
			umode_t mode, struct filename *name,
			struct mq_attr *attr)
{
	static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
						  MAY_READ | MAY_WRITE };
	int acc;

	if (d_really_is_negative(dentry)) {
		if (!(oflag & O_CREAT))
			return -ENOENT;
		if (ro)
			return ro;
		audit_inode_parent_hidden(name, dentry->d_parent);
		return vfs_mkobj(dentry, mode & ~current_umask(),
				  mqueue_create_attr, attr);
	}
	/* it already existed */
	audit_inode(name, dentry, 0);
	if ((oflag & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL))
		return -EEXIST;
	if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY))
		return -EINVAL;
	acc = oflag2acc[oflag & O_ACCMODE];
	return inode_permission(&init_user_ns, d_inode(dentry), acc);
}

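/*
 * Common implementation behind mq_open(2) and its compat variant: look the
 * queue name up under the current ipc namespace's mqueue mount, create it if
 * O_CREAT was requested, and install the opened file in a new descriptor.
 */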
static int do_mq_open(const char __user *u_name, int oflag, umode_t mode,
		      struct mq_attr *attr)
{
	struct vfsmount *mnt = current->nsproxy->ipc_ns->mq_mnt;
	struct dentry *root = mnt->mnt_root;
	struct filename *name;
	struct path path;
	int fd, error;
	int ro;

	audit_mq_open(oflag, mode, attr);

	if (IS_ERR(name = getname(u_name)))
		return PTR_ERR(name);

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		goto out_putname;

	ro = mnt_want_write(mnt);	/* we'll drop it in any case */
	inode_lock(d_inode(root));
	path.dentry = lookup_one_len(name->name, root, strlen(name->name));
	if (IS_ERR(path.dentry)) {
		error = PTR_ERR(path.dentry);
		goto out_putfd;
	}
	path.mnt = mntget(mnt);
	error = prepare_open(path.dentry, oflag, ro, mode, name, attr);
	if (!error) {
		struct file *file = dentry_open(&path, oflag, current_cred());
		if (!IS_ERR(file))
			fd_install(fd, file);
		else
			error = PTR_ERR(file);
	}
	path_put(&path);
out_putfd:
	if (error) {
		put_unused_fd(fd);
		fd = error;
	}
	inode_unlock(d_inode(root));
	if (!ro)
		mnt_drop_write(mnt);
out_putname:
	putname(name);
	return fd;
}

SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
		struct mq_attr __user *, u_attr)
{
	struct mq_attr attr;
	if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr)))
		return -EFAULT;

	return do_mq_open(u_name, oflag, mode, u_attr ? &attr : NULL);
}

SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
{
	int err;
	struct filename *name;
	struct dentry *dentry;
	struct inode *inode = NULL;
	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
	struct vfsmount *mnt = ipc_ns->mq_mnt;

	name = getname(u_name);
	if (IS_ERR(name))
		return PTR_ERR(name);

	audit_inode_parent_hidden(name, mnt->mnt_root);
	err = mnt_want_write(mnt);
	if (err)
		goto out_name;
	inode_lock_nested(d_inode(mnt->mnt_root), I_MUTEX_PARENT);
	dentry = lookup_one_len(name->name, mnt->mnt_root,
				strlen(name->name));
	if (IS_ERR(dentry)) {
		err = PTR_ERR(dentry);
		goto out_unlock;
	}

	inode = d_inode(dentry);
	if (!inode) {
		err = -ENOENT;
	} else {
		ihold(inode);
		err = vfs_unlink(&init_user_ns, d_inode(dentry->d_parent),
				 dentry, NULL);
	}
	dput(dentry);

out_unlock:
	inode_unlock(d_inode(mnt->mnt_root));
	if (inode)
		iput(inode);
	mnt_drop_write(mnt);
out_name:
	putname(name);

	return err;
}

/* Pipelined send and receive functions.
 *
 * If a receiver finds no waiting message, then it registers itself in the
 * list of waiting receivers. A sender checks that list before adding the new
 * message into the message array. If there is a waiting receiver, then it
 * bypasses the message array and directly hands the message over to the
 * receiver. The receiver accepts the message and returns without grabbing the
 * queue spinlock:
 *
 * - Set pointer to message.
 * - Queue the receiver task for later wakeup (without the info->lock).
 * - Update its state to STATE_READY. Now the receiver can continue.
 * - Wake up the process after the lock is dropped. Should the process wake up
 *   before this wakeup (due to a timeout or a signal) it will either see
 *   STATE_READY and continue or acquire the lock to check the state again.
 *
 * The same algorithm is used for senders.
 */

static inline void __pipelined_op(struct wake_q_head *wake_q,
				  struct mqueue_inode_info *info,
				  struct ext_wait_queue *this)
{
	struct task_struct *task;

	list_del(&this->list);
	task = get_task_struct(this->task);

	/* see MQ_BARRIER for purpose/pairing */
	smp_store_release(&this->state, STATE_READY);
	wake_q_add_safe(wake_q, task);
}

/* pipelined_send() - send a message directly to the task waiting in
 * sys_mq_timedreceive() (without inserting message into a queue).
 */
static inline void pipelined_send(struct wake_q_head *wake_q,
				  struct mqueue_inode_info *info,
				  struct msg_msg *message,
				  struct ext_wait_queue *receiver)
{
	receiver->msg = message;
	__pipelined_op(wake_q, info, receiver);
}

/* pipelined_receive() - if there is a task waiting in sys_mq_timedsend(),
 * get its message and put it on the queue (we have one free place for sure).
 */
static inline void pipelined_receive(struct wake_q_head *wake_q,
				     struct mqueue_inode_info *info)
{
	struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);

	if (!sender) {
		/* for poll */
		wake_up_interruptible(&info->wait_q);
		return;
	}
	if (msg_insert(sender->msg, info))
		return;

	__pipelined_op(wake_q, info, sender);
}

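/*
 * Common implementation behind mq_timedsend(2): either hand the message
 * directly to a waiting receiver (pipelined send), insert it into the
 * message tree, or sleep in wq_sleep() until space is available.
 */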
static int do_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr,
		size_t msg_len, unsigned int msg_prio,
		struct timespec64 *ts)
{
	struct fd f;
	struct inode *inode;
	struct ext_wait_queue wait;
	struct ext_wait_queue *receiver;
	struct msg_msg *msg_ptr;
	struct mqueue_inode_info *info;
	ktime_t expires, *timeout = NULL;
	struct posix_msg_tree_node *new_leaf = NULL;
	int ret = 0;
	DEFINE_WAKE_Q(wake_q);

	if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX))
		return -EINVAL;

	if (ts) {
		expires = timespec64_to_ktime(*ts);
		timeout = &expires;
	}

	audit_mq_sendrecv(mqdes, msg_len, msg_prio, ts);

	f = fdget(mqdes);
	if (unlikely(!f.file)) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);
	audit_file(f.file);

	if (unlikely(!(f.file->f_mode & FMODE_WRITE))) {
		ret = -EBADF;
		goto out_fput;
	}

	if (unlikely(msg_len > info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/* First try to allocate memory, before doing anything with
	 * existing queues. */
	msg_ptr = load_msg(u_msg_ptr, msg_len);
	if (IS_ERR(msg_ptr)) {
		ret = PTR_ERR(msg_ptr);
		goto out_fput;
	}
	msg_ptr->m_ts = msg_len;
	msg_ptr->m_type = msg_prio;

	/*
	 * msg_insert really wants us to have a valid, spare node struct so
	 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
	 * fall back to that if necessary.
	 */
	if (!info->node_cache)
		new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

	spin_lock(&info->lock);

	if (!info->node_cache && new_leaf) {
		/* Save our speculative allocation into the cache */
		INIT_LIST_HEAD(&new_leaf->msg_list);
		info->node_cache = new_leaf;
		new_leaf = NULL;
	} else {
		kfree(new_leaf);
	}

	if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
		if (f.file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
		} else {
			wait.task = current;
			wait.msg = (void *) msg_ptr;

			/* memory barrier not required, we hold info->lock */
			WRITE_ONCE(wait.state, STATE_NONE);
			ret = wq_sleep(info, SEND, timeout, &wait);
			/*
			 * wq_sleep must be called with info->lock held, and
			 * returns with the lock released
			 */
			goto out_free;
		}
	} else {
		receiver = wq_get_first_waiter(info, RECV);
		if (receiver) {
			pipelined_send(&wake_q, info, msg_ptr, receiver);
		} else {
			/* adds message to the queue */
			ret = msg_insert(msg_ptr, info);
			if (ret)
				goto out_unlock;
			__do_notify(info);
		}
		inode->i_atime = inode->i_mtime = inode->i_ctime =
				current_time(inode);
	}
out_unlock:
	spin_unlock(&info->lock);
	wake_up_q(&wake_q);
out_free:
	if (ret)
		free_msg(msg_ptr);
out_fput:
	fdput(f);
out:
	return ret;
}

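/*
 * Common implementation behind mq_timedreceive(2): take the oldest message of
 * the highest priority, or sleep in wq_sleep() until one arrives; a sender
 * blocked on a full queue is woken via pipelined_receive().
 */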
static int do_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr,
		size_t msg_len, unsigned int __user *u_msg_prio,
		struct timespec64 *ts)
{
	ssize_t ret;
	struct msg_msg *msg_ptr;
	struct fd f;
	struct inode *inode;
	struct mqueue_inode_info *info;
	struct ext_wait_queue wait;
	ktime_t expires, *timeout = NULL;
	struct posix_msg_tree_node *new_leaf = NULL;

	if (ts) {
		expires = timespec64_to_ktime(*ts);
		timeout = &expires;
	}

	audit_mq_sendrecv(mqdes, msg_len, 0, ts);

	f = fdget(mqdes);
	if (unlikely(!f.file)) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);
	audit_file(f.file);

	if (unlikely(!(f.file->f_mode & FMODE_READ))) {
		ret = -EBADF;
		goto out_fput;
	}

	/* checks if buffer is big enough */
	if (unlikely(msg_len < info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/*
	 * msg_insert really wants us to have a valid, spare node struct so
	 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
	 * fall back to that if necessary.
	 */
	if (!info->node_cache)
		new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

	spin_lock(&info->lock);

	if (!info->node_cache && new_leaf) {
		/* Save our speculative allocation into the cache */
		INIT_LIST_HEAD(&new_leaf->msg_list);
		info->node_cache = new_leaf;
	} else {
		kfree(new_leaf);
	}

	if (info->attr.mq_curmsgs == 0) {
		if (f.file->f_flags & O_NONBLOCK) {
			spin_unlock(&info->lock);
			ret = -EAGAIN;
		} else {
			wait.task = current;

			/* memory barrier not required, we hold info->lock */
			WRITE_ONCE(wait.state, STATE_NONE);
			ret = wq_sleep(info, RECV, timeout, &wait);
			msg_ptr = wait.msg;
		}
	} else {
		DEFINE_WAKE_Q(wake_q);

		msg_ptr = msg_get(info);

		inode->i_atime = inode->i_mtime = inode->i_ctime =
				current_time(inode);

		/* There is now free space in queue. */
		pipelined_receive(&wake_q, info);
		spin_unlock(&info->lock);
		wake_up_q(&wake_q);
		ret = 0;
	}
	if (ret == 0) {
		ret = msg_ptr->m_ts;

		if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) ||
			store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) {
			ret = -EFAULT;
		}
		free_msg(msg_ptr);
	}
out_fput:
	fdput(f);
out:
	return ret;
}

SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int, msg_prio,
		const struct __kernel_timespec __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p);
}

SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int __user *, u_msg_prio,
		const struct __kernel_timespec __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p);
}

/*
 * Notes: the case when a user wants us to deregister (with NULL as the
 * pointer) and isn't currently the owner of the notification will be
 * silently discarded. It isn't explicitly defined in POSIX.
 */
static int do_mq_notify(mqd_t mqdes, const struct sigevent *notification)
{
	int ret;
	struct fd f;
	struct sock *sock;
	struct inode *inode;
	struct mqueue_inode_info *info;
	struct sk_buff *nc;

	audit_mq_notify(mqdes, notification);

	nc = NULL;
	sock = NULL;
	if (notification != NULL) {
		if (unlikely(notification->sigev_notify != SIGEV_NONE &&
			     notification->sigev_notify != SIGEV_SIGNAL &&
			     notification->sigev_notify != SIGEV_THREAD))
			return -EINVAL;
		if (notification->sigev_notify == SIGEV_SIGNAL &&
			!valid_signal(notification->sigev_signo)) {
			return -EINVAL;
		}
		if (notification->sigev_notify == SIGEV_THREAD) {
			long timeo;

			/* create the notify skb */
			nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
			if (!nc)
				return -ENOMEM;

			if (copy_from_user(nc->data,
					notification->sigev_value.sival_ptr,
					NOTIFY_COOKIE_LEN)) {
				ret = -EFAULT;
				goto free_skb;
			}

			/* TODO: add a header? */
			skb_put(nc, NOTIFY_COOKIE_LEN);
			/* and attach it to the socket */
retry:
			f = fdget(notification->sigev_signo);
			if (!f.file) {
				ret = -EBADF;
				goto out;
			}
			sock = netlink_getsockbyfilp(f.file);
			fdput(f);
			if (IS_ERR(sock)) {
				ret = PTR_ERR(sock);
				goto free_skb;
			}

			timeo = MAX_SCHEDULE_TIMEOUT;
			ret = netlink_attachskb(sock, nc, &timeo, NULL);
			if (ret == 1) {
				sock = NULL;
				goto retry;
			}
			if (ret)
				return ret;
		}
	}

	f = fdget(mqdes);
	if (!f.file) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);

	ret = 0;
	spin_lock(&info->lock);
	if (notification == NULL) {
		if (info->notify_owner == task_tgid(current)) {
			remove_notification(info);
			inode->i_atime = inode->i_ctime = current_time(inode);
		}
	} else if (info->notify_owner != NULL) {
		ret = -EBUSY;
	} else {
		switch (notification->sigev_notify) {
		case SIGEV_NONE:
			info->notify.sigev_notify = SIGEV_NONE;
			break;
		case SIGEV_THREAD:
			info->notify_sock = sock;
			info->notify_cookie = nc;
			sock = NULL;
			nc = NULL;
			info->notify.sigev_notify = SIGEV_THREAD;
			break;
		case SIGEV_SIGNAL:
			info->notify.sigev_signo = notification->sigev_signo;
			info->notify.sigev_value = notification->sigev_value;
			info->notify.sigev_notify = SIGEV_SIGNAL;
			info->notify_self_exec_id = current->self_exec_id;
			break;
		}

		info->notify_owner = get_pid(task_tgid(current));
		info->notify_user_ns = get_user_ns(current_user_ns());
		inode->i_atime = inode->i_ctime = current_time(inode);
	}
	spin_unlock(&info->lock);
out_fput:
	fdput(f);
out:
	if (sock)
		netlink_detachskb(sock, nc);
	else
free_skb:
		dev_kfree_skb(nc);

	return ret;
}

SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
		const struct sigevent __user *, u_notification)
{
	struct sigevent n, *p = NULL;
	if (u_notification) {
		if (copy_from_user(&n, u_notification, sizeof(struct sigevent)))
			return -EFAULT;
		p = &n;
	}
	return do_mq_notify(mqdes, p);
}

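/*
 * Common implementation behind mq_getsetattr(2): report the queue attributes
 * and the current O_NONBLOCK state, and optionally update the O_NONBLOCK
 * flag (the only attribute that can be changed after creation).
 */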
static int do_mq_getsetattr(int mqdes, struct mq_attr *new, struct mq_attr *old)
{
	struct fd f;
	struct inode *inode;
	struct mqueue_inode_info *info;

	if (new && (new->mq_flags & (~O_NONBLOCK)))
		return -EINVAL;

	f = fdget(mqdes);
	if (!f.file)
		return -EBADF;

	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		fdput(f);
		return -EBADF;
	}

	inode = file_inode(f.file);
	info = MQUEUE_I(inode);

	spin_lock(&info->lock);

	if (old) {
		*old = info->attr;
		old->mq_flags = f.file->f_flags & O_NONBLOCK;
	}
	if (new) {
		audit_mq_getsetattr(mqdes, new);
		spin_lock(&f.file->f_lock);
		if (new->mq_flags & O_NONBLOCK)
			f.file->f_flags |= O_NONBLOCK;
		else
			f.file->f_flags &= ~O_NONBLOCK;
		spin_unlock(&f.file->f_lock);

		inode->i_atime = inode->i_ctime = current_time(inode);
	}

	spin_unlock(&info->lock);
	fdput(f);
	return 0;
}

SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
		const struct mq_attr __user *, u_mqstat,
		struct mq_attr __user *, u_omqstat)
{
	int ret;
	struct mq_attr mqstat, omqstat;
	struct mq_attr *new = NULL, *old = NULL;

	if (u_mqstat) {
		new = &mqstat;
		if (copy_from_user(new, u_mqstat, sizeof(struct mq_attr)))
			return -EFAULT;
	}
	if (u_omqstat)
		old = &omqstat;

	ret = do_mq_getsetattr(mqdes, new, old);
	if (ret || !old)
		return ret;

	if (copy_to_user(u_omqstat, old, sizeof(struct mq_attr)))
		return -EFAULT;
	return 0;
}

#ifdef CONFIG_COMPAT

struct compat_mq_attr {
	compat_long_t mq_flags;      /* message queue flags		      */
	compat_long_t mq_maxmsg;     /* maximum number of messages	      */
	compat_long_t mq_msgsize;    /* maximum message size		      */
	compat_long_t mq_curmsgs;    /* number of messages currently queued  */
	compat_long_t __reserved[4]; /* ignored for input, zeroed for output */
};

static inline int get_compat_mq_attr(struct mq_attr *attr,
			const struct compat_mq_attr __user *uattr)
{
	struct compat_mq_attr v;

	if (copy_from_user(&v, uattr, sizeof(*uattr)))
		return -EFAULT;

	memset(attr, 0, sizeof(*attr));
	attr->mq_flags = v.mq_flags;
	attr->mq_maxmsg = v.mq_maxmsg;
	attr->mq_msgsize = v.mq_msgsize;
	attr->mq_curmsgs = v.mq_curmsgs;
	return 0;
}

static inline int put_compat_mq_attr(const struct mq_attr *attr,
			struct compat_mq_attr __user *uattr)
{
	struct compat_mq_attr v;

	memset(&v, 0, sizeof(v));
	v.mq_flags = attr->mq_flags;
	v.mq_maxmsg = attr->mq_maxmsg;
	v.mq_msgsize = attr->mq_msgsize;
	v.mq_curmsgs = attr->mq_curmsgs;
	if (copy_to_user(uattr, &v, sizeof(*uattr)))
		return -EFAULT;
	return 0;
}

COMPAT_SYSCALL_DEFINE4(mq_open, const char __user *, u_name,
		       int, oflag, compat_mode_t, mode,
		       struct compat_mq_attr __user *, u_attr)
{
	struct mq_attr attr, *p = NULL;
	if (u_attr && oflag & O_CREAT) {
		p = &attr;
		if (get_compat_mq_attr(&attr, u_attr))
			return -EFAULT;
	}
	return do_mq_open(u_name, oflag, mode, p);
}

COMPAT_SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
		       const struct compat_sigevent __user *, u_notification)
{
	struct sigevent n, *p = NULL;
	if (u_notification) {
		if (get_compat_sigevent(&n, u_notification))
			return -EFAULT;
		if (n.sigev_notify == SIGEV_THREAD)
			n.sigev_value.sival_ptr = compat_ptr(n.sigev_value.sival_int);
		p = &n;
	}
	return do_mq_notify(mqdes, p);
}

COMPAT_SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
		       const struct compat_mq_attr __user *, u_mqstat,
		       struct compat_mq_attr __user *, u_omqstat)
{
	int ret;
	struct mq_attr mqstat, omqstat;
	struct mq_attr *new = NULL, *old = NULL;

	if (u_mqstat) {
		new = &mqstat;
		if (get_compat_mq_attr(new, u_mqstat))
			return -EFAULT;
	}
	if (u_omqstat)
		old = &omqstat;

	ret = do_mq_getsetattr(mqdes, new, old);
	if (ret || !old)
		return ret;

	if (put_compat_mq_attr(old, u_omqstat))
		return -EFAULT;
	return 0;
}
#endif

#ifdef CONFIG_COMPAT_32BIT_TIME
static int compat_prepare_timeout(const struct old_timespec32 __user *p,
				   struct timespec64 *ts)
{
	if (get_old_timespec32(ts, p))
		return -EFAULT;
	if (!timespec64_valid(ts))
		return -EINVAL;
	return 0;
}

SYSCALL_DEFINE5(mq_timedsend_time32, mqd_t, mqdes,
		const char __user *, u_msg_ptr,
		unsigned int, msg_len, unsigned int, msg_prio,
		const struct old_timespec32 __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = compat_prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p);
}

SYSCALL_DEFINE5(mq_timedreceive_time32, mqd_t, mqdes,
		char __user *, u_msg_ptr,
		unsigned int, msg_len, unsigned int __user *, u_msg_prio,
		const struct old_timespec32 __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = compat_prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p);
}
#endif

static const struct inode_operations mqueue_dir_inode_operations = {
	.lookup = simple_lookup,
	.create = mqueue_create,
	.unlink = mqueue_unlink,
};

static const struct file_operations mqueue_file_operations = {
	.flush = mqueue_flush_file,
	.poll = mqueue_poll_file,
	.read = mqueue_read_file,
	.llseek = default_llseek,
};

static const struct super_operations mqueue_super_ops = {
	.alloc_inode = mqueue_alloc_inode,
	.free_inode = mqueue_free_inode,
	.evict_inode = mqueue_evict_inode,
	.statfs = simple_statfs,
};

static const struct fs_context_operations mqueue_fs_context_ops = {
	.free = mqueue_fs_context_free,
	.get_tree = mqueue_get_tree,
};

static struct file_system_type mqueue_fs_type = {
	.name			= "mqueue",
	.init_fs_context	= mqueue_init_fs_context,
	.kill_sb		= kill_litter_super,
	.fs_flags		= FS_USERNS_MOUNT,
};

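/*
 * Called when an ipc namespace is set up: initialise the per-namespace queue
 * limits to their defaults and create the namespace's internal mqueue mount.
 */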
int mq_init_ns(struct ipc_namespace *ns)
{
	struct vfsmount *m;

	ns->mq_queues_count	= 0;
	ns->mq_queues_max	= DFLT_QUEUESMAX;
	ns->mq_msg_max		= DFLT_MSGMAX;
	ns->mq_msgsize_max	= DFLT_MSGSIZEMAX;
	ns->mq_msg_default	= DFLT_MSG;
	ns->mq_msgsize_default	= DFLT_MSGSIZE;

	m = mq_create_mount(ns);
	if (IS_ERR(m))
		return PTR_ERR(m);
	ns->mq_mnt = m;
	return 0;
}

void mq_clear_sbinfo(struct ipc_namespace *ns)
{
	ns->mq_mnt->mnt_sb->s_fs_info = NULL;
}

void mq_put_mnt(struct ipc_namespace *ns)
{
	kern_unmount(ns->mq_mnt);
}

static int __init init_mqueue_fs(void)
{
	int error;

	mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
				sizeof(struct mqueue_inode_info), 0,
				SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT, init_once);
	if (mqueue_inode_cachep == NULL)
		return -ENOMEM;

	/* ignore failures - they are not fatal */
	mq_sysctl_table = mq_register_sysctl_table();

	error = register_filesystem(&mqueue_fs_type);
	if (error)
		goto out_sysctl;

	spin_lock_init(&mq_lock);

	error = mq_init_ns(&init_ipc_ns);
	if (error)
		goto out_filesystem;

	return 0;

out_filesystem:
	unregister_filesystem(&mqueue_fs_type);
out_sysctl:
	if (mq_sysctl_table)
		unregister_sysctl_table(mq_sysctl_table);
	kmem_cache_destroy(mqueue_inode_cachep);
	return error;
}

device_initcall(init_mqueue_fs);