/*
 * linux/ipc/msg.c
 * Copyright (C) 1992 Krishna Balasubramanian
 *
 * Removed all the remaining kerneld mess
 * Catch the -EFAULT stuff properly
 * Use GFP_KERNEL for messages as in 1.2
 * Fixed up the unchecked user space derefs
 * Copyright (C) 1998 Alan Cox & Andi Kleen
 *
 * /proc/sysvipc/msg support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 *
 * mostly rewritten, threaded and wake-one semantics added
 * MSGMAX limit removed, sysctl's added
 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 */

#include <linux/capability.h>
#include <linux/slab.h>
#include <linux/msg.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/list.h>
#include <linux/security.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/ipc_namespace.h>

#include <asm/current.h>
#include <asm/uaccess.h>
#include "util.h"

/*
 * one msg_receiver structure for each sleeping receiver:
 */
struct msg_receiver {
	struct list_head	r_list;
	struct task_struct	*r_tsk;

	int			r_mode;
	long			r_msgtype;
	long			r_maxsize;

	struct msg_msg		*volatile r_msg;
};

/* one msg_sender for each sleeping sender */
struct msg_sender {
	struct list_head	list;
	struct task_struct	*tsk;
};

#define SEARCH_ANY		1
#define SEARCH_EQUAL		2
#define SEARCH_NOTEQUAL		3
#define SEARCH_LESSEQUAL	4

#define msg_ids(ns)	((ns)->ids[IPC_MSG_IDS])

#define msg_unlock(msq)	ipc_unlock(&(msq)->q_perm)

static void freeque(struct ipc_namespace *, struct kern_ipc_perm *);
static int newque(struct ipc_namespace *, struct ipc_params *);
#ifdef CONFIG_PROC_FS
static int sysvipc_msg_proc_show(struct seq_file *s, void *it);
#endif

/*
 * Scale msgmni with the available lowmem size: the memory dedicated to msg
 * queues should occupy at most 1/MSG_MEM_SCALE of lowmem.
 * Also take into account the number of nsproxies created so far.
 * This should be done staying within the (MSGMNI, IPCMNI/nr_ipc_ns) range.
 */
void recompute_msgmni(struct ipc_namespace *ns)
{
	struct sysinfo i;
	unsigned long allowed;
	int nb_ns;

	si_meminfo(&i);
	allowed = (((i.totalram - i.totalhigh) / MSG_MEM_SCALE) * i.mem_unit)
		/ MSGMNB;
	nb_ns = atomic_read(&nr_ipc_ns);
	allowed /= nb_ns;

	if (allowed < MSGMNI) {
		ns->msg_ctlmni = MSGMNI;
		goto out_callback;
	}

	if (allowed > IPCMNI / nb_ns) {
		ns->msg_ctlmni = IPCMNI / nb_ns;
		goto out_callback;
	}

	ns->msg_ctlmni = allowed;

out_callback:

	printk(KERN_INFO "msgmni has been set to %d for ipc namespace %p\n",
		ns->msg_ctlmni, ns);
}
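
/*
 * Worked example of the scaling above (illustrative figures, assuming the
 * MSG_MEM_SCALE = 32, MSGMNB = 16384, MSGMNI = 16 and IPCMNI = 32768 values
 * of this era, with 4 KiB pages): given 1 GiB of lowmem,
 * i.totalram - i.totalhigh = 262144 pages, so
 * allowed = (262144 / 32 * 4096) / 16384 = 2048 queues. With a single ipc
 * namespace that lies between MSGMNI and IPCMNI, so msg_ctlmni becomes 2048.
 */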

void msg_init_ns(struct ipc_namespace *ns)
{
	ns->msg_ctlmax = MSGMAX;
	ns->msg_ctlmnb = MSGMNB;

	recompute_msgmni(ns);

	atomic_set(&ns->msg_bytes, 0);
	atomic_set(&ns->msg_hdrs, 0);
	ipc_init_ids(&ns->ids[IPC_MSG_IDS]);
}

#ifdef CONFIG_IPC_NS
void msg_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &msg_ids(ns), freeque);
}
#endif

void __init msg_init(void)
{
	msg_init_ns(&init_ipc_ns);
	ipc_init_proc_interface("sysvipc/msg",
				"       key      msqid perms      cbytes       qnum lspid lrpid   uid   gid  cuid  cgid      stime      rtime      ctime\n",
				IPC_MSG_IDS, sysvipc_msg_proc_show);
}

/*
 * This routine is called in the paths where the rw_mutex is held to protect
 * access to the idr tree.
 */
static inline struct msg_queue *msg_lock_check_down(struct ipc_namespace *ns,
						int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock_check_down(&msg_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct msg_queue *)ipcp;

	return container_of(ipcp, struct msg_queue, q_perm);
}

/*
 * msg_lock() and msg_lock_check() are called in the paths where the rw_mutex
 * is not held.
 */
static inline struct msg_queue *msg_lock(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock(&msg_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct msg_queue *)ipcp;

	return container_of(ipcp, struct msg_queue, q_perm);
}

static inline struct msg_queue *msg_lock_check(struct ipc_namespace *ns,
						int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock_check(&msg_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct msg_queue *)ipcp;

	return container_of(ipcp, struct msg_queue, q_perm);
}

static inline void msg_rmid(struct ipc_namespace *ns, struct msg_queue *s)
{
	ipc_rmid(&msg_ids(ns), &s->q_perm);
}

/**
 * newque - Create a new msg queue
 * @ns: namespace
 * @params: ptr to the structure that contains the key and msgflg
 *
 * Called with msg_ids.rw_mutex held (writer)
 */
static int newque(struct ipc_namespace *ns, struct ipc_params *params)
{
	struct msg_queue *msq;
	int id, retval;
	key_t key = params->key;
	int msgflg = params->flg;

	msq = ipc_rcu_alloc(sizeof(*msq));
	if (!msq)
		return -ENOMEM;

	msq->q_perm.mode = msgflg & S_IRWXUGO;
	msq->q_perm.key = key;

	msq->q_perm.security = NULL;
	retval = security_msg_queue_alloc(msq);
	if (retval) {
		ipc_rcu_putref(msq);
		return retval;
	}

	/*
	 * ipc_addid() locks msq
	 */
	id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
	if (id < 0) {
		security_msg_queue_free(msq);
		ipc_rcu_putref(msq);
		return id;
	}

	msq->q_stime = msq->q_rtime = 0;
	msq->q_ctime = get_seconds();
	msq->q_cbytes = msq->q_qnum = 0;
	msq->q_qbytes = ns->msg_ctlmnb;
	msq->q_lspid = msq->q_lrpid = 0;
	INIT_LIST_HEAD(&msq->q_messages);
	INIT_LIST_HEAD(&msq->q_receivers);
	INIT_LIST_HEAD(&msq->q_senders);

	msg_unlock(msq);

	return msq->q_perm.id;
}

static inline void ss_add(struct msg_queue *msq, struct msg_sender *mss)
{
	mss->tsk = current;
	current->state = TASK_INTERRUPTIBLE;
	list_add_tail(&mss->list, &msq->q_senders);
}

static inline void ss_del(struct msg_sender *mss)
{
	if (mss->list.next != NULL)
		list_del(&mss->list);
}

static void ss_wakeup(struct list_head *h, int kill)
{
	struct list_head *tmp;

	tmp = h->next;
	while (tmp != h) {
		struct msg_sender *mss;

		mss = list_entry(tmp, struct msg_sender, list);
		tmp = tmp->next;
		if (kill)
			mss->list.next = NULL;
		wake_up_process(mss->tsk);
	}
}

static void expunge_all(struct msg_queue *msq, int res)
{
	struct list_head *tmp;

	tmp = msq->q_receivers.next;
	while (tmp != &msq->q_receivers) {
		struct msg_receiver *msr;

		msr = list_entry(tmp, struct msg_receiver, r_list);
		tmp = tmp->next;
		msr->r_msg = NULL;
		wake_up_process(msr->r_tsk);
		smp_mb();
		msr->r_msg = ERR_PTR(res);
	}
}
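
/*
 * Note on the wakeup handshake used by expunge_all() and pipelined_send():
 * r_msg is first set to NULL, the receiver is woken, and only after the
 * barrier is r_msg given its final value (a message or an ERR_PTR). The
 * woken receiver spins while r_msg == NULL (see the lockless receive in
 * do_msgrcv()), so it never observes a half-updated result.
 */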

/*
 * freeque() wakes up waiters on the sender and receiver waiting queue,
 * removes the message queue from message queue ID IDR, and cleans up all the
 * messages associated with this queue.
 *
 * msg_ids.rw_mutex (writer) and the spinlock for this message queue are held
 * before freeque() is called. msg_ids.rw_mutex remains locked on exit.
 */
static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct list_head *tmp;
	struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm);

	expunge_all(msq, -EIDRM);
	ss_wakeup(&msq->q_senders, 1);
	msg_rmid(ns, msq);
	msg_unlock(msq);

	tmp = msq->q_messages.next;
	while (tmp != &msq->q_messages) {
		struct msg_msg *msg = list_entry(tmp, struct msg_msg, m_list);

		tmp = tmp->next;
		atomic_dec(&ns->msg_hdrs);
		free_msg(msg);
	}
	atomic_sub(msq->q_cbytes, &ns->msg_bytes);
	security_msg_queue_free(msq);
	ipc_rcu_putref(msq);
}

/*
 * Called with msg_ids.rw_mutex and ipcp locked.
 */
static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
{
	struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm);

	return security_msg_queue_associate(msq, msgflg);
}

asmlinkage long sys_msgget(key_t key, int msgflg)
{
	struct ipc_namespace *ns;
	struct ipc_ops msg_ops;
	struct ipc_params msg_params;

	ns = current->nsproxy->ipc_ns;

	msg_ops.getnew = newque;
	msg_ops.associate = msg_security;
	msg_ops.more_checks = NULL;

	msg_params.key = key;
	msg_params.flg = msgflg;

	return ipcget(ns, &msg_ids(ns), &msg_ops, &msg_params);
}
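
/*
 * Userspace view of the syscall above -- a minimal sketch, not part of this
 * file, assuming glibc's System V IPC wrappers. It creates (or looks up) a
 * queue for an arbitrary example key 0x1234 with 0666 permissions.
 */
#if 0	/* illustration only, compiled nowhere */
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <stdio.h>

int example_msgget(void)
{
	/* IPC_CREAT: create the queue if the key is not yet bound */
	int msqid = msgget((key_t)0x1234, IPC_CREAT | 0666);

	if (msqid < 0)
		perror("msgget");
	return msqid;
}
#endif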

static inline unsigned long
copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	{
		struct msqid_ds out;

		memset(&out, 0, sizeof(out));

		ipc64_perm_to_ipc_perm(&in->msg_perm, &out.msg_perm);

		out.msg_stime = in->msg_stime;
		out.msg_rtime = in->msg_rtime;
		out.msg_ctime = in->msg_ctime;

		if (in->msg_cbytes > USHRT_MAX)
			out.msg_cbytes = USHRT_MAX;
		else
			out.msg_cbytes = in->msg_cbytes;
		out.msg_lcbytes = in->msg_cbytes;

		if (in->msg_qnum > USHRT_MAX)
			out.msg_qnum = USHRT_MAX;
		else
			out.msg_qnum = in->msg_qnum;

		if (in->msg_qbytes > USHRT_MAX)
			out.msg_qbytes = USHRT_MAX;
		else
			out.msg_qbytes = in->msg_qbytes;
		out.msg_lqbytes = in->msg_qbytes;

		out.msg_lspid = in->msg_lspid;
		out.msg_lrpid = in->msg_lrpid;

		return copy_to_user(buf, &out, sizeof(out));
	}
	default:
		return -EINVAL;
	}
}

static inline unsigned long
copy_msqid_from_user(struct msqid64_ds *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	{
		struct msqid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->msg_perm.uid = tbuf_old.msg_perm.uid;
		out->msg_perm.gid = tbuf_old.msg_perm.gid;
		out->msg_perm.mode = tbuf_old.msg_perm.mode;

		if (tbuf_old.msg_qbytes == 0)
			out->msg_qbytes = tbuf_old.msg_lqbytes;
		else
			out->msg_qbytes = tbuf_old.msg_qbytes;

		return 0;
	}
	default:
		return -EINVAL;
	}
}

/*
 * This function handles some msgctl commands which require the rw_mutex
 * to be held in write mode.
 * NOTE: no locks must be held, the rw_mutex is taken inside this function.
 */
static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd,
		       struct msqid_ds __user *buf, int version)
{
	struct kern_ipc_perm *ipcp;
	struct msqid64_ds msqid64;
	struct msg_queue *msq;
	int err;

	if (cmd == IPC_SET) {
		if (copy_msqid_from_user(&msqid64, buf, version))
			return -EFAULT;
	}

	down_write(&msg_ids(ns).rw_mutex);
	msq = msg_lock_check_down(ns, msqid);
	if (IS_ERR(msq)) {
		err = PTR_ERR(msq);
		goto out_up;
	}

	ipcp = &msq->q_perm;

	err = audit_ipc_obj(ipcp);
	if (err)
		goto out_unlock;

	if (cmd == IPC_SET) {
		err = audit_ipc_set_perm(msqid64.msg_qbytes,
					 msqid64.msg_perm.uid,
					 msqid64.msg_perm.gid,
					 msqid64.msg_perm.mode);
		if (err)
			goto out_unlock;
	}

	if (current->euid != ipcp->cuid &&
	    current->euid != ipcp->uid &&
	    !capable(CAP_SYS_ADMIN)) {
		/* We _could_ check for CAP_CHOWN above, but we don't */
		err = -EPERM;
		goto out_unlock;
	}

	err = security_msg_queue_msgctl(msq, cmd);
	if (err)
		goto out_unlock;

	switch (cmd) {
	case IPC_RMID:
		freeque(ns, ipcp);
		goto out_up;
	case IPC_SET:
		if (msqid64.msg_qbytes > ns->msg_ctlmnb &&
		    !capable(CAP_SYS_RESOURCE)) {
			err = -EPERM;
			goto out_unlock;
		}

		msq->q_qbytes = msqid64.msg_qbytes;

		ipcp->uid = msqid64.msg_perm.uid;
		ipcp->gid = msqid64.msg_perm.gid;
		ipcp->mode = (ipcp->mode & ~S_IRWXUGO) |
			     (S_IRWXUGO & msqid64.msg_perm.mode);
		msq->q_ctime = get_seconds();
		/* sleeping receivers might be excluded by
		 * stricter permissions.
		 */
		expunge_all(msq, -EAGAIN);
		/* sleeping senders might be able to send
		 * due to a larger queue size.
		 */
		ss_wakeup(&msq->q_senders, 0);
		break;
	default:
		err = -EINVAL;
	}
out_unlock:
	msg_unlock(msq);
out_up:
	up_write(&msg_ids(ns).rw_mutex);
	return err;
}

asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf)
{
	struct msg_queue *msq;
	int err, version;
	struct ipc_namespace *ns;

	if (msqid < 0 || cmd < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) {
	case IPC_INFO:
	case MSG_INFO:
	{
		struct msginfo msginfo;
		int max_id;

		if (!buf)
			return -EFAULT;
		/*
		 * We must not return kernel stack data: due to padding,
		 * it's not enough to set all member fields.
		 */
		err = security_msg_queue_msgctl(NULL, cmd);
		if (err)
			return err;

		memset(&msginfo, 0, sizeof(msginfo));
		msginfo.msgmni = ns->msg_ctlmni;
		msginfo.msgmax = ns->msg_ctlmax;
		msginfo.msgmnb = ns->msg_ctlmnb;
		msginfo.msgssz = MSGSSZ;
		msginfo.msgseg = MSGSEG;
		down_read(&msg_ids(ns).rw_mutex);
		if (cmd == MSG_INFO) {
			msginfo.msgpool = msg_ids(ns).in_use;
			msginfo.msgmap = atomic_read(&ns->msg_hdrs);
			msginfo.msgtql = atomic_read(&ns->msg_bytes);
		} else {
			msginfo.msgmap = MSGMAP;
			msginfo.msgpool = MSGPOOL;
			msginfo.msgtql = MSGTQL;
		}
		max_id = ipc_get_maxid(&msg_ids(ns));
		up_read(&msg_ids(ns).rw_mutex);
		if (copy_to_user(buf, &msginfo, sizeof(struct msginfo)))
			return -EFAULT;
		return (max_id < 0) ? 0 : max_id;
	}
	case MSG_STAT:	/* msqid is an index rather than a msg queue id */
	case IPC_STAT:
	{
		struct msqid64_ds tbuf;
		int success_return;

		if (!buf)
			return -EFAULT;

		if (cmd == MSG_STAT) {
			msq = msg_lock(ns, msqid);
			if (IS_ERR(msq))
				return PTR_ERR(msq);
			success_return = msq->q_perm.id;
		} else {
			msq = msg_lock_check(ns, msqid);
			if (IS_ERR(msq))
				return PTR_ERR(msq);
			success_return = 0;
		}
		err = -EACCES;
		if (ipcperms(&msq->q_perm, S_IRUGO))
			goto out_unlock;

		err = security_msg_queue_msgctl(msq, cmd);
		if (err)
			goto out_unlock;

		memset(&tbuf, 0, sizeof(tbuf));

		kernel_to_ipc64_perm(&msq->q_perm, &tbuf.msg_perm);
		tbuf.msg_stime = msq->q_stime;
		tbuf.msg_rtime = msq->q_rtime;
		tbuf.msg_ctime = msq->q_ctime;
		tbuf.msg_cbytes = msq->q_cbytes;
		tbuf.msg_qnum = msq->q_qnum;
		tbuf.msg_qbytes = msq->q_qbytes;
		tbuf.msg_lspid = msq->q_lspid;
		tbuf.msg_lrpid = msq->q_lrpid;
		msg_unlock(msq);
		if (copy_msqid_to_user(buf, &tbuf, version))
			return -EFAULT;
		return success_return;
	}
	case IPC_SET:
	case IPC_RMID:
		err = msgctl_down(ns, msqid, cmd, buf, version);
		return err;
	default:
		return -EINVAL;
	}

out_unlock:
	msg_unlock(msq);
	return err;
}
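
/*
 * Userspace view of sys_msgctl() -- a minimal sketch, not part of this file,
 * assuming glibc wrappers. IPC_STAT fills a struct msqid_ds; IPC_RMID tears
 * the queue down, which is what drives freeque() above.
 */
#if 0	/* illustration only */
#include <sys/msg.h>
#include <stdio.h>

void example_msgctl(int msqid)
{
	struct msqid_ds ds;

	if (msgctl(msqid, IPC_STAT, &ds) == 0)
		printf("queue holds %lu message(s)\n",
		       (unsigned long)ds.msg_qnum);

	/* remove the queue; sleeping senders/receivers get woken with EIDRM */
	if (msgctl(msqid, IPC_RMID, NULL) != 0)
		perror("msgctl(IPC_RMID)");
}
#endif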

static int testmsg(struct msg_msg *msg, long type, int mode)
{
	switch (mode) {
	case SEARCH_ANY:
		return 1;
	case SEARCH_LESSEQUAL:
		if (msg->m_type <= type)
			return 1;
		break;
	case SEARCH_EQUAL:
		if (msg->m_type == type)
			return 1;
		break;
	case SEARCH_NOTEQUAL:
		if (msg->m_type != type)
			return 1;
		break;
	}
	return 0;
}

static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
{
	struct list_head *tmp;

	tmp = msq->q_receivers.next;
	while (tmp != &msq->q_receivers) {
		struct msg_receiver *msr;

		msr = list_entry(tmp, struct msg_receiver, r_list);
		tmp = tmp->next;
		if (testmsg(msg, msr->r_msgtype, msr->r_mode) &&
		    !security_msg_queue_msgrcv(msq, msg, msr->r_tsk,
					       msr->r_msgtype, msr->r_mode)) {

			list_del(&msr->r_list);
			if (msr->r_maxsize < msg->m_ts) {
				msr->r_msg = NULL;
				wake_up_process(msr->r_tsk);
				smp_mb();
				msr->r_msg = ERR_PTR(-E2BIG);
			} else {
				msr->r_msg = NULL;
				msq->q_lrpid = task_pid_vnr(msr->r_tsk);
				msq->q_rtime = get_seconds();
				wake_up_process(msr->r_tsk);
				smp_mb();
				msr->r_msg = msg;

				return 1;
			}
		}
	}
	return 0;
}

long do_msgsnd(int msqid, long mtype, void __user *mtext,
		size_t msgsz, int msgflg)
{
	struct msg_queue *msq;
	struct msg_msg *msg;
	int err;
	struct ipc_namespace *ns;

	ns = current->nsproxy->ipc_ns;

	if (msgsz > ns->msg_ctlmax || (long) msgsz < 0 || msqid < 0)
		return -EINVAL;
	if (mtype < 1)
		return -EINVAL;

	msg = load_msg(mtext, msgsz);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->m_type = mtype;
	msg->m_ts = msgsz;

	msq = msg_lock_check(ns, msqid);
	if (IS_ERR(msq)) {
		err = PTR_ERR(msq);
		goto out_free;
	}

	for (;;) {
		struct msg_sender s;

		err = -EACCES;
		if (ipcperms(&msq->q_perm, S_IWUGO))
			goto out_unlock_free;

		err = security_msg_queue_msgsnd(msq, msg, msgflg);
		if (err)
			goto out_unlock_free;

		if (msgsz + msq->q_cbytes <= msq->q_qbytes &&
		    1 + msq->q_qnum <= msq->q_qbytes) {
			break;
		}

		/* queue full, wait: */
		if (msgflg & IPC_NOWAIT) {
			err = -EAGAIN;
			goto out_unlock_free;
		}
		ss_add(msq, &s);
		ipc_rcu_getref(msq);
		msg_unlock(msq);
		schedule();

		ipc_lock_by_ptr(&msq->q_perm);
		ipc_rcu_putref(msq);
		if (msq->q_perm.deleted) {
			err = -EIDRM;
			goto out_unlock_free;
		}
		ss_del(&s);

		if (signal_pending(current)) {
			err = -ERESTARTNOHAND;
			goto out_unlock_free;
		}
	}

	msq->q_lspid = task_tgid_vnr(current);
	msq->q_stime = get_seconds();

	if (!pipelined_send(msq, msg)) {
		/* no one is waiting for this message, enqueue it */
		list_add_tail(&msg->m_list, &msq->q_messages);
		msq->q_cbytes += msgsz;
		msq->q_qnum++;
		atomic_add(msgsz, &ns->msg_bytes);
		atomic_inc(&ns->msg_hdrs);
	}

	err = 0;
	msg = NULL;

out_unlock_free:
	msg_unlock(msq);
out_free:
	if (msg != NULL)
		free_msg(msg);
	return err;
}

asmlinkage long
sys_msgsnd(int msqid, struct msgbuf __user *msgp, size_t msgsz, int msgflg)
{
	long mtype;

	if (get_user(mtype, &msgp->mtype))
		return -EFAULT;
	return do_msgsnd(msqid, mtype, msgp->mtext, msgsz, msgflg);
}
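
/*
 * Userspace view of sys_msgsnd() -- a minimal sketch, not part of this file,
 * assuming glibc wrappers. Note that msgsz counts only mtext; the mtype word
 * is fetched separately, exactly as sys_msgsnd() does above.
 */
#if 0	/* illustration only */
#include <sys/msg.h>
#include <string.h>
#include <stdio.h>

struct example_msg {
	long mtype;
	char mtext[64];
};

int example_msgsnd(int msqid)
{
	struct example_msg m = { .mtype = 1 };

	strcpy(m.mtext, "hello");
	/* blocks if the queue is full, unless IPC_NOWAIT is passed */
	if (msgsnd(msqid, &m, strlen(m.mtext) + 1, 0) != 0) {
		perror("msgsnd");
		return -1;
	}
	return 0;
}
#endif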

static inline int convert_mode(long *msgtyp, int msgflg)
{
	/*
	 * find message of correct type.
	 * msgtyp = 0 => get first.
	 * msgtyp > 0 => get first message of matching type.
	 * msgtyp < 0 => get the message with the lowest type that is
	 *               <= abs(msgtyp).
	 */
	if (*msgtyp == 0)
		return SEARCH_ANY;
	if (*msgtyp < 0) {
		*msgtyp = -*msgtyp;
		return SEARCH_LESSEQUAL;
	}
	if (msgflg & MSG_EXCEPT)
		return SEARCH_NOTEQUAL;
	return SEARCH_EQUAL;
}
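
/*
 * Illustrative mapping of msgrcv() arguments to search modes (following the
 * convert_mode() rules above):
 *   msgtyp = 0                 -> SEARCH_ANY       first message in queue
 *   msgtyp = 5                 -> SEARCH_EQUAL     first message of type 5
 *   msgtyp = 5, MSG_EXCEPT set -> SEARCH_NOTEQUAL  first message of type != 5
 *   msgtyp = -5                -> SEARCH_LESSEQUAL message with lowest
 *                                                  type <= 5
 */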

long do_msgrcv(int msqid, long *pmtype, void __user *mtext,
		size_t msgsz, long msgtyp, int msgflg)
{
	struct msg_queue *msq;
	struct msg_msg *msg;
	int mode;
	struct ipc_namespace *ns;

	if (msqid < 0 || (long) msgsz < 0)
		return -EINVAL;
	mode = convert_mode(&msgtyp, msgflg);
	ns = current->nsproxy->ipc_ns;

	msq = msg_lock_check(ns, msqid);
	if (IS_ERR(msq))
		return PTR_ERR(msq);

	for (;;) {
		struct msg_receiver msr_d;
		struct list_head *tmp;

		msg = ERR_PTR(-EACCES);
		if (ipcperms(&msq->q_perm, S_IRUGO))
			goto out_unlock;

		msg = ERR_PTR(-EAGAIN);
		tmp = msq->q_messages.next;
		while (tmp != &msq->q_messages) {
			struct msg_msg *walk_msg;

			walk_msg = list_entry(tmp, struct msg_msg, m_list);
			if (testmsg(walk_msg, msgtyp, mode) &&
			    !security_msg_queue_msgrcv(msq, walk_msg, current,
						       msgtyp, mode)) {

				msg = walk_msg;
				if (mode == SEARCH_LESSEQUAL &&
				    walk_msg->m_type != 1) {
					/* keep looking for a lower type */
					msgtyp = walk_msg->m_type - 1;
				} else {
					break;
				}
			}
			tmp = tmp->next;
		}
		if (!IS_ERR(msg)) {
			/*
			 * Found a suitable message.
			 * Unlink it from the queue.
			 */
			if ((msgsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) {
				msg = ERR_PTR(-E2BIG);
				goto out_unlock;
			}
			list_del(&msg->m_list);
			msq->q_qnum--;
			msq->q_rtime = get_seconds();
			msq->q_lrpid = task_tgid_vnr(current);
			msq->q_cbytes -= msg->m_ts;
			atomic_sub(msg->m_ts, &ns->msg_bytes);
			atomic_dec(&ns->msg_hdrs);
			ss_wakeup(&msq->q_senders, 0);
			msg_unlock(msq);
			break;
		}
		/* No message waiting. Wait for a message */
		if (msgflg & IPC_NOWAIT) {
			msg = ERR_PTR(-ENOMSG);
			goto out_unlock;
		}
		list_add_tail(&msr_d.r_list, &msq->q_receivers);
		msr_d.r_tsk = current;
		msr_d.r_msgtype = msgtyp;
		msr_d.r_mode = mode;
		if (msgflg & MSG_NOERROR)
			msr_d.r_maxsize = INT_MAX;
		else
			msr_d.r_maxsize = msgsz;
		msr_d.r_msg = ERR_PTR(-EAGAIN);
		current->state = TASK_INTERRUPTIBLE;
		msg_unlock(msq);

		schedule();

		/* Lockless receive, part 1:
		 * Disable preemption. We don't hold a reference to the queue
		 * and getting a reference would defeat the idea of a lockless
		 * operation, thus the code relies on rcu to guarantee the
		 * existence of msq:
		 * Prior to destruction, expunge_all(-EIDRM) changes r_msg.
		 * Thus if r_msg is -EAGAIN, then the queue is not yet
		 * destroyed. rcu_read_lock() prevents preemption between
		 * reading r_msg and the spin_lock() inside ipc_lock_by_ptr().
		 */
		rcu_read_lock();

		/* Lockless receive, part 2:
		 * Wait until pipelined_send or expunge_all are outside of
		 * wake_up_process(). There is a race with exit(), see
		 * ipc/mqueue.c for the details.
		 */
		msg = (struct msg_msg *)msr_d.r_msg;
		while (msg == NULL) {
			cpu_relax();
			msg = (struct msg_msg *)msr_d.r_msg;
		}

		/* Lockless receive, part 3:
		 * If there is a message or an error then accept it without
		 * locking.
		 */
		if (msg != ERR_PTR(-EAGAIN)) {
			rcu_read_unlock();
			break;
		}

		/* Lockless receive, part 4:
		 * Acquire the queue spinlock.
		 */
		ipc_lock_by_ptr(&msq->q_perm);
		rcu_read_unlock();

		/* Lockless receive, part 5:
		 * Repeat the test after acquiring the spinlock.
		 */
		msg = (struct msg_msg *)msr_d.r_msg;
		if (msg != ERR_PTR(-EAGAIN))
			goto out_unlock;

		list_del(&msr_d.r_list);
		if (signal_pending(current)) {
			msg = ERR_PTR(-ERESTARTNOHAND);
out_unlock:
			msg_unlock(msq);
			break;
		}
	}
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msgsz = (msgsz > msg->m_ts) ? msg->m_ts : msgsz;
	*pmtype = msg->m_type;
	if (store_msg(mtext, msg, msgsz))
		msgsz = -EFAULT;

	free_msg(msg);

	return msgsz;
}

asmlinkage long sys_msgrcv(int msqid, struct msgbuf __user *msgp, size_t msgsz,
			   long msgtyp, int msgflg)
{
	long err, mtype;

	err = do_msgrcv(msqid, &mtype, msgp->mtext, msgsz, msgtyp, msgflg);
	if (err < 0)
		goto out;

	if (put_user(mtype, &msgp->mtype))
		err = -EFAULT;
out:
	return err;
}
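
/*
 * Userspace view of sys_msgrcv() -- a minimal sketch, not part of this file,
 * assuming glibc wrappers. A negative msgtyp exercises the SEARCH_LESSEQUAL
 * path above; MSG_NOERROR truncates instead of failing with E2BIG.
 */
#if 0	/* illustration only */
#include <sys/types.h>
#include <sys/msg.h>
#include <stdio.h>

struct example_msg {
	long mtype;
	char mtext[64];
};

void example_msgrcv(int msqid)
{
	struct example_msg m;
	/* take the lowest-typed message whose type is <= 5 */
	ssize_t n = msgrcv(msqid, &m, sizeof(m.mtext), -5, MSG_NOERROR);

	if (n < 0)
		perror("msgrcv");
	else
		printf("type %ld: %.*s\n", m.mtype, (int)n, m.mtext);
}
#endif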

#ifdef CONFIG_PROC_FS
static int sysvipc_msg_proc_show(struct seq_file *s, void *it)
{
	struct msg_queue *msq = it;

	return seq_printf(s,
			"%10d %10d  %4o  %10lu %10lu %5u %5u %5u %5u %5u %5u %10lu %10lu %10lu\n",
			msq->q_perm.key,
			msq->q_perm.id,
			msq->q_perm.mode,
			msq->q_cbytes,
			msq->q_qnum,
			msq->q_lspid,
			msq->q_lrpid,
			msq->q_perm.uid,
			msq->q_perm.gid,
			msq->q_perm.cuid,
			msq->q_perm.cgid,
			msq->q_stime,
			msq->q_rtime,
			msq->q_ctime);
}
#endif