/*
 * linux/ipc/msg.c
 * Copyright (C) 1992 Krishna Balasubramanian
 *
 * Removed all the remaining kerneld mess
 * Catch the -EFAULT stuff properly
 * Use GFP_KERNEL for messages as in 1.2
 * Fixed up the unchecked user space derefs
 * Copyright (C) 1998 Alan Cox & Andi Kleen
 *
 * /proc/sysvipc/msg support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 *
 * mostly rewritten, threaded and wake-one semantics added
 * MSGMAX limit removed, sysctl's added
 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 */
#include <linux/capability.h>
#include <linux/slab.h>
#include <linux/msg.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/list.h>
#include <linux/security.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/seq_file.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <asm/current.h>
#include <asm/uaccess.h>
#include "util.h"
/*
 * one msg_receiver structure for each sleeping receiver:
 */
struct msg_receiver {
	struct list_head	r_list;
	struct task_struct	*r_tsk;

	int			r_mode;
	long			r_msgtype;
	long			r_maxsize;

	struct msg_msg		*volatile r_msg;
};

/* one msg_sender for each sleeping sender */
struct msg_sender {
	struct list_head	list;
	struct task_struct	*tsk;
};
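/*
 * Search modes derived from msgrcv()'s msgtyp argument; convert_mode()
 * below performs the mapping.
 */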
#define SEARCH_ANY		1
#define SEARCH_EQUAL		2
#define SEARCH_NOTEQUAL		3
#define SEARCH_LESSEQUAL	4
static atomic_t msg_bytes = ATOMIC_INIT(0);
static atomic_t msg_hdrs = ATOMIC_INIT(0);

static struct ipc_ids init_msg_ids;
#define msg_ids(ns)	(*((ns)->ids[IPC_MSG_IDS]))

#define msg_lock(ns, id)	((struct msg_queue *)ipc_lock(&msg_ids(ns), id))
#define msg_unlock(msq)		ipc_unlock(&(msq)->q_perm)
#define msg_rmid(ns, id)	((struct msg_queue *)ipc_rmid(&msg_ids(ns), id))
#define msg_checkid(ns, msq, msgid)	\
	ipc_checkid(&msg_ids(ns), &msq->q_perm, msgid)
#define msg_buildid(ns, id, seq) \
	ipc_buildid(&msg_ids(ns), id, seq)
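/*
 * Note (based on ipc_buildid() in ipc/util.h): the id handed to
 * userspace combines a slot index with the slot's sequence counter
 * (seq * SEQ_MULTIPLIER + index), so a stale id kept across a queue's
 * deletion is unlikely to match a reused slot; msg_checkid() rejects
 * such stale ids.
 */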
static void freeque(struct ipc_namespace *ns, struct msg_queue *msq, int id);
static int newque(struct ipc_namespace *ns, key_t key, int msgflg);
#ifdef CONFIG_PROC_FS
static int sysvipc_msg_proc_show(struct seq_file *s, void *it);
#endif
static void __msg_init_ns(struct ipc_namespace *ns, struct ipc_ids *ids)
{
	ns->ids[IPC_MSG_IDS] = ids;
	ns->msg_ctlmax = MSGMAX;
	ns->msg_ctlmnb = MSGMNB;
	ns->msg_ctlmni = MSGMNI;
	ipc_init_ids(ids, ns->msg_ctlmni);
}
int msg_init_ns(struct ipc_namespace *ns)
{
	struct ipc_ids *ids;

	ids = kmalloc(sizeof(struct ipc_ids), GFP_KERNEL);
	if (ids == NULL)
		return -ENOMEM;

	__msg_init_ns(ns, ids);
	return 0;
}
void msg_exit_ns(struct ipc_namespace *ns)
{
	int i;
	struct msg_queue *msq;

	mutex_lock(&msg_ids(ns).mutex);
	for (i = 0; i <= msg_ids(ns).max_id; i++) {
		msq = msg_lock(ns, i);
		if (msq == NULL)
			continue;

		freeque(ns, msq, i);
	}
	mutex_unlock(&msg_ids(ns).mutex);

	ipc_fini_ids(ns->ids[IPC_MSG_IDS]);
	kfree(ns->ids[IPC_MSG_IDS]);
	ns->ids[IPC_MSG_IDS] = NULL;
}
void __init msg_init(void)
{
	__msg_init_ns(&init_ipc_ns, &init_msg_ids);
	ipc_init_proc_interface("sysvipc/msg",
				"       key      msqid perms      cbytes       qnum lspid lrpid   uid   gid  cuid  cgid      stime      rtime      ctime\n",
				IPC_MSG_IDS, sysvipc_msg_proc_show);
}
static int newque(struct ipc_namespace *ns, key_t key, int msgflg)
{
	struct msg_queue *msq;
	int id, retval;

	msq = ipc_rcu_alloc(sizeof(*msq));
	if (!msq)
		return -ENOMEM;

	msq->q_perm.mode = msgflg & S_IRWXUGO;
	msq->q_perm.key = key;

	msq->q_perm.security = NULL;
	retval = security_msg_queue_alloc(msq);
	if (retval) {
		ipc_rcu_putref(msq);
		return retval;
	}

	/*
	 * ipc_addid() locks msq
	 */
	id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
	if (id == -1) {
		security_msg_queue_free(msq);
		ipc_rcu_putref(msq);
		return -ENOSPC;
	}

	msq->q_id = msg_buildid(ns, id, msq->q_perm.seq);
	msq->q_stime = msq->q_rtime = 0;
	msq->q_ctime = get_seconds();
	msq->q_cbytes = msq->q_qnum = 0;
	msq->q_qbytes = ns->msg_ctlmnb;
	msq->q_lspid = msq->q_lrpid = 0;
	INIT_LIST_HEAD(&msq->q_messages);
	INIT_LIST_HEAD(&msq->q_receivers);
	INIT_LIST_HEAD(&msq->q_senders);
	msg_unlock(msq);

	return msq->q_id;
}
static inline void ss_add(struct msg_queue *msq, struct msg_sender *mss)
{
	mss->tsk = current;
	current->state = TASK_INTERRUPTIBLE;
	list_add_tail(&mss->list, &msq->q_senders);
}
static inline void ss_del(struct msg_sender *mss)
{
	if (mss->list.next != NULL)
		list_del(&mss->list);
}
static void ss_wakeup(struct list_head *h, int kill)
{
	struct list_head *tmp;

	tmp = h->next;
	while (tmp != h) {
		struct msg_sender *mss;

		mss = list_entry(tmp, struct msg_sender, list);
		tmp = tmp->next;
		if (kill)
			mss->list.next = NULL;
		wake_up_process(mss->tsk);
	}
}
static void expunge_all(struct msg_queue *msq, int res)
{
	struct list_head *tmp;

	tmp = msq->q_receivers.next;
	while (tmp != &msq->q_receivers) {
		struct msg_receiver *msr;

		msr = list_entry(tmp, struct msg_receiver, r_list);
		tmp = tmp->next;
		msr->r_msg = NULL;
		wake_up_process(msr->r_tsk);
		smp_mb();
		msr->r_msg = ERR_PTR(res);
	}
}
/*
 * freeque() wakes up waiters on the sender and receiver waiting queue,
 * removes the message queue from the message queue ID array, and cleans
 * up all the messages associated with this queue.
 *
 * msg_ids.mutex and the spinlock for this message queue are held
 * before freeque() is called. msg_ids.mutex remains locked on exit.
 */
static void freeque(struct ipc_namespace *ns, struct msg_queue *msq, int id)
{
	struct list_head *tmp;

	expunge_all(msq, -EIDRM);
	ss_wakeup(&msq->q_senders, 1);
	msq = msg_rmid(ns, id);
	msg_unlock(msq);

	tmp = msq->q_messages.next;
	while (tmp != &msq->q_messages) {
		struct msg_msg *msg = list_entry(tmp, struct msg_msg, m_list);

		tmp = tmp->next;
		atomic_dec(&msg_hdrs);
		free_msg(msg);
	}
	atomic_sub(msq->q_cbytes, &msg_bytes);
	security_msg_queue_free(msq);
	ipc_rcu_putref(msq);
}
asmlinkage long sys_msgget(key_t key, int msgflg)
{
	struct msg_queue *msq;
	int id, ret = -EPERM;
	struct ipc_namespace *ns;

	ns = current->nsproxy->ipc_ns;

	mutex_lock(&msg_ids(ns).mutex);
	if (key == IPC_PRIVATE)
		ret = newque(ns, key, msgflg);
	else if ((id = ipc_findkey(&msg_ids(ns), key)) == -1) { /* key not used */
		if (!(msgflg & IPC_CREAT))
			ret = -ENOENT;
		else
			ret = newque(ns, key, msgflg);
	} else if (msgflg & IPC_CREAT && msgflg & IPC_EXCL) {
		ret = -EEXIST;
	} else {
		msq = msg_lock(ns, id);
		BUG_ON(msq == NULL);
		if (ipcperms(&msq->q_perm, msgflg))
			ret = -EACCES;
		else {
			int qid = msg_buildid(ns, id, msq->q_perm.seq);

			ret = security_msg_queue_associate(msq, msgflg);
			if (!ret)
				ret = qid;
		}
		msg_unlock(msq);
	}
	mutex_unlock(&msg_ids(ns).mutex);

	return ret;
}
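/*
 * For reference, a minimal userspace sketch of this syscall (not part
 * of the kernel sources; the ftok() path is hypothetical and error
 * handling is elided):
 *
 *	#include <sys/types.h>
 *	#include <sys/ipc.h>
 *	#include <sys/msg.h>
 *
 *	key_t key = ftok("/some/path", 'q');
 *	int id = msgget(key, IPC_CREAT | 0600);	// create or attach
 */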
static inline unsigned long
copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	{
		struct msqid_ds out;

		memset(&out, 0, sizeof(out));

		ipc64_perm_to_ipc_perm(&in->msg_perm, &out.msg_perm);

		out.msg_stime = in->msg_stime;
		out.msg_rtime = in->msg_rtime;
		out.msg_ctime = in->msg_ctime;

		if (in->msg_cbytes > USHRT_MAX)
			out.msg_cbytes = USHRT_MAX;
		else
			out.msg_cbytes = in->msg_cbytes;
		out.msg_lcbytes = in->msg_cbytes;

		if (in->msg_qnum > USHRT_MAX)
			out.msg_qnum = USHRT_MAX;
		else
			out.msg_qnum = in->msg_qnum;

		if (in->msg_qbytes > USHRT_MAX)
			out.msg_qbytes = USHRT_MAX;
		else
			out.msg_qbytes = in->msg_qbytes;
		out.msg_lqbytes = in->msg_qbytes;

		out.msg_lspid = in->msg_lspid;
		out.msg_lrpid = in->msg_lrpid;

		return copy_to_user(buf, &out, sizeof(out));
	}
	default:
		return -EINVAL;
	}
}
struct msq_setbuf {
	unsigned long	qbytes;
	uid_t		uid;
	gid_t		gid;
	mode_t		mode;
};
static inline unsigned long
copy_msqid_from_user(struct msq_setbuf *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
	{
		struct msqid64_ds tbuf;

		if (copy_from_user(&tbuf, buf, sizeof(tbuf)))
			return -EFAULT;

		out->qbytes = tbuf.msg_qbytes;
		out->uid = tbuf.msg_perm.uid;
		out->gid = tbuf.msg_perm.gid;
		out->mode = tbuf.msg_perm.mode;

		return 0;
	}
	case IPC_OLD:
	{
		struct msqid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->uid = tbuf_old.msg_perm.uid;
		out->gid = tbuf_old.msg_perm.gid;
		out->mode = tbuf_old.msg_perm.mode;

		if (tbuf_old.msg_qbytes == 0)
			out->qbytes = tbuf_old.msg_lqbytes;
		else
			out->qbytes = tbuf_old.msg_qbytes;

		return 0;
	}
	default:
		return -EINVAL;
	}
}
asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf)
{
	struct kern_ipc_perm *ipcp;
	struct msq_setbuf uninitialized_var(setbuf);
	struct msg_queue *msq;
	int err, version;
	struct ipc_namespace *ns;

	if (msqid < 0 || cmd < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) {
	case IPC_INFO:
	case MSG_INFO:
	{
		struct msginfo msginfo;
		int max_id;

		if (!buf)
			return -EFAULT;
		/*
		 * We must not return kernel stack data. Due to padding,
		 * it is not enough to set all member fields.
		 */
		err = security_msg_queue_msgctl(NULL, cmd);
		if (err)
			return err;

		memset(&msginfo, 0, sizeof(msginfo));
		msginfo.msgmni = ns->msg_ctlmni;
		msginfo.msgmax = ns->msg_ctlmax;
		msginfo.msgmnb = ns->msg_ctlmnb;
		msginfo.msgssz = MSGSSZ;
		msginfo.msgseg = MSGSEG;
		mutex_lock(&msg_ids(ns).mutex);
		if (cmd == MSG_INFO) {
			msginfo.msgpool = msg_ids(ns).in_use;
			msginfo.msgmap = atomic_read(&msg_hdrs);
			msginfo.msgtql = atomic_read(&msg_bytes);
		} else {
			msginfo.msgmap = MSGMAP;
			msginfo.msgpool = MSGPOOL;
			msginfo.msgtql = MSGTQL;
		}
		max_id = msg_ids(ns).max_id;
		mutex_unlock(&msg_ids(ns).mutex);
		if (copy_to_user(buf, &msginfo, sizeof(struct msginfo)))
			return -EFAULT;
		return (max_id < 0) ? 0 : max_id;
	}
	case MSG_STAT:
	case IPC_STAT:
	{
		struct msqid64_ds tbuf;
		int success_return;

		if (!buf)
			return -EFAULT;
		if (cmd == MSG_STAT && msqid >= msg_ids(ns).entries->size)
			return -EINVAL;

		memset(&tbuf, 0, sizeof(tbuf));

		msq = msg_lock(ns, msqid);
		if (msq == NULL)
			return -EINVAL;

		if (cmd == MSG_STAT) {
			success_return = msg_buildid(ns, msqid, msq->q_perm.seq);
		} else {
			err = -EIDRM;
			if (msg_checkid(ns, msq, msqid))
				goto out_unlock;
			success_return = 0;
		}
		err = -EACCES;
		if (ipcperms(&msq->q_perm, S_IRUGO))
			goto out_unlock;

		err = security_msg_queue_msgctl(msq, cmd);
		if (err)
			goto out_unlock;

		kernel_to_ipc64_perm(&msq->q_perm, &tbuf.msg_perm);
		tbuf.msg_stime = msq->q_stime;
		tbuf.msg_rtime = msq->q_rtime;
		tbuf.msg_ctime = msq->q_ctime;
		tbuf.msg_cbytes = msq->q_cbytes;
		tbuf.msg_qnum = msq->q_qnum;
		tbuf.msg_qbytes = msq->q_qbytes;
		tbuf.msg_lspid = msq->q_lspid;
		tbuf.msg_lrpid = msq->q_lrpid;
		msg_unlock(msq);
		if (copy_msqid_to_user(buf, &tbuf, version))
			return -EFAULT;
		return success_return;
	}
	case IPC_SET:
		if (!buf)
			return -EFAULT;
		if (copy_msqid_from_user(&setbuf, buf, version))
			return -EFAULT;
		break;
	case IPC_RMID:
		break;
	default:
		return -EINVAL;
	}

	mutex_lock(&msg_ids(ns).mutex);
	msq = msg_lock(ns, msqid);
	err = -EINVAL;
	if (msq == NULL)
		goto out_up;

	err = -EIDRM;
	if (msg_checkid(ns, msq, msqid))
		goto out_unlock_up;
	ipcp = &msq->q_perm;

	err = audit_ipc_obj(ipcp);
	if (err)
		goto out_unlock_up;
	if (cmd == IPC_SET) {
		err = audit_ipc_set_perm(setbuf.qbytes, setbuf.uid, setbuf.gid,
					 setbuf.mode);
		if (err)
			goto out_unlock_up;
	}

	err = -EPERM;
	if (current->euid != ipcp->cuid &&
	    current->euid != ipcp->uid && !capable(CAP_SYS_ADMIN))
		/* We _could_ check for CAP_CHOWN above, but we don't */
		goto out_unlock_up;

	err = security_msg_queue_msgctl(msq, cmd);
	if (err)
		goto out_unlock_up;

	switch (cmd) {
	case IPC_SET:
	{
		err = -EPERM;
		if (setbuf.qbytes > ns->msg_ctlmnb &&
		    !capable(CAP_SYS_RESOURCE))
			goto out_unlock_up;

		msq->q_qbytes = setbuf.qbytes;

		ipcp->uid = setbuf.uid;
		ipcp->gid = setbuf.gid;
		ipcp->mode = (ipcp->mode & ~S_IRWXUGO) |
			     (S_IRWXUGO & setbuf.mode);
		msq->q_ctime = get_seconds();
		/*
		 * Sleeping receivers might be excluded by
		 * stricter permissions.
		 */
		expunge_all(msq, -EAGAIN);
		/*
		 * Sleeping senders might be able to send
		 * due to a larger queue size.
		 */
		ss_wakeup(&msq->q_senders, 0);
		msg_unlock(msq);
		break;
	}
	case IPC_RMID:
		freeque(ns, msq, msqid);
		break;
	}
	err = 0;
out_up:
	mutex_unlock(&msg_ids(ns).mutex);
	return err;
out_unlock_up:
	msg_unlock(msq);
	goto out_up;
out_unlock:
	msg_unlock(msq);
	return err;
}
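/*
 * For reference, the corresponding userspace calls (not part of the
 * kernel sources; error handling elided):
 *
 *	struct msqid_ds ds;
 *	msgctl(id, IPC_STAT, &ds);	// fill ds from the queue
 *	ds.msg_qbytes = 8192;		// example new limit
 *	msgctl(id, IPC_SET, &ds);	// owner/creator or CAP_SYS_ADMIN
 *	msgctl(id, IPC_RMID, NULL);	// destroy the queue
 */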
static int testmsg(struct msg_msg *msg, long type, int mode)
{
	switch (mode) {
	case SEARCH_ANY:
		return 1;
	case SEARCH_LESSEQUAL:
		if (msg->m_type <= type)
			return 1;
		break;
	case SEARCH_EQUAL:
		if (msg->m_type == type)
			return 1;
		break;
	case SEARCH_NOTEQUAL:
		if (msg->m_type != type)
			return 1;
		break;
	}
	return 0;
}
static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
{
	struct list_head *tmp;

	tmp = msq->q_receivers.next;
	while (tmp != &msq->q_receivers) {
		struct msg_receiver *msr;

		msr = list_entry(tmp, struct msg_receiver, r_list);
		tmp = tmp->next;
		if (testmsg(msg, msr->r_msgtype, msr->r_mode) &&
		    !security_msg_queue_msgrcv(msq, msg, msr->r_tsk,
					       msr->r_msgtype, msr->r_mode)) {

			list_del(&msr->r_list);
			if (msr->r_maxsize < msg->m_ts) {
				msr->r_msg = NULL;
				wake_up_process(msr->r_tsk);
				smp_mb();
				msr->r_msg = ERR_PTR(-E2BIG);
			} else {
				msr->r_msg = NULL;
				msq->q_lrpid = msr->r_tsk->pid;
				msq->q_rtime = get_seconds();
				wake_up_process(msr->r_tsk);
				smp_mb();
				msr->r_msg = msg;

				return 1;
			}
		}
	}
	return 0;
}
long do_msgsnd(int msqid, long mtype, void __user *mtext,
		size_t msgsz, int msgflg)
{
	struct msg_queue *msq;
	struct msg_msg *msg;
	int err;
	struct ipc_namespace *ns;

	ns = current->nsproxy->ipc_ns;

	if (msgsz > ns->msg_ctlmax || (long) msgsz < 0 || msqid < 0)
		return -EINVAL;
	if (mtype < 1)
		return -EINVAL;

	msg = load_msg(mtext, msgsz);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->m_type = mtype;
	msg->m_ts = msgsz;

	msq = msg_lock(ns, msqid);
	err = -EINVAL;
	if (msq == NULL)
		goto out_free;

	err = -EIDRM;
	if (msg_checkid(ns, msq, msqid))
		goto out_unlock_free;

	for (;;) {
		struct msg_sender s;

		err = -EACCES;
		if (ipcperms(&msq->q_perm, S_IWUGO))
			goto out_unlock_free;

		err = security_msg_queue_msgsnd(msq, msg, msgflg);
		if (err)
			goto out_unlock_free;

		if (msgsz + msq->q_cbytes <= msq->q_qbytes &&
				1 + msq->q_qnum <= msq->q_qbytes) {
			break;
		}

		/* queue full, wait: */
		if (msgflg & IPC_NOWAIT) {
			err = -EAGAIN;
			goto out_unlock_free;
		}
		ss_add(msq, &s);
		ipc_rcu_getref(msq);
		msg_unlock(msq);
		schedule();

		ipc_lock_by_ptr(&msq->q_perm);
		ipc_rcu_putref(msq);
		if (msq->q_perm.deleted) {
			err = -EIDRM;
			goto out_unlock_free;
		}
		ss_del(&s);

		if (signal_pending(current)) {
			err = -ERESTARTNOHAND;
			goto out_unlock_free;
		}
	}

	msq->q_lspid = current->tgid;
	msq->q_stime = get_seconds();

	if (!pipelined_send(msq, msg)) {
		/* no one is waiting for this message, enqueue it */
		list_add_tail(&msg->m_list, &msq->q_messages);
		msq->q_cbytes += msgsz;
		msq->q_qnum++;
		atomic_add(msgsz, &msg_bytes);
		atomic_inc(&msg_hdrs);
	}

	err = 0;
	msg = NULL;

out_unlock_free:
	msg_unlock(msq);
out_free:
	if (msg != NULL)
		free_msg(msg);
	return err;
}
asmlinkage long
sys_msgsnd(int msqid, struct msgbuf __user *msgp, size_t msgsz, int msgflg)
{
	long mtype;

	if (get_user(mtype, &msgp->mtype))
		return -EFAULT;
	return do_msgsnd(msqid, mtype, msgp->mtext, msgsz, msgflg);
}
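/*
 * For reference, a userspace sketch matching the msgbuf layout expected
 * above (not part of the kernel sources; error handling elided):
 *
 *	struct { long mtype; char mtext[64]; } m = { 1, "ping" };
 *	msgsnd(id, &m, sizeof(m.mtext), 0);		// blocks if queue full
 *	msgsnd(id, &m, sizeof(m.mtext), IPC_NOWAIT);	// fails with EAGAIN instead
 */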
static inline int convert_mode(long *msgtyp, int msgflg)
{
	/*
	 * find message of correct type.
	 * msgtyp = 0 => get first.
	 * msgtyp > 0 => get first message of matching type.
	 * msgtyp < 0 => get message with the lowest type that is <= abs(msgtyp).
	 */
	if (*msgtyp == 0)
		return SEARCH_ANY;
	if (*msgtyp < 0) {
		*msgtyp = -*msgtyp;
		return SEARCH_LESSEQUAL;
	}
	if (msgflg & MSG_EXCEPT)
		return SEARCH_NOTEQUAL;
	return SEARCH_EQUAL;
}
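/*
 * Examples of the (msgtyp, msgflg) -> mode mapping:
 *	( 0, 0)          -> SEARCH_ANY       (first message)
 *	( 5, 0)          -> SEARCH_EQUAL     (first message of type 5)
 *	( 5, MSG_EXCEPT) -> SEARCH_NOTEQUAL  (first message of type != 5)
 *	(-5, 0)          -> SEARCH_LESSEQUAL (lowest type <= 5)
 */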
long do_msgrcv(int msqid, long *pmtype, void __user *mtext,
		size_t msgsz, long msgtyp, int msgflg)
{
	struct msg_queue *msq;
	struct msg_msg *msg;
	int mode;
	struct ipc_namespace *ns;

	if (msqid < 0 || (long) msgsz < 0)
		return -EINVAL;
	mode = convert_mode(&msgtyp, msgflg);
	ns = current->nsproxy->ipc_ns;

	msq = msg_lock(ns, msqid);
	if (msq == NULL)
		return -EINVAL;

	msg = ERR_PTR(-EIDRM);
	if (msg_checkid(ns, msq, msqid))
		goto out_unlock;

	for (;;) {
		struct msg_receiver msr_d;
		struct list_head *tmp;

		msg = ERR_PTR(-EACCES);
		if (ipcperms(&msq->q_perm, S_IRUGO))
			goto out_unlock;

		msg = ERR_PTR(-EAGAIN);
		tmp = msq->q_messages.next;
		while (tmp != &msq->q_messages) {
			struct msg_msg *walk_msg;

			walk_msg = list_entry(tmp, struct msg_msg, m_list);
			if (testmsg(walk_msg, msgtyp, mode) &&
			    !security_msg_queue_msgrcv(msq, walk_msg, current,
						       msgtyp, mode)) {

				msg = walk_msg;
				if (mode == SEARCH_LESSEQUAL &&
				    walk_msg->m_type != 1)
					msgtyp = walk_msg->m_type - 1;
				else
					break;
			}
			tmp = tmp->next;
		}
		if (!IS_ERR(msg)) {
			/*
			 * Found a suitable message.
			 * Unlink it from the queue.
			 */
			if ((msgsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) {
				msg = ERR_PTR(-E2BIG);
				goto out_unlock;
			}
			list_del(&msg->m_list);
			msq->q_qnum--;
			msq->q_rtime = get_seconds();
			msq->q_lrpid = current->tgid;
			msq->q_cbytes -= msg->m_ts;
			atomic_sub(msg->m_ts, &msg_bytes);
			atomic_dec(&msg_hdrs);
			ss_wakeup(&msq->q_senders, 0);
			msg_unlock(msq);
			break;
		}
		/* No message waiting. Wait for a message */
		if (msgflg & IPC_NOWAIT) {
			msg = ERR_PTR(-ENOMSG);
			goto out_unlock;
		}
		list_add_tail(&msr_d.r_list, &msq->q_receivers);
		msr_d.r_tsk = current;
		msr_d.r_msgtype = msgtyp;
		msr_d.r_mode = mode;
		if (msgflg & MSG_NOERROR)
			msr_d.r_maxsize = INT_MAX;
		else
			msr_d.r_maxsize = msgsz;
		msr_d.r_msg = ERR_PTR(-EAGAIN);
		current->state = TASK_INTERRUPTIBLE;
		msg_unlock(msq);

		schedule();

		/*
		 * Lockless receive, part 1:
		 * Disable preemption. We don't hold a reference to the queue
		 * and getting a reference would defeat the idea of a lockless
		 * operation, thus the code relies on rcu to guarantee the
		 * existence of msq:
		 * Prior to destruction, expunge_all(-EIDRM) changes r_msg.
		 * Thus if r_msg is -EAGAIN, then the queue is not yet destroyed.
		 * rcu_read_lock() prevents preemption between reading r_msg
		 * and the spin_lock() inside ipc_lock_by_ptr().
		 */
		rcu_read_lock();

		/*
		 * Lockless receive, part 2:
		 * Wait until pipelined_send or expunge_all are outside of
		 * wake_up_process(). There is a race with exit(), see
		 * ipc/mqueue.c for the details.
		 */
		msg = (struct msg_msg *)msr_d.r_msg;
		while (msg == NULL) {
			cpu_relax();
			msg = (struct msg_msg *)msr_d.r_msg;
		}

		/*
		 * Lockless receive, part 3:
		 * If there is a message or an error then accept it without
		 * locking.
		 */
		if (msg != ERR_PTR(-EAGAIN)) {
			rcu_read_unlock();
			break;
		}

		/*
		 * Lockless receive, part 4:
		 * Acquire the queue spinlock.
		 */
		ipc_lock_by_ptr(&msq->q_perm);
		rcu_read_unlock();

		/*
		 * Lockless receive, part 5:
		 * Repeat the test after acquiring the spinlock.
		 */
		msg = (struct msg_msg *)msr_d.r_msg;
		if (msg != ERR_PTR(-EAGAIN))
			goto out_unlock;

		list_del(&msr_d.r_list);
		if (signal_pending(current)) {
			msg = ERR_PTR(-ERESTARTNOHAND);
out_unlock:
			msg_unlock(msq);
			break;
		}
	}
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msgsz = (msgsz > msg->m_ts) ? msg->m_ts : msgsz;
	*pmtype = msg->m_type;
	if (store_msg(mtext, msg, msgsz))
		msgsz = -EFAULT;

	free_msg(msg);

	return msgsz;
}
asmlinkage long sys_msgrcv(int msqid, struct msgbuf __user *msgp, size_t msgsz,
			   long msgtyp, int msgflg)
{
	long err, mtype;

	err = do_msgrcv(msqid, &mtype, msgp->mtext, msgsz, msgtyp, msgflg);
	if (err < 0)
		goto out;

	if (put_user(mtype, &msgp->mtype))
		err = -EFAULT;
out:
	return err;
}
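/*
 * For reference, a userspace sketch (not part of the kernel sources;
 * error handling elided):
 *
 *	struct { long mtype; char mtext[64]; } m;
 *	ssize_t n = msgrcv(id, &m, sizeof(m.mtext), -5, 0);
 *	// receives the queued message with the lowest type <= 5;
 *	// n is the number of bytes stored in m.mtext, m.mtype the type.
 */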
#ifdef CONFIG_PROC_FS
static int sysvipc_msg_proc_show(struct seq_file *s, void *it)
{
	struct msg_queue *msq = it;

	return seq_printf(s,
		"%10d %10d  %4o  %10lu %10lu %5u %5u %5u %5u %5u %5u %10lu %10lu %10lu\n",
			msq->q_perm.key,
			msq->q_id,
			msq->q_perm.mode,
			msq->q_cbytes,
			msq->q_qnum,
			msq->q_lspid,
			msq->q_lrpid,
			msq->q_perm.uid,
			msq->q_perm.gid,
			msq->q_perm.cuid,
			msq->q_perm.cgid,
			msq->q_stime,
			msq->q_rtime,
			msq->q_ctime);
}
#endif