/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 * Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 *
 * Better ipc lock (kern_ipc_perm.lock) handling
 * Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013.
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/mount.h>
#include <linux/ipc_namespace.h>

#include <asm/uaccess.h>

#include "util.h"

struct shm_file_data {
	int id;
	struct ipc_namespace *ns;
	struct file *file;
	const struct vm_operations_struct *vm_ops;
};

#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))
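/*
 * shm_file_data() aliases file->private_data as a struct shm_file_data
 * pointer and is usable as an lvalue: do_shmat() stores the per-attach
 * state in it and shm_release() resets it to NULL.
 */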

static const struct file_operations shm_file_operations;
static const struct vm_operations_struct shm_vm_ops;

#define shm_ids(ns)	((ns)->ids[IPC_SHM_IDS])

#define shm_unlock(shp)			\
	ipc_unlock(&(shp)->shm_perm)

static int newseg(struct ipc_namespace *, struct ipc_params *);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif

void shm_init_ns(struct ipc_namespace *ns)
{
	ns->shm_ctlmax = SHMMAX;
	ns->shm_ctlall = SHMALL;
	ns->shm_ctlmni = SHMMNI;
	ns->shm_rmid_forced = 0;
	ns->shm_tot = 0;
	ipc_init_ids(&shm_ids(ns));
}

/*
 * Called with shm_ids.rwsem (writer) and the shp structure locked.
 * Only shm_ids.rwsem remains locked on exit.
 */
static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	if (shp->shm_nattch) {
		shp->shm_perm.mode |= SHM_DEST;
		/* Do not let shmget() find it any more */
		shp->shm_perm.key = IPC_PRIVATE;
		shm_unlock(shp);
	} else {
		shm_destroy(ns, shp);
	}
}

#ifdef CONFIG_IPC_NS
void shm_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
	idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
}
#endif

static int __init ipc_ns_init(void)
{
	shm_init_ns(&init_ipc_ns);
	return 0;
}

pure_initcall(ipc_ns_init);

void __init shm_init(void)
{
	ipc_init_proc_interface("sysvipc/shm",
#if BITS_PER_LONG <= 32
				"       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime        rss       swap\n",
#else
				"       key      shmid perms                  size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime                   rss                  swap\n",
#endif
				IPC_SHM_IDS, sysvipc_shm_proc_show);
}

static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

/*
 * The shm_lock() and shm_lock_by_ptr() routines are called in the paths
 * where the rwsem is not necessarily held.
 */
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct shmid_kernel *)ipcp;

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
{
	rcu_read_lock();
	ipc_lock_object(&ipcp->shm_perm);
}

static void shm_rcu_free(struct rcu_head *head)
{
	struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
	struct shmid_kernel *shp = ipc_rcu_to_struct(p);

	security_shm_free(shp);
	ipc_rcu_free(head);
}

static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
{
	ipc_rmid(&shm_ids(ns), &s->shm_perm);
}

/* Called on fork() once for every attach, and from shm_mmap() on a new attach. */
static void shm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;

	shp = shm_lock(sfd->ns, sfd->id);
	BUG_ON(IS_ERR(shp));
	shp->shm_atim = get_seconds();
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_nattch++;
	shm_unlock(shp);
}

/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @ns: namespace
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.rwsem (writer) locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	shm_rmid(ns, shp);
	shm_unlock(shp);
	if (!is_file_hugepages(shp->shm_file))
		shmem_lock(shp->shm_file, 0, shp->mlock_user);
	else if (shp->mlock_user)
		user_shm_unlock(file_inode(shp->shm_file)->i_size,
				shp->mlock_user);
	fput(shp->shm_file);
	ipc_rcu_putref(shp, shm_rcu_free);
}

/*
 * shm_may_destroy - identifies whether shm segment should be destroyed now
 *
 * Returns true if and only if there are no active users of the segment and
 * one of the following is true:
 *
 * 1) shmctl(id, IPC_RMID, NULL) was called for this shp
 *
 * 2) sysctl kernel.shm_rmid_forced is set to 1.
 */
static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	return (shp->shm_nattch == 0) &&
	       (ns->shm_rmid_forced ||
		(shp->shm_perm.mode & SHM_DEST));
}

/*
 * Remove the attach descriptor vma and free the segment's memory if it
 * is marked destroyed. The descriptor has already been removed from the
 * current->mm->mmap list and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;
	struct ipc_namespace *ns = sfd->ns;

	down_write(&shm_ids(ns).rwsem);
	/* remove from the list of attaches of the shm segment */
	shp = shm_lock(ns, sfd->id);
	BUG_ON(IS_ERR(shp));
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_dtim = get_seconds();
	shp->shm_nattch--;
	if (shm_may_destroy(ns, shp))
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rwsem);
}

/* Called with ns->shm_ids(ns).rwsem locked */
static int shm_try_destroy_current(int id, void *p, void *data)
{
	struct ipc_namespace *ns = data;
	struct kern_ipc_perm *ipcp = p;
	struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	if (shp->shm_creator != current)
		return 0;

	/*
	 * Mark it as orphaned so the segment is destroyed when
	 * kernel.shm_rmid_forced is changed. This is a no-op if the
	 * shm_may_destroy() check below already succeeds.
	 */
	shp->shm_creator = NULL;

	/*
	 * Don't even try to destroy it. If shm_rmid_forced=0 and IPC_RMID
	 * was not requested, it must not be deleted here.
	 */
	if (!ns->shm_rmid_forced)
		return 0;

	if (shm_may_destroy(ns, shp)) {
		shm_lock_by_ptr(shp);
		shm_destroy(ns, shp);
	}
	return 0;
}

/* Called with ns->shm_ids(ns).rwsem locked */
static int shm_try_destroy_orphaned(int id, void *p, void *data)
{
	struct ipc_namespace *ns = data;
	struct kern_ipc_perm *ipcp = p;
	struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	/*
	 * We want to destroy segments that have no users and whose
	 * originating process has already exited.
	 *
	 * As shp->* are changed under rwsem, it's safe to skip shp locking.
	 */
	if (shp->shm_creator != NULL)
		return 0;

	if (shm_may_destroy(ns, shp)) {
		shm_lock_by_ptr(shp);
		shm_destroy(ns, shp);
	}
	return 0;
}

void shm_destroy_orphaned(struct ipc_namespace *ns)
{
	down_write(&shm_ids(ns).rwsem);
	if (shm_ids(ns).in_use)
		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
	up_write(&shm_ids(ns).rwsem);
}

void exit_shm(struct task_struct *task)
{
	struct ipc_namespace *ns = task->nsproxy->ipc_ns;

	if (shm_ids(ns).in_use == 0)
		return;

	/* Destroy all segments this task created that are not mapped anywhere */
	down_write(&shm_ids(ns).rwsem);
	if (shm_ids(ns).in_use)
		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_current, ns);
	up_write(&shm_ids(ns).rwsem);
}

static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->vm_ops->fault(vma, vmf);
}

#ifdef CONFIG_NUMA
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	int err = 0;

	if (sfd->vm_ops->set_policy)
		err = sfd->vm_ops->set_policy(vma, new);
	return err;
}

static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
					unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct mempolicy *pol = NULL;

	if (sfd->vm_ops->get_policy)
		pol = sfd->vm_ops->get_policy(vma, addr);
	else if (vma->vm_policy)
		pol = vma->vm_policy;

	return pol;
}
#endif

static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct shm_file_data *sfd = shm_file_data(file);
	int ret;

	ret = sfd->file->f_op->mmap(sfd->file, vma);
	if (ret != 0)
		return ret;
	sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
	BUG_ON(!sfd->vm_ops->fault);
#endif
	vma->vm_ops = &shm_vm_ops;
	shm_open(vma);

	return ret;
}
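
/*
 * Note the vm_ops stacking in shm_mmap() above: the backing file's mmap
 * installs its own vm_ops (shmem or hugetlbfs), shm_mmap() saves that
 * pointer in sfd->vm_ops and then substitutes shm_vm_ops, so that
 * attach/detach bookkeeping runs first and faults and NUMA policy
 * requests are forwarded to the saved operations.
 */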

static int shm_release(struct inode *ino, struct file *file)
{
	struct shm_file_data *sfd = shm_file_data(file);

	put_ipc_ns(sfd->ns);
	shm_file_data(file) = NULL;
	kfree(sfd);
	return 0;
}

static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct shm_file_data *sfd = shm_file_data(file);

	if (!sfd->file->f_op->fsync)
		return -EINVAL;
	return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
}

static long shm_fallocate(struct file *file, int mode, loff_t offset,
			  loff_t len)
{
	struct shm_file_data *sfd = shm_file_data(file);

	if (!sfd->file->f_op->fallocate)
		return -EOPNOTSUPP;
	return sfd->file->f_op->fallocate(file, mode, offset, len);
}

static unsigned long shm_get_unmapped_area(struct file *file,
	unsigned long addr, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
						  pgoff, flags);
}

static const struct file_operations shm_file_operations = {
	.mmap = shm_mmap,
	.fsync = shm_fsync,
	.release = shm_release,
#ifndef CONFIG_MMU
	.get_unmapped_area = shm_get_unmapped_area,
#endif
	.llseek = noop_llseek,
	.fallocate = shm_fallocate,
};

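/*
 * The hugetlb variant below supplies ->get_unmapped_area even with an
 * MMU: attach addresses must satisfy huge-page alignment, so placement
 * is delegated to the backing hugetlbfs file via shm_get_unmapped_area().
 */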
static const struct file_operations shm_file_operations_huge = {
	.mmap = shm_mmap,
	.fsync = shm_fsync,
	.release = shm_release,
	.get_unmapped_area = shm_get_unmapped_area,
	.llseek = noop_llseek,
	.fallocate = shm_fallocate,
};

int is_file_shm_hugepages(struct file *file)
{
	return file->f_op == &shm_file_operations_huge;
}

static const struct vm_operations_struct shm_vm_ops = {
	.open = shm_open,	/* callback for a new vm-area open */
	.close = shm_close,	/* callback for when the vm-area is released */
	.fault = shm_fault,
#if defined(CONFIG_NUMA)
	.set_policy = shm_set_policy,
	.get_policy = shm_get_policy,
#endif
};

/**
 * newseg - Create a new shared memory segment
 * @ns: namespace
 * @params: ptr to the structure that contains key, size and shmflg
 *
 * Called with shm_ids.rwsem held as a writer.
 */
static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
{
	key_t key = params->key;
	int shmflg = params->flg;
	size_t size = params->u.size;
	int error;
	struct shmid_kernel *shp;
	size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct file *file;
	char name[13];
	int id;
	vm_flags_t acctflag = 0;

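	/*
	 * Worked example of the accounting below (illustrative figures,
	 * assuming 4 KiB pages): a 5000-byte request gives numpages =
	 * (5000 + 4095) >> PAGE_SHIFT = 2, the unit charged against
	 * ns->shm_tot and checked against ns->shm_ctlall.
	 */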
	if (size < SHMMIN || size > ns->shm_ctlmax)
		return -EINVAL;

	if (ns->shm_tot + numpages > ns->shm_ctlall)
		return -ENOSPC;

	shp = ipc_rcu_alloc(sizeof(*shp));
	if (!shp)
		return -ENOMEM;

	shp->shm_perm.key = key;
	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
	shp->mlock_user = NULL;

	shp->shm_perm.security = NULL;
	error = security_shm_alloc(shp);
	if (error) {
		ipc_rcu_putref(shp, ipc_rcu_free);
		return error;
	}

	sprintf(name, "SYSV%08x", key);
	if (shmflg & SHM_HUGETLB) {
		struct hstate *hs;
		size_t hugesize;

		hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
		if (!hs) {
			error = -EINVAL;
			goto no_file;
		}
		hugesize = ALIGN(size, huge_page_size(hs));

		/* hugetlb_file_setup applies strict accounting */
		if (shmflg & SHM_NORESERVE)
			acctflag = VM_NORESERVE;
		file = hugetlb_file_setup(name, hugesize, acctflag,
				&shp->mlock_user, HUGETLB_SHMFS_INODE,
				(shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
	} else {
		/*
		 * Do not honor SHM_NORESERVE (i.e. unaccounted mappings)
		 * when OVERCOMMIT_NEVER is in effect, even if it's asked for.
		 */
		if ((shmflg & SHM_NORESERVE) &&
		    sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			acctflag = VM_NORESERVE;
		file = shmem_file_setup(name, size, acctflag);
	}
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto no_file;

	id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
	if (id < 0) {
		error = id;
		goto no_id;
	}

	shp->shm_cprid = task_tgid_vnr(current);
	shp->shm_lprid = 0;
	shp->shm_atim = shp->shm_dtim = 0;
	shp->shm_ctim = get_seconds();
	shp->shm_segsz = size;
	shp->shm_nattch = 0;
	shp->shm_file = file;
	shp->shm_creator = current;

	/*
	 * shmid gets reported as "inode#" in /proc/pid/maps.
	 * proc-ps tools use this. Changing this will break them.
	 */
	file_inode(file)->i_ino = shp->shm_perm.id;

	ns->shm_tot += numpages;
	error = shp->shm_perm.id;

	ipc_unlock_object(&shp->shm_perm);
	rcu_read_unlock();
	return error;

no_id:
	if (is_file_hugepages(file) && shp->mlock_user)
		user_shm_unlock(size, shp->mlock_user);
	fput(file);
no_file:
	ipc_rcu_putref(shp, shm_rcu_free);
	return error;
}

/*
 * Called with shm_ids.rwsem and ipcp locked.
 */
static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	return security_shm_associate(shp, shmflg);
}

/*
 * Called with shm_ids.rwsem and ipcp locked.
 */
static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
				  struct ipc_params *params)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	if (shp->shm_segsz < params->u.size)
		return -EINVAL;

	return 0;
}

SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
{
	struct ipc_namespace *ns;
	struct ipc_ops shm_ops;
	struct ipc_params shm_params;

	ns = current->nsproxy->ipc_ns;

	shm_ops.getnew = newseg;
	shm_ops.associate = shm_security;
	shm_ops.more_checks = shm_more_checks;

	shm_params.key = key;
	shm_params.flg = shmflg;
	shm_params.u.size = size;

	return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
}
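
/*
 * Userspace view of the above, as a minimal sketch (error handling
 * omitted; the 0600 mode and IPC_PRIVATE key are illustrative):
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	char *p = shmat(id, NULL, 0);
 *	p[0] = 1;
 *	shmdt(p);
 *	shmctl(id, IPC_RMID, NULL);
 */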

static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	{
		struct shmid_ds out;

		memset(&out, 0, sizeof(out));
		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
		out.shm_segsz = in->shm_segsz;
		out.shm_atime = in->shm_atime;
		out.shm_dtime = in->shm_dtime;
		out.shm_ctime = in->shm_ctime;
		out.shm_cpid = in->shm_cpid;
		out.shm_lpid = in->shm_lpid;
		out.shm_nattch = in->shm_nattch;

		return copy_to_user(buf, &out, sizeof(out));
	}
	default:
		return -EINVAL;
	}
}

static inline unsigned long
copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	{
		struct shmid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->shm_perm.uid = tbuf_old.shm_perm.uid;
		out->shm_perm.gid = tbuf_old.shm_perm.gid;
		out->shm_perm.mode = tbuf_old.shm_perm.mode;

		return 0;
	}
	default:
		return -EINVAL;
	}
}

static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	{
		struct shminfo out;

		if (in->shmmax > INT_MAX)
			out.shmmax = INT_MAX;
		else
			out.shmmax = (int)in->shmmax;

		out.shmmin = in->shmmin;
		out.shmmni = in->shmmni;
		out.shmseg = in->shmseg;
		out.shmall = in->shmall;

		return copy_to_user(buf, &out, sizeof(out));
	}
	default:
		return -EINVAL;
	}
}

/*
 * Calculate and add used RSS and swap pages of a shm.
 * Called with shm_ids.rwsem held as a reader.
 */
static void shm_add_rss_swap(struct shmid_kernel *shp,
			     unsigned long *rss_add, unsigned long *swp_add)
{
	struct inode *inode;

	inode = file_inode(shp->shm_file);

	if (is_file_hugepages(shp->shm_file)) {
		struct address_space *mapping = inode->i_mapping;
		struct hstate *h = hstate_file(shp->shm_file);

		*rss_add += pages_per_huge_page(h) * mapping->nrpages;
	} else {
#ifdef CONFIG_SHMEM
		struct shmem_inode_info *info = SHMEM_I(inode);

		spin_lock(&info->lock);
		*rss_add += inode->i_mapping->nrpages;
		*swp_add += info->swapped;
		spin_unlock(&info->lock);
#else
		*rss_add += inode->i_mapping->nrpages;
#endif
	}
}

/*
 * Called with shm_ids.rwsem held as a reader.
 */
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
			 unsigned long *swp)
{
	int next_id;
	int total, in_use;

	*rss = 0;
	*swp = 0;

	in_use = shm_ids(ns).in_use;

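	/*
	 * Scan the idr from id 0 upward until all in_use live entries
	 * have been visited; holes left by removed segments make
	 * idr_find() return NULL and are simply skipped.
	 */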
	for (total = 0, next_id = 0; total < in_use; next_id++) {
		struct kern_ipc_perm *ipc;
		struct shmid_kernel *shp;

		ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
		if (ipc == NULL)
			continue;
		shp = container_of(ipc, struct shmid_kernel, shm_perm);

		shm_add_rss_swap(shp, rss, swp);

		total++;
	}
}

/*
 * This function handles some shmctl commands which require the rwsem
 * to be held in write mode.
 * NOTE: it must be called with no locks held; the rwsem is taken inside
 * this function.
 */
static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
		       struct shmid_ds __user *buf, int version)
{
	struct kern_ipc_perm *ipcp;
	struct shmid64_ds shmid64;
	struct shmid_kernel *shp;
	int err;

	if (cmd == IPC_SET) {
		if (copy_shmid_from_user(&shmid64, buf, version))
			return -EFAULT;
	}

	down_write(&shm_ids(ns).rwsem);
	rcu_read_lock();

	ipcp = ipcctl_pre_down_nolock(ns, &shm_ids(ns), shmid, cmd,
				      &shmid64.shm_perm, 0);
	if (IS_ERR(ipcp)) {
		err = PTR_ERR(ipcp);
		goto out_unlock1;
	}

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	err = security_shm_shmctl(shp, cmd);
	if (err)
		goto out_unlock1;

	switch (cmd) {
	case IPC_RMID:
		ipc_lock_object(&shp->shm_perm);
		/* do_shm_rmid unlocks the ipc object and rcu */
		do_shm_rmid(ns, ipcp);
		goto out_up;
	case IPC_SET:
		ipc_lock_object(&shp->shm_perm);
		err = ipc_update_perm(&shmid64.shm_perm, ipcp);
		if (err)
			goto out_unlock0;
		shp->shm_ctim = get_seconds();
		break;
	default:
		err = -EINVAL;
		goto out_unlock1;
	}

out_unlock0:
	ipc_unlock_object(&shp->shm_perm);
out_unlock1:
	rcu_read_unlock();
out_up:
	up_write(&shm_ids(ns).rwsem);
	return err;
}

static int shmctl_nolock(struct ipc_namespace *ns, int shmid,
			 int cmd, int version, void __user *buf)
{
	int err;
	struct shmid_kernel *shp;

	/* preliminary security checks for *_INFO */
	if (cmd == IPC_INFO || cmd == SHM_INFO) {
		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;
	}

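	/*
	 * For both *_INFO commands below, a non-negative return value is
	 * the highest IPC index currently in use, which userspace can
	 * walk with SHM_STAT; when no segments exist, the internal
	 * negative "no ids" result is reported as 0 instead.
	 */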
	switch (cmd) {
	case IPC_INFO:
	{
		struct shminfo64 shminfo;

		memset(&shminfo, 0, sizeof(shminfo));
		shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
		shminfo.shmmax = ns->shm_ctlmax;
		shminfo.shmall = ns->shm_ctlall;

		shminfo.shmmin = SHMMIN;
		if (copy_shminfo_to_user(buf, &shminfo, version))
			return -EFAULT;

		down_read(&shm_ids(ns).rwsem);
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rwsem);

		if (err < 0)
			err = 0;
		goto out;
	}
	case SHM_INFO:
	{
		struct shm_info shm_info;

		memset(&shm_info, 0, sizeof(shm_info));
		down_read(&shm_ids(ns).rwsem);
		shm_info.used_ids = shm_ids(ns).in_use;
		shm_get_stat(ns, &shm_info.shm_rss, &shm_info.shm_swp);
		shm_info.shm_tot = ns->shm_tot;
		shm_info.swap_attempts = 0;
		shm_info.swap_successes = 0;
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rwsem);
		if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
			err = -EFAULT;
			goto out;
		}

		err = err < 0 ? 0 : err;
		goto out;
	}
	case SHM_STAT:
	case IPC_STAT:
	{
		struct shmid64_ds tbuf;
		int result;

		rcu_read_lock();
		if (cmd == SHM_STAT) {
			shp = shm_obtain_object(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out_unlock;
			}
			result = shp->shm_perm.id;
		} else {
			shp = shm_obtain_object_check(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out_unlock;
			}
			result = 0;
		}

		err = -EACCES;
		if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
			goto out_unlock;

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;

		memset(&tbuf, 0, sizeof(tbuf));
		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
		tbuf.shm_segsz = shp->shm_segsz;
		tbuf.shm_atime = shp->shm_atim;
		tbuf.shm_dtime = shp->shm_dtim;
		tbuf.shm_ctime = shp->shm_ctim;
		tbuf.shm_cpid = shp->shm_cprid;
		tbuf.shm_lpid = shp->shm_lprid;
		tbuf.shm_nattch = shp->shm_nattch;
		rcu_read_unlock();

		if (copy_shmid_to_user(buf, &tbuf, version))
			err = -EFAULT;
		else
			err = result;
		goto out;
	}
	default:
		return -EINVAL;
	}

out_unlock:
	rcu_read_unlock();
out:
	return err;
}

SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
{
	struct shmid_kernel *shp;
	int err, version;
	struct ipc_namespace *ns;

	if (cmd < 0 || shmid < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) {
	case IPC_INFO:
	case SHM_INFO:
	case SHM_STAT:
	case IPC_STAT:
		return shmctl_nolock(ns, shmid, cmd, version, buf);
	case IPC_RMID:
	case IPC_SET:
		return shmctl_down(ns, shmid, cmd, buf, version);
	case SHM_LOCK:
	case SHM_UNLOCK:
	{
		struct file *shm_file;

		rcu_read_lock();
		shp = shm_obtain_object_check(ns, shmid);
		if (IS_ERR(shp)) {
			err = PTR_ERR(shp);
			goto out_unlock1;
		}

		audit_ipc_obj(&(shp->shm_perm));
		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock1;

		ipc_lock_object(&shp->shm_perm);
		if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
			kuid_t euid = current_euid();

			err = -EPERM;
			if (!uid_eq(euid, shp->shm_perm.uid) &&
			    !uid_eq(euid, shp->shm_perm.cuid))
				goto out_unlock0;
			if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK))
				goto out_unlock0;
		}

		shm_file = shp->shm_file;
		if (is_file_hugepages(shm_file))
			goto out_unlock0;

		if (cmd == SHM_LOCK) {
			struct user_struct *user = current_user();

			err = shmem_lock(shm_file, 1, user);
			if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
				shp->shm_perm.mode |= SHM_LOCKED;
				shp->mlock_user = user;
			}
			goto out_unlock0;
		}

		/* SHM_UNLOCK */
		if (!(shp->shm_perm.mode & SHM_LOCKED))
			goto out_unlock0;
		shmem_lock(shm_file, 0, shp->mlock_user);
		shp->shm_perm.mode &= ~SHM_LOCKED;
		shp->mlock_user = NULL;
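		/*
		 * Pin the file so the mapping survives while the object
		 * lock and RCU read lock are dropped for
		 * shmem_unlock_mapping() below.
		 */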
		get_file(shm_file);
		ipc_unlock_object(&shp->shm_perm);
		rcu_read_unlock();
		shmem_unlock_mapping(shm_file->f_mapping);

		fput(shm_file);
		return err;
	}
	default:
		return -EINVAL;
	}

out_unlock0:
	ipc_unlock_object(&shp->shm_perm);
out_unlock1:
	rcu_read_unlock();
	return err;
}
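
/*
 * Userspace sketch of the SHM_LOCK path above (illustrative; the caller
 * needs CAP_IPC_LOCK, or ownership plus a nonzero RLIMIT_MEMLOCK):
 *
 *	shmctl(id, SHM_LOCK, NULL);	locks the segment's pages in memory
 *	shmctl(id, SHM_UNLOCK, NULL);	makes them evictable again
 */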

/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
	      unsigned long shmlba)
{
	struct shmid_kernel *shp;
	unsigned long addr;
	unsigned long size;
	struct file *file;
	int err;
	unsigned long flags;
	unsigned long prot;
	int acc_mode;
	struct ipc_namespace *ns;
	struct shm_file_data *sfd;
	struct path path;
	fmode_t f_mode;
	unsigned long populate = 0;

	err = -EINVAL;
	if (shmid < 0)
		goto out;
	else if ((addr = (ulong)shmaddr)) {
		if (addr & (shmlba - 1)) {
			if (shmflg & SHM_RND)
				addr &= ~(shmlba - 1); /* round down */
			else
#ifndef __ARCH_FORCE_SHMLBA
				if (addr & ~PAGE_MASK)
#endif
					goto out;
		}
		flags = MAP_SHARED | MAP_FIXED;
	} else {
		if ((shmflg & SHM_REMAP))
			goto out;

		flags = MAP_SHARED;
	}

	if (shmflg & SHM_RDONLY) {
		prot = PROT_READ;
		acc_mode = S_IRUGO;
		f_mode = FMODE_READ;
	} else {
		prot = PROT_READ | PROT_WRITE;
		acc_mode = S_IRUGO | S_IWUGO;
		f_mode = FMODE_READ | FMODE_WRITE;
	}
	if (shmflg & SHM_EXEC) {
		prot |= PROT_EXEC;
		acc_mode |= S_IXUGO;
	}

	/*
	 * We cannot rely on the fs check since SYSV IPC does have an
	 * additional creator id...
	 */
	ns = current->nsproxy->ipc_ns;
	rcu_read_lock();
	shp = shm_obtain_object_check(ns, shmid);
	if (IS_ERR(shp)) {
		err = PTR_ERR(shp);
		goto out_unlock;
	}

	err = -EACCES;
	if (ipcperms(ns, &shp->shm_perm, acc_mode))
		goto out_unlock;

	err = security_shm_shmat(shp, shmaddr, shmflg);
	if (err)
		goto out_unlock;

	ipc_lock_object(&shp->shm_perm);
	path = shp->shm_file->f_path;
	path_get(&path);
	shp->shm_nattch++;
	size = i_size_read(path.dentry->d_inode);
	ipc_unlock_object(&shp->shm_perm);
	rcu_read_unlock();

	err = -ENOMEM;
	sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
	if (!sfd) {
		path_put(&path);
		goto out_nattch;
	}

	file = alloc_file(&path, f_mode,
			  is_file_hugepages(shp->shm_file) ?
				&shm_file_operations_huge :
				&shm_file_operations);
	err = PTR_ERR(file);
	if (IS_ERR(file)) {
		kfree(sfd);
		path_put(&path);
		goto out_nattch;
	}

	file->private_data = sfd;
	file->f_mapping = shp->shm_file->f_mapping;
	sfd->id = shp->shm_perm.id;
	sfd->ns = get_ipc_ns(ns);
	sfd->file = shp->shm_file;
	sfd->vm_ops = NULL;

	err = security_mmap_file(file, prot, flags);
	if (err)
		goto out_fput;

	down_write(&current->mm->mmap_sem);
	if (addr && !(shmflg & SHM_REMAP)) {
		err = -EINVAL;
		if (find_vma_intersection(current->mm, addr, addr + size))
			goto invalid;
		/*
		 * If shm segment goes below stack, make sure there is some
		 * space left for the stack to grow (at least 4 pages).
		 */
		if (addr < current->mm->start_stack &&
		    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
			goto invalid;
	}

	addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate);
	*raddr = addr;
	err = 0;
	if (IS_ERR_VALUE(addr))
		err = (long)addr;
invalid:
	up_write(&current->mm->mmap_sem);
	if (populate)
		mm_populate(addr, populate);

out_fput:
	fput(file);

out_nattch:
	down_write(&shm_ids(ns).rwsem);
	shp = shm_lock(ns, shmid);
	BUG_ON(IS_ERR(shp));
	shp->shm_nattch--;
	if (shm_may_destroy(ns, shp))
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rwsem);
	return err;

out_unlock:
	rcu_read_unlock();
out:
	return err;
}

SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
{
	unsigned long ret;
	long err;

	err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
	if (err)
		return err;
	force_successful_syscall_return();
	return (long)ret;
}
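
/*
 * do_shmat() hands the attach address back through "raddr" because a
 * valid mapping address can look like a negative errno; the wrapper
 * above uses force_successful_syscall_return() so such addresses are
 * not misreported as failures on architectures that flag errors that way.
 */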

/*
 * Detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = (unsigned long)shmaddr;
	int retval = -EINVAL;
#ifdef CONFIG_MMU
	loff_t size = 0;
	struct vm_area_struct *next;
#endif

	if (addr & ~PAGE_MASK)
		return retval;

	down_write(&mm->mmap_sem);

	/*
	 * This function tries to be smart and unmap shm segments that
	 * were modified by partial mlock or munmap calls:
	 * - It first determines the size of the shm segment that should be
	 *   unmapped: it searches for a vma that is backed by shm and that
	 *   started at address shmaddr. It records its size and then unmaps
	 *   it.
	 * - Then it unmaps all shm vmas that started at shmaddr and that
	 *   are within the initially determined size.
	 * Errors from do_munmap are ignored: the function only fails if
	 * it's called with invalid parameters or if it's called to unmap
	 * a part of a vma. Both calls in this function are for full vmas,
	 * the parameters are directly copied from the vma itself and always
	 * valid - therefore do_munmap cannot fail. (famous last words?)
	 */
	/*
	 * If it had been mremap()'d, the starting address would not
	 * match the usual checks anyway. So assume all vma's are
	 * above the starting address given.
	 */
	vma = find_vma(mm, addr);

#ifdef CONFIG_MMU
	while (vma) {
		next = vma->vm_next;

		/*
		 * Check if the starting address would match, i.e. it's
		 * a fragment created by mprotect() and/or munmap(), or it
		 * otherwise starts at this address with no hassles.
		 */
		if ((vma->vm_ops == &shm_vm_ops) &&
		    (vma->vm_start - addr) / PAGE_SIZE == vma->vm_pgoff) {

			size = file_inode(vma->vm_file)->i_size;
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
			/*
			 * We discovered the size of the shm segment, so
			 * break out of here and fall through to the next
			 * loop that uses the size information to stop
			 * searching for matching vma's.
			 */
			retval = 0;
			vma = next;
			break;
		}
		vma = next;
	}

	/*
	 * We need look no further than the maximum address a fragment
	 * could possibly have landed at. Also cast things to loff_t to
	 * prevent overflows and make comparisons vs. equal-width types.
	 */
	size = PAGE_ALIGN(size);
	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
		next = vma->vm_next;

		/* finding a matching vma now does not alter retval */
		if ((vma->vm_ops == &shm_vm_ops) &&
		    (vma->vm_start - addr) / PAGE_SIZE == vma->vm_pgoff)
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		vma = next;
	}

#else /* CONFIG_MMU */
	/*
	 * Under NOMMU conditions, the exact address to be destroyed must
	 * be given.
	 */
	if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
		do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		retval = 0;
	}

#endif

	up_write(&mm->mmap_sem);
	return retval;
}

#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
	struct user_namespace *user_ns = seq_user_ns(s);
	struct shmid_kernel *shp = it;
	unsigned long rss = 0, swp = 0;

	shm_add_rss_swap(shp, &rss, &swp);

#if BITS_PER_LONG <= 32
#define SIZE_SPEC "%10lu"
#else
#define SIZE_SPEC "%21lu"
#endif

	return seq_printf(s,
			  "%10d %10d  %4o " SIZE_SPEC " %5u %5u  "
			  "%5lu %5u %5u %5u %5u %10lu %10lu %10lu "
			  SIZE_SPEC " " SIZE_SPEC "\n",
			  shp->shm_perm.key,
			  shp->shm_perm.id,
			  shp->shm_perm.mode,
			  shp->shm_segsz,
			  shp->shm_cprid,
			  shp->shm_lprid,
			  shp->shm_nattch,
			  from_kuid_munged(user_ns, shp->shm_perm.uid),
			  from_kgid_munged(user_ns, shp->shm_perm.gid),
			  from_kuid_munged(user_ns, shp->shm_perm.cuid),
			  from_kgid_munged(user_ns, shp->shm_perm.cgid),
			  shp->shm_atim,
			  shp->shm_dtim,
			  shp->shm_ctim,
			  rss * PAGE_SIZE,
			  swp * PAGE_SIZE);
}
#endif