/*
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 *
 * Better ipc lock (kern_ipc_perm.lock) handling
 * Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013.
 */
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/mount.h>
#include <linux/ipc_namespace.h>

#include <linux/uaccess.h>

#include "util.h"
struct shm_file_data {
        int id;
        struct ipc_namespace *ns;
        struct file *file;
        const struct vm_operations_struct *vm_ops;
};

#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))
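/*
 * Illustrative sketch (not in the original source): shm_file_data() simply
 * reinterprets file->private_data, so the per-attach state set up in
 * do_shmat() below can be read back inside the file_operations callbacks:
 *
 *	file->private_data = sfd;	stash the state at attach time
 *	sfd = shm_file_data(file);	retrieve it in shm_mmap()/shm_release()
 */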
static const struct file_operations shm_file_operations;
static const struct vm_operations_struct shm_vm_ops;

#define shm_ids(ns)	((ns)->ids[IPC_SHM_IDS])

#define shm_unlock(shp)			\
	ipc_unlock(&(shp)->shm_perm)

static int newseg(struct ipc_namespace *, struct ipc_params *);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif
void shm_init_ns(struct ipc_namespace *ns)
{
        ns->shm_ctlmax = SHMMAX;
        ns->shm_ctlall = SHMALL;
        ns->shm_ctlmni = SHMMNI;
        ns->shm_rmid_forced = 0;
        ns->shm_tot = 0;
        ipc_init_ids(&shm_ids(ns));
}
/*
 * Called with shm_ids.rwsem (writer) and the shp structure locked.
 * Only shm_ids.rwsem remains locked on exit.
 */
static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
        struct shmid_kernel *shp;
        shp = container_of(ipcp, struct shmid_kernel, shm_perm);

        if (shp->shm_nattch) {
                shp->shm_perm.mode |= SHM_DEST;
                /* Do not find it any more */
                shp->shm_perm.key = IPC_PRIVATE;
                shm_unlock(shp);
        } else
                shm_destroy(ns, shp);
}
void shm_exit_ns(struct ipc_namespace *ns)
{
        free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
        idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
}
static int __init ipc_ns_init(void)
{
        shm_init_ns(&init_ipc_ns);
        return 0;
}

pure_initcall(ipc_ns_init);
void __init shm_init(void)
{
        ipc_init_proc_interface("sysvipc/shm",
#if BITS_PER_LONG <= 32
                                "       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime        rss       swap\n",
#else
                                "       key      shmid perms                  size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime                   rss                  swap\n",
#endif
                                IPC_SHM_IDS, sysvipc_shm_proc_show);
}
static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
{
        struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&shm_ids(ns), id);

        if (IS_ERR(ipcp))
                return ERR_CAST(ipcp);

        return container_of(ipcp, struct shmid_kernel, shm_perm);
}
static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id)
{
        struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id);

        if (IS_ERR(ipcp))
                return ERR_CAST(ipcp);

        return container_of(ipcp, struct shmid_kernel, shm_perm);
}
/*
 * shm_lock_(check_) routines are called in the paths where the rwsem
 * is not necessarily held.
 */
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
        struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);

        /*
         * We raced in the idr lookup or with shm_destroy().  Either way, the
         * ID is busted.
         */
        WARN_ON(IS_ERR(ipcp));

        return container_of(ipcp, struct shmid_kernel, shm_perm);
}
static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
{
        rcu_read_lock();
        ipc_lock_object(&ipcp->shm_perm);
}
static void shm_rcu_free(struct rcu_head *head)
{
        struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
        struct shmid_kernel *shp = ipc_rcu_to_struct(p);

        security_shm_free(shp);
        ipc_rcu_free(head);
}
static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
{
        list_del(&s->shm_clist);
        ipc_rmid(&shm_ids(ns), &s->shm_perm);
}
/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        struct shmid_kernel *shp;

        shp = shm_lock(sfd->ns, sfd->id);
        shp->shm_atim = get_seconds();
        shp->shm_lprid = task_tgid_vnr(current);
        shp->shm_nattch++;
        shm_unlock(shp);
}
/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @ns: namespace
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.rwsem (writer) locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
        struct file *shm_file;

        shm_file = shp->shm_file;
        shp->shm_file = NULL;
        ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
        shm_rmid(ns, shp);
        shm_unlock(shp);
        if (!is_file_hugepages(shm_file))
                shmem_lock(shm_file, 0, shp->mlock_user);
        else if (shp->mlock_user)
                user_shm_unlock(i_size_read(file_inode(shm_file)),
                                shp->mlock_user);
        fput(shm_file);
        ipc_rcu_putref(shp, shm_rcu_free);
}
/*
 * shm_may_destroy - identifies whether shm segment should be destroyed now
 *
 * Returns true if and only if there are no active users of the segment and
 * one of the following is true:
 *
 * 1) shmctl(id, IPC_RMID, NULL) was called for this shp
 *
 * 2) sysctl kernel.shm_rmid_forced is set to 1.
 */
static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
        return (shp->shm_nattch == 0) &&
               (ns->shm_rmid_forced ||
                (shp->shm_perm.mode & SHM_DEST));
}
/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        struct shmid_kernel *shp;
        struct ipc_namespace *ns = sfd->ns;

        down_write(&shm_ids(ns).rwsem);
        /* remove from the list of attaches of the shm segment */
        shp = shm_lock(ns, sfd->id);
        shp->shm_lprid = task_tgid_vnr(current);
        shp->shm_dtim = get_seconds();
        shp->shm_nattch--;
        if (shm_may_destroy(ns, shp))
                shm_destroy(ns, shp);
        else
                shm_unlock(shp);
        up_write(&shm_ids(ns).rwsem);
}
/* Called with ns->shm_ids(ns).rwsem locked */
static int shm_try_destroy_orphaned(int id, void *p, void *data)
{
        struct ipc_namespace *ns = data;
        struct kern_ipc_perm *ipcp = p;
        struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);

        /*
         * We want to destroy segments without users and with already
         * exit'ed originating process.
         *
         * As shp->* are changed under rwsem, it's safe to skip shp locking.
         */
        if (shp->shm_creator != NULL)
                return 0;

        if (shm_may_destroy(ns, shp)) {
                shm_lock_by_ptr(shp);
                shm_destroy(ns, shp);
        }
        return 0;
}
void shm_destroy_orphaned(struct ipc_namespace *ns)
{
        down_write(&shm_ids(ns).rwsem);
        if (shm_ids(ns).in_use)
                idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
        up_write(&shm_ids(ns).rwsem);
}
/* Locking assumes this will only be called with task == current */
void exit_shm(struct task_struct *task)
{
        struct ipc_namespace *ns = task->nsproxy->ipc_ns;
        struct shmid_kernel *shp, *n;

        if (list_empty(&task->sysvshm.shm_clist))
                return;

        /*
         * If kernel.shm_rmid_forced is not set then only keep track of
         * which shmids are orphaned, so that a later set of the sysctl
         * can clean them up.
         */
        if (!ns->shm_rmid_forced) {
                down_read(&shm_ids(ns).rwsem);
                list_for_each_entry(shp, &task->sysvshm.shm_clist, shm_clist)
                        shp->shm_creator = NULL;
                /*
                 * Only under read lock but we are only called on current
                 * so no entry on the list will be shared.
                 */
                list_del(&task->sysvshm.shm_clist);
                up_read(&shm_ids(ns).rwsem);
                return;
        }

        /*
         * Destroy all already created segments, that were not yet mapped,
         * and mark any mapped as orphan to cover the sysctl toggling.
         * Destroy is skipped if shm_may_destroy() returns false.
         */
        down_write(&shm_ids(ns).rwsem);
        list_for_each_entry_safe(shp, n, &task->sysvshm.shm_clist, shm_clist) {
                shp->shm_creator = NULL;

                if (shm_may_destroy(ns, shp)) {
                        shm_lock_by_ptr(shp);
                        shm_destroy(ns, shp);
                }
        }

        /* Remove the list head from any segments still attached. */
        list_del(&task->sysvshm.shm_clist);
        up_write(&shm_ids(ns).rwsem);
}
static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);

        return sfd->vm_ops->fault(vma, vmf);
}
#ifdef CONFIG_NUMA
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        int err = 0;

        if (sfd->vm_ops->set_policy)
                err = sfd->vm_ops->set_policy(vma, new);
        return err;
}

static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
                                        unsigned long addr)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        struct mempolicy *pol = NULL;

        if (sfd->vm_ops->get_policy)
                pol = sfd->vm_ops->get_policy(vma, addr);
        else if (vma->vm_policy)
                pol = vma->vm_policy;

        return pol;
}
#endif
static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct shm_file_data *sfd = shm_file_data(file);
        int ret;

        ret = sfd->file->f_op->mmap(sfd->file, vma);
        if (ret != 0)
                return ret;
        sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
        WARN_ON(!sfd->vm_ops->fault);
#endif
        vma->vm_ops = &shm_vm_ops;
        shm_open(vma);

        return ret;
}
static int shm_release(struct inode *ino, struct file *file)
{
        struct shm_file_data *sfd = shm_file_data(file);

        put_ipc_ns(sfd->ns);
        shm_file_data(file) = NULL;
        kfree(sfd);
        return 0;
}
static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
        struct shm_file_data *sfd = shm_file_data(file);

        if (!sfd->file->f_op->fsync)
                return -EINVAL;
        return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
}
static long shm_fallocate(struct file *file, int mode, loff_t offset,
                          loff_t len)
{
        struct shm_file_data *sfd = shm_file_data(file);

        if (!sfd->file->f_op->fallocate)
                return -EOPNOTSUPP;
        return sfd->file->f_op->fallocate(file, mode, offset, len);
}
static unsigned long shm_get_unmapped_area(struct file *file,
        unsigned long addr, unsigned long len, unsigned long pgoff,
        unsigned long flags)
{
        struct shm_file_data *sfd = shm_file_data(file);

        return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
                                                  pgoff, flags);
}
static const struct file_operations shm_file_operations = {
        .mmap		= shm_mmap,
        .fsync		= shm_fsync,
        .release	= shm_release,
#ifndef CONFIG_MMU
        .get_unmapped_area	= shm_get_unmapped_area,
#endif
        .llseek		= noop_llseek,
        .fallocate	= shm_fallocate,
};
static const struct file_operations shm_file_operations_huge = {
        .mmap		= shm_mmap,
        .fsync		= shm_fsync,
        .release	= shm_release,
        .get_unmapped_area	= shm_get_unmapped_area,
        .llseek		= noop_llseek,
        .fallocate	= shm_fallocate,
};
int is_file_shm_hugepages(struct file *file)
{
        return file->f_op == &shm_file_operations_huge;
}
static const struct vm_operations_struct shm_vm_ops = {
        .open	= shm_open,	/* callback for a new vm-area open */
        .close	= shm_close,	/* callback for when the vm-area is released */
        .fault	= shm_fault,
#if defined(CONFIG_NUMA)
        .set_policy = shm_set_policy,
        .get_policy = shm_get_policy,
#endif
};
/**
 * newseg - Create a new shared memory segment
 * @ns: namespace
 * @params: ptr to the structure that contains key, size and shmflg
 *
 * Called with shm_ids.rwsem held as a writer.
 */
static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
{
        key_t key = params->key;
        int shmflg = params->flg;
        size_t size = params->u.size;
        int error;
        struct shmid_kernel *shp;
        size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        struct file *file;
        char name[13];
        int id;
        vm_flags_t acctflag = 0;

        if (size < SHMMIN || size > ns->shm_ctlmax)
                return -EINVAL;

        if (numpages << PAGE_SHIFT < size)
                return -ENOSPC;

        if (ns->shm_tot + numpages < ns->shm_tot ||
                        ns->shm_tot + numpages > ns->shm_ctlall)
                return -ENOSPC;

        shp = ipc_rcu_alloc(sizeof(*shp));
        if (!shp)
                return -ENOMEM;

        shp->shm_perm.key = key;
        shp->shm_perm.mode = (shmflg & S_IRWXUGO);
        shp->mlock_user = NULL;

        shp->shm_perm.security = NULL;
        error = security_shm_alloc(shp);
        if (error) {
                ipc_rcu_putref(shp, ipc_rcu_free);
                return error;
        }

        sprintf(name, "SYSV%08x", key);
        if (shmflg & SHM_HUGETLB) {
                struct hstate *hs;
                size_t hugesize;

                hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
                if (!hs) {
                        error = -EINVAL;
                        goto no_file;
                }
                hugesize = ALIGN(size, huge_page_size(hs));

                /* hugetlb_file_setup applies strict accounting */
                if (shmflg & SHM_NORESERVE)
                        acctflag = VM_NORESERVE;
                file = hugetlb_file_setup(name, hugesize, acctflag,
                                &shp->mlock_user, HUGETLB_SHMFS_INODE,
                                (shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
        } else {
                /*
                 * Do not allow no accounting for OVERCOMMIT_NEVER, even
                 * if it's asked for.
                 */
                if ((shmflg & SHM_NORESERVE) &&
                                sysctl_overcommit_memory != OVERCOMMIT_NEVER)
                        acctflag = VM_NORESERVE;
                file = shmem_kernel_file_setup(name, size, acctflag);
        }
        error = PTR_ERR(file);
        if (IS_ERR(file))
                goto no_file;

        shp->shm_cprid = task_tgid_vnr(current);
        shp->shm_lprid = 0;
        shp->shm_atim = shp->shm_dtim = 0;
        shp->shm_ctim = get_seconds();
        shp->shm_segsz = size;
        shp->shm_nattch = 0;
        shp->shm_file = file;
        shp->shm_creator = current;

        id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
        if (id < 0) {
                error = id;
                goto no_id;
        }

        list_add(&shp->shm_clist, &current->sysvshm.shm_clist);

        /*
         * shmid gets reported as "inode#" in /proc/pid/maps.
         * proc-ps tools use this. Changing this will break them.
         */
        file_inode(file)->i_ino = shp->shm_perm.id;

        ns->shm_tot += numpages;
        error = shp->shm_perm.id;

        ipc_unlock_object(&shp->shm_perm);
        rcu_read_unlock();
        return error;

no_id:
        if (is_file_hugepages(file) && shp->mlock_user)
                user_shm_unlock(size, shp->mlock_user);
        fput(file);
no_file:
        ipc_rcu_putref(shp, shm_rcu_free);
        return error;
}
/*
 * Called with shm_ids.rwsem and ipcp locked.
 */
static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
{
        struct shmid_kernel *shp;

        shp = container_of(ipcp, struct shmid_kernel, shm_perm);
        return security_shm_associate(shp, shmflg);
}
/*
 * Called with shm_ids.rwsem and ipcp locked.
 */
static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
                                struct ipc_params *params)
{
        struct shmid_kernel *shp;

        shp = container_of(ipcp, struct shmid_kernel, shm_perm);
        if (shp->shm_segsz < params->u.size)
                return -EINVAL;

        return 0;
}
SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
{
        struct ipc_namespace *ns;
        static const struct ipc_ops shm_ops = {
                .getnew = newseg,
                .associate = shm_security,
                .more_checks = shm_more_checks,
        };
        struct ipc_params shm_params;

        ns = current->nsproxy->ipc_ns;

        shm_params.key = key;
        shm_params.flg = shmflg;
        shm_params.u.size = size;

        return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
}
static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
        switch (version) {
        case IPC_64:
                return copy_to_user(buf, in, sizeof(*in));
        case IPC_OLD:
            {
                struct shmid_ds out;

                memset(&out, 0, sizeof(out));
                ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
                out.shm_segsz	= in->shm_segsz;
                out.shm_atime	= in->shm_atime;
                out.shm_dtime	= in->shm_dtime;
                out.shm_ctime	= in->shm_ctime;
                out.shm_cpid	= in->shm_cpid;
                out.shm_lpid	= in->shm_lpid;
                out.shm_nattch	= in->shm_nattch;

                return copy_to_user(buf, &out, sizeof(out));
            }
        default:
                return -EINVAL;
        }
}
static inline unsigned long
copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
{
        switch (version) {
        case IPC_64:
                if (copy_from_user(out, buf, sizeof(*out)))
                        return -EFAULT;
                return 0;
        case IPC_OLD:
            {
                struct shmid_ds tbuf_old;

                if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
                        return -EFAULT;

                out->shm_perm.uid	= tbuf_old.shm_perm.uid;
                out->shm_perm.gid	= tbuf_old.shm_perm.gid;
                out->shm_perm.mode	= tbuf_old.shm_perm.mode;

                return 0;
            }
        default:
                return -EINVAL;
        }
}
static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
        switch (version) {
        case IPC_64:
                return copy_to_user(buf, in, sizeof(*in));
        case IPC_OLD:
            {
                struct shminfo out;

                if (in->shmmax > INT_MAX)
                        out.shmmax = INT_MAX;
                else
                        out.shmmax = (int)in->shmmax;

                out.shmmin	= in->shmmin;
                out.shmmni	= in->shmmni;
                out.shmseg	= in->shmseg;
                out.shmall	= in->shmall;

                return copy_to_user(buf, &out, sizeof(out));
            }
        default:
                return -EINVAL;
        }
}
/*
 * Calculate and add used RSS and swap pages of a shm.
 * Called with shm_ids.rwsem held as a reader
 */
static void shm_add_rss_swap(struct shmid_kernel *shp,
        unsigned long *rss_add, unsigned long *swp_add)
{
        struct inode *inode;

        inode = file_inode(shp->shm_file);

        if (is_file_hugepages(shp->shm_file)) {
                struct address_space *mapping = inode->i_mapping;
                struct hstate *h = hstate_file(shp->shm_file);
                *rss_add += pages_per_huge_page(h) * mapping->nrpages;
        } else {
#ifdef CONFIG_SHMEM
                struct shmem_inode_info *info = SHMEM_I(inode);
                spin_lock(&info->lock);
                *rss_add += inode->i_mapping->nrpages;
                *swp_add += info->swapped;
                spin_unlock(&info->lock);
#else
                *rss_add += inode->i_mapping->nrpages;
#endif
        }
}
/*
 * Called with shm_ids.rwsem held as a reader
 */
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
                unsigned long *swp)
{
        int next_id;
        int total, in_use;

        *rss = 0;
        *swp = 0;

        in_use = shm_ids(ns).in_use;

        for (total = 0, next_id = 0; total < in_use; next_id++) {
                struct kern_ipc_perm *ipc;
                struct shmid_kernel *shp;

                ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
                if (ipc == NULL)
                        continue;
                shp = container_of(ipc, struct shmid_kernel, shm_perm);

                shm_add_rss_swap(shp, rss, swp);

                total++;
        }
}
/*
 * This function handles some shmctl commands which require the rwsem
 * to be held in write mode.
 * NOTE: no locks must be held, the rwsem is taken inside this function.
 */
static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
                       struct shmid_ds __user *buf, int version)
{
        struct kern_ipc_perm *ipcp;
        struct shmid64_ds shmid64;
        struct shmid_kernel *shp;
        int err;

        if (cmd == IPC_SET) {
                if (copy_shmid_from_user(&shmid64, buf, version))
                        return -EFAULT;
        }

        down_write(&shm_ids(ns).rwsem);
        rcu_read_lock();

        ipcp = ipcctl_pre_down_nolock(ns, &shm_ids(ns), shmid, cmd,
                                      &shmid64.shm_perm, 0);
        if (IS_ERR(ipcp)) {
                err = PTR_ERR(ipcp);
                goto out_unlock1;
        }

        shp = container_of(ipcp, struct shmid_kernel, shm_perm);

        err = security_shm_shmctl(shp, cmd);
        if (err)
                goto out_unlock1;

        switch (cmd) {
        case IPC_RMID:
                ipc_lock_object(&shp->shm_perm);
                /* do_shm_rmid unlocks the ipc object and rcu */
                do_shm_rmid(ns, ipcp);
                goto out_up;
        case IPC_SET:
                ipc_lock_object(&shp->shm_perm);
                err = ipc_update_perm(&shmid64.shm_perm, ipcp);
                if (err)
                        goto out_unlock0;
                shp->shm_ctim = get_seconds();
                break;
        default:
                err = -EINVAL;
                goto out_unlock1;
        }

out_unlock0:
        ipc_unlock_object(&shp->shm_perm);
out_unlock1:
        rcu_read_unlock();
out_up:
        up_write(&shm_ids(ns).rwsem);
        return err;
}
static int shmctl_nolock(struct ipc_namespace *ns, int shmid,
                         int cmd, int version, void __user *buf)
{
        int err;
        struct shmid_kernel *shp;

        /* preliminary security checks for *_INFO */
        if (cmd == IPC_INFO || cmd == SHM_INFO) {
                err = security_shm_shmctl(NULL, cmd);
                if (err)
                        return err;
        }

        switch (cmd) {
        case IPC_INFO:
        {
                struct shminfo64 shminfo;

                memset(&shminfo, 0, sizeof(shminfo));
                shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
                shminfo.shmmax = ns->shm_ctlmax;
                shminfo.shmall = ns->shm_ctlall;

                shminfo.shmmin = SHMMIN;
                if (copy_shminfo_to_user(buf, &shminfo, version))
                        return -EFAULT;

                down_read(&shm_ids(ns).rwsem);
                err = ipc_get_maxid(&shm_ids(ns));
                up_read(&shm_ids(ns).rwsem);

                if (err < 0)
                        err = 0;
                goto out;
        }
        case SHM_INFO:
        {
                struct shm_info shm_info;

                memset(&shm_info, 0, sizeof(shm_info));
                down_read(&shm_ids(ns).rwsem);
                shm_info.used_ids = shm_ids(ns).in_use;
                shm_get_stat(ns, &shm_info.shm_rss, &shm_info.shm_swp);
                shm_info.shm_tot = ns->shm_tot;
                shm_info.swap_attempts = 0;
                shm_info.swap_successes = 0;
                err = ipc_get_maxid(&shm_ids(ns));
                up_read(&shm_ids(ns).rwsem);
                if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
                        err = -EFAULT;
                        goto out;
                }

                err = err < 0 ? 0 : err;
                goto out;
        }
        case SHM_STAT:
        case IPC_STAT:
        {
                struct shmid64_ds tbuf;
                int result;

                rcu_read_lock();
                if (cmd == SHM_STAT) {
                        shp = shm_obtain_object(ns, shmid);
                        if (IS_ERR(shp)) {
                                err = PTR_ERR(shp);
                                goto out_unlock;
                        }
                        result = shp->shm_perm.id;
                } else {
                        shp = shm_obtain_object_check(ns, shmid);
                        if (IS_ERR(shp)) {
                                err = PTR_ERR(shp);
                                goto out_unlock;
                        }
                        result = 0;
                }

                err = -EACCES;
                if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
                        goto out_unlock;

                err = security_shm_shmctl(shp, cmd);
                if (err)
                        goto out_unlock;

                memset(&tbuf, 0, sizeof(tbuf));
                kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
                tbuf.shm_segsz	= shp->shm_segsz;
                tbuf.shm_atime	= shp->shm_atim;
                tbuf.shm_dtime	= shp->shm_dtim;
                tbuf.shm_ctime	= shp->shm_ctim;
                tbuf.shm_cpid	= shp->shm_cprid;
                tbuf.shm_lpid	= shp->shm_lprid;
                tbuf.shm_nattch	= shp->shm_nattch;
                rcu_read_unlock();

                if (copy_shmid_to_user(buf, &tbuf, version))
                        err = -EFAULT;
                else
                        err = result;
                goto out;
        }
        default:
                return -EINVAL;
        }

out_unlock:
        rcu_read_unlock();
out:
        return err;
}
SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
{
        struct shmid_kernel *shp;
        int err, version;
        struct ipc_namespace *ns;

        if (cmd < 0 || shmid < 0)
                return -EINVAL;

        version = ipc_parse_version(&cmd);
        ns = current->nsproxy->ipc_ns;

        switch (cmd) {
        case IPC_INFO:
        case SHM_INFO:
        case SHM_STAT:
        case IPC_STAT:
                return shmctl_nolock(ns, shmid, cmd, version, buf);
        case IPC_RMID:
        case IPC_SET:
                return shmctl_down(ns, shmid, cmd, buf, version);
        case SHM_LOCK:
        case SHM_UNLOCK:
        {
                struct file *shm_file;

                rcu_read_lock();
                shp = shm_obtain_object_check(ns, shmid);
                if (IS_ERR(shp)) {
                        err = PTR_ERR(shp);
                        goto out_unlock1;
                }

                audit_ipc_obj(&(shp->shm_perm));
                err = security_shm_shmctl(shp, cmd);
                if (err)
                        goto out_unlock1;

                ipc_lock_object(&shp->shm_perm);

                /* check if shm_destroy() is tearing down shp */
                if (!ipc_valid_object(&shp->shm_perm)) {
                        err = -EIDRM;
                        goto out_unlock0;
                }

                if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
                        kuid_t euid = current_euid();
                        if (!uid_eq(euid, shp->shm_perm.uid) &&
                            !uid_eq(euid, shp->shm_perm.cuid)) {
                                err = -EPERM;
                                goto out_unlock0;
                        }
                        if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) {
                                err = -EPERM;
                                goto out_unlock0;
                        }
                }

                shm_file = shp->shm_file;
                if (is_file_hugepages(shm_file))
                        goto out_unlock0;

                if (cmd == SHM_LOCK) {
                        struct user_struct *user = current_user();
                        err = shmem_lock(shm_file, 1, user);
                        if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
                                shp->shm_perm.mode |= SHM_LOCKED;
                                shp->mlock_user = user;
                        }
                        goto out_unlock0;
                }

                /* SHM_UNLOCK */
                if (!(shp->shm_perm.mode & SHM_LOCKED))
                        goto out_unlock0;
                shmem_lock(shm_file, 0, shp->mlock_user);
                shp->shm_perm.mode &= ~SHM_LOCKED;
                shp->mlock_user = NULL;
                get_file(shm_file);
                ipc_unlock_object(&shp->shm_perm);
                rcu_read_unlock();
                shmem_unlock_mapping(shm_file->f_mapping);

                fput(shm_file);
                return err;
        }
        default:
                return -EINVAL;
        }

out_unlock0:
        ipc_unlock_object(&shp->shm_perm);
out_unlock1:
        rcu_read_unlock();
        return err;
}
/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
              unsigned long shmlba)
{
        struct shmid_kernel *shp;
        unsigned long addr;
        unsigned long size;
        struct file *file;
        int err;
        unsigned long flags;
        unsigned long prot;
        int acc_mode;
        struct ipc_namespace *ns;
        struct shm_file_data *sfd;
        struct path path;
        fmode_t f_mode;
        unsigned long populate = 0;

        err = -EINVAL;
        if (shmid < 0)
                goto out;
        else if ((addr = (ulong)shmaddr)) {
                if (addr & (shmlba - 1)) {
                        if (shmflg & SHM_RND)
                                addr &= ~(shmlba - 1);	/* round down */
                        else
#ifndef __ARCH_FORCE_SHMLBA
                                if (addr & ~PAGE_MASK)
#endif
                                        goto out;
                }
                flags = MAP_SHARED | MAP_FIXED;
        } else {
                if ((shmflg & SHM_REMAP))
                        goto out;

                flags = MAP_SHARED;
        }

        if (shmflg & SHM_RDONLY) {
                prot = PROT_READ;
                acc_mode = S_IRUGO;
                f_mode = FMODE_READ;
        } else {
                prot = PROT_READ | PROT_WRITE;
                acc_mode = S_IRUGO | S_IWUGO;
                f_mode = FMODE_READ | FMODE_WRITE;
        }
        if (shmflg & SHM_EXEC) {
                prot |= PROT_EXEC;
                acc_mode |= S_IXUGO;
        }

        /*
         * We cannot rely on the fs check since SYSV IPC does have an
         * additional creator id...
         */
        ns = current->nsproxy->ipc_ns;
        rcu_read_lock();
        shp = shm_obtain_object_check(ns, shmid);
        if (IS_ERR(shp)) {
                err = PTR_ERR(shp);
                goto out_unlock;
        }

        err = -EACCES;
        if (ipcperms(ns, &shp->shm_perm, acc_mode))
                goto out_unlock;

        err = security_shm_shmat(shp, shmaddr, shmflg);
        if (err)
                goto out_unlock;

        ipc_lock_object(&shp->shm_perm);

        /* check if shm_destroy() is tearing down shp */
        if (!ipc_valid_object(&shp->shm_perm)) {
                ipc_unlock_object(&shp->shm_perm);
                err = -EIDRM;
                goto out_unlock;
        }

        path = shp->shm_file->f_path;
        path_get(&path);
        shp->shm_nattch++;
        size = i_size_read(d_inode(path.dentry));
        ipc_unlock_object(&shp->shm_perm);
        rcu_read_unlock();

        err = -ENOMEM;
        sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
        if (!sfd) {
                path_put(&path);
                goto out_nattch;
        }

        file = alloc_file(&path, f_mode,
                          is_file_hugepages(shp->shm_file) ?
                                &shm_file_operations_huge :
                                &shm_file_operations);
        err = PTR_ERR(file);
        if (IS_ERR(file)) {
                kfree(sfd);
                path_put(&path);
                goto out_nattch;
        }

        file->private_data = sfd;
        file->f_mapping = shp->shm_file->f_mapping;
        sfd->id = shp->shm_perm.id;
        sfd->ns = get_ipc_ns(ns);
        sfd->file = shp->shm_file;
        sfd->vm_ops = NULL;

        err = security_mmap_file(file, prot, flags);
        if (err)
                goto out_fput;

        down_write(&current->mm->mmap_sem);
        if (addr && !(shmflg & SHM_REMAP)) {
                err = -EINVAL;
                if (addr + size < addr)
                        goto invalid;

                if (find_vma_intersection(current->mm, addr, addr + size))
                        goto invalid;
        }

        addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate);
        *raddr = addr;
        err = 0;
        if (IS_ERR_VALUE(addr))
                err = (long)addr;
invalid:
        up_write(&current->mm->mmap_sem);
        if (populate)
                mm_populate(addr, populate);

out_fput:
        fput(file);

out_nattch:
        down_write(&shm_ids(ns).rwsem);
        shp = shm_lock(ns, shmid);
        shp->shm_nattch--;
        if (shm_may_destroy(ns, shp))
                shm_destroy(ns, shp);
        else
                shm_unlock(shp);
        up_write(&shm_ids(ns).rwsem);
        return err;

out_unlock:
        rcu_read_unlock();
out:
        return err;
}
SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
{
        unsigned long ret;
        long err;

        err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
        if (err)
                return err;
        force_successful_syscall_return();
        return (long)ret;
}
/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long addr = (unsigned long)shmaddr;
        int retval = -EINVAL;
#ifdef CONFIG_MMU
        loff_t size = 0;
        struct file *file;
        struct vm_area_struct *next;
#endif

        if (addr & ~PAGE_MASK)
                return retval;

        down_write(&mm->mmap_sem);

        /*
         * This function tries to be smart and unmap shm segments that
         * were modified by partial mlock or munmap calls:
         * - It first determines the size of the shm segment that should be
         *   unmapped: It searches for a vma that is backed by shm and that
         *   started at address shmaddr. It records it's size and then unmaps
         *   it.
         * - Then it unmaps all shm vmas that started at shmaddr and that
         *   are within the initially determined size and that are from the
         *   same shm segment from which we determined the size.
         * Errors from do_munmap are ignored: the function only fails if
         * it's called with invalid parameters or if it's called to unmap
         * a part of a vma. Both calls in this function are for full vmas,
         * the parameters are directly copied from the vma itself and always
         * valid - therefore do_munmap cannot fail. (famous last words?)
         */
        /*
         * If it had been mremap()'d, the starting address would not
         * match the usual checks anyway. So assume all vma's are
         * above the starting address given.
         */
        vma = find_vma(mm, addr);

#ifdef CONFIG_MMU
        while (vma) {
                next = vma->vm_next;

                /*
                 * Check if the starting address would match, i.e. it's
                 * a fragment created by mprotect() and/or munmap(), or it
                 * otherwise it starts at this address with no hassles.
                 */
                if ((vma->vm_ops == &shm_vm_ops) &&
                        (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {

                        /*
                         * Record the file of the shm segment being
                         * unmapped.  With mremap(), someone could place
                         * page from another segment but with equal offsets
                         * in the range we are unmapping.
                         */
                        file = vma->vm_file;
                        size = i_size_read(file_inode(vma->vm_file));
                        do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
                        /*
                         * We discovered the size of the shm segment, so
                         * break out of here and fall through to the next
                         * loop that uses the size information to stop
                         * searching for matching vma's.
                         */
                        retval = 0;
                        vma = next;
                        break;
                }
                vma = next;
        }

        /*
         * We need look no further than the maximum address a fragment
         * could possibly have landed at. Also cast things to loff_t to
         * prevent overflows and make comparisons vs. equal-width types.
         */
        size = PAGE_ALIGN(size);
        while (vma && (loff_t)(vma->vm_end - addr) <= size) {
                next = vma->vm_next;

                /* finding a matching vma now does not alter retval */
                if ((vma->vm_ops == &shm_vm_ops) &&
                    ((vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) &&
                    (vma->vm_file == file))
                        do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
                vma = next;
        }

#else	/* CONFIG_MMU */
        /* under NOMMU conditions, the exact address to be destroyed must be
         * given
         */
        if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
                do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
                retval = 0;
        }

#endif

        up_write(&mm->mmap_sem);
        return retval;
}
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
        struct user_namespace *user_ns = seq_user_ns(s);
        struct shmid_kernel *shp = it;
        unsigned long rss = 0, swp = 0;

        shm_add_rss_swap(shp, &rss, &swp);

#if BITS_PER_LONG <= 32
#define SIZE_SPEC "%10lu"
#else
#define SIZE_SPEC "%21lu"
#endif

        seq_printf(s,
                   "%10d %10d %4o " SIZE_SPEC " %5u %5u "
                   "%5lu %5u %5u %5u %5u %10lu %10lu %10lu "
                   SIZE_SPEC " " SIZE_SPEC "\n",
                   shp->shm_perm.key,
                   shp->shm_perm.id,
                   shp->shm_perm.mode,
                   shp->shm_segsz,
                   shp->shm_cprid,
                   shp->shm_lprid,
                   shp->shm_nattch,
                   from_kuid_munged(user_ns, shp->shm_perm.uid),
                   from_kgid_munged(user_ns, shp->shm_perm.gid),
                   from_kuid_munged(user_ns, shp->shm_perm.cuid),
                   from_kgid_munged(user_ns, shp->shm_perm.cgid),