/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 *	 Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 *	 Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 *
 * Better ipc lock (kern_ipc_perm.lock) handling
 * Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013.
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/mount.h>
#include <linux/ipc_namespace.h>

#include <asm/uaccess.h>

#include "util.h"

struct shm_file_data {
	int id;
	struct ipc_namespace *ns;
	struct file *file;
	const struct vm_operations_struct *vm_ops;
};

#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))

static const struct file_operations shm_file_operations;
static const struct vm_operations_struct shm_vm_ops;

#define shm_ids(ns)	((ns)->ids[IPC_SHM_IDS])

#define shm_unlock(shp)			\
	ipc_unlock(&(shp)->shm_perm)

static int newseg(struct ipc_namespace *, struct ipc_params *);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif

void shm_init_ns(struct ipc_namespace *ns)
{
	ns->shm_ctlmax = SHMMAX;
	ns->shm_ctlall = SHMALL;
	ns->shm_ctlmni = SHMMNI;
	ns->shm_rmid_forced = 0;
	ns->shm_tot = 0;
	ipc_init_ids(&shm_ids(ns));
}

/*
 * Called with shm_ids.rwsem (writer) and the shp structure locked.
 * Only shm_ids.rwsem remains locked on exit.
 */
static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct shmid_kernel *shp;
	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	if (shp->shm_nattch){
		shp->shm_perm.mode |= SHM_DEST;
		/* Do not find it any more */
		shp->shm_perm.key = IPC_PRIVATE;
		shm_unlock(shp);
	} else
		shm_destroy(ns, shp);
}

#ifdef CONFIG_IPC_NS
void shm_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
	idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
}
#endif

static int __init ipc_ns_init(void)
{
	shm_init_ns(&init_ipc_ns);
	return 0;
}

pure_initcall(ipc_ns_init);

void __init shm_init (void)
{
	ipc_init_proc_interface("sysvipc/shm",
#if BITS_PER_LONG <= 32
				"       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime        rss       swap\n",
#else
				"       key      shmid perms                  size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime                   rss                  swap\n",
#endif
				IPC_SHM_IDS, sysvipc_shm_proc_show);
}

static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

/*
 * shm_lock_(check_) routines are called in the paths where the rwsem
 * is not necessarily held.
 */
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct shmid_kernel *)ipcp;

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
{
	rcu_read_lock();
	ipc_lock_object(&ipcp->shm_perm);
}

static void shm_rcu_free(struct rcu_head *head)
{
	struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
	struct shmid_kernel *shp = ipc_rcu_to_struct(p);

	security_shm_free(shp);
	ipc_rcu_free(head);
}

static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
{
	ipc_rmid(&shm_ids(ns), &s->shm_perm);
}


/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;

	shp = shm_lock(sfd->ns, sfd->id);
	BUG_ON(IS_ERR(shp));
	shp->shm_atim = get_seconds();
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_nattch++;
	shm_unlock(shp);
}

/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @ns: namespace
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.rwsem (writer) locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	shm_rmid(ns, shp);
	shm_unlock(shp);
	if (!is_file_hugepages(shp->shm_file))
		shmem_lock(shp->shm_file, 0, shp->mlock_user);
	else if (shp->mlock_user)
		user_shm_unlock(file_inode(shp->shm_file)->i_size,
				shp->mlock_user);
	fput (shp->shm_file);
	ipc_rcu_putref(shp, shm_rcu_free);
}

/*
 * shm_may_destroy - identifies whether shm segment should be destroyed now
 *
 * Returns true if and only if there are no active users of the segment and
 * one of the following is true:
 *
 * 1) shmctl(id, IPC_RMID, NULL) was called for this shp
 *
 * 2) sysctl kernel.shm_rmid_forced is set to 1.
 */
static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	return (shp->shm_nattch == 0) &&
	       (ns->shm_rmid_forced ||
		(shp->shm_perm.mode & SHM_DEST));
}

/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
	struct file * file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;
	struct ipc_namespace *ns = sfd->ns;

	down_write(&shm_ids(ns).rwsem);
	/* remove from the list of attaches of the shm segment */
	shp = shm_lock(ns, sfd->id);
	BUG_ON(IS_ERR(shp));
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_dtim = get_seconds();
	shp->shm_nattch--;
	if (shm_may_destroy(ns, shp))
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rwsem);
}

/* Called with ns->shm_ids(ns).rwsem locked */
static int shm_try_destroy_current(int id, void *p, void *data)
{
	struct ipc_namespace *ns = data;
	struct kern_ipc_perm *ipcp = p;
	struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	if (shp->shm_creator != current)
		return 0;

	/*
	 * Mark it as orphaned to destroy the segment when
	 * kernel.shm_rmid_forced is changed.
	 * It is noop if the following shm_may_destroy() returns true.
	 */
	shp->shm_creator = NULL;

	/*
	 * Don't even try to destroy it.  If shm_rmid_forced=0 and IPC_RMID
	 * is not set, it shouldn't be deleted here.
	 */
	if (!ns->shm_rmid_forced)
		return 0;

	if (shm_may_destroy(ns, shp)) {
		shm_lock_by_ptr(shp);
		shm_destroy(ns, shp);
	}
	return 0;
}

/* Called with ns->shm_ids(ns).rwsem locked */
static int shm_try_destroy_orphaned(int id, void *p, void *data)
{
	struct ipc_namespace *ns = data;
	struct kern_ipc_perm *ipcp = p;
	struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	/*
	 * We want to destroy segments without users and with already
	 * exit'ed originating process.
	 *
	 * As shp->* are changed under rwsem, it's safe to skip shp locking.
	 */
	if (shp->shm_creator != NULL)
		return 0;

	if (shm_may_destroy(ns, shp)) {
		shm_lock_by_ptr(shp);
		shm_destroy(ns, shp);
	}
	return 0;
}

void shm_destroy_orphaned(struct ipc_namespace *ns)
{
	down_write(&shm_ids(ns).rwsem);
	if (shm_ids(ns).in_use)
		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
	up_write(&shm_ids(ns).rwsem);
}


void exit_shm(struct task_struct *task)
{
	struct ipc_namespace *ns = task->nsproxy->ipc_ns;

	if (shm_ids(ns).in_use == 0)
		return;

	/* Destroy all already created segments, but not mapped yet */
	down_write(&shm_ids(ns).rwsem);
	if (shm_ids(ns).in_use)
		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_current, ns);
	up_write(&shm_ids(ns).rwsem);
}

static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->vm_ops->fault(vma, vmf);
}

#ifdef CONFIG_NUMA
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	int err = 0;
	if (sfd->vm_ops->set_policy)
		err = sfd->vm_ops->set_policy(vma, new);
	return err;
}

static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
					unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct mempolicy *pol = NULL;

	if (sfd->vm_ops->get_policy)
		pol = sfd->vm_ops->get_policy(vma, addr);
	else if (vma->vm_policy)
		pol = vma->vm_policy;

	return pol;
}
#endif

static int shm_mmap(struct file * file, struct vm_area_struct * vma)
{
	struct shm_file_data *sfd = shm_file_data(file);
	int ret;

	ret = sfd->file->f_op->mmap(sfd->file, vma);
	if (ret != 0)
		return ret;
	sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
	BUG_ON(!sfd->vm_ops->fault);
#endif
	vma->vm_ops = &shm_vm_ops;
	shm_open(vma);

	return ret;
}

static int shm_release(struct inode *ino, struct file *file)
{
	struct shm_file_data *sfd = shm_file_data(file);

	put_ipc_ns(sfd->ns);
	shm_file_data(file) = NULL;
	kfree(sfd);
	return 0;
}

static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct shm_file_data *sfd = shm_file_data(file);

	if (!sfd->file->f_op->fsync)
		return -EINVAL;
	return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
}

static long shm_fallocate(struct file *file, int mode, loff_t offset,
			  loff_t len)
{
	struct shm_file_data *sfd = shm_file_data(file);

	if (!sfd->file->f_op->fallocate)
		return -EOPNOTSUPP;
	return sfd->file->f_op->fallocate(file, mode, offset, len);
}

static unsigned long shm_get_unmapped_area(struct file *file,
	unsigned long addr, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	struct shm_file_data *sfd = shm_file_data(file);
	return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
						  pgoff, flags);
}

static const struct file_operations shm_file_operations = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
#ifndef CONFIG_MMU
	.get_unmapped_area	= shm_get_unmapped_area,
#endif
	.llseek		= noop_llseek,
	.fallocate	= shm_fallocate,
};

static const struct file_operations shm_file_operations_huge = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
	.get_unmapped_area	= shm_get_unmapped_area,
	.llseek		= noop_llseek,
	.fallocate	= shm_fallocate,
};

int is_file_shm_hugepages(struct file *file)
{
	return file->f_op == &shm_file_operations_huge;
}

static const struct vm_operations_struct shm_vm_ops = {
	.open	= shm_open,	/* callback for a new vm-area open */
	.close	= shm_close,	/* callback for when the vm-area is released */
	.fault	= shm_fault,
#if defined(CONFIG_NUMA)
	.set_policy = shm_set_policy,
	.get_policy = shm_get_policy,
#endif
};

/**
 * newseg - Create a new shared memory segment
 * @ns: namespace
 * @params: ptr to the structure that contains key, size and shmflg
 *
 * Called with shm_ids.rwsem held as a writer.
 */

static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
{
	key_t key = params->key;
	int shmflg = params->flg;
	size_t size = params->u.size;
	int error;
	struct shmid_kernel *shp;
	size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct file * file;
	char name[13];
	int id;
	vm_flags_t acctflag = 0;

	if (size < SHMMIN || size > ns->shm_ctlmax)
		return -EINVAL;

	if (ns->shm_tot + numpages > ns->shm_ctlall)
		return -ENOSPC;

	shp = ipc_rcu_alloc(sizeof(*shp));
	if (!shp)
		return -ENOMEM;

	shp->shm_perm.key = key;
	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
	shp->mlock_user = NULL;

	shp->shm_perm.security = NULL;
	error = security_shm_alloc(shp);
	if (error) {
		ipc_rcu_putref(shp, ipc_rcu_free);
		return error;
	}

	sprintf (name, "SYSV%08x", key);
	if (shmflg & SHM_HUGETLB) {
		struct hstate *hs;
		size_t hugesize;

		hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
		if (!hs) {
			error = -EINVAL;
			goto no_file;
		}
		hugesize = ALIGN(size, huge_page_size(hs));

		/* hugetlb_file_setup applies strict accounting */
		if (shmflg & SHM_NORESERVE)
			acctflag = VM_NORESERVE;
		file = hugetlb_file_setup(name, hugesize, acctflag,
				  &shp->mlock_user, HUGETLB_SHMFS_INODE,
				(shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
	} else {
		/*
		 * Do not allow no accounting for OVERCOMMIT_NEVER, even
		 * if it's asked for.
		 */
		if  ((shmflg & SHM_NORESERVE) &&
				sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			acctflag = VM_NORESERVE;
		file = shmem_file_setup(name, size, acctflag);
	}
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto no_file;

	id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
	if (id < 0) {
		error = id;
		goto no_id;
	}

	shp->shm_cprid = task_tgid_vnr(current);
	shp->shm_lprid = 0;
	shp->shm_atim = shp->shm_dtim = 0;
	shp->shm_ctim = get_seconds();
	shp->shm_segsz = size;
	shp->shm_nattch = 0;
	shp->shm_file = file;
	shp->shm_creator = current;

	/*
	 * shmid gets reported as "inode#" in /proc/pid/maps.
	 * proc-ps tools use this. Changing this will break them.
	 */
	file_inode(file)->i_ino = shp->shm_perm.id;

	ns->shm_tot += numpages;
	error = shp->shm_perm.id;

	ipc_unlock_object(&shp->shm_perm);
	rcu_read_unlock();
	return error;

no_id:
	if (is_file_hugepages(file) && shp->mlock_user)
		user_shm_unlock(size, shp->mlock_user);
	fput(file);
no_file:
	ipc_rcu_putref(shp, shm_rcu_free);
	return error;
}

/*
 * Called with shm_ids.rwsem and ipcp locked.
 */
static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	return security_shm_associate(shp, shmflg);
}

/*
 * Called with shm_ids.rwsem and ipcp locked.
 */
static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
				struct ipc_params *params)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	if (shp->shm_segsz < params->u.size)
		return -EINVAL;

	return 0;
}

SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
{
	struct ipc_namespace *ns;
	struct ipc_ops shm_ops;
	struct ipc_params shm_params;

	ns = current->nsproxy->ipc_ns;

	shm_ops.getnew = newseg;
	shm_ops.associate = shm_security;
	shm_ops.more_checks = shm_more_checks;

	shm_params.key = key;
	shm_params.flg = shmflg;
	shm_params.u.size = size;

	return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
}

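/*
 * Illustrative userspace sketch (not part of the kernel build): a minimal
 * example of how the shmget() path above is typically driven from user code.
 * The key, size and mode values are arbitrary example choices.
 *
 *	#include <sys/ipc.h>
 *	#include <sys/shm.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		// newseg() rounds the size up to whole pages and checks it
 *		// against SHMMIN/shm_ctlmax before creating the segment.
 *		int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *
 *		if (id < 0)
 *			perror("shmget");
 *		else
 *			printf("created segment %d\n", id);
 *		return 0;
 *	}
 */
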
static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
	switch(version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shmid_ds out;

		memset(&out, 0, sizeof(out));
		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
		out.shm_segsz	= in->shm_segsz;
		out.shm_atime	= in->shm_atime;
		out.shm_dtime	= in->shm_dtime;
		out.shm_ctime	= in->shm_ctime;
		out.shm_cpid	= in->shm_cpid;
		out.shm_lpid	= in->shm_lpid;
		out.shm_nattch	= in->shm_nattch;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

static inline unsigned long
copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
{
	switch(version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	    {
		struct shmid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->shm_perm.uid	= tbuf_old.shm_perm.uid;
		out->shm_perm.gid	= tbuf_old.shm_perm.gid;
		out->shm_perm.mode	= tbuf_old.shm_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}

static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
	switch(version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shminfo out;

		if(in->shmmax > INT_MAX)
			out.shmmax = INT_MAX;
		else
			out.shmmax = (int)in->shmmax;

		out.shmmin	= in->shmmin;
		out.shmmni	= in->shmmni;
		out.shmseg	= in->shmseg;
		out.shmall	= in->shmall;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

/*
 * Calculate and add used RSS and swap pages of a shm.
 * Called with shm_ids.rwsem held as a reader
 */
static void shm_add_rss_swap(struct shmid_kernel *shp,
	unsigned long *rss_add, unsigned long *swp_add)
{
	struct inode *inode;

	inode = file_inode(shp->shm_file);

	if (is_file_hugepages(shp->shm_file)) {
		struct address_space *mapping = inode->i_mapping;
		struct hstate *h = hstate_file(shp->shm_file);
		*rss_add += pages_per_huge_page(h) * mapping->nrpages;
	} else {
#ifdef CONFIG_SHMEM
		struct shmem_inode_info *info = SHMEM_I(inode);
		spin_lock(&info->lock);
		*rss_add += inode->i_mapping->nrpages;
		*swp_add += info->swapped;
		spin_unlock(&info->lock);
#else
		*rss_add += inode->i_mapping->nrpages;
#endif
	}
}

/*
 * Called with shm_ids.rwsem held as a reader
 */
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
		unsigned long *swp)
{
	int next_id;
	int total, in_use;

	*rss = 0;
	*swp = 0;

	in_use = shm_ids(ns).in_use;

	for (total = 0, next_id = 0; total < in_use; next_id++) {
		struct kern_ipc_perm *ipc;
		struct shmid_kernel *shp;

		ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
		if (ipc == NULL)
			continue;
		shp = container_of(ipc, struct shmid_kernel, shm_perm);

		shm_add_rss_swap(shp, rss, swp);

		total++;
	}
}

/*
 * This function handles some shmctl commands which require the rwsem
 * to be held in write mode.
 * NOTE: no locks must be held, the rwsem is taken inside this function.
 */
static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
		       struct shmid_ds __user *buf, int version)
{
	struct kern_ipc_perm *ipcp;
	struct shmid64_ds shmid64;
	struct shmid_kernel *shp;
	int err;

	if (cmd == IPC_SET) {
		if (copy_shmid_from_user(&shmid64, buf, version))
			return -EFAULT;
	}

	down_write(&shm_ids(ns).rwsem);
	rcu_read_lock();

	ipcp = ipcctl_pre_down_nolock(ns, &shm_ids(ns), shmid, cmd,
				      &shmid64.shm_perm, 0);
	if (IS_ERR(ipcp)) {
		err = PTR_ERR(ipcp);
		goto out_unlock1;
	}

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	err = security_shm_shmctl(shp, cmd);
	if (err)
		goto out_unlock1;

	switch (cmd) {
	case IPC_RMID:
		ipc_lock_object(&shp->shm_perm);
		/* do_shm_rmid unlocks the ipc object and rcu */
		do_shm_rmid(ns, ipcp);
		goto out_up;
	case IPC_SET:
		ipc_lock_object(&shp->shm_perm);
		err = ipc_update_perm(&shmid64.shm_perm, ipcp);
		if (err)
			goto out_unlock0;
		shp->shm_ctim = get_seconds();
		break;
	default:
		err = -EINVAL;
		goto out_unlock1;
	}

out_unlock0:
	ipc_unlock_object(&shp->shm_perm);
out_unlock1:
	rcu_read_unlock();
out_up:
	up_write(&shm_ids(ns).rwsem);
	return err;
}

static int shmctl_nolock(struct ipc_namespace *ns, int shmid,
			 int cmd, int version, void __user *buf)
{
	int err;
	struct shmid_kernel *shp;

	/* preliminary security checks for *_INFO */
	if (cmd == IPC_INFO || cmd == SHM_INFO) {
		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;
	}

	switch (cmd) {
	case IPC_INFO:
	{
		struct shminfo64 shminfo;

		memset(&shminfo, 0, sizeof(shminfo));
		shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
		shminfo.shmmax = ns->shm_ctlmax;
		shminfo.shmall = ns->shm_ctlall;

		shminfo.shmmin = SHMMIN;
		if(copy_shminfo_to_user (buf, &shminfo, version))
			return -EFAULT;

		down_read(&shm_ids(ns).rwsem);
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rwsem);

		if(err<0)
			err = 0;
		goto out;
	}
	case SHM_INFO:
	{
		struct shm_info shm_info;

		memset(&shm_info, 0, sizeof(shm_info));
		down_read(&shm_ids(ns).rwsem);
		shm_info.used_ids = shm_ids(ns).in_use;
		shm_get_stat (ns, &shm_info.shm_rss, &shm_info.shm_swp);
		shm_info.shm_tot = ns->shm_tot;
		shm_info.swap_attempts = 0;
		shm_info.swap_successes = 0;
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rwsem);
		if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
			err = -EFAULT;
			goto out;
		}

		err = err < 0 ? 0 : err;
		goto out;
	}
	case SHM_STAT:
	case IPC_STAT:
	{
		struct shmid64_ds tbuf;
		int result;

		rcu_read_lock();
		if (cmd == SHM_STAT) {
			shp = shm_obtain_object(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out_unlock;
			}
			result = shp->shm_perm.id;
		} else {
			shp = shm_obtain_object_check(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out_unlock;
			}
			result = 0;
		}

		err = -EACCES;
		if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
			goto out_unlock;

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;

		memset(&tbuf, 0, sizeof(tbuf));
		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
		tbuf.shm_segsz	= shp->shm_segsz;
		tbuf.shm_atime	= shp->shm_atim;
		tbuf.shm_dtime	= shp->shm_dtim;
		tbuf.shm_ctime	= shp->shm_ctim;
		tbuf.shm_cpid	= shp->shm_cprid;
		tbuf.shm_lpid	= shp->shm_lprid;
		tbuf.shm_nattch	= shp->shm_nattch;
		rcu_read_unlock();

		if (copy_shmid_to_user(buf, &tbuf, version))
			err = -EFAULT;
		else
			err = result;
		goto out;
	}
	default:
		return -EINVAL;
	}

out_unlock:
	rcu_read_unlock();
out:
	return err;
}

SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
{
	struct shmid_kernel *shp;
	int err, version;
	struct ipc_namespace *ns;

	if (cmd < 0 || shmid < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) {
	case IPC_INFO:
	case SHM_INFO:
	case SHM_STAT:
	case IPC_STAT:
		return shmctl_nolock(ns, shmid, cmd, version, buf);
	case IPC_RMID:
	case IPC_SET:
		return shmctl_down(ns, shmid, cmd, buf, version);
	case SHM_LOCK:
	case SHM_UNLOCK:
	{
		struct file *shm_file;

		rcu_read_lock();
		shp = shm_obtain_object_check(ns, shmid);
		if (IS_ERR(shp)) {
			err = PTR_ERR(shp);
			goto out_unlock1;
		}

		audit_ipc_obj(&(shp->shm_perm));
		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock1;

		ipc_lock_object(&shp->shm_perm);
		if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
			kuid_t euid = current_euid();
			err = -EPERM;
			if (!uid_eq(euid, shp->shm_perm.uid) &&
			    !uid_eq(euid, shp->shm_perm.cuid))
				goto out_unlock0;
			if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK))
				goto out_unlock0;
		}

		shm_file = shp->shm_file;
		if (is_file_hugepages(shm_file))
			goto out_unlock0;

		if (cmd == SHM_LOCK) {
			struct user_struct *user = current_user();
			err = shmem_lock(shm_file, 1, user);
			if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
				shp->shm_perm.mode |= SHM_LOCKED;
				shp->mlock_user = user;
			}
			goto out_unlock0;
		}

		/* SHM_UNLOCK */
		if (!(shp->shm_perm.mode & SHM_LOCKED))
			goto out_unlock0;
		shmem_lock(shm_file, 0, shp->mlock_user);
		shp->shm_perm.mode &= ~SHM_LOCKED;
		shp->mlock_user = NULL;
		get_file(shm_file);
		ipc_unlock_object(&shp->shm_perm);
		rcu_read_unlock();
		shmem_unlock_mapping(shm_file->f_mapping);

		fput(shm_file);
		return err;
	}
	default:
		return -EINVAL;
	}

out_unlock0:
	ipc_unlock_object(&shp->shm_perm);
out_unlock1:
	rcu_read_unlock();
	return err;
}

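/*
 * Illustrative userspace sketch (not part of the kernel build): exercising
 * the shmctl() paths above. IPC_STAT goes through shmctl_nolock(); IPC_RMID
 * goes through shmctl_down() and either destroys the segment or marks it
 * SHM_DEST while attachments remain. The id is assumed to come from shmget().
 *
 *	#include <sys/ipc.h>
 *	#include <sys/shm.h>
 *	#include <stdio.h>
 *
 *	static void stat_and_remove(int id)
 *	{
 *		struct shmid_ds ds;
 *
 *		if (shmctl(id, IPC_STAT, &ds) == 0)
 *			printf("size=%zu nattch=%lu\n", ds.shm_segsz,
 *			       (unsigned long)ds.shm_nattch);
 *		shmctl(id, IPC_RMID, NULL);
 *	}
 */
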
/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
	      unsigned long shmlba)
{
	struct shmid_kernel *shp;
	unsigned long addr;
	unsigned long size;
	struct file * file;
	int    err;
	unsigned long flags;
	unsigned long prot;
	int acc_mode;
	struct ipc_namespace *ns;
	struct shm_file_data *sfd;
	struct path path;
	fmode_t f_mode;
	unsigned long populate = 0;

	err = -EINVAL;
	if (shmid < 0)
		goto out;
	else if ((addr = (ulong)shmaddr)) {
		if (addr & (shmlba - 1)) {
			if (shmflg & SHM_RND)
				addr &= ~(shmlba - 1);	   /* round down */
			else
#ifndef __ARCH_FORCE_SHMLBA
				if (addr & ~PAGE_MASK)
#endif
					goto out;
		}
		flags = MAP_SHARED | MAP_FIXED;
	} else {
		if ((shmflg & SHM_REMAP))
			goto out;

		flags = MAP_SHARED;
	}

	if (shmflg & SHM_RDONLY) {
		prot = PROT_READ;
		acc_mode = S_IRUGO;
		f_mode = FMODE_READ;
	} else {
		prot = PROT_READ | PROT_WRITE;
		acc_mode = S_IRUGO | S_IWUGO;
		f_mode = FMODE_READ | FMODE_WRITE;
	}
	if (shmflg & SHM_EXEC) {
		prot |= PROT_EXEC;
		acc_mode |= S_IXUGO;
	}

	/*
	 * We cannot rely on the fs check since SYSV IPC does have an
	 * additional creator id...
	 */
	ns = current->nsproxy->ipc_ns;
	rcu_read_lock();
	shp = shm_obtain_object_check(ns, shmid);
	if (IS_ERR(shp)) {
		err = PTR_ERR(shp);
		goto out_unlock;
	}

	err = -EACCES;
	if (ipcperms(ns, &shp->shm_perm, acc_mode))
		goto out_unlock;

	err = security_shm_shmat(shp, shmaddr, shmflg);
	if (err)
		goto out_unlock;

	ipc_lock_object(&shp->shm_perm);
	path = shp->shm_file->f_path;
	path_get(&path);
	shp->shm_nattch++;
	size = i_size_read(path.dentry->d_inode);
	ipc_unlock_object(&shp->shm_perm);
	rcu_read_unlock();

	err = -ENOMEM;
	sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
	if (!sfd) {
		path_put(&path);
		goto out_nattch;
	}

	file = alloc_file(&path, f_mode,
			  is_file_hugepages(shp->shm_file) ?
				&shm_file_operations_huge :
				&shm_file_operations);
	err = PTR_ERR(file);
	if (IS_ERR(file)) {
		kfree(sfd);
		path_put(&path);
		goto out_nattch;
	}

	file->private_data = sfd;
	file->f_mapping = shp->shm_file->f_mapping;
	sfd->id = shp->shm_perm.id;
	sfd->ns = get_ipc_ns(ns);
	sfd->file = shp->shm_file;
	sfd->vm_ops = NULL;

	err = security_mmap_file(file, prot, flags);
	if (err)
		goto out_fput;

	down_write(&current->mm->mmap_sem);
	if (addr && !(shmflg & SHM_REMAP)) {
		err = -EINVAL;
		if (find_vma_intersection(current->mm, addr, addr + size))
			goto invalid;
		/*
		 * If shm segment goes below stack, make sure there is some
		 * space left for the stack to grow (at least 4 pages).
		 */
		if (addr < current->mm->start_stack &&
		    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
			goto invalid;
	}

	addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate);
	*raddr = addr;
	err = 0;
	if (IS_ERR_VALUE(addr))
		err = (long)addr;
invalid:
	up_write(&current->mm->mmap_sem);
	if (populate)
		mm_populate(addr, populate);

out_fput:
	fput(file);

out_nattch:
	down_write(&shm_ids(ns).rwsem);
	shp = shm_lock(ns, shmid);
	BUG_ON(IS_ERR(shp));
	shp->shm_nattch--;
	if (shm_may_destroy(ns, shp))
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rwsem);
	return err;

out_unlock:
	rcu_read_unlock();
out:
	return err;
}

SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
{
	unsigned long ret;
	long err;

	err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
	if (err)
		return err;
	force_successful_syscall_return();
	return (long)ret;
}

1197 | /* |
1198 | * detach and kill segment if marked destroyed. | |
1199 | * The work is done in shm_close. | |
1200 | */ | |
d5460c99 | 1201 | SYSCALL_DEFINE1(shmdt, char __user *, shmaddr) |
1da177e4 LT |
1202 | { |
1203 | struct mm_struct *mm = current->mm; | |
586c7e6a | 1204 | struct vm_area_struct *vma; |
1da177e4 | 1205 | unsigned long addr = (unsigned long)shmaddr; |
1da177e4 | 1206 | int retval = -EINVAL; |
586c7e6a MF |
1207 | #ifdef CONFIG_MMU |
1208 | loff_t size = 0; | |
1209 | struct vm_area_struct *next; | |
1210 | #endif | |
1da177e4 | 1211 | |
df1e2fb5 HD |
1212 | if (addr & ~PAGE_MASK) |
1213 | return retval; | |
1214 | ||
1da177e4 LT |
1215 | down_write(&mm->mmap_sem); |
1216 | ||
1217 | /* | |
1218 | * This function tries to be smart and unmap shm segments that | |
1219 | * were modified by partial mlock or munmap calls: | |
1220 | * - It first determines the size of the shm segment that should be | |
1221 | * unmapped: It searches for a vma that is backed by shm and that | |
1222 | * started at address shmaddr. It records it's size and then unmaps | |
1223 | * it. | |
1224 | * - Then it unmaps all shm vmas that started at shmaddr and that | |
1225 | * are within the initially determined size. | |
1226 | * Errors from do_munmap are ignored: the function only fails if | |
1227 | * it's called with invalid parameters or if it's called to unmap | |
1228 | * a part of a vma. Both calls in this function are for full vmas, | |
1229 | * the parameters are directly copied from the vma itself and always | |
1230 | * valid - therefore do_munmap cannot fail. (famous last words?) | |
1231 | */ | |
1232 | /* | |
1233 | * If it had been mremap()'d, the starting address would not | |
1234 | * match the usual checks anyway. So assume all vma's are | |
1235 | * above the starting address given. | |
1236 | */ | |
1237 | vma = find_vma(mm, addr); | |
1238 | ||
8feae131 | 1239 | #ifdef CONFIG_MMU |
1da177e4 LT |
1240 | while (vma) { |
1241 | next = vma->vm_next; | |
1242 | ||
1243 | /* | |
1244 | * Check if the starting address would match, i.e. it's | |
1245 | * a fragment created by mprotect() and/or munmap(), or it | |
1246 | * otherwise it starts at this address with no hassles. | |
1247 | */ | |
bc56bba8 | 1248 | if ((vma->vm_ops == &shm_vm_ops) && |
1da177e4 LT |
1249 | (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) { |
1250 | ||
1251 | ||
496ad9aa | 1252 | size = file_inode(vma->vm_file)->i_size; |
1da177e4 LT |
1253 | do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start); |
1254 | /* | |
1255 | * We discovered the size of the shm segment, so | |
1256 | * break out of here and fall through to the next | |
1257 | * loop that uses the size information to stop | |
1258 | * searching for matching vma's. | |
1259 | */ | |
1260 | retval = 0; | |
1261 | vma = next; | |
1262 | break; | |
1263 | } | |
1264 | vma = next; | |
1265 | } | |
1266 | ||
1267 | /* | |
1268 | * We need look no further than the maximum address a fragment | |
1269 | * could possibly have landed at. Also cast things to loff_t to | |
25985edc | 1270 | * prevent overflows and make comparisons vs. equal-width types. |
1da177e4 | 1271 | */ |
8e36709d | 1272 | size = PAGE_ALIGN(size); |
1da177e4 LT |
1273 | while (vma && (loff_t)(vma->vm_end - addr) <= size) { |
1274 | next = vma->vm_next; | |
1275 | ||
1276 | /* finding a matching vma now does not alter retval */ | |
bc56bba8 | 1277 | if ((vma->vm_ops == &shm_vm_ops) && |
1da177e4 LT |
1278 | (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) |
1279 | ||
1280 | do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start); | |
1281 | vma = next; | |
1282 | } | |
1283 | ||
8feae131 DH |
1284 | #else /* CONFIG_MMU */ |
1285 | /* under NOMMU conditions, the exact address to be destroyed must be | |
1286 | * given */ | |
530fcd16 | 1287 | if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) { |
8feae131 DH |
1288 | do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start); |
1289 | retval = 0; | |
1290 | } | |
1291 | ||
1292 | #endif | |
1293 | ||
1da177e4 LT |
1294 | up_write(&mm->mmap_sem); |
1295 | return retval; | |
1296 | } | |
1297 | ||
1298 | #ifdef CONFIG_PROC_FS | |
19b4946c | 1299 | static int sysvipc_shm_proc_show(struct seq_file *s, void *it) |
1da177e4 | 1300 | { |
1efdb69b | 1301 | struct user_namespace *user_ns = seq_user_ns(s); |
19b4946c | 1302 | struct shmid_kernel *shp = it; |
b7952180 HD |
1303 | unsigned long rss = 0, swp = 0; |
1304 | ||
1305 | shm_add_rss_swap(shp, &rss, &swp); | |
1da177e4 | 1306 | |
6c826818 PM |
1307 | #if BITS_PER_LONG <= 32 |
1308 | #define SIZE_SPEC "%10lu" | |
1309 | #else | |
1310 | #define SIZE_SPEC "%21lu" | |
1311 | #endif | |
1da177e4 | 1312 | |
6c826818 PM |
1313 | return seq_printf(s, |
1314 | "%10d %10d %4o " SIZE_SPEC " %5u %5u " | |
b7952180 HD |
1315 | "%5lu %5u %5u %5u %5u %10lu %10lu %10lu " |
1316 | SIZE_SPEC " " SIZE_SPEC "\n", | |
19b4946c | 1317 | shp->shm_perm.key, |
7ca7e564 | 1318 | shp->shm_perm.id, |
b33291c0 | 1319 | shp->shm_perm.mode, |
19b4946c MW |
1320 | shp->shm_segsz, |
1321 | shp->shm_cprid, | |
1322 | shp->shm_lprid, | |
bc56bba8 | 1323 | shp->shm_nattch, |
1efdb69b EB |
1324 | from_kuid_munged(user_ns, shp->shm_perm.uid), |
1325 | from_kgid_munged(user_ns, shp->shm_perm.gid), | |
1326 | from_kuid_munged(user_ns, shp->shm_perm.cuid), | |
1327 | from_kgid_munged(user_ns, shp->shm_perm.cgid), | |
19b4946c MW |
1328 | shp->shm_atim, |
1329 | shp->shm_dtim, | |
b7952180 HD |
1330 | shp->shm_ctim, |
1331 | rss * PAGE_SIZE, | |
1332 | swp * PAGE_SIZE); | |
1da177e4 LT |
1333 | } |
1334 | #endif |