/*
 *	linux/mm/madvise.c
 *
 * Copyright (C) 1999  Linus Torvalds
 * Copyright (C) 2002  Christoph Hellwig
 */

#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/page-isolation.h>
#include <linux/hugetlb.h>
#include <linux/falloc.h>
#include <linux/sched.h>
#include <linux/ksm.h>
#include <linux/fs.h>
#include <linux/file.h>

/*
 * Any behaviour which results in changes to the vma->vm_flags needs to
 * take mmap_sem for writing. Others, which simply traverse vmas, need
 * to only take it for reading.
 */
static int madvise_need_mmap_write(int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
		return 0;
	default:
		/* be safe, default to 1. list exceptions explicitly */
		return 1;
	}
}

/*
 * We can potentially split a vm area into separate
 * areas, each area with its own behavior.
 */
static long madvise_behavior(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end, int behavior)
{
	struct mm_struct *mm = vma->vm_mm;
	int error = 0;
	pgoff_t pgoff;
	unsigned long new_flags = vma->vm_flags;

	switch (behavior) {
	case MADV_NORMAL:
		new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
		break;
	case MADV_SEQUENTIAL:
		new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
		break;
	case MADV_RANDOM:
		new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
		break;
	case MADV_DONTFORK:
		new_flags |= VM_DONTCOPY;
		break;
	case MADV_DOFORK:
		if (vma->vm_flags & VM_IO) {
			error = -EINVAL;
			goto out;
		}
		new_flags &= ~VM_DONTCOPY;
		break;
	case MADV_DONTDUMP:
		new_flags |= VM_NODUMP;
		break;
	case MADV_DODUMP:
		new_flags &= ~VM_NODUMP;
		break;
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
		error = ksm_madvise(vma, start, end, behavior, &new_flags);
		if (error)
			goto out;
		break;
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
		error = hugepage_madvise(vma, &new_flags, behavior);
		if (error)
			goto out;
		break;
	}

	if (new_flags == vma->vm_flags) {
		*prev = vma;
		goto out;
	}

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma));
	if (*prev) {
		vma = *prev;
		goto success;
	}

	*prev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto out;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto out;
	}

success:
	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 */
	vma->vm_flags = new_flags;

out:
	if (error == -ENOMEM)
		error = -EAGAIN;
	return error;
}
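
/*
 * Illustrative userspace sketch (not part of this file, and assuming a
 * 4096-byte page size): advising only a sub-range of a mapping exercises
 * the split_vma() paths above, because the new flags apply to part of
 * the vma only.
 *
 *	#include <sys/mman.h>
 *
 *	int main(void)
 *	{
 *		char *p = mmap(0, 3 * 4096, PROT_READ | PROT_WRITE,
 *			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *		if (p == MAP_FAILED)
 *			return 1;
 *		return madvise(p + 4096, 4096, MADV_DONTFORK) ? 1 : 0;
 *	}
 *
 * After the call, /proc/self/maps should show three vmas where there was
 * one, since the middle page's VM_DONTCOPY prevents merging.
 */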

/*
 * Schedule all required I/O operations.  Do not wait for completion.
 */
static long madvise_willneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	struct file *file = vma->vm_file;

	if (!file)
		return -EBADF;

	if (file->f_mapping->a_ops->get_xip_mem) {
		/* no bad return value, but ignore advice */
		return 0;
	}

	*prev = vma;
	start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	if (end > vma->vm_end)
		end = vma->vm_end;
	end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

	force_page_cache_readahead(file->f_mapping, file, start, end - start);
	return 0;
}
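
/*
 * Illustrative userspace sketch (not part of this file): asking for
 * readahead on a file-backed mapping before a sequential pass over it.
 * "data.bin" is a hypothetical file; the kernel is free to ignore the
 * advice entirely.
 *
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *	#include <sys/stat.h>
 *
 *	struct stat st;
 *	int fd = open("data.bin", O_RDONLY);
 *	fstat(fd, &st);
 *	char *p = mmap(0, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
 *	madvise(p, st.st_size, MADV_WILLNEED);
 *
 * The call returns as soon as the I/O is scheduled, so subsequent reads
 * of p[] overlap with the readahead instead of waiting on it.
 */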

/*
 * Application no longer needs these pages.  If the pages are dirty,
 * it's OK to just throw them away.  The app will be more careful about
 * data it wants to keep.  Be sure to free swap resources too.  The
 * zap_page_range call sets things up for shrink_active_list to actually
 * free these pages later if no one else has touched them in the meantime,
 * although we could add these pages to a global reuse list for
 * shrink_active_list to pick up before reclaiming other pages.
 *
 * NB: This interface discards data rather than pushes it out to swap,
 * as some implementations do.  This has performance implications for
 * applications like large transactional databases which want to discard
 * pages in anonymous maps after committing to backing store the data
 * that was kept in them.  There is no reason to write this data out to
 * the swap area if the application is discarding it.
 *
 * An interface that causes the system to free clean pages and flush
 * dirty pages is already available as msync(MS_INVALIDATE).
 */
static long madvise_dontneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	*prev = vma;
	if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
		return -EINVAL;

	if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
		struct zap_details details = {
			.nonlinear_vma = vma,
			.last_index = ULONG_MAX,
		};
		zap_page_range(vma, start, end - start, &details);
	} else
		zap_page_range(vma, start, end - start, NULL);
	return 0;
}
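
/*
 * Illustrative userspace sketch (not part of this file, and assuming a
 * 4096-byte page size): on a private anonymous mapping, MADV_DONTNEED
 * throws dirty data away, and the next touch refaults a fresh zero page.
 *
 *	#include <assert.h>
 *	#include <string.h>
 *	#include <sys/mman.h>
 *
 *	char *p = mmap(0, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	memset(p, 0xff, 4096);
 *	madvise(p, 4096, MADV_DONTNEED);
 *	assert(p[0] == 0);
 *
 * The assert holds because the dirty page was discarded rather than
 * written to swap, exactly as the comment above describes.
 */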
194
f6b3ec23
BP
195/*
196 * Application wants to free up the pages and associated backing store.
197 * This is effectively punching a hole into the middle of a file.
198 *
199 * NOTE: Currently, only shmfs/tmpfs is supported for this operation.
200 * Other filesystems return -ENOSYS.
201 */
202static long madvise_remove(struct vm_area_struct *vma,
00e9fa2d 203 struct vm_area_struct **prev,
f6b3ec23
BP
204 unsigned long start, unsigned long end)
205{
3f31d075 206 loff_t offset;
90ed52eb 207 int error;
9ab4233d 208 struct file *f;
f6b3ec23 209
90ed52eb 210 *prev = NULL; /* tell sys_madvise we drop mmap_sem */
00e9fa2d 211
f6b3ec23
BP
212 if (vma->vm_flags & (VM_LOCKED|VM_NONLINEAR|VM_HUGETLB))
213 return -EINVAL;
214
9ab4233d
AL
215 f = vma->vm_file;
216
217 if (!f || !f->f_mapping || !f->f_mapping->host) {
f6b3ec23
BP
218 return -EINVAL;
219 }
220
69cf0fac
HD
221 if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
222 return -EACCES;
223
f6b3ec23
BP
224 offset = (loff_t)(start - vma->vm_start)
225 + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
90ed52eb 226
9ab4233d
AL
227 /*
228 * Filesystem's fallocate may need to take i_mutex. We need to
229 * explicitly grab a reference because the vma (and hence the
230 * vma's reference to the file) can go away as soon as we drop
231 * mmap_sem.
232 */
233 get_file(f);
0a27a14a 234 up_read(&current->mm->mmap_sem);
9ab4233d 235 error = do_fallocate(f,
3f31d075
HD
236 FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
237 offset, end - start);
9ab4233d 238 fput(f);
0a27a14a 239 down_read(&current->mm->mmap_sem);
90ed52eb 240 return error;
f6b3ec23
BP
241}
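
/*
 * Illustrative userspace sketch (not part of this file, and assuming a
 * 4096-byte page size): punching a hole in a tmpfs file through a shared,
 * writable mapping.  "/dev/shm/scratch" is a hypothetical path on tmpfs.
 *
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/dev/shm/scratch", O_RDWR | O_CREAT, 0600);
 *	ftruncate(fd, 2 * 4096);
 *	char *p = mmap(0, 2 * 4096, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *	madvise(p, 4096, MADV_REMOVE);
 *
 * The first page now reads back as zeroes and its blocks are returned to
 * tmpfs; a read-only or private mapping would get -EACCES instead.
 */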

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Error injection support for memory error handling.
 */
static int madvise_hwpoison(int bhv, unsigned long start, unsigned long end)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	for (; start < end; start += PAGE_SIZE) {
		struct page *p;
		int ret = get_user_pages_fast(start, 1, 0, &p);

		if (ret != 1)
			return ret;
		if (bhv == MADV_SOFT_OFFLINE) {
			printk(KERN_INFO "Soft offlining page %lx at %lx\n",
			       page_to_pfn(p), start);
			ret = soft_offline_page(p, MF_COUNT_INCREASED);
			if (ret)
				return ret;
			continue;
		}
		printk(KERN_INFO "Injecting memory failure for page %lx at %lx\n",
		       page_to_pfn(p), start);
		/* Ignore return value for now */
		memory_failure(page_to_pfn(p), 0, MF_COUNT_INCREASED);
	}
	return 0;
}
#endif
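
/*
 * Illustrative userspace sketch (not part of this file, and assuming a
 * 4096-byte page size): injecting a memory error into one of the
 * caller's own pages.  Needs CAP_SYS_ADMIN and CONFIG_MEMORY_FAILURE.
 *
 *	#include <sys/mman.h>
 *
 *	char *p = mmap(0, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	p[0] = 1;
 *	madvise(p, 4096, MADV_HWPOISON);
 *
 * The page is then handled as if a real hardware error had been reported
 * for it; a subsequent access is expected to raise SIGBUS.
 */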

/*
 * Apply one behavior to a single vma: dispatch to the handler that may
 * split or merge vmas (madvise_behavior) or act on pages directly.
 */
static long
madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
	    unsigned long start, unsigned long end, int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
		return madvise_remove(vma, prev, start, end);
	case MADV_WILLNEED:
		return madvise_willneed(vma, prev, start, end);
	case MADV_DONTNEED:
		return madvise_dontneed(vma, prev, start, end);
	default:
		return madvise_behavior(vma, prev, start, end, behavior);
	}
}

static int
madvise_behavior_valid(int behavior)
{
	switch (behavior) {
	case MADV_DOFORK:
	case MADV_DONTFORK:
	case MADV_NORMAL:
	case MADV_SEQUENTIAL:
	case MADV_RANDOM:
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
#ifdef CONFIG_KSM
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
#endif
	case MADV_DONTDUMP:
	case MADV_DODUMP:
		return 1;

	default:
		return 0;
	}
}

/*
 * The madvise(2) system call.
 *
 * Applications can use madvise() to advise the kernel how it should
 * handle paging I/O in this VM area.  The idea is to help the kernel
 * use appropriate read-ahead and caching techniques.  The information
 * provided is advisory only, and can be safely disregarded by the
 * kernel without affecting the correct operation of the application.
 *
 * behavior values:
 *  MADV_NORMAL - the default behavior is to read clusters.  This
 *		results in some read-ahead and read-behind.
 *  MADV_RANDOM - the system should read the minimum amount of data
 *		on any access, since it is unlikely that the application
 *		will need more than what it asks for.
 *  MADV_SEQUENTIAL - pages in the given range will probably be accessed
 *		once, so they can be aggressively read ahead, and
 *		can be freed soon after they are accessed.
 *  MADV_WILLNEED - the application is notifying the system to read
 *		some pages ahead.
 *  MADV_DONTNEED - the application is finished with the given range,
 *		so the kernel can free resources associated with it.
 *  MADV_REMOVE - the application wants to free up the given range of
 *		pages and associated backing store.
 *  MADV_DONTFORK - omit this area from child's address space when forking:
 *		typically, to avoid COWing pages pinned by get_user_pages().
 *  MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
 *  MADV_MERGEABLE - the application recommends that KSM try to merge pages in
 *		this area with pages of identical content from other such areas.
 *  MADV_UNMERGEABLE - cancel MADV_MERGEABLE: no longer merge pages with others.
 *
 * return values:
 *  zero    - success
 *  -EINVAL - start + len < 0, start is not page-aligned,
 *		"behavior" is not a valid value, or application
 *		is attempting to release locked or shared pages.
 *  -ENOMEM - addresses in the specified range are not currently
 *		mapped, or are outside the AS of the process.
 *  -EIO    - an I/O error occurred while paging in data.
 *  -EBADF  - map exists, but area maps something that isn't a file.
 *  -EAGAIN - a kernel resource was temporarily unavailable.
 */
SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
{
	unsigned long end, tmp;
	struct vm_area_struct *vma, *prev;
	int unmapped_error = 0;
	int error = -EINVAL;
	int write;
	size_t len;

#ifdef CONFIG_MEMORY_FAILURE
	if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE)
		return madvise_hwpoison(behavior, start, start+len_in);
#endif
	if (!madvise_behavior_valid(behavior))
		return error;

	write = madvise_need_mmap_write(behavior);
	if (write)
		down_write(&current->mm->mmap_sem);
	else
		down_read(&current->mm->mmap_sem);

	if (start & ~PAGE_MASK)
		goto out;
	len = (len_in + ~PAGE_MASK) & PAGE_MASK;

	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		goto out;

	end = start + len;
	if (end < start)
		goto out;

	error = 0;
	if (end == start)
		goto out;

	/*
	 * If the interval [start,end) covers some unmapped address
	 * ranges, just ignore them, but return -ENOMEM at the end.
	 * - different from the way of handling in mlock etc.
	 */
	vma = find_vma_prev(current->mm, start, &prev);
	if (vma && start > vma->vm_start)
		prev = vma;

	for (;;) {
		/* Still start < end. */
		error = -ENOMEM;
		if (!vma)
			goto out;

		/* Here start < (end|vma->vm_end). */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
			if (start >= end)
				goto out;
		}

		/* Here vma->vm_start <= start < (end|vma->vm_end) */
		tmp = vma->vm_end;
		if (end < tmp)
			tmp = end;

		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
		error = madvise_vma(vma, &prev, start, tmp, behavior);
		if (error)
			goto out;
		start = tmp;
		if (prev && start < prev->vm_end)
			start = prev->vm_end;
		error = unmapped_error;
		if (start >= end)
			goto out;
		if (prev)
			vma = prev->vm_next;
		else	/* madvise_remove dropped mmap_sem */
			vma = find_vma(current->mm, start);
	}
out:
	if (write)
		up_write(&current->mm->mmap_sem);
	else
		up_read(&current->mm->mmap_sem);

	return error;
}
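
/*
 * Illustrative userspace sketch (not part of this file, and assuming a
 * 4096-byte page size): a typical call sequence against this syscall,
 * including the partially-unmapped case handled by the loop above.
 *
 *	#include <errno.h>
 *	#include <stdio.h>
 *	#include <sys/mman.h>
 *
 *	int main(void)
 *	{
 *		size_t len = 16 * 4096;
 *		char *p = mmap(0, len, PROT_READ | PROT_WRITE,
 *			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *		if (p == MAP_FAILED)
 *			return 1;
 *		if (madvise(p, len, MADV_SEQUENTIAL))
 *			perror("madvise");
 *		munmap(p + 8 * 4096, 4096);
 *		errno = 0;
 *		madvise(p, len, MADV_DONTNEED);
 *		printf("errno=%d\n", errno);
 *		return 0;
 *	}
 *
 * The second madvise() reports ENOMEM because of the hole punched by
 * munmap(), yet the advice is still applied to the mapped parts, as the
 * unmapped_error handling above implements.
 */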