/*
 * linux/mm/mincore.c
 *
 * Copyright (C) 1994-2006  Linus Torvalds
 */

/*
 * The mincore() system call.
 */
#include <linux/pagemap.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>

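/*
 * Note (editor's annotation): even for hugetlb mappings, the status
 * vector below is filled at small PAGE_SIZE granularity; every
 * PAGE_SIZE-sized slot covered by a present huge page is reported as
 * resident, and presence is re-checked at each hugepage boundary.
 */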
static void mincore_hugetlb_page_range(struct vm_area_struct *vma,
				unsigned long addr, unsigned long end,
				unsigned char *vec)
{
#ifdef CONFIG_HUGETLB_PAGE
	struct hstate *h;

	h = hstate_vma(vma);
	while (1) {
		unsigned char present;
		pte_t *ptep;
		/*
		 * Huge pages are always in RAM for now, but
		 * theoretically it needs to be checked.
		 */
		ptep = huge_pte_offset(current->mm,
				       addr & huge_page_mask(h));
		present = ptep && !huge_pte_none(huge_ptep_get(ptep));
		while (1) {
			*vec = present;
			vec++;
			addr += PAGE_SIZE;
			if (addr == end)
				return;
			/* check hugepage border */
			if (!(addr & ~huge_page_mask(h)))
				break;
		}
	}
#else
	BUG();
#endif
}

/*
 * Later we can get more picky about what "in core" means precisely.
 * For now, simply check to see if the page is in the page cache,
 * and is up to date; i.e. that no page-in operation would be required
 * at this time if an application were to map and access this page.
 */
static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff)
{
	unsigned char present = 0;
	struct page *page;

	/*
	 * When tmpfs swaps out a page from a file, any process mapping that
	 * file will not get a swp_entry_t in its pte, but rather it is like
	 * any other file mapping (ie. marked !present and faulted in with
	 * tmpfs's .fault). So swapped-out tmpfs mappings are tested here.
	 */
#ifdef CONFIG_SWAP
	if (shmem_mapping(mapping)) {
		page = find_get_entry(mapping, pgoff);
		/*
		 * shmem/tmpfs may return swap: account for swapcache
		 * page too.
		 */
		if (radix_tree_exceptional_entry(page)) {
			swp_entry_t swp = radix_to_swp_entry(page);
			page = find_get_page(swap_address_space(swp), swp.val);
		}
	} else
		page = find_get_page(mapping, pgoff);
#else
	page = find_get_page(mapping, pgoff);
#endif
	if (page) {
		present = PageUptodate(page);
		page_cache_release(page);
	}

	return present;
}

static void mincore_unmapped_range(struct vm_area_struct *vma,
				unsigned long addr, unsigned long end,
				unsigned char *vec)
{
	unsigned long nr = (end - addr) >> PAGE_SHIFT;
	int i;

	if (vma->vm_file) {
		pgoff_t pgoff;

		pgoff = linear_page_index(vma, addr);
		for (i = 0; i < nr; i++, pgoff++)
			vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);
	} else {
		for (i = 0; i < nr; i++)
			vec[i] = 0;
	}
}

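/*
 * Editor's annotation: the next function fills the status vector for
 * the addresses covered by one pte table. Present ptes count as
 * resident, empty ptes fall back to a page cache probe (or report zero
 * for anonymous mappings), and swap ptes are resolved through the swap
 * cache.
 */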
static void mincore_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned char *vec)
{
	unsigned long next;
	spinlock_t *ptl;
	pte_t *ptep;

	ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		pte_t pte = *ptep;

		next = addr + PAGE_SIZE;
		if (pte_none(pte))
			mincore_unmapped_range(vma, addr, next, vec);
		else if (pte_present(pte))
			*vec = 1;
		else { /* pte is a swap entry */
			swp_entry_t entry = pte_to_swp_entry(pte);

			if (non_swap_entry(entry)) {
				/*
				 * migration or hwpoison entries are always
				 * uptodate
				 */
				*vec = 1;
			} else {
#ifdef CONFIG_SWAP
				*vec = mincore_page(swap_address_space(entry),
						    entry.val);
#else
				WARN_ON(1);
				*vec = 1;
#endif
			}
		}
		vec++;
	} while (ptep++, addr = next, addr != end);
	pte_unmap_unlock(ptep - 1, ptl);
}

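/*
 * Editor's annotation: the three functions below implement the usual
 * top-down page-table walk (pgd -> pud -> pmd -> pte), advancing the
 * status vector by one byte per PAGE_SIZE and treating empty upper
 * levels as unmapped ranges.
 */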
static void mincore_pmd_range(struct vm_area_struct *vma, pud_t *pud,
			unsigned long addr, unsigned long end,
			unsigned char *vec)
{
	unsigned long next;
	pmd_t *pmd;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_trans_huge(*pmd)) {
			if (mincore_huge_pmd(vma, pmd, addr, next, vec)) {
				vec += (next - addr) >> PAGE_SHIFT;
				continue;
			}
			/* fall through */
		}
		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
			mincore_unmapped_range(vma, addr, next, vec);
		else
			mincore_pte_range(vma, pmd, addr, next, vec);
		vec += (next - addr) >> PAGE_SHIFT;
	} while (pmd++, addr = next, addr != end);
}

static void mincore_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
			unsigned long addr, unsigned long end,
			unsigned char *vec)
{
	unsigned long next;
	pud_t *pud;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			mincore_unmapped_range(vma, addr, next, vec);
		else
			mincore_pmd_range(vma, pud, addr, next, vec);
		vec += (next - addr) >> PAGE_SHIFT;
	} while (pud++, addr = next, addr != end);
}

static void mincore_page_range(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end,
			unsigned char *vec)
{
	unsigned long next;
	pgd_t *pgd;

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			mincore_unmapped_range(vma, addr, next, vec);
		else
			mincore_pud_range(vma, pgd, addr, next, vec);
		vec += (next - addr) >> PAGE_SHIFT;
	} while (pgd++, addr = next, addr != end);
}

/*
 * Do a chunk of "sys_mincore()". We've already checked
 * all the arguments, we hold the mmap semaphore: we should
 * just return the amount of info we're asked for.
 */
static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *vec)
{
	struct vm_area_struct *vma;
	unsigned long end;

	vma = find_vma(current->mm, addr);
	if (!vma || addr < vma->vm_start)
		return -ENOMEM;

	end = min(vma->vm_end, addr + (pages << PAGE_SHIFT));

	if (is_vm_hugetlb_page(vma))
		mincore_hugetlb_page_range(vma, addr, end, vec);
	else
		mincore_page_range(vma, addr, end, vec);

	return (end - addr) >> PAGE_SHIFT;
}

/*
 * The mincore(2) system call.
 *
 * mincore() returns the memory residency status of the pages in the
 * current process's address space specified by [addr, addr + len).
 * The status is returned in a vector of bytes.  The least significant
 * bit of each byte is 1 if the referenced page is in memory, otherwise
 * it is zero.
 *
 * Because the status of a page can change after mincore() checks it
 * but before it returns to the application, the returned vector may
 * contain stale information.  Only locked pages are guaranteed to
 * remain in memory.
 *
 * return values:
 *  zero    - success
 *  -EFAULT - vec points to an illegal address
 *  -EINVAL - addr is not a multiple of PAGE_CACHE_SIZE
 *  -ENOMEM - Addresses in the range [addr, addr + len] are
 *		invalid for the address space of this process, or
 *		specify one or more pages which are not currently
 *		mapped
 *  -EAGAIN - A kernel resource was temporarily unavailable.
 */
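/*
 * Illustrative userspace usage (editor's sketch, not part of the
 * original file): probe the residency of a fresh four-page anonymous
 * mapping. Error handling is minimal; only the low bit of each status
 * byte is meaningful.
 *
 *	#include <stdio.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	long psz = sysconf(_SC_PAGESIZE);
 *	size_t len = 4 * psz;
 *	unsigned char vec[4];
 *	void *addr = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	if (addr != MAP_FAILED && mincore(addr, len, vec) == 0) {
 *		int i;
 *		for (i = 0; i < 4; i++)
 *			printf("page %d: %sresident\n", i,
 *			       (vec[i] & 1) ? "" : "not ");
 *	}
 */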
SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len,
		unsigned char __user *, vec)
{
	long retval;
	unsigned long pages;
	unsigned char *tmp;

	/* Check the start address: needs to be page-aligned.. */
	if (start & ~PAGE_CACHE_MASK)
		return -EINVAL;

	/* ..and we need to be passed a valid user-space range */
	if (!access_ok(VERIFY_READ, (void __user *) start, len))
		return -ENOMEM;

	/* This also avoids any overflows on PAGE_CACHE_ALIGN */
	pages = len >> PAGE_SHIFT;
	pages += (len & ~PAGE_MASK) != 0;

	if (!access_ok(VERIFY_WRITE, vec, pages))
		return -EFAULT;

	tmp = (void *) __get_free_page(GFP_USER);
	if (!tmp)
		return -EAGAIN;

	retval = 0;
	while (pages) {
		/*
		 * Do at most PAGE_SIZE entries per iteration, due to
		 * the temporary buffer size.
		 */
		down_read(&current->mm->mmap_sem);
		retval = do_mincore(start, min(pages, PAGE_SIZE), tmp);
		up_read(&current->mm->mmap_sem);

		if (retval <= 0)
			break;
		if (copy_to_user(vec, tmp, retval)) {
			retval = -EFAULT;
			break;
		}
		pages -= retval;
		vec += retval;
		start += retval << PAGE_SHIFT;
		retval = 0;
	}
	free_page((unsigned long) tmp);
	return retval;
}