/*
 * linux/mm/mincore.c
 *
 * Copyright (C) 1994-2006  Linus Torvalds
 */

/*
 * The mincore() system call.
 */
#include <linux/pagemap.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>

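/*
 * Hugetlb entry callback for the page walk: one huge pte covers the whole
 * of [addr, end), so report the same residency byte for every base page
 * in the range and advance the caller's output vector past them.
 */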
static int mincore_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr,
			unsigned long end, struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
	unsigned char present;
	unsigned char *vec = walk->private;

	/*
	 * Hugepages under a user process are always in RAM and never
	 * swapped out; in theory, though, the pte still needs to be
	 * checked.
	 */
	present = pte && !huge_pte_none(huge_ptep_get(pte));
	for (; addr != end; vec++, addr += PAGE_SIZE)
		*vec = present;
	walk->private = vec;
#else
	BUG();
#endif
	return 0;
}

/*
 * Later we can get more picky about what "in core" means precisely.
 * For now, simply check to see if the page is in the page cache,
 * and is up to date; i.e. that no page-in operation would be required
 * at this time if an application were to map and access this page.
 */
static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff)
{
	unsigned char present = 0;
	struct page *page;

	/*
	 * When tmpfs swaps out a page from a file, any process mapping that
	 * file will not get a swp_entry_t in its pte, but rather it is like
	 * any other file mapping (ie. marked !present and faulted in with
	 * tmpfs's .fault). So swapped out tmpfs mappings are tested here.
	 */
#ifdef CONFIG_SWAP
	if (shmem_mapping(mapping)) {
		page = find_get_entry(mapping, pgoff);
		/*
		 * shmem/tmpfs may return swap: account for swapcache
		 * page too.
		 */
		if (radix_tree_exceptional_entry(page)) {
			swp_entry_t swp = radix_to_swp_entry(page);
			page = find_get_page(swap_address_space(swp), swp.val);
		}
	} else
		page = find_get_page(mapping, pgoff);
#else
	page = find_get_page(mapping, pgoff);
#endif
	if (page) {
		present = PageUptodate(page);
		page_cache_release(page);
	}

	return present;
}

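/*
 * Fill the vector for a range with no page table entries: file-backed
 * pages may still be resident in the page cache, while anonymous holes
 * never are.  Returns the number of pages reported on.
 */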
static int __mincore_unmapped_range(unsigned long addr, unsigned long end,
				struct vm_area_struct *vma, unsigned char *vec)
{
	unsigned long nr = (end - addr) >> PAGE_SHIFT;
	int i;

	if (vma->vm_file) {
		pgoff_t pgoff;

		pgoff = linear_page_index(vma, addr);
		for (i = 0; i < nr; i++, pgoff++)
			vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);
	} else {
		for (i = 0; i < nr; i++)
			vec[i] = 0;
	}
	return nr;
}

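/*
 * .pte_hole callback: delegate to __mincore_unmapped_range() and advance
 * the output vector by the number of pages handled.
 */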
static int mincore_unmapped_range(unsigned long addr, unsigned long end,
				   struct mm_walk *walk)
{
	walk->private += __mincore_unmapped_range(addr, end,
						  walk->vma, walk->private);
	return 0;
}

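/*
 * .pmd_entry callback: report residency for one pmd's worth of ptes,
 * handling the transparent-huge, unmapped, present and swap cases.
 */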
static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			struct mm_walk *walk)
{
	spinlock_t *ptl;
	struct vm_area_struct *vma = walk->vma;
	pte_t *ptep;
	unsigned char *vec = walk->private;
	int nr = (end - addr) >> PAGE_SHIFT;

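	/*
	 * A transparent huge pmd is mapped and resident as a single unit,
	 * so every base page in the range is present.
	 */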
	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		memset(vec, 1, nr);
		spin_unlock(ptl);
		goto out;
	}

	if (pmd_trans_unstable(pmd)) {
		__mincore_unmapped_range(addr, end, vma, vec);
		goto out;
	}

	ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	for (; addr != end; ptep++, addr += PAGE_SIZE) {
		pte_t pte = *ptep;

		if (pte_none(pte))
			__mincore_unmapped_range(addr, addr + PAGE_SIZE,
						 vma, vec);
		else if (pte_present(pte))
			*vec = 1;
		else { /* pte is a swap entry */
			swp_entry_t entry = pte_to_swp_entry(pte);

			if (non_swap_entry(entry)) {
				/*
				 * migration or hwpoison entries are always
				 * uptodate
				 */
				*vec = 1;
			} else {
#ifdef CONFIG_SWAP
				*vec = mincore_page(swap_address_space(entry),
						    entry.val);
#else
				WARN_ON(1);
				*vec = 1;
#endif
			}
		}
		vec++;
	}
	pte_unmap_unlock(ptep - 1, ptl);
out:
	walk->private += nr;
	cond_resched();
	return 0;
}

/*
 * Do a chunk of "sys_mincore()". We've already checked
 * all the arguments, we hold the mmap semaphore: we should
 * just return the amount of info we're asked for.
 */
static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *vec)
{
	struct vm_area_struct *vma;
	unsigned long end;
	int err;
	struct mm_walk mincore_walk = {
		.pmd_entry = mincore_pte_range,
		.pte_hole = mincore_unmapped_range,
		.hugetlb_entry = mincore_hugetlb,
		.private = vec,
	};

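	/*
	 * The walk is clamped to the vma covering addr, so one call handles
	 * at most one vma; the caller loops over the full range.
	 */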
	vma = find_vma(current->mm, addr);
	if (!vma || addr < vma->vm_start)
		return -ENOMEM;
	mincore_walk.mm = vma->vm_mm;
	end = min(vma->vm_end, addr + (pages << PAGE_SHIFT));
	err = walk_page_range(addr, end, &mincore_walk);
	if (err < 0)
		return err;
	return (end - addr) >> PAGE_SHIFT;
}

/*
 * The mincore(2) system call.
 *
 * mincore() returns the memory residency status of the pages in the
 * current process's address space specified by [addr, addr + len).
 * The status is returned in a vector of bytes.  The least significant
 * bit of each byte is 1 if the referenced page is in memory, otherwise
 * it is zero.
 *
 * Because the status of a page can change after mincore() checks it
 * but before it returns to the application, the returned vector may
 * contain stale information.  Only locked pages are guaranteed to
 * remain in memory.
 *
 * return values:
 *  zero    - success
 *  -EFAULT - vec points to an illegal address
 *  -EINVAL - addr is not a multiple of PAGE_CACHE_SIZE
 *  -ENOMEM - Addresses in the range [addr, addr + len) are
 *		invalid for the address space of this process, or
 *		specify one or more pages which are not currently
 *		mapped
 *  -EAGAIN - A kernel resource was temporarily unavailable.
 */
SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len,
		unsigned char __user *, vec)
{
	long retval;
	unsigned long pages;
	unsigned char *tmp;

	/* Check the start address: needs to be page-aligned.. */
	if (start & ~PAGE_CACHE_MASK)
		return -EINVAL;

	/* ..and we need to be passed a valid user-space range */
	if (!access_ok(VERIFY_READ, (void __user *) start, len))
		return -ENOMEM;

	/* This also avoids any overflows on PAGE_CACHE_ALIGN */
	pages = len >> PAGE_SHIFT;
	pages += (offset_in_page(len)) != 0;

	if (!access_ok(VERIFY_WRITE, vec, pages))
		return -EFAULT;

	tmp = (void *) __get_free_page(GFP_USER);
	if (!tmp)
		return -EAGAIN;

	retval = 0;
	while (pages) {
		/*
		 * Do at most PAGE_SIZE entries per iteration, due to
		 * the temporary buffer size.
		 */
		down_read(&current->mm->mmap_sem);
		retval = do_mincore(start, min(pages, PAGE_SIZE), tmp);
		up_read(&current->mm->mmap_sem);

		if (retval <= 0)
			break;
		if (copy_to_user(vec, tmp, retval)) {
			retval = -EFAULT;
			break;
		}
		pages -= retval;
		vec += retval;
		start += retval << PAGE_SHIFT;
		retval = 0;
	}
	free_page((unsigned long) tmp);
	return retval;
}
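
/*
 * Illustrative sketch (not part of this file): how user space typically
 * drives mincore(2).  The mapping size and the local names "buf" and
 * "vec" are made up for the example; any untouched anonymous page would
 * be reported as not resident.
 *
 *	#include <stdio.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	size_t page = sysconf(_SC_PAGESIZE);
 *	size_t len = 4 * page;
 *	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	unsigned char vec[4];
 *
 *	if (mincore(buf, len, vec) == 0)
 *		for (int i = 0; i < 4; i++)
 *			printf("page %d: %sresident\n", i,
 *			       (vec[i] & 1) ? "" : "not ");
 */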