#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <asm/elf.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include "internal.h"

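/*
 * task_mem() feeds the Vm* lines of /proc/<pid>/status: peak and current
 * virtual size, locked memory, high-water and current RSS, data, stack,
 * text, library and page-table sizes, all reported in kB.
 */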
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
        unsigned long data, text, lib;
        unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

        /*
         * Note: to minimize their overhead, mm maintains hiwater_vm and
         * hiwater_rss only when about to *lower* total_vm or rss. Any
         * collector of these hiwater stats must therefore get total_vm
         * and rss too, which will usually be the higher. Barriers? not
         * worth the effort, such snapshots can always be inconsistent.
         */
        hiwater_vm = total_vm = mm->total_vm;
        if (hiwater_vm < mm->hiwater_vm)
                hiwater_vm = mm->hiwater_vm;
        hiwater_rss = total_rss = get_mm_rss(mm);
        if (hiwater_rss < mm->hiwater_rss)
                hiwater_rss = mm->hiwater_rss;

        data = mm->total_vm - mm->shared_vm - mm->stack_vm;
        text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
        lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
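        /*
         * Illustration (assuming 4K pages, PAGE_SHIFT == 12): the
         * "<< (PAGE_SHIFT-10)" conversions below are "<< 2", so a count
         * of N pages is printed as N * 4 kB.
         */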
        seq_printf(m,
                "VmPeak:\t%8lu kB\n"
                "VmSize:\t%8lu kB\n"
                "VmLck:\t%8lu kB\n"
                "VmHWM:\t%8lu kB\n"
                "VmRSS:\t%8lu kB\n"
                "VmData:\t%8lu kB\n"
                "VmStk:\t%8lu kB\n"
                "VmExe:\t%8lu kB\n"
                "VmLib:\t%8lu kB\n"
                "VmPTE:\t%8lu kB\n",
                hiwater_vm << (PAGE_SHIFT-10),
                (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
                mm->locked_vm << (PAGE_SHIFT-10),
                hiwater_rss << (PAGE_SHIFT-10),
                total_rss << (PAGE_SHIFT-10),
                data << (PAGE_SHIFT-10),
                mm->stack_vm << (PAGE_SHIFT-10), text, lib,
                (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
}

unsigned long task_vsize(struct mm_struct *mm)
{
        return PAGE_SIZE * mm->total_vm;
}

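/*
 * task_statm() fills in the page counts reported through /proc/<pid>/statm;
 * the return value is the total mapped size in pages.
 */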
int task_statm(struct mm_struct *mm, int *shared, int *text,
               int *data, int *resident)
{
        *shared = get_mm_counter(mm, file_rss);
        *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
                                                                >> PAGE_SHIFT;
        *data = mm->total_vm - mm->shared_vm;
        *resident = *shared + get_mm_counter(mm, anon_rss);
        return mm->total_vm;
}

static void pad_len_spaces(struct seq_file *m, int len)
{
        len = 25 + sizeof(void*) * 6 - len;
        if (len < 1)
                len = 1;
        seq_printf(m, "%*c", len, ' ');
}

static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
        if (vma && vma != priv->tail_vma) {
                struct mm_struct *mm = vma->vm_mm;
                up_read(&mm->mmap_sem);
                mmput(mm);
        }
}

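/*
 * seq_file ->start(): locate the vma for the requested position, taking
 * mmap_sem and a reference on the task's mm via mm_for_maps(); the gate
 * vma (priv->tail_vma) is returned as the final entry of the walk.
 */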
static void *m_start(struct seq_file *m, loff_t *pos)
{
        struct proc_maps_private *priv = m->private;
        unsigned long last_addr = m->version;
        struct mm_struct *mm;
        struct vm_area_struct *vma, *tail_vma = NULL;
        loff_t l = *pos;

        /* Clear the per syscall fields in priv */
        priv->task = NULL;
        priv->tail_vma = NULL;

        /*
         * We remember last_addr rather than next_addr to hit with
         * mmap_cache most of the time. We have zero last_addr at
         * the beginning and also after lseek. We will have -1 last_addr
         * after the end of the vmas.
         */

        if (last_addr == -1UL)
                return NULL;

        priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
        if (!priv->task)
                return NULL;

        mm = mm_for_maps(priv->task);
        if (!mm)
                return NULL;

        tail_vma = get_gate_vma(priv->task);
        priv->tail_vma = tail_vma;

        /* Start with last addr hint */
        vma = find_vma(mm, last_addr);
        if (last_addr && vma) {
                vma = vma->vm_next;
                goto out;
        }

        /*
         * Check the vma index is within the range and do
         * sequential scan until m_index.
         */
        vma = NULL;
        if ((unsigned long)l < mm->map_count) {
                vma = mm->mmap;
                while (l-- && vma)
                        vma = vma->vm_next;
                goto out;
        }

        if (l != mm->map_count)
                tail_vma = NULL; /* After gate vma */

out:
        if (vma)
                return vma;

        /* End of vmas has been reached */
        m->version = (tail_vma != NULL)? 0: -1UL;
        up_read(&mm->mmap_sem);
        mmput(mm);
        return tail_vma;
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct proc_maps_private *priv = m->private;
        struct vm_area_struct *vma = v;
        struct vm_area_struct *tail_vma = priv->tail_vma;

        (*pos)++;
        if (vma && (vma != tail_vma) && vma->vm_next)
                return vma->vm_next;
        vma_stop(priv, vma);
        return (vma != tail_vma)? tail_vma: NULL;
}

static void m_stop(struct seq_file *m, void *v)
{
        struct proc_maps_private *priv = m->private;
        struct vm_area_struct *vma = v;

        vma_stop(priv, vma);
        if (priv->task)
                put_task_struct(priv->task);
}

static int do_maps_open(struct inode *inode, struct file *file,
                        const struct seq_operations *ops)
{
        struct proc_maps_private *priv;
        int ret = -ENOMEM;
        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (priv) {
                priv->pid = proc_pid(inode);
                ret = seq_open(file, ops);
                if (!ret) {
                        struct seq_file *m = file->private_data;
                        m->private = priv;
                } else {
                        kfree(priv);
                }
        }
        return ret;
}

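/*
 * Emit one /proc/<pid>/maps line for a vma: address range, permissions,
 * offset, device, inode, and either the backing file's path or a
 * [heap], [stack] or [vdso] marker.
 */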
static int show_map(struct seq_file *m, void *v)
{
        struct proc_maps_private *priv = m->private;
        struct task_struct *task = priv->task;
        struct vm_area_struct *vma = v;
        struct mm_struct *mm = vma->vm_mm;
        struct file *file = vma->vm_file;
        int flags = vma->vm_flags;
        unsigned long ino = 0;
        dev_t dev = 0;
        int len;

        if (maps_protect && !ptrace_may_access(task, PTRACE_MODE_READ))
                return -EACCES;

        if (file) {
                struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
                dev = inode->i_sb->s_dev;
                ino = inode->i_ino;
        }

        seq_printf(m, "%08lx-%08lx %c%c%c%c %08lx %02x:%02x %lu %n",
                        vma->vm_start,
                        vma->vm_end,
                        flags & VM_READ ? 'r' : '-',
                        flags & VM_WRITE ? 'w' : '-',
                        flags & VM_EXEC ? 'x' : '-',
                        flags & VM_MAYSHARE ? 's' : 'p',
                        vma->vm_pgoff << PAGE_SHIFT,
                        MAJOR(dev), MINOR(dev), ino, &len);

        /*
         * Print the dentry name for named mappings, and a
         * special [heap] marker for the heap:
         */
        if (file) {
                pad_len_spaces(m, len);
                seq_path(m, &file->f_path, "\n");
        } else {
                const char *name = arch_vma_name(vma);
                if (!name) {
                        if (mm) {
                                if (vma->vm_start <= mm->start_brk &&
                                                vma->vm_end >= mm->brk) {
                                        name = "[heap]";
                                } else if (vma->vm_start <= mm->start_stack &&
                                           vma->vm_end >= mm->start_stack) {
                                        name = "[stack]";
                                }
                        } else {
                                name = "[vdso]";
                        }
                }
                if (name) {
                        pad_len_spaces(m, len);
                        seq_puts(m, name);
                }
        }
        seq_putc(m, '\n');

        if (m->count < m->size)  /* vma is copied successfully */
                m->version = (vma != get_gate_vma(task))? vma->vm_start: 0;
        return 0;
}

static const struct seq_operations proc_pid_maps_op = {
        .start  = m_start,
        .next   = m_next,
        .stop   = m_stop,
        .show   = show_map
};

static int maps_open(struct inode *inode, struct file *file)
{
        return do_maps_open(inode, file, &proc_pid_maps_op);
}

const struct file_operations proc_maps_operations = {
        .open           = maps_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release_private,
};

/*
 * Proportional Set Size (PSS): my share of RSS.
 *
 * PSS of a process is the count of pages it has in memory, where each
 * page is divided by the number of processes sharing it. So if a
 * process has 1000 pages all to itself, and 1000 shared with one other
 * process, its PSS will be 1500.
 *
 * To keep (accumulated) division errors low, we adopt a 64bit
 * fixed-point pss counter to minimize division errors. So (pss >>
 * PSS_SHIFT) would be the real byte count.
 *
 * A shift of 12 before division means (assuming 4K page size):
 * - 1M 3-user-pages add up to 8KB errors;
 * - supports mapcount up to 2^24, or 16M;
 * - supports PSS up to 2^52 bytes, or 4PB.
 */
#define PSS_SHIFT 12

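/*
 * Example (illustrative, assuming 4K pages): a page mapped by three
 * processes contributes (4096 << PSS_SHIFT) / 3 to pss, so each sharer
 * is charged roughly a third of the page; show_smap() recovers kB with
 * pss >> (10 + PSS_SHIFT).
 */
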
#ifdef CONFIG_PROC_PAGE_MONITOR
struct mem_size_stats {
        struct vm_area_struct *vma;
        unsigned long resident;
        unsigned long shared_clean;
        unsigned long shared_dirty;
        unsigned long private_clean;
        unsigned long private_dirty;
        unsigned long referenced;
        unsigned long swap;
        u64 pss;
};

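/*
 * Page-table walk callback for smaps: for each pte in [addr, end),
 * accumulate resident, shared/private clean and dirty, referenced,
 * swap and PSS sizes into the mem_size_stats passed via walk->private.
 */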
static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                           struct mm_walk *walk)
{
        struct mem_size_stats *mss = walk->private;
        struct vm_area_struct *vma = mss->vma;
        pte_t *pte, ptent;
        spinlock_t *ptl;
        struct page *page;
        int mapcount;

        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        for (; addr != end; pte++, addr += PAGE_SIZE) {
                ptent = *pte;

                if (is_swap_pte(ptent)) {
                        mss->swap += PAGE_SIZE;
                        continue;
                }

                if (!pte_present(ptent))
                        continue;

                mss->resident += PAGE_SIZE;

                page = vm_normal_page(vma, addr, ptent);
                if (!page)
                        continue;

                /* Accumulate the size in pages that have been accessed. */
                if (pte_young(ptent) || PageReferenced(page))
                        mss->referenced += PAGE_SIZE;
                mapcount = page_mapcount(page);
                if (mapcount >= 2) {
                        if (pte_dirty(ptent))
                                mss->shared_dirty += PAGE_SIZE;
                        else
                                mss->shared_clean += PAGE_SIZE;
                        mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
                } else {
                        if (pte_dirty(ptent))
                                mss->private_dirty += PAGE_SIZE;
                        else
                                mss->private_clean += PAGE_SIZE;
                        mss->pss += (PAGE_SIZE << PSS_SHIFT);
                }
        }
        pte_unmap_unlock(pte - 1, ptl);
        cond_resched();
        return 0;
}

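/*
 * Walk the vma's page tables with smaps_pte_range(), then print the
 * maps line followed by the Size/Rss/Pss/Shared/Private/Referenced/Swap
 * breakdown that makes up /proc/<pid>/smaps.
 */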
static int show_smap(struct seq_file *m, void *v)
{
        struct vm_area_struct *vma = v;
        struct mem_size_stats mss;
        int ret;
        struct mm_walk smaps_walk = {
                .pmd_entry = smaps_pte_range,
                .mm = vma->vm_mm,
                .private = &mss,
        };

        memset(&mss, 0, sizeof mss);
        mss.vma = vma;
        if (vma->vm_mm && !is_vm_hugetlb_page(vma))
                walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);

        ret = show_map(m, v);
        if (ret)
                return ret;

        seq_printf(m,
                   "Size:           %8lu kB\n"
                   "Rss:            %8lu kB\n"
                   "Pss:            %8lu kB\n"
                   "Shared_Clean:   %8lu kB\n"
                   "Shared_Dirty:   %8lu kB\n"
                   "Private_Clean:  %8lu kB\n"
                   "Private_Dirty:  %8lu kB\n"
                   "Referenced:     %8lu kB\n"
                   "Swap:           %8lu kB\n",
                   (vma->vm_end - vma->vm_start) >> 10,
                   mss.resident >> 10,
                   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
                   mss.shared_clean >> 10,
                   mss.shared_dirty >> 10,
                   mss.private_clean >> 10,
                   mss.private_dirty >> 10,
                   mss.referenced >> 10,
                   mss.swap >> 10);

        return ret;
}

static const struct seq_operations proc_pid_smaps_op = {
        .start  = m_start,
        .next   = m_next,
        .stop   = m_stop,
        .show   = show_smap
};

static int smaps_open(struct inode *inode, struct file *file)
{
        return do_maps_open(inode, file, &proc_pid_smaps_op);
}

const struct file_operations proc_smaps_operations = {
        .open           = smaps_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release_private,
};

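/*
 * Page-table walk callback for clear_refs: clear the accessed bit on
 * each present pte and the referenced flag on its page, so subsequent
 * accesses show up again in the Referenced field of smaps.
 */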
static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
                                unsigned long end, struct mm_walk *walk)
{
        struct vm_area_struct *vma = walk->private;
        pte_t *pte, ptent;
        spinlock_t *ptl;
        struct page *page;

        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        for (; addr != end; pte++, addr += PAGE_SIZE) {
                ptent = *pte;
                if (!pte_present(ptent))
                        continue;

                page = vm_normal_page(vma, addr, ptent);
                if (!page)
                        continue;

                /* Clear accessed and referenced bits. */
                ptep_test_and_clear_young(vma, addr, pte);
                ClearPageReferenced(page);
        }
        pte_unmap_unlock(pte - 1, ptl);
        cond_resched();
        return 0;
}

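/*
 * Writing a non-zero value to /proc/<pid>/clear_refs walks every
 * non-hugetlb vma of the task, clears its referenced bits and flushes
 * the TLB, so working-set measurements can start from a clean slate.
 */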
static ssize_t clear_refs_write(struct file *file, const char __user *buf,
                                size_t count, loff_t *ppos)
{
        struct task_struct *task;
        char buffer[PROC_NUMBUF], *end;
        struct mm_struct *mm;
        struct vm_area_struct *vma;

        memset(buffer, 0, sizeof(buffer));
        if (count > sizeof(buffer) - 1)
                count = sizeof(buffer) - 1;
        if (copy_from_user(buffer, buf, count))
                return -EFAULT;
        if (!simple_strtol(buffer, &end, 0))
                return -EINVAL;
        if (*end == '\n')
                end++;
        task = get_proc_task(file->f_path.dentry->d_inode);
        if (!task)
                return -ESRCH;
        mm = get_task_mm(task);
        if (mm) {
                struct mm_walk clear_refs_walk = {
                        .pmd_entry = clear_refs_pte_range,
                        .mm = mm,
                };
                down_read(&mm->mmap_sem);
                for (vma = mm->mmap; vma; vma = vma->vm_next) {
                        clear_refs_walk.private = vma;
                        if (!is_vm_hugetlb_page(vma))
                                walk_page_range(vma->vm_start, vma->vm_end,
                                                &clear_refs_walk);
                }
                flush_tlb_mm(mm);
                up_read(&mm->mmap_sem);
                mmput(mm);
        }
        put_task_struct(task);
        if (end - buffer == 0)
                return -EIO;
        return end - buffer;
}

const struct file_operations proc_clear_refs_operations = {
        .write          = clear_refs_write,
};

struct pagemapread {
        u64 __user *out, *end;
};

#define PM_ENTRY_BYTES sizeof(u64)
#define PM_STATUS_BITS 3
#define PM_STATUS_OFFSET (64 - PM_STATUS_BITS)
#define PM_STATUS_MASK (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
#define PM_STATUS(nr) (((nr) << PM_STATUS_OFFSET) & PM_STATUS_MASK)
#define PM_PSHIFT_BITS 6
#define PM_PSHIFT_OFFSET (PM_STATUS_OFFSET - PM_PSHIFT_BITS)
#define PM_PSHIFT_MASK (((1LL << PM_PSHIFT_BITS) - 1) << PM_PSHIFT_OFFSET)
#define PM_PSHIFT(x) (((u64) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK)
#define PM_PFRAME_MASK ((1LL << PM_PSHIFT_OFFSET) - 1)
#define PM_PFRAME(x) ((x) & PM_PFRAME_MASK)

#define PM_PRESENT PM_STATUS(4LL)
#define PM_SWAP PM_STATUS(2LL)
#define PM_NOT_PRESENT PM_PSHIFT(PAGE_SHIFT)
#define PM_END_OF_BUFFER 1

static int add_to_pagemap(unsigned long addr, u64 pfn,
                          struct pagemapread *pm)
{
        if (put_user(pfn, pm->out))
                return -EFAULT;
        pm->out++;
        if (pm->out >= pm->end)
                return PM_END_OF_BUFFER;
        return 0;
}

static int pagemap_pte_hole(unsigned long start, unsigned long end,
                            struct mm_walk *walk)
{
        struct pagemapread *pm = walk->private;
        unsigned long addr;
        int err = 0;
        for (addr = start; addr < end; addr += PAGE_SIZE) {
                err = add_to_pagemap(addr, PM_NOT_PRESENT, pm);
                if (err)
                        break;
        }
        return err;
}

static u64 swap_pte_to_pagemap_entry(pte_t pte)
{
        swp_entry_t e = pte_to_swp_entry(pte);
        return swp_type(e) | (swp_offset(e) << MAX_SWAPFILES_SHIFT);
}

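/*
 * Encode a pte as a pagemap entry: swapped-out ptes carry the swap type
 * and offset plus PM_SWAP, present ptes carry the pfn plus PM_PRESENT,
 * and anything else reads as zero.
 */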
static unsigned long pte_to_pagemap_entry(pte_t pte)
{
        unsigned long pme = 0;
        if (is_swap_pte(pte))
                pme = PM_PFRAME(swap_pte_to_pagemap_entry(pte))
                        | PM_PSHIFT(PAGE_SHIFT) | PM_SWAP;
        else if (pte_present(pte))
                pme = PM_PFRAME(pte_pfn(pte))
                        | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
        return pme;
}

static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                             struct mm_walk *walk)
{
        struct vm_area_struct *vma;
        struct pagemapread *pm = walk->private;
        pte_t *pte;
        int err = 0;

        /* find the first VMA at or above 'addr' */
        vma = find_vma(walk->mm, addr);
        for (; addr != end; addr += PAGE_SIZE) {
                u64 pfn = PM_NOT_PRESENT;

                /* check to see if we've left 'vma' behind
                 * and need a new, higher one */
                if (vma && (addr >= vma->vm_end))
                        vma = find_vma(walk->mm, addr);

                /* check that 'vma' actually covers this address,
                 * and that it isn't a huge page vma */
                if (vma && (vma->vm_start <= addr) &&
                    !is_vm_hugetlb_page(vma)) {
                        pte = pte_offset_map(pmd, addr);
                        pfn = pte_to_pagemap_entry(*pte);
                        /* unmap before userspace copy */
                        pte_unmap(pte);
                }
                err = add_to_pagemap(addr, pfn, pm);
                if (err)
                        return err;
        }

        cond_resched();

        return err;
}

/*
 * /proc/pid/pagemap - an array mapping virtual pages to pfns
 *
 * For each page in the address space, this file contains one 64-bit entry
 * consisting of the following:
 *
 * Bits 0-55  page frame number (PFN) if present
 * Bits 0-4   swap type if swapped
 * Bits 5-55  swap offset if swapped
 * Bits 55-60 page shift (page size = 1<<page shift)
 * Bit  61    reserved for future use
 * Bit  62    page swapped
 * Bit  63    page present
 *
 * If the page is not present but in swap, then the PFN contains an
 * encoding of the swap file number and the page's offset into the
 * swap. Unmapped pages return a null PFN. This allows determining
 * precisely which pages are mapped (or in swap) and comparing mapped
 * pages between processes.
 *
 * Efficient users of this interface will use /proc/pid/maps to
 * determine which areas of memory are actually mapped and llseek to
 * skip over unmapped regions.
 */
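/*
 * Illustration (assuming 4K pages, PAGE_SHIFT == 12): a present page at
 * pfn 0x1234 is reported as PM_PFRAME(0x1234) | PM_PSHIFT(12) | PM_PRESENT,
 * i.e. the pfn in the low bits, 12 in the page-shift field and bit 63 set.
 * A hole in the address space reads back as PM_NOT_PRESENT, which is just
 * PM_PSHIFT(PAGE_SHIFT) with no status bits set.
 */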
static ssize_t pagemap_read(struct file *file, char __user *buf,
                            size_t count, loff_t *ppos)
{
        struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
        struct page **pages, *page;
        unsigned long uaddr, uend;
        struct mm_struct *mm;
        struct pagemapread pm;
        int pagecount;
        int ret = -ESRCH;
        struct mm_walk pagemap_walk;
        unsigned long src;
        unsigned long svpfn;
        unsigned long start_vaddr;
        unsigned long end_vaddr;

        if (!task)
                goto out;

        ret = -EACCES;
        if (!ptrace_may_access(task, PTRACE_MODE_READ))
                goto out_task;

        ret = -EINVAL;
        /* file position must be aligned */
        if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
                goto out_task;

        ret = 0;
        mm = get_task_mm(task);
        if (!mm)
                goto out_task;

        /*
         * Fault in and pin the pages backing the user buffer; they are
         * marked dirty and released at out_pages once the walk has
         * filled them via put_user() in add_to_pagemap().
         */
        uaddr = (unsigned long)buf & PAGE_MASK;
        uend = (unsigned long)(buf + count);
        pagecount = (PAGE_ALIGN(uend) - uaddr) / PAGE_SIZE;
        ret = 0;
        if (pagecount == 0)
                goto out_mm;
        pages = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
        ret = -ENOMEM;
        if (!pages)
                goto out_mm;

        down_read(&current->mm->mmap_sem);
        ret = get_user_pages(current, current->mm, uaddr, pagecount,
                             1, 0, pages, NULL);
        up_read(&current->mm->mmap_sem);

        if (ret < 0)
                goto out_free;

        if (ret != pagecount) {
                pagecount = ret;
                ret = -EFAULT;
                goto out_pages;
        }

        pm.out = (u64 *)buf;
        pm.end = (u64 *)(buf + count);

        pagemap_walk.pmd_entry = pagemap_pte_range;
        pagemap_walk.pte_hole = pagemap_pte_hole;
        pagemap_walk.mm = mm;
        pagemap_walk.private = &pm;

        src = *ppos;
        svpfn = src / PM_ENTRY_BYTES;
        start_vaddr = svpfn << PAGE_SHIFT;
        end_vaddr = TASK_SIZE_OF(task);

        /* watch out for wraparound */
        if (svpfn > TASK_SIZE_OF(task) >> PAGE_SHIFT)
                start_vaddr = end_vaddr;

        /*
         * The odds are that this will stop walking way
         * before end_vaddr, because the length of the
         * user buffer is tracked in "pm", and the walk
         * will stop when we hit the end of the buffer.
         */
        ret = walk_page_range(start_vaddr, end_vaddr, &pagemap_walk);
        if (ret == PM_END_OF_BUFFER)
                ret = 0;
        /* don't need mmap_sem for these, but this looks cleaner */
        *ppos += (char *)pm.out - buf;
        if (!ret)
                ret = (char *)pm.out - buf;

out_pages:
        for (; pagecount; pagecount--) {
                page = pages[pagecount-1];
                if (!PageReserved(page))
                        SetPageDirty(page);
                page_cache_release(page);
        }
out_free:
        kfree(pages);
out_mm:
        mmput(mm);
out_task:
        put_task_struct(task);
out:
        return ret;
}

const struct file_operations proc_pagemap_operations = {
        .llseek         = mem_lseek, /* borrow this */
        .read           = pagemap_read,
};
#endif /* CONFIG_PROC_PAGE_MONITOR */

#ifdef CONFIG_NUMA
extern int show_numa_map(struct seq_file *m, void *v);

static int show_numa_map_checked(struct seq_file *m, void *v)
{
        struct proc_maps_private *priv = m->private;
        struct task_struct *task = priv->task;

        if (maps_protect && !ptrace_may_access(task, PTRACE_MODE_READ))
                return -EACCES;

        return show_numa_map(m, v);
}

static const struct seq_operations proc_pid_numa_maps_op = {
        .start  = m_start,
        .next   = m_next,
        .stop   = m_stop,
        .show   = show_numa_map_checked
};

static int numa_maps_open(struct inode *inode, struct file *file)
{
        return do_maps_open(inode, file, &proc_pid_numa_maps_op);
}

const struct file_operations proc_numa_maps_operations = {
        .open           = numa_maps_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release_private,
};
#endif