/*
 * linux/mm/process_vm_access.c
 *
 * Copyright (C) 2010-2011 Christopher Yeoh <cyeoh@au1.ibm.com>, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/mm.h>
#include <linux/uio.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
/**
 * process_vm_rw_pages - read/write pages from task specified
 * @pages: array of pointers to the pages pinned in the target task
 * @offset: offset in the first page to start copying from/to
 * @len: number of bytes to copy
 * @iter: where to copy to/from locally
 * @vm_write: 0 means copy from, 1 means copy to
 * @bytes_copied: returns number of bytes successfully copied
 * Returns 0 on success, error code otherwise
 */
static int process_vm_rw_pages(struct page **pages,
                               unsigned offset,
                               size_t len,
                               struct iov_iter *iter,
                               int vm_write,
                               ssize_t *bytes_copied)
{
        *bytes_copied = 0;

        /* Do the copy for each page */
        while (iov_iter_count(iter) && len) {
                struct page *page = *pages++;
                size_t copy = PAGE_SIZE - offset;
                size_t copied;

                if (copy > len)
                        copy = len;

                if (vm_write) {
                        if (copy > iov_iter_count(iter))
                                copy = iov_iter_count(iter);
                        copied = iov_iter_copy_from_user(page, iter,
                                                         offset, copy);
                        iov_iter_advance(iter, copied);
                        set_page_dirty_lock(page);
                } else {
                        copied = copy_page_to_iter(page, offset, copy, iter);
                }
                *bytes_copied += copied;
                len -= copied;
                if (copied < copy && iov_iter_count(iter))
                        return -EFAULT;
                offset = 0;
        }
        return 0;
}
/* Maximum number of pages kmalloc'd to hold struct page's during copy */
#define PVM_MAX_KMALLOC_PAGES (PAGE_SIZE * 2)
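/*
 * Illustrative sizing (assuming the common case of 4 KB pages and 8-byte
 * pointers): PVM_MAX_KMALLOC_PAGES / sizeof(struct page *) == 1024 page
 * pointers per batch, i.e. at most 4 MB of the target's address space is
 * pinned per iteration of the copy loop below.
 */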
/**
 * process_vm_rw_single_vec - read/write pages from task specified
 * @addr: start memory address of target process
 * @len: size of area to copy to/from
 * @iter: where to copy to/from locally
 * @process_pages: struct pages area that can store at least
 *  nr_pages_to_copy struct page pointers
 * @mm: mm for task
 * @task: task to read/write from
 * @vm_write: 0 means copy from, 1 means copy to
 * @bytes_copied: returns number of bytes successfully copied
 * Returns 0 on success, error code on failure
 */
static int process_vm_rw_single_vec(unsigned long addr,
                                    unsigned long len,
                                    struct iov_iter *iter,
                                    struct page **process_pages,
                                    struct mm_struct *mm,
                                    struct task_struct *task,
                                    int vm_write,
                                    ssize_t *bytes_copied)
{
        unsigned long pa = addr & PAGE_MASK;
        unsigned long start_offset = addr - pa;
        unsigned long nr_pages;
        ssize_t bytes_copied_loop;
        ssize_t rc = 0;
        unsigned long nr_pages_copied = 0;
        unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES
                / sizeof(struct page *);
        /* Work out address and page range required */
        nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;
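        /*
         * Worked example (illustrative values, 4 KB pages): addr = 0x1ff0
         * and len = 0x20 end at 0x200f, so nr_pages = 0x200f/0x1000 -
         * 0x1ff0/0x1000 + 1 = 2 - 1 + 1 = 2 pages straddling the boundary.
         */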
        while ((nr_pages_copied < nr_pages) && iov_iter_count(iter)) {
                int nr_pages_to_copy;
                int pages_pinned;
                size_t n;

                nr_pages_to_copy = min(nr_pages - nr_pages_copied,
                                       max_pages_per_loop);

                /* Get the pages we're interested in */
                down_read(&mm->mmap_sem);
                pages_pinned = get_user_pages(task, mm, pa,
                                              nr_pages_to_copy,
                                              vm_write, 0, process_pages, NULL);
                up_read(&mm->mmap_sem);

                if (pages_pinned <= 0)
                        return -EFAULT;

                n = pages_pinned * PAGE_SIZE - start_offset;
                if (n > len)
                        n = len;

                rc = process_vm_rw_pages(process_pages,
                                         start_offset, n, iter,
                                         vm_write, &bytes_copied_loop);
                len -= n;
                start_offset = 0;
                *bytes_copied += bytes_copied_loop;
                nr_pages_copied += pages_pinned;
                pa += pages_pinned * PAGE_SIZE;
                while (pages_pinned)
                        put_page(process_pages[--pages_pinned]);
                if (rc < 0)
                        break;
        }

        return rc;
}
/* Maximum number of entries for process pages array
   which lives on stack */
#define PVM_MAX_PP_ARRAY_COUNT 16
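/*
 * 16 entries keeps the common case (small reads/writes) entirely on the
 * stack: 128 bytes of pointers on 64-bit (illustrative figure).  Requests
 * spanning more pages fall back to a kmalloc'd array bounded by
 * PVM_MAX_KMALLOC_PAGES above.
 */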
/**
 * process_vm_rw_core - core of reading/writing pages from task specified
 * @pid: PID of process to read/write from/to
 * @iter: where to copy to/from locally
 * @rvec: iovec array specifying where to copy to/from in the other process
 * @riovcnt: size of rvec array
 * @flags: currently unused
 * @vm_write: 0 if reading from other process, 1 if writing to other process
 * Returns the number of bytes read/written or error code. May
 *  return fewer bytes than expected if an error occurs during the copying
 *  process.
 */
static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
                                  const struct iovec *rvec,
                                  unsigned long riovcnt,
                                  unsigned long flags, int vm_write)
{
        struct task_struct *task;
        struct page *pp_stack[PVM_MAX_PP_ARRAY_COUNT];
        struct page **process_pages = pp_stack;
        struct mm_struct *mm;
        unsigned long i;
        ssize_t rc = 0;
        ssize_t bytes_copied_loop;
        ssize_t bytes_copied = 0;
        unsigned long nr_pages = 0;
        unsigned long nr_pages_iov;
        ssize_t iov_len;
        /*
         * Work out how many pages of struct pages we're going to need
         * when eventually calling get_user_pages
         */
        for (i = 0; i < riovcnt; i++) {
                iov_len = rvec[i].iov_len;
                if (iov_len > 0) {
                        nr_pages_iov = ((unsigned long)rvec[i].iov_base
                                        + iov_len)
                                / PAGE_SIZE - (unsigned long)rvec[i].iov_base
                                / PAGE_SIZE + 1;
                        nr_pages = max(nr_pages, nr_pages_iov);
                }
        }

        if (nr_pages == 0)
                return 0;
        if (nr_pages > PVM_MAX_PP_ARRAY_COUNT) {
                /* For reliability don't try to kmalloc more than
                   2 pages worth */
                process_pages = kmalloc(min_t(size_t, PVM_MAX_KMALLOC_PAGES,
                                              sizeof(struct page *)*nr_pages),
                                        GFP_KERNEL);

                if (!process_pages)
                        return -ENOMEM;
        }
        /* Get process information */
        rcu_read_lock();
        task = find_task_by_vpid(pid);
        if (task)
                get_task_struct(task);
        rcu_read_unlock();
        if (!task) {
                rc = -ESRCH;
                goto free_proc_pages;
        }

        mm = mm_access(task, PTRACE_MODE_ATTACH);
        if (!mm || IS_ERR(mm)) {
                rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
                /*
                 * Explicitly map EACCES to EPERM as EPERM is a more
                 * appropriate error code for process_vm_readv/writev
                 */
                if (rc == -EACCES)
                        rc = -EPERM;
                goto put_task_struct;
        }
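        /*
         * From this point we hold a reference on both the target task and
         * its mm; they are released on the put_task_struct/put_mm exit
         * paths below.
         */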
        for (i = 0; i < riovcnt && iov_iter_count(iter); i++) {
                rc = process_vm_rw_single_vec(
                        (unsigned long)rvec[i].iov_base, rvec[i].iov_len,
                        iter, process_pages, mm, task, vm_write,
                        &bytes_copied_loop);
                bytes_copied += bytes_copied_loop;
                if (rc != 0) {
                        /* If we have managed to copy any data at all then
                           we return the number of bytes copied. Otherwise
                           we return the error code */
                        if (bytes_copied)
                                rc = bytes_copied;
                        goto put_mm;
                }
        }

        rc = bytes_copied;
put_mm:
        mmput(mm);

put_task_struct:
        put_task_struct(task);

free_proc_pages:
        if (process_pages != pp_stack)
                kfree(process_pages);

        return rc;
}
/**
 * process_vm_rw - check iovecs before calling core routine
 * @pid: PID of process to read/write from/to
 * @lvec: iovec array specifying where to copy to/from locally
 * @liovcnt: size of lvec array
 * @rvec: iovec array specifying where to copy to/from in the other process
 * @riovcnt: size of rvec array
 * @flags: currently unused
 * @vm_write: 0 if reading from other process, 1 if writing to other process
 * Returns the number of bytes read/written or error code. May
 *  return fewer bytes than expected if an error occurs during the copying
 *  process.
 */
static ssize_t process_vm_rw(pid_t pid,
                             const struct iovec __user *lvec,
                             unsigned long liovcnt,
                             const struct iovec __user *rvec,
                             unsigned long riovcnt,
                             unsigned long flags, int vm_write)
{
        struct iovec iovstack_l[UIO_FASTIOV];
        struct iovec iovstack_r[UIO_FASTIOV];
        struct iovec *iov_l = iovstack_l;
        struct iovec *iov_r = iovstack_r;
        struct iov_iter iter;
        ssize_t rc;

        if (flags != 0)
                return -EINVAL;

        /* Check iovecs */
        if (vm_write)
                rc = rw_copy_check_uvector(WRITE, lvec, liovcnt, UIO_FASTIOV,
                                           iovstack_l, &iov_l);
        else
                rc = rw_copy_check_uvector(READ, lvec, liovcnt, UIO_FASTIOV,
                                           iovstack_l, &iov_l);
        if (rc <= 0)
                goto free_iovecs;

        iov_iter_init(&iter, iov_l, liovcnt, rc, 0);

        rc = rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt, UIO_FASTIOV,
                                   iovstack_r, &iov_r);
        if (rc <= 0)
                goto free_iovecs;

        rc = process_vm_rw_core(pid, &iter, iov_r, riovcnt, flags, vm_write);

free_iovecs:
        if (iov_r != iovstack_r)
                kfree(iov_r);
        if (iov_l != iovstack_l)
                kfree(iov_l);

        return rc;
}
SYSCALL_DEFINE6(process_vm_readv, pid_t, pid, const struct iovec __user *, lvec,
                unsigned long, liovcnt, const struct iovec __user *, rvec,
                unsigned long, riovcnt, unsigned long, flags)
{
        return process_vm_rw(pid, lvec, liovcnt, rvec, riovcnt, flags, 0);
}
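/*
 * Illustrative userspace sketch (not part of this file) of how the syscall
 * above is typically invoked; the pid, remote_addr and buffer size are
 * placeholder values.
 *
 *	#define _GNU_SOURCE
 *	#include <sys/uio.h>
 *
 *	char buf[128];
 *	struct iovec local  = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct iovec remote = { .iov_base = (void *)remote_addr,
 *	                        .iov_len = sizeof(buf) };
 *	ssize_t n = process_vm_readv(pid, &local, 1, &remote, 1, 0);
 *	n < 0 means error; otherwise n bytes were copied (may be a short read).
 */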
SYSCALL_DEFINE6(process_vm_writev, pid_t, pid,
                const struct iovec __user *, lvec,
                unsigned long, liovcnt, const struct iovec __user *, rvec,
                unsigned long, riovcnt, unsigned long, flags)
{
        return process_vm_rw(pid, lvec, liovcnt, rvec, riovcnt, flags, 1);
}
static ssize_t
compat_process_vm_rw(compat_pid_t pid,
                     const struct compat_iovec __user *lvec,
                     unsigned long liovcnt,
                     const struct compat_iovec __user *rvec,
                     unsigned long riovcnt,
                     unsigned long flags, int vm_write)
{
        struct iovec iovstack_l[UIO_FASTIOV];
        struct iovec iovstack_r[UIO_FASTIOV];
        struct iovec *iov_l = iovstack_l;
        struct iovec *iov_r = iovstack_r;
        struct iov_iter iter;
        ssize_t rc = -EFAULT;
        if (flags != 0)
                return -EINVAL;

        if (vm_write)
                rc = compat_rw_copy_check_uvector(WRITE, lvec, liovcnt,
                                                  UIO_FASTIOV, iovstack_l,
                                                  &iov_l);
        else
                rc = compat_rw_copy_check_uvector(READ, lvec, liovcnt,
                                                  UIO_FASTIOV, iovstack_l,
                                                  &iov_l);
        if (rc <= 0)
                goto free_iovecs;
        iov_iter_init(&iter, iov_l, liovcnt, rc, 0);
        rc = compat_rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt,
                                          UIO_FASTIOV, iovstack_r,
                                          &iov_r);
        if (rc <= 0)
                goto free_iovecs;

        rc = process_vm_rw_core(pid, &iter, iov_r, riovcnt, flags, vm_write);

free_iovecs:
        if (iov_r != iovstack_r)
                kfree(iov_r);
        if (iov_l != iovstack_l)
                kfree(iov_l);
        return rc;
}
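/*
 * The compat entry points below differ from the native ones only in how
 * the 32-bit iovec arrays are validated and converted
 * (compat_rw_copy_check_uvector); the copying itself is shared via
 * process_vm_rw_core().
 */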
asmlinkage ssize_t
compat_sys_process_vm_readv(compat_pid_t pid,
                            const struct compat_iovec __user *lvec,
                            unsigned long liovcnt,
                            const struct compat_iovec __user *rvec,
                            unsigned long riovcnt,
                            unsigned long flags)
{
        return compat_process_vm_rw(pid, lvec, liovcnt, rvec,
                                    riovcnt, flags, 0);
}
asmlinkage ssize_t
compat_sys_process_vm_writev(compat_pid_t pid,
                             const struct compat_iovec __user *lvec,
                             unsigned long liovcnt,
                             const struct compat_iovec __user *rvec,
                             unsigned long riovcnt,
                             unsigned long flags)
{
        return compat_process_vm_rw(pid, lvec, liovcnt, rvec,
                                    riovcnt, flags, 1);
}