// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * linux/mm/process_vm_access.c
 *
 * Copyright (C) 2010-2011 Christopher Yeoh <cyeoh@au1.ibm.com>, IBM Corp.
 */

#include <linux/compat.h>
#include <linux/mm.h>
#include <linux/uio.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
/**
 * process_vm_rw_pages - read/write pages from task specified
 * @pages: array of pointers to pages we want to copy
 * @offset: offset in page to start copying from/to
 * @len: number of bytes to copy
 * @iter: where to copy to/from locally
 * @vm_write: 0 means copy from, 1 means copy to
 * Returns 0 on success, error code otherwise
 */
static int process_vm_rw_pages(struct page **pages,
			       unsigned offset,
			       size_t len,
			       struct iov_iter *iter,
			       int vm_write)
{
	/* Do the copy for each page */
	while (len && iov_iter_count(iter)) {
		struct page *page = *pages++;
		size_t copy = PAGE_SIZE - offset;
		size_t copied;

		if (copy > len)
			copy = len;

		if (vm_write)
			copied = copy_page_from_iter(page, offset, copy, iter);
		else
			copied = copy_page_to_iter(page, offset, copy, iter);

		len -= copied;
		if (copied < copy && iov_iter_count(iter))
			return -EFAULT;
		offset = 0;
	}
	return 0;
}
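/*
 * Worked example for the loop above (editor's illustration, assuming
 * 4 KiB pages): copying len = 0x1800 bytes starting at offset = 0x800
 * touches two pages. The first iteration copies PAGE_SIZE - offset =
 * 0x800 bytes, offset is then reset to 0, and the second iteration
 * copies the remaining 0x1000 bytes from the next page.
 */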
/* Maximum number of pages kmalloc'd to hold struct page's during copy */
#define PVM_MAX_KMALLOC_PAGES (PAGE_SIZE * 2)
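/*
 * Sizing note (editor's illustration, assuming 4 KiB pages and 8-byte
 * pointers): two pages hold (2 * 4096) / 8 = 1024 struct page pointers,
 * which bounds max_pages_per_loop in process_vm_rw_single_vec() below.
 */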
/**
 * process_vm_rw_single_vec - read/write pages from task specified
 * @addr: start memory address of target process
 * @len: size of area to copy to/from
 * @iter: where to copy to/from locally
 * @process_pages: struct pages area that can store at least
 *  nr_pages_to_copy struct page pointers
 * @mm: mm for task
 * @task: task to read/write from
 * @vm_write: 0 means copy from, 1 means copy to
 * Returns 0 on success or an error code on failure
 */
static int process_vm_rw_single_vec(unsigned long addr,
				    unsigned long len,
				    struct iov_iter *iter,
				    struct page **process_pages,
				    struct mm_struct *mm,
				    struct task_struct *task,
				    int vm_write)
{
	unsigned long pa = addr & PAGE_MASK;
	unsigned long start_offset = addr - pa;
	unsigned long nr_pages;
	ssize_t rc = 0;
	unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES
		/ sizeof(struct page *);
	unsigned int flags = 0;

	/* Work out address and page range required */
	if (len == 0)
		return 0;
	nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;
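	/*
	 * Worked example of the page-count arithmetic above (editor's
	 * illustration, assuming 4 KiB pages): addr = 0x1800 and
	 * len = 0x1000 end at byte 0x27ff, so nr_pages =
	 * 0x27ff/0x1000 - 0x1800/0x1000 + 1 = 2, i.e. the range straddles
	 * two pages even though len is only one page long.
	 */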
	if (vm_write)
		flags |= FOLL_WRITE;

	while (!rc && nr_pages && iov_iter_count(iter)) {
		int pinned_pages = min(nr_pages, max_pages_per_loop);
		int locked = 1;
		size_t bytes;

		/*
		 * Get the pages we're interested in.  We must
		 * access remotely because task/mm might not be
		 * current/current->mm
		 */
		mmap_read_lock(mm);
		pinned_pages = pin_user_pages_remote(mm, pa, pinned_pages,
						     flags, process_pages,
						     NULL, &locked);
		if (locked)
			mmap_read_unlock(mm);
		if (pinned_pages <= 0)
			return -EFAULT;

		bytes = pinned_pages * PAGE_SIZE - start_offset;
		if (bytes > len)
			bytes = len;

		rc = process_vm_rw_pages(process_pages,
					 start_offset, bytes, iter,
					 vm_write);
		len -= bytes;
		start_offset = 0;
		nr_pages -= pinned_pages;
		pa += pinned_pages * PAGE_SIZE;

		/* If vm_write is set, the pages need to be made dirty: */
		unpin_user_pages_dirty_lock(process_pages, pinned_pages,
					    vm_write);
	}

	return rc;
}
/* Maximum number of entries for process pages array
   which lives on stack */
#define PVM_MAX_PP_ARRAY_COUNT 16
/**
 * process_vm_rw_core - core of reading/writing pages from task specified
 * @pid: PID of process to read/write from/to
 * @iter: where to copy to/from locally
 * @rvec: iovec array specifying where to copy to/from in the other process
 * @riovcnt: size of rvec array
 * @flags: currently unused
 * @vm_write: 0 if reading from other process, 1 if writing to other
 *  process
 *
 * Returns the number of bytes read/written or error code. May
 *  return fewer bytes than expected if an error occurs during the copying
 *  process.
 */
static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
				  const struct iovec *rvec,
				  unsigned long riovcnt,
				  unsigned long flags, int vm_write)
{
	struct task_struct *task;
	struct page *pp_stack[PVM_MAX_PP_ARRAY_COUNT];
	struct page **process_pages = pp_stack;
	struct mm_struct *mm;
	unsigned long i;
	ssize_t rc = 0;
	unsigned long nr_pages = 0;
	unsigned long nr_pages_iov;
	ssize_t iov_len;
	size_t total_len = iov_iter_count(iter);

	/*
	 * Work out how many pages of struct pages we're going to need
	 * when eventually calling get_user_pages
	 */
	for (i = 0; i < riovcnt; i++) {
		iov_len = rvec[i].iov_len;
		if (iov_len > 0) {
			nr_pages_iov = ((unsigned long)rvec[i].iov_base
					+ iov_len)
				/ PAGE_SIZE - (unsigned long)rvec[i].iov_base
				/ PAGE_SIZE + 1;
			nr_pages = max(nr_pages, nr_pages_iov);
		}
	}

	if (nr_pages == 0)
		return 0;

	if (nr_pages > PVM_MAX_PP_ARRAY_COUNT) {
		/* For reliability don't try to kmalloc more than
		   2 pages worth */
		process_pages = kmalloc(min_t(size_t, PVM_MAX_KMALLOC_PAGES,
					      sizeof(struct page *)*nr_pages),
					GFP_KERNEL);

		if (!process_pages)
			return -ENOMEM;
	}

	/* Get process information */
	task = find_get_task_by_vpid(pid);
	if (!task) {
		rc = -ESRCH;
		goto free_proc_pages;
	}

	mm = mm_access(task, PTRACE_MODE_ATTACH_REALCREDS);
	if (!mm || IS_ERR(mm)) {
		rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
		/*
		 * Explicitly map EACCES to EPERM as EPERM is a more
		 * appropriate error code for process_vm_readv/writev
		 */
		if (rc == -EACCES)
			rc = -EPERM;
		goto put_task_struct;
	}

	for (i = 0; i < riovcnt && iov_iter_count(iter) && !rc; i++)
		rc = process_vm_rw_single_vec(
			(unsigned long)rvec[i].iov_base, rvec[i].iov_len,
			iter, process_pages, mm, task, vm_write);

	/* copied = space before - space after */
	total_len -= iov_iter_count(iter);

	/* If we have managed to copy any data at all then
	   we return the number of bytes copied. Otherwise
	   we return the error code */
	if (total_len)
		rc = total_len;

	mmput(mm);

put_task_struct:
	put_task_struct(task);

free_proc_pages:
	if (process_pages != pp_stack)
		kfree(process_pages);
	return rc;
}
/**
 * process_vm_rw - check iovecs before calling core routine
 * @pid: PID of process to read/write from/to
 * @lvec: iovec array specifying where to copy to/from locally
 * @liovcnt: size of lvec array
 * @rvec: iovec array specifying where to copy to/from in the other process
 * @riovcnt: size of rvec array
 * @flags: currently unused
 * @vm_write: 0 if reading from other process, 1 if writing to other
 *  process
 *
 * Returns the number of bytes read/written or error code. May
 *  return fewer bytes than expected if an error occurs during the copying
 *  process.
 */
static ssize_t process_vm_rw(pid_t pid,
			     const struct iovec __user *lvec,
			     unsigned long liovcnt,
			     const struct iovec __user *rvec,
			     unsigned long riovcnt,
			     unsigned long flags, int vm_write)
{
	struct iovec iovstack_l[UIO_FASTIOV];
	struct iovec iovstack_r[UIO_FASTIOV];
	struct iovec *iov_l = iovstack_l;
	struct iovec *iov_r;
	struct iov_iter iter;
	ssize_t rc;
	int dir = vm_write ? WRITE : READ;

	if (flags != 0)
		return -EINVAL;

	/* Check iovecs */
	rc = import_iovec(dir, lvec, liovcnt, UIO_FASTIOV, &iov_l, &iter);
	if (rc < 0)
		return rc;
	if (!iov_iter_count(&iter))
		goto free_iov_l;
	iov_r = iovec_from_user(rvec, riovcnt, UIO_FASTIOV, iovstack_r,
				in_compat_syscall());
	if (IS_ERR(iov_r)) {
		rc = PTR_ERR(iov_r);
		goto free_iov_l;
	}
	rc = process_vm_rw_core(pid, &iter, iov_r, riovcnt, flags, vm_write);
	if (iov_r != iovstack_r)
		kfree(iov_r);
free_iov_l:
	kfree(iov_l);
	return rc;
}
SYSCALL_DEFINE6(process_vm_readv, pid_t, pid, const struct iovec __user *, lvec,
		unsigned long, liovcnt, const struct iovec __user *, rvec,
		unsigned long, riovcnt, unsigned long, flags)
{
	return process_vm_rw(pid, lvec, liovcnt, rvec, riovcnt, flags, 0);
}

SYSCALL_DEFINE6(process_vm_writev, pid_t, pid,
		const struct iovec __user *, lvec,
		unsigned long, liovcnt, const struct iovec __user *, rvec,
		unsigned long, riovcnt, unsigned long, flags)
{
	return process_vm_rw(pid, lvec, liovcnt, rvec, riovcnt, flags, 1);
}
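/*
 * Illustrative userspace sketch (editor's example, not part of the kernel
 * sources): reading a remote buffer through the glibc process_vm_readv()
 * wrapper. target_pid and remote_addr are hypothetical placeholders that a
 * real caller such as a debugger would obtain via ptrace or /proc; the
 * caller needs PTRACE_MODE_ATTACH permission on the target, matching the
 * mm_access() check in process_vm_rw_core() above.
 *
 *	#include <sys/uio.h>
 *	#include <stdio.h>
 *
 *	char buf[4096];
 *	struct iovec local  = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct iovec remote = { .iov_base = (void *)remote_addr,
 *				.iov_len = sizeof(buf) };
 *	ssize_t n = process_vm_readv(target_pid, &local, 1, &remote, 1, 0);
 *	if (n < 0)
 *		perror("process_vm_readv");
 */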