#include <linux/export.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (!fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_to_user_inatomic(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_to_user_inatomic(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */
	kaddr = kmap(page);
	from = kaddr + offset;
	left = __copy_to_user(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_to_user(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);
done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}
EXPORT_SYMBOL(copy_page_to_iter);
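
/*
 * Illustrative sketch (not part of the original file): a read-side caller
 * pushing one page-cache page at a time to userspace.  A short copy means
 * the user buffer faulted; by convention that becomes -EFAULT only when
 * nothing at all was copied.  The helper name is hypothetical.
 */
static inline ssize_t example_send_page(struct page *page, size_t offset,
					size_t count, struct iov_iter *iter)
{
	size_t copied = copy_page_to_iter(page, offset, count, iter);

	/* the iterator has already been advanced by 'copied' */
	return copied ? copied : -EFAULT;
}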

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (!fault_in_pages_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		to = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_from_user_inatomic(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_from_user_inatomic(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = to - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */
	kaddr = kmap(page);
	to = kaddr + offset;
	left = __copy_from_user(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_from_user(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap(page);
done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}
EXPORT_SYMBOL(copy_page_from_iter);
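
/*
 * Illustrative sketch (not part of the original file): the mirror-image
 * write-side caller, pulling user data into a kernel page.  The helper
 * name is hypothetical.
 */
static inline ssize_t example_recv_page(struct page *page, size_t offset,
					size_t count, struct iov_iter *iter)
{
	size_t copied = copy_page_from_iter(page, offset, count, iter);

	/* as with copy_page_to_iter(), the iterator advances by 'copied' */
	return copied ? copied : -EFAULT;
}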

static size_t __iovec_copy_from_user_inatomic(char *vaddr,
			const struct iovec *iov, size_t base, size_t bytes)
{
	size_t copied = 0, left = 0;

	while (bytes) {
		char __user *buf = iov->iov_base + base;
		int copy = min(bytes, iov->iov_len - base);

		base = 0;
		left = __copy_from_user_inatomic(vaddr, buf, copy);
		copied += copy;
		bytes -= copy;
		vaddr += copy;
		iov++;

		if (unlikely(left))
			break;
	}
	return copied - left;
}

/*
 * Copy as much as we can into the page and return the number of bytes which
 * were successfully copied.  If a fault is encountered then copying stops and
 * the number of bytes copied so far is returned.
 */
size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr;
	size_t copied;

	kaddr = kmap_atomic(page);
	if (likely(i->nr_segs == 1)) {
		int left;
		char __user *buf = i->iov->iov_base + i->iov_offset;
		left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
		copied = bytes - left;
	} else {
		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
						i->iov, i->iov_offset, bytes);
	}
	kunmap_atomic(kaddr);

	return copied;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
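
/*
 * Illustrative sketch (not part of the original file): unlike the
 * copy_page_{to,from}_iter() helpers above, the atomic copy does not
 * advance the iterator; the caller inspects the (possibly short) return
 * value and advances by exactly that much.  Names are hypothetical.
 */
static inline size_t example_atomic_fill(struct page *page,
					 struct iov_iter *iter,
					 unsigned long offset, size_t bytes)
{
	size_t copied = iov_iter_copy_from_user_atomic(page, iter,
						       offset, bytes);

	iov_iter_advance(iter, copied);
	return copied;	/* may be short: the caller retries, it's not an error */
}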

void iov_iter_advance(struct iov_iter *i, size_t bytes)
{
	BUG_ON(i->count < bytes);

	if (likely(i->nr_segs == 1)) {
		i->iov_offset += bytes;
		i->count -= bytes;
	} else {
		const struct iovec *iov = i->iov;
		size_t base = i->iov_offset;
		unsigned long nr_segs = i->nr_segs;

		/*
		 * The !iov->iov_len check ensures we skip over unlikely
		 * zero-length segments (without overrunning the iovec).
		 */
		while (bytes || unlikely(i->count && !iov->iov_len)) {
			int copy;

			copy = min(bytes, iov->iov_len - base);
			BUG_ON(!i->count || i->count < copy);
			i->count -= copy;
			bytes -= copy;
			base += copy;
			if (iov->iov_len == base) {
				iov++;
				nr_segs--;
				base = 0;
			}
		}
		i->iov = iov;
		i->iov_offset = base;
		i->nr_segs = nr_segs;
	}
}
EXPORT_SYMBOL(iov_iter_advance);

/*
 * Fault in the first iovec of the given iov_iter, to a maximum length
 * of bytes. Returns 0 on success, or non-zero if the memory could not be
 * accessed (i.e. because it is an invalid address).
 *
 * writev-intensive code may want this to prefault several iovecs -- that
 * would be possible (callers must not rely on the fact that _only_ the
 * first iovec will be faulted with the current implementation).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	char __user *buf = i->iov->iov_base + i->iov_offset;

	bytes = min(bytes, i->iov->iov_len - i->iov_offset);
	return fault_in_pages_readable(buf, bytes);
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);
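
/*
 * Illustrative sketch (not part of the original file): the buffered-write
 * pattern these helpers exist for, modelled on generic_perform_write().
 * Prefault the user memory while sleeping is still allowed, then copy with
 * pagefaults disabled; if the atomic copy comes up short, loop and fault
 * the memory in again.  Assumes bytes <= PAGE_SIZE and elides
 * ->write_begin/->write_end; names are hypothetical.
 */
static inline ssize_t example_perform_write(struct page *page,
					    struct iov_iter *i, size_t bytes)
{
	size_t total = 0;

	while (bytes) {
		size_t copied;

		if (iov_iter_fault_in_readable(i, bytes))
			break;	/* genuinely bad address */

		/* kmap_atomic() inside disables pagefaults for us */
		copied = iov_iter_copy_from_user_atomic(page, i, total, bytes);
		iov_iter_advance(i, copied);
		total += copied;
		bytes -= copied;
	}
	return total ? total : -EFAULT;
}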

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	const struct iovec *iov = i->iov;

	if (i->nr_segs == 1)
		return i->count;
	else
		return min(i->count, iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	const struct iovec *iov = i->iov;
	unsigned long res;
	size_t size = i->count;
	size_t n;

	if (!size)
		return 0;

	res = (unsigned long)iov->iov_base + i->iov_offset;
	n = iov->iov_len - i->iov_offset;
	if (n >= size)
		return res | size;
	size -= n;
	res |= n;
	while (size > (++iov)->iov_len) {
		res |= (unsigned long)iov->iov_base | iov->iov_len;
		size -= iov->iov_len;
	}
	res |= (unsigned long)iov->iov_base | size;
	return res;
}
EXPORT_SYMBOL(iov_iter_alignment);
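
/*
 * Illustrative sketch (not part of the original file): direct-I/O code
 * conventionally rejects requests whose user addresses or lengths are not
 * aligned to the device's logical block size.  'blkbits' is hypothetical.
 */
static inline bool example_dio_aligned(const struct iov_iter *i,
				       unsigned int blkbits)
{
	unsigned long mask = (1UL << blkbits) - 1;

	return (iov_iter_alignment(i) & mask) == 0;
}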

void iov_iter_init(struct iov_iter *i, int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	/* It will get better.  Eventually... */
	if (segment_eq(get_fs(), KERNEL_DS))
		direction |= REQ_KERNEL;
	i->type = direction;
	i->iov = iov;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);
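
/*
 * Illustrative sketch (not part of the original file): wrapping a single
 * user buffer for a read(2)-style path.  The iovec must stay live for as
 * long as the iterator is in use; names are hypothetical.
 */
static inline void example_init_single_seg(struct iov_iter *i,
					   struct iovec *iov,
					   char __user *buf, size_t len)
{
	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, READ, iov, 1, len);
}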

ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize,
		   size_t *start)
{
	size_t offset = i->iov_offset;
	const struct iovec *iov = i->iov;
	size_t len;
	unsigned long addr;
	int n;
	int res;

	len = iov->iov_len - offset;
	if (len > i->count)
		len = i->count;
	if (len > maxsize)
		len = maxsize;
	addr = (unsigned long)iov->iov_base + offset;
	len += *start = addr & (PAGE_SIZE - 1);
	addr &= ~(PAGE_SIZE - 1);
	n = (len + PAGE_SIZE - 1) / PAGE_SIZE;
	res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
	if (unlikely(res < 0))
		return res;
	return (res == n ? len : res * PAGE_SIZE) - *start;
}
EXPORT_SYMBOL(iov_iter_get_pages);
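
/*
 * Illustrative sketch (not part of the original file): pinning the next
 * chunk of user memory for zero-copy I/O and releasing it again.  A real
 * caller would do the actual I/O between pinning and unpinning; the
 * helper name is hypothetical.
 */
static inline ssize_t example_pin_next_chunk(struct iov_iter *i)
{
	struct page *pages[2];	/* a PAGE_SIZE span may straddle two pages */
	size_t start;
	ssize_t len;
	int n;

	len = iov_iter_get_pages(i, pages, PAGE_SIZE, &start);
	if (len < 0)
		return len;
	/* data occupies [start, start + len) within the pinned pages */
	for (n = 0; n < DIV_ROUND_UP(start + len, PAGE_SIZE); n++)
		put_page(pages[n]);
	return len;
}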

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	size_t offset = i->iov_offset;
	const struct iovec *iov = i->iov;
	size_t len;
	unsigned long addr;
	void *p;
	int n;
	int res;

	len = iov->iov_len - offset;
	if (len > i->count)
		len = i->count;
	if (len > maxsize)
		len = maxsize;
	addr = (unsigned long)iov->iov_base + offset;
	len += *start = addr & (PAGE_SIZE - 1);
	addr &= ~(PAGE_SIZE - 1);
	n = (len + PAGE_SIZE - 1) / PAGE_SIZE;

	p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
	if (!p)
		p = vmalloc(n * sizeof(struct page *));
	if (!p)
		return -ENOMEM;

	res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
	if (unlikely(res < 0)) {
		kvfree(p);
		return res;
	}
	*pages = p;
	return (res == n ? len : res * PAGE_SIZE) - *start;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);
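
/*
 * Illustrative sketch (not part of the original file): the _alloc variant
 * sizes and allocates the page array itself; the caller releases it with
 * kvfree(), matching the kmalloc()-with-vmalloc()-fallback above.  The
 * helper name is hypothetical.
 */
static inline ssize_t example_pin_chunk_alloc(struct iov_iter *i,
					      size_t maxsize,
					      struct page ***pages,
					      size_t *start)
{
	ssize_t len = iov_iter_get_pages_alloc(i, pages, maxsize, start);

	/* on success: put_page() each pinned page, then kvfree(*pages) */
	return len;
}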

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	size_t offset = i->iov_offset;
	size_t size = i->count;
	const struct iovec *iov = i->iov;
	int npages = 0;
	int n;

	for (n = 0; size && n < i->nr_segs; n++, iov++) {
		unsigned long addr = (unsigned long)iov->iov_base + offset;
		size_t len = iov->iov_len - offset;

		offset = 0;
		if (unlikely(!len))	/* empty segment */
			continue;
		if (len > size)
			len = size;
		npages += (addr + len + PAGE_SIZE - 1) / PAGE_SIZE
			  - addr / PAGE_SIZE;
		if (npages >= maxpages)	/* don't bother going further */
			return maxpages;
		size -= len;
	}
	return min(npages, maxpages);
}
EXPORT_SYMBOL(iov_iter_npages);
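
/*
 * Illustrative sketch (not part of the original file): a direct-I/O
 * submitter bounding its per-request page array.  'cap' stands in for a
 * real limit such as the block layer's maximum bio size.
 */
static inline int example_pages_for_request(const struct iov_iter *i, int cap)
{
	/* never exceeds 'cap'; zero-length segments contribute nothing */
	return iov_iter_npages(i, cap);
}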