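/*
 * Helpers for iterating over generic I/O vectors (struct iov_iter):
 * copying to and from user iovecs and bio_vecs, zeroing, advancing,
 * and pinning the pages an iterator describes.
 */
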
#include <linux/export.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

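/*
 * The iterate_* macros below walk an iov_iter chunk by chunk; __v is
 * the per-chunk iovec/bio_vec handed to the caller-supplied STEP
 * expression.  For user iovecs, STEP must evaluate to the number of
 * bytes it failed to transfer (the __copy_*_user convention), so a
 * short copy terminates the walk early; page-based bio_vec steps
 * cannot fault, so their result is discarded with (void)(STEP).
 */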
#define iterate_iovec(i, n, __v, __p, skip, STEP) {	\
	size_t left;					\
	size_t wanted = n;				\
	__p = i->iov;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	} else {					\
		left = 0;				\
	}						\
	while (unlikely(!left && n)) {			\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted - n;					\
}

#define iterate_bvec(i, n, __v, __p, skip, STEP) {	\
	size_t wanted = n;				\
	__p = i->bvec;					\
	__v.bv_len = min_t(size_t, n, __p->bv_len - skip); \
	if (likely(__v.bv_len)) {			\
		__v.bv_page = __p->bv_page;		\
		__v.bv_offset = __p->bv_offset + skip;	\
		(void)(STEP);				\
		skip += __v.bv_len;			\
		n -= __v.bv_len;			\
	}						\
	while (unlikely(n)) {				\
		__p++;					\
		__v.bv_len = min_t(size_t, n, __p->bv_len); \
		if (unlikely(!__v.bv_len))		\
			continue;			\
		__v.bv_page = __p->bv_page;		\
		__v.bv_offset = __p->bv_offset;		\
		(void)(STEP);				\
		skip = __v.bv_len;			\
		n -= __v.bv_len;			\
	}						\
	n = wanted;					\
}

#define iterate_all_kinds(i, n, v, I, B) {		\
	size_t skip = i->iov_offset;			\
	if (unlikely(i->type & ITER_BVEC)) {		\
		const struct bio_vec *bvec;		\
		struct bio_vec v;			\
		iterate_bvec(i, n, v, bvec, skip, (B))	\
	} else {					\
		const struct iovec *iov;		\
		struct iovec v;				\
		iterate_iovec(i, n, v, iov, skip, (I))	\
	}						\
}

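/*
 * iterate_all_kinds() dispatches on the iterator flavour: the I
 * expression runs for user iovecs, B for ITER_BVEC iterators.  As a
 * sketch (hypothetical caller, not code from this file), copying a
 * kernel buffer 'buf' of 'bytes' bytes out to an iterator could look
 * like:
 *
 *	char *p = buf;
 *	iterate_all_kinds(i, bytes, v,
 *		copy_to_user(v.iov_base, (p += v.iov_len) - v.iov_len,
 *			     v.iov_len),
 *		memcpy_to_page(v.bv_page, v.bv_offset,
 *			       (p += v.bv_len) - v.bv_len, v.bv_len)
 *	)
 *
 * The (p += len) - len idiom advances the source pointer while still
 * handing STEP the chunk's starting address; see
 * iov_iter_copy_from_user_atomic() below for a real user.
 */
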
static size_t copy_to_iter_iovec(void *from, size_t bytes, struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	left = __copy_to_user(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_to_user(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}

	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

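/*
 * The open-coded user-copy loops in this file track 'left' - the
 * bytes __copy_*_user() could not transfer - and stop as soon as a
 * copy comes up short.  Each helper therefore returns the number of
 * bytes actually transferred and advances the iterator by exactly
 * that amount, so a fault in user space shows up as a short return
 * rather than an error code.
 */
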
static size_t copy_from_iter_iovec(void *to, size_t bytes, struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	left = __copy_from_user(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_from_user(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}

	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (!fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_to_user_inatomic(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_to_user_inatomic(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */
	kaddr = kmap(page);
	from = kaddr + offset;
	left = __copy_to_user(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_to_user(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);
done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

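/*
 * The page-copy helpers above and below use a two-phase mapping
 * strategy: first try kmap_atomic() plus the _inatomic user-copy
 * variants, after prefaulting the user buffer with
 * fault_in_pages_writeable()/readable().  If the copy still comes up
 * short (the prefaulted page may have been reclaimed in the
 * meantime), drop the atomic mapping and finish the remainder under a
 * sleeping kmap(), where page faults can be serviced normally.
 */
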
static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (!fault_in_pages_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		to = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_from_user_inatomic(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_from_user_inatomic(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = to - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */
	kaddr = kmap(page);
	to = kaddr + offset;
	left = __copy_from_user(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_from_user(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap(page);
done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

static size_t zero_iovec(size_t bytes, struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	left = __clear_user(buf, copy);
	copy -= left;
	skip += copy;
	bytes -= copy;

	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __clear_user(buf, copy);
		copy -= left;
		skip = copy;
		bytes -= copy;
	}

	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

static void advance_iovec(struct iov_iter *i, size_t bytes)
{
	BUG_ON(i->count < bytes);

	if (likely(i->nr_segs == 1)) {
		i->iov_offset += bytes;
		i->count -= bytes;
	} else {
		const struct iovec *iov = i->iov;
		size_t base = i->iov_offset;
		unsigned long nr_segs = i->nr_segs;

		/*
		 * The !iov->iov_len check ensures we skip over unlikely
		 * zero-length segments (without overrunning the iovec).
		 */
		while (bytes || unlikely(i->count && !iov->iov_len)) {
			int copy;

			copy = min(bytes, iov->iov_len - base);
			BUG_ON(!i->count || i->count < copy);
			i->count -= copy;
			bytes -= copy;
			base += copy;
			if (iov->iov_len == base) {
				iov++;
				nr_segs--;
				base = 0;
			}
		}
		i->iov = iov;
		i->iov_offset = base;
		i->nr_segs = nr_segs;
	}
}

/*
 * Fault in the first iovec of the given iov_iter, to a maximum length
 * of bytes. Returns 0 on success, or non-zero if the memory could not be
 * accessed (i.e. because it is an invalid address).
 *
 * writev-intensive code may want this to prefault several iovecs -- that
 * would be possible (callers must not rely on the fact that _only_ the
 * first iovec will be faulted with the current implementation).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	if (!(i->type & ITER_BVEC)) {
		char __user *buf = i->iov->iov_base + i->iov_offset;
		bytes = min(bytes, i->iov->iov_len - i->iov_offset);
		return fault_in_pages_readable(buf, bytes);
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);

void iov_iter_init(struct iov_iter *i, int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	/* It will get better. Eventually... */
	if (segment_eq(get_fs(), KERNEL_DS))
		direction |= ITER_KVEC;
	i->type = direction;
	i->iov = iov;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);

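/*
 * A minimal usage sketch (hypothetical caller, not part of this
 * file): wrapping a single user buffer so a page-cache page can be
 * copied out through the iterator.
 *
 *	struct iovec iov = { .iov_base = ubuf, .iov_len = len };
 *	struct iov_iter iter;
 *	size_t copied;
 *
 *	iov_iter_init(&iter, READ, &iov, 1, len);
 *	copied = copy_page_to_iter(page, offset, len, &iter);
 *
 * The copy helpers advance the iterator themselves; iov_iter_advance()
 * is only needed when data is consumed outside them.
 */
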
static ssize_t get_pages_iovec(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	size_t offset = i->iov_offset;
	const struct iovec *iov = i->iov;
	size_t len;
	unsigned long addr;
	int n;
	int res;

	len = iov->iov_len - offset;
	if (len > i->count)
		len = i->count;
	if (len > maxsize)
		len = maxsize;
	addr = (unsigned long)iov->iov_base + offset;
	len += *start = addr & (PAGE_SIZE - 1);
	if (len > maxpages * PAGE_SIZE)
		len = maxpages * PAGE_SIZE;
	addr &= ~(PAGE_SIZE - 1);
	n = (len + PAGE_SIZE - 1) / PAGE_SIZE;
	res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
	if (unlikely(res < 0))
		return res;
	return (res == n ? len : res * PAGE_SIZE) - *start;
}

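/*
 * Worked example for the alignment arithmetic above (illustrative
 * numbers, 4K pages): for addr == 0x10234 and len == 0x2000, *start
 * becomes 0x234, len grows to 0x2234, addr rounds down to 0x10000 and
 * n == 3 pages are pinned; the return value 0x2234 - 0x234 == 0x2000
 * is the number of usable bytes, which begin *start bytes into
 * pages[0].
 */
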
static ssize_t get_pages_alloc_iovec(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	size_t offset = i->iov_offset;
	const struct iovec *iov = i->iov;
	size_t len;
	unsigned long addr;
	void *p;
	int n;
	int res;

	len = iov->iov_len - offset;
	if (len > i->count)
		len = i->count;
	if (len > maxsize)
		len = maxsize;
	addr = (unsigned long)iov->iov_base + offset;
	len += *start = addr & (PAGE_SIZE - 1);
	addr &= ~(PAGE_SIZE - 1);
	n = (len + PAGE_SIZE - 1) / PAGE_SIZE;

	p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
	if (!p)
		p = vmalloc(n * sizeof(struct page *));
	if (!p)
		return -ENOMEM;

	res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
	if (unlikely(res < 0)) {
		kvfree(p);
		return res;
	}
	*pages = p;
	return (res == n ? len : res * PAGE_SIZE) - *start;
}

static int iov_iter_npages_iovec(const struct iov_iter *i, int maxpages)
{
	size_t offset = i->iov_offset;
	size_t size = i->count;
	const struct iovec *iov = i->iov;
	int npages = 0;
	int n;

	for (n = 0; size && n < i->nr_segs; n++, iov++) {
		unsigned long addr = (unsigned long)iov->iov_base + offset;
		size_t len = iov->iov_len - offset;
		offset = 0;
		if (unlikely(!len))	/* empty segment */
			continue;
		if (len > size)
			len = size;
		npages += (addr + len + PAGE_SIZE - 1) / PAGE_SIZE
			- addr / PAGE_SIZE;
		if (npages >= maxpages)	/* don't bother going further */
			return maxpages;
		size -= len;
		offset = 0;
	}
	return min(npages, maxpages);
}

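/*
 * Example for the page-count arithmetic above (4K pages): a segment
 * at address 0x10f00 of length 0x300 spans
 * (0x10f00 + 0x300 + 0xfff)/0x1000 - 0x10f00/0x1000 = 0x12 - 0x10 = 2
 * pages, even though it is shorter than one page.
 */
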
static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
	char *from = kmap_atomic(page);
	memcpy(to, from + offset, len);
	kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, char *from, size_t len)
{
	char *to = kmap_atomic(page);
	memcpy(to + offset, from, len);
	kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_atomic(page);
	memset(addr + offset, 0, len);
	kunmap_atomic(addr);
}

static size_t copy_to_iter_bvec(void *from, size_t bytes, struct iov_iter *i)
{
	size_t skip, copy, wanted;
	const struct bio_vec *bvec;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	bvec = i->bvec;
	skip = i->iov_offset;
	copy = min_t(size_t, bytes, bvec->bv_len - skip);

	memcpy_to_page(bvec->bv_page, skip + bvec->bv_offset, from, copy);
	skip += copy;
	from += copy;
	bytes -= copy;
	while (bytes) {
		bvec++;
		copy = min(bytes, (size_t)bvec->bv_len);
		memcpy_to_page(bvec->bv_page, bvec->bv_offset, from, copy);
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	if (skip == bvec->bv_len) {
		bvec++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= bvec - i->bvec;
	i->bvec = bvec;
	i->iov_offset = skip;
	return wanted - bytes;
}

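/*
 * The bvec variants in this file are simpler than their iovec
 * counterparts: kernel pages cannot fault on access, so there is no
 * 'left' tracking and the requested byte count (clamped to i->count)
 * is always transferred in full.
 */
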
static size_t copy_from_iter_bvec(void *to, size_t bytes, struct iov_iter *i)
{
	size_t skip, copy, wanted;
	const struct bio_vec *bvec;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	bvec = i->bvec;
	skip = i->iov_offset;

	copy = min(bytes, bvec->bv_len - skip);

	memcpy_from_page(to, bvec->bv_page, bvec->bv_offset + skip, copy);

	to += copy;
	skip += copy;
	bytes -= copy;

	while (bytes) {
		bvec++;
		copy = min(bytes, (size_t)bvec->bv_len);
		memcpy_from_page(to, bvec->bv_page, bvec->bv_offset, copy);
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	if (skip == bvec->bv_len) {
		bvec++;
		skip = 0;
	}
	i->count -= wanted;
	i->nr_segs -= bvec - i->bvec;
	i->bvec = bvec;
	i->iov_offset = skip;
	return wanted;
}

static size_t copy_page_to_iter_bvec(struct page *page, size_t offset,
					size_t bytes, struct iov_iter *i)
{
	void *kaddr = kmap_atomic(page);
	size_t wanted = copy_to_iter_bvec(kaddr + offset, bytes, i);
	kunmap_atomic(kaddr);
	return wanted;
}

static size_t copy_page_from_iter_bvec(struct page *page, size_t offset,
					size_t bytes, struct iov_iter *i)
{
	void *kaddr = kmap_atomic(page);
	size_t wanted = copy_from_iter_bvec(kaddr + offset, bytes, i);
	kunmap_atomic(kaddr);
	return wanted;
}

static size_t zero_bvec(size_t bytes, struct iov_iter *i)
{
	size_t skip, copy, wanted;
	const struct bio_vec *bvec;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	bvec = i->bvec;
	skip = i->iov_offset;
	copy = min_t(size_t, bytes, bvec->bv_len - skip);

	memzero_page(bvec->bv_page, skip + bvec->bv_offset, copy);
	skip += copy;
	bytes -= copy;
	while (bytes) {
		bvec++;
		copy = min(bytes, (size_t)bvec->bv_len);
		memzero_page(bvec->bv_page, bvec->bv_offset, copy);
		skip = copy;
		bytes -= copy;
	}
	if (skip == bvec->bv_len) {
		bvec++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= bvec - i->bvec;
	i->bvec = bvec;
	i->iov_offset = skip;
	return wanted - bytes;
}

static void advance_bvec(struct iov_iter *i, size_t bytes)
{
	BUG_ON(i->count < bytes);

	if (likely(i->nr_segs == 1)) {
		i->iov_offset += bytes;
		i->count -= bytes;
	} else {
		const struct bio_vec *bvec = i->bvec;
		size_t base = i->iov_offset;
		unsigned long nr_segs = i->nr_segs;

		/*
		 * The !bvec->bv_len check ensures we skip over unlikely
		 * zero-length segments (without overrunning the bvec
		 * array).
		 */
		while (bytes || unlikely(i->count && !bvec->bv_len)) {
			int copy;

			copy = min(bytes, bvec->bv_len - base);
			BUG_ON(!i->count || i->count < copy);
			i->count -= copy;
			bytes -= copy;
			base += copy;
			if (bvec->bv_len == base) {
				bvec++;
				nr_segs--;
				base = 0;
			}
		}
		i->bvec = bvec;
		i->iov_offset = base;
		i->nr_segs = nr_segs;
	}
}

static ssize_t get_pages_bvec(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	const struct bio_vec *bvec = i->bvec;
	size_t len = bvec->bv_len - i->iov_offset;
	if (len > i->count)
		len = i->count;
	if (len > maxsize)
		len = maxsize;
	/* can't be more than PAGE_SIZE */
	*start = bvec->bv_offset + i->iov_offset;

	get_page(*pages = bvec->bv_page);

	return len;
}

static ssize_t get_pages_alloc_bvec(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	const struct bio_vec *bvec = i->bvec;
	size_t len = bvec->bv_len - i->iov_offset;
	if (len > i->count)
		len = i->count;
	if (len > maxsize)
		len = maxsize;
	*start = bvec->bv_offset + i->iov_offset;

	*pages = kmalloc(sizeof(struct page *), GFP_KERNEL);
	if (!*pages)
		return -ENOMEM;

	get_page(**pages = bvec->bv_page);

	return len;
}

static int iov_iter_npages_bvec(const struct iov_iter *i, int maxpages)
{
	size_t offset = i->iov_offset;
	size_t size = i->count;
	const struct bio_vec *bvec = i->bvec;
	int npages = 0;
	int n;

	for (n = 0; size && n < i->nr_segs; n++, bvec++) {
		size_t len = bvec->bv_len - offset;
		offset = 0;
		if (unlikely(!len))	/* empty segment */
			continue;
		if (len > size)
			len = size;
		npages++;
		if (npages >= maxpages)	/* don't bother going further */
			return maxpages;
		size -= len;
		offset = 0;
	}
	return min(npages, maxpages);
}

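/*
 * The exported entry points below contain no logic of their own: each
 * one dispatches on ITER_BVEC to the flavour-specific helper above.
 */
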
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (i->type & ITER_BVEC)
		return copy_page_to_iter_bvec(page, offset, bytes, i);
	else
		return copy_page_to_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (i->type & ITER_BVEC)
		return copy_page_from_iter_bvec(page, offset, bytes, i);
	else
		return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);

size_t copy_to_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (i->type & ITER_BVEC)
		return copy_to_iter_bvec(addr, bytes, i);
	else
		return copy_to_iter_iovec(addr, bytes, i);
}
EXPORT_SYMBOL(copy_to_iter);

size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (i->type & ITER_BVEC)
		return copy_from_iter_bvec(addr, bytes, i);
	else
		return copy_from_iter_iovec(addr, bytes, i);
}
EXPORT_SYMBOL(copy_from_iter);

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	if (i->type & ITER_BVEC)
		return zero_bvec(bytes, i);
	else
		return zero_iovec(bytes, i);
}
EXPORT_SYMBOL(iov_iter_zero);

size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	iterate_all_kinds(i, bytes, v,
		__copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
					  v.iov_base, v.iov_len),
		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

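/*
 * Note how the STEP arguments above use (p += v.iov_len) - v.iov_len:
 * the destination pointer is advanced past the chunk while the
 * expression still yields the chunk's start.  Also note that the
 * macro rewrites 'bytes' to the number of bytes actually processed,
 * which is what gets returned; iterate_iovec() stops early if
 * __copy_from_user_inatomic() reports a partial copy.
 */
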
void iov_iter_advance(struct iov_iter *i, size_t size)
{
	if (i->type & ITER_BVEC)
		advance_bvec(i, size);
	else
		advance_iovec(i, size);
}
EXPORT_SYMBOL(iov_iter_advance);

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (i->nr_segs == 1)
		return i->count;
	else if (i->type & ITER_BVEC)
		return min(i->count, i->bvec->bv_len - i->iov_offset);
	else
		return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (!size)
		return 0;

	iterate_all_kinds(i, size, v,
		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
		res |= v.bv_offset | v.bv_len
	)
	return res;
}
EXPORT_SYMBOL(iov_iter_alignment);

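/*
 * iov_iter_alignment() ORs together every segment's start address and
 * length, so a set low-order bit in the result means some segment is
 * misaligned to that granularity; a caller can test, say,
 * 'res & (blocksize - 1)' to reject iterators that are not
 * block-aligned throughout.  The ', 0' in the iovec STEP makes the
 * expression report zero untransferred bytes so the walk never stops
 * early.
 */
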
ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	if (i->type & ITER_BVEC)
		return get_pages_bvec(i, pages, maxsize, maxpages, start);
	else
		return get_pages_iovec(i, pages, maxsize, maxpages, start);
}
EXPORT_SYMBOL(iov_iter_get_pages);

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	if (i->type & ITER_BVEC)
		return get_pages_alloc_bvec(i, pages, maxsize, start);
	else
		return get_pages_alloc_iovec(i, pages, maxsize, start);
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	if (i->type & ITER_BVEC)
		return iov_iter_npages_bvec(i, maxpages);
	else
		return iov_iter_npages_iovec(i, maxpages);
}
EXPORT_SYMBOL(iov_iter_npages);