/* mm/iov_iter.c */
#include <linux/export.h>
#include <linux/uio.h>
#include <linux/pagemap.h>

/*
 * Copy up to @bytes bytes from @page, starting at @offset, into the user
 * buffers described by @i, and advance the iterator past whatever was
 * copied.  Returns the number of bytes copied, which may be short if a
 * user page could not be faulted in.
 */
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (!fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_to_user_inatomic(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		/* continue into the following segments, if any */
		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_to_user_inatomic(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */
	kaddr = kmap(page);
	from = kaddr + offset;
	left = __copy_to_user(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_to_user(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);
done:
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}
EXPORT_SYMBOL(copy_page_to_iter);
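
/*
 * Example (illustrative sketch, not part of the original file): feeding one
 * page of file data into a user-supplied iovec array.  The iov_iter fields
 * are filled in directly, exactly as the helpers above consume them; the
 * function name and calling convention here are hypothetical.
 */
static ssize_t example_read_page(struct page *page, const struct iovec *iov,
				 unsigned long nr_segs, size_t len)
{
	struct iov_iter i = {
		.iov = iov,
		.nr_segs = nr_segs,
		.iov_offset = 0,
		.count = len,
	};

	/* May return less than len if a user page cannot be faulted in. */
	return copy_page_to_iter(page, 0, len, &i);
}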

/*
 * Slow path for the multi-segment case: walk the iovec array, copying
 * into @vaddr segment by segment.  Returns the number of bytes copied
 * before the first fault, if any.
 */
static size_t __iovec_copy_from_user_inatomic(char *vaddr,
			const struct iovec *iov, size_t base, size_t bytes)
{
	size_t copied = 0, left = 0;

	while (bytes) {
		char __user *buf = iov->iov_base + base;
		int copy = min(bytes, iov->iov_len - base);

		base = 0;
		left = __copy_from_user_inatomic(vaddr, buf, copy);
		copied += copy;
		bytes -= copy;
		vaddr += copy;
		iov++;

		if (unlikely(left))
			break;
	}
	return copied - left;
}

/*
 * Copy as much as we can into the page and return the number of bytes
 * successfully copied.  If a fault is encountered, a short count is
 * returned: only the bytes copied before the fault.
 */
size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr;
	size_t copied;

	kaddr = kmap_atomic(page);
	if (likely(i->nr_segs == 1)) {
		int left;
		char __user *buf = i->iov->iov_base + i->iov_offset;
		left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
		copied = bytes - left;
	} else {
		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
						i->iov, i->iov_offset, bytes);
	}
	kunmap_atomic(kaddr);

	return copied;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
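
/*
 * Example (illustrative sketch, not part of the original file): the
 * buffered-write pattern these helpers were designed for, loosely modelled
 * on generic_perform_write().  The function name is hypothetical; page
 * locking and the address_space hooks are omitted.
 */
static size_t example_fill_page(struct page *page, struct iov_iter *i)
{
	size_t bytes = min_t(size_t, i->count, PAGE_SIZE);
	size_t copied;

	/*
	 * Prefault the first user segment up front: kmap_atomic() inside
	 * the copy helper disables page faults, so an unfaulted source
	 * page would otherwise guarantee a short copy.
	 */
	if (iov_iter_fault_in_readable(i, bytes))
		return 0;

	copied = iov_iter_copy_from_user_atomic(page, i, 0, bytes);
	iov_iter_advance(i, copied);

	/*
	 * Real callers retry a zero-byte copy with at most
	 * iov_iter_single_seg_count() bytes, so that one unreadable
	 * segment of a multi-segment write cannot loop forever.
	 */
	return copied;
}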

/*
 * This has the same side effects and return value as
 * iov_iter_copy_from_user_atomic().
 * The difference is that it attempts to resolve faults.
 * Page must not be locked.
 */
size_t iov_iter_copy_from_user(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr;
	size_t copied;

	kaddr = kmap(page);
	if (likely(i->nr_segs == 1)) {
		int left;
		char __user *buf = i->iov->iov_base + i->iov_offset;
		left = __copy_from_user(kaddr + offset, buf, bytes);
		copied = bytes - left;
	} else {
		/* not in atomic context here, so faults can still be resolved */
		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
						i->iov, i->iov_offset, bytes);
	}
	kunmap(page);
	return copied;
}
EXPORT_SYMBOL(iov_iter_copy_from_user);

void iov_iter_advance(struct iov_iter *i, size_t bytes)
{
	BUG_ON(i->count < bytes);

	if (likely(i->nr_segs == 1)) {
		i->iov_offset += bytes;
		i->count -= bytes;
	} else {
		const struct iovec *iov = i->iov;
		size_t base = i->iov_offset;
		unsigned long nr_segs = i->nr_segs;

		/*
		 * The !iov->iov_len check ensures we skip over unlikely
		 * zero-length segments (without overrunning the iovec).
		 */
		while (bytes || unlikely(i->count && !iov->iov_len)) {
			int copy;

			copy = min(bytes, iov->iov_len - base);
			BUG_ON(!i->count || i->count < copy);
			i->count -= copy;
			bytes -= copy;
			base += copy;
			if (iov->iov_len == base) {
				iov++;
				nr_segs--;
				base = 0;
			}
		}
		i->iov = iov;
		i->iov_offset = base;
		i->nr_segs = nr_segs;
	}
}
EXPORT_SYMBOL(iov_iter_advance);
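
/*
 * Example (illustrative sketch, not part of the original file): advancing
 * across a segment boundary.  With two 8-byte segments, advancing by 12
 * leaves the iterator 4 bytes into the second segment with one segment
 * remaining.  (Real iovecs carry user-space pointers; the kernel buffers
 * here only give the example its shape.)
 */
static void example_advance(void)
{
	static char a[8], b[8];
	struct iovec iov[2] = {
		{ .iov_base = a, .iov_len = sizeof(a) },
		{ .iov_base = b, .iov_len = sizeof(b) },
	};
	struct iov_iter i = {
		.iov = iov, .nr_segs = 2, .iov_offset = 0, .count = 16,
	};

	iov_iter_advance(&i, 12);
	/* Now: i.iov == &iov[1], i.nr_segs == 1, i.iov_offset == 4, i.count == 4 */
}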

/*
 * Fault in the first iovec of the given iov_iter, to a maximum length of
 * @bytes.  Returns 0 on success, or non-zero if the memory could not be
 * accessed (i.e. because it is an invalid address).
 *
 * writev-intensive code may want this to prefault several iovecs -- that
 * would be possible (callers must not rely on the fact that _only_ the
 * first iovec will be faulted with the current implementation).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	char __user *buf = i->iov->iov_base + i->iov_offset;
	bytes = min(bytes, i->iov->iov_len - i->iov_offset);
	return fault_in_pages_readable(buf, bytes);
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	const struct iovec *iov = i->iov;
	if (i->nr_segs == 1)
		return i->count;
	else
		return min(i->count, iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);