net/sunrpc/xdr.c (mirror_ubuntu-bionic-kernel, at commit "sunrpc: correct the computation for page_ptr when truncating")
1 /*
2 * linux/net/sunrpc/xdr.c
3 *
4 * Generic XDR support.
5 *
6 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
7 */
8
9 #include <linux/module.h>
10 #include <linux/slab.h>
11 #include <linux/types.h>
12 #include <linux/string.h>
13 #include <linux/kernel.h>
14 #include <linux/pagemap.h>
15 #include <linux/errno.h>
16 #include <linux/sunrpc/xdr.h>
17 #include <linux/sunrpc/msg_prot.h>
18
19 /*
20 * XDR functions for basic NFS types
21 */
22 __be32 *
23 xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
24 {
25 unsigned int quadlen = XDR_QUADLEN(obj->len);
26
27 p[quadlen] = 0; /* zero trailing bytes */
28 *p++ = cpu_to_be32(obj->len);
29 memcpy(p, obj->data, obj->len);
30 return p + XDR_QUADLEN(obj->len);
31 }
32 EXPORT_SYMBOL_GPL(xdr_encode_netobj);
33
34 __be32 *
35 xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
36 {
37 unsigned int len;
38
39 if ((len = be32_to_cpu(*p++)) > XDR_MAX_NETOBJ)
40 return NULL;
41 obj->len = len;
42 obj->data = (u8 *) p;
43 return p + XDR_QUADLEN(len);
44 }
45 EXPORT_SYMBOL_GPL(xdr_decode_netobj);
46
47 /**
48 * xdr_encode_opaque_fixed - Encode fixed length opaque data
49 * @p: pointer to current position in XDR buffer.
50 * @ptr: pointer to data to encode (or NULL)
51 * @nbytes: size of data.
52 *
53 * Copy the array of data of length nbytes at ptr to the XDR buffer
54 * at position p, then align to the next 32-bit boundary by padding
55 * with zero bytes (see RFC1832).
56 * Note: if ptr is NULL, only the padding is performed.
57 *
58 * Returns the updated current XDR buffer position
59 *
60 */
61 __be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes)
62 {
63 if (likely(nbytes != 0)) {
64 unsigned int quadlen = XDR_QUADLEN(nbytes);
65 unsigned int padding = (quadlen << 2) - nbytes;
66
67 if (ptr != NULL)
68 memcpy(p, ptr, nbytes);
69 if (padding != 0)
70 memset((char *)p + nbytes, 0, padding);
71 p += quadlen;
72 }
73 return p;
74 }
75 EXPORT_SYMBOL_GPL(xdr_encode_opaque_fixed);
76
77 /**
78 * xdr_encode_opaque - Encode variable length opaque data
79 * @p: pointer to current position in XDR buffer.
80 * @ptr: pointer to data to encode (or NULL)
81 * @nbytes: size of data.
82 *
83 * Returns the updated current XDR buffer position
84 */
85 __be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
86 {
87 *p++ = cpu_to_be32(nbytes);
88 return xdr_encode_opaque_fixed(p, ptr, nbytes);
89 }
90 EXPORT_SYMBOL_GPL(xdr_encode_opaque);
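/*
 * Editorial example (a sketch, not part of the original file): encoding a
 * counted opaque and a C string with the raw helpers above.  The names
 * "out", "data" and "datalen" are assumed to be supplied by the caller,
 * which must also have reserved room for both items (a 4-byte length plus
 * the padded payload for each).
 *
 *	__be32 *p = out;
 *
 *	p = xdr_encode_opaque(p, data, datalen);	// length + data + pad
 *	p = xdr_encode_string(p, "hello");		// same, from a C string
 */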
91
92 __be32 *
93 xdr_encode_string(__be32 *p, const char *string)
94 {
95 return xdr_encode_array(p, string, strlen(string));
96 }
97 EXPORT_SYMBOL_GPL(xdr_encode_string);
98
99 __be32 *
100 xdr_decode_string_inplace(__be32 *p, char **sp,
101 unsigned int *lenp, unsigned int maxlen)
102 {
103 u32 len;
104
105 len = be32_to_cpu(*p++);
106 if (len > maxlen)
107 return NULL;
108 *lenp = len;
109 *sp = (char *) p;
110 return p + XDR_QUADLEN(len);
111 }
112 EXPORT_SYMBOL_GPL(xdr_decode_string_inplace);
113
114 /**
115 * xdr_terminate_string - '\0'-terminate a string residing in an xdr_buf
116 * @buf: XDR buffer where string resides
117 * @len: length of string, in bytes
118 *
119 */
120 void
121 xdr_terminate_string(struct xdr_buf *buf, const u32 len)
122 {
123 char *kaddr;
124
125 kaddr = kmap_atomic(buf->pages[0]);
126 kaddr[buf->page_base + len] = '\0';
127 kunmap_atomic(kaddr);
128 }
129 EXPORT_SYMBOL_GPL(xdr_terminate_string);
130
131 void
132 xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
133 struct page **pages, unsigned int base, unsigned int len)
134 {
135 struct kvec *head = xdr->head;
136 struct kvec *tail = xdr->tail;
137 char *buf = (char *)head->iov_base;
138 unsigned int buflen = head->iov_len;
139
140 head->iov_len = offset;
141
142 xdr->pages = pages;
143 xdr->page_base = base;
144 xdr->page_len = len;
145
146 tail->iov_base = buf + offset;
147 tail->iov_len = buflen - offset;
148
149 xdr->buflen += len;
150 }
151 EXPORT_SYMBOL_GPL(xdr_inline_pages);
152
153 /*
154 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
155 */
156
157 /**
158  * _shift_data_right_pages - shift data in a page vector to a higher offset
159 * @pages: vector of pages containing both the source and dest memory area.
160 * @pgto_base: page vector address of destination
161 * @pgfrom_base: page vector address of source
162 * @len: number of bytes to copy
163 *
164 * Note: the addresses pgto_base and pgfrom_base are both calculated in
165 * the same way:
166 * if a memory area starts at byte 'base' in page 'pages[i]',
167 * then its address is given as (i << PAGE_SHIFT) + base
168 * Also note: pgfrom_base must be < pgto_base, but the memory areas
169 * they point to may overlap.
170 */
171 static void
172 _shift_data_right_pages(struct page **pages, size_t pgto_base,
173 size_t pgfrom_base, size_t len)
174 {
175 struct page **pgfrom, **pgto;
176 char *vfrom, *vto;
177 size_t copy;
178
179 BUG_ON(pgto_base <= pgfrom_base);
180
181 pgto_base += len;
182 pgfrom_base += len;
183
184 pgto = pages + (pgto_base >> PAGE_SHIFT);
185 pgfrom = pages + (pgfrom_base >> PAGE_SHIFT);
186
187 pgto_base &= ~PAGE_MASK;
188 pgfrom_base &= ~PAGE_MASK;
189
190 do {
191 /* Are any pointers crossing a page boundary? */
192 if (pgto_base == 0) {
193 pgto_base = PAGE_SIZE;
194 pgto--;
195 }
196 if (pgfrom_base == 0) {
197 pgfrom_base = PAGE_SIZE;
198 pgfrom--;
199 }
200
201 copy = len;
202 if (copy > pgto_base)
203 copy = pgto_base;
204 if (copy > pgfrom_base)
205 copy = pgfrom_base;
206 pgto_base -= copy;
207 pgfrom_base -= copy;
208
209 vto = kmap_atomic(*pgto);
210 if (*pgto != *pgfrom) {
211 vfrom = kmap_atomic(*pgfrom);
212 memcpy(vto + pgto_base, vfrom + pgfrom_base, copy);
213 kunmap_atomic(vfrom);
214 } else
215 memmove(vto + pgto_base, vto + pgfrom_base, copy);
216 flush_dcache_page(*pgto);
217 kunmap_atomic(vto);
218
219 } while ((len -= copy) != 0);
220 }
221
222 /**
223  * _copy_to_pages - copy data into a page vector
224 * @pages: array of pages
225 * @pgbase: page vector address of destination
226 * @p: pointer to source data
227 * @len: length
228 *
229 * Copies data from an arbitrary memory location into an array of pages
230 * The copy is assumed to be non-overlapping.
231 */
232 static void
233 _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
234 {
235 struct page **pgto;
236 char *vto;
237 size_t copy;
238
239 pgto = pages + (pgbase >> PAGE_SHIFT);
240 pgbase &= ~PAGE_MASK;
241
242 for (;;) {
243 copy = PAGE_SIZE - pgbase;
244 if (copy > len)
245 copy = len;
246
247 vto = kmap_atomic(*pgto);
248 memcpy(vto + pgbase, p, copy);
249 kunmap_atomic(vto);
250
251 len -= copy;
252 if (len == 0)
253 break;
254
255 pgbase += copy;
256 if (pgbase == PAGE_SIZE) {
257 flush_dcache_page(*pgto);
258 pgbase = 0;
259 pgto++;
260 }
261 p += copy;
262 }
263 flush_dcache_page(*pgto);
264 }
265
266 /**
267  * _copy_from_pages - copy data out of a page vector
268 * @p: pointer to destination
269 * @pages: array of pages
270 * @pgbase: offset of source data
271 * @len: length
272 *
273 * Copies data into an arbitrary memory location from an array of pages
274 * The copy is assumed to be non-overlapping.
275 */
276 void
277 _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
278 {
279 struct page **pgfrom;
280 char *vfrom;
281 size_t copy;
282
283 pgfrom = pages + (pgbase >> PAGE_SHIFT);
284 pgbase &= ~PAGE_MASK;
285
286 do {
287 copy = PAGE_SIZE - pgbase;
288 if (copy > len)
289 copy = len;
290
291 vfrom = kmap_atomic(*pgfrom);
292 memcpy(p, vfrom + pgbase, copy);
293 kunmap_atomic(vfrom);
294
295 pgbase += copy;
296 if (pgbase == PAGE_SIZE) {
297 pgbase = 0;
298 pgfrom++;
299 }
300 p += copy;
301
302 } while ((len -= copy) != 0);
303 }
304 EXPORT_SYMBOL_GPL(_copy_from_pages);
305
306 /**
307  * xdr_shrink_bufhead - shrink the head kvec of an xdr_buf
308 * @buf: xdr_buf
309 * @len: bytes to remove from buf->head[0]
310 *
311 * Shrinks XDR buffer's header kvec buf->head[0] by
312 * 'len' bytes. The extra data is not lost, but is instead
313 * moved into the inlined pages and/or the tail.
314 */
315 static void
316 xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
317 {
318 struct kvec *head, *tail;
319 size_t copy, offs;
320 unsigned int pglen = buf->page_len;
321
322 tail = buf->tail;
323 head = buf->head;
324
325 WARN_ON_ONCE(len > head->iov_len);
326 if (len > head->iov_len)
327 len = head->iov_len;
328
329 /* Shift the tail first */
330 if (tail->iov_len != 0) {
331 if (tail->iov_len > len) {
332 copy = tail->iov_len - len;
333 memmove((char *)tail->iov_base + len,
334 tail->iov_base, copy);
335 }
336 /* Copy from the inlined pages into the tail */
337 copy = len;
338 if (copy > pglen)
339 copy = pglen;
340 offs = len - copy;
341 if (offs >= tail->iov_len)
342 copy = 0;
343 else if (copy > tail->iov_len - offs)
344 copy = tail->iov_len - offs;
345 if (copy != 0)
346 _copy_from_pages((char *)tail->iov_base + offs,
347 buf->pages,
348 buf->page_base + pglen + offs - len,
349 copy);
350 /* Do we also need to copy data from the head into the tail ? */
351 if (len > pglen) {
352 offs = copy = len - pglen;
353 if (copy > tail->iov_len)
354 copy = tail->iov_len;
355 memcpy(tail->iov_base,
356 (char *)head->iov_base +
357 head->iov_len - offs,
358 copy);
359 }
360 }
361 /* Now handle pages */
362 if (pglen != 0) {
363 if (pglen > len)
364 _shift_data_right_pages(buf->pages,
365 buf->page_base + len,
366 buf->page_base,
367 pglen - len);
368 copy = len;
369 if (len > pglen)
370 copy = pglen;
371 _copy_to_pages(buf->pages, buf->page_base,
372 (char *)head->iov_base + head->iov_len - len,
373 copy);
374 }
375 head->iov_len -= len;
376 buf->buflen -= len;
377 /* Have we truncated the message? */
378 if (buf->len > buf->buflen)
379 buf->len = buf->buflen;
380 }
381
382 /**
383  * xdr_shrink_pagelen - shrink the page data of an xdr_buf
384 * @buf: xdr_buf
385 * @len: bytes to remove from buf->pages
386 *
387 * Shrinks XDR buffer's page array buf->pages by
388 * 'len' bytes. The extra data is not lost, but is instead
389 * moved into the tail.
390 */
391 static void
392 xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
393 {
394 struct kvec *tail;
395 size_t copy;
396 unsigned int pglen = buf->page_len;
397 unsigned int tailbuf_len;
398
399 tail = buf->tail;
400 	BUG_ON(len > pglen);
401
402 tailbuf_len = buf->buflen - buf->head->iov_len - buf->page_len;
403
404 /* Shift the tail first */
405 if (tailbuf_len != 0) {
406 unsigned int free_space = tailbuf_len - tail->iov_len;
407
408 if (len < free_space)
409 free_space = len;
410 tail->iov_len += free_space;
411
412 copy = len;
413 if (tail->iov_len > len) {
414 char *p = (char *)tail->iov_base + len;
415 memmove(p, tail->iov_base, tail->iov_len - len);
416 } else
417 copy = tail->iov_len;
418 /* Copy from the inlined pages into the tail */
419 _copy_from_pages((char *)tail->iov_base,
420 buf->pages, buf->page_base + pglen - len,
421 copy);
422 }
423 buf->page_len -= len;
424 buf->buflen -= len;
425 /* Have we truncated the message? */
426 if (buf->len > buf->buflen)
427 buf->len = buf->buflen;
428 }
429
430 void
431 xdr_shift_buf(struct xdr_buf *buf, size_t len)
432 {
433 xdr_shrink_bufhead(buf, len);
434 }
435 EXPORT_SYMBOL_GPL(xdr_shift_buf);
436
437 /**
438 * xdr_stream_pos - Return the current offset from the start of the xdr_stream
439 * @xdr: pointer to struct xdr_stream
440 */
441 unsigned int xdr_stream_pos(const struct xdr_stream *xdr)
442 {
443 return (unsigned int)(XDR_QUADLEN(xdr->buf->len) - xdr->nwords) << 2;
444 }
445 EXPORT_SYMBOL_GPL(xdr_stream_pos);
446
447 /**
448 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
449 * @xdr: pointer to xdr_stream struct
450 * @buf: pointer to XDR buffer in which to encode data
451 * @p: current pointer inside XDR buffer
452 *
453 * Note: at the moment the RPC client only passes the length of our
454 * scratch buffer in the xdr_buf's header kvec. Previously this
455 * meant we needed to call xdr_adjust_iovec() after encoding the
456 * data. With the new scheme, the xdr_stream manages the details
457 * of the buffer length, and takes care of adjusting the kvec
458 * length for us.
459 */
460 void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
461 {
462 struct kvec *iov = buf->head;
463 int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;
464
465 xdr_set_scratch_buffer(xdr, NULL, 0);
466 BUG_ON(scratch_len < 0);
467 xdr->buf = buf;
468 xdr->iov = iov;
469 xdr->p = (__be32 *)((char *)iov->iov_base + iov->iov_len);
470 xdr->end = (__be32 *)((char *)iov->iov_base + scratch_len);
471 BUG_ON(iov->iov_len > scratch_len);
472
473 if (p != xdr->p && p != NULL) {
474 size_t len;
475
476 BUG_ON(p < xdr->p || p > xdr->end);
477 len = (char *)p - (char *)xdr->p;
478 xdr->p = p;
479 buf->len += len;
480 iov->iov_len += len;
481 }
482 }
483 EXPORT_SYMBOL_GPL(xdr_init_encode);
484
485 /**
486 * xdr_commit_encode - Ensure all data is written to buffer
487 * @xdr: pointer to xdr_stream
488 *
489 * We handle encoding across page boundaries by giving the caller a
490 * temporary location to write to, then later copying the data into
491 * place; xdr_commit_encode does that copying.
492 *
493 * Normally the caller doesn't need to call this directly, as the
494 * following xdr_reserve_space will do it. But an explicit call may be
495 * required at the end of encoding, or any other time when the xdr_buf
496 * data might be read.
497 */
498 void xdr_commit_encode(struct xdr_stream *xdr)
499 {
500 int shift = xdr->scratch.iov_len;
501 void *page;
502
503 if (shift == 0)
504 return;
505 page = page_address(*xdr->page_ptr);
506 memcpy(xdr->scratch.iov_base, page, shift);
507 memmove(page, page + shift, (void *)xdr->p - page);
508 xdr->scratch.iov_len = 0;
509 }
510 EXPORT_SYMBOL_GPL(xdr_commit_encode);
511
512 static __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr,
513 size_t nbytes)
514 {
515 	__be32 *p;
516 int space_left;
517 int frag1bytes, frag2bytes;
518
519 if (nbytes > PAGE_SIZE)
520 return NULL; /* Bigger buffers require special handling */
521 if (xdr->buf->len + nbytes > xdr->buf->buflen)
522 return NULL; /* Sorry, we're totally out of space */
523 frag1bytes = (xdr->end - xdr->p) << 2;
524 frag2bytes = nbytes - frag1bytes;
525 if (xdr->iov)
526 xdr->iov->iov_len += frag1bytes;
527 else
528 xdr->buf->page_len += frag1bytes;
529 xdr->page_ptr++;
530 xdr->iov = NULL;
531 /*
532 * If the last encode didn't end exactly on a page boundary, the
533 * next one will straddle boundaries. Encode into the next
534 * page, then copy it back later in xdr_commit_encode. We use
535 * the "scratch" iov to track any temporarily unused fragment of
536 * space at the end of the previous buffer:
537 */
538 xdr->scratch.iov_base = xdr->p;
539 xdr->scratch.iov_len = frag1bytes;
540 p = page_address(*xdr->page_ptr);
541 /*
542 * Note this is where the next encode will start after we've
543 * shifted this one back:
544 */
545 xdr->p = (void *)p + frag2bytes;
546 space_left = xdr->buf->buflen - xdr->buf->len;
547 xdr->end = (void *)p + min_t(int, space_left, PAGE_SIZE);
548 xdr->buf->page_len += frag2bytes;
549 xdr->buf->len += nbytes;
550 return p;
551 }
552
553 /**
554 * xdr_reserve_space - Reserve buffer space for sending
555 * @xdr: pointer to xdr_stream
556 * @nbytes: number of bytes to reserve
557 *
558 * Checks that we have enough buffer space to encode 'nbytes' more
559 * bytes of data. If so, update the total xdr_buf length, and
560 * adjust the length of the current kvec.
561 */
562 __be32 * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
563 {
564 __be32 *p = xdr->p;
565 __be32 *q;
566
567 xdr_commit_encode(xdr);
568 /* align nbytes on the next 32-bit boundary */
569 nbytes += 3;
570 nbytes &= ~3;
571 q = p + (nbytes >> 2);
572 if (unlikely(q > xdr->end || q < p))
573 return xdr_get_next_encode_buffer(xdr, nbytes);
574 xdr->p = q;
575 if (xdr->iov)
576 xdr->iov->iov_len += nbytes;
577 else
578 xdr->buf->page_len += nbytes;
579 xdr->buf->len += nbytes;
580 return p;
581 }
582 EXPORT_SYMBOL_GPL(xdr_reserve_space);
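/*
 * Editorial example (a sketch, not part of the original file): a typical
 * stream-based encode.  "buf" is assumed to be an xdr_buf whose (initially
 * empty) head kvec has already been set up by the transport; "value",
 * "data" and "datalen" are assumed caller-supplied.
 *
 *	struct xdr_stream xdr;
 *	__be32 *p;
 *
 *	xdr_init_encode(&xdr, buf, NULL);
 *	p = xdr_reserve_space(&xdr, 4 + 4 + XDR_QUADLEN(datalen) * 4);
 *	if (!p)
 *		return -EMSGSIZE;		// out of buffer space
 *	*p++ = cpu_to_be32(value);		// one 32-bit word
 *	xdr_encode_opaque(p, data, datalen);	// counted opaque + padding
 */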
583
584 /**
585 * xdr_truncate_encode - truncate an encode buffer
586 * @xdr: pointer to xdr_stream
587 * @len: new length of buffer
588 *
589 * Truncates the xdr stream, so that xdr->buf->len == len,
590 * and xdr->p points at offset len from the start of the buffer, and
591 * head, tail, and page lengths are adjusted to correspond.
592 *
593  * If this means moving xdr->p to a different buffer, we assume that
594  * the end pointer should be set to the end of the current page,
595 * except in the case of the head buffer when we assume the head
596 * buffer's current length represents the end of the available buffer.
597 *
598 * This is *not* safe to use on a buffer that already has inlined page
599 * cache pages (as in a zero-copy server read reply), except for the
600 * simple case of truncating from one position in the tail to another.
601 *
602 */
603 void xdr_truncate_encode(struct xdr_stream *xdr, size_t len)
604 {
605 struct xdr_buf *buf = xdr->buf;
606 struct kvec *head = buf->head;
607 struct kvec *tail = buf->tail;
608 int fraglen;
609 int new;
610
611 if (len > buf->len) {
612 WARN_ON_ONCE(1);
613 return;
614 }
615 xdr_commit_encode(xdr);
616
617 fraglen = min_t(int, buf->len - len, tail->iov_len);
618 tail->iov_len -= fraglen;
619 buf->len -= fraglen;
620 if (tail->iov_len) {
621 xdr->p = tail->iov_base + tail->iov_len;
622 WARN_ON_ONCE(!xdr->end);
623 WARN_ON_ONCE(!xdr->iov);
624 return;
625 }
626 WARN_ON_ONCE(fraglen);
627 fraglen = min_t(int, buf->len - len, buf->page_len);
628 buf->page_len -= fraglen;
629 buf->len -= fraglen;
630
631 new = buf->page_base + buf->page_len;
632
633 xdr->page_ptr = buf->pages + (new >> PAGE_SHIFT);
634
635 if (buf->page_len) {
636 xdr->p = page_address(*xdr->page_ptr);
637 xdr->end = (void *)xdr->p + PAGE_SIZE;
638 xdr->p = (void *)xdr->p + (new % PAGE_SIZE);
639 WARN_ON_ONCE(xdr->iov);
640 return;
641 }
642 if (fraglen)
643 xdr->end = head->iov_base + head->iov_len;
644 /* (otherwise assume xdr->end is already set) */
645 xdr->page_ptr--;
646 head->iov_len = len;
647 buf->len = len;
648 xdr->p = head->iov_base + head->iov_len;
649 xdr->iov = buf->head;
650 }
651 EXPORT_SYMBOL(xdr_truncate_encode);
652
653 /**
654 * xdr_restrict_buflen - decrease available buffer space
655 * @xdr: pointer to xdr_stream
656 * @newbuflen: new maximum number of bytes available
657 *
658 * Adjust our idea of how much space is available in the buffer.
659 * If we've already used too much space in the buffer, returns -1.
660 * If the available space is already smaller than newbuflen, returns 0
661 * and does nothing. Otherwise, adjusts xdr->buf->buflen to newbuflen
662 * and ensures xdr->end is set at most offset newbuflen from the start
663 * of the buffer.
664 */
665 int xdr_restrict_buflen(struct xdr_stream *xdr, int newbuflen)
666 {
667 struct xdr_buf *buf = xdr->buf;
668 int left_in_this_buf = (void *)xdr->end - (void *)xdr->p;
669 int end_offset = buf->len + left_in_this_buf;
670
671 if (newbuflen < 0 || newbuflen < buf->len)
672 return -1;
673 if (newbuflen > buf->buflen)
674 return 0;
675 if (newbuflen < end_offset)
676 xdr->end = (void *)xdr->end + newbuflen - end_offset;
677 buf->buflen = newbuflen;
678 return 0;
679 }
680 EXPORT_SYMBOL(xdr_restrict_buflen);
681
682 /**
683 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
684 * @xdr: pointer to xdr_stream
685 * @pages: list of pages
686 * @base: offset of first byte
687 * @len: length of data in bytes
688 *
689 */
690 void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
691 unsigned int len)
692 {
693 struct xdr_buf *buf = xdr->buf;
694 struct kvec *iov = buf->tail;
695 buf->pages = pages;
696 buf->page_base = base;
697 buf->page_len = len;
698
699 iov->iov_base = (char *)xdr->p;
700 iov->iov_len = 0;
701 xdr->iov = iov;
702
703 if (len & 3) {
704 unsigned int pad = 4 - (len & 3);
705
706 BUG_ON(xdr->p >= xdr->end);
707 iov->iov_base = (char *)xdr->p + (len & 3);
708 iov->iov_len += pad;
709 len += pad;
710 *xdr->p++ = 0;
711 }
712 buf->buflen += len;
713 buf->len += len;
714 }
715 EXPORT_SYMBOL_GPL(xdr_write_pages);
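/*
 * Editorial example (a sketch): attaching caller-owned pages as the body of
 * an encoded request, much as an NFS WRITE-style encoder would.  "xdr" is
 * assumed to be a stream set up with xdr_init_encode(); "pages", "pgbase"
 * and "count" are assumed to come from the caller's arguments.
 *
 *	xdr_write_pages(&xdr, pages, pgbase, count);
 *
 * After this call the stream's current position is in the tail kvec, so any
 * further xdr_reserve_space() output lands after the page data.
 */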
716
717 static void xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov,
718 unsigned int len)
719 {
720 if (len > iov->iov_len)
721 len = iov->iov_len;
722 xdr->p = (__be32*)iov->iov_base;
723 xdr->end = (__be32*)(iov->iov_base + len);
724 xdr->iov = iov;
725 xdr->page_ptr = NULL;
726 }
727
728 static int xdr_set_page_base(struct xdr_stream *xdr,
729 unsigned int base, unsigned int len)
730 {
731 unsigned int pgnr;
732 unsigned int maxlen;
733 unsigned int pgoff;
734 unsigned int pgend;
735 void *kaddr;
736
737 maxlen = xdr->buf->page_len;
738 if (base >= maxlen)
739 return -EINVAL;
740 maxlen -= base;
741 if (len > maxlen)
742 len = maxlen;
743
744 base += xdr->buf->page_base;
745
746 pgnr = base >> PAGE_SHIFT;
747 xdr->page_ptr = &xdr->buf->pages[pgnr];
748 kaddr = page_address(*xdr->page_ptr);
749
750 pgoff = base & ~PAGE_MASK;
751 xdr->p = (__be32*)(kaddr + pgoff);
752
753 pgend = pgoff + len;
754 if (pgend > PAGE_SIZE)
755 pgend = PAGE_SIZE;
756 xdr->end = (__be32*)(kaddr + pgend);
757 xdr->iov = NULL;
758 return 0;
759 }
760
761 static void xdr_set_next_page(struct xdr_stream *xdr)
762 {
763 unsigned int newbase;
764
765 newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT;
766 newbase -= xdr->buf->page_base;
767
768 if (xdr_set_page_base(xdr, newbase, PAGE_SIZE) < 0)
769 xdr_set_iov(xdr, xdr->buf->tail, xdr->nwords << 2);
770 }
771
772 static bool xdr_set_next_buffer(struct xdr_stream *xdr)
773 {
774 if (xdr->page_ptr != NULL)
775 xdr_set_next_page(xdr);
776 else if (xdr->iov == xdr->buf->head) {
777 if (xdr_set_page_base(xdr, 0, PAGE_SIZE) < 0)
778 xdr_set_iov(xdr, xdr->buf->tail, xdr->nwords << 2);
779 }
780 return xdr->p != xdr->end;
781 }
782
783 /**
784 * xdr_init_decode - Initialize an xdr_stream for decoding data.
785 * @xdr: pointer to xdr_stream struct
786 * @buf: pointer to XDR buffer from which to decode data
787 * @p: current pointer inside XDR buffer
788 */
789 void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
790 {
791 xdr->buf = buf;
792 xdr->scratch.iov_base = NULL;
793 xdr->scratch.iov_len = 0;
794 xdr->nwords = XDR_QUADLEN(buf->len);
795 if (buf->head[0].iov_len != 0)
796 xdr_set_iov(xdr, buf->head, buf->len);
797 else if (buf->page_len != 0)
798 xdr_set_page_base(xdr, 0, buf->len);
799 else
800 xdr_set_iov(xdr, buf->head, buf->len);
801 if (p != NULL && p > xdr->p && xdr->end >= p) {
802 xdr->nwords -= p - xdr->p;
803 xdr->p = p;
804 }
805 }
806 EXPORT_SYMBOL_GPL(xdr_init_decode);
807
808 /**
809 * xdr_init_decode_pages - Initialize an xdr_stream for decoding into pages
810 * @xdr: pointer to xdr_stream struct
811 * @buf: pointer to XDR buffer from which to decode data
812 * @pages: list of pages to decode into
813 * @len: length in bytes of buffer in pages
814 */
815 void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
816 struct page **pages, unsigned int len)
817 {
818 memset(buf, 0, sizeof(*buf));
819 buf->pages = pages;
820 buf->page_len = len;
821 buf->buflen = len;
822 buf->len = len;
823 xdr_init_decode(xdr, buf, NULL);
824 }
825 EXPORT_SYMBOL_GPL(xdr_init_decode_pages);
826
827 static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
828 {
829 unsigned int nwords = XDR_QUADLEN(nbytes);
830 __be32 *p = xdr->p;
831 __be32 *q = p + nwords;
832
833 if (unlikely(nwords > xdr->nwords || q > xdr->end || q < p))
834 return NULL;
835 xdr->p = q;
836 xdr->nwords -= nwords;
837 return p;
838 }
839
840 /**
841 * xdr_set_scratch_buffer - Attach a scratch buffer for decoding data.
842 * @xdr: pointer to xdr_stream struct
843 * @buf: pointer to an empty buffer
844 * @buflen: size of 'buf'
845 *
846 * The scratch buffer is used when decoding from an array of pages.
847 * If an xdr_inline_decode() call spans across page boundaries, then
848 * we copy the data into the scratch buffer in order to allow linear
849 * access.
850 */
851 void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen)
852 {
853 xdr->scratch.iov_base = buf;
854 xdr->scratch.iov_len = buflen;
855 }
856 EXPORT_SYMBOL_GPL(xdr_set_scratch_buffer);
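/*
 * Editorial example (a sketch): a decoder that may pull items spanning a
 * page boundary typically attaches a page-sized scratch buffer before it
 * starts pulling data; xdr_inline_decode() then falls back to that buffer
 * when needed.  "scratch_page" is an assumed pre-allocated struct page.
 *
 *	xdr_set_scratch_buffer(&xdr, page_address(scratch_page), PAGE_SIZE);
 */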
857
858 static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes)
859 {
860 __be32 *p;
861 char *cpdest = xdr->scratch.iov_base;
862 size_t cplen = (char *)xdr->end - (char *)xdr->p;
863
864 if (nbytes > xdr->scratch.iov_len)
865 return NULL;
866 p = __xdr_inline_decode(xdr, cplen);
867 if (p == NULL)
868 return NULL;
869 memcpy(cpdest, p, cplen);
870 cpdest += cplen;
871 nbytes -= cplen;
872 if (!xdr_set_next_buffer(xdr))
873 return NULL;
874 p = __xdr_inline_decode(xdr, nbytes);
875 if (p == NULL)
876 return NULL;
877 memcpy(cpdest, p, nbytes);
878 return xdr->scratch.iov_base;
879 }
880
881 /**
882 * xdr_inline_decode - Retrieve XDR data to decode
883 * @xdr: pointer to xdr_stream struct
884 * @nbytes: number of bytes of data to decode
885 *
886 * Check if the input buffer is long enough to enable us to decode
887 * 'nbytes' more bytes of data starting at the current position.
888 * If so return the current pointer, then update the current
889 * pointer position.
890 */
891 __be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
892 {
893 __be32 *p;
894
895 if (nbytes == 0)
896 return xdr->p;
897 if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
898 return NULL;
899 p = __xdr_inline_decode(xdr, nbytes);
900 if (p != NULL)
901 return p;
902 return xdr_copy_to_scratch(xdr, nbytes);
903 }
904 EXPORT_SYMBOL_GPL(xdr_inline_decode);
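/*
 * Editorial example (a sketch, not part of the original file): a minimal
 * decode sequence.  "rcvbuf" is assumed to be an xdr_buf already holding a
 * received message.
 *
 *	struct xdr_stream xdr;
 *	__be32 *p;
 *	u32 count;
 *
 *	xdr_init_decode(&xdr, rcvbuf, NULL);
 *	p = xdr_inline_decode(&xdr, 4);
 *	if (!p)
 *		return -EIO;		// short or corrupt message
 *	count = be32_to_cpup(p);
 */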
905
906 static unsigned int xdr_align_pages(struct xdr_stream *xdr, unsigned int len)
907 {
908 struct xdr_buf *buf = xdr->buf;
909 struct kvec *iov;
910 unsigned int nwords = XDR_QUADLEN(len);
911 unsigned int cur = xdr_stream_pos(xdr);
912
913 if (xdr->nwords == 0)
914 return 0;
915 /* Realign pages to current pointer position */
916 iov = buf->head;
917 if (iov->iov_len > cur) {
918 xdr_shrink_bufhead(buf, iov->iov_len - cur);
919 xdr->nwords = XDR_QUADLEN(buf->len - cur);
920 }
921
922 if (nwords > xdr->nwords) {
923 nwords = xdr->nwords;
924 len = nwords << 2;
925 }
926 if (buf->page_len <= len)
927 len = buf->page_len;
928 else if (nwords < xdr->nwords) {
929 /* Truncate page data and move it into the tail */
930 xdr_shrink_pagelen(buf, buf->page_len - len);
931 xdr->nwords = XDR_QUADLEN(buf->len - cur);
932 }
933 return len;
934 }
935
936 /**
937 * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
938 * @xdr: pointer to xdr_stream struct
939 * @len: number of bytes of page data
940 *
941 * Moves data beyond the current pointer position from the XDR head[] buffer
942 * into the page list. Any data that lies beyond current position + "len"
943 * bytes is moved into the XDR tail[].
944 *
945 * Returns the number of XDR encoded bytes now contained in the pages
946 */
947 unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
948 {
949 struct xdr_buf *buf = xdr->buf;
950 struct kvec *iov;
951 unsigned int nwords;
952 unsigned int end;
953 unsigned int padding;
954
955 len = xdr_align_pages(xdr, len);
956 if (len == 0)
957 return 0;
958 nwords = XDR_QUADLEN(len);
959 padding = (nwords << 2) - len;
960 xdr->iov = iov = buf->tail;
961 /* Compute remaining message length. */
962 end = ((xdr->nwords - nwords) << 2) + padding;
963 if (end > iov->iov_len)
964 end = iov->iov_len;
965
966 /*
967 * Position current pointer at beginning of tail, and
968 * set remaining message length.
969 */
970 xdr->p = (__be32 *)((char *)iov->iov_base + padding);
971 xdr->end = (__be32 *)((char *)iov->iov_base + end);
972 xdr->page_ptr = NULL;
973 xdr->nwords = XDR_QUADLEN(end - padding);
974 return len;
975 }
976 EXPORT_SYMBOL_GPL(xdr_read_pages);
977
978 /**
979 * xdr_enter_page - decode data from the XDR page
980 * @xdr: pointer to xdr_stream struct
981 * @len: number of bytes of page data
982 *
983 * Moves data beyond the current pointer position from the XDR head[] buffer
984 * into the page list. Any data that lies beyond current position + "len"
985 * bytes is moved into the XDR tail[]. The current pointer is then
986 * repositioned at the beginning of the first XDR page.
987 */
988 void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
989 {
990 len = xdr_align_pages(xdr, len);
991 /*
992 * Position current pointer at beginning of tail, and
993 * set remaining message length.
994 */
995 if (len != 0)
996 xdr_set_page_base(xdr, 0, len);
997 }
998 EXPORT_SYMBOL_GPL(xdr_enter_page);
999
1000 static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};
1001
1002 void
1003 xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
1004 {
1005 buf->head[0] = *iov;
1006 buf->tail[0] = empty_iov;
1007 buf->page_len = 0;
1008 buf->buflen = buf->len = iov->iov_len;
1009 }
1010 EXPORT_SYMBOL_GPL(xdr_buf_from_iov);
1011
1012 /**
1013 * xdr_buf_subsegment - set subbuf to a portion of buf
1014 * @buf: an xdr buffer
1015 * @subbuf: the result buffer
1016 * @base: beginning of range in bytes
1017 * @len: length of range in bytes
1018 *
1019 * sets @subbuf to an xdr buffer representing the portion of @buf of
1020 * length @len starting at offset @base.
1021 *
1022 * @buf and @subbuf may be pointers to the same struct xdr_buf.
1023 *
1024  * Returns -1 if base or length are out of bounds.
1025 */
1026 int
1027 xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
1028 unsigned int base, unsigned int len)
1029 {
1030 subbuf->buflen = subbuf->len = len;
1031 if (base < buf->head[0].iov_len) {
1032 subbuf->head[0].iov_base = buf->head[0].iov_base + base;
1033 subbuf->head[0].iov_len = min_t(unsigned int, len,
1034 buf->head[0].iov_len - base);
1035 len -= subbuf->head[0].iov_len;
1036 base = 0;
1037 } else {
1038 base -= buf->head[0].iov_len;
1039 subbuf->head[0].iov_len = 0;
1040 }
1041
1042 if (base < buf->page_len) {
1043 subbuf->page_len = min(buf->page_len - base, len);
1044 base += buf->page_base;
1045 subbuf->page_base = base & ~PAGE_MASK;
1046 subbuf->pages = &buf->pages[base >> PAGE_SHIFT];
1047 len -= subbuf->page_len;
1048 base = 0;
1049 } else {
1050 base -= buf->page_len;
1051 subbuf->page_len = 0;
1052 }
1053
1054 if (base < buf->tail[0].iov_len) {
1055 subbuf->tail[0].iov_base = buf->tail[0].iov_base + base;
1056 subbuf->tail[0].iov_len = min_t(unsigned int, len,
1057 buf->tail[0].iov_len - base);
1058 len -= subbuf->tail[0].iov_len;
1059 base = 0;
1060 } else {
1061 base -= buf->tail[0].iov_len;
1062 subbuf->tail[0].iov_len = 0;
1063 }
1064
1065 if (base || len)
1066 return -1;
1067 return 0;
1068 }
1069 EXPORT_SYMBOL_GPL(xdr_buf_subsegment);
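/*
 * Editorial example (a sketch): carving out the region that holds an opaque
 * body so it can be processed in place.  "offset" and "length" are assumed
 * to have been decoded from the surrounding message.
 *
 *	struct xdr_buf body;
 *
 *	if (xdr_buf_subsegment(buf, &body, offset, length) < 0)
 *		return -ERANGE;
 *	// "body" now aliases buf's head/pages/tail; no data is copied.
 */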
1070
1071 /**
1072 * xdr_buf_trim - lop at most "len" bytes off the end of "buf"
1073 * @buf: buf to be trimmed
1074 * @len: number of bytes to reduce "buf" by
1075 *
1076 * Trim an xdr_buf by the given number of bytes by fixing up the lengths. Note
1077 * that it's possible that we'll trim less than that amount if the xdr_buf is
1078 * too small, or if (for instance) it's all in the head and the parser has
1079 * already read too far into it.
1080 */
1081 void xdr_buf_trim(struct xdr_buf *buf, unsigned int len)
1082 {
1083 size_t cur;
1084 unsigned int trim = len;
1085
1086 if (buf->tail[0].iov_len) {
1087 cur = min_t(size_t, buf->tail[0].iov_len, trim);
1088 buf->tail[0].iov_len -= cur;
1089 trim -= cur;
1090 if (!trim)
1091 goto fix_len;
1092 }
1093
1094 if (buf->page_len) {
1095 cur = min_t(unsigned int, buf->page_len, trim);
1096 buf->page_len -= cur;
1097 trim -= cur;
1098 if (!trim)
1099 goto fix_len;
1100 }
1101
1102 if (buf->head[0].iov_len) {
1103 cur = min_t(size_t, buf->head[0].iov_len, trim);
1104 buf->head[0].iov_len -= cur;
1105 trim -= cur;
1106 }
1107 fix_len:
1108 buf->len -= (len - trim);
1109 }
1110 EXPORT_SYMBOL_GPL(xdr_buf_trim);
1111
1112 static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
1113 {
1114 unsigned int this_len;
1115
1116 this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
1117 memcpy(obj, subbuf->head[0].iov_base, this_len);
1118 len -= this_len;
1119 obj += this_len;
1120 this_len = min_t(unsigned int, len, subbuf->page_len);
1121 if (this_len)
1122 _copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len);
1123 len -= this_len;
1124 obj += this_len;
1125 this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
1126 memcpy(obj, subbuf->tail[0].iov_base, this_len);
1127 }
1128
1129 /* obj is assumed to point to allocated memory of size at least len: */
1130 int read_bytes_from_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
1131 {
1132 struct xdr_buf subbuf;
1133 int status;
1134
1135 status = xdr_buf_subsegment(buf, &subbuf, base, len);
1136 if (status != 0)
1137 return status;
1138 __read_bytes_from_xdr_buf(&subbuf, obj, len);
1139 return 0;
1140 }
1141 EXPORT_SYMBOL_GPL(read_bytes_from_xdr_buf);
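/*
 * Editorial example (a sketch): pulling a fixed-size big-endian quantity out
 * of an xdr_buf at an arbitrary offset, here a 64-bit value (the 32-bit case
 * is what xdr_decode_word() below implements).
 *
 *	__be64 raw;
 *	u64 value;
 *
 *	if (read_bytes_from_xdr_buf(buf, base, &raw, sizeof(raw)) < 0)
 *		return -EIO;
 *	value = be64_to_cpu(raw);
 */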
1142
1143 static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
1144 {
1145 unsigned int this_len;
1146
1147 this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
1148 memcpy(subbuf->head[0].iov_base, obj, this_len);
1149 len -= this_len;
1150 obj += this_len;
1151 this_len = min_t(unsigned int, len, subbuf->page_len);
1152 if (this_len)
1153 _copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len);
1154 len -= this_len;
1155 obj += this_len;
1156 this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
1157 memcpy(subbuf->tail[0].iov_base, obj, this_len);
1158 }
1159
1160 /* obj is assumed to point to allocated memory of size at least len: */
1161 int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
1162 {
1163 struct xdr_buf subbuf;
1164 int status;
1165
1166 status = xdr_buf_subsegment(buf, &subbuf, base, len);
1167 if (status != 0)
1168 return status;
1169 __write_bytes_to_xdr_buf(&subbuf, obj, len);
1170 return 0;
1171 }
1172 EXPORT_SYMBOL_GPL(write_bytes_to_xdr_buf);
1173
1174 int
1175 xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj)
1176 {
1177 __be32 raw;
1178 int status;
1179
1180 status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
1181 if (status)
1182 return status;
1183 *obj = be32_to_cpu(raw);
1184 return 0;
1185 }
1186 EXPORT_SYMBOL_GPL(xdr_decode_word);
1187
1188 int
1189 xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
1190 {
1191 __be32 raw = cpu_to_be32(obj);
1192
1193 return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
1194 }
1195 EXPORT_SYMBOL_GPL(xdr_encode_word);
1196
1197 /* If the netobj starting offset bytes from the start of xdr_buf is contained
1198 * entirely in the head or the tail, set object to point to it; otherwise
1199 * try to find space for it at the end of the tail, copy it there, and
1200 * set obj to point to it. */
1201 int xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, unsigned int offset)
1202 {
1203 struct xdr_buf subbuf;
1204
1205 if (xdr_decode_word(buf, offset, &obj->len))
1206 return -EFAULT;
1207 if (xdr_buf_subsegment(buf, &subbuf, offset + 4, obj->len))
1208 return -EFAULT;
1209
1210 /* Is the obj contained entirely in the head? */
1211 obj->data = subbuf.head[0].iov_base;
1212 if (subbuf.head[0].iov_len == obj->len)
1213 return 0;
1214 /* ..or is the obj contained entirely in the tail? */
1215 obj->data = subbuf.tail[0].iov_base;
1216 if (subbuf.tail[0].iov_len == obj->len)
1217 return 0;
1218
1219 /* use end of tail as storage for obj:
1220 * (We don't copy to the beginning because then we'd have
1221 * to worry about doing a potentially overlapping copy.
1222 * This assumes the object is at most half the length of the
1223 * tail.) */
1224 if (obj->len > buf->buflen - buf->len)
1225 return -ENOMEM;
1226 if (buf->tail[0].iov_len != 0)
1227 obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len;
1228 else
1229 obj->data = buf->head[0].iov_base + buf->head[0].iov_len;
1230 __read_bytes_from_xdr_buf(&subbuf, obj->data, obj->len);
1231 return 0;
1232 }
1233 EXPORT_SYMBOL_GPL(xdr_buf_read_netobj);
1234
1235 /* Returns 0 on success, or else a negative error code. */
1236 static int
1237 xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
1238 struct xdr_array2_desc *desc, int encode)
1239 {
1240 char *elem = NULL, *c;
1241 unsigned int copied = 0, todo, avail_here;
1242 struct page **ppages = NULL;
1243 int err;
1244
1245 if (encode) {
1246 if (xdr_encode_word(buf, base, desc->array_len) != 0)
1247 return -EINVAL;
1248 } else {
1249 if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
1250 desc->array_len > desc->array_maxlen ||
1251 (unsigned long) base + 4 + desc->array_len *
1252 desc->elem_size > buf->len)
1253 return -EINVAL;
1254 }
1255 base += 4;
1256
1257 if (!desc->xcode)
1258 return 0;
1259
1260 todo = desc->array_len * desc->elem_size;
1261
1262 /* process head */
1263 if (todo && base < buf->head->iov_len) {
1264 c = buf->head->iov_base + base;
1265 avail_here = min_t(unsigned int, todo,
1266 buf->head->iov_len - base);
1267 todo -= avail_here;
1268
1269 while (avail_here >= desc->elem_size) {
1270 err = desc->xcode(desc, c);
1271 if (err)
1272 goto out;
1273 c += desc->elem_size;
1274 avail_here -= desc->elem_size;
1275 }
1276 if (avail_here) {
1277 if (!elem) {
1278 elem = kmalloc(desc->elem_size, GFP_KERNEL);
1279 err = -ENOMEM;
1280 if (!elem)
1281 goto out;
1282 }
1283 if (encode) {
1284 err = desc->xcode(desc, elem);
1285 if (err)
1286 goto out;
1287 memcpy(c, elem, avail_here);
1288 } else
1289 memcpy(elem, c, avail_here);
1290 copied = avail_here;
1291 }
1292 base = buf->head->iov_len; /* align to start of pages */
1293 }
1294
1295 /* process pages array */
1296 base -= buf->head->iov_len;
1297 if (todo && base < buf->page_len) {
1298 unsigned int avail_page;
1299
1300 avail_here = min(todo, buf->page_len - base);
1301 todo -= avail_here;
1302
1303 base += buf->page_base;
1304 ppages = buf->pages + (base >> PAGE_SHIFT);
1305 base &= ~PAGE_MASK;
1306 avail_page = min_t(unsigned int, PAGE_SIZE - base,
1307 avail_here);
1308 c = kmap(*ppages) + base;
1309
1310 while (avail_here) {
1311 avail_here -= avail_page;
1312 if (copied || avail_page < desc->elem_size) {
1313 unsigned int l = min(avail_page,
1314 desc->elem_size - copied);
1315 if (!elem) {
1316 elem = kmalloc(desc->elem_size,
1317 GFP_KERNEL);
1318 err = -ENOMEM;
1319 if (!elem)
1320 goto out;
1321 }
1322 if (encode) {
1323 if (!copied) {
1324 err = desc->xcode(desc, elem);
1325 if (err)
1326 goto out;
1327 }
1328 memcpy(c, elem + copied, l);
1329 copied += l;
1330 if (copied == desc->elem_size)
1331 copied = 0;
1332 } else {
1333 memcpy(elem + copied, c, l);
1334 copied += l;
1335 if (copied == desc->elem_size) {
1336 err = desc->xcode(desc, elem);
1337 if (err)
1338 goto out;
1339 copied = 0;
1340 }
1341 }
1342 avail_page -= l;
1343 c += l;
1344 }
1345 while (avail_page >= desc->elem_size) {
1346 err = desc->xcode(desc, c);
1347 if (err)
1348 goto out;
1349 c += desc->elem_size;
1350 avail_page -= desc->elem_size;
1351 }
1352 if (avail_page) {
1353 unsigned int l = min(avail_page,
1354 desc->elem_size - copied);
1355 if (!elem) {
1356 elem = kmalloc(desc->elem_size,
1357 GFP_KERNEL);
1358 err = -ENOMEM;
1359 if (!elem)
1360 goto out;
1361 }
1362 if (encode) {
1363 if (!copied) {
1364 err = desc->xcode(desc, elem);
1365 if (err)
1366 goto out;
1367 }
1368 memcpy(c, elem + copied, l);
1369 copied += l;
1370 if (copied == desc->elem_size)
1371 copied = 0;
1372 } else {
1373 memcpy(elem + copied, c, l);
1374 copied += l;
1375 if (copied == desc->elem_size) {
1376 err = desc->xcode(desc, elem);
1377 if (err)
1378 goto out;
1379 copied = 0;
1380 }
1381 }
1382 }
1383 if (avail_here) {
1384 kunmap(*ppages);
1385 ppages++;
1386 c = kmap(*ppages);
1387 }
1388
1389 avail_page = min(avail_here,
1390 (unsigned int) PAGE_SIZE);
1391 }
1392 base = buf->page_len; /* align to start of tail */
1393 }
1394
1395 /* process tail */
1396 base -= buf->page_len;
1397 if (todo) {
1398 c = buf->tail->iov_base + base;
1399 if (copied) {
1400 unsigned int l = desc->elem_size - copied;
1401
1402 if (encode)
1403 memcpy(c, elem + copied, l);
1404 else {
1405 memcpy(elem + copied, c, l);
1406 err = desc->xcode(desc, elem);
1407 if (err)
1408 goto out;
1409 }
1410 todo -= l;
1411 c += l;
1412 }
1413 while (todo) {
1414 err = desc->xcode(desc, c);
1415 if (err)
1416 goto out;
1417 c += desc->elem_size;
1418 todo -= desc->elem_size;
1419 }
1420 }
1421 err = 0;
1422
1423 out:
1424 kfree(elem);
1425 if (ppages)
1426 kunmap(*ppages);
1427 return err;
1428 }
1429
1430 int
1431 xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
1432 struct xdr_array2_desc *desc)
1433 {
1434 if (base >= buf->len)
1435 return -EINVAL;
1436
1437 return xdr_xcode_array2(buf, base, desc, 0);
1438 }
1439 EXPORT_SYMBOL_GPL(xdr_decode_array2);
1440
1441 int
1442 xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
1443 struct xdr_array2_desc *desc)
1444 {
1445 if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
1446 buf->head->iov_len + buf->page_len + buf->tail->iov_len)
1447 return -EINVAL;
1448
1449 return xdr_xcode_array2(buf, base, desc, 1);
1450 }
1451 EXPORT_SYMBOL_GPL(xdr_encode_array2);
1452
1453 int
1454 xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
1455 int (*actor)(struct scatterlist *, void *), void *data)
1456 {
1457 int i, ret = 0;
1458 unsigned int page_len, thislen, page_offset;
1459 struct scatterlist sg[1];
1460
1461 sg_init_table(sg, 1);
1462
1463 if (offset >= buf->head[0].iov_len) {
1464 offset -= buf->head[0].iov_len;
1465 } else {
1466 thislen = buf->head[0].iov_len - offset;
1467 if (thislen > len)
1468 thislen = len;
1469 sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
1470 ret = actor(sg, data);
1471 if (ret)
1472 goto out;
1473 offset = 0;
1474 len -= thislen;
1475 }
1476 if (len == 0)
1477 goto out;
1478
1479 if (offset >= buf->page_len) {
1480 offset -= buf->page_len;
1481 } else {
1482 page_len = buf->page_len - offset;
1483 if (page_len > len)
1484 page_len = len;
1485 len -= page_len;
1486 page_offset = (offset + buf->page_base) & (PAGE_SIZE - 1);
1487 i = (offset + buf->page_base) >> PAGE_SHIFT;
1488 thislen = PAGE_SIZE - page_offset;
1489 do {
1490 if (thislen > page_len)
1491 thislen = page_len;
1492 sg_set_page(sg, buf->pages[i], thislen, page_offset);
1493 ret = actor(sg, data);
1494 if (ret)
1495 goto out;
1496 page_len -= thislen;
1497 i++;
1498 page_offset = 0;
1499 thislen = PAGE_SIZE;
1500 } while (page_len != 0);
1501 offset = 0;
1502 }
1503 if (len == 0)
1504 goto out;
1505 if (offset < buf->tail[0].iov_len) {
1506 thislen = buf->tail[0].iov_len - offset;
1507 if (thislen > len)
1508 thislen = len;
1509 sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
1510 ret = actor(sg, data);
1511 len -= thislen;
1512 }
1513 if (len != 0)
1514 ret = -EINVAL;
1515 out:
1516 return ret;
1517 }
1518 EXPORT_SYMBOL_GPL(xdr_process_buf);
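/*
 * Editorial example (a sketch): an "actor" that xdr_process_buf() can drive
 * over every contiguous region of a buffer.  A real user (such as the
 * RPCSEC_GSS checksumming code) would feed each scatterlist entry to a
 * crypto hash; this assumed helper merely totals the lengths.
 *
 *	static int count_actor(struct scatterlist *sg, void *data)
 *	{
 *		size_t *total = data;
 *
 *		*total += sg->length;
 *		return 0;
 *	}
 *
 *	size_t total = 0;
 *	int err = xdr_process_buf(buf, 0, buf->len, count_actor, &total);
 */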
1519
1520 /**
1521 * xdr_stream_decode_string_dup - Decode and duplicate variable length string
1522 * @xdr: pointer to xdr_stream
1523 * @str: location to store pointer to string
1524 * @maxlen: maximum acceptable string length
1525 * @gfp_flags: GFP mask to use
1526 *
1527 * Return values:
1528  * On success, returns length of NUL-terminated string stored in *@str
1529 * %-EBADMSG on XDR buffer overflow
1530 * %-EMSGSIZE if the size of the string would exceed @maxlen
1531 * %-ENOMEM on memory allocation failure
1532 */
1533 ssize_t xdr_stream_decode_string_dup(struct xdr_stream *xdr, char **str,
1534 size_t maxlen, gfp_t gfp_flags)
1535 {
1536 void *p;
1537 ssize_t ret;
1538
1539 ret = xdr_stream_decode_opaque_inline(xdr, &p, maxlen);
1540 if (ret > 0) {
1541 char *s = kmalloc(ret + 1, gfp_flags);
1542 if (s != NULL) {
1543 memcpy(s, p, ret);
1544 s[ret] = '\0';
1545 *str = s;
1546 return strlen(s);
1547 }
1548 ret = -ENOMEM;
1549 }
1550 *str = NULL;
1551 return ret;
1552 }
1553 EXPORT_SYMBOL_GPL(xdr_stream_decode_string_dup);
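/*
 * Editorial example (a sketch): duplicating a decoded string.  The limit
 * NFS4_MAXNAMLEN is just an illustrative maximum; the caller owns and must
 * eventually free the returned string.
 *
 *	char *name;
 *	ssize_t len;
 *
 *	len = xdr_stream_decode_string_dup(xdr, &name, NFS4_MAXNAMLEN,
 *					   GFP_KERNEL);
 *	if (len < 0)
 *		return len;
 *	...
 *	kfree(name);
 */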