/*
 * linux/net/sunrpc/xdr.c
 *
 * Generic XDR support.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>

/*
 * XDR functions for basic NFS types
 */
__be32 *
xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
{
	unsigned int quadlen = XDR_QUADLEN(obj->len);

	p[quadlen] = 0;		/* zero trailing bytes */
	*p++ = cpu_to_be32(obj->len);
	memcpy(p, obj->data, obj->len);
	return p + quadlen;
}
EXPORT_SYMBOL_GPL(xdr_encode_netobj);

__be32 *
xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
{
	unsigned int len;

	len = be32_to_cpu(*p++);
	if (len > XDR_MAX_NETOBJ)
		return NULL;
	obj->len = len;
	obj->data = (u8 *) p;
	return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL_GPL(xdr_decode_netobj);

/**
 * xdr_encode_opaque_fixed - Encode fixed length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Copy the array of data of length nbytes at ptr to the XDR buffer
 * at position p, then align to the next 32-bit boundary by padding
 * with zero bytes (see RFC 1832).
 * Note: if ptr is NULL, only the padding is performed.
 *
 * Returns the updated current XDR buffer position
 */
__be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes)
{
	if (likely(nbytes != 0)) {
		unsigned int quadlen = XDR_QUADLEN(nbytes);
		unsigned int padding = (quadlen << 2) - nbytes;

		if (ptr != NULL)
			memcpy(p, ptr, nbytes);
		if (padding != 0)
			memset((char *)p + nbytes, 0, padding);
		p += quadlen;
	}
	return p;
}
EXPORT_SYMBOL_GPL(xdr_encode_opaque_fixed);
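
/*
 * Example (illustrative only, not part of the original file): encoding
 * the 5-byte string "hello" as fixed-length opaque data consumes
 * XDR_QUADLEN(5) = 2 quads (8 bytes); the final 3 bytes are zero padding:
 *
 *	p = xdr_encode_opaque_fixed(p, "hello", 5);
 *
 * p advances by 2 quads, and the buffer holds 'h','e','l','l','o',0,0,0.
 */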

/**
 * xdr_encode_opaque - Encode variable length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Returns the updated current XDR buffer position
 */
__be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
{
	*p++ = cpu_to_be32(nbytes);
	return xdr_encode_opaque_fixed(p, ptr, nbytes);
}
EXPORT_SYMBOL_GPL(xdr_encode_opaque);

__be32 *
xdr_encode_string(__be32 *p, const char *string)
{
	return xdr_encode_array(p, string, strlen(string));
}
EXPORT_SYMBOL_GPL(xdr_encode_string);

__be32 *
xdr_decode_string_inplace(__be32 *p, char **sp,
			  unsigned int *lenp, unsigned int maxlen)
{
	u32 len;

	len = be32_to_cpu(*p++);
	if (len > maxlen)
		return NULL;
	*lenp = len;
	*sp = (char *) p;
	return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL_GPL(xdr_decode_string_inplace);
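
/*
 * Example (illustrative only): decoding a counted string without copying.
 * The returned pointer, if non-NULL, is positioned past the string and
 * its padding, while 'name' points into the XDR buffer itself. The
 * maximum is whatever bound the caller wants to enforce:
 *
 *	char *name;
 *	unsigned int namelen;
 *
 *	p = xdr_decode_string_inplace(p, &name, &namelen, NFS_MAXNAMLEN);
 *	if (p == NULL)
 *		return -EIO;	// string longer than the caller's maximum
 */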

/**
 * xdr_terminate_string - '\0'-terminate a string residing in an xdr_buf
 * @buf: XDR buffer where string resides
 * @len: length of string, in bytes
 */
void
xdr_terminate_string(struct xdr_buf *buf, const u32 len)
{
	char *kaddr;

	kaddr = kmap_atomic(buf->pages[0]);
	kaddr[buf->page_base + len] = '\0';
	kunmap_atomic(kaddr);
}
EXPORT_SYMBOL_GPL(xdr_terminate_string);

void
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
		 struct page **pages, unsigned int base, unsigned int len)
{
	struct kvec *head = xdr->head;
	struct kvec *tail = xdr->tail;
	char *buf = (char *)head->iov_base;
	unsigned int buflen = head->iov_len;

	head->iov_len = offset;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	tail->iov_base = buf + offset;
	tail->iov_len = buflen - offset;

	xdr->buflen += len;
}
EXPORT_SYMBOL_GPL(xdr_inline_pages);
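
/*
 * Example (illustrative only): a client preparing to receive a READ-style
 * reply attaches the caller's pages to the receive buffer, splitting the
 * head at the point where the opaque page data will begin:
 *
 *	xdr_inline_pages(&req->rq_rcv_buf, replen << 2,
 *			 args->pages, args->pgbase, count);
 *
 * Here 'replen' counts the 32-bit words of reply header that precede the
 * page data; everything after that offset in the head becomes the tail.
 */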

/*
 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
 */

/**
 * _shift_data_right_pages
 * @pages: vector of pages containing both the source and dest memory area.
 * @pgto_base: page vector address of destination
 * @pgfrom_base: page vector address of source
 * @len: number of bytes to copy
 *
 * Note: the addresses pgto_base and pgfrom_base are both calculated in
 *       the same way:
 *            if a memory area starts at byte 'base' in page 'pages[i]',
 *            then its address is given as (i << PAGE_CACHE_SHIFT) + base
 * Also note: pgfrom_base must be < pgto_base, but the memory areas
 *	they point to may overlap.
 */
static void
_shift_data_right_pages(struct page **pages, size_t pgto_base,
			size_t pgfrom_base, size_t len)
{
	struct page **pgfrom, **pgto;
	char *vfrom, *vto;
	size_t copy;

	BUG_ON(pgto_base <= pgfrom_base);

	pgto_base += len;
	pgfrom_base += len;

	pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT);
	pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT);

	pgto_base &= ~PAGE_CACHE_MASK;
	pgfrom_base &= ~PAGE_CACHE_MASK;

	do {
		/* Are any pointers crossing a page boundary? */
		if (pgto_base == 0) {
			pgto_base = PAGE_CACHE_SIZE;
			pgto--;
		}
		if (pgfrom_base == 0) {
			pgfrom_base = PAGE_CACHE_SIZE;
			pgfrom--;
		}

		copy = len;
		if (copy > pgto_base)
			copy = pgto_base;
		if (copy > pgfrom_base)
			copy = pgfrom_base;
		pgto_base -= copy;
		pgfrom_base -= copy;

		vto = kmap_atomic(*pgto);
		vfrom = kmap_atomic(*pgfrom);
		memmove(vto + pgto_base, vfrom + pgfrom_base, copy);
		flush_dcache_page(*pgto);
		kunmap_atomic(vfrom);
		kunmap_atomic(vto);

	} while ((len -= copy) != 0);
}
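
/*
 * Worked example (illustrative only) of the "page vector address" used
 * above: with 4KiB pages, a memory area starting at byte 100 of pages[3]
 * has address (3 << PAGE_CACHE_SHIFT) + 100 = 12388. Shifting the 16
 * bytes at that address right by 8 bytes within the same page vector:
 *
 *	_shift_data_right_pages(pages, 12396, 12388, 16);
 *
 * Because the copy runs backwards from the ends of both areas, the
 * overlapping ranges are handled correctly, as with memmove().
 */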

/**
 * _copy_to_pages
 * @pages: array of pages
 * @pgbase: page vector address of destination
 * @p: pointer to source data
 * @len: length
 *
 * Copies data from an arbitrary memory location into an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
{
	struct page **pgto;
	char *vto;
	size_t copy;

	pgto = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	for (;;) {
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vto = kmap_atomic(*pgto);
		memcpy(vto + pgbase, p, copy);
		kunmap_atomic(vto);

		len -= copy;
		if (len == 0)
			break;

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			flush_dcache_page(*pgto);
			pgbase = 0;
			pgto++;
		}
		p += copy;
	}
	flush_dcache_page(*pgto);
}

/**
 * _copy_from_pages
 * @p: pointer to destination
 * @pages: array of pages
 * @pgbase: offset of source data
 * @len: length
 *
 * Copies data into an arbitrary memory location from an array of pages
 * The copy is assumed to be non-overlapping.
 */
void
_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
{
	struct page **pgfrom;
	char *vfrom;
	size_t copy;

	pgfrom = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	do {
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vfrom = kmap_atomic(*pgfrom);
		memcpy(p, vfrom + pgbase, copy);
		kunmap_atomic(vfrom);

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			pgbase = 0;
			pgfrom++;
		}
		p += copy;

	} while ((len -= copy) != 0);
}
EXPORT_SYMBOL_GPL(_copy_from_pages);

/**
 * xdr_shrink_bufhead
 * @buf: xdr_buf
 * @len: bytes to remove from buf->head[0]
 *
 * Shrinks XDR buffer's header kvec buf->head[0] by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the inlined pages and/or the tail.
 */
static void
xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
{
	struct kvec *head, *tail;
	size_t copy, offs;
	unsigned int pglen = buf->page_len;

	tail = buf->tail;
	head = buf->head;
	BUG_ON(len > head->iov_len);

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		if (tail->iov_len > len) {
			copy = tail->iov_len - len;
			memmove((char *)tail->iov_base + len,
					tail->iov_base, copy);
		}
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > pglen)
			copy = pglen;
		offs = len - copy;
		if (offs >= tail->iov_len)
			copy = 0;
		else if (copy > tail->iov_len - offs)
			copy = tail->iov_len - offs;
		if (copy != 0)
			_copy_from_pages((char *)tail->iov_base + offs,
					buf->pages,
					buf->page_base + pglen + offs - len,
					copy);
		/* Do we also need to copy data from the head into the tail ? */
		if (len > pglen) {
			offs = copy = len - pglen;
			if (copy > tail->iov_len)
				copy = tail->iov_len;
			memcpy(tail->iov_base,
					(char *)head->iov_base +
					head->iov_len - offs,
					copy);
		}
	}
	/* Now handle pages */
	if (pglen != 0) {
		if (pglen > len)
			_shift_data_right_pages(buf->pages,
					buf->page_base + len,
					buf->page_base,
					pglen - len);
		copy = len;
		if (len > pglen)
			copy = pglen;
		_copy_to_pages(buf->pages, buf->page_base,
				(char *)head->iov_base + head->iov_len - len,
				copy);
	}
	head->iov_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}

/**
 * xdr_shrink_pagelen
 * @buf: xdr_buf
 * @len: bytes to remove from buf->pages
 *
 * Shrinks XDR buffer's page array buf->pages by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the tail.
 */
static void
xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
{
	struct kvec *tail;
	size_t copy;
	unsigned int pglen = buf->page_len;
	unsigned int tailbuf_len;

	tail = buf->tail;
	BUG_ON(len > pglen);

	tailbuf_len = buf->buflen - buf->head->iov_len - buf->page_len;

	/* Shift the tail first */
	if (tailbuf_len != 0) {
		unsigned int free_space = tailbuf_len - tail->iov_len;

		if (len < free_space)
			free_space = len;
		tail->iov_len += free_space;

		copy = len;
		if (tail->iov_len > len) {
			char *p = (char *)tail->iov_base + len;
			memmove(p, tail->iov_base, tail->iov_len - len);
		} else
			copy = tail->iov_len;
		/* Copy from the inlined pages into the tail */
		_copy_from_pages((char *)tail->iov_base,
				buf->pages, buf->page_base + pglen - len,
				copy);
	}
	buf->page_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}

void
xdr_shift_buf(struct xdr_buf *buf, size_t len)
{
	xdr_shrink_bufhead(buf, len);
}
EXPORT_SYMBOL_GPL(xdr_shift_buf);

/**
 * xdr_stream_pos - Return the current offset from the start of the xdr_stream
 * @xdr: pointer to struct xdr_stream
 */
unsigned int xdr_stream_pos(const struct xdr_stream *xdr)
{
	return (unsigned int)(XDR_QUADLEN(xdr->buf->len) - xdr->nwords) << 2;
}
EXPORT_SYMBOL_GPL(xdr_stream_pos);

/**
 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer in which to encode data
 * @p: current pointer inside XDR buffer
 *
 * Note: at the moment the RPC client only passes the length of our
 * scratch buffer in the xdr_buf's header kvec. Previously this
 * meant we needed to call xdr_adjust_iovec() after encoding the
 * data. With the new scheme, the xdr_stream manages the details
 * of the buffer length, and takes care of adjusting the kvec
 * length for us.
 */
void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
	struct kvec *iov = buf->head;
	int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;

	BUG_ON(scratch_len < 0);
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = (__be32 *)((char *)iov->iov_base + iov->iov_len);
	xdr->end = (__be32 *)((char *)iov->iov_base + scratch_len);
	BUG_ON(iov->iov_len > scratch_len);

	if (p != xdr->p && p != NULL) {
		size_t len;

		BUG_ON(p < xdr->p || p > xdr->end);
		len = (char *)p - (char *)xdr->p;
		xdr->p = p;
		buf->len += len;
		iov->iov_len += len;
	}
}
EXPORT_SYMBOL_GPL(xdr_init_encode);

/**
 * xdr_reserve_space - Reserve buffer space for sending
 * @xdr: pointer to xdr_stream
 * @nbytes: number of bytes to reserve
 *
 * Checks that we have enough buffer space to encode 'nbytes' more
 * bytes of data. If so, update the total xdr_buf length, and
 * adjust the length of the current kvec.
 */
__be32 *xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p = xdr->p;
	__be32 *q;

	/* align nbytes on the next 32-bit boundary */
	nbytes += 3;
	nbytes &= ~3;
	q = p + (nbytes >> 2);
	if (unlikely(q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	xdr->iov->iov_len += nbytes;
	xdr->buf->len += nbytes;
	return p;
}
EXPORT_SYMBOL_GPL(xdr_reserve_space);
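
/*
 * Example (illustrative only) of the typical encode pattern: initialize
 * the stream over a prepared xdr_buf, reserve space, then write fields.
 * The 'argp' fields are hypothetical:
 *
 *	struct xdr_stream xdr;
 *	__be32 *p;
 *
 *	xdr_init_encode(&xdr, &req->rq_snd_buf, NULL);
 *	p = xdr_reserve_space(&xdr, 4 + 4);
 *	if (p == NULL)
 *		return -EMSGSIZE;	// no room left in the head kvec
 *	*p++ = cpu_to_be32(argp->fileid);
 *	*p++ = cpu_to_be32(argp->count);
 */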

/**
 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
 * @xdr: pointer to xdr_stream
 * @pages: list of pages
 * @base: offset of first byte
 * @len: length of data in bytes
 */
void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov = buf->tail;

	buf->pages = pages;
	buf->page_base = base;
	buf->page_len = len;

	iov->iov_base = (char *)xdr->p;
	iov->iov_len = 0;
	xdr->iov = iov;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		BUG_ON(xdr->p >= xdr->end);
		iov->iov_base = (char *)xdr->p + (len & 3);
		iov->iov_len += pad;
		len += pad;
		*xdr->p++ = 0;
	}
	buf->buflen += len;
	buf->len += len;
}
EXPORT_SYMBOL_GPL(xdr_write_pages);

static void xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov,
		unsigned int len)
{
	if (len > iov->iov_len)
		len = iov->iov_len;
	xdr->p = (__be32*)iov->iov_base;
	xdr->end = (__be32*)(iov->iov_base + len);
	xdr->iov = iov;
	xdr->page_ptr = NULL;
}

static int xdr_set_page_base(struct xdr_stream *xdr,
		unsigned int base, unsigned int len)
{
	unsigned int pgnr;
	unsigned int maxlen;
	unsigned int pgoff;
	unsigned int pgend;
	void *kaddr;

	maxlen = xdr->buf->page_len;
	if (base >= maxlen)
		return -EINVAL;
	maxlen -= base;
	if (len > maxlen)
		len = maxlen;

	base += xdr->buf->page_base;

	pgnr = base >> PAGE_SHIFT;
	xdr->page_ptr = &xdr->buf->pages[pgnr];
	kaddr = page_address(*xdr->page_ptr);

	pgoff = base & ~PAGE_MASK;
	xdr->p = (__be32*)(kaddr + pgoff);

	pgend = pgoff + len;
	if (pgend > PAGE_SIZE)
		pgend = PAGE_SIZE;
	xdr->end = (__be32*)(kaddr + pgend);
	xdr->iov = NULL;
	return 0;
}

static void xdr_set_next_page(struct xdr_stream *xdr)
{
	unsigned int newbase;

	newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT;
	newbase -= xdr->buf->page_base;

	if (xdr_set_page_base(xdr, newbase, PAGE_SIZE) < 0)
		xdr_set_iov(xdr, xdr->buf->tail, xdr->buf->len);
}

static bool xdr_set_next_buffer(struct xdr_stream *xdr)
{
	if (xdr->page_ptr != NULL)
		xdr_set_next_page(xdr);
	else if (xdr->iov == xdr->buf->head) {
		if (xdr_set_page_base(xdr, 0, PAGE_SIZE) < 0)
			xdr_set_iov(xdr, xdr->buf->tail, xdr->buf->len);
	}
	return xdr->p != xdr->end;
}

/**
 * xdr_init_decode - Initialize an xdr_stream for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @p: current pointer inside XDR buffer
 */
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
	xdr->buf = buf;
	xdr->scratch.iov_base = NULL;
	xdr->scratch.iov_len = 0;
	xdr->nwords = XDR_QUADLEN(buf->len);
	if (buf->head[0].iov_len != 0)
		xdr_set_iov(xdr, buf->head, buf->len);
	else if (buf->page_len != 0)
		xdr_set_page_base(xdr, 0, buf->len);
	if (p != NULL && p > xdr->p && xdr->end >= p) {
		xdr->nwords -= p - xdr->p;
		xdr->p = p;
	}
}
EXPORT_SYMBOL_GPL(xdr_init_decode);

/**
 * xdr_init_decode_pages - Initialize an xdr_stream for decoding into pages
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @pages: list of pages to decode into
 * @len: length in bytes of buffer in pages
 */
void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
			   struct page **pages, unsigned int len)
{
	memset(buf, 0, sizeof(*buf));
	buf->pages = pages;
	buf->page_len = len;
	buf->buflen = len;
	buf->len = len;
	xdr_init_decode(xdr, buf, NULL);
}
EXPORT_SYMBOL_GPL(xdr_init_decode_pages);

static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	unsigned int nwords = XDR_QUADLEN(nbytes);
	__be32 *p = xdr->p;
	__be32 *q = p + nwords;

	if (unlikely(nwords > xdr->nwords || q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	xdr->nwords -= nwords;
	return p;
}

/**
 * xdr_set_scratch_buffer - Attach a scratch buffer for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to an empty buffer
 * @buflen: size of 'buf'
 *
 * The scratch buffer is used when decoding from an array of pages.
 * If an xdr_inline_decode() call spans across page boundaries, then
 * we copy the data into the scratch buffer in order to allow linear
 * access.
 */
void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen)
{
	xdr->scratch.iov_base = buf;
	xdr->scratch.iov_len = buflen;
}
EXPORT_SYMBOL_GPL(xdr_set_scratch_buffer);

static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p;
	void *cpdest = xdr->scratch.iov_base;
	size_t cplen = (char *)xdr->end - (char *)xdr->p;

	if (nbytes > xdr->scratch.iov_len)
		return NULL;
	memcpy(cpdest, xdr->p, cplen);
	cpdest += cplen;
	nbytes -= cplen;
	if (!xdr_set_next_buffer(xdr))
		return NULL;
	p = __xdr_inline_decode(xdr, nbytes);
	if (p == NULL)
		return NULL;
	memcpy(cpdest, p, nbytes);
	return xdr->scratch.iov_base;
}

/**
 * xdr_inline_decode - Retrieve XDR data to decode
 * @xdr: pointer to xdr_stream struct
 * @nbytes: number of bytes of data to decode
 *
 * Check if the input buffer is long enough to enable us to decode
 * 'nbytes' more bytes of data starting at the current position.
 * If so return the current pointer, then update the current
 * pointer position.
 */
__be32 *xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p;

	if (nbytes == 0)
		return xdr->p;
	if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
		return NULL;
	p = __xdr_inline_decode(xdr, nbytes);
	if (p != NULL)
		return p;
	return xdr_copy_to_scratch(xdr, nbytes);
}
EXPORT_SYMBOL_GPL(xdr_inline_decode);
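
/*
 * Example (illustrative only) of the typical decode pattern. A scratch
 * buffer lets xdr_inline_decode() return a linear view even when the
 * requested bytes straddle a page boundary; 'scratch' and the 'res'
 * fields are hypothetical:
 *
 *	struct xdr_stream xdr;
 *	char scratch[64];
 *	__be32 *p;
 *
 *	xdr_init_decode(&xdr, &req->rq_rcv_buf, NULL);
 *	xdr_set_scratch_buffer(&xdr, scratch, sizeof(scratch));
 *	p = xdr_inline_decode(&xdr, 8);
 *	if (p == NULL)
 *		return -EIO;	// reply shorter than expected
 *	res->status = be32_to_cpup(p++);
 *	res->count = be32_to_cpup(p);
 */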

static unsigned int xdr_align_pages(struct xdr_stream *xdr, unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov;
	unsigned int nwords = XDR_QUADLEN(len);
	unsigned int cur = xdr_stream_pos(xdr);

	if (xdr->nwords == 0)
		return 0;
	/* Realign pages to current pointer position */
	iov = buf->head;
	if (iov->iov_len > cur) {
		xdr_shrink_bufhead(buf, iov->iov_len - cur);
		xdr->nwords = XDR_QUADLEN(buf->len - cur);
	}

	if (nwords > xdr->nwords) {
		nwords = xdr->nwords;
		len = nwords << 2;
	}
	if (buf->page_len <= len)
		len = buf->page_len;
	else if (nwords < xdr->nwords) {
		/* Truncate page data and move it into the tail */
		xdr_shrink_pagelen(buf, buf->page_len - len);
		xdr->nwords = XDR_QUADLEN(buf->len - cur);
	}
	return len;
}

/**
 * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[].
 *
 * Returns the number of XDR encoded bytes now contained in the pages
 */
unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov;
	unsigned int nwords;
	unsigned int end;
	unsigned int padding;

	len = xdr_align_pages(xdr, len);
	if (len == 0)
		return 0;
	nwords = XDR_QUADLEN(len);
	padding = (nwords << 2) - len;
	xdr->iov = iov = buf->tail;
	/* Compute remaining message length. */
	end = ((xdr->nwords - nwords) << 2) + padding;
	if (end > iov->iov_len)
		end = iov->iov_len;

	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	xdr->p = (__be32 *)((char *)iov->iov_base + padding);
	xdr->end = (__be32 *)((char *)iov->iov_base + end);
	xdr->page_ptr = NULL;
	xdr->nwords = XDR_QUADLEN(end - padding);
	return len;
}
EXPORT_SYMBOL_GPL(xdr_read_pages);
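
/*
 * Example (illustrative only): decoding a READ-style reply whose opaque
 * payload was received into buf->pages. After the fixed-size fields have
 * been pulled from the head with xdr_inline_decode(), align the payload
 * so the stream continues in the tail; 'count' is the byte count decoded
 * from the reply:
 *
 *	unsigned int recvd;
 *
 *	recvd = xdr_read_pages(&xdr, count);
 *	if (recvd < count)
 *		... short read: only 'recvd' payload bytes arrived ...
 */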

/**
 * xdr_enter_page - decode data from the XDR page
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[]. The current pointer is then
 * repositioned at the beginning of the first XDR page.
 */
void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
{
	len = xdr_align_pages(xdr, len);
	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	if (len != 0)
		xdr_set_page_base(xdr, 0, len);
}
EXPORT_SYMBOL_GPL(xdr_enter_page);

static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};

void
xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
{
	buf->head[0] = *iov;
	buf->tail[0] = empty_iov;
	buf->page_len = 0;
	buf->buflen = buf->len = iov->iov_len;
}
EXPORT_SYMBOL_GPL(xdr_buf_from_iov);

/* Sets subbuf to the portion of buf of length len beginning base bytes
 * from the start of buf. Returns -1 if base or length are out of bounds. */
int
xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
		   unsigned int base, unsigned int len)
{
	subbuf->buflen = subbuf->len = len;
	if (base < buf->head[0].iov_len) {
		subbuf->head[0].iov_base = buf->head[0].iov_base + base;
		subbuf->head[0].iov_len = min_t(unsigned int, len,
						buf->head[0].iov_len - base);
		len -= subbuf->head[0].iov_len;
		base = 0;
	} else {
		subbuf->head[0].iov_base = NULL;
		subbuf->head[0].iov_len = 0;
		base -= buf->head[0].iov_len;
	}

	if (base < buf->page_len) {
		subbuf->page_len = min(buf->page_len - base, len);
		base += buf->page_base;
		subbuf->page_base = base & ~PAGE_CACHE_MASK;
		subbuf->pages = &buf->pages[base >> PAGE_CACHE_SHIFT];
		len -= subbuf->page_len;
		base = 0;
	} else {
		base -= buf->page_len;
		subbuf->page_len = 0;
	}

	if (base < buf->tail[0].iov_len) {
		subbuf->tail[0].iov_base = buf->tail[0].iov_base + base;
		subbuf->tail[0].iov_len = min_t(unsigned int, len,
						buf->tail[0].iov_len - base);
		len -= subbuf->tail[0].iov_len;
		base = 0;
	} else {
		subbuf->tail[0].iov_base = NULL;
		subbuf->tail[0].iov_len = 0;
		base -= buf->tail[0].iov_len;
	}

	if (base || len)
		return -1;
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_subsegment);

static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
	unsigned int this_len;

	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
	memcpy(obj, subbuf->head[0].iov_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->page_len);
	if (this_len)
		_copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
	memcpy(obj, subbuf->tail[0].iov_base, this_len);
}

/* obj is assumed to point to allocated memory of size at least len: */
int read_bytes_from_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
	struct xdr_buf subbuf;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status != 0)
		return status;
	__read_bytes_from_xdr_buf(&subbuf, obj, len);
	return 0;
}
EXPORT_SYMBOL_GPL(read_bytes_from_xdr_buf);
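
/*
 * Example (illustrative only): copying an 8-byte verifier that starts at
 * byte offset 'base' of a buffer, regardless of whether it lives in the
 * head, the pages, the tail, or spans them:
 *
 *	u8 verifier[8];
 *
 *	if (read_bytes_from_xdr_buf(buf, base, verifier, sizeof(verifier)))
 *		return -EIO;	// offset/length outside the buffer
 */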

static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
	unsigned int this_len;

	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
	memcpy(subbuf->head[0].iov_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->page_len);
	if (this_len)
		_copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
	memcpy(subbuf->tail[0].iov_base, obj, this_len);
}

/* obj is assumed to point to allocated memory of size at least len: */
int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
	struct xdr_buf subbuf;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status != 0)
		return status;
	__write_bytes_to_xdr_buf(&subbuf, obj, len);
	return 0;
}
EXPORT_SYMBOL_GPL(write_bytes_to_xdr_buf);

int
xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj)
{
	__be32 raw;
	int status;

	status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
	if (status)
		return status;
	*obj = be32_to_cpu(raw);
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_decode_word);

int
xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
{
	__be32 raw = cpu_to_be32(obj);

	return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
}
EXPORT_SYMBOL_GPL(xdr_encode_word);

/* If the netobj starting offset bytes from the start of xdr_buf is contained
 * entirely in the head or the tail, set object to point to it; otherwise
 * try to find space for it at the end of the tail, copy it there, and
 * set obj to point to it. */
int xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, unsigned int offset)
{
	struct xdr_buf subbuf;

	if (xdr_decode_word(buf, offset, &obj->len))
		return -EFAULT;
	if (xdr_buf_subsegment(buf, &subbuf, offset + 4, obj->len))
		return -EFAULT;

	/* Is the obj contained entirely in the head? */
	obj->data = subbuf.head[0].iov_base;
	if (subbuf.head[0].iov_len == obj->len)
		return 0;
	/* ..or is the obj contained entirely in the tail? */
	obj->data = subbuf.tail[0].iov_base;
	if (subbuf.tail[0].iov_len == obj->len)
		return 0;

	/* use end of tail as storage for obj:
	 * (We don't copy to the beginning because then we'd have
	 * to worry about doing a potentially overlapping copy.
	 * This assumes the object is at most half the length of the
	 * tail.) */
	if (obj->len > buf->buflen - buf->len)
		return -ENOMEM;
	if (buf->tail[0].iov_len != 0)
		obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len;
	else
		obj->data = buf->head[0].iov_base + buf->head[0].iov_len;
	__read_bytes_from_xdr_buf(&subbuf, obj->data, obj->len);
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_read_netobj);

/* Returns 0 on success, or else a negative error code. */
static int
xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
		 struct xdr_array2_desc *desc, int encode)
{
	char *elem = NULL, *c;
	unsigned int copied = 0, todo, avail_here;
	struct page **ppages = NULL;
	int err;

	if (encode) {
		if (xdr_encode_word(buf, base, desc->array_len) != 0)
			return -EINVAL;
	} else {
		if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
		    desc->array_len > desc->array_maxlen ||
		    (unsigned long) base + 4 + desc->array_len *
				    desc->elem_size > buf->len)
			return -EINVAL;
	}
	base += 4;

	if (!desc->xcode)
		return 0;

	todo = desc->array_len * desc->elem_size;

	/* process head */
	if (todo && base < buf->head->iov_len) {
		c = buf->head->iov_base + base;
		avail_here = min_t(unsigned int, todo,
				   buf->head->iov_len - base);
		todo -= avail_here;

		while (avail_here >= desc->elem_size) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			avail_here -= desc->elem_size;
		}
		if (avail_here) {
			if (!elem) {
				elem = kmalloc(desc->elem_size, GFP_KERNEL);
				err = -ENOMEM;
				if (!elem)
					goto out;
			}
			if (encode) {
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
				memcpy(c, elem, avail_here);
			} else
				memcpy(elem, c, avail_here);
			copied = avail_here;
		}
		base = buf->head->iov_len;  /* align to start of pages */
	}

	/* process pages array */
	base -= buf->head->iov_len;
	if (todo && base < buf->page_len) {
		unsigned int avail_page;

		avail_here = min(todo, buf->page_len - base);
		todo -= avail_here;

		base += buf->page_base;
		ppages = buf->pages + (base >> PAGE_CACHE_SHIFT);
		base &= ~PAGE_CACHE_MASK;
		avail_page = min_t(unsigned int, PAGE_CACHE_SIZE - base,
					avail_here);
		c = kmap(*ppages) + base;

		while (avail_here) {
			avail_here -= avail_page;
			if (copied || avail_page < desc->elem_size) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
				avail_page -= l;
				c += l;
			}
			while (avail_page >= desc->elem_size) {
				err = desc->xcode(desc, c);
				if (err)
					goto out;
				c += desc->elem_size;
				avail_page -= desc->elem_size;
			}
			if (avail_page) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
			}
			if (avail_here) {
				kunmap(*ppages);
				ppages++;
				c = kmap(*ppages);
			}

			avail_page = min(avail_here,
				 (unsigned int) PAGE_CACHE_SIZE);
		}
		base = buf->page_len;  /* align to start of tail */
	}

	/* process tail */
	base -= buf->page_len;
	if (todo) {
		c = buf->tail->iov_base + base;
		if (copied) {
			unsigned int l = desc->elem_size - copied;

			if (encode)
				memcpy(c, elem + copied, l);
			else {
				memcpy(elem + copied, c, l);
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
			}
			todo -= l;
			c += l;
		}
		while (todo) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			todo -= desc->elem_size;
		}
	}
	err = 0;

out:
	kfree(elem);
	if (ppages)
		kunmap(*ppages);
	return err;
}

int
xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if (base >= buf->len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 0);
}
EXPORT_SYMBOL_GPL(xdr_decode_array2);

int
xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
	    buf->head->iov_len + buf->page_len + buf->tail->iov_len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 1);
}
EXPORT_SYMBOL_GPL(xdr_encode_array2);
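
/*
 * Example (illustrative only): decoding a counted array of fixed-size
 * elements with xdr_decode_array2(). The caller supplies an
 * xdr_array2_desc whose xcode callback is invoked once per element;
 * 'decode_one_entry' and its body are hypothetical:
 *
 *	static int decode_one_entry(struct xdr_array2_desc *desc, void *elem)
 *	{
 *		// 'elem' points at desc->elem_size contiguous bytes
 *		return 0;
 *	}
 *
 *	struct xdr_array2_desc desc = {
 *		.elem_size    = 12,
 *		.array_maxlen = 1024,
 *		.xcode        = decode_one_entry,
 *	};
 *
 *	err = xdr_decode_array2(buf, base, &desc);
 */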

int
xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
		int (*actor)(struct scatterlist *, void *), void *data)
{
	int i, ret = 0;
	unsigned int page_len, thislen, page_offset;
	struct scatterlist sg[1];

	sg_init_table(sg, 1);

	if (offset >= buf->head[0].iov_len) {
		offset -= buf->head[0].iov_len;
	} else {
		thislen = buf->head[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		if (ret)
			goto out;
		offset = 0;
		len -= thislen;
	}
	if (len == 0)
		goto out;

	if (offset >= buf->page_len) {
		offset -= buf->page_len;
	} else {
		page_len = buf->page_len - offset;
		if (page_len > len)
			page_len = len;
		len -= page_len;
		page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1);
		i = (offset + buf->page_base) >> PAGE_CACHE_SHIFT;
		thislen = PAGE_CACHE_SIZE - page_offset;
		do {
			if (thislen > page_len)
				thislen = page_len;
			sg_set_page(sg, buf->pages[i], thislen, page_offset);
			ret = actor(sg, data);
			if (ret)
				goto out;
			page_len -= thislen;
			i++;
			page_offset = 0;
			thislen = PAGE_CACHE_SIZE;
		} while (page_len != 0);
		offset = 0;
	}
	if (len == 0)
		goto out;
	if (offset < buf->tail[0].iov_len) {
		thislen = buf->tail[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		len -= thislen;
	}
	if (len != 0)
		ret = -EINVAL;
out:
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_process_buf);
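
/*
 * Example (illustrative only): an 'actor' callback for xdr_process_buf()
 * that simply sums the lengths of the scatterlist segments it is handed;
 * 'count_sg' and its use are hypothetical:
 *
 *	static int count_sg(struct scatterlist *sg, void *data)
 *	{
 *		unsigned int *total = data;
 *
 *		*total += sg->length;
 *		return 0;	// non-zero would abort the walk
 *	}
 *
 *	unsigned int total = 0;
 *	err = xdr_process_buf(buf, 0, buf->len, count_sg, &total);
 *
 * In-kernel users pass a checksumming actor so a single call covers the
 * head, the pages and the tail.
 */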