/*
 *  linux/fs/nfs/blocklayout/blocklayout.c
 *
 *  Module for the NFSv4.1 pNFS block layout driver.
 *
 *  Copyright (c) 2006 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Andy Adamson <andros@citi.umich.edu>
 *  Fred Isaman <iisaman@umich.edu>
 *
 * permission is granted to use, copy, create derivative works and
 * redistribute this software and such derivative works for any purpose,
 * so long as the name of the university of michigan is not used in
 * any advertising or publicity pertaining to the use or distribution
 * of this software without specific, written prior authorization. if
 * the above copyright notice or any other identification of the
 * university of michigan is included in any copy of any portion of
 * this software, then the disclaimer below must also be included.
 *
 * this software is provided as is, without representation from the
 * university of michigan as to its fitness for any purpose, and without
 * warranty by the university of michigan of any kind, either express
 * or implied, including without limitation the implied warranties of
 * merchantability and fitness for a particular purpose. the regents
 * of the university of michigan shall not be liable for any damages,
 * including special, indirect, incidental, or consequential damages,
 * with respect to any claim arising out or in connection with the use
 * of the software, even if it has been or is hereafter advised of the
 * possibility of such damages.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/bio.h>		/* struct bio */
#include <linux/prefetch.h>
#include <linux/pagevec.h>

#include "../pnfs.h"
#include "../nfs4session.h"
#include "../internal.h"
#include "blocklayout.h"

#define NFSDBG_FACILITY	NFSDBG_PNFS_LD

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Andy Adamson <andros@citi.umich.edu>");
MODULE_DESCRIPTION("The NFSv4.1 pNFS Block layout driver");

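/*
 * An extent counts as a hole when it has no data to read from the
 * device: PNFS_BLOCK_NONE_DATA extents always do, and
 * PNFS_BLOCK_INVALID_DATA extents do until be_tag marks them written.
 */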
static bool is_hole(struct pnfs_block_extent *be)
{
	switch (be->be_state) {
	case PNFS_BLOCK_NONE_DATA:
		return true;
	case PNFS_BLOCK_INVALID_DATA:
		return be->be_tag ? false : true;
	default:
		return false;
	}
}

/* The data we are handed might be spread across several bios. We need
 * to track when the last one is finished.
 */
struct parallel_io {
	struct kref refcnt;
	void (*pnfs_callback) (void *data);
	void *data;
};

static inline struct parallel_io *alloc_parallel(void *data)
{
	struct parallel_io *rv;

	rv = kmalloc(sizeof(*rv), GFP_NOFS);
	if (rv) {
		rv->data = data;
		kref_init(&rv->refcnt);
	}
	return rv;
}

static inline void get_parallel(struct parallel_io *p)
{
	kref_get(&p->refcnt);
}

static void destroy_parallel(struct kref *kref)
{
	struct parallel_io *p = container_of(kref, struct parallel_io, refcnt);

	dprintk("%s enter\n", __func__);
	p->pnfs_callback(p->data);
	kfree(p);
}

static inline void put_parallel(struct parallel_io *p)
{
	kref_put(&p->refcnt, destroy_parallel);
}

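/*
 * Submit the current bio, taking a parallel_io reference that the bio's
 * end_io handler will drop.  Always returns NULL so that callers can
 * reset their bio pointer in one step: bio = bl_submit_bio(rw, bio);
 */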
static struct bio *
bl_submit_bio(int rw, struct bio *bio)
{
	if (bio) {
		get_parallel(bio->bi_private);
		dprintk("%s submitting %s bio %u@%llu\n", __func__,
			rw == READ ? "read" : "write", bio->bi_iter.bi_size,
			(unsigned long long)bio->bi_iter.bi_sector);
		submit_bio(rw, bio);
	}
	return NULL;
}

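/*
 * Allocate a bio for up to npg pages.  If allocation fails while the
 * task is in memory-reclaim context (PF_MEMALLOC), retry with smaller
 * and smaller bios rather than fail outright.
 */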
static struct bio *
bl_alloc_init_bio(int npg, struct block_device *bdev, sector_t disk_sector,
		bio_end_io_t end_io, struct parallel_io *par)
{
	struct bio *bio;

	npg = min(npg, BIO_MAX_PAGES);
	bio = bio_alloc(GFP_NOIO, npg);
	if (!bio && (current->flags & PF_MEMALLOC)) {
		while (!bio && (npg /= 2))
			bio = bio_alloc(GFP_NOIO, npg);
	}

	if (bio) {
		bio->bi_iter.bi_sector = disk_sector;
		bio->bi_bdev = bdev;
		bio->bi_end_io = end_io;
		bio->bi_private = par;
	}
	return bio;
}

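/*
 * Add one page to the bio, translating the file-relative sector isect
 * to a physical disk address in two steps: first into the extent's
 * volume (isect + be_v_offset - be_f_offset, then << SECTOR_SHIFT to
 * bytes), then through the device map (+ disk_offset - start).  A new
 * bio is started whenever the target leaves the current mapping or the
 * page will not fit; *len is trimmed to what the mapping allows.
 */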
static struct bio *
do_add_page_to_bio(struct bio *bio, int npg, int rw, sector_t isect,
		struct page *page, struct pnfs_block_dev_map *map,
		struct pnfs_block_extent *be, bio_end_io_t end_io,
		struct parallel_io *par, unsigned int offset, int *len)
{
	struct pnfs_block_dev *dev =
		container_of(be->be_device, struct pnfs_block_dev, node);
	u64 disk_addr, end;

	dprintk("%s: npg %d rw %d isect %llu offset %u len %d\n", __func__,
		npg, rw, (unsigned long long)isect, offset, *len);

	/* translate to device offset */
	isect += be->be_v_offset;
	isect -= be->be_f_offset;

	/* translate to physical disk offset */
	disk_addr = (u64)isect << SECTOR_SHIFT;
	if (disk_addr < map->start || disk_addr >= map->start + map->len) {
		if (!dev->map(dev, disk_addr, map))
			return ERR_PTR(-EIO);
		bio = bl_submit_bio(rw, bio);
	}
	disk_addr += map->disk_offset;
	disk_addr -= map->start;

	/* limit length to what the device mapping allows */
	end = disk_addr + *len;
	if (end >= map->start + map->len)
		*len = map->start + map->len - disk_addr;

retry:
	if (!bio) {
		bio = bl_alloc_init_bio(npg, map->bdev,
				disk_addr >> SECTOR_SHIFT, end_io, par);
		if (!bio)
			return ERR_PTR(-ENOMEM);
	}
	if (bio_add_page(bio, page, *len, offset) < *len) {
		bio = bl_submit_bio(rw, bio);
		goto retry;
	}
	return bio;
}

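/*
 * Per-bio read completion: record the first error in the header and
 * mark the layout segment failed, then drop this bio's parallel_io
 * reference.
 */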
static void bl_end_io_read(struct bio *bio)
{
	struct parallel_io *par = bio->bi_private;

	if (bio->bi_error) {
		struct nfs_pgio_header *header = par->data;

		if (!header->pnfs_error)
			header->pnfs_error = -EIO;
		pnfs_set_lo_fail(header->lseg);
	}

	bio_put(bio);
	put_parallel(par);
}

static void bl_read_cleanup(struct work_struct *work)
{
	struct rpc_task *task;
	struct nfs_pgio_header *hdr;

	dprintk("%s enter\n", __func__);
	task = container_of(work, struct rpc_task, u.tk_work);
	hdr = container_of(task, struct nfs_pgio_header, task);
	pnfs_ld_read_done(hdr);
}

static void
bl_end_par_io_read(void *data)
{
	struct nfs_pgio_header *hdr = data;

	hdr->task.tk_status = hdr->pnfs_error;
	INIT_WORK(&hdr->task.u.tk_work, bl_read_cleanup);
	schedule_work(&hdr->task.u.tk_work);
}

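/*
 * Read the pages in header->args by walking the matching extents and
 * issuing bios directly to the block device; holes are satisfied by
 * zero-filling the page without touching the device.  Returns
 * PNFS_NOT_ATTEMPTED, so the read is redone through the MDS, only if
 * the parallel_io tracking structure cannot be allocated.
 */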
static enum pnfs_try_status
bl_read_pagelist(struct nfs_pgio_header *header)
{
	struct pnfs_block_layout *bl = BLK_LSEG2EXT(header->lseg);
	struct pnfs_block_dev_map map = { .start = NFS4_MAX_UINT64 };
	struct bio *bio = NULL;
	struct pnfs_block_extent be;
	sector_t isect, extent_length = 0;
	struct parallel_io *par;
	loff_t f_offset = header->args.offset;
	size_t bytes_left = header->args.count;
	unsigned int pg_offset = header->args.pgbase, pg_len;
	struct page **pages = header->args.pages;
	int pg_index = header->args.pgbase >> PAGE_CACHE_SHIFT;
	const bool is_dio = (header->dreq != NULL);
	struct blk_plug plug;
	int i;

	dprintk("%s enter nr_pages %u offset %lld count %u\n", __func__,
		header->page_array.npages, f_offset,
		(unsigned int)header->args.count);

	par = alloc_parallel(header);
	if (!par)
		return PNFS_NOT_ATTEMPTED;
	par->pnfs_callback = bl_end_par_io_read;

	blk_start_plug(&plug);

	isect = (sector_t) (f_offset >> SECTOR_SHIFT);
	/* Code assumes extents are page-aligned */
	for (i = pg_index; i < header->page_array.npages; i++) {
		if (extent_length <= 0) {
			/* We've used up the previous extent */
			bio = bl_submit_bio(READ, bio);

			/* Get the next one */
			if (!ext_tree_lookup(bl, isect, &be, false)) {
				header->pnfs_error = -EIO;
				goto out;
			}
			extent_length = be.be_length - (isect - be.be_f_offset);
		}

		if (is_dio) {
			if (pg_offset + bytes_left > PAGE_CACHE_SIZE)
				pg_len = PAGE_CACHE_SIZE - pg_offset;
			else
				pg_len = bytes_left;
		} else {
			BUG_ON(pg_offset != 0);
			pg_len = PAGE_CACHE_SIZE;
		}

		if (is_hole(&be)) {
			bio = bl_submit_bio(READ, bio);
			/* Fill hole w/ zeroes w/o accessing device */
			dprintk("%s Zeroing page for hole\n", __func__);
			zero_user_segment(pages[i], pg_offset, pg_len);

			/* invalidate map */
			map.start = NFS4_MAX_UINT64;
		} else {
			bio = do_add_page_to_bio(bio,
						 header->page_array.npages - i,
						 READ,
						 isect, pages[i], &map, &be,
						 bl_end_io_read, par,
						 pg_offset, &pg_len);
			if (IS_ERR(bio)) {
				header->pnfs_error = PTR_ERR(bio);
				bio = NULL;
				goto out;
			}
		}
		isect += (pg_len >> SECTOR_SHIFT);
		extent_length -= (pg_len >> SECTOR_SHIFT);
		f_offset += pg_len;
		bytes_left -= pg_len;
		pg_offset = 0;
	}
	if ((isect << SECTOR_SHIFT) >= header->inode->i_size) {
		header->res.eof = 1;
		header->res.count = header->inode->i_size - header->args.offset;
	} else {
		header->res.count = (isect << SECTOR_SHIFT) - header->args.offset;
	}
out:
	bl_submit_bio(READ, bio);
	blk_finish_plug(&plug);
	put_parallel(par);
	return PNFS_ATTEMPTED;
}

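/*
 * Per-bio write completion; like bl_end_io_read, record the first error
 * and fail the layout segment before dropping the parallel_io reference.
 */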
static void bl_end_io_write(struct bio *bio)
{
	struct parallel_io *par = bio->bi_private;
	struct nfs_pgio_header *header = par->data;

	if (bio->bi_error) {
		if (!header->pnfs_error)
			header->pnfs_error = -EIO;
		pnfs_set_lo_fail(header->lseg);
	}
	bio_put(bio);
	put_parallel(par);
}

/* Function scheduled for call during bl_end_par_io_write,
 * it marks sectors as written and extends the commitlist.
 */
static void bl_write_cleanup(struct work_struct *work)
{
	struct rpc_task *task = container_of(work, struct rpc_task, u.tk_work);
	struct nfs_pgio_header *hdr =
		container_of(task, struct nfs_pgio_header, task);

	dprintk("%s enter\n", __func__);

	if (likely(!hdr->pnfs_error)) {
		struct pnfs_block_layout *bl = BLK_LSEG2EXT(hdr->lseg);
		u64 start = hdr->args.offset & (loff_t)PAGE_CACHE_MASK;
		u64 end = (hdr->args.offset + hdr->args.count +
			PAGE_CACHE_SIZE - 1) & (loff_t)PAGE_CACHE_MASK;

		ext_tree_mark_written(bl, start >> SECTOR_SHIFT,
					(end - start) >> SECTOR_SHIFT);
	}

	pnfs_ld_write_done(hdr);
}

/* Called when last of bios associated with a bl_write_pagelist call finishes */
static void bl_end_par_io_write(void *data)
{
	struct nfs_pgio_header *hdr = data;

	hdr->task.tk_status = hdr->pnfs_error;
	hdr->verf.committed = NFS_FILE_SYNC;
	INIT_WORK(&hdr->task.u.tk_work, bl_write_cleanup);
	schedule_work(&hdr->task.u.tk_work);
}

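/*
 * Write the pages in header->args through the layout's RW extents.
 * Writes always cover whole pages (the offset is rounded down and
 * pg_len is PAGE_CACHE_SIZE), which the alignment checks in
 * bl_pg_init_write make safe.  Completed data is reported as
 * NFS_FILE_SYNC, so no separate commit-to-DS step is needed.
 */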
static enum pnfs_try_status
bl_write_pagelist(struct nfs_pgio_header *header, int sync)
{
	struct pnfs_block_layout *bl = BLK_LSEG2EXT(header->lseg);
	struct pnfs_block_dev_map map = { .start = NFS4_MAX_UINT64 };
	struct bio *bio = NULL;
	struct pnfs_block_extent be;
	sector_t isect, extent_length = 0;
	struct parallel_io *par = NULL;
	loff_t offset = header->args.offset;
	size_t count = header->args.count;
	struct page **pages = header->args.pages;
	int pg_index = header->args.pgbase >> PAGE_CACHE_SHIFT;
	unsigned int pg_len;
	struct blk_plug plug;
	int i;

	dprintk("%s enter, %zu@%lld\n", __func__, count, offset);

	/* At this point, header->page_array is a (sequential) list of nfs_pages.
	 * We want to write each, and if there is an error set pnfs_error
	 * to have it redone using nfs.
	 */
	par = alloc_parallel(header);
	if (!par)
		return PNFS_NOT_ATTEMPTED;
	par->pnfs_callback = bl_end_par_io_write;

	blk_start_plug(&plug);

	/* we always write out the whole page */
	offset = offset & (loff_t)PAGE_CACHE_MASK;
	isect = offset >> SECTOR_SHIFT;

	for (i = pg_index; i < header->page_array.npages; i++) {
		if (extent_length <= 0) {
			/* We've used up the previous extent */
			bio = bl_submit_bio(WRITE, bio);
			/* Get the next one */
			if (!ext_tree_lookup(bl, isect, &be, true)) {
				header->pnfs_error = -EINVAL;
				goto out;
			}

			extent_length = be.be_length - (isect - be.be_f_offset);
		}

		pg_len = PAGE_CACHE_SIZE;
		bio = do_add_page_to_bio(bio, header->page_array.npages - i,
					 WRITE, isect, pages[i], &map, &be,
					 bl_end_io_write, par,
					 0, &pg_len);
		if (IS_ERR(bio)) {
			header->pnfs_error = PTR_ERR(bio);
			bio = NULL;
			goto out;
		}

		offset += pg_len;
		count -= pg_len;
		isect += (pg_len >> SECTOR_SHIFT);
		extent_length -= (pg_len >> SECTOR_SHIFT);
	}

	header->res.count = header->args.count;
out:
	bl_submit_bio(WRITE, bio);
	blk_finish_plug(&plug);
	put_parallel(par);
	return PNFS_ATTEMPTED;
}

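/*
 * Free a layout header, dropping any extents still tracked in its
 * read/write extent trees.
 */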
static void bl_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct pnfs_block_layout *bl = BLK_LO2EXT(lo);
	int err;

	dprintk("%s enter\n", __func__);

	err = ext_tree_remove(bl, true, 0, LLONG_MAX);
	WARN_ON(err);

	kfree(bl);
}

static struct pnfs_layout_hdr *bl_alloc_layout_hdr(struct inode *inode,
						   gfp_t gfp_flags)
{
	struct pnfs_block_layout *bl;

	dprintk("%s enter\n", __func__);
	bl = kzalloc(sizeof(*bl), gfp_flags);
	if (!bl)
		return NULL;

	bl->bl_ext_rw = RB_ROOT;
	bl->bl_ext_ro = RB_ROOT;
	spin_lock_init(&bl->bl_ext_lock);

	return &bl->bl_layout;
}

static void bl_free_lseg(struct pnfs_layout_segment *lseg)
{
	dprintk("%s enter\n", __func__);
	kfree(lseg);
}

/* Tracks info needed to ensure extents in layout obey constraints of spec */
struct layout_verification {
	u32 mode;	/* R or RW */
	u64 start;	/* Expected start of next non-COW extent */
	u64 inval;	/* Start of INVAL coverage */
	u64 cowread;	/* End of COW read coverage */
};

/* Verify the extent meets the layout requirements of the pnfs-block draft,
 * section 2.3.1.
 */
static int verify_extent(struct pnfs_block_extent *be,
			 struct layout_verification *lv)
{
	if (lv->mode == IOMODE_READ) {
		if (be->be_state == PNFS_BLOCK_READWRITE_DATA ||
		    be->be_state == PNFS_BLOCK_INVALID_DATA)
			return -EIO;
		if (be->be_f_offset != lv->start)
			return -EIO;
		lv->start += be->be_length;
		return 0;
	}
	/* lv->mode == IOMODE_RW */
	if (be->be_state == PNFS_BLOCK_READWRITE_DATA) {
		if (be->be_f_offset != lv->start)
			return -EIO;
		if (lv->cowread > lv->start)
			return -EIO;
		lv->start += be->be_length;
		lv->inval = lv->start;
		return 0;
	} else if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
		if (be->be_f_offset != lv->start)
			return -EIO;
		lv->start += be->be_length;
		return 0;
	} else if (be->be_state == PNFS_BLOCK_READ_DATA) {
		if (be->be_f_offset > lv->start)
			return -EIO;
		if (be->be_f_offset < lv->inval)
			return -EIO;
		if (be->be_f_offset < lv->cowread)
			return -EIO;
		/* It looks like you might want to min this with lv->start,
		 * but you really don't.
		 */
		lv->inval = lv->inval + be->be_length;
		lv->cowread = be->be_f_offset + be->be_length;
		return 0;
	} else
		return -EIO;
}

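/*
 * Decode a 64-bit byte offset from the XDR stream and convert it to
 * 512-byte sectors; values that are not sector-aligned are rejected.
 */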
static int decode_sector_number(__be32 **rp, sector_t *sp)
{
	uint64_t s;

	*rp = xdr_decode_hyper(*rp, &s);
	if (s & 0x1ff) {
		printk(KERN_WARNING "NFS: %s: sector not aligned\n", __func__);
		return -1;
	}
	*sp = s >> SECTOR_SHIFT;
	return 0;
}

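/*
 * Decode one extent from the layout body.  As decoded below, each
 * extent is 28 + NFS4_DEVICEID4_SIZE bytes on the wire:
 *
 *	deviceid	(NFS4_DEVICEID4_SIZE bytes)
 *	file offset	(8 bytes, converted to sectors)
 *	length		(8 bytes, converted to sectors)
 *	volume offset	(8 bytes, converted to sectors)
 *	state		(4 bytes)
 *
 * The extent is verified against the layout constraints before being
 * queued on the caller's staging list.
 */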
static int
bl_alloc_extent(struct xdr_stream *xdr, struct pnfs_layout_hdr *lo,
		struct layout_verification *lv, struct list_head *extents,
		gfp_t gfp_mask)
{
	struct pnfs_block_extent *be;
	struct nfs4_deviceid id;
	int error;
	__be32 *p;

	p = xdr_inline_decode(xdr, 28 + NFS4_DEVICEID4_SIZE);
	if (!p)
		return -EIO;

	be = kzalloc(sizeof(*be), GFP_NOFS);
	if (!be)
		return -ENOMEM;

	memcpy(&id, p, NFS4_DEVICEID4_SIZE);
	p += XDR_QUADLEN(NFS4_DEVICEID4_SIZE);

	error = -EIO;
	be->be_device = nfs4_find_get_deviceid(NFS_SERVER(lo->plh_inode), &id,
						lo->plh_lc_cred, gfp_mask);
	if (!be->be_device)
		goto out_free_be;

	/*
	 * The next three values are read in as bytes, but stored in the
	 * extent structure in 512-byte granularity.
	 */
	if (decode_sector_number(&p, &be->be_f_offset) < 0)
		goto out_put_deviceid;
	if (decode_sector_number(&p, &be->be_length) < 0)
		goto out_put_deviceid;
	if (decode_sector_number(&p, &be->be_v_offset) < 0)
		goto out_put_deviceid;
	be->be_state = be32_to_cpup(p++);

	error = verify_extent(be, lv);
	if (error) {
		dprintk("%s: extent verification failed\n", __func__);
		goto out_put_deviceid;
	}

	list_add_tail(&be->be_list, extents);
	return 0;

out_put_deviceid:
	nfs4_put_deviceid_node(be->be_device);
out_free_be:
	kfree(be);
	return error;
}

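/*
 * Decode a LAYOUTGET result into a layout segment.  Extents are decoded
 * onto a staging list first so that a failure part-way through can
 * discard everything; on success they are inserted into the layout's
 * extent trees.  The extents must exactly cover the returned range.
 */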
static struct pnfs_layout_segment *
bl_alloc_lseg(struct pnfs_layout_hdr *lo, struct nfs4_layoutget_res *lgr,
		gfp_t gfp_mask)
{
	struct layout_verification lv = {
		.mode = lgr->range.iomode,
		.start = lgr->range.offset >> SECTOR_SHIFT,
		.inval = lgr->range.offset >> SECTOR_SHIFT,
		.cowread = lgr->range.offset >> SECTOR_SHIFT,
	};
	struct pnfs_block_layout *bl = BLK_LO2EXT(lo);
	struct pnfs_layout_segment *lseg;
	struct xdr_buf buf;
	struct xdr_stream xdr;
	struct page *scratch;
	int status, i;
	uint32_t count;
	__be32 *p;
	LIST_HEAD(extents);

	dprintk("---> %s\n", __func__);

	lseg = kzalloc(sizeof(*lseg), gfp_mask);
	if (!lseg)
		return ERR_PTR(-ENOMEM);

	status = -ENOMEM;
	scratch = alloc_page(gfp_mask);
	if (!scratch)
		goto out;

	xdr_init_decode_pages(&xdr, &buf,
			lgr->layoutp->pages, lgr->layoutp->len);
	xdr_set_scratch_buffer(&xdr, page_address(scratch), PAGE_SIZE);

	status = -EIO;
	p = xdr_inline_decode(&xdr, 4);
	if (unlikely(!p))
		goto out_free_scratch;

	count = be32_to_cpup(p++);
	dprintk("%s: number of extents %d\n", __func__, count);

	/*
	 * Decode individual extents, putting them in temporary staging area
	 * until whole layout is decoded to make error recovery easier.
	 */
	for (i = 0; i < count; i++) {
		status = bl_alloc_extent(&xdr, lo, &lv, &extents, gfp_mask);
		if (status)
			goto process_extents;
	}

	if (lgr->range.offset + lgr->range.length !=
			lv.start << SECTOR_SHIFT) {
		dprintk("%s Final length mismatch\n", __func__);
		status = -EIO;
		goto process_extents;
	}

	if (lv.start < lv.cowread) {
		dprintk("%s Final uncovered COW extent\n", __func__);
		status = -EIO;
	}

process_extents:
	while (!list_empty(&extents)) {
		struct pnfs_block_extent *be =
			list_first_entry(&extents, struct pnfs_block_extent,
					 be_list);
		list_del(&be->be_list);

		if (!status)
			status = ext_tree_insert(bl, be);

		if (status) {
			nfs4_put_deviceid_node(be->be_device);
			kfree(be);
		}
	}

out_free_scratch:
	__free_page(scratch);
out:
	dprintk("%s returns %d\n", __func__, status);
	if (status) {
		kfree(lseg);
		return ERR_PTR(status);
	}
	return lseg;
}

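/*
 * Drop the extents covered by a returned range from the extent trees;
 * unaligned ranges are ignored rather than partially returned.
 */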
static void
bl_return_range(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_range *range)
{
	struct pnfs_block_layout *bl = BLK_LO2EXT(lo);
	sector_t offset = range->offset >> SECTOR_SHIFT, end;

	if (range->offset % 8) {
		dprintk("%s: offset %lld not block size aligned\n",
			__func__, range->offset);
		return;
	}

	if (range->length != NFS4_MAX_UINT64) {
		if (range->length % 8) {
			dprintk("%s: length %lld not block size aligned\n",
				__func__, range->length);
			return;
		}

		end = offset + (range->length >> SECTOR_SHIFT);
	} else {
		end = round_down(NFS4_MAX_UINT64, PAGE_SIZE);
	}

	ext_tree_remove(bl, range->iomode & IOMODE_RW, offset, end);
}

static int
bl_prepare_layoutcommit(struct nfs4_layoutcommit_args *arg)
{
	return ext_tree_prepare_commit(arg);
}

static void
bl_cleanup_layoutcommit(struct nfs4_layoutcommit_data *lcdata)
{
	ext_tree_mark_committed(&lcdata->args, lcdata->res.status);
}

static int
bl_set_layoutdriver(struct nfs_server *server, const struct nfs_fh *fh)
{
	dprintk("%s enter\n", __func__);

	if (server->pnfs_blksize == 0) {
		dprintk("%s Server did not return blksize\n", __func__);
		return -EINVAL;
	}
	if (server->pnfs_blksize > PAGE_SIZE) {
		printk(KERN_ERR "%s: pNFS blksize %d not supported.\n",
			__func__, server->pnfs_blksize);
		return -EINVAL;
	}

	return 0;
}

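/*
 * Check whether a request may go through the block layout.  Buffered
 * writes are always accepted; direct I/O must be aligned to the given
 * alignment (callers pass SECTOR_SIZE for reads, PAGE_SIZE for writes),
 * with an exception for a short tail that ends exactly at i_size.
 */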
static bool
is_aligned_req(struct nfs_pageio_descriptor *pgio,
		struct nfs_page *req, unsigned int alignment)
{
	/*
	 * Always accept buffered writes, higher layers take care of the
	 * right alignment.
	 */
	if (pgio->pg_dreq == NULL)
		return true;

	if (!IS_ALIGNED(req->wb_offset, alignment))
		return false;

	if (IS_ALIGNED(req->wb_bytes, alignment))
		return true;

	if (req_offset(req) + req->wb_bytes == i_size_read(pgio->pg_inode)) {
		/*
		 * If the write goes up to the inode size, just write
		 * the full page. Data past the inode size is
		 * guaranteed to be zeroed by the higher level client
		 * code, and this behaviour is mandated by RFC 5663
		 * section 2.3.2.
		 */
		return true;
	}

	return false;
}

static void
bl_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	if (!is_aligned_req(pgio, req, SECTOR_SIZE)) {
		nfs_pageio_reset_read_mds(pgio);
		return;
	}

	pnfs_generic_pg_init_read(pgio, req);
}

/*
 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
 * of bytes (maximum @req->wb_bytes) that can be coalesced.
 */
static size_t
bl_pg_test_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
		struct nfs_page *req)
{
	if (!is_aligned_req(pgio, req, SECTOR_SIZE))
		return 0;
	return pnfs_generic_pg_test(pgio, prev, req);
}

/*
 * Return the number of contiguous bytes for a given inode
 * starting at page frame idx.
 */
static u64 pnfs_num_cont_bytes(struct inode *inode, pgoff_t idx)
{
	struct address_space *mapping = inode->i_mapping;
	pgoff_t end;

	/* Optimize common case that writes from 0 to end of file */
	end = DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE);
	if (end != inode->i_mapping->nrpages) {
		rcu_read_lock();
		end = page_cache_next_hole(mapping, idx + 1, ULONG_MAX);
		rcu_read_unlock();
	}

	if (!end)
		return i_size_read(inode) - (idx << PAGE_CACHE_SHIFT);
	else
		return (end - idx) << PAGE_CACHE_SHIFT;
}

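/*
 * Initialize a pageio descriptor for writes.  Unaligned direct writes
 * fall back to the MDS; otherwise the expected write size is the
 * remainder of the direct request, or for buffered I/O an estimate of
 * the contiguous dirty span from pnfs_num_cont_bytes().
 */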
static void
bl_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	u64 wb_size;

	if (!is_aligned_req(pgio, req, PAGE_SIZE)) {
		nfs_pageio_reset_write_mds(pgio);
		return;
	}

	if (pgio->pg_dreq == NULL)
		wb_size = pnfs_num_cont_bytes(pgio->pg_inode,
					      req->wb_index);
	else
		wb_size = nfs_dreq_bytes_left(pgio->pg_dreq);

	pnfs_generic_pg_init_write(pgio, req, wb_size);
}

/*
 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
 * of bytes (maximum @req->wb_bytes) that can be coalesced.
 */
static size_t
bl_pg_test_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
		 struct nfs_page *req)
{
	if (!is_aligned_req(pgio, req, PAGE_SIZE))
		return 0;
	return pnfs_generic_pg_test(pgio, prev, req);
}

static const struct nfs_pageio_ops bl_pg_read_ops = {
	.pg_init = bl_pg_init_read,
	.pg_test = bl_pg_test_read,
	.pg_doio = pnfs_generic_pg_readpages,
	.pg_cleanup = pnfs_generic_pg_cleanup,
};

static const struct nfs_pageio_ops bl_pg_write_ops = {
	.pg_init = bl_pg_init_write,
	.pg_test = bl_pg_test_write,
	.pg_doio = pnfs_generic_pg_writepages,
	.pg_cleanup = pnfs_generic_pg_cleanup,
};

static struct pnfs_layoutdriver_type blocklayout_type = {
	.id				= LAYOUT_BLOCK_VOLUME,
	.name				= "LAYOUT_BLOCK_VOLUME",
	.owner				= THIS_MODULE,
	.flags				= PNFS_LAYOUTRET_ON_SETATTR |
					  PNFS_READ_WHOLE_PAGE,
	.read_pagelist			= bl_read_pagelist,
	.write_pagelist			= bl_write_pagelist,
	.alloc_layout_hdr		= bl_alloc_layout_hdr,
	.free_layout_hdr		= bl_free_layout_hdr,
	.alloc_lseg			= bl_alloc_lseg,
	.free_lseg			= bl_free_lseg,
	.return_range			= bl_return_range,
	.prepare_layoutcommit		= bl_prepare_layoutcommit,
	.cleanup_layoutcommit		= bl_cleanup_layoutcommit,
	.set_layoutdriver		= bl_set_layoutdriver,
	.alloc_deviceid_node		= bl_alloc_deviceid_node,
	.free_deviceid_node		= bl_free_deviceid_node,
	.pg_read_ops			= &bl_pg_read_ops,
	.pg_write_ops			= &bl_pg_write_ops,
	.sync				= pnfs_generic_sync,
};

static int __init nfs4blocklayout_init(void)
{
	int ret;

	dprintk("%s: NFSv4 Block Layout Driver Registering...\n", __func__);

	ret = pnfs_register_layoutdriver(&blocklayout_type);
	if (ret)
		goto out;
	ret = bl_init_pipefs();
	if (ret)
		goto out_unregister;
	return 0;

out_unregister:
	pnfs_unregister_layoutdriver(&blocklayout_type);
out:
	return ret;
}

static void __exit nfs4blocklayout_exit(void)
{
	dprintk("%s: NFSv4 Block Layout Driver Unregistering...\n",
		__func__);

	bl_cleanup_pipefs();
	pnfs_unregister_layoutdriver(&blocklayout_type);
}

MODULE_ALIAS("nfs-layouttype4-3");

module_init(nfs4blocklayout_init);
module_exit(nfs4blocklayout_exit);