/*
 * linux/fs/nfs/blocklayout/blocklayout.c
 *
 * Module for the NFSv4.1 pNFS block layout driver.
 *
 * Copyright (c) 2006 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Andy Adamson <andros@citi.umich.edu>
 * Fred Isaman <iisaman@umich.edu>
 *
 * permission is granted to use, copy, create derivative works and
 * redistribute this software and such derivative works for any purpose,
 * so long as the name of the university of michigan is not used in
 * any advertising or publicity pertaining to the use or distribution
 * of this software without specific, written prior authorization. if
 * the above copyright notice or any other identification of the
 * university of michigan is included in any copy of any portion of
 * this software, then the disclaimer below must also be included.
 *
 * this software is provided as is, without representation from the
 * university of michigan as to its fitness for any purpose, and without
 * warranty by the university of michigan of any kind, either express
 * or implied, including without limitation the implied warranties of
 * merchantability and fitness for a particular purpose. the regents
 * of the university of michigan shall not be liable for any damages,
 * including special, indirect, incidental, or consequential damages,
 * with respect to any claim arising out or in connection with the use
 * of the software, even if it has been or is hereafter advised of the
 * possibility of such damages.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/bio.h>		/* struct bio */
#include <linux/buffer_head.h>	/* various write calls */
#include <linux/prefetch.h>
#include <linux/pagevec.h>

#include "../pnfs.h"
#include "../nfs4session.h"
#include "../internal.h"
#include "blocklayout.h"

#define NFSDBG_FACILITY	NFSDBG_PNFS_LD

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Andy Adamson <andros@citi.umich.edu>");
MODULE_DESCRIPTION("The NFSv4.1 pNFS Block layout driver");

static void print_page(struct page *page)
{
	dprintk("PRINTPAGE page %p\n", page);
	dprintk(" PagePrivate %d\n", PagePrivate(page));
	dprintk(" PageUptodate %d\n", PageUptodate(page));
	dprintk(" PageError %d\n", PageError(page));
	dprintk(" PageDirty %d\n", PageDirty(page));
	dprintk(" PageReferenced %d\n", PageReferenced(page));
	dprintk(" PageLocked %d\n", PageLocked(page));
	dprintk(" PageWriteback %d\n", PageWriteback(page));
	dprintk(" PageMappedToDisk %d\n", PageMappedToDisk(page));
	dprintk("\n");
}

/* Given the be associated with isect, determine if page data needs to be
 * initialized.
 */
static int is_hole(struct pnfs_block_extent *be, sector_t isect)
{
	if (be->be_state == PNFS_BLOCK_NONE_DATA)
		return 1;
	else if (be->be_state != PNFS_BLOCK_INVALID_DATA)
		return 0;
	else
		return !bl_is_sector_init(be->be_inval, isect);
}

/* Given the be associated with isect, determine if page data can be
 * written to disk.
 */
static int is_writable(struct pnfs_block_extent *be, sector_t isect)
{
	return (be->be_state == PNFS_BLOCK_READWRITE_DATA ||
		be->be_state == PNFS_BLOCK_INVALID_DATA);
}

/* The data we are handed might be spread across several bios.  We need
 * to track when the last one is finished.
 */
struct parallel_io {
	struct kref refcnt;
	void (*pnfs_callback) (void *data, int num_se);
	void *data;
	int bse_count;
};

static inline struct parallel_io *alloc_parallel(void *data)
{
	struct parallel_io *rv;

	rv = kmalloc(sizeof(*rv), GFP_NOFS);
	if (rv) {
		rv->data = data;
		kref_init(&rv->refcnt);
		rv->bse_count = 0;
	}
	return rv;
}

static inline void get_parallel(struct parallel_io *p)
{
	kref_get(&p->refcnt);
}

static void destroy_parallel(struct kref *kref)
{
	struct parallel_io *p = container_of(kref, struct parallel_io, refcnt);

	dprintk("%s enter\n", __func__);
	p->pnfs_callback(p->data, p->bse_count);
	kfree(p);
}

static inline void put_parallel(struct parallel_io *p)
{
	kref_put(&p->refcnt, destroy_parallel);
}

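/*
 * Reference-counting sketch for the struct above: the read/write
 * pagelist entry points allocate one parallel_io with an initial
 * reference, bl_submit_bio() takes an extra reference for every bio it
 * actually submits, each bio completion handler drops one reference,
 * and the entry point drops its own reference once it has finished
 * building bios.  The final put runs destroy_parallel(), which fires
 * pnfs_callback to complete the whole request.
 */
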
static struct bio *
bl_submit_bio(int rw, struct bio *bio)
{
	if (bio) {
		get_parallel(bio->bi_private);
		dprintk("%s submitting %s bio %u@%llu\n", __func__,
			rw == READ ? "read" : "write", bio->bi_iter.bi_size,
			(unsigned long long)bio->bi_iter.bi_sector);
		submit_bio(rw, bio);
	}
	return NULL;
}

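/*
 * A note on the bio helpers below: bl_alloc_init_bio() halves the
 * requested page count and retries the allocation when the caller is
 * in a memory-reclaim context (PF_MEMALLOC), and do_add_page_to_bio()
 * submits the current bio and starts a fresh one whenever
 * bio_add_page() cannot accept the whole range (for example because of
 * device queue limits).
 */
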
static struct bio *bl_alloc_init_bio(int npg, sector_t isect,
				     struct pnfs_block_extent *be,
				     void (*end_io)(struct bio *, int err),
				     struct parallel_io *par)
{
	struct bio *bio;

	npg = min(npg, BIO_MAX_PAGES);
	bio = bio_alloc(GFP_NOIO, npg);
	if (!bio && (current->flags & PF_MEMALLOC)) {
		while (!bio && (npg /= 2))
			bio = bio_alloc(GFP_NOIO, npg);
	}

	if (bio) {
		bio->bi_iter.bi_sector = isect - be->be_f_offset +
			be->be_v_offset;
		bio->bi_bdev = be->be_mdev;
		bio->bi_end_io = end_io;
		bio->bi_private = par;
	}
	return bio;
}

static struct bio *do_add_page_to_bio(struct bio *bio, int npg, int rw,
				      sector_t isect, struct page *page,
				      struct pnfs_block_extent *be,
				      void (*end_io)(struct bio *, int err),
				      struct parallel_io *par,
				      unsigned int offset, int len)
{
	isect = isect + (offset >> SECTOR_SHIFT);
	dprintk("%s: npg %d rw %d isect %llu offset %u len %d\n", __func__,
		npg, rw, (unsigned long long)isect, offset, len);
retry:
	if (!bio) {
		bio = bl_alloc_init_bio(npg, isect, be, end_io, par);
		if (!bio)
			return ERR_PTR(-ENOMEM);
	}
	if (bio_add_page(bio, page, len, offset) < len) {
		bio = bl_submit_bio(rw, bio);
		goto retry;
	}
	return bio;
}

static struct bio *bl_add_page_to_bio(struct bio *bio, int npg, int rw,
				      sector_t isect, struct page *page,
				      struct pnfs_block_extent *be,
				      void (*end_io)(struct bio *, int err),
				      struct parallel_io *par)
{
	return do_add_page_to_bio(bio, npg, rw, isect, page, be,
				  end_io, par, 0, PAGE_CACHE_SIZE);
}

/* This is basically copied from mpage_end_io_read */
static void bl_end_io_read(struct bio *bio, int err)
{
	struct parallel_io *par = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	if (!err)
		bio_for_each_segment_all(bvec, bio, i)
			SetPageUptodate(bvec->bv_page);

	if (err) {
		struct nfs_pgio_header *header = par->data;

		if (!header->pnfs_error)
			header->pnfs_error = -EIO;
		pnfs_set_lo_fail(header->lseg);
	}
	bio_put(bio);
	put_parallel(par);
}

static void bl_read_cleanup(struct work_struct *work)
{
	struct rpc_task *task;
	struct nfs_pgio_header *hdr;
	dprintk("%s enter\n", __func__);
	task = container_of(work, struct rpc_task, u.tk_work);
	hdr = container_of(task, struct nfs_pgio_header, task);
	pnfs_ld_read_done(hdr);
}

static void
bl_end_par_io_read(void *data, int unused)
{
	struct nfs_pgio_header *hdr = data;

	hdr->task.tk_status = hdr->pnfs_error;
	INIT_WORK(&hdr->task.u.tk_work, bl_read_cleanup);
	schedule_work(&hdr->task.u.tk_work);
}

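/*
 * Read path overview: bl_read_pagelist() walks the request's pages,
 * looks up the extent covering each page (taking a copy-on-write read
 * extent into account when one exists), zero-fills pages that fall in
 * holes, and queues the rest in bios.  Each bio completion and the
 * final reference drop funnel through bl_end_par_io_read(), which
 * hands the result back via pnfs_ld_read_done().
 */
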
static enum pnfs_try_status
bl_read_pagelist(struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_header *header = hdr;
	int i, hole;
	struct bio *bio = NULL;
	struct pnfs_block_extent *be = NULL, *cow_read = NULL;
	sector_t isect, extent_length = 0;
	struct parallel_io *par;
	loff_t f_offset = hdr->args.offset;
	size_t bytes_left = hdr->args.count;
	unsigned int pg_offset, pg_len;
	struct page **pages = hdr->args.pages;
	int pg_index = hdr->args.pgbase >> PAGE_CACHE_SHIFT;
	const bool is_dio = (header->dreq != NULL);
	struct blk_plug plug;

	dprintk("%s enter nr_pages %u offset %lld count %u\n", __func__,
		hdr->page_array.npages, f_offset,
		(unsigned int)hdr->args.count);

	par = alloc_parallel(hdr);
	if (!par)
		goto use_mds;
	par->pnfs_callback = bl_end_par_io_read;
	/* At this point, we can no longer jump to use_mds */

	blk_start_plug(&plug);

	isect = (sector_t) (f_offset >> SECTOR_SHIFT);
	/* Code assumes extents are page-aligned */
	for (i = pg_index; i < hdr->page_array.npages; i++) {
		if (extent_length <= 0) {
			/* We've used up the previous extent */
			bl_put_extent(be);
			bl_put_extent(cow_read);
			bio = bl_submit_bio(READ, bio);
			/* Get the next one */
			be = bl_find_get_extent(BLK_LSEG2EXT(header->lseg),
						isect, &cow_read);
			if (!be) {
				header->pnfs_error = -EIO;
				goto out;
			}
			extent_length = be->be_length -
				(isect - be->be_f_offset);
			if (cow_read) {
				sector_t cow_length = cow_read->be_length -
					(isect - cow_read->be_f_offset);
				extent_length = min(extent_length, cow_length);
			}
		}

		if (is_dio) {
			pg_offset = f_offset & ~PAGE_CACHE_MASK;
			if (pg_offset + bytes_left > PAGE_CACHE_SIZE)
				pg_len = PAGE_CACHE_SIZE - pg_offset;
			else
				pg_len = bytes_left;

			f_offset += pg_len;
			bytes_left -= pg_len;
			isect += (pg_offset >> SECTOR_SHIFT);
			extent_length -= (pg_offset >> SECTOR_SHIFT);
		} else {
			pg_offset = 0;
			pg_len = PAGE_CACHE_SIZE;
		}

		hole = is_hole(be, isect);
		if (hole && !cow_read) {
			bio = bl_submit_bio(READ, bio);
			/* Fill hole w/ zeroes w/o accessing device */
			dprintk("%s Zeroing page for hole\n", __func__);
			zero_user_segment(pages[i], pg_offset, pg_len);
			print_page(pages[i]);
			SetPageUptodate(pages[i]);
		} else {
			struct pnfs_block_extent *be_read;

			be_read = (hole && cow_read) ? cow_read : be;
			bio = do_add_page_to_bio(bio,
						 hdr->page_array.npages - i,
						 READ,
						 isect, pages[i], be_read,
						 bl_end_io_read, par,
						 pg_offset, pg_len);
			if (IS_ERR(bio)) {
				header->pnfs_error = PTR_ERR(bio);
				bio = NULL;
				goto out;
			}
		}
		isect += (pg_len >> SECTOR_SHIFT);
		extent_length -= (pg_len >> SECTOR_SHIFT);
	}
	if ((isect << SECTOR_SHIFT) >= header->inode->i_size) {
		hdr->res.eof = 1;
		hdr->res.count = header->inode->i_size - hdr->args.offset;
	} else {
		hdr->res.count = (isect << SECTOR_SHIFT) - hdr->args.offset;
	}
out:
	bl_put_extent(be);
	bl_put_extent(cow_read);
	bl_submit_bio(READ, bio);
	blk_finish_plug(&plug);
	put_parallel(par);
	return PNFS_ATTEMPTED;

 use_mds:
	dprintk("Giving up and using normal NFS\n");
	return PNFS_NOT_ATTEMPTED;
}

static void mark_extents_written(struct pnfs_block_layout *bl,
				 __u64 offset, __u32 count)
{
	sector_t isect, end;
	struct pnfs_block_extent *be;
	struct pnfs_block_short_extent *se;

	dprintk("%s(%llu, %u)\n", __func__, offset, count);
	if (count == 0)
		return;
	isect = (offset & (long)(PAGE_CACHE_MASK)) >> SECTOR_SHIFT;
	end = (offset + count + PAGE_CACHE_SIZE - 1) & (long)(PAGE_CACHE_MASK);
	end >>= SECTOR_SHIFT;
	while (isect < end) {
		sector_t len;
		be = bl_find_get_extent(bl, isect, NULL);
		BUG_ON(!be); /* FIXME */
		len = min(end, be->be_f_offset + be->be_length) - isect;
		if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
			se = bl_pop_one_short_extent(be->be_inval);
			BUG_ON(!se);
			bl_mark_for_commit(be, isect, len, se);
		}
		isect += len;
		bl_put_extent(be);
	}
}

static void bl_end_io_write_zero(struct bio *bio, int err)
{
	struct parallel_io *par = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		/* This is the zeroing page we added */
		end_page_writeback(bvec->bv_page);
		page_cache_release(bvec->bv_page);
	}

	if (unlikely(err)) {
		struct nfs_pgio_header *header = par->data;

		if (!header->pnfs_error)
			header->pnfs_error = -EIO;
		pnfs_set_lo_fail(header->lseg);
	}
	bio_put(bio);
	put_parallel(par);
}

static void bl_end_io_write(struct bio *bio, int err)
{
	struct parallel_io *par = bio->bi_private;
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct nfs_pgio_header *header = par->data;

	if (!uptodate) {
		if (!header->pnfs_error)
			header->pnfs_error = -EIO;
		pnfs_set_lo_fail(header->lseg);
	}
	bio_put(bio);
	put_parallel(par);
}

/* Function scheduled for call during bl_end_par_io_write,
 * it marks sectors as written and extends the commitlist.
 */
static void bl_write_cleanup(struct work_struct *work)
{
	struct rpc_task *task;
	struct nfs_pgio_header *hdr;
	dprintk("%s enter\n", __func__);
	task = container_of(work, struct rpc_task, u.tk_work);
	hdr = container_of(task, struct nfs_pgio_header, task);
	if (likely(!hdr->pnfs_error)) {
		/* Marks for LAYOUTCOMMIT */
		mark_extents_written(BLK_LSEG2EXT(hdr->lseg),
				     hdr->args.offset, hdr->args.count);
	}
	pnfs_ld_write_done(hdr);
}

/* Called when last of bios associated with a bl_write_pagelist call finishes */
static void bl_end_par_io_write(void *data, int num_se)
{
	struct nfs_pgio_header *hdr = data;

	if (unlikely(hdr->pnfs_error)) {
		bl_free_short_extents(&BLK_LSEG2EXT(hdr->lseg)->bl_inval,
					num_se);
	}

	hdr->task.tk_status = hdr->pnfs_error;
	hdr->verf.committed = NFS_FILE_SYNC;
	INIT_WORK(&hdr->task.u.tk_work, bl_write_cleanup);
	schedule_work(&hdr->task.u.tk_work);
}

/* FIXME STUB - mark intersection of layout and page as bad, so is not
 * used again.
 */
static void mark_bad_read(void)
{
	return;
}

/*
 * map_block:  map a requested I/O block (isect) into an offset in the LVM
 * block_device
 */
static void
map_block(struct buffer_head *bh, sector_t isect, struct pnfs_block_extent *be)
{
	dprintk("%s enter be=%p\n", __func__, be);

	set_buffer_mapped(bh);
	bh->b_bdev = be->be_mdev;
	bh->b_blocknr = (isect - be->be_f_offset + be->be_v_offset) >>
		(be->be_mdev->bd_inode->i_blkbits - SECTOR_SHIFT);

	dprintk("%s isect %llu, bh->b_blocknr %ld, using bsize %Zd\n",
		__func__, (unsigned long long)isect, (long)bh->b_blocknr,
		bh->b_size);
	return;
}

static void
bl_read_single_end_io(struct bio *bio, int error)
{
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct page *page = bvec->bv_page;

	/* Only one page in bvec */
	unlock_page(page);
}

static int
bl_do_readpage_sync(struct page *page, struct pnfs_block_extent *be,
		    unsigned int offset, unsigned int len)
{
	struct bio *bio;
	struct page *shadow_page;
	sector_t isect;
	char *kaddr, *kshadow_addr;
	int ret = 0;

	dprintk("%s: offset %u len %u\n", __func__, offset, len);

	shadow_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (shadow_page == NULL)
		return -ENOMEM;

	bio = bio_alloc(GFP_NOIO, 1);
	if (bio == NULL)
		return -ENOMEM;

	isect = (page->index << PAGE_CACHE_SECTOR_SHIFT) +
		(offset / SECTOR_SIZE);

	bio->bi_iter.bi_sector = isect - be->be_f_offset + be->be_v_offset;
	bio->bi_bdev = be->be_mdev;
	bio->bi_end_io = bl_read_single_end_io;

	lock_page(shadow_page);
	if (bio_add_page(bio, shadow_page,
			 SECTOR_SIZE, round_down(offset, SECTOR_SIZE)) == 0) {
		unlock_page(shadow_page);
		bio_put(bio);
		return -EIO;
	}

	submit_bio(READ, bio);
	wait_on_page_locked(shadow_page);
	if (unlikely(!test_bit(BIO_UPTODATE, &bio->bi_flags))) {
		ret = -EIO;
	} else {
		kaddr = kmap_atomic(page);
		kshadow_addr = kmap_atomic(shadow_page);
		memcpy(kaddr + offset, kshadow_addr + offset, len);
		kunmap_atomic(kshadow_addr);
		kunmap_atomic(kaddr);
	}
	__free_page(shadow_page);
	bio_put(bio);

	return ret;
}

static int
bl_read_partial_page_sync(struct page *page, struct pnfs_block_extent *be,
			  unsigned int dirty_offset, unsigned int dirty_len,
			  bool full_page)
{
	int ret = 0;
	unsigned int start, end;

	if (full_page) {
		start = 0;
		end = PAGE_CACHE_SIZE;
	} else {
		start = round_down(dirty_offset, SECTOR_SIZE);
		end = round_up(dirty_offset + dirty_len, SECTOR_SIZE);
	}

	dprintk("%s: offset %u len %d\n", __func__, dirty_offset, dirty_len);
	if (!be) {
		zero_user_segments(page, start, dirty_offset,
				   dirty_offset + dirty_len, end);
		if (start == 0 && end == PAGE_CACHE_SIZE &&
		    trylock_page(page)) {
			SetPageUptodate(page);
			unlock_page(page);
		}
		return ret;
	}

	if (start != dirty_offset)
		ret = bl_do_readpage_sync(page, be, start, dirty_offset - start);

	if (!ret && (dirty_offset + dirty_len < end))
		ret = bl_do_readpage_sync(page, be, dirty_offset + dirty_len,
					  end - dirty_offset - dirty_len);

	return ret;
}

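/*
 * The two helpers above provide the sector-granular read-modify-write
 * used by the write path when a request does not cover whole sectors:
 * the sectors surrounding the dirty range are read synchronously into
 * the page (or zeroed when there is no backing extent) before the
 * aligned range is written out.
 */
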
/* Given an unmapped page, zero it (or read it in for COW); the page is
 * locked by the caller.
 */
static int
init_page_for_write(struct page *page, struct pnfs_block_extent *cow_read)
{
	struct buffer_head *bh = NULL;
	int ret = 0;
	sector_t isect;

	dprintk("%s enter, %p\n", __func__, page);
	BUG_ON(PageUptodate(page));
	if (!cow_read) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		goto cleanup;
	}

	bh = alloc_page_buffers(page, PAGE_CACHE_SIZE, 0);
	if (!bh) {
		ret = -ENOMEM;
		goto cleanup;
	}

	isect = (sector_t) page->index << PAGE_CACHE_SECTOR_SHIFT;
	map_block(bh, isect, cow_read);
	if (!bh_uptodate_or_lock(bh))
		ret = bh_submit_read(bh);
	if (ret)
		goto cleanup;
	SetPageUptodate(page);

cleanup:
	if (bh)
		free_buffer_head(bh);
	if (ret) {
		/* Need to mark layout with bad read...should now
		 * just use nfs4 for reads and writes.
		 */
		mark_bad_read();
	}
	return ret;
}

/* Find or create a zeroing page that is marked as being under writeback.
 * Return ERR_PTR on error, NULL to skip this page, or the page itself
 * to indicate it should be written out.
 */
static struct page *
bl_find_get_zeroing_page(struct inode *inode, pgoff_t index,
			 struct pnfs_block_extent *cow_read)
{
	struct page *page;
	int locked = 0;
	page = find_get_page(inode->i_mapping, index);
	if (page)
		goto check_page;

	page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
	if (unlikely(!page)) {
		dprintk("%s oom\n", __func__);
		return ERR_PTR(-ENOMEM);
	}
	locked = 1;

check_page:
	/* PageDirty: Other will write this out
	 * PageWriteback: Other is writing this out
	 * PageUptodate: It was read before
	 */
	if (PageDirty(page) || PageWriteback(page)) {
		print_page(page);
		if (locked)
			unlock_page(page);
		page_cache_release(page);
		return NULL;
	}

	if (!locked) {
		lock_page(page);
		locked = 1;
		goto check_page;
	}
	if (!PageUptodate(page)) {
		/* New page, read it in or zero it */
		init_page_for_write(page, cow_read);
	}
	set_page_writeback(page);
	unlock_page(page);

	return page;
}

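/*
 * Write path overview: bl_write_pagelist() first zero-fills the pages
 * of an INVALID extent that precede the write within the server's
 * block size, then writes the requested pages themselves (expanding to
 * full-page writes, or falling back to sector read-modify-write for
 * unaligned ranges), and finally zero-fills the trailing pages of the
 * last INVALID block.  Freshly written INVALID sectors are recorded so
 * that bl_write_cleanup() can mark them for LAYOUTCOMMIT.
 */
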
static enum pnfs_try_status
bl_write_pagelist(struct nfs_pgio_header *header, int sync)
{
	int i, ret, npg_zero, pg_index, last = 0;
	struct bio *bio = NULL;
	struct pnfs_block_extent *be = NULL, *cow_read = NULL;
	sector_t isect, last_isect = 0, extent_length = 0;
	struct parallel_io *par = NULL;
	loff_t offset = header->args.offset;
	size_t count = header->args.count;
	unsigned int pg_offset, pg_len, saved_len;
	struct page **pages = header->args.pages;
	struct page *page;
	pgoff_t index;
	u64 temp;
	int npg_per_block =
	    NFS_SERVER(header->inode)->pnfs_blksize >> PAGE_CACHE_SHIFT;
	struct blk_plug plug;

	dprintk("%s enter, %Zu@%lld\n", __func__, count, offset);

	blk_start_plug(&plug);

	if (header->dreq != NULL &&
	    (!IS_ALIGNED(offset, NFS_SERVER(header->inode)->pnfs_blksize) ||
	     !IS_ALIGNED(count, NFS_SERVER(header->inode)->pnfs_blksize))) {
		dprintk("pnfsblock nonblock aligned DIO writes. Resend MDS\n");
		goto out_mds;
	}
	/* At this point, header->page_array is a (sequential) list of nfs_pages.
	 * We want to write each, and if there is an error set pnfs_error
	 * to have it redone using nfs.
	 */
	par = alloc_parallel(header);
	if (!par)
		goto out_mds;
	par->pnfs_callback = bl_end_par_io_write;
	/* At this point, have to be more careful with error handling */

	isect = (sector_t) ((offset & (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT);
	be = bl_find_get_extent(BLK_LSEG2EXT(header->lseg), isect, &cow_read);
	if (!be || !is_writable(be, isect)) {
		dprintk("%s no matching extents!\n", __func__);
		goto out_mds;
	}

	/* First page inside INVALID extent */
	if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
		if (likely(!bl_push_one_short_extent(be->be_inval)))
			par->bse_count++;
		else
			goto out_mds;
		temp = offset >> PAGE_CACHE_SHIFT;
		npg_zero = do_div(temp, npg_per_block);
		isect = (sector_t) (((offset - npg_zero * PAGE_CACHE_SIZE) &
				     (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT);
		extent_length = be->be_length - (isect - be->be_f_offset);

fill_invalid_ext:
		dprintk("%s need to zero %d pages\n", __func__, npg_zero);
		for (;npg_zero > 0; npg_zero--) {
			if (bl_is_sector_init(be->be_inval, isect)) {
				dprintk("isect %llu already init\n",
					(unsigned long long)isect);
				goto next_page;
			}
			/* page ref released in bl_end_io_write_zero */
			index = isect >> PAGE_CACHE_SECTOR_SHIFT;
			dprintk("%s zero %dth page: index %lu isect %llu\n",
				__func__, npg_zero, index,
				(unsigned long long)isect);
			page = bl_find_get_zeroing_page(header->inode, index,
							cow_read);
			if (unlikely(IS_ERR(page))) {
				header->pnfs_error = PTR_ERR(page);
				goto out;
			} else if (page == NULL)
				goto next_page;

			ret = bl_mark_sectors_init(be->be_inval, isect,
						   PAGE_CACHE_SECTORS);
			if (unlikely(ret)) {
				dprintk("%s bl_mark_sectors_init fail %d\n",
					__func__, ret);
				end_page_writeback(page);
				page_cache_release(page);
				header->pnfs_error = ret;
				goto out;
			}
			if (likely(!bl_push_one_short_extent(be->be_inval)))
				par->bse_count++;
			else {
				end_page_writeback(page);
				page_cache_release(page);
				header->pnfs_error = -ENOMEM;
				goto out;
			}
			/* FIXME: This should be done in bi_end_io */
			mark_extents_written(BLK_LSEG2EXT(header->lseg),
					     page->index << PAGE_CACHE_SHIFT,
					     PAGE_CACHE_SIZE);

			bio = bl_add_page_to_bio(bio, npg_zero, WRITE,
						 isect, page, be,
						 bl_end_io_write_zero, par);
			if (IS_ERR(bio)) {
				header->pnfs_error = PTR_ERR(bio);
				bio = NULL;
				goto out;
			}
next_page:
			isect += PAGE_CACHE_SECTORS;
			extent_length -= PAGE_CACHE_SECTORS;
		}
		if (last)
			goto write_done;
	}
	bio = bl_submit_bio(WRITE, bio);

	/* Middle pages */
	pg_index = header->args.pgbase >> PAGE_CACHE_SHIFT;
	for (i = pg_index; i < header->page_array.npages; i++) {
		if (extent_length <= 0) {
			/* We've used up the previous extent */
			bl_put_extent(be);
			bl_put_extent(cow_read);
			bio = bl_submit_bio(WRITE, bio);
			/* Get the next one */
			be = bl_find_get_extent(BLK_LSEG2EXT(header->lseg),
						isect, &cow_read);
			if (!be || !is_writable(be, isect)) {
				header->pnfs_error = -EINVAL;
				goto out;
			}
			if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
				if (likely(!bl_push_one_short_extent(
								be->be_inval)))
					par->bse_count++;
				else {
					header->pnfs_error = -ENOMEM;
					goto out;
				}
			}
			extent_length = be->be_length -
			    (isect - be->be_f_offset);
		}

		dprintk("%s offset %lld count %Zu\n", __func__, offset, count);
		pg_offset = offset & ~PAGE_CACHE_MASK;
		if (pg_offset + count > PAGE_CACHE_SIZE)
			pg_len = PAGE_CACHE_SIZE - pg_offset;
		else
			pg_len = count;

		saved_len = pg_len;
		if (be->be_state == PNFS_BLOCK_INVALID_DATA &&
		    !bl_is_sector_init(be->be_inval, isect)) {
			ret = bl_read_partial_page_sync(pages[i], cow_read,
							pg_offset, pg_len, true);
			if (ret) {
				dprintk("%s bl_read_partial_page_sync fail %d\n",
					__func__, ret);
				header->pnfs_error = ret;
				goto out;
			}

			ret = bl_mark_sectors_init(be->be_inval, isect,
						   PAGE_CACHE_SECTORS);
			if (unlikely(ret)) {
				dprintk("%s bl_mark_sectors_init fail %d\n",
					__func__, ret);
				header->pnfs_error = ret;
				goto out;
			}

			/* Expand to full page write */
			pg_offset = 0;
			pg_len = PAGE_CACHE_SIZE;
		} else if ((pg_offset & (SECTOR_SIZE - 1)) ||
			   (pg_len & (SECTOR_SIZE - 1))) {
			/* ahh, nasty case. We have to do sync full sector
			 * read-modify-write cycles.
			 */
			unsigned int saved_offset = pg_offset;
			ret = bl_read_partial_page_sync(pages[i], be, pg_offset,
							pg_len, false);
			pg_offset = round_down(pg_offset, SECTOR_SIZE);
			pg_len = round_up(saved_offset + pg_len, SECTOR_SIZE)
				 - pg_offset;
		}

		bio = do_add_page_to_bio(bio, header->page_array.npages - i,
					 WRITE,
					 isect, pages[i], be,
					 bl_end_io_write, par,
					 pg_offset, pg_len);
		if (IS_ERR(bio)) {
			header->pnfs_error = PTR_ERR(bio);
			bio = NULL;
			goto out;
		}
		offset += saved_len;
		count -= saved_len;
		isect += PAGE_CACHE_SECTORS;
		last_isect = isect;
		extent_length -= PAGE_CACHE_SECTORS;
	}

	/* Last page inside INVALID extent */
	if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
		bio = bl_submit_bio(WRITE, bio);
		temp = last_isect >> PAGE_CACHE_SECTOR_SHIFT;
		npg_zero = npg_per_block - do_div(temp, npg_per_block);
		if (npg_zero < npg_per_block) {
			last = 1;
			goto fill_invalid_ext;
		}
	}

write_done:
	header->res.count = header->args.count;
out:
	bl_put_extent(be);
	bl_put_extent(cow_read);
	bl_submit_bio(WRITE, bio);
	blk_finish_plug(&plug);
	put_parallel(par);
	return PNFS_ATTEMPTED;
out_mds:
	blk_finish_plug(&plug);
	bl_put_extent(be);
	bl_put_extent(cow_read);
	kfree(par);
	return PNFS_NOT_ATTEMPTED;
}

/* FIXME - range ignored */
static void
release_extents(struct pnfs_block_layout *bl, struct pnfs_layout_range *range)
{
	int i;
	struct pnfs_block_extent *be;

	spin_lock(&bl->bl_ext_lock);
	for (i = 0; i < EXTENT_LISTS; i++) {
		while (!list_empty(&bl->bl_extents[i])) {
			be = list_first_entry(&bl->bl_extents[i],
					      struct pnfs_block_extent,
					      be_node);
			list_del(&be->be_node);
			bl_put_extent(be);
		}
	}
	spin_unlock(&bl->bl_ext_lock);
}

static void
release_inval_marks(struct pnfs_inval_markings *marks)
{
	struct pnfs_inval_tracking *pos, *temp;
	struct pnfs_block_short_extent *se, *stemp;

	list_for_each_entry_safe(pos, temp, &marks->im_tree.mtt_stub, it_link) {
		list_del(&pos->it_link);
		kfree(pos);
	}

	list_for_each_entry_safe(se, stemp, &marks->im_extents, bse_node) {
		list_del(&se->bse_node);
		kfree(se);
	}
	return;
}

static void bl_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct pnfs_block_layout *bl = BLK_LO2EXT(lo);

	dprintk("%s enter\n", __func__);
	release_extents(bl, NULL);
	release_inval_marks(&bl->bl_inval);
	kfree(bl);
}

static struct pnfs_layout_hdr *bl_alloc_layout_hdr(struct inode *inode,
						   gfp_t gfp_flags)
{
	struct pnfs_block_layout *bl;

	dprintk("%s enter\n", __func__);
	bl = kzalloc(sizeof(*bl), gfp_flags);
	if (!bl)
		return NULL;
	spin_lock_init(&bl->bl_ext_lock);
	INIT_LIST_HEAD(&bl->bl_extents[0]);
	INIT_LIST_HEAD(&bl->bl_extents[1]);
	INIT_LIST_HEAD(&bl->bl_commit);
	INIT_LIST_HEAD(&bl->bl_committing);
	bl->bl_count = 0;
	bl->bl_blocksize = NFS_SERVER(inode)->pnfs_blksize >> SECTOR_SHIFT;
	BL_INIT_INVAL_MARKS(&bl->bl_inval, bl->bl_blocksize);
	return &bl->bl_layout;
}

static void bl_free_lseg(struct pnfs_layout_segment *lseg)
{
	dprintk("%s enter\n", __func__);
	kfree(lseg);
}

/* We pretty much ignore lseg, and store all data layout wide, so we
 * can correctly merge.
 */
static struct pnfs_layout_segment *bl_alloc_lseg(struct pnfs_layout_hdr *lo,
						 struct nfs4_layoutget_res *lgr,
						 gfp_t gfp_flags)
{
	struct pnfs_layout_segment *lseg;
	int status;

	dprintk("%s enter\n", __func__);
	lseg = kzalloc(sizeof(*lseg), gfp_flags);
	if (!lseg)
		return ERR_PTR(-ENOMEM);
	status = nfs4_blk_process_layoutget(lo, lgr, gfp_flags);
	if (status) {
		/* We don't want to call the full-blown bl_free_lseg,
		 * since on error extents were not touched.
		 */
		kfree(lseg);
		return ERR_PTR(status);
	}
	return lseg;
}

static void
bl_encode_layoutcommit(struct pnfs_layout_hdr *lo, struct xdr_stream *xdr,
		       const struct nfs4_layoutcommit_args *arg)
{
	dprintk("%s enter\n", __func__);
	encode_pnfs_block_layoutupdate(BLK_LO2EXT(lo), xdr, arg);
}

static void
bl_cleanup_layoutcommit(struct nfs4_layoutcommit_data *lcdata)
{
	struct pnfs_layout_hdr *lo = NFS_I(lcdata->args.inode)->layout;

	dprintk("%s enter\n", __func__);
	clean_pnfs_block_layoutupdate(BLK_LO2EXT(lo), &lcdata->args, lcdata->res.status);
}

static void free_blk_mountid(struct block_mount_id *mid)
{
	if (mid) {
		struct pnfs_block_dev *dev, *tmp;

		/* No need to take bm_lock as we are last user freeing bm_devlist */
		list_for_each_entry_safe(dev, tmp, &mid->bm_devlist, bm_node) {
			list_del(&dev->bm_node);
			bl_free_block_dev(dev);
		}
		kfree(mid);
	}
}

/* This is mostly copied from the filelayout_get_device_info function.
 * It seems much of this should be at the generic pnfs level.
 */
static struct pnfs_block_dev *
nfs4_blk_get_deviceinfo(struct nfs_server *server, const struct nfs_fh *fh,
			struct nfs4_deviceid *d_id)
{
	struct pnfs_device *dev;
	struct pnfs_block_dev *rv;
	u32 max_resp_sz;
	int max_pages;
	struct page **pages = NULL;
	int i, rc;

	/*
	 * Use the session max response size as the basis for setting
	 * GETDEVICEINFO's maxcount
	 */
	max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
	max_pages = nfs_page_array_len(0, max_resp_sz);
	dprintk("%s max_resp_sz %u max_pages %d\n",
		__func__, max_resp_sz, max_pages);

	dev = kmalloc(sizeof(*dev), GFP_NOFS);
	if (!dev) {
		dprintk("%s kmalloc failed\n", __func__);
		return ERR_PTR(-ENOMEM);
	}

	pages = kcalloc(max_pages, sizeof(struct page *), GFP_NOFS);
	if (pages == NULL) {
		kfree(dev);
		return ERR_PTR(-ENOMEM);
	}
	for (i = 0; i < max_pages; i++) {
		pages[i] = alloc_page(GFP_NOFS);
		if (!pages[i]) {
			rv = ERR_PTR(-ENOMEM);
			goto out_free;
		}
	}

	memcpy(&dev->dev_id, d_id, sizeof(*d_id));
	dev->layout_type = LAYOUT_BLOCK_VOLUME;
	dev->pages = pages;
	dev->pgbase = 0;
	dev->pglen = PAGE_SIZE * max_pages;
	dev->mincount = 0;
	dev->maxcount = max_resp_sz - nfs41_maxgetdevinfo_overhead;

	dprintk("%s: dev_id: %s\n", __func__, dev->dev_id.data);
	rc = nfs4_proc_getdeviceinfo(server, dev, NULL);
	dprintk("%s getdevice info returns %d\n", __func__, rc);
	if (rc) {
		rv = ERR_PTR(rc);
		goto out_free;
	}

	rv = nfs4_blk_decode_device(server, dev);
 out_free:
	for (i = 0; i < max_pages; i++)
		__free_page(pages[i]);
	kfree(pages);
	kfree(dev);
	return rv;
}

static int
bl_set_layoutdriver(struct nfs_server *server, const struct nfs_fh *fh)
{
	struct block_mount_id *b_mt_id = NULL;
	struct pnfs_devicelist *dlist = NULL;
	struct pnfs_block_dev *bdev;
	LIST_HEAD(block_disklist);
	int status, i;

	dprintk("%s enter\n", __func__);

	if (server->pnfs_blksize == 0) {
		dprintk("%s Server did not return blksize\n", __func__);
		return -EINVAL;
	}
	if (server->pnfs_blksize > PAGE_SIZE) {
		printk(KERN_ERR "%s: pNFS blksize %d not supported.\n",
		       __func__, server->pnfs_blksize);
		return -EINVAL;
	}

	b_mt_id = kzalloc(sizeof(struct block_mount_id), GFP_NOFS);
	if (!b_mt_id) {
		status = -ENOMEM;
		goto out_error;
	}
	/* Initialize nfs4 block layout mount id */
	spin_lock_init(&b_mt_id->bm_lock);
	INIT_LIST_HEAD(&b_mt_id->bm_devlist);

	dlist = kmalloc(sizeof(struct pnfs_devicelist), GFP_NOFS);
	if (!dlist) {
		status = -ENOMEM;
		goto out_error;
	}
	dlist->eof = 0;
	while (!dlist->eof) {
		status = nfs4_proc_getdevicelist(server, fh, dlist);
		if (status)
			goto out_error;
		dprintk("%s GETDEVICELIST numdevs=%i, eof=%i\n",
			__func__, dlist->num_devs, dlist->eof);
		for (i = 0; i < dlist->num_devs; i++) {
			bdev = nfs4_blk_get_deviceinfo(server, fh,
						       &dlist->dev_id[i]);
			if (IS_ERR(bdev)) {
				status = PTR_ERR(bdev);
				goto out_error;
			}
			spin_lock(&b_mt_id->bm_lock);
			list_add(&bdev->bm_node, &b_mt_id->bm_devlist);
			spin_unlock(&b_mt_id->bm_lock);
		}
	}
	dprintk("%s SUCCESS\n", __func__);
	server->pnfs_ld_data = b_mt_id;

 out_return:
	kfree(dlist);
	return status;

 out_error:
	free_blk_mountid(b_mt_id);
	goto out_return;
}

static int
bl_clear_layoutdriver(struct nfs_server *server)
{
	struct block_mount_id *b_mt_id = server->pnfs_ld_data;

	dprintk("%s enter\n", __func__);
	free_blk_mountid(b_mt_id);
	dprintk("%s RETURNS\n", __func__);
	return 0;
}

static bool
is_aligned_req(struct nfs_page *req, unsigned int alignment)
{
	return IS_ALIGNED(req->wb_offset, alignment) &&
	       IS_ALIGNED(req->wb_bytes, alignment);
}

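/*
 * Direct I/O requests that are not sector-aligned (reads) or
 * page-aligned (writes) are redirected to the MDS by the pg_init hooks
 * below and are never coalesced by the pg_test hooks.
 */
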
static void
bl_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	if (pgio->pg_dreq != NULL &&
	    !is_aligned_req(req, SECTOR_SIZE))
		nfs_pageio_reset_read_mds(pgio);
	else
		pnfs_generic_pg_init_read(pgio, req);
}

/*
 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
 * of bytes (maximum @req->wb_bytes) that can be coalesced.
 */
static size_t
bl_pg_test_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
		struct nfs_page *req)
{
	if (pgio->pg_dreq != NULL &&
	    !is_aligned_req(req, SECTOR_SIZE))
		return 0;

	return pnfs_generic_pg_test(pgio, prev, req);
}

/*
 * Return the number of contiguous bytes for a given inode
 * starting at page frame idx.
 */
static u64 pnfs_num_cont_bytes(struct inode *inode, pgoff_t idx)
{
	struct address_space *mapping = inode->i_mapping;
	pgoff_t end;

	/* Optimize common case that writes from 0 to end of file */
	end = DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE);
	if (end != NFS_I(inode)->npages) {
		rcu_read_lock();
		end = page_cache_next_hole(mapping, idx + 1, ULONG_MAX);
		rcu_read_unlock();
	}

	if (!end)
		return i_size_read(inode) - (idx << PAGE_CACHE_SHIFT);
	else
		return (end - idx) << PAGE_CACHE_SHIFT;
}

static void
bl_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	if (pgio->pg_dreq != NULL &&
	    !is_aligned_req(req, PAGE_CACHE_SIZE)) {
		nfs_pageio_reset_write_mds(pgio);
	} else {
		u64 wb_size;
		if (pgio->pg_dreq == NULL)
			wb_size = pnfs_num_cont_bytes(pgio->pg_inode,
						      req->wb_index);
		else
			wb_size = nfs_dreq_bytes_left(pgio->pg_dreq);

		pnfs_generic_pg_init_write(pgio, req, wb_size);
	}
}

/*
 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
 * of bytes (maximum @req->wb_bytes) that can be coalesced.
 */
static size_t
bl_pg_test_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
		 struct nfs_page *req)
{
	if (pgio->pg_dreq != NULL &&
	    !is_aligned_req(req, PAGE_CACHE_SIZE))
		return 0;

	return pnfs_generic_pg_test(pgio, prev, req);
}

static const struct nfs_pageio_ops bl_pg_read_ops = {
	.pg_init = bl_pg_init_read,
	.pg_test = bl_pg_test_read,
	.pg_doio = pnfs_generic_pg_readpages,
};

static const struct nfs_pageio_ops bl_pg_write_ops = {
	.pg_init = bl_pg_init_write,
	.pg_test = bl_pg_test_write,
	.pg_doio = pnfs_generic_pg_writepages,
};

static struct pnfs_layoutdriver_type blocklayout_type = {
	.id				= LAYOUT_BLOCK_VOLUME,
	.name				= "LAYOUT_BLOCK_VOLUME",
	.owner				= THIS_MODULE,
	.read_pagelist			= bl_read_pagelist,
	.write_pagelist			= bl_write_pagelist,
	.alloc_layout_hdr		= bl_alloc_layout_hdr,
	.free_layout_hdr		= bl_free_layout_hdr,
	.alloc_lseg			= bl_alloc_lseg,
	.free_lseg			= bl_free_lseg,
	.encode_layoutcommit		= bl_encode_layoutcommit,
	.cleanup_layoutcommit		= bl_cleanup_layoutcommit,
	.set_layoutdriver		= bl_set_layoutdriver,
	.clear_layoutdriver		= bl_clear_layoutdriver,
	.pg_read_ops			= &bl_pg_read_ops,
	.pg_write_ops			= &bl_pg_write_ops,
};

static const struct rpc_pipe_ops bl_upcall_ops = {
	.upcall		= rpc_pipe_generic_upcall,
	.downcall	= bl_pipe_downcall,
	.destroy_msg	= bl_pipe_destroy_msg,
};

static struct dentry *nfs4blocklayout_register_sb(struct super_block *sb,
						  struct rpc_pipe *pipe)
{
	struct dentry *dir, *dentry;

	dir = rpc_d_lookup_sb(sb, NFS_PIPE_DIRNAME);
	if (dir == NULL)
		return ERR_PTR(-ENOENT);
	dentry = rpc_mkpipe_dentry(dir, "blocklayout", NULL, pipe);
	dput(dir);
	return dentry;
}

static void nfs4blocklayout_unregister_sb(struct super_block *sb,
					  struct rpc_pipe *pipe)
{
	if (pipe->dentry)
		rpc_unlink(pipe->dentry);
}

static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event,
			    void *ptr)
{
	struct super_block *sb = ptr;
	struct net *net = sb->s_fs_info;
	struct nfs_net *nn = net_generic(net, nfs_net_id);
	struct dentry *dentry;
	int ret = 0;

	if (!try_module_get(THIS_MODULE))
		return 0;

	if (nn->bl_device_pipe == NULL) {
		module_put(THIS_MODULE);
		return 0;
	}

	switch (event) {
	case RPC_PIPEFS_MOUNT:
		dentry = nfs4blocklayout_register_sb(sb, nn->bl_device_pipe);
		if (IS_ERR(dentry)) {
			ret = PTR_ERR(dentry);
			break;
		}
		nn->bl_device_pipe->dentry = dentry;
		break;
	case RPC_PIPEFS_UMOUNT:
		if (nn->bl_device_pipe->dentry)
			nfs4blocklayout_unregister_sb(sb, nn->bl_device_pipe);
		break;
	default:
		ret = -ENOTSUPP;
		break;
	}
	module_put(THIS_MODULE);
	return ret;
}

static struct notifier_block nfs4blocklayout_block = {
	.notifier_call = rpc_pipefs_event,
};

static struct dentry *nfs4blocklayout_register_net(struct net *net,
						   struct rpc_pipe *pipe)
{
	struct super_block *pipefs_sb;
	struct dentry *dentry;

	pipefs_sb = rpc_get_sb_net(net);
	if (!pipefs_sb)
		return NULL;
	dentry = nfs4blocklayout_register_sb(pipefs_sb, pipe);
	rpc_put_sb_net(net);
	return dentry;
}

static void nfs4blocklayout_unregister_net(struct net *net,
					   struct rpc_pipe *pipe)
{
	struct super_block *pipefs_sb;

	pipefs_sb = rpc_get_sb_net(net);
	if (pipefs_sb) {
		nfs4blocklayout_unregister_sb(pipefs_sb, pipe);
		rpc_put_sb_net(net);
	}
}

static int nfs4blocklayout_net_init(struct net *net)
{
	struct nfs_net *nn = net_generic(net, nfs_net_id);
	struct dentry *dentry;

	init_waitqueue_head(&nn->bl_wq);
	nn->bl_device_pipe = rpc_mkpipe_data(&bl_upcall_ops, 0);
	if (IS_ERR(nn->bl_device_pipe))
		return PTR_ERR(nn->bl_device_pipe);
	dentry = nfs4blocklayout_register_net(net, nn->bl_device_pipe);
	if (IS_ERR(dentry)) {
		rpc_destroy_pipe_data(nn->bl_device_pipe);
		return PTR_ERR(dentry);
	}
	nn->bl_device_pipe->dentry = dentry;
	return 0;
}

static void nfs4blocklayout_net_exit(struct net *net)
{
	struct nfs_net *nn = net_generic(net, nfs_net_id);

	nfs4blocklayout_unregister_net(net, nn->bl_device_pipe);
	rpc_destroy_pipe_data(nn->bl_device_pipe);
	nn->bl_device_pipe = NULL;
}

static struct pernet_operations nfs4blocklayout_net_ops = {
	.init = nfs4blocklayout_net_init,
	.exit = nfs4blocklayout_net_exit,
};

static int __init nfs4blocklayout_init(void)
{
	int ret;

	dprintk("%s: NFSv4 Block Layout Driver Registering...\n", __func__);

	ret = pnfs_register_layoutdriver(&blocklayout_type);
	if (ret)
		goto out;

	ret = rpc_pipefs_notifier_register(&nfs4blocklayout_block);
	if (ret)
		goto out_remove;
	ret = register_pernet_subsys(&nfs4blocklayout_net_ops);
	if (ret)
		goto out_notifier;
out:
	return ret;

out_notifier:
	rpc_pipefs_notifier_unregister(&nfs4blocklayout_block);
out_remove:
	pnfs_unregister_layoutdriver(&blocklayout_type);
	return ret;
}

static void __exit nfs4blocklayout_exit(void)
{
	dprintk("%s: NFSv4 Block Layout Driver Unregistering...\n",
		__func__);

	rpc_pipefs_notifier_unregister(&nfs4blocklayout_block);
	unregister_pernet_subsys(&nfs4blocklayout_net_ops);
	pnfs_unregister_layoutdriver(&blocklayout_type);
}

MODULE_ALIAS("nfs-layouttype4-3");

module_init(nfs4blocklayout_init);
module_exit(nfs4blocklayout_exit);