/*
 * linux/fs/nfs/read.c
 *
 * Block I/O for NFS
 *
 * Partial copy of Linus' read cache modifications to fs/nfs/file.c
 * modified for async RPC by okir@monad.swb.de
 */

#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>

#include "nfs4_fs.h"
#include "internal.h"
#include "iostat.h"
#include "fscache.h"
#include "pnfs.h"
#include "nfstrace.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops;
static const struct nfs_rw_ops nfs_rw_read_ops;

static struct kmem_cache *nfs_rdata_cachep;

static struct nfs_pgio_header *nfs_readhdr_alloc(void)
{
	struct nfs_pgio_header *p = kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL);

	if (p)
		p->rw_mode = FMODE_READ;
	return p;
}

static void nfs_readhdr_free(struct nfs_pgio_header *rhdr)
{
	kmem_cache_free(nfs_rdata_cachep, rhdr);
}

static
int nfs_return_empty_page(struct page *page)
{
	zero_user(page, 0, PAGE_SIZE);
	SetPageUptodate(page);
	unlock_page(page);
	return 0;
}

void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
			  struct inode *inode, bool force_mds,
			  const struct nfs_pgio_completion_ops *compl_ops)
{
	struct nfs_server *server = NFS_SERVER(inode);
	const struct nfs_pageio_ops *pg_ops = &nfs_pgio_rw_ops;

#ifdef CONFIG_NFS_V4_1
	if (server->pnfs_curr_ld && !force_mds)
		pg_ops = server->pnfs_curr_ld->pg_read_ops;
#endif
	nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_read_ops,
			server->rsize, 0);
}
EXPORT_SYMBOL_GPL(nfs_pageio_init_read);

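/*
 * Undo any pNFS layout-driver setup on the descriptor and fall back to
 * plain I/O through the metadata server, restoring the MDS rsize.
 */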
void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
{
	struct nfs_pgio_mirror *mirror;

	if (pgio->pg_ops && pgio->pg_ops->pg_cleanup)
		pgio->pg_ops->pg_cleanup(pgio);

	pgio->pg_ops = &nfs_pgio_rw_ops;

	/* read path should never have more than one mirror */
	WARN_ON_ONCE(pgio->pg_mirror_count != 1);

	mirror = &pgio->pg_mirrors[0];
	mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->rsize;
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);

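/*
 * Release a read request: once the last subrequest in the page group
 * completes, push an uptodate page to fscache and unlock it.
 */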
static void nfs_readpage_release(struct nfs_page *req)
{
	struct inode *inode = d_inode(req->wb_context->dentry);

	dprintk("NFS: read done (%s/%llu %d@%lld)\n", inode->i_sb->s_id,
		(unsigned long long)NFS_FILEID(inode), req->wb_bytes,
		(long long)req_offset(req));

	if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) {
		if (PageUptodate(req->wb_page))
			nfs_readpage_to_fscache(inode, req->wb_page, 0);

		unlock_page(req->wb_page);
	}
	nfs_release_request(req);
}

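/*
 * Start an asynchronous read of a single page: build a read request,
 * zero the tail of a partial page, feed the request through the pageio
 * layer and account the submitted bytes in the inode's read_io counter.
 */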
int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
		       struct page *page)
{
	struct nfs_page *new;
	unsigned int len;
	struct nfs_pageio_descriptor pgio;
	struct nfs_pgio_mirror *pgm;

	len = nfs_page_length(page);
	if (len == 0)
		return nfs_return_empty_page(page);
	new = nfs_create_request(ctx, page, NULL, 0, len);
	if (IS_ERR(new)) {
		unlock_page(page);
		return PTR_ERR(new);
	}
	if (len < PAGE_SIZE)
		zero_user_segment(page, len, PAGE_SIZE);

	nfs_pageio_init_read(&pgio, inode, false,
			     &nfs_async_read_completion_ops);
	if (!nfs_pageio_add_request(&pgio, new)) {
		nfs_list_remove_request(new);
		nfs_readpage_release(new);
	}
	nfs_pageio_complete(&pgio);

	/* It doesn't make sense to do mirrored reads! */
	WARN_ON_ONCE(pgio.pg_mirror_count != 1);

	pgm = &pgio.pg_mirrors[0];
	NFS_I(inode)->read_io += pgm->pg_bytes_written;

	return pgio.pg_error < 0 ? pgio.pg_error : 0;
}

static void nfs_page_group_set_uptodate(struct nfs_page *req)
{
	if (nfs_page_group_sync_on_bit(req, PG_UPTODATE))
		SetPageUptodate(req->wb_page);
}

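/*
 * Per-header read completion: walk the completed requests, zero any
 * region beyond the server's good_bytes when EOF was hit, mark fully
 * read page groups uptodate and release each request.
 */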
static void nfs_read_completion(struct nfs_pgio_header *hdr)
{
	unsigned long bytes = 0;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out;
	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
		struct page *page = req->wb_page;
		unsigned long start = req->wb_pgbase;
		unsigned long end = req->wb_pgbase + req->wb_bytes;

		if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
			/* note: regions of the page not covered by a
			 * request are zeroed in nfs_readpage_async /
			 * readpage_async_filler */
			if (bytes > hdr->good_bytes) {
				/* nothing in this request was good, so zero
				 * the full extent of the request */
				zero_user_segment(page, start, end);

			} else if (hdr->good_bytes - bytes < req->wb_bytes) {
				/* part of this request has good bytes, but
				 * not all. zero the bad bytes */
				start += hdr->good_bytes - bytes;
				WARN_ON(start < req->wb_pgbase);
				zero_user_segment(page, start, end);
			}
		}
		bytes += req->wb_bytes;
		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
			if (bytes <= hdr->good_bytes)
				nfs_page_group_set_uptodate(req);
		} else
			nfs_page_group_set_uptodate(req);
		nfs_list_remove_request(req);
		nfs_readpage_release(req);
	}
out:
	hdr->release(hdr);
}

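/*
 * Set up the RPC message for a READ call.  Reads that back a swapfile
 * get the swap-specific RPC task flags (NFS_RPC_SWAPFLAGS).
 */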
static void nfs_initiate_read(struct nfs_pgio_header *hdr,
			      struct rpc_message *msg,
			      const struct nfs_rpc_ops *rpc_ops,
			      struct rpc_task_setup *task_setup_data, int how)
{
	struct inode *inode = hdr->inode;
	int swap_flags = IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0;

	task_setup_data->flags |= swap_flags;
	rpc_ops->read_setup(hdr, msg);
	trace_nfs_initiate_read(inode, hdr->io_start, hdr->good_bytes);
}

static void
nfs_async_read_error(struct list_head *head)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_readpage_release(req);
	}
}

static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops = {
	.error_cleanup = nfs_async_read_error,
	.completion = nfs_read_completion,
};

/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
static int nfs_readpage_done(struct rpc_task *task,
			     struct nfs_pgio_header *hdr,
			     struct inode *inode)
{
	int status = NFS_PROTO(inode)->read_done(task, hdr);
	if (status != 0)
		return status;

	nfs_add_stats(inode, NFSIOS_SERVERREADBYTES, hdr->res.count);
	trace_nfs_readpage_done(inode, task->tk_status,
				hdr->args.offset, hdr->res.eof);

	if (task->tk_status == -ESTALE) {
		set_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
		nfs_mark_for_revalidate(inode);
	}
	return 0;
}

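/*
 * Handle a short read: if the server made no progress at all, record an
 * -EIO; for non rpc-based layout drivers, request a retry through the
 * MDS; otherwise advance the arguments past the bytes received and
 * restart the RPC call to fetch the remainder.
 */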
static void nfs_readpage_retry(struct rpc_task *task,
			       struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_args *argp = &hdr->args;
	struct nfs_pgio_res *resp = &hdr->res;

	/* This is a short read! */
	nfs_inc_stats(hdr->inode, NFSIOS_SHORTREAD);
	/* Has the server at least made some progress? */
	if (resp->count == 0) {
		nfs_set_pgio_error(hdr, -EIO, argp->offset);
		return;
	}

	/* For non rpc-based layout drivers, retry-through-MDS */
	if (!task->tk_ops) {
		hdr->pnfs_error = -EAGAIN;
		return;
	}

	/* Yes, so retry the read at the end of the hdr */
	hdr->mds_offset += resp->count;
	argp->offset += resp->count;
	argp->pgbase += resp->count;
	argp->count -= resp->count;
	rpc_restart_call_prepare(task);
}

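/*
 * Post-RPC result processing: if the server reported EOF, trim
 * good_bytes down to the data actually returned; otherwise treat a
 * short count as a short read and retry the remainder.
 */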
static void nfs_readpage_result(struct rpc_task *task,
				struct nfs_pgio_header *hdr)
{
	if (hdr->res.eof) {
		loff_t bound;

		bound = hdr->args.offset + hdr->res.count;
		spin_lock(&hdr->lock);
		if (bound < hdr->io_start + hdr->good_bytes) {
			set_bit(NFS_IOHDR_EOF, &hdr->flags);
			clear_bit(NFS_IOHDR_ERROR, &hdr->flags);
			hdr->good_bytes = bound - hdr->io_start;
		}
		spin_unlock(&hdr->lock);
	} else if (hdr->res.count < hdr->args.count)
		nfs_readpage_retry(task, hdr);
}

/*
 * Read a page over NFS.
 * We read the page synchronously in the following case:
 *  -	The error flag is set for this page. This happens only when a
 *	previous async read operation failed.
 */
int nfs_readpage(struct file *file, struct page *page)
{
	struct nfs_open_context *ctx;
	struct inode *inode = page_file_mapping(page)->host;
	int error;

	dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
		page, PAGE_SIZE, page_index(page));
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
	nfs_add_stats(inode, NFSIOS_READPAGES, 1);

	/*
	 * Try to flush any pending writes to the file..
	 *
	 * NOTE! Because we own the page lock, there cannot
	 * be any new pending writes generated at this point
	 * for this page (other pages can be written to).
	 */
	error = nfs_wb_page(inode, page);
	if (error)
		goto out_unlock;
	if (PageUptodate(page))
		goto out_unlock;

	error = -ESTALE;
	if (NFS_STALE(inode))
		goto out_unlock;

	if (file == NULL) {
		error = -EBADF;
		ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (ctx == NULL)
			goto out_unlock;
	} else
		ctx = get_nfs_open_context(nfs_file_open_context(file));

	if (!IS_SYNC(inode)) {
		error = nfs_readpage_from_fscache(ctx, inode, page);
		if (error == 0)
			goto out;
	}

	error = nfs_readpage_async(ctx, inode, page);

out:
	put_nfs_open_context(ctx);
	return error;
out_unlock:
	unlock_page(page);
	return error;
}

struct nfs_readdesc {
	struct nfs_pageio_descriptor *pgio;
	struct nfs_open_context *ctx;
};

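/*
 * Filler callback used by read_cache_pages() in nfs_readpages(): create
 * a read request for each readahead page and add it to the shared pageio
 * descriptor, zeroing the tail of any partial page.
 */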
static int
readpage_async_filler(void *data, struct page *page)
{
	struct nfs_readdesc *desc = (struct nfs_readdesc *)data;
	struct nfs_page *new;
	unsigned int len;
	int error;

	len = nfs_page_length(page);
	if (len == 0)
		return nfs_return_empty_page(page);

	new = nfs_create_request(desc->ctx, page, NULL, 0, len);
	if (IS_ERR(new))
		goto out_error;

	if (len < PAGE_SIZE)
		zero_user_segment(page, len, PAGE_SIZE);
	if (!nfs_pageio_add_request(desc->pgio, new)) {
		nfs_list_remove_request(new);
		nfs_readpage_release(new);
		error = desc->pgio->pg_error;
		goto out;
	}
	return 0;
out_error:
	error = PTR_ERR(new);
	unlock_page(page);
out:
	return error;
}

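/*
 * The ->readpages() address_space operation: try to satisfy the request
 * from fscache first, then send the remaining pages through the pageio
 * layer and account the pages actually submitted.
 */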
int nfs_readpages(struct file *filp, struct address_space *mapping,
		  struct list_head *pages, unsigned nr_pages)
{
	struct nfs_pageio_descriptor pgio;
	struct nfs_pgio_mirror *pgm;
	struct nfs_readdesc desc = {
		.pgio = &pgio,
	};
	struct inode *inode = mapping->host;
	unsigned long npages;
	int ret = -ESTALE;

	dprintk("NFS: nfs_readpages (%s/%Lu %d)\n",
		inode->i_sb->s_id,
		(unsigned long long)NFS_FILEID(inode),
		nr_pages);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);

	if (NFS_STALE(inode))
		goto out;

	if (filp == NULL) {
		desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (desc.ctx == NULL)
			return -EBADF;
	} else
		desc.ctx = get_nfs_open_context(nfs_file_open_context(filp));

	/* attempt to read as many of the pages as possible from the cache
	 * - this returns -ENOBUFS immediately if the cookie is negative
	 */
	ret = nfs_readpages_from_fscache(desc.ctx, inode, mapping,
					 pages, &nr_pages);
	if (ret == 0)
		goto read_complete; /* all pages were read */

	nfs_pageio_init_read(&pgio, inode, false,
			     &nfs_async_read_completion_ops);

	ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);
	nfs_pageio_complete(&pgio);

	/* It doesn't make sense to do mirrored reads! */
	WARN_ON_ONCE(pgio.pg_mirror_count != 1);

	pgm = &pgio.pg_mirrors[0];
	NFS_I(inode)->read_io += pgm->pg_bytes_written;
	npages = (pgm->pg_bytes_written + PAGE_SIZE - 1) >> PAGE_SHIFT;
	nfs_add_stats(inode, NFSIOS_READPAGES, npages);
read_complete:
	put_nfs_open_context(desc.ctx);
out:
	return ret;
}

int __init nfs_init_readpagecache(void)
{
	nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
					     sizeof(struct nfs_pgio_header),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_rdata_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_readpagecache(void)
{
	kmem_cache_destroy(nfs_rdata_cachep);
}

static const struct nfs_rw_ops nfs_rw_read_ops = {
	.rw_alloc_header	= nfs_readhdr_alloc,
	.rw_free_header		= nfs_readhdr_free,
	.rw_done		= nfs_readpage_done,
	.rw_result		= nfs_readpage_result,
	.rw_initiate		= nfs_initiate_read,
};