/*
 * linux/fs/nfs/read.c
 *
 * Block I/O for NFS
 *
 * Partial copy of Linus' read cache modifications to fs/nfs/file.c
 * modified for async RPC by okir@monad.swb.de
 */

#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>

#include "nfs4_fs.h"
#include "internal.h"
#include "iostat.h"
#include "fscache.h"
#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops;
static const struct nfs_rw_ops nfs_rw_read_ops;

static struct kmem_cache *nfs_rdata_cachep;

static struct nfs_pgio_header *nfs_readhdr_alloc(void)
{
	return kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL);
}

static void nfs_readhdr_free(struct nfs_pgio_header *rhdr)
{
	kmem_cache_free(nfs_rdata_cachep, rhdr);
}

static
int nfs_return_empty_page(struct page *page)
{
	zero_user(page, 0, PAGE_SIZE);
	SetPageUptodate(page);
	unlock_page(page);
	return 0;
}

void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
			  struct inode *inode, bool force_mds,
			  const struct nfs_pgio_completion_ops *compl_ops)
{
	struct nfs_server *server = NFS_SERVER(inode);
	const struct nfs_pageio_ops *pg_ops = &nfs_pgio_rw_ops;

#ifdef CONFIG_NFS_V4_1
	if (server->pnfs_curr_ld && !force_mds)
		pg_ops = server->pnfs_curr_ld->pg_read_ops;
#endif
	nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_read_ops,
			server->rsize, 0);
}
EXPORT_SYMBOL_GPL(nfs_pageio_init_read);

void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
{
	struct nfs_pgio_mirror *mirror;

	if (pgio->pg_ops && pgio->pg_ops->pg_cleanup)
		pgio->pg_ops->pg_cleanup(pgio);

	pgio->pg_ops = &nfs_pgio_rw_ops;

	/* read path should never have more than one mirror */
	WARN_ON_ONCE(pgio->pg_mirror_count != 1);

	mirror = &pgio->pg_mirrors[0];
	mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->rsize;
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);

static void nfs_readpage_release(struct nfs_page *req)
{
	struct inode *inode = d_inode(req->wb_context->dentry);

	dprintk("NFS: read done (%s/%llu %d@%lld)\n", inode->i_sb->s_id,
		(unsigned long long)NFS_FILEID(inode), req->wb_bytes,
		(long long)req_offset(req));

	if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) {
		if (PageUptodate(req->wb_page))
			nfs_readpage_to_fscache(inode, req->wb_page, 0);

		unlock_page(req->wb_page);
	}
	nfs_release_request(req);
}

int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
		       struct page *page)
{
	struct nfs_page *new;
	unsigned int len;
	struct nfs_pageio_descriptor pgio;
	struct nfs_pgio_mirror *pgm;

	len = nfs_page_length(page);
	if (len == 0)
		return nfs_return_empty_page(page);
	new = nfs_create_request(ctx, page, NULL, 0, len);
	if (IS_ERR(new)) {
		unlock_page(page);
		return PTR_ERR(new);
	}
	if (len < PAGE_SIZE)
		zero_user_segment(page, len, PAGE_SIZE);

	nfs_pageio_init_read(&pgio, inode, false,
			     &nfs_async_read_completion_ops);
	if (!nfs_pageio_add_request(&pgio, new)) {
		nfs_list_remove_request(new);
		nfs_readpage_release(new);
	}
	nfs_pageio_complete(&pgio);

	/* It doesn't make sense to do mirrored reads! */
	WARN_ON_ONCE(pgio.pg_mirror_count != 1);

	pgm = &pgio.pg_mirrors[0];
	NFS_I(inode)->read_io += pgm->pg_bytes_written;

	return pgio.pg_error < 0 ? pgio.pg_error : 0;
}
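
/*
 * The helper above drives I/O through the generic pageio machinery:
 * initialize the descriptor, add the request, then let
 * nfs_pageio_complete() issue the coalesced RPCs. The readahead path
 * (nfs_readpages() below) follows the same init/add/complete pattern,
 * one request per page via readpage_async_filler().
 */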

static void nfs_page_group_set_uptodate(struct nfs_page *req)
{
	if (nfs_page_group_sync_on_bit(req, PG_UPTODATE))
		SetPageUptodate(req->wb_page);
}

static void nfs_read_completion(struct nfs_pgio_header *hdr)
{
	unsigned long bytes = 0;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out;
	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
		struct page *page = req->wb_page;
		unsigned long start = req->wb_pgbase;
		unsigned long end = req->wb_pgbase + req->wb_bytes;

		if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
			/* note: regions of the page not covered by a
			 * request are zeroed in nfs_readpage_async /
			 * readpage_async_filler */
			if (bytes > hdr->good_bytes) {
				/* nothing in this request was good, so zero
				 * the full extent of the request */
				zero_user_segment(page, start, end);

			} else if (hdr->good_bytes - bytes < req->wb_bytes) {
				/* part of this request has good bytes, but
				 * not all. zero the bad bytes */
				start += hdr->good_bytes - bytes;
				WARN_ON(start < req->wb_pgbase);
				zero_user_segment(page, start, end);
			}
		}
		bytes += req->wb_bytes;
		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
			if (bytes <= hdr->good_bytes)
				nfs_page_group_set_uptodate(req);
		} else
			nfs_page_group_set_uptodate(req);
		nfs_list_remove_request(req);
		nfs_readpage_release(req);
	}
out:
	hdr->release(hdr);
}
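
/*
 * Worked example of the zeroing above, with hypothetical numbers: given
 * two 4096-byte requests and hdr->good_bytes = 6144 at EOF, the first
 * request is entirely good; for the second, good_bytes - bytes = 2048
 * is less than wb_bytes, so the request's tail from pgbase + 2048 to
 * its end is zeroed before the page group is marked uptodate.
 */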

static void nfs_initiate_read(struct nfs_pgio_header *hdr,
			      struct rpc_message *msg,
			      const struct nfs_rpc_ops *rpc_ops,
			      struct rpc_task_setup *task_setup_data, int how)
{
	struct inode *inode = hdr->inode;
	int swap_flags = IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0;

	task_setup_data->flags |= swap_flags;
	rpc_ops->read_setup(hdr, msg);
}

static void
nfs_async_read_error(struct list_head *head)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_readpage_release(req);
	}
}

static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops = {
	.error_cleanup = nfs_async_read_error,
	.completion = nfs_read_completion,
};

/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
static int nfs_readpage_done(struct rpc_task *task,
			     struct nfs_pgio_header *hdr,
			     struct inode *inode)
{
	int status = NFS_PROTO(inode)->read_done(task, hdr);
	if (status != 0)
		return status;

	nfs_add_stats(inode, NFSIOS_SERVERREADBYTES, hdr->res.count);

	if (task->tk_status == -ESTALE) {
		set_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
		nfs_mark_for_revalidate(inode);
	}
	return 0;
}

static void nfs_readpage_retry(struct rpc_task *task,
			       struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_args *argp = &hdr->args;
	struct nfs_pgio_res *resp = &hdr->res;

	/* This is a short read! */
	nfs_inc_stats(hdr->inode, NFSIOS_SHORTREAD);
	/* Has the server at least made some progress? */
	if (resp->count == 0) {
		nfs_set_pgio_error(hdr, -EIO, argp->offset);
		return;
	}

	/* For non rpc-based layout drivers, retry-through-MDS */
	if (!task->tk_ops) {
		hdr->pnfs_error = -EAGAIN;
		return;
	}

	/* Yes, so retry the read at the end of the hdr */
	hdr->mds_offset += resp->count;
	argp->offset += resp->count;
	argp->pgbase += resp->count;
	argp->count -= resp->count;
	rpc_restart_call_prepare(task);
}
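
/*
 * Short-read example, with hypothetical numbers: a 16384-byte request
 * at offset 0 answered with only 4096 bytes advances mds_offset,
 * offset and pgbase by 4096, shrinks count to 12288, and restarts the
 * RPC so the remainder is fetched starting at byte 4096.
 */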

static void nfs_readpage_result(struct rpc_task *task,
				struct nfs_pgio_header *hdr)
{
	if (hdr->res.eof) {
		loff_t bound;

		bound = hdr->args.offset + hdr->res.count;
		spin_lock(&hdr->lock);
		if (bound < hdr->io_start + hdr->good_bytes) {
			set_bit(NFS_IOHDR_EOF, &hdr->flags);
			clear_bit(NFS_IOHDR_ERROR, &hdr->flags);
			hdr->good_bytes = bound - hdr->io_start;
		}
		spin_unlock(&hdr->lock);
	} else if (hdr->res.count < hdr->args.count)
		nfs_readpage_retry(task, hdr);
}
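
/*
 * EOF example, with hypothetical numbers: reading 8192 bytes at offset
 * 4096 from a 6144-byte file returns count = 2048 with eof set, so
 * bound = 6144; good_bytes is clamped to bound - io_start = 2048 and
 * nfs_read_completion() zeroes the rest of the range.
 */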

/*
 * Read a page over NFS.
 * We read the page synchronously in the following case:
 *  -	The error flag is set for this page. This happens only when a
 *	previous async read operation failed.
 */
int nfs_readpage(struct file *file, struct page *page)
{
	struct nfs_open_context *ctx;
	struct inode *inode = page_file_mapping(page)->host;
	int error;

	dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
		page, PAGE_SIZE, page_file_index(page));
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
	nfs_add_stats(inode, NFSIOS_READPAGES, 1);

	/*
	 * Try to flush any pending writes to the file..
	 *
	 * NOTE! Because we own the page lock, there cannot
	 * be any new pending writes generated at this point
	 * for this page (other pages can be written to).
	 */
	error = nfs_wb_page(inode, page);
	if (error)
		goto out_unlock;
	if (PageUptodate(page))
		goto out_unlock;

	error = -ESTALE;
	if (NFS_STALE(inode))
		goto out_unlock;

	if (file == NULL) {
		error = -EBADF;
		ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (ctx == NULL)
			goto out_unlock;
	} else
		ctx = get_nfs_open_context(nfs_file_open_context(file));

	if (!IS_SYNC(inode)) {
		error = nfs_readpage_from_fscache(ctx, inode, page);
		if (error == 0)
			goto out;
	}

	error = nfs_readpage_async(ctx, inode, page);

out:
	put_nfs_open_context(ctx);
	return error;
out_unlock:
	unlock_page(page);
	return error;
}
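
/*
 * Ordering note for the function above: pending writes are flushed
 * first so the read sees committed data, the uptodate and ESTALE
 * checks then short-circuit, fscache is tried for inodes that are not
 * marked synchronous (!IS_SYNC), and only as a last step does the page
 * go out on the wire via nfs_readpage_async().
 */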

struct nfs_readdesc {
	struct nfs_pageio_descriptor *pgio;
	struct nfs_open_context *ctx;
};

static int
readpage_async_filler(void *data, struct page *page)
{
	struct nfs_readdesc *desc = (struct nfs_readdesc *)data;
	struct nfs_page *new;
	unsigned int len;
	int error;

	len = nfs_page_length(page);
	if (len == 0)
		return nfs_return_empty_page(page);

	new = nfs_create_request(desc->ctx, page, NULL, 0, len);
	if (IS_ERR(new))
		goto out_error;

	if (len < PAGE_SIZE)
		zero_user_segment(page, len, PAGE_SIZE);
	if (!nfs_pageio_add_request(desc->pgio, new)) {
		nfs_list_remove_request(new);
		nfs_readpage_release(new);
		error = desc->pgio->pg_error;
		goto out_unlock;
	}
	return 0;
out_error:
	error = PTR_ERR(new);
out_unlock:
	unlock_page(page);
	return error;
}

int nfs_readpages(struct file *filp, struct address_space *mapping,
		  struct list_head *pages, unsigned nr_pages)
{
	struct nfs_pageio_descriptor pgio;
	struct nfs_pgio_mirror *pgm;
	struct nfs_readdesc desc = {
		.pgio = &pgio,
	};
	struct inode *inode = mapping->host;
	unsigned long npages;
	int ret = -ESTALE;

	dprintk("NFS: nfs_readpages (%s/%Lu %d)\n",
		inode->i_sb->s_id,
		(unsigned long long)NFS_FILEID(inode),
		nr_pages);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);

	if (NFS_STALE(inode))
		goto out;

	if (filp == NULL) {
		desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (desc.ctx == NULL)
			return -EBADF;
	} else
		desc.ctx = get_nfs_open_context(nfs_file_open_context(filp));

	/* attempt to read as many of the pages as possible from the cache
	 * - this returns -ENOBUFS immediately if the cookie is negative
	 */
	ret = nfs_readpages_from_fscache(desc.ctx, inode, mapping,
					 pages, &nr_pages);
	if (ret == 0)
		goto read_complete; /* all pages were read */

	nfs_pageio_init_read(&pgio, inode, false,
			     &nfs_async_read_completion_ops);

	ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);
	nfs_pageio_complete(&pgio);

	/* It doesn't make sense to do mirrored reads! */
	WARN_ON_ONCE(pgio.pg_mirror_count != 1);

	pgm = &pgio.pg_mirrors[0];
	NFS_I(inode)->read_io += pgm->pg_bytes_written;
	npages = (pgm->pg_bytes_written + PAGE_SIZE - 1) >> PAGE_SHIFT;
	nfs_add_stats(inode, NFSIOS_READPAGES, npages);
read_complete:
	put_nfs_open_context(desc.ctx);
out:
	return ret;
}
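
/*
 * The accounting above rounds partial pages up: with PAGE_SIZE = 4096
 * (PAGE_SHIFT = 12), pg_bytes_written = 10000 gives
 * npages = (10000 + 4095) >> 12 = 3.
 */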

int __init nfs_init_readpagecache(void)
{
	nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
					     sizeof(struct nfs_pgio_header),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_rdata_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_readpagecache(void)
{
	kmem_cache_destroy(nfs_rdata_cachep);
}

static const struct nfs_rw_ops nfs_rw_read_ops = {
	.rw_mode = FMODE_READ,
	.rw_alloc_header = nfs_readhdr_alloc,
	.rw_free_header = nfs_readhdr_free,
	.rw_done = nfs_readpage_done,
	.rw_result = nfs_readpage_result,
	.rw_initiate = nfs_initiate_read,
};