/*
 * linux/fs/nfs/pagelist.c
 *
 * A set of helper functions for managing NFS read and write requests.
 * The main purpose of these routines is to provide support for the
 * coalescing of several requests into a single RPC call.
 *
 * Copyright 2000, 2001 (c) Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_page.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/export.h>

#include "internal.h"
#include "pnfs.h"

static struct kmem_cache *nfs_page_cachep;

bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount)
{
	p->npages = pagecount;
	if (pagecount <= ARRAY_SIZE(p->page_array))
		p->pagevec = p->page_array;
	else {
		p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
		if (!p->pagevec)
			p->npages = 0;
	}
	return p->pagevec != NULL;
}
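
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * caller of nfs_pgarray_set().  A small pagecount reuses the embedded
 * page_array and costs no allocation; a larger one falls back to
 * kcalloc(), and a false return means that allocation failed (npages
 * has then been reset to 0).
 */
static bool example_pgarray_use(struct nfs_page_array *arr,
				unsigned int npages)
{
	if (!nfs_pgarray_set(arr, npages))
		return false;		/* kcalloc() failed */
	/* ... populate arr->pagevec[0 .. npages-1] here ... */
	if (arr->pagevec != arr->page_array)
		kfree(arr->pagevec);	/* only the heap case is freed */
	return true;
}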

void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
		       struct nfs_pgio_header *hdr,
		       void (*release)(struct nfs_pgio_header *hdr))
{
	hdr->req = nfs_list_entry(desc->pg_list.next);
	hdr->inode = desc->pg_inode;
	hdr->cred = hdr->req->wb_context->cred;
	hdr->io_start = req_offset(hdr->req);
	hdr->good_bytes = desc->pg_count;
	hdr->release = release;
}

void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
{
	spin_lock(&hdr->lock);
	if (pos < hdr->io_start + hdr->good_bytes) {
		set_bit(NFS_IOHDR_ERROR, &hdr->flags);
		clear_bit(NFS_IOHDR_EOF, &hdr->flags);
		hdr->good_bytes = pos - hdr->io_start;
		hdr->error = error;
	}
	spin_unlock(&hdr->lock);
}
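
/*
 * Worked example (illustrative, not in the original file): suppose a
 * header describes I/O starting at io_start = 4096 with good_bytes =
 * 8192, i.e. the good range is [4096, 12288).  An error reported at
 * pos = 8192 lies inside that range, so nfs_set_pgio_error() shrinks
 * good_bytes to 8192 - 4096 = 4096 and records the error; a later
 * error at pos = 12288 or beyond leaves the header untouched, because
 * only the first failure inside the good range matters.
 */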

static inline struct nfs_page *
nfs_page_alloc(void)
{
	struct nfs_page *p = kmem_cache_zalloc(nfs_page_cachep, GFP_KERNEL);
	if (p)
		INIT_LIST_HEAD(&p->wb_list);
	return p;
}

static inline void
nfs_page_free(struct nfs_page *p)
{
	kmem_cache_free(nfs_page_cachep, p);
}

/**
 * nfs_create_request - Create an NFS read/write request.
 * @ctx: open context to use
 * @inode: inode to which the request is attached
 * @page: page to read from or write to
 * @offset: starting offset within the page for the read/write
 * @count: number of bytes to read/write
 *
 * The page must be locked by the caller. This makes sure we never
 * create two different requests for the same page.
 * The caller must ensure it is safe to sleep in this function.
 */
struct nfs_page *
nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
		   struct page *page,
		   unsigned int offset, unsigned int count)
{
	struct nfs_page *req;

	/* try to allocate the request struct */
	req = nfs_page_alloc();
	if (req == NULL)
		return ERR_PTR(-ENOMEM);

	/* get lock context early so we can deal with alloc failures */
	req->wb_lock_context = nfs_get_lock_context(ctx);
	if (req->wb_lock_context == NULL) {
		nfs_page_free(req);
		return ERR_PTR(-ENOMEM);
	}

	/* Initialize the request struct. Initially, we assume a
	 * long write-back delay. */
	req->wb_page = page;
	atomic_set(&req->wb_complete, 0);
	req->wb_index = page->index;
	page_cache_get(page);
	BUG_ON(PagePrivate(page));
	BUG_ON(!PageLocked(page));
	BUG_ON(page->mapping->host != inode);
	req->wb_offset = offset;
	req->wb_pgbase = offset;
	req->wb_bytes = count;
	req->wb_context = get_nfs_open_context(ctx);
	kref_init(&req->wb_kref);
	return req;
}
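
/*
 * Illustrative sketch (not part of the original file): the locking
 * contract of nfs_create_request().  The page must already be locked
 * when the request is created, which is what prevents two requests
 * from ever covering the same page.  The helper name below is
 * hypothetical.
 */
static struct nfs_page *example_request_for_page(struct nfs_open_context *ctx,
						 struct inode *inode,
						 struct page *page)
{
	struct nfs_page *req;

	lock_page(page);	/* required before nfs_create_request() */
	req = nfs_create_request(ctx, inode, page, 0, PAGE_CACHE_SIZE);
	if (IS_ERR(req))
		unlock_page(page);	/* nothing holds the page now */
	return req;		/* caller unlocks the page when done */
}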

/**
 * nfs_unlock_request - Unlock request and wake up sleepers.
 * @req: pointer to request to unlock
 */
void nfs_unlock_request(struct nfs_page *req)
{
	if (!NFS_WBACK_BUSY(req)) {
		printk(KERN_ERR "NFS: Invalid unlock attempted\n");
		BUG();
	}
	smp_mb__before_clear_bit();
	clear_bit(PG_BUSY, &req->wb_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&req->wb_flags, PG_BUSY);
	nfs_release_request(req);
}

/*
 * nfs_clear_request - Free up all resources allocated to the request
 * @req: pointer to request
 *
 * Release page and open context resources associated with a read/write
 * request after it has completed.
 */
static void nfs_clear_request(struct nfs_page *req)
{
	struct page *page = req->wb_page;
	struct nfs_open_context *ctx = req->wb_context;
	struct nfs_lock_context *l_ctx = req->wb_lock_context;

	if (page != NULL) {
		page_cache_release(page);
		req->wb_page = NULL;
	}
	if (l_ctx != NULL) {
		nfs_put_lock_context(l_ctx);
		req->wb_lock_context = NULL;
	}
	if (ctx != NULL) {
		put_nfs_open_context(ctx);
		req->wb_context = NULL;
	}
}

/* kref release callback: frees the request once the last count drops. */
static void nfs_free_request(struct kref *kref)
{
	struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);

	/* Release struct file and open context */
	nfs_clear_request(req);
	nfs_page_free(req);
}

/**
 * nfs_release_request - Release the count on an NFS read/write request
 * @req: request to release
 *
 * Note: Should never be called with the spinlock held!
 */
void nfs_release_request(struct nfs_page *req)
{
	kref_put(&req->wb_kref, nfs_free_request);
}

static int nfs_wait_bit_uninterruptible(void *word)
{
	io_schedule();
	return 0;
}

/**
 * nfs_wait_on_request - Wait for a request to complete.
 * @req: request to wait upon.
 *
 * Interruptible by fatal signals only.
 * The user is responsible for holding a count on the request.
 */
int
nfs_wait_on_request(struct nfs_page *req)
{
	return wait_on_bit(&req->wb_flags, PG_BUSY,
			   nfs_wait_bit_uninterruptible,
			   TASK_UNINTERRUPTIBLE);
}
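
/*
 * Illustrative sketch (not part of the original file): how the PG_BUSY
 * wait/wake pair above is used.  One side holds a reference and blocks
 * in nfs_wait_on_request(); the other side eventually calls
 * nfs_unlock_request(), whose clear_bit()/wake_up_bit() sequence (with
 * the memory barriers around it) is what lets the waiter proceed.
 */
static void example_wait_for_request(struct nfs_page *req)
{
	kref_get(&req->wb_kref);	/* hold a count while waiting */
	nfs_wait_on_request(req);	/* sleeps until PG_BUSY clears */
	nfs_release_request(req);	/* drop our count again */
}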

bool nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, struct nfs_page *prev, struct nfs_page *req)
{
	/*
	 * FIXME: ideally we should be able to coalesce all requests
	 * that are not block boundary aligned, but currently this
	 * is problematic for the case of bsize < PAGE_CACHE_SIZE,
	 * since nfs_flush_multi and nfs_pagein_multi assume you
	 * can have only one struct nfs_page.
	 */
	if (desc->pg_bsize < PAGE_SIZE)
		return 0;

	return desc->pg_count + req->wb_bytes <= desc->pg_bsize;
}
EXPORT_SYMBOL_GPL(nfs_generic_pg_test);
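
/*
 * Worked example (illustrative, not in the original file): with a
 * block size of pg_bsize = 65536 and pg_count = 61440 bytes already
 * coalesced, a 4096-byte request still fits (61440 + 4096 <= 65536),
 * so nfs_generic_pg_test() accepts it; one more 4096-byte request
 * would overflow the RPC and is rejected, forcing the descriptor to
 * flush before starting a new list.
 */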

/**
 * nfs_pageio_init - initialise a page io descriptor
 * @desc: pointer to descriptor
 * @inode: pointer to inode
 * @pg_ops: pointer to pageio operations
 * @bsize: io block size
 * @io_flags: extra parameters for the io function
 */
void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
		     struct inode *inode,
		     const struct nfs_pageio_ops *pg_ops,
		     size_t bsize,
		     int io_flags)
{
	INIT_LIST_HEAD(&desc->pg_list);
	desc->pg_bytes_written = 0;
	desc->pg_count = 0;
	desc->pg_bsize = bsize;
	desc->pg_base = 0;
	desc->pg_moreio = 0;
	desc->pg_recoalesce = 0;
	desc->pg_inode = inode;
	desc->pg_ops = pg_ops;
	desc->pg_ioflags = io_flags;
	desc->pg_error = 0;
	desc->pg_lseg = NULL;
}
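
/*
 * Illustrative sketch (not part of the original file): the usual
 * lifecycle of a descriptor.  "example_pageio_ops" is a hypothetical
 * ops table; real callers pass e.g. the read or write pageio ops and
 * the server's rsize/wsize as the block size.
 */
static void example_pageio_cycle(struct inode *inode,
				 const struct nfs_pageio_ops *example_pageio_ops,
				 struct nfs_page *req)
{
	struct nfs_pageio_descriptor desc;

	nfs_pageio_init(&desc, inode, example_pageio_ops,
			NFS_SERVER(inode)->rsize, 0);
	nfs_pageio_add_request(&desc, req);	/* coalesce (or flush) */
	nfs_pageio_complete(&desc);		/* send whatever is queued */
}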

/**
 * nfs_can_coalesce_requests - test two requests for compatibility
 * @prev: pointer to nfs_page
 * @req: pointer to nfs_page
 * @pgio: pointer to nfs_pageio_descriptor
 *
 * The nfs_page structures 'prev' and 'req' are compared to ensure that the
 * page data area they describe is contiguous, and that their RPC
 * credentials, NFSv4 open state, and lockowners are the same.
 *
 * Return 'true' if this is the case, else return 'false'.
 */
static bool nfs_can_coalesce_requests(struct nfs_page *prev,
				      struct nfs_page *req,
				      struct nfs_pageio_descriptor *pgio)
{
	if (req->wb_context->cred != prev->wb_context->cred)
		return false;
	if (req->wb_lock_context->lockowner != prev->wb_lock_context->lockowner)
		return false;
	if (req->wb_context->state != prev->wb_context->state)
		return false;
	if (req->wb_index != (prev->wb_index + 1))
		return false;
	if (req->wb_pgbase != 0)
		return false;
	if (prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE)
		return false;
	return pgio->pg_ops->pg_test(pgio, prev, req);
}
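
/*
 * Worked example (illustrative, not in the original file): two requests
 * coalesce only if they form one unbroken byte range.  With 4096-byte
 * pages, prev = {wb_index = 7, wb_pgbase = 0, wb_bytes = 4096} ends
 * exactly at a page boundary, so req = {wb_index = 8, wb_pgbase = 0}
 * is contiguous and passes the checks above; a req with wb_pgbase = 512,
 * or a prev covering only 2048 bytes of its page, would leave a hole in
 * the range and is refused.
 */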

/**
 * nfs_pageio_do_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
				     struct nfs_page *req)
{
	if (desc->pg_count != 0) {
		struct nfs_page *prev;

		prev = nfs_list_entry(desc->pg_list.prev);
		if (!nfs_can_coalesce_requests(prev, req, desc))
			return 0;
	} else {
		if (desc->pg_ops->pg_init)
			desc->pg_ops->pg_init(desc, req);
		desc->pg_base = req->wb_pgbase;
	}
	nfs_list_remove_request(req);
	nfs_list_add_request(req, &desc->pg_list);
	desc->pg_count += req->wb_bytes;
	return 1;
}

/*
 * Helper for nfs_pageio_add_request and nfs_pageio_complete
 */
static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
{
	if (!list_empty(&desc->pg_list)) {
		int error = desc->pg_ops->pg_doio(desc);
		if (error < 0)
			desc->pg_error = error;
		else
			desc->pg_bytes_written += desc->pg_count;
	}
	if (list_empty(&desc->pg_list)) {
		desc->pg_count = 0;
		desc->pg_base = 0;
	}
}

/**
 * __nfs_pageio_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
				    struct nfs_page *req)
{
	while (!nfs_pageio_do_add_request(desc, req)) {
		desc->pg_moreio = 1;
		nfs_pageio_doio(desc);
		if (desc->pg_error < 0)
			return 0;
		desc->pg_moreio = 0;
		if (desc->pg_recoalesce)
			return 0;
	}
	return 1;
}

static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
{
	LIST_HEAD(head);

	do {
		list_splice_init(&desc->pg_list, &head);
		desc->pg_bytes_written -= desc->pg_count;
		desc->pg_count = 0;
		desc->pg_base = 0;
		desc->pg_recoalesce = 0;

		while (!list_empty(&head)) {
			struct nfs_page *req;

			req = list_first_entry(&head, struct nfs_page, wb_list);
			nfs_list_remove_request(req);
			if (__nfs_pageio_add_request(desc, req))
				continue;
			if (desc->pg_error < 0)
				return 0;
			break;
		}
	} while (desc->pg_recoalesce);
	return 1;
}

int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *req)
{
	int ret;

	do {
		ret = __nfs_pageio_add_request(desc, req);
		if (ret)
			break;
		if (desc->pg_error < 0)
			break;
		ret = nfs_do_recoalesce(desc);
	} while (ret);
	return ret;
}
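
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * caller draining a list of requests through the descriptor.  A false
 * return from nfs_pageio_add_request() means the request could not be
 * queued, and desc->pg_error holds the reason.
 */
static int example_add_many(struct nfs_pageio_descriptor *desc,
			    struct list_head *pages)
{
	while (!list_empty(pages)) {
		struct nfs_page *req = nfs_list_entry(pages->next);

		nfs_list_remove_request(req);
		if (!nfs_pageio_add_request(desc, req))
			return desc->pg_error;	/* always negative here */
	}
	nfs_pageio_complete(desc);	/* flush the final partial list */
	return 0;
}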

/**
 * nfs_pageio_complete - Complete I/O on an nfs_pageio_descriptor
 * @desc: pointer to io descriptor
 */
void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
{
	for (;;) {
		nfs_pageio_doio(desc);
		if (!desc->pg_recoalesce)
			break;
		if (!nfs_do_recoalesce(desc))
			break;
	}
}

/**
 * nfs_pageio_cond_complete - Conditional I/O completion
 * @desc: pointer to io descriptor
 * @index: page index
 *
 * It is important to ensure that processes don't try to take locks
 * on non-contiguous ranges of pages as that might deadlock. This
 * function should be called before attempting to wait on a locked
 * nfs_page. It will complete the I/O if the page index 'index'
 * is not contiguous with the existing list of pages in 'desc'.
 */
void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
{
	if (!list_empty(&desc->pg_list)) {
		struct nfs_page *prev = nfs_list_entry(desc->pg_list.prev);
		if (index != prev->wb_index + 1)
			nfs_pageio_complete(desc);
	}
}
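
/*
 * Worked example (illustrative, not in the original file): if the
 * descriptor's list currently ends with the request for page index 11,
 * nfs_pageio_cond_complete(desc, 12) does nothing (the range stays
 * contiguous), while nfs_pageio_cond_complete(desc, 14) flushes the
 * pending I/O first, so the caller never sleeps on a locked page while
 * holding requests for a disjoint range.
 */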

int __init nfs_init_nfspagecache(void)
{
	nfs_page_cachep = kmem_cache_create("nfs_page",
					    sizeof(struct nfs_page),
					    0, SLAB_HWCACHE_ALIGN,
					    NULL);
	if (nfs_page_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_nfspagecache(void)
{
	kmem_cache_destroy(nfs_page_cachep);
}
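
/*
 * Illustrative sketch (not part of the original file): how the cache
 * constructor/destructor pair above is meant to be used from module
 * init/exit.  "example_init"/"example_exit" are hypothetical names;
 * in the real tree these calls are made from the NFS client's module
 * init path.
 */
static int __init example_init(void)
{
	int err = nfs_init_nfspagecache();

	if (err)
		return err;	/* -ENOMEM if the slab cache failed */
	/* ... register the rest of the client here ... */
	return 0;
}

static void __exit example_exit(void)
{
	nfs_destroy_nfspagecache();
}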