/*
 * linux/fs/nfs/pagelist.c
 *
 * A set of helper functions for managing NFS read and write requests.
 * The main purpose of these routines is to provide support for the
 * coalescing of several requests into a single RPC call.
 *
 * Copyright 2000, 2001 (c) Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_page.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/export.h>

#include "internal.h"
#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

static struct kmem_cache *nfs_page_cachep;
static const struct rpc_call_ops nfs_pgio_common_ops;

static void nfs_free_request(struct nfs_page *);

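/*
 * Set up the nfs_page_array to hold @pagecount pages: use the embedded
 * page_array when it is large enough, otherwise fall back to kcalloc().
 * Returns true on success, false if the allocation failed.
 */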
static bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount)
{
	p->npages = pagecount;
	if (pagecount <= ARRAY_SIZE(p->page_array))
		p->pagevec = p->page_array;
	else {
		p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
		if (!p->pagevec)
			p->npages = 0;
	}
	return p->pagevec != NULL;
}

void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
		       struct nfs_pgio_header *hdr,
		       void (*release)(struct nfs_pgio_header *hdr))
{
	hdr->req = nfs_list_entry(desc->pg_list.next);
	hdr->inode = desc->pg_inode;
	hdr->cred = hdr->req->wb_context->cred;
	hdr->io_start = req_offset(hdr->req);
	hdr->good_bytes = desc->pg_count;
	hdr->dreq = desc->pg_dreq;
	hdr->layout_private = desc->pg_layout_private;
	hdr->release = release;
	hdr->completion_ops = desc->pg_completion_ops;
	if (hdr->completion_ops->init_hdr)
		hdr->completion_ops->init_hdr(hdr);
}
EXPORT_SYMBOL_GPL(nfs_pgheader_init);

void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
{
	spin_lock(&hdr->lock);
	if (pos < hdr->io_start + hdr->good_bytes) {
		set_bit(NFS_IOHDR_ERROR, &hdr->flags);
		clear_bit(NFS_IOHDR_EOF, &hdr->flags);
		hdr->good_bytes = pos - hdr->io_start;
		hdr->error = error;
	}
	spin_unlock(&hdr->lock);
}

static inline struct nfs_page *
nfs_page_alloc(void)
{
	struct nfs_page *p = kmem_cache_zalloc(nfs_page_cachep, GFP_NOIO);
	if (p)
		INIT_LIST_HEAD(&p->wb_list);
	return p;
}

static inline void
nfs_page_free(struct nfs_page *p)
{
	kmem_cache_free(nfs_page_cachep, p);
}

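/*
 * The nfs_io_counter embedded in a lock context tracks the number of
 * in-flight read/write requests for that context; nfs_iocounter_wait()
 * below sleeps until the count drops back to zero.
 */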
static void
nfs_iocounter_inc(struct nfs_io_counter *c)
{
	atomic_inc(&c->io_count);
}

static void
nfs_iocounter_dec(struct nfs_io_counter *c)
{
	if (atomic_dec_and_test(&c->io_count)) {
		clear_bit(NFS_IO_INPROGRESS, &c->flags);
		smp_mb__after_clear_bit();
		wake_up_bit(&c->flags, NFS_IO_INPROGRESS);
	}
}

static int
__nfs_iocounter_wait(struct nfs_io_counter *c)
{
	wait_queue_head_t *wq = bit_waitqueue(&c->flags, NFS_IO_INPROGRESS);
	DEFINE_WAIT_BIT(q, &c->flags, NFS_IO_INPROGRESS);
	int ret = 0;

	do {
		prepare_to_wait(wq, &q.wait, TASK_KILLABLE);
		set_bit(NFS_IO_INPROGRESS, &c->flags);
		if (atomic_read(&c->io_count) == 0)
			break;
		ret = nfs_wait_bit_killable(&c->flags);
	} while (atomic_read(&c->io_count) != 0);
	finish_wait(wq, &q.wait);
	return ret;
}

/**
 * nfs_iocounter_wait - wait for i/o to complete
 * @c: nfs_io_counter to use
 *
 * returns -ERESTARTSYS if interrupted by a fatal signal.
 * Otherwise returns 0 once the io_count hits 0.
 */
int
nfs_iocounter_wait(struct nfs_io_counter *c)
{
	if (atomic_read(&c->io_count) == 0)
		return 0;
	return __nfs_iocounter_wait(c);
}

/*
 * nfs_page_group_lock - lock the head of the page group
 * @req - request in group that is to be locked
 *
 * this lock must be held if modifying the page group list
 */
void
nfs_page_group_lock(struct nfs_page *req)
{
	struct nfs_page *head = req->wb_head;
	int err = -EAGAIN;

	WARN_ON_ONCE(head != head->wb_head);

	while (err)
		err = wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK,
			nfs_wait_bit_killable, TASK_KILLABLE);
}

/*
 * nfs_page_group_unlock - unlock the head of the page group
 * @req - request in group that is to be unlocked
 */
void
nfs_page_group_unlock(struct nfs_page *req)
{
	struct nfs_page *head = req->wb_head;

	WARN_ON_ONCE(head != head->wb_head);

	smp_mb__before_clear_bit();
	clear_bit(PG_HEADLOCK, &head->wb_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&head->wb_flags, PG_HEADLOCK);
}

/*
 * nfs_page_group_sync_on_bit_locked
 *
 * must be called with page group lock held
 */
static bool
nfs_page_group_sync_on_bit_locked(struct nfs_page *req, unsigned int bit)
{
	struct nfs_page *head = req->wb_head;
	struct nfs_page *tmp;

	WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &head->wb_flags));
	WARN_ON_ONCE(test_and_set_bit(bit, &req->wb_flags));

	tmp = req->wb_this_page;
	while (tmp != req) {
		if (!test_bit(bit, &tmp->wb_flags))
			return false;
		tmp = tmp->wb_this_page;
	}

	/* true! reset all bits */
	tmp = req;
	do {
		clear_bit(bit, &tmp->wb_flags);
		tmp = tmp->wb_this_page;
	} while (tmp != req);

	return true;
}

/*
 * nfs_page_group_sync_on_bit - set bit on current request, but only
 *   return true if the bit is set for all requests in page group
 * @req - request in page group
 * @bit - PG_* bit that is used to sync page group
 */
bool nfs_page_group_sync_on_bit(struct nfs_page *req, unsigned int bit)
{
	bool ret;

	nfs_page_group_lock(req);
	ret = nfs_page_group_sync_on_bit_locked(req, bit);
	nfs_page_group_unlock(req);

	return ret;
}

/*
 * nfs_page_group_init - Initialize the page group linkage for @req
 * @req - a new nfs request
 * @prev - the previous request in page group, or NULL if @req is the first
 *         or only request in the group (the head).
 */
static inline void
nfs_page_group_init(struct nfs_page *req, struct nfs_page *prev)
{
	WARN_ON_ONCE(prev == req);

	if (!prev) {
		req->wb_head = req;
		req->wb_this_page = req;
	} else {
		WARN_ON_ONCE(prev->wb_this_page != prev->wb_head);
		WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &prev->wb_head->wb_flags));
		req->wb_head = prev->wb_head;
		req->wb_this_page = prev->wb_this_page;
		prev->wb_this_page = req;

		/* grab extra ref if head request has extra ref from
		 * the write/commit path to handle handoff between write
		 * and commit lists */
		if (test_bit(PG_INODE_REF, &prev->wb_head->wb_flags))
			kref_get(&req->wb_kref);
	}
}

/*
 * nfs_page_group_destroy - sync the destruction of page groups
 * @req - request that no longer needs the page group
 *
 * releases the page group reference from each member once all
 * members have called this function.
 */
static void
nfs_page_group_destroy(struct kref *kref)
{
	struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);
	struct nfs_page *tmp, *next;

	if (!nfs_page_group_sync_on_bit(req, PG_TEARDOWN))
		return;

	tmp = req;
	do {
		next = tmp->wb_this_page;
		/* unlink and free */
		tmp->wb_this_page = tmp;
		tmp->wb_head = tmp;
		nfs_free_request(tmp);
		tmp = next;
	} while (tmp != req);
}

/**
 * nfs_create_request - Create an NFS read/write request.
 * @ctx: open context to use
 * @page: page to write
 * @last: last nfs request created for this page group or NULL if head
 * @offset: starting offset within the page for the write
 * @count: number of bytes to read/write
 *
 * The page must be locked by the caller. This makes sure we never
 * create two different requests for the same page.
 * User should ensure it is safe to sleep in this function.
 */
struct nfs_page *
nfs_create_request(struct nfs_open_context *ctx, struct page *page,
		   struct nfs_page *last, unsigned int offset,
		   unsigned int count)
{
	struct nfs_page *req;
	struct nfs_lock_context *l_ctx;

	if (test_bit(NFS_CONTEXT_BAD, &ctx->flags))
		return ERR_PTR(-EBADF);
	/* try to allocate the request struct */
	req = nfs_page_alloc();
	if (req == NULL)
		return ERR_PTR(-ENOMEM);

	/* get lock context early so we can deal with alloc failures */
	l_ctx = nfs_get_lock_context(ctx);
	if (IS_ERR(l_ctx)) {
		nfs_page_free(req);
		return ERR_CAST(l_ctx);
	}
	req->wb_lock_context = l_ctx;
	nfs_iocounter_inc(&l_ctx->io_count);

	/* Initialize the request struct. Initially, we assume a
	 * long write-back delay. This will be adjusted in
	 * update_nfs_request below if the region is not locked. */
	req->wb_page = page;
	req->wb_index = page_file_index(page);
	page_cache_get(page);
	req->wb_offset = offset;
	req->wb_pgbase = offset;
	req->wb_bytes = count;
	req->wb_context = get_nfs_open_context(ctx);
	kref_init(&req->wb_kref);
	nfs_page_group_init(req, last);
	return req;
}

/**
 * nfs_unlock_request - Unlock request and wake up sleepers.
 * @req:
 */
void nfs_unlock_request(struct nfs_page *req)
{
	if (!NFS_WBACK_BUSY(req)) {
		printk(KERN_ERR "NFS: Invalid unlock attempted\n");
		BUG();
	}
	smp_mb__before_clear_bit();
	clear_bit(PG_BUSY, &req->wb_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&req->wb_flags, PG_BUSY);
}

/**
 * nfs_unlock_and_release_request - Unlock request and release the nfs_page
 * @req:
 */
void nfs_unlock_and_release_request(struct nfs_page *req)
{
	nfs_unlock_request(req);
	nfs_release_request(req);
}

/*
 * nfs_clear_request - Free up all resources allocated to the request
 * @req:
 *
 * Release page and open context resources associated with a read/write
 * request after it has completed.
 */
static void nfs_clear_request(struct nfs_page *req)
{
	struct page *page = req->wb_page;
	struct nfs_open_context *ctx = req->wb_context;
	struct nfs_lock_context *l_ctx = req->wb_lock_context;

	if (page != NULL) {
		page_cache_release(page);
		req->wb_page = NULL;
	}
	if (l_ctx != NULL) {
		nfs_iocounter_dec(&l_ctx->io_count);
		nfs_put_lock_context(l_ctx);
		req->wb_lock_context = NULL;
	}
	if (ctx != NULL) {
		put_nfs_open_context(ctx);
		req->wb_context = NULL;
	}
}

/**
 * nfs_release_request - Release the count on an NFS read/write request
 * @req: request to release
 *
 * Note: Should never be called with the spinlock held!
 */
static void nfs_free_request(struct nfs_page *req)
{
	WARN_ON_ONCE(req->wb_this_page != req);

	/* extra debug: make sure no sync bits are still set */
	WARN_ON_ONCE(test_bit(PG_TEARDOWN, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_UNLOCKPAGE, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_UPTODATE, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_WB_END, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_REMOVE, &req->wb_flags));

	/* Release struct file and open context */
	nfs_clear_request(req);
	nfs_page_free(req);
}

void nfs_release_request(struct nfs_page *req)
{
	kref_put(&req->wb_kref, nfs_page_group_destroy);
}

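/*
 * Bit-wait action used by nfs_wait_on_request(): sleep in io_schedule()
 * and never report an interruption, so the wait below is uninterruptible.
 */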
static int nfs_wait_bit_uninterruptible(void *word)
{
	io_schedule();
	return 0;
}

/**
 * nfs_wait_on_request - Wait for a request to complete.
 * @req: request to wait upon.
 *
 * The wait itself is uninterruptible (see nfs_wait_bit_uninterruptible
 * above).
 * The user is responsible for holding a count on the request.
 */
int
nfs_wait_on_request(struct nfs_page *req)
{
	return wait_on_bit(&req->wb_flags, PG_BUSY,
			nfs_wait_bit_uninterruptible,
			TASK_UNINTERRUPTIBLE);
}

/*
 * nfs_generic_pg_test - determine if requests can be coalesced
 * @desc: pointer to descriptor
 * @prev: previous request in desc, or NULL
 * @req: this request
 *
 * Returns zero if @req cannot be coalesced into @desc, otherwise it returns
 * the number of bytes of @req that can be coalesced.
 */
size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *prev, struct nfs_page *req)
{
	if (desc->pg_count > desc->pg_bsize) {
		/* should never happen */
		WARN_ON_ONCE(1);
		return 0;
	}

	return min(desc->pg_bsize - desc->pg_count, (size_t)req->wb_bytes);
}
EXPORT_SYMBOL_GPL(nfs_generic_pg_test);

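/*
 * Get the nfs_rw_header that embeds a given nfs_pgio_header.
 */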
static inline struct nfs_rw_header *NFS_RW_HEADER(struct nfs_pgio_header *hdr)
{
	return container_of(hdr, struct nfs_rw_header, header);
}

/**
 * nfs_rw_header_alloc - Allocate a header for a read or write
 * @ops: Read or write function vector
 */
struct nfs_rw_header *nfs_rw_header_alloc(const struct nfs_rw_ops *ops)
{
	struct nfs_rw_header *header = ops->rw_alloc_header();

	if (header) {
		struct nfs_pgio_header *hdr = &header->header;

		INIT_LIST_HEAD(&hdr->pages);
		INIT_LIST_HEAD(&hdr->rpc_list);
		spin_lock_init(&hdr->lock);
		atomic_set(&hdr->refcnt, 0);
		hdr->rw_ops = ops;
	}
	return header;
}
EXPORT_SYMBOL_GPL(nfs_rw_header_alloc);

/*
 * nfs_rw_header_free - Free a read or write header
 * @hdr: The header to free
 */
void nfs_rw_header_free(struct nfs_pgio_header *hdr)
{
	hdr->rw_ops->rw_free_header(NFS_RW_HEADER(hdr));
}
EXPORT_SYMBOL_GPL(nfs_rw_header_free);

/**
 * nfs_pgio_data_alloc - Allocate pageio data
 * @hdr: The header making a request
 * @pagecount: Number of pages to create
 */
static struct nfs_pgio_data *nfs_pgio_data_alloc(struct nfs_pgio_header *hdr,
						 unsigned int pagecount)
{
	struct nfs_pgio_data *data, *prealloc;

	prealloc = &NFS_RW_HEADER(hdr)->rpc_data;
	if (prealloc->header == NULL)
		data = prealloc;
	else
		data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		goto out;

	if (nfs_pgarray_set(&data->pages, pagecount)) {
		data->header = hdr;
		atomic_inc(&hdr->refcnt);
	} else {
		if (data != prealloc)
			kfree(data);
		data = NULL;
	}
out:
	return data;
}

/**
 * nfs_pgio_data_release - Properly free pageio data
 * @data: The data to release
 */
void nfs_pgio_data_release(struct nfs_pgio_data *data)
{
	struct nfs_pgio_header *hdr = data->header;
	struct nfs_rw_header *pageio_header = NFS_RW_HEADER(hdr);

	put_nfs_open_context(data->args.context);
	if (data->pages.pagevec != data->pages.page_array)
		kfree(data->pages.pagevec);
	if (data == &pageio_header->rpc_data) {
		data->header = NULL;
		data = NULL;
	}
	if (atomic_dec_and_test(&hdr->refcnt))
		hdr->completion_ops->completion(hdr);
	/* Note: we only free the rpc_task after callbacks are done.
	 * See the comment in rpc_free_task() for why
	 */
	kfree(data);
}
EXPORT_SYMBOL_GPL(nfs_pgio_data_release);

/**
 * nfs_pgio_rpcsetup - Set up arguments for a pageio call
 * @data: The pageio data
 * @count: Number of bytes to read
 * @offset: Initial offset
 * @how: How to commit data (writes only)
 * @cinfo: Commit information for the call (writes only)
 */
static void nfs_pgio_rpcsetup(struct nfs_pgio_data *data,
			      unsigned int count, unsigned int offset,
			      int how, struct nfs_commit_info *cinfo)
{
	struct nfs_page *req = data->header->req;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	data->args.fh = NFS_FH(data->header->inode);
	data->args.offset = req_offset(req) + offset;
	/* pnfs_set_layoutcommit needs this */
	data->mds_offset = data->args.offset;
	data->args.pgbase = req->wb_pgbase + offset;
	data->args.pages = data->pages.pagevec;
	data->args.count = count;
	data->args.context = get_nfs_open_context(req->wb_context);
	data->args.lock_context = req->wb_lock_context;
	data->args.stable = NFS_UNSTABLE;
	switch (how & (FLUSH_STABLE | FLUSH_COND_STABLE)) {
	case 0:
		break;
	case FLUSH_COND_STABLE:
		if (nfs_reqs_to_commit(cinfo))
			break;
	default:
		data->args.stable = NFS_FILE_SYNC;
	}

	data->res.fattr = &data->fattr;
	data->res.count = count;
	data->res.eof = 0;
	data->res.verf = &data->verf;
	nfs_fattr_init(&data->fattr);
}

/**
 * nfs_pgio_prepare - Prepare pageio data to go over the wire
 * @task: The current task
 * @calldata: pageio data to prepare
 */
static void nfs_pgio_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs_pgio_data *data = calldata;
	int err;
	err = NFS_PROTO(data->header->inode)->pgio_rpc_prepare(task, data);
	if (err)
		rpc_exit(task, err);
}

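/*
 * Set up and start the RPC task for a read or write.  When FLUSH_SYNC is
 * set in @how, wait for the task to complete and return its status;
 * otherwise return as soon as the task has been launched.
 */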
int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_data *data,
		      const struct rpc_call_ops *call_ops, int how, int flags)
{
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
		.rpc_cred = data->header->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clnt,
		.task = &data->task,
		.rpc_message = &msg,
		.callback_ops = call_ops,
		.callback_data = data,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC | flags,
	};
	int ret = 0;

	data->header->rw_ops->rw_initiate(data, &msg, &task_setup_data, how);

	dprintk("NFS: %5u initiated pgio call "
		"(req %s/%llu, %u bytes @ offset %llu)\n",
		data->task.tk_pid,
		data->header->inode->i_sb->s_id,
		(unsigned long long)NFS_FILEID(data->header->inode),
		data->args.count,
		(unsigned long long)data->args.offset);

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task)) {
		ret = PTR_ERR(task);
		goto out;
	}
	if (how & FLUSH_SYNC) {
		ret = rpc_wait_for_completion_task(task);
		if (ret == 0)
			ret = task->tk_status;
	}
	rpc_put_task(task);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_initiate_pgio);

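/*
 * Start one RPC call for each nfs_pgio_data queued on @head.  All entries
 * are submitted; the first error encountered (if any) is returned.
 */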
static int nfs_do_multiple_pgios(struct list_head *head,
				 const struct rpc_call_ops *call_ops,
				 int how)
{
	struct nfs_pgio_data *data;
	int ret = 0;

	while (!list_empty(head)) {
		int ret2;

		data = list_first_entry(head, struct nfs_pgio_data, list);
		list_del_init(&data->list);

		ret2 = nfs_initiate_pgio(NFS_CLIENT(data->header->inode),
					 data, call_ops, how, 0);
		if (ret == 0)
			ret = ret2;
	}
	return ret;
}

/**
 * nfs_pgio_error - Clean up from a pageio error
 * @desc: IO descriptor
 * @hdr: pageio header
 */
static int nfs_pgio_error(struct nfs_pageio_descriptor *desc,
			  struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_data *data;

	set_bit(NFS_IOHDR_REDO, &hdr->flags);
	while (!list_empty(&hdr->rpc_list)) {
		data = list_first_entry(&hdr->rpc_list, struct nfs_pgio_data, list);
		list_del(&data->list);
		nfs_pgio_data_release(data);
	}
	desc->pg_completion_ops->error_cleanup(&desc->pg_list);
	return -ENOMEM;
}

/**
 * nfs_pgio_release - Release pageio data
 * @calldata: The pageio data to release
 */
static void nfs_pgio_release(void *calldata)
{
	struct nfs_pgio_data *data = calldata;
	if (data->header->rw_ops->rw_release)
		data->header->rw_ops->rw_release(data);
	nfs_pgio_data_release(data);
}

/**
 * nfs_pageio_init - initialise a page io descriptor
 * @desc: pointer to descriptor
 * @inode: pointer to inode
 * @pg_ops: pointer to pageio operations
 * @compl_ops: pointer to pageio completion operations
 * @rw_ops: pointer to nfs read/write operations
 * @bsize: io block size
 * @io_flags: extra parameters for the io function
 */
void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
		     struct inode *inode,
		     const struct nfs_pageio_ops *pg_ops,
		     const struct nfs_pgio_completion_ops *compl_ops,
		     const struct nfs_rw_ops *rw_ops,
		     size_t bsize,
		     int io_flags)
{
	INIT_LIST_HEAD(&desc->pg_list);
	desc->pg_bytes_written = 0;
	desc->pg_count = 0;
	desc->pg_bsize = bsize;
	desc->pg_base = 0;
	desc->pg_moreio = 0;
	desc->pg_recoalesce = 0;
	desc->pg_inode = inode;
	desc->pg_ops = pg_ops;
	desc->pg_completion_ops = compl_ops;
	desc->pg_rw_ops = rw_ops;
	desc->pg_ioflags = io_flags;
	desc->pg_error = 0;
	desc->pg_lseg = NULL;
	desc->pg_dreq = NULL;
	desc->pg_layout_private = NULL;
}
EXPORT_SYMBOL_GPL(nfs_pageio_init);

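/*
 * Illustrative call pattern (a sketch, not code from this file): callers
 * such as the read and write paths initialise a descriptor, feed it
 * requests one at a time and then flush whatever has been coalesced:
 *
 *	nfs_pageio_init(&pgio, inode, pg_ops, compl_ops, rw_ops, bsize, 0);
 *	nfs_pageio_add_request(&pgio, req);
 *	...
 *	nfs_pageio_complete(&pgio);
 */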
/**
 * nfs_pgio_result - Basic pageio error handling
 * @task: The task that ran
 * @calldata: Pageio data to check
 */
static void nfs_pgio_result(struct rpc_task *task, void *calldata)
{
	struct nfs_pgio_data *data = calldata;
	struct inode *inode = data->header->inode;

	dprintk("NFS: %s: %5u, (status %d)\n", __func__,
		task->tk_pid, task->tk_status);

	if (data->header->rw_ops->rw_done(task, data, inode) != 0)
		return;
	if (task->tk_status < 0)
		nfs_set_pgio_error(data->header, task->tk_status, data->args.offset);
	else
		data->header->rw_ops->rw_result(task, data);
}

/*
 * Create an RPC task for the given read or write request and kick it.
 * The page must have been locked by the caller.
 *
 * It may happen that the page we're passed is not marked dirty.
 * This is the case if nfs_updatepage detects a conflicting request
 * that has been written but not committed.
 */
int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
		     struct nfs_pgio_header *hdr)
{
	struct nfs_page *req;
	struct page **pages;
	struct nfs_pgio_data *data;
	struct list_head *head = &desc->pg_list;
	struct nfs_commit_info cinfo;

	data = nfs_pgio_data_alloc(hdr, nfs_page_array_len(desc->pg_base,
							   desc->pg_count));
	if (!data)
		return nfs_pgio_error(desc, hdr);

	nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq);
	pages = data->pages.pagevec;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_list_add_request(req, &hdr->pages);
		*pages++ = req->wb_page;
	}

	if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
	    (desc->pg_moreio || nfs_reqs_to_commit(&cinfo)))
		desc->pg_ioflags &= ~FLUSH_COND_STABLE;

	/* Set up the argument struct */
	nfs_pgio_rpcsetup(data, desc->pg_count, 0, desc->pg_ioflags, &cinfo);
	list_add(&data->list, &hdr->rpc_list);
	desc->pg_rpc_callops = &nfs_pgio_common_ops;
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_generic_pgio);

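/*
 * Default ->pg_doio implementation: allocate a header, turn the coalesced
 * page list into RPC requests with nfs_generic_pgio() and send them.
 */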
static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc)
{
	struct nfs_rw_header *rw_hdr;
	struct nfs_pgio_header *hdr;
	int ret;

	rw_hdr = nfs_rw_header_alloc(desc->pg_rw_ops);
	if (!rw_hdr) {
		desc->pg_completion_ops->error_cleanup(&desc->pg_list);
		return -ENOMEM;
	}
	hdr = &rw_hdr->header;
	nfs_pgheader_init(desc, hdr, nfs_rw_header_free);
	atomic_inc(&hdr->refcnt);
	ret = nfs_generic_pgio(desc, hdr);
	if (ret == 0)
		ret = nfs_do_multiple_pgios(&hdr->rpc_list,
					    desc->pg_rpc_callops,
					    desc->pg_ioflags);
	if (atomic_dec_and_test(&hdr->refcnt))
		hdr->completion_ops->completion(hdr);
	return ret;
}

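/*
 * Two requests may only be coalesced if they were generated under the same
 * open context (credential and NFSv4 open state) and, when the file is
 * locked, under the same lock owner.  These helpers check for that.
 */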
static bool nfs_match_open_context(const struct nfs_open_context *ctx1,
				   const struct nfs_open_context *ctx2)
{
	return ctx1->cred == ctx2->cred && ctx1->state == ctx2->state;
}

static bool nfs_match_lock_context(const struct nfs_lock_context *l1,
				   const struct nfs_lock_context *l2)
{
	return l1->lockowner.l_owner == l2->lockowner.l_owner
		&& l1->lockowner.l_pid == l2->lockowner.l_pid;
}

/**
 * nfs_can_coalesce_requests - test two requests for compatibility
 * @prev: pointer to nfs_page
 * @req: pointer to nfs_page
 * @pgio: pointer to the pageio descriptor
 *
 * The nfs_page structures 'prev' and 'req' are compared to ensure that the
 * page data area they describe is contiguous, and that their RPC
 * credentials, NFSv4 open state, and lockowners are the same.
 *
 * Return 'true' if this is the case, else return 'false'.
 */
static bool nfs_can_coalesce_requests(struct nfs_page *prev,
				      struct nfs_page *req,
				      struct nfs_pageio_descriptor *pgio)
{
	size_t size;

	if (prev) {
		if (!nfs_match_open_context(req->wb_context, prev->wb_context))
			return false;
		if (req->wb_context->dentry->d_inode->i_flock != NULL &&
		    !nfs_match_lock_context(req->wb_lock_context,
					    prev->wb_lock_context))
			return false;
		if (req_offset(req) != req_offset(prev) + prev->wb_bytes)
			return false;
	}
	size = pgio->pg_ops->pg_test(pgio, prev, req);
	WARN_ON_ONCE(size > req->wb_bytes);
	if (size && size < req->wb_bytes)
		req->wb_bytes = size;
	return size > 0;
}

/**
 * nfs_pageio_do_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
				     struct nfs_page *req)
{
	struct nfs_page *prev = NULL;
	if (desc->pg_count != 0) {
		prev = nfs_list_entry(desc->pg_list.prev);
	} else {
		if (desc->pg_ops->pg_init)
			desc->pg_ops->pg_init(desc, req);
		desc->pg_base = req->wb_pgbase;
	}
	if (!nfs_can_coalesce_requests(prev, req, desc))
		return 0;
	nfs_list_remove_request(req);
	nfs_list_add_request(req, &desc->pg_list);
	desc->pg_count += req->wb_bytes;
	return 1;
}

/*
 * Helper for nfs_pageio_add_request and nfs_pageio_complete
 */
static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
{
	if (!list_empty(&desc->pg_list)) {
		int error = desc->pg_ops->pg_doio(desc);
		if (error < 0)
			desc->pg_error = error;
		else
			desc->pg_bytes_written += desc->pg_count;
	}
	if (list_empty(&desc->pg_list)) {
		desc->pg_count = 0;
		desc->pg_base = 0;
	}
}

/**
 * nfs_pageio_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * This may split a request into subrequests which are all part of the
 * same page group.
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
				    struct nfs_page *req)
{
	struct nfs_page *subreq;
	unsigned int bytes_left = 0;
	unsigned int offset, pgbase;

	nfs_page_group_lock(req);

	subreq = req;
	bytes_left = subreq->wb_bytes;
	offset = subreq->wb_offset;
	pgbase = subreq->wb_pgbase;

	do {
		if (!nfs_pageio_do_add_request(desc, subreq)) {
			/* make sure pg_test call(s) did nothing */
			WARN_ON_ONCE(subreq->wb_bytes != bytes_left);
			WARN_ON_ONCE(subreq->wb_offset != offset);
			WARN_ON_ONCE(subreq->wb_pgbase != pgbase);

			nfs_page_group_unlock(req);
			desc->pg_moreio = 1;
			nfs_pageio_doio(desc);
			if (desc->pg_error < 0)
				return 0;
			desc->pg_moreio = 0;
			if (desc->pg_recoalesce)
				return 0;
			/* retry add_request for this subreq */
			nfs_page_group_lock(req);
			continue;
		}

		/* check for buggy pg_test call(s) */
		WARN_ON_ONCE(subreq->wb_bytes + subreq->wb_pgbase > PAGE_SIZE);
		WARN_ON_ONCE(subreq->wb_bytes > bytes_left);
		WARN_ON_ONCE(subreq->wb_bytes == 0);

		bytes_left -= subreq->wb_bytes;
		offset += subreq->wb_bytes;
		pgbase += subreq->wb_bytes;

		if (bytes_left) {
			subreq = nfs_create_request(req->wb_context,
					req->wb_page,
					subreq, pgbase, bytes_left);
			nfs_lock_request(subreq);
			subreq->wb_offset = offset;
			subreq->wb_index = req->wb_index;
		}
	} while (bytes_left > 0);

	nfs_page_group_unlock(req);
	return 1;
}

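/*
 * Pull everything queued on the descriptor back off its list and re-add it
 * one request at a time.  Used when desc->pg_recoalesce has been set (for
 * instance when pNFS I/O falls back to the MDS) so that earlier coalescing
 * decisions can be redone.
 */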
static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
{
	LIST_HEAD(head);

	do {
		list_splice_init(&desc->pg_list, &head);
		desc->pg_bytes_written -= desc->pg_count;
		desc->pg_count = 0;
		desc->pg_base = 0;
		desc->pg_recoalesce = 0;

		while (!list_empty(&head)) {
			struct nfs_page *req;

			req = list_first_entry(&head, struct nfs_page, wb_list);
			nfs_list_remove_request(req);
			if (__nfs_pageio_add_request(desc, req))
				continue;
			if (desc->pg_error < 0)
				return 0;
			break;
		}
	} while (desc->pg_recoalesce);
	return 1;
}

int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *req)
{
	int ret;

	do {
		ret = __nfs_pageio_add_request(desc, req);
		if (ret)
			break;
		if (desc->pg_error < 0)
			break;
		ret = nfs_do_recoalesce(desc);
	} while (ret);
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_pageio_add_request);

/**
 * nfs_pageio_complete - Complete I/O on an nfs_pageio_descriptor
 * @desc: pointer to io descriptor
 */
void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
{
	for (;;) {
		nfs_pageio_doio(desc);
		if (!desc->pg_recoalesce)
			break;
		if (!nfs_do_recoalesce(desc))
			break;
	}
}
EXPORT_SYMBOL_GPL(nfs_pageio_complete);

/**
 * nfs_pageio_cond_complete - Conditional I/O completion
 * @desc: pointer to io descriptor
 * @index: page index
 *
 * It is important to ensure that processes don't try to take locks
 * on non-contiguous ranges of pages as that might deadlock. This
 * function should be called before attempting to wait on a locked
 * nfs_page. It will complete the I/O if the page index 'index'
 * is not contiguous with the existing list of pages in 'desc'.
 */
void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
{
	if (!list_empty(&desc->pg_list)) {
		struct nfs_page *prev = nfs_list_entry(desc->pg_list.prev);
		if (index != prev->wb_index + 1)
			nfs_pageio_complete(desc);
	}
}

int __init nfs_init_nfspagecache(void)
{
	nfs_page_cachep = kmem_cache_create("nfs_page",
					    sizeof(struct nfs_page),
					    0, SLAB_HWCACHE_ALIGN,
					    NULL);
	if (nfs_page_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_nfspagecache(void)
{
	kmem_cache_destroy(nfs_page_cachep);
}

static const struct rpc_call_ops nfs_pgio_common_ops = {
	.rpc_call_prepare = nfs_pgio_prepare,
	.rpc_call_done = nfs_pgio_result,
	.rpc_release = nfs_pgio_release,
};

const struct nfs_pageio_ops nfs_pgio_rw_ops = {
	.pg_test = nfs_generic_pg_test,
	.pg_doio = nfs_generic_pg_pgios,
};