/*
 * linux/fs/nfs/pagelist.c
 *
 * A set of helper functions for managing NFS read and write requests.
 * The main purpose of these routines is to provide support for the
 * coalescing of several requests into a single RPC call.
 *
 * Copyright 2000, 2001 (c) Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_page.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>

#include "internal.h"

#define NFS_PARANOIA 1

static struct kmem_cache *nfs_page_cachep;

static inline struct nfs_page *
nfs_page_alloc(void)
{
	struct nfs_page *p;

	p = kmem_cache_alloc(nfs_page_cachep, GFP_KERNEL);
	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->wb_list);
	}
	return p;
}

static inline void
nfs_page_free(struct nfs_page *p)
{
	kmem_cache_free(nfs_page_cachep, p);
}

/**
 * nfs_create_request - Create an NFS read/write request.
 * @ctx: open context to use
 * @inode: inode to which the request is attached
 * @page: page to write
 * @offset: starting offset within the page for the write
 * @count: number of bytes to read/write
 *
 * The page must be locked by the caller. This makes sure we never
 * create two different requests for the same page, and avoids
 * a possible deadlock when we reach the hard limit on the number
 * of dirty pages.
 * The caller must ensure it is safe to sleep in this function.
 */
struct nfs_page *
nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
		   struct page *page,
		   unsigned int offset, unsigned int count)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_page *req;

	/* Deal with hard limits. */
	for (;;) {
		/* try to allocate the request struct */
		req = nfs_page_alloc();
		if (req != NULL)
			break;

		/* Try to free up at least one request in order to stay
		 * below the hard limit
		 */
		if (signalled() && (server->flags & NFS_MOUNT_INTR))
			return ERR_PTR(-ERESTARTSYS);
		yield();
	}

	/* Initialize the request struct. Initially, we assume a
	 * long write-back delay. This will be adjusted in
	 * update_nfs_request below if the region is not locked. */
	req->wb_page = page;
	atomic_set(&req->wb_complete, 0);
	req->wb_index = page->index;
	page_cache_get(page);
	BUG_ON(PagePrivate(page));
	BUG_ON(!PageLocked(page));
	BUG_ON(page->mapping->host != inode);
	req->wb_offset = offset;
	req->wb_pgbase = offset;
	req->wb_bytes = count;
	atomic_set(&req->wb_count, 1);
	req->wb_context = get_nfs_open_context(ctx);

	return req;
}

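/*
 * Example (an illustrative sketch): a typical caller creates a request
 * against a locked page and drops its reference with
 * nfs_release_request() once the request has been handed off. 'ctx' is
 * assumed to come from the open file.
 *
 *	struct nfs_page *req;
 *
 *	req = nfs_create_request(ctx, inode, page, 0, PAGE_CACHE_SIZE);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	...
 *	nfs_release_request(req);
 */
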
/**
 * nfs_unlock_request - Unlock request and wake up sleepers.
 * @req: request to unlock
 */
void nfs_unlock_request(struct nfs_page *req)
{
	if (!NFS_WBACK_BUSY(req)) {
		printk(KERN_ERR "NFS: Invalid unlock attempted\n");
		BUG();
	}
	smp_mb__before_clear_bit();
	clear_bit(PG_BUSY, &req->wb_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&req->wb_flags, PG_BUSY);
	nfs_release_request(req);
}

/**
 * nfs_set_page_writeback_locked - Lock a request for writeback
 * @req: request to lock
 */
int nfs_set_page_writeback_locked(struct nfs_page *req)
{
	struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode);

	if (!nfs_lock_request(req))
		return 0;
	radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_WRITEBACK);
	return 1;
}

/**
 * nfs_clear_page_writeback - Unlock request and wake up sleepers
 * @req: request to unlock
 */
void nfs_clear_page_writeback(struct nfs_page *req)
{
	struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode);

	if (req->wb_page != NULL) {
		spin_lock(&nfsi->req_lock);
		radix_tree_tag_clear(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_WRITEBACK);
		spin_unlock(&nfsi->req_lock);
	}
	nfs_unlock_request(req);
}

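/*
 * Example (illustrative sketch): the two helpers above bracket the
 * writeback of a request; a scan typically locks each request it
 * claims and unlocks it once io on the request has completed:
 *
 *	if (nfs_set_page_writeback_locked(req)) {
 *		...	(issue the write)
 *		nfs_clear_page_writeback(req);
 *	}
 */
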
/**
 * nfs_clear_request - Free up all resources allocated to the request
 * @req: request to clear
 *
 * Release page resources associated with a write request after it
 * has completed.
 */
void nfs_clear_request(struct nfs_page *req)
{
	struct page *page = req->wb_page;

	if (page != NULL) {
		page_cache_release(page);
		req->wb_page = NULL;
	}
}

/**
 * nfs_release_request - Release the count on an NFS read/write request
 * @req: request to release
 *
 * Note: Should never be called with the spinlock held!
 */
void
nfs_release_request(struct nfs_page *req)
{
	if (!atomic_dec_and_test(&req->wb_count))
		return;

#ifdef NFS_PARANOIA
	BUG_ON(!list_empty(&req->wb_list));
	BUG_ON(NFS_WBACK_BUSY(req));
#endif

	/* Release struct file or cached credential */
	nfs_clear_request(req);
	put_nfs_open_context(req->wb_context);
	nfs_page_free(req);
}

/*
 * Bit-wait action for out_of_line_wait_on_bit(): sleep until woken,
 * unless a signal is pending.
 */
static int nfs_wait_bit_interruptible(void *word)
{
	int ret = 0;

	if (signal_pending(current))
		ret = -ERESTARTSYS;
	else
		schedule();
	return ret;
}

/**
 * nfs_wait_on_request - Wait for a request to complete.
 * @req: request to wait upon.
 *
 * Interruptible by signals only if mounted with intr flag.
 * The user is responsible for holding a count on the request.
 */
int
nfs_wait_on_request(struct nfs_page *req)
{
	struct rpc_clnt *clnt = NFS_CLIENT(req->wb_context->dentry->d_inode);
	sigset_t oldmask;
	int ret = 0;

	if (!test_bit(PG_BUSY, &req->wb_flags))
		goto out;
	/*
	 * Note: the call to rpc_clnt_sigmask() suffices to ensure that we
	 * are not interrupted if the intr flag is not set
	 */
	rpc_clnt_sigmask(clnt, &oldmask);
	ret = out_of_line_wait_on_bit(&req->wb_flags, PG_BUSY,
			nfs_wait_bit_interruptible, TASK_INTERRUPTIBLE);
	rpc_clnt_sigunmask(clnt, &oldmask);
out:
	return ret;
}

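/*
 * Example (illustrative sketch): a caller that finds a request busy
 * typically takes an extra reference, drops its locks, and waits for
 * the io to finish before retrying:
 *
 *	if (!nfs_lock_request(req)) {
 *		atomic_inc(&req->wb_count);
 *		spin_unlock(&nfsi->req_lock);
 *		error = nfs_wait_on_request(req);
 *		nfs_release_request(req);
 *		if (error < 0)
 *			return error;
 *	}
 */
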
/**
 * nfs_pageio_init - initialise a page io descriptor
 * @desc: pointer to descriptor
 * @inode: pointer to inode
 * @doio: pointer to io function
 * @bsize: io block size
 * @io_flags: extra parameters for the io function
 */
void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
		     struct inode *inode,
		     int (*doio)(struct inode *, struct list_head *, unsigned int, size_t, int),
		     unsigned int bsize,
		     int io_flags)
{
	INIT_LIST_HEAD(&desc->pg_list);
	desc->pg_bytes_written = 0;
	desc->pg_count = 0;
	desc->pg_bsize = bsize;
	desc->pg_base = 0;
	desc->pg_inode = inode;
	desc->pg_doio = doio;
	desc->pg_ioflags = io_flags;
	desc->pg_error = 0;
}

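/*
 * Example (illustrative sketch): a write path would initialise the
 * descriptor with the server's wsize and its own flush function;
 * 'nfs_flush_one' here stands in for whatever doio the caller supplies:
 *
 *	nfs_pageio_init(&pgio, inode, nfs_flush_one,
 *			NFS_SERVER(inode)->wsize, how);
 */
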
/**
 * nfs_can_coalesce_requests - test two requests for compatibility
 * @prev: pointer to nfs_page
 * @req: pointer to nfs_page
 *
 * The nfs_page structures 'prev' and 'req' are compared to ensure that the
 * page data area they describe is contiguous, and that their RPC
 * credentials, NFSv4 open state, and lockowners are the same.
 *
 * Return 'true' if this is the case, else return 'false'.
 */
static int nfs_can_coalesce_requests(struct nfs_page *prev,
				     struct nfs_page *req)
{
	if (req->wb_context->cred != prev->wb_context->cred)
		return 0;
	if (req->wb_context->lockowner != prev->wb_context->lockowner)
		return 0;
	if (req->wb_context->state != prev->wb_context->state)
		return 0;
	if (req->wb_index != (prev->wb_index + 1))
		return 0;
	if (req->wb_pgbase != 0)
		return 0;
	if (prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE)
		return 0;
	return 1;
}

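/*
 * Example: two requests covering whole, adjacent pages (indices n and
 * n + 1) issued under the same open context coalesce; a request whose
 * predecessor covers only part of its page, or one issued under a
 * different credential, does not.
 */
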
/**
 * nfs_pageio_do_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
				     struct nfs_page *req)
{
	size_t newlen = req->wb_bytes;

	if (desc->pg_count != 0) {
		struct nfs_page *prev;

		/*
		 * FIXME: ideally we should be able to coalesce all requests
		 * that are not block boundary aligned, but currently this
		 * is problematic for the case of bsize < PAGE_CACHE_SIZE,
		 * since nfs_flush_multi and nfs_pagein_multi assume you
		 * can have only one struct nfs_page.
		 */
		if (desc->pg_bsize < PAGE_SIZE)
			return 0;
		newlen += desc->pg_count;
		if (newlen > desc->pg_bsize)
			return 0;
		prev = nfs_list_entry(desc->pg_list.prev);
		if (!nfs_can_coalesce_requests(prev, req))
			return 0;
	} else
		desc->pg_base = req->wb_pgbase;
	nfs_list_remove_request(req);
	nfs_list_add_request(req, &desc->pg_list);
	desc->pg_count = newlen;
	return 1;
}

/*
 * Helper for nfs_pageio_add_request and nfs_pageio_complete
 */
static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
{
	if (!list_empty(&desc->pg_list)) {
		int error = desc->pg_doio(desc->pg_inode,
					  &desc->pg_list,
					  nfs_page_array_len(desc->pg_base,
							     desc->pg_count),
					  desc->pg_count,
					  desc->pg_ioflags);
		if (error < 0)
			desc->pg_error = error;
		else
			desc->pg_bytes_written += desc->pg_count;
	}
	if (list_empty(&desc->pg_list)) {
		desc->pg_count = 0;
		desc->pg_base = 0;
	}
}

/**
 * nfs_pageio_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Adds the request 'req' to the list of pages in 'desc'. If it cannot
 * be coalesced with the pages already in the list, the existing list
 * is flushed first. Returns true on success; returns false only if
 * flushing the list failed, with the error left in desc->pg_error.
 */
int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *req)
{
	while (!nfs_pageio_do_add_request(desc, req)) {
		nfs_pageio_doio(desc);
		if (desc->pg_error < 0)
			return 0;
	}
	return 1;
}

/**
 * nfs_pageio_complete - Complete I/O on an nfs_pageio_descriptor
 * @desc: pointer to io descriptor
 */
void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
{
	nfs_pageio_doio(desc);
}

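/*
 * Example (illustrative sketch): callers drive the descriptor in three
 * steps - initialise it, feed it requests (which are coalesced or
 * flushed as they arrive), then flush whatever remains:
 *
 *	struct nfs_pageio_descriptor pgio;
 *
 *	nfs_pageio_init(&pgio, inode, doio, bsize, io_flags);
 *	while ((req = next_request()) != NULL)
 *		if (!nfs_pageio_add_request(&pgio, req))
 *			break;
 *	nfs_pageio_complete(&pgio);
 *	if (pgio.pg_error < 0)
 *		return pgio.pg_error;
 *
 * 'next_request()' is a stand-in for however the caller produces its
 * nfs_page requests.
 */
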
#define NFS_SCAN_MAXENTRIES 16
/**
 * nfs_scan_list - Scan a list for matching requests
 * @nfsi: NFS inode
 * @head: One of the NFS inode request lists
 * @dst: Destination list
 * @idx_start: lower bound of page->index to scan
 * @npages: idx_start + npages sets the upper bound to scan;
 *	    if npages is 0, the entire address_space starting at
 *	    index idx_start is scanned.
 *
 * Moves elements from one of the inode request lists.
 * The requests are *not* checked to ensure that they form a contiguous set.
 * You must be holding the inode's req_lock when calling this function.
 */
int nfs_scan_list(struct nfs_inode *nfsi, struct list_head *head,
		  struct list_head *dst, pgoff_t idx_start,
		  unsigned int npages)
{
	struct nfs_page *pgvec[NFS_SCAN_MAXENTRIES];
	struct nfs_page *req;
	pgoff_t idx_end;
	int found, i;
	int res;

	res = 0;
	if (npages == 0)
		idx_end = ~0;
	else
		idx_end = idx_start + npages - 1;

	for (;;) {
		found = radix_tree_gang_lookup(&nfsi->nfs_page_tree,
				(void **)&pgvec[0], idx_start,
				NFS_SCAN_MAXENTRIES);
		if (found <= 0)
			break;
		for (i = 0; i < found; i++) {
			req = pgvec[i];
			if (req->wb_index > idx_end)
				goto out;
			idx_start = req->wb_index + 1;
			if (req->wb_list_head != head)
				continue;
			if (nfs_set_page_writeback_locked(req)) {
				nfs_list_remove_request(req);
				nfs_list_add_request(req, dst);
				res++;
			}
		}
	}
out:
	return res;
}

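/*
 * Example (illustrative sketch): a caller claims matching requests
 * under the req_lock and then operates on its private list; 'head'
 * would be one of the nfs_inode request lists:
 *
 *	LIST_HEAD(my_list);
 *	int res;
 *
 *	spin_lock(&nfsi->req_lock);
 *	res = nfs_scan_list(nfsi, head, &my_list, 0, 0);
 *	spin_unlock(&nfsi->req_lock);
 */
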
int __init nfs_init_nfspagecache(void)
{
	nfs_page_cachep = kmem_cache_create("nfs_page",
					    sizeof(struct nfs_page),
					    0, SLAB_HWCACHE_ALIGN,
					    NULL, NULL);
	if (nfs_page_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_nfspagecache(void)
{
	kmem_cache_destroy(nfs_page_cachep);
}