/* Cache page management and data I/O routines
 *
 * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define FSCACHE_DEBUG_LEVEL PAGE
#include <linux/module.h>
#include <linux/fscache-cache.h>
#include <linux/buffer_head.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#include "internal.h"

/*
 * check to see if a page is being written to the cache
 */
bool __fscache_check_page_write(struct fscache_cookie *cookie, struct page *page)
{
	void *val;

	rcu_read_lock();
	val = radix_tree_lookup(&cookie->stores, page->index);
	rcu_read_unlock();

	return val != NULL;
}
EXPORT_SYMBOL(__fscache_check_page_write);

/*
 * wait for a page to finish being written to the cache
 */
void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *page)
{
	wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);

	wait_event(*wq, !__fscache_check_page_write(cookie, page));
}
EXPORT_SYMBOL(__fscache_wait_on_page_write);
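
/*
 * Hypothetical usage sketch (an editor's illustration, not part of the
 * original file): a netfs that needs a page to be stable, say in its
 * ->launder_page() handler, can poll and wait via the wrappers in
 * linux/fscache.h.  "my_cookie()" is an assumed helper returning the
 * inode's FS-Cache cookie:
 *
 *	static int my_launder_page(struct page *page)
 *	{
 *		struct fscache_cookie *cookie =
 *			my_cookie(page->mapping->host);
 *
 *		if (fscache_check_page_write(cookie, page))
 *			fscache_wait_on_page_write(cookie, page);
 *		return 0;
 *	}
 */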

/*
 * decide whether a page can be released, possibly by cancelling a store to it
 * - we're allowed to sleep if __GFP_WAIT is flagged
 */
bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
				  struct page *page,
				  gfp_t gfp)
{
	struct page *xpage;
	void *val;

	_enter("%p,%p,%x", cookie, page, gfp);

	rcu_read_lock();
	val = radix_tree_lookup(&cookie->stores, page->index);
	if (!val) {
		rcu_read_unlock();
		fscache_stat(&fscache_n_store_vmscan_not_storing);
		__fscache_uncache_page(cookie, page);
		return true;
	}

	/* see if the page is actually undergoing storage - if so we can't get
	 * rid of it till the cache has finished with it */
	if (radix_tree_tag_get(&cookie->stores, page->index,
			       FSCACHE_COOKIE_STORING_TAG)) {
		rcu_read_unlock();
		goto page_busy;
	}

	/* the page is pending storage, so we attempt to cancel the store and
	 * discard the store request so that the page can be reclaimed */
	spin_lock(&cookie->stores_lock);
	rcu_read_unlock();

	if (radix_tree_tag_get(&cookie->stores, page->index,
			       FSCACHE_COOKIE_STORING_TAG)) {
		/* the page started to undergo storage whilst we were looking,
		 * so now we can only wait or return */
		spin_unlock(&cookie->stores_lock);
		goto page_busy;
	}

	xpage = radix_tree_delete(&cookie->stores, page->index);
	spin_unlock(&cookie->stores_lock);

	if (xpage) {
		fscache_stat(&fscache_n_store_vmscan_cancelled);
		fscache_stat(&fscache_n_store_radix_deletes);
		ASSERTCMP(xpage, ==, page);
	} else {
		fscache_stat(&fscache_n_store_vmscan_gone);
	}

	wake_up_bit(&cookie->flags, 0);
	if (xpage)
		page_cache_release(xpage);
	__fscache_uncache_page(cookie, page);
	return true;

page_busy:
	/* we might want to wait here, but that could deadlock the allocator as
	 * the slow-work threads writing to the cache may all end up sleeping
	 * on memory allocation */
	fscache_stat(&fscache_n_store_vmscan_busy);
	return false;
}
EXPORT_SYMBOL(__fscache_maybe_release_page);
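
/*
 * Hypothetical usage sketch (an editor's illustration, not part of the
 * original file): a netfs ->releasepage() can let FS-Cache veto reclaim of
 * a page that is still queued for storage.  "my_cookie()" is an assumed
 * helper:
 *
 *	static int my_releasepage(struct page *page, gfp_t gfp)
 *	{
 *		struct inode *inode = page->mapping->host;
 *
 *		if (PageFsCache(page) &&
 *		    !fscache_maybe_release_page(my_cookie(inode), page, gfp))
 *			return 0;
 *		return 1;
 *	}
 */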

/*
 * note that a page has finished being written to the cache
 */
static void fscache_end_page_write(struct fscache_object *object,
				   struct page *page)
{
	struct fscache_cookie *cookie;
	struct page *xpage = NULL;

	spin_lock(&object->lock);
	cookie = object->cookie;
	if (cookie) {
		/* delete the page from the tree if it is now no longer
		 * pending */
		spin_lock(&cookie->stores_lock);
		radix_tree_tag_clear(&cookie->stores, page->index,
				     FSCACHE_COOKIE_STORING_TAG);
		if (!radix_tree_tag_get(&cookie->stores, page->index,
					FSCACHE_COOKIE_PENDING_TAG)) {
			fscache_stat(&fscache_n_store_radix_deletes);
			xpage = radix_tree_delete(&cookie->stores, page->index);
		}
		spin_unlock(&cookie->stores_lock);
		wake_up_bit(&cookie->flags, 0);
	}
	spin_unlock(&object->lock);
	if (xpage)
		page_cache_release(xpage);
}

/*
 * actually apply the changed attributes to a cache object
 */
static void fscache_attr_changed_op(struct fscache_operation *op)
{
	struct fscache_object *object = op->object;
	int ret;

	_enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);

	fscache_stat(&fscache_n_attr_changed_calls);

	if (fscache_object_is_active(object)) {
		fscache_set_op_state(op, "CallFS");
		fscache_stat(&fscache_n_cop_attr_changed);
		ret = object->cache->ops->attr_changed(object);
		fscache_stat_d(&fscache_n_cop_attr_changed);
		fscache_set_op_state(op, "Done");
		if (ret < 0)
			fscache_abort_object(object);
	}

	_leave("");
}

/*
 * notification that the attributes on an object have changed
 */
int __fscache_attr_changed(struct fscache_cookie *cookie)
{
	struct fscache_operation *op;
	struct fscache_object *object;

	_enter("%p", cookie);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);

	fscache_stat(&fscache_n_attr_changed);

	op = kzalloc(sizeof(*op), GFP_KERNEL);
	if (!op) {
		fscache_stat(&fscache_n_attr_changed_nomem);
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}

	fscache_operation_init(op, NULL);
	fscache_operation_init_slow(op, fscache_attr_changed_op);
	op->flags = FSCACHE_OP_SLOW | (1 << FSCACHE_OP_EXCLUSIVE);
	fscache_set_op_name(op, "Attr");

	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	if (fscache_submit_exclusive_op(object, op) < 0)
		goto nobufs;
	spin_unlock(&cookie->lock);
	fscache_stat(&fscache_n_attr_changed_ok);
	fscache_put_operation(op);
	_leave(" = 0");
	return 0;

nobufs:
	spin_unlock(&cookie->lock);
	kfree(op);
	fscache_stat(&fscache_n_attr_changed_nobufs);
	_leave(" = %d", -ENOBUFS);
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_attr_changed);
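
/*
 * Hypothetical usage sketch (an editor's illustration, not part of the
 * original file): after a change the cache should mirror, such as a
 * truncate altering the file size, a netfs might call the
 * fscache_attr_changed() wrapper so the backing object can be resized.
 * "my_cookie()" is an assumed helper:
 *
 *	static void my_setattr_update_cache(struct inode *inode)
 *	{
 *		if (fscache_attr_changed(my_cookie(inode)) < 0)
 *			pr_debug("cache attr update failed\n");
 *	}
 */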

/*
 * handle secondary execution given to a retrieval op on behalf of the
 * cache
 */
static void fscache_retrieval_work(struct work_struct *work)
{
	struct fscache_retrieval *op =
		container_of(work, struct fscache_retrieval, op.fast_work);
	unsigned long start;

	_enter("{OP%x}", op->op.debug_id);

	start = jiffies;
	op->op.processor(&op->op);
	fscache_hist(fscache_ops_histogram, start);
	fscache_put_operation(&op->op);
}

/*
 * release a retrieval op reference
 */
static void fscache_release_retrieval_op(struct fscache_operation *_op)
{
	struct fscache_retrieval *op =
		container_of(_op, struct fscache_retrieval, op);

	_enter("{OP%x}", op->op.debug_id);

	fscache_hist(fscache_retrieval_histogram, op->start_time);
	if (op->context)
		fscache_put_context(op->op.object->cookie, op->context);

	_leave("");
}

/*
 * allocate a retrieval op
 */
static struct fscache_retrieval *fscache_alloc_retrieval(
	struct address_space *mapping,
	fscache_rw_complete_t end_io_func,
	void *context)
{
	struct fscache_retrieval *op;

	/* allocate a retrieval operation and attempt to submit it */
	op = kzalloc(sizeof(*op), GFP_NOIO);
	if (!op) {
		fscache_stat(&fscache_n_retrievals_nomem);
		return NULL;
	}

	fscache_operation_init(&op->op, fscache_release_retrieval_op);
	op->op.flags = FSCACHE_OP_MYTHREAD | (1 << FSCACHE_OP_WAITING);
	op->mapping = mapping;
	op->end_io_func = end_io_func;
	op->context = context;
	op->start_time = jiffies;
	INIT_WORK(&op->op.fast_work, fscache_retrieval_work);
	INIT_LIST_HEAD(&op->to_do);
	fscache_set_op_name(&op->op, "Retr");
	return op;
}

/*
 * wait for a deferred lookup to complete
 */
static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
{
	unsigned long jif;

	_enter("");

	if (!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)) {
		_leave(" = 0 [imm]");
		return 0;
	}

	fscache_stat(&fscache_n_retrievals_wait);

	jif = jiffies;
	if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
			fscache_wait_bit_interruptible,
			TASK_INTERRUPTIBLE) != 0) {
		fscache_stat(&fscache_n_retrievals_intr);
		_leave(" = -ERESTARTSYS");
		return -ERESTARTSYS;
	}

	ASSERT(!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags));

	smp_rmb();
	fscache_hist(fscache_retrieval_delay_histogram, jif);
	_leave(" = 0 [dly]");
	return 0;
}

/*
 * wait for an object to become active (or dead)
 */
static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
						 struct fscache_retrieval *op,
						 atomic_t *stat_op_waits,
						 atomic_t *stat_object_dead)
{
	int ret;

	if (!test_bit(FSCACHE_OP_WAITING, &op->op.flags))
		goto check_if_dead;

	_debug(">>> WT");
	fscache_stat(stat_op_waits);
	if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
			fscache_wait_bit_interruptible,
			TASK_INTERRUPTIBLE) < 0) {
		ret = fscache_cancel_op(&op->op);
		if (ret == 0)
			return -ERESTARTSYS;

		/* it's been removed from the pending queue by another party,
		 * so we should get to run shortly */
		wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
			    fscache_wait_bit, TASK_UNINTERRUPTIBLE);
	}
	_debug("<<< GO");

check_if_dead:
	if (unlikely(fscache_object_is_dead(object))) {
		fscache_stat(stat_object_dead);
		return -ENOBUFS;
	}
	return 0;
}

/*
 * read a page from the cache or allocate a block in which to store it
 * - we return:
 *   -ENOMEM - out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS - no backing object available in which to cache the block
 *   -ENODATA - no data available in the backing object for this block
 *   0 - dispatched a read - it'll call end_io_func() when finished
 */
int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
				 struct page *page,
				 fscache_rw_complete_t end_io_func,
				 void *context,
				 gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	int ret;

	_enter("%p,%p,,,", cookie, page);

	fscache_stat(&fscache_n_retrievals);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(page->mapping, end_io_func, context);
	if (!op) {
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}
	fscache_set_op_name(&op->op, "RetrRA1");

	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	ASSERTCMP(object->state, >, FSCACHE_OBJECT_LOOKING_UP);

	atomic_inc(&object->n_reads);
	set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_retrieval_ops);

	/* pin the netfs read context in case we need to do the actual netfs
	 * read because we've encountered a cache read failure */
	fscache_get_context(object->cookie, op->context);

	/* we wait for the operation to become active, and then process it
	 * *here*, in this thread, and not in the thread pool */
	ret = fscache_wait_for_retrieval_activation(
		object, op,
		__fscache_stat(&fscache_n_retrieval_op_waits),
		__fscache_stat(&fscache_n_retrievals_object_dead));
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
		fscache_stat(&fscache_n_cop_allocate_page);
		ret = object->cache->ops->allocate_page(op, page, gfp);
		fscache_stat_d(&fscache_n_cop_allocate_page);
		if (ret == 0)
			ret = -ENODATA;
	} else {
		fscache_stat(&fscache_n_cop_read_or_alloc_page);
		ret = object->cache->ops->read_or_alloc_page(op, page, gfp);
		fscache_stat_d(&fscache_n_cop_read_or_alloc_page);
	}

error:
	if (ret == -ENOMEM)
		fscache_stat(&fscache_n_retrievals_nomem);
	else if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_retrievals_intr);
	else if (ret == -ENODATA)
		fscache_stat(&fscache_n_retrievals_nodata);
	else if (ret < 0)
		fscache_stat(&fscache_n_retrievals_nobufs);
	else
		fscache_stat(&fscache_n_retrievals_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock:
	spin_unlock(&cookie->lock);
	kfree(op);
nobufs:
	fscache_stat(&fscache_n_retrievals_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_page);
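
/*
 * Hypothetical usage sketch (an editor's illustration, not part of the
 * original file): a netfs ->readpage() typically tries the cache first and
 * falls back to a server read on -ENOBUFS (no cache block available) or
 * -ENODATA (block allocated but empty).  "my_cookie()",
 * "my_read_complete()" and "my_read_from_server()" are assumed helpers:
 *
 *	static int my_readpage(struct file *file, struct page *page)
 *	{
 *		struct fscache_cookie *cookie =
 *			my_cookie(page->mapping->host);
 *		int ret;
 *
 *		ret = fscache_read_or_alloc_page(cookie, page,
 *						 my_read_complete,
 *						 NULL, GFP_KERNEL);
 *		if (ret == 0)
 *			return 0;
 *		if (ret == -ENOBUFS || ret == -ENODATA)
 *			return my_read_from_server(file, page);
 *		return ret;
 *	}
 */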

/*
 * read a list of pages from the cache or allocate blocks in which to store
 * them
 * - we return:
 *   -ENOMEM - out of memory, some pages may be being read
 *   -ERESTARTSYS - interrupted, some pages may be being read
 *   -ENOBUFS - no backing object or space available in which to cache any
 *              pages not being read
 *   -ENODATA - no data available in the backing object for some or all of
 *              the pages
 *   0 - dispatched a read on all pages
 *
 * end_io_func() will be called for each page read from the cache as it
 * finishes being read
 *
 * any pages for which a read is dispatched will be removed from pages and
 * nr_pages
 */
int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
				  struct address_space *mapping,
				  struct list_head *pages,
				  unsigned *nr_pages,
				  fscache_rw_complete_t end_io_func,
				  void *context,
				  gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	int ret;

	_enter("%p,,%d,,,", cookie, *nr_pages);

	fscache_stat(&fscache_n_retrievals);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(*nr_pages, >, 0);
	ASSERT(!list_empty(pages));

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(mapping, end_io_func, context);
	if (!op)
		return -ENOMEM;
	fscache_set_op_name(&op->op, "RetrRAN");

	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	atomic_inc(&object->n_reads);
	set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_retrieval_ops);

	/* pin the netfs read context in case we need to do the actual netfs
	 * read because we've encountered a cache read failure */
	fscache_get_context(object->cookie, op->context);

	/* we wait for the operation to become active, and then process it
	 * *here*, in this thread, and not in the thread pool */
	ret = fscache_wait_for_retrieval_activation(
		object, op,
		__fscache_stat(&fscache_n_retrieval_op_waits),
		__fscache_stat(&fscache_n_retrievals_object_dead));
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
		fscache_stat(&fscache_n_cop_allocate_pages);
		ret = object->cache->ops->allocate_pages(
			op, pages, nr_pages, gfp);
		fscache_stat_d(&fscache_n_cop_allocate_pages);
	} else {
		fscache_stat(&fscache_n_cop_read_or_alloc_pages);
		ret = object->cache->ops->read_or_alloc_pages(
			op, pages, nr_pages, gfp);
		fscache_stat_d(&fscache_n_cop_read_or_alloc_pages);
	}

error:
	if (ret == -ENOMEM)
		fscache_stat(&fscache_n_retrievals_nomem);
	else if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_retrievals_intr);
	else if (ret == -ENODATA)
		fscache_stat(&fscache_n_retrievals_nodata);
	else if (ret < 0)
		fscache_stat(&fscache_n_retrievals_nobufs);
	else
		fscache_stat(&fscache_n_retrievals_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock:
	spin_unlock(&cookie->lock);
	kfree(op);
nobufs:
	fscache_stat(&fscache_n_retrievals_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_pages);
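
/*
 * Hypothetical usage sketch (an editor's illustration, not part of the
 * original file): in ->readpages(), any pages the cache takes on are
 * removed from the list and deducted from *nr_pages; whatever remains is
 * fetched from the server.  "my_read_list_from_server()" is an assumed
 * helper:
 *
 *	ret = fscache_read_or_alloc_pages(cookie, mapping, pages, &nr_pages,
 *					  my_read_complete, NULL,
 *					  mapping_gfp_mask(mapping));
 *	if (ret == -ENOBUFS || ret == -ENODATA)
 *		ret = my_read_list_from_server(mapping, pages, nr_pages);
 */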

/*
 * allocate a block in the cache on which to store a page
 * - we return:
 *   -ENOMEM - out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS - no backing object available in which to cache the block
 *   0 - block allocated
 */
int __fscache_alloc_page(struct fscache_cookie *cookie,
			 struct page *page,
			 gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	int ret;

	_enter("%p,%p,,,", cookie, page);

	fscache_stat(&fscache_n_allocs);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(page->mapping, NULL, NULL);
	if (!op)
		return -ENOMEM;
	fscache_set_op_name(&op->op, "RetrAL1");

	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_alloc_ops);

	ret = fscache_wait_for_retrieval_activation(
		object, op,
		__fscache_stat(&fscache_n_alloc_op_waits),
		__fscache_stat(&fscache_n_allocs_object_dead));
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	fscache_stat(&fscache_n_cop_allocate_page);
	ret = object->cache->ops->allocate_page(op, page, gfp);
	fscache_stat_d(&fscache_n_cop_allocate_page);

error:
	if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_allocs_intr);
	else if (ret < 0)
		fscache_stat(&fscache_n_allocs_nobufs);
	else
		fscache_stat(&fscache_n_allocs_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock:
	spin_unlock(&cookie->lock);
	kfree(op);
nobufs:
	fscache_stat(&fscache_n_allocs_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_alloc_page);
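
/*
 * Hypothetical usage sketch (an editor's illustration, not part of the
 * original file): when a netfs is about to fill a page entirely itself, it
 * can skip the read and just reserve a cache block, then push the data in
 * later with fscache_write_page().  "my_note_page_cacheable()" is an
 * assumed helper:
 *
 *	if (fscache_alloc_page(cookie, page, GFP_KERNEL) == 0)
 *		my_note_page_cacheable(page);
 */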

/*
 * release a write op reference
 */
static void fscache_release_write_op(struct fscache_operation *_op)
{
	_enter("{OP%x}", _op->debug_id);
}

/*
 * perform the background storage of a page into the cache
 */
static void fscache_write_op(struct fscache_operation *_op)
{
	struct fscache_storage *op =
		container_of(_op, struct fscache_storage, op);
	struct fscache_object *object = op->op.object;
	struct fscache_cookie *cookie;
	struct page *page;
	unsigned n;
	void *results[1];
	int ret;

	_enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));

	fscache_set_op_state(&op->op, "GetPage");

	spin_lock(&object->lock);
	cookie = object->cookie;

	if (!fscache_object_is_active(object) || !cookie) {
		spin_unlock(&object->lock);
		_leave("");
		return;
	}

	spin_lock(&cookie->stores_lock);

	fscache_stat(&fscache_n_store_calls);

	/* find a page to store */
	page = NULL;
	n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, 1,
				       FSCACHE_COOKIE_PENDING_TAG);
	if (n != 1)
		goto superseded;
	page = results[0];
	_debug("gang %d [%lx]", n, page->index);
	if (page->index > op->store_limit) {
		fscache_stat(&fscache_n_store_pages_over_limit);
		goto superseded;
	}

	radix_tree_tag_set(&cookie->stores, page->index,
			   FSCACHE_COOKIE_STORING_TAG);
	radix_tree_tag_clear(&cookie->stores, page->index,
			     FSCACHE_COOKIE_PENDING_TAG);

	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);

	fscache_set_op_state(&op->op, "Store");
	fscache_stat(&fscache_n_store_pages);
	fscache_stat(&fscache_n_cop_write_page);
	ret = object->cache->ops->write_page(op, page);
	fscache_stat_d(&fscache_n_cop_write_page);
	fscache_set_op_state(&op->op, "EndWrite");
	fscache_end_page_write(object, page);
	if (ret < 0) {
		fscache_set_op_state(&op->op, "Abort");
		fscache_abort_object(object);
	} else {
		fscache_enqueue_operation(&op->op);
	}

	_leave("");
	return;

superseded:
	/* this writer is going away and there aren't any more things to
	 * write */
	_debug("cease");
	spin_unlock(&cookie->stores_lock);
	clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
	spin_unlock(&object->lock);
	_leave("");
}

/*
 * request a page be stored in the cache
 * - returns:
 *   -ENOMEM - out of memory, nothing done
 *   -ENOBUFS - no backing object available in which to cache the page
 *   0 - dispatched a write - it'll call end_io_func() when finished
 *
 * if the cookie still has a backing object at this point, that object can be
 * in one of a few states with respect to storage processing:
 *
 *  (1) negative lookup, object not yet created (FSCACHE_COOKIE_CREATING is
 *      set)
 *
 *	(a) no writes yet (set FSCACHE_COOKIE_PENDING_FILL and queue deferred
 *	    fill op)
 *
 *	(b) writes deferred till post-creation (mark page for writing and
 *	    return immediately)
 *
 *  (2) negative lookup, object created, initial fill being made from netfs
 *      (FSCACHE_COOKIE_INITIAL_FILL is set)
 *
 *	(a) fill point not yet reached this page (mark page for writing and
 *	    return)
 *
 *	(b) fill point passed this page (queue op to store this page)
 *
 *  (3) object extant (queue op to store this page)
 *
 * any other state is invalid
 */
int __fscache_write_page(struct fscache_cookie *cookie,
			 struct page *page,
			 gfp_t gfp)
{
	struct fscache_storage *op;
	struct fscache_object *object;
	int ret;

	_enter("%p,%x,", cookie, (u32) page->flags);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERT(PageFsCache(page));

	fscache_stat(&fscache_n_stores);

	op = kzalloc(sizeof(*op), GFP_NOIO);
	if (!op)
		goto nomem;

	fscache_operation_init(&op->op, fscache_release_write_op);
	fscache_operation_init_slow(&op->op, fscache_write_op);
	op->op.flags = FSCACHE_OP_SLOW | (1 << FSCACHE_OP_WAITING);
	fscache_set_op_name(&op->op, "Write1");

	ret = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
	if (ret < 0)
		goto nomem_free;

	ret = -ENOBUFS;
	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);
	if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
		goto nobufs;

	/* add the page to the pending-storage radix tree on the backing
	 * object */
	spin_lock(&object->lock);
	spin_lock(&cookie->stores_lock);

	_debug("store limit %llx", (unsigned long long) object->store_limit);

	ret = radix_tree_insert(&cookie->stores, page->index, page);
	if (ret < 0) {
		if (ret == -EEXIST)
			goto already_queued;
		_debug("insert failed %d", ret);
		goto nobufs_unlock_obj;
	}

	radix_tree_tag_set(&cookie->stores, page->index,
			   FSCACHE_COOKIE_PENDING_TAG);
	page_cache_get(page);

	/* we only want one writer at a time, but we do need to queue new
	 * writers after exclusive ops */
	if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags))
		goto already_pending;

	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);

	op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
	op->store_limit = object->store_limit;

	if (fscache_submit_op(object, &op->op) < 0)
		goto submit_failed;

	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	fscache_stat(&fscache_n_store_ops);
	fscache_stat(&fscache_n_stores_ok);

	/* the slow work queue now carries its own ref on the object */
	fscache_put_operation(&op->op);
	_leave(" = 0");
	return 0;

already_queued:
	fscache_stat(&fscache_n_stores_again);
already_pending:
	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);
	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	kfree(op);
	fscache_stat(&fscache_n_stores_ok);
	_leave(" = 0");
	return 0;

submit_failed:
	spin_lock(&cookie->stores_lock);
	radix_tree_delete(&cookie->stores, page->index);
	spin_unlock(&cookie->stores_lock);
	page_cache_release(page);
	ret = -ENOBUFS;
	goto nobufs;

nobufs_unlock_obj:
	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);
nobufs:
	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	kfree(op);
	fscache_stat(&fscache_n_stores_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;

nomem_free:
	kfree(op);
nomem:
	fscache_stat(&fscache_n_stores_oom);
	_leave(" = -ENOMEM");
	return -ENOMEM;
}
EXPORT_SYMBOL(__fscache_write_page);
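
/*
 * Hypothetical usage sketch (an editor's illustration, not part of the
 * original file): once the netfs has a page's data, e.g. in its read
 * completion handler, it can hand the page to the cache, uncaching it if
 * the write cannot be queued:
 *
 *	if (PageFsCache(page) &&
 *	    fscache_write_page(cookie, page, GFP_KERNEL) != 0)
 *		fscache_uncache_page(cookie, page);
 */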

/*
 * remove a page from the cache
 */
void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
{
	struct fscache_object *object;

	_enter(",%p", page);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	fscache_stat(&fscache_n_uncaches);

	/* cache withdrawal may beat us to it */
	if (!PageFsCache(page))
		goto done;

	/* get the object */
	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects)) {
		ClearPageFsCache(page);
		goto done_unlock;
	}

	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	/* there might now be stuff on disk we could read */
	clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);

	/* only invoke the cache backend if we managed to mark the page
	 * uncached here; this deals with synchronisation vs withdrawal */
	if (TestClearPageFsCache(page) &&
	    object->cache->ops->uncache_page) {
		/* the cache backend releases the cookie lock */
		fscache_stat(&fscache_n_cop_uncache_page);
		object->cache->ops->uncache_page(object, page);
		fscache_stat_d(&fscache_n_cop_uncache_page);
		goto done;
	}

done_unlock:
	spin_unlock(&cookie->lock);
done:
	_leave("");
}
EXPORT_SYMBOL(__fscache_uncache_page);
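
/*
 * Hypothetical usage sketch (an editor's illustration, not part of the
 * original file): on page invalidation the netfs should wait out any store
 * in progress and then drop the cache's mark on the page:
 *
 *	if (PageFsCache(page)) {
 *		fscache_wait_on_page_write(cookie, page);
 *		fscache_uncache_page(cookie, page);
 *	}
 */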

/**
 * fscache_mark_pages_cached - Mark pages as being cached
 * @op: The retrieval op pages are being marked for
 * @pagevec: The pages to be marked
 *
 * Mark a bunch of netfs pages as being cached.  After this is called,
 * the netfs must call fscache_uncache_page() to remove the mark.
 */
void fscache_mark_pages_cached(struct fscache_retrieval *op,
			       struct pagevec *pagevec)
{
	struct fscache_cookie *cookie = op->op.object->cookie;
	unsigned long loop;

#ifdef CONFIG_FSCACHE_STATS
	atomic_add(pagevec->nr, &fscache_n_marks);
#endif

	for (loop = 0; loop < pagevec->nr; loop++) {
		struct page *page = pagevec->pages[loop];

		_debug("- mark %p{%lx}", page, page->index);
		if (TestSetPageFsCache(page)) {
			static bool once_only;
			if (!once_only) {
				once_only = true;
				printk(KERN_WARNING "FS-Cache:"
				       " Cookie type %s marked page %lx"
				       " multiple times\n",
				       cookie->def->name, page->index);
			}
		}
	}

	if (cookie->def->mark_pages_cached)
		cookie->def->mark_pages_cached(cookie->netfs_data,
					       op->mapping, pagevec);
	pagevec_reinit(pagevec);
}
EXPORT_SYMBOL(fscache_mark_pages_cached);