/* Cache page management and data I/O routines
 *
 * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
12 #define FSCACHE_DEBUG_LEVEL PAGE
13 #include <linux/module.h>
14 #include <linux/fscache-cache.h>
15 #include <linux/buffer_head.h>
16 #include <linux/pagevec.h>
17 #include <linux/slab.h>
21 * check to see if a page is being written to the cache
23 bool __fscache_check_page_write(struct fscache_cookie
*cookie
, struct page
*page
)
28 val
= radix_tree_lookup(&cookie
->stores
, page
->index
);
33 EXPORT_SYMBOL(__fscache_check_page_write
);
36 * wait for a page to finish being written to the cache
38 void __fscache_wait_on_page_write(struct fscache_cookie
*cookie
, struct page
*page
)
40 wait_queue_head_t
*wq
= bit_waitqueue(&cookie
->flags
, 0);
42 wait_event(*wq
, !__fscache_check_page_write(cookie
, page
));
44 EXPORT_SYMBOL(__fscache_wait_on_page_write
);
47 * wait for a page to finish being written to the cache. Put a timeout here
48 * since we might be called recursively via parent fs.
51 bool release_page_wait_timeout(struct fscache_cookie
*cookie
, struct page
*page
)
53 wait_queue_head_t
*wq
= bit_waitqueue(&cookie
->flags
, 0);
55 return wait_event_timeout(*wq
, !__fscache_check_page_write(cookie
, page
),
60 * decide whether a page can be released, possibly by cancelling a store to it
61 * - we're allowed to sleep if __GFP_WAIT is flagged
63 bool __fscache_maybe_release_page(struct fscache_cookie
*cookie
,
70 _enter("%p,%p,%x", cookie
, page
, gfp
);
74 val
= radix_tree_lookup(&cookie
->stores
, page
->index
);
77 fscache_stat(&fscache_n_store_vmscan_not_storing
);
78 __fscache_uncache_page(cookie
, page
);
82 /* see if the page is actually undergoing storage - if so we can't get
83 * rid of it till the cache has finished with it */
84 if (radix_tree_tag_get(&cookie
->stores
, page
->index
,
85 FSCACHE_COOKIE_STORING_TAG
)) {
90 /* the page is pending storage, so we attempt to cancel the store and
91 * discard the store request so that the page can be reclaimed */
92 spin_lock(&cookie
->stores_lock
);
95 if (radix_tree_tag_get(&cookie
->stores
, page
->index
,
96 FSCACHE_COOKIE_STORING_TAG
)) {
97 /* the page started to undergo storage whilst we were looking,
98 * so now we can only wait or return */
99 spin_unlock(&cookie
->stores_lock
);
103 xpage
= radix_tree_delete(&cookie
->stores
, page
->index
);
104 spin_unlock(&cookie
->stores_lock
);
107 fscache_stat(&fscache_n_store_vmscan_cancelled
);
108 fscache_stat(&fscache_n_store_radix_deletes
);
109 ASSERTCMP(xpage
, ==, page
);
111 fscache_stat(&fscache_n_store_vmscan_gone
);
114 wake_up_bit(&cookie
->flags
, 0);
116 page_cache_release(xpage
);
117 __fscache_uncache_page(cookie
, page
);
121 /* We will wait here if we're allowed to, but that could deadlock the
122 * allocator as the work threads writing to the cache may all end up
123 * sleeping on memory allocation, so we may need to impose a timeout
125 if (!(gfp
& __GFP_WAIT
) || !(gfp
& __GFP_FS
)) {
126 fscache_stat(&fscache_n_store_vmscan_busy
);
130 fscache_stat(&fscache_n_store_vmscan_wait
);
131 if (!release_page_wait_timeout(cookie
, page
))
132 _debug("fscache writeout timeout page: %p{%lx}",
138 EXPORT_SYMBOL(__fscache_maybe_release_page
);
141 * note that a page has finished being written to the cache
143 static void fscache_end_page_write(struct fscache_object
*object
,
146 struct fscache_cookie
*cookie
;
147 struct page
*xpage
= NULL
;
149 spin_lock(&object
->lock
);
150 cookie
= object
->cookie
;
152 /* delete the page from the tree if it is now no longer
154 spin_lock(&cookie
->stores_lock
);
155 radix_tree_tag_clear(&cookie
->stores
, page
->index
,
156 FSCACHE_COOKIE_STORING_TAG
);
157 if (!radix_tree_tag_get(&cookie
->stores
, page
->index
,
158 FSCACHE_COOKIE_PENDING_TAG
)) {
159 fscache_stat(&fscache_n_store_radix_deletes
);
160 xpage
= radix_tree_delete(&cookie
->stores
, page
->index
);
162 spin_unlock(&cookie
->stores_lock
);
163 wake_up_bit(&cookie
->flags
, 0);
165 spin_unlock(&object
->lock
);
167 page_cache_release(xpage
);
171 * actually apply the changed attributes to a cache object
173 static void fscache_attr_changed_op(struct fscache_operation
*op
)
175 struct fscache_object
*object
= op
->object
;
178 _enter("{OBJ%x OP%x}", object
->debug_id
, op
->debug_id
);
180 fscache_stat(&fscache_n_attr_changed_calls
);
182 if (fscache_object_is_active(object
)) {
183 fscache_stat(&fscache_n_cop_attr_changed
);
184 ret
= object
->cache
->ops
->attr_changed(object
);
185 fscache_stat_d(&fscache_n_cop_attr_changed
);
187 fscache_abort_object(object
);
190 fscache_op_complete(op
, true);
195 * notification that the attributes on an object have changed
197 int __fscache_attr_changed(struct fscache_cookie
*cookie
)
199 struct fscache_operation
*op
;
200 struct fscache_object
*object
;
201 bool wake_cookie
= false;
203 _enter("%p", cookie
);
205 ASSERTCMP(cookie
->def
->type
, !=, FSCACHE_COOKIE_TYPE_INDEX
);
207 fscache_stat(&fscache_n_attr_changed
);
209 op
= kzalloc(sizeof(*op
), GFP_KERNEL
);
211 fscache_stat(&fscache_n_attr_changed_nomem
);
212 _leave(" = -ENOMEM");
216 fscache_operation_init(op
, fscache_attr_changed_op
, NULL
);
217 op
->flags
= FSCACHE_OP_ASYNC
|
218 (1 << FSCACHE_OP_EXCLUSIVE
) |
219 (1 << FSCACHE_OP_UNUSE_COOKIE
);
221 spin_lock(&cookie
->lock
);
223 if (!fscache_cookie_enabled(cookie
) ||
224 hlist_empty(&cookie
->backing_objects
))
226 object
= hlist_entry(cookie
->backing_objects
.first
,
227 struct fscache_object
, cookie_link
);
229 __fscache_use_cookie(cookie
);
230 if (fscache_submit_exclusive_op(object
, op
) < 0)
232 spin_unlock(&cookie
->lock
);
233 fscache_stat(&fscache_n_attr_changed_ok
);
234 fscache_put_operation(op
);
239 wake_cookie
= __fscache_unuse_cookie(cookie
);
241 spin_unlock(&cookie
->lock
);
244 __fscache_wake_unused_cookie(cookie
);
245 fscache_stat(&fscache_n_attr_changed_nobufs
);
246 _leave(" = %d", -ENOBUFS
);
249 EXPORT_SYMBOL(__fscache_attr_changed
);
252 * release a retrieval op reference
254 static void fscache_release_retrieval_op(struct fscache_operation
*_op
)
256 struct fscache_retrieval
*op
=
257 container_of(_op
, struct fscache_retrieval
, op
);
259 _enter("{OP%x}", op
->op
.debug_id
);
261 ASSERTCMP(atomic_read(&op
->n_pages
), ==, 0);
263 fscache_hist(fscache_retrieval_histogram
, op
->start_time
);
265 fscache_put_context(op
->op
.object
->cookie
, op
->context
);
271 * allocate a retrieval op
273 static struct fscache_retrieval
*fscache_alloc_retrieval(
274 struct fscache_cookie
*cookie
,
275 struct address_space
*mapping
,
276 fscache_rw_complete_t end_io_func
,
279 struct fscache_retrieval
*op
;
281 /* allocate a retrieval operation and attempt to submit it */
282 op
= kzalloc(sizeof(*op
), GFP_NOIO
);
284 fscache_stat(&fscache_n_retrievals_nomem
);
288 fscache_operation_init(&op
->op
, NULL
, fscache_release_retrieval_op
);
289 op
->op
.flags
= FSCACHE_OP_MYTHREAD
|
290 (1UL << FSCACHE_OP_WAITING
) |
291 (1UL << FSCACHE_OP_UNUSE_COOKIE
);
292 op
->mapping
= mapping
;
293 op
->end_io_func
= end_io_func
;
294 op
->context
= context
;
295 op
->start_time
= jiffies
;
296 INIT_LIST_HEAD(&op
->to_do
);
301 * wait for a deferred lookup to complete
303 int fscache_wait_for_deferred_lookup(struct fscache_cookie
*cookie
)
309 if (!test_bit(FSCACHE_COOKIE_LOOKING_UP
, &cookie
->flags
)) {
310 _leave(" = 0 [imm]");
314 fscache_stat(&fscache_n_retrievals_wait
);
317 if (wait_on_bit(&cookie
->flags
, FSCACHE_COOKIE_LOOKING_UP
,
318 TASK_INTERRUPTIBLE
) != 0) {
319 fscache_stat(&fscache_n_retrievals_intr
);
320 _leave(" = -ERESTARTSYS");
324 ASSERT(!test_bit(FSCACHE_COOKIE_LOOKING_UP
, &cookie
->flags
));
327 fscache_hist(fscache_retrieval_delay_histogram
, jif
);
328 _leave(" = 0 [dly]");
333 * Handle cancellation of a pending retrieval op
335 static void fscache_do_cancel_retrieval(struct fscache_operation
*_op
)
337 struct fscache_retrieval
*op
=
338 container_of(_op
, struct fscache_retrieval
, op
);
340 atomic_set(&op
->n_pages
, 0);
344 * wait for an object to become active (or dead)
346 int fscache_wait_for_operation_activation(struct fscache_object
*object
,
347 struct fscache_operation
*op
,
348 atomic_t
*stat_op_waits
,
349 atomic_t
*stat_object_dead
,
350 void (*do_cancel
)(struct fscache_operation
*))
354 if (!test_bit(FSCACHE_OP_WAITING
, &op
->flags
))
359 fscache_stat(stat_op_waits
);
360 if (wait_on_bit(&op
->flags
, FSCACHE_OP_WAITING
,
361 TASK_INTERRUPTIBLE
) != 0) {
362 ret
= fscache_cancel_op(op
, do_cancel
);
366 /* it's been removed from the pending queue by another party,
367 * so we should get to run shortly */
368 wait_on_bit(&op
->flags
, FSCACHE_OP_WAITING
,
369 TASK_UNINTERRUPTIBLE
);
374 if (op
->state
== FSCACHE_OP_ST_CANCELLED
) {
375 if (stat_object_dead
)
376 fscache_stat(stat_object_dead
);
377 _leave(" = -ENOBUFS [cancelled]");
380 if (unlikely(fscache_object_is_dying(object
) ||
381 fscache_cache_is_broken(object
))) {
382 enum fscache_operation_state state
= op
->state
;
383 fscache_cancel_op(op
, do_cancel
);
384 if (stat_object_dead
)
385 fscache_stat(stat_object_dead
);
386 _leave(" = -ENOBUFS [obj dead %d]", state
);
393 * read a page from the cache or allocate a block in which to store it
395 * -ENOMEM - out of memory, nothing done
396 * -ERESTARTSYS - interrupted
397 * -ENOBUFS - no backing object available in which to cache the block
398 * -ENODATA - no data available in the backing object for this block
399 * 0 - dispatched a read - it'll call end_io_func() when finished
401 int __fscache_read_or_alloc_page(struct fscache_cookie
*cookie
,
403 fscache_rw_complete_t end_io_func
,
407 struct fscache_retrieval
*op
;
408 struct fscache_object
*object
;
409 bool wake_cookie
= false;
412 _enter("%p,%p,,,", cookie
, page
);
414 fscache_stat(&fscache_n_retrievals
);
416 if (hlist_empty(&cookie
->backing_objects
))
419 if (test_bit(FSCACHE_COOKIE_INVALIDATING
, &cookie
->flags
)) {
420 _leave(" = -ENOBUFS [invalidating]");
424 ASSERTCMP(cookie
->def
->type
, !=, FSCACHE_COOKIE_TYPE_INDEX
);
425 ASSERTCMP(page
, !=, NULL
);
427 if (fscache_wait_for_deferred_lookup(cookie
) < 0)
430 op
= fscache_alloc_retrieval(cookie
, page
->mapping
,
431 end_io_func
, context
);
433 _leave(" = -ENOMEM");
436 atomic_set(&op
->n_pages
, 1);
438 spin_lock(&cookie
->lock
);
440 if (!fscache_cookie_enabled(cookie
) ||
441 hlist_empty(&cookie
->backing_objects
))
443 object
= hlist_entry(cookie
->backing_objects
.first
,
444 struct fscache_object
, cookie_link
);
446 ASSERT(test_bit(FSCACHE_OBJECT_IS_LOOKED_UP
, &object
->flags
));
448 __fscache_use_cookie(cookie
);
449 atomic_inc(&object
->n_reads
);
450 __set_bit(FSCACHE_OP_DEC_READ_CNT
, &op
->op
.flags
);
452 if (fscache_submit_op(object
, &op
->op
) < 0)
453 goto nobufs_unlock_dec
;
454 spin_unlock(&cookie
->lock
);
456 fscache_stat(&fscache_n_retrieval_ops
);
458 /* pin the netfs read context in case we need to do the actual netfs
459 * read because we've encountered a cache read failure */
460 fscache_get_context(object
->cookie
, op
->context
);
462 /* we wait for the operation to become active, and then process it
463 * *here*, in this thread, and not in the thread pool */
464 ret
= fscache_wait_for_operation_activation(
466 __fscache_stat(&fscache_n_retrieval_op_waits
),
467 __fscache_stat(&fscache_n_retrievals_object_dead
),
468 fscache_do_cancel_retrieval
);
472 /* ask the cache to honour the operation */
473 if (test_bit(FSCACHE_COOKIE_NO_DATA_YET
, &object
->cookie
->flags
)) {
474 fscache_stat(&fscache_n_cop_allocate_page
);
475 ret
= object
->cache
->ops
->allocate_page(op
, page
, gfp
);
476 fscache_stat_d(&fscache_n_cop_allocate_page
);
480 fscache_stat(&fscache_n_cop_read_or_alloc_page
);
481 ret
= object
->cache
->ops
->read_or_alloc_page(op
, page
, gfp
);
482 fscache_stat_d(&fscache_n_cop_read_or_alloc_page
);
487 fscache_stat(&fscache_n_retrievals_nomem
);
488 else if (ret
== -ERESTARTSYS
)
489 fscache_stat(&fscache_n_retrievals_intr
);
490 else if (ret
== -ENODATA
)
491 fscache_stat(&fscache_n_retrievals_nodata
);
493 fscache_stat(&fscache_n_retrievals_nobufs
);
495 fscache_stat(&fscache_n_retrievals_ok
);
497 fscache_put_retrieval(op
);
498 _leave(" = %d", ret
);
502 atomic_dec(&object
->n_reads
);
503 wake_cookie
= __fscache_unuse_cookie(cookie
);
505 spin_unlock(&cookie
->lock
);
507 __fscache_wake_unused_cookie(cookie
);
510 fscache_stat(&fscache_n_retrievals_nobufs
);
511 _leave(" = -ENOBUFS");
514 EXPORT_SYMBOL(__fscache_read_or_alloc_page
);
517 * read a list of page from the cache or allocate a block in which to store
520 * -ENOMEM - out of memory, some pages may be being read
521 * -ERESTARTSYS - interrupted, some pages may be being read
522 * -ENOBUFS - no backing object or space available in which to cache any
523 * pages not being read
524 * -ENODATA - no data available in the backing object for some or all of
526 * 0 - dispatched a read on all pages
528 * end_io_func() will be called for each page read from the cache as it is
529 * finishes being read
531 * any pages for which a read is dispatched will be removed from pages and
534 int __fscache_read_or_alloc_pages(struct fscache_cookie
*cookie
,
535 struct address_space
*mapping
,
536 struct list_head
*pages
,
538 fscache_rw_complete_t end_io_func
,
542 struct fscache_retrieval
*op
;
543 struct fscache_object
*object
;
544 bool wake_cookie
= false;
547 _enter("%p,,%d,,,", cookie
, *nr_pages
);
549 fscache_stat(&fscache_n_retrievals
);
551 if (hlist_empty(&cookie
->backing_objects
))
554 if (test_bit(FSCACHE_COOKIE_INVALIDATING
, &cookie
->flags
)) {
555 _leave(" = -ENOBUFS [invalidating]");
559 ASSERTCMP(cookie
->def
->type
, !=, FSCACHE_COOKIE_TYPE_INDEX
);
560 ASSERTCMP(*nr_pages
, >, 0);
561 ASSERT(!list_empty(pages
));
563 if (fscache_wait_for_deferred_lookup(cookie
) < 0)
566 op
= fscache_alloc_retrieval(cookie
, mapping
, end_io_func
, context
);
569 atomic_set(&op
->n_pages
, *nr_pages
);
571 spin_lock(&cookie
->lock
);
573 if (!fscache_cookie_enabled(cookie
) ||
574 hlist_empty(&cookie
->backing_objects
))
576 object
= hlist_entry(cookie
->backing_objects
.first
,
577 struct fscache_object
, cookie_link
);
579 __fscache_use_cookie(cookie
);
580 atomic_inc(&object
->n_reads
);
581 __set_bit(FSCACHE_OP_DEC_READ_CNT
, &op
->op
.flags
);
583 if (fscache_submit_op(object
, &op
->op
) < 0)
584 goto nobufs_unlock_dec
;
585 spin_unlock(&cookie
->lock
);
587 fscache_stat(&fscache_n_retrieval_ops
);
589 /* pin the netfs read context in case we need to do the actual netfs
590 * read because we've encountered a cache read failure */
591 fscache_get_context(object
->cookie
, op
->context
);
593 /* we wait for the operation to become active, and then process it
594 * *here*, in this thread, and not in the thread pool */
595 ret
= fscache_wait_for_operation_activation(
597 __fscache_stat(&fscache_n_retrieval_op_waits
),
598 __fscache_stat(&fscache_n_retrievals_object_dead
),
599 fscache_do_cancel_retrieval
);
603 /* ask the cache to honour the operation */
604 if (test_bit(FSCACHE_COOKIE_NO_DATA_YET
, &object
->cookie
->flags
)) {
605 fscache_stat(&fscache_n_cop_allocate_pages
);
606 ret
= object
->cache
->ops
->allocate_pages(
607 op
, pages
, nr_pages
, gfp
);
608 fscache_stat_d(&fscache_n_cop_allocate_pages
);
610 fscache_stat(&fscache_n_cop_read_or_alloc_pages
);
611 ret
= object
->cache
->ops
->read_or_alloc_pages(
612 op
, pages
, nr_pages
, gfp
);
613 fscache_stat_d(&fscache_n_cop_read_or_alloc_pages
);
618 fscache_stat(&fscache_n_retrievals_nomem
);
619 else if (ret
== -ERESTARTSYS
)
620 fscache_stat(&fscache_n_retrievals_intr
);
621 else if (ret
== -ENODATA
)
622 fscache_stat(&fscache_n_retrievals_nodata
);
624 fscache_stat(&fscache_n_retrievals_nobufs
);
626 fscache_stat(&fscache_n_retrievals_ok
);
628 fscache_put_retrieval(op
);
629 _leave(" = %d", ret
);
633 atomic_dec(&object
->n_reads
);
634 wake_cookie
= __fscache_unuse_cookie(cookie
);
636 spin_unlock(&cookie
->lock
);
639 __fscache_wake_unused_cookie(cookie
);
641 fscache_stat(&fscache_n_retrievals_nobufs
);
642 _leave(" = -ENOBUFS");
645 EXPORT_SYMBOL(__fscache_read_or_alloc_pages
);
648 * allocate a block in the cache on which to store a page
650 * -ENOMEM - out of memory, nothing done
651 * -ERESTARTSYS - interrupted
652 * -ENOBUFS - no backing object available in which to cache the block
653 * 0 - block allocated
655 int __fscache_alloc_page(struct fscache_cookie
*cookie
,
659 struct fscache_retrieval
*op
;
660 struct fscache_object
*object
;
661 bool wake_cookie
= false;
664 _enter("%p,%p,,,", cookie
, page
);
666 fscache_stat(&fscache_n_allocs
);
668 if (hlist_empty(&cookie
->backing_objects
))
671 ASSERTCMP(cookie
->def
->type
, !=, FSCACHE_COOKIE_TYPE_INDEX
);
672 ASSERTCMP(page
, !=, NULL
);
674 if (test_bit(FSCACHE_COOKIE_INVALIDATING
, &cookie
->flags
)) {
675 _leave(" = -ENOBUFS [invalidating]");
679 if (fscache_wait_for_deferred_lookup(cookie
) < 0)
682 op
= fscache_alloc_retrieval(cookie
, page
->mapping
, NULL
, NULL
);
685 atomic_set(&op
->n_pages
, 1);
687 spin_lock(&cookie
->lock
);
689 if (!fscache_cookie_enabled(cookie
) ||
690 hlist_empty(&cookie
->backing_objects
))
692 object
= hlist_entry(cookie
->backing_objects
.first
,
693 struct fscache_object
, cookie_link
);
695 __fscache_use_cookie(cookie
);
696 if (fscache_submit_op(object
, &op
->op
) < 0)
697 goto nobufs_unlock_dec
;
698 spin_unlock(&cookie
->lock
);
700 fscache_stat(&fscache_n_alloc_ops
);
702 ret
= fscache_wait_for_operation_activation(
704 __fscache_stat(&fscache_n_alloc_op_waits
),
705 __fscache_stat(&fscache_n_allocs_object_dead
),
706 fscache_do_cancel_retrieval
);
710 /* ask the cache to honour the operation */
711 fscache_stat(&fscache_n_cop_allocate_page
);
712 ret
= object
->cache
->ops
->allocate_page(op
, page
, gfp
);
713 fscache_stat_d(&fscache_n_cop_allocate_page
);
716 if (ret
== -ERESTARTSYS
)
717 fscache_stat(&fscache_n_allocs_intr
);
719 fscache_stat(&fscache_n_allocs_nobufs
);
721 fscache_stat(&fscache_n_allocs_ok
);
723 fscache_put_retrieval(op
);
724 _leave(" = %d", ret
);
728 wake_cookie
= __fscache_unuse_cookie(cookie
);
730 spin_unlock(&cookie
->lock
);
733 __fscache_wake_unused_cookie(cookie
);
735 fscache_stat(&fscache_n_allocs_nobufs
);
736 _leave(" = -ENOBUFS");
739 EXPORT_SYMBOL(__fscache_alloc_page
);
742 * Unmark pages allocate in the readahead code path (via:
743 * fscache_readpages_or_alloc) after delegating to the base filesystem
745 void __fscache_readpages_cancel(struct fscache_cookie
*cookie
,
746 struct list_head
*pages
)
750 list_for_each_entry(page
, pages
, lru
) {
751 if (PageFsCache(page
))
752 __fscache_uncache_page(cookie
, page
);
755 EXPORT_SYMBOL(__fscache_readpages_cancel
);
758 * release a write op reference
760 static void fscache_release_write_op(struct fscache_operation
*_op
)
762 _enter("{OP%x}", _op
->debug_id
);
766 * perform the background storage of a page into the cache
768 static void fscache_write_op(struct fscache_operation
*_op
)
770 struct fscache_storage
*op
=
771 container_of(_op
, struct fscache_storage
, op
);
772 struct fscache_object
*object
= op
->op
.object
;
773 struct fscache_cookie
*cookie
;
779 _enter("{OP%x,%d}", op
->op
.debug_id
, atomic_read(&op
->op
.usage
));
781 spin_lock(&object
->lock
);
782 cookie
= object
->cookie
;
784 if (!fscache_object_is_active(object
)) {
785 /* If we get here, then the on-disk cache object likely longer
786 * exists, so we should just cancel this write operation.
788 spin_unlock(&object
->lock
);
789 fscache_op_complete(&op
->op
, false);
790 _leave(" [inactive]");
795 /* If we get here, then the cookie belonging to the object was
796 * detached, probably by the cookie being withdrawn due to
797 * memory pressure, which means that the pages we might write
798 * to the cache from no longer exist - therefore, we can just
799 * cancel this write operation.
801 spin_unlock(&object
->lock
);
802 fscache_op_complete(&op
->op
, false);
803 _leave(" [cancel] op{f=%lx s=%u} obj{s=%s f=%lx}",
804 _op
->flags
, _op
->state
, object
->state
->short_name
,
809 spin_lock(&cookie
->stores_lock
);
811 fscache_stat(&fscache_n_store_calls
);
813 /* find a page to store */
815 n
= radix_tree_gang_lookup_tag(&cookie
->stores
, results
, 0, 1,
816 FSCACHE_COOKIE_PENDING_TAG
);
820 _debug("gang %d [%lx]", n
, page
->index
);
821 if (page
->index
> op
->store_limit
) {
822 fscache_stat(&fscache_n_store_pages_over_limit
);
826 radix_tree_tag_set(&cookie
->stores
, page
->index
,
827 FSCACHE_COOKIE_STORING_TAG
);
828 radix_tree_tag_clear(&cookie
->stores
, page
->index
,
829 FSCACHE_COOKIE_PENDING_TAG
);
831 spin_unlock(&cookie
->stores_lock
);
832 spin_unlock(&object
->lock
);
834 fscache_stat(&fscache_n_store_pages
);
835 fscache_stat(&fscache_n_cop_write_page
);
836 ret
= object
->cache
->ops
->write_page(op
, page
);
837 fscache_stat_d(&fscache_n_cop_write_page
);
838 fscache_end_page_write(object
, page
);
840 fscache_abort_object(object
);
841 fscache_op_complete(&op
->op
, true);
843 fscache_enqueue_operation(&op
->op
);
850 /* this writer is going away and there aren't any more things to
853 spin_unlock(&cookie
->stores_lock
);
854 clear_bit(FSCACHE_OBJECT_PENDING_WRITE
, &object
->flags
);
855 spin_unlock(&object
->lock
);
856 fscache_op_complete(&op
->op
, true);
861 * Clear the pages pending writing for invalidation
863 void fscache_invalidate_writes(struct fscache_cookie
*cookie
)
872 spin_lock(&cookie
->stores_lock
);
873 n
= radix_tree_gang_lookup_tag(&cookie
->stores
, results
, 0,
875 FSCACHE_COOKIE_PENDING_TAG
);
877 spin_unlock(&cookie
->stores_lock
);
881 for (i
= n
- 1; i
>= 0; i
--) {
883 radix_tree_delete(&cookie
->stores
, page
->index
);
886 spin_unlock(&cookie
->stores_lock
);
888 for (i
= n
- 1; i
>= 0; i
--)
889 page_cache_release(results
[i
]);
896 * request a page be stored in the cache
898 * -ENOMEM - out of memory, nothing done
899 * -ENOBUFS - no backing object available in which to cache the page
900 * 0 - dispatched a write - it'll call end_io_func() when finished
902 * if the cookie still has a backing object at this point, that object can be
903 * in one of a few states with respect to storage processing:
905 * (1) negative lookup, object not yet created (FSCACHE_COOKIE_CREATING is
910 * (b) writes deferred till post-creation (mark page for writing and
911 * return immediately)
913 * (2) negative lookup, object created, initial fill being made from netfs
915 * (a) fill point not yet reached this page (mark page for writing and
918 * (b) fill point passed this page (queue op to store this page)
920 * (3) object extant (queue op to store this page)
922 * any other state is invalid
924 int __fscache_write_page(struct fscache_cookie
*cookie
,
928 struct fscache_storage
*op
;
929 struct fscache_object
*object
;
930 bool wake_cookie
= false;
933 _enter("%p,%x,", cookie
, (u32
) page
->flags
);
935 ASSERTCMP(cookie
->def
->type
, !=, FSCACHE_COOKIE_TYPE_INDEX
);
936 ASSERT(PageFsCache(page
));
938 fscache_stat(&fscache_n_stores
);
940 if (test_bit(FSCACHE_COOKIE_INVALIDATING
, &cookie
->flags
)) {
941 _leave(" = -ENOBUFS [invalidating]");
945 op
= kzalloc(sizeof(*op
), GFP_NOIO
| __GFP_NOMEMALLOC
| __GFP_NORETRY
);
949 fscache_operation_init(&op
->op
, fscache_write_op
,
950 fscache_release_write_op
);
951 op
->op
.flags
= FSCACHE_OP_ASYNC
|
952 (1 << FSCACHE_OP_WAITING
) |
953 (1 << FSCACHE_OP_UNUSE_COOKIE
);
955 ret
= radix_tree_maybe_preload(gfp
& ~__GFP_HIGHMEM
);
960 spin_lock(&cookie
->lock
);
962 if (!fscache_cookie_enabled(cookie
) ||
963 hlist_empty(&cookie
->backing_objects
))
965 object
= hlist_entry(cookie
->backing_objects
.first
,
966 struct fscache_object
, cookie_link
);
967 if (test_bit(FSCACHE_IOERROR
, &object
->cache
->flags
))
970 /* add the page to the pending-storage radix tree on the backing
972 spin_lock(&object
->lock
);
973 spin_lock(&cookie
->stores_lock
);
975 _debug("store limit %llx", (unsigned long long) object
->store_limit
);
977 ret
= radix_tree_insert(&cookie
->stores
, page
->index
, page
);
981 _debug("insert failed %d", ret
);
982 goto nobufs_unlock_obj
;
985 radix_tree_tag_set(&cookie
->stores
, page
->index
,
986 FSCACHE_COOKIE_PENDING_TAG
);
987 page_cache_get(page
);
989 /* we only want one writer at a time, but we do need to queue new
990 * writers after exclusive ops */
991 if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE
, &object
->flags
))
992 goto already_pending
;
994 spin_unlock(&cookie
->stores_lock
);
995 spin_unlock(&object
->lock
);
997 op
->op
.debug_id
= atomic_inc_return(&fscache_op_debug_id
);
998 op
->store_limit
= object
->store_limit
;
1000 __fscache_use_cookie(cookie
);
1001 if (fscache_submit_op(object
, &op
->op
) < 0)
1004 spin_unlock(&cookie
->lock
);
1005 radix_tree_preload_end();
1006 fscache_stat(&fscache_n_store_ops
);
1007 fscache_stat(&fscache_n_stores_ok
);
1009 /* the work queue now carries its own ref on the object */
1010 fscache_put_operation(&op
->op
);
1015 fscache_stat(&fscache_n_stores_again
);
1017 spin_unlock(&cookie
->stores_lock
);
1018 spin_unlock(&object
->lock
);
1019 spin_unlock(&cookie
->lock
);
1020 radix_tree_preload_end();
1022 fscache_stat(&fscache_n_stores_ok
);
1027 spin_lock(&cookie
->stores_lock
);
1028 radix_tree_delete(&cookie
->stores
, page
->index
);
1029 spin_unlock(&cookie
->stores_lock
);
1030 wake_cookie
= __fscache_unuse_cookie(cookie
);
1031 page_cache_release(page
);
1036 spin_unlock(&cookie
->stores_lock
);
1037 spin_unlock(&object
->lock
);
1039 spin_unlock(&cookie
->lock
);
1040 radix_tree_preload_end();
1043 __fscache_wake_unused_cookie(cookie
);
1044 fscache_stat(&fscache_n_stores_nobufs
);
1045 _leave(" = -ENOBUFS");
1051 fscache_stat(&fscache_n_stores_oom
);
1052 _leave(" = -ENOMEM");
1055 EXPORT_SYMBOL(__fscache_write_page
);
1058 * remove a page from the cache
1060 void __fscache_uncache_page(struct fscache_cookie
*cookie
, struct page
*page
)
1062 struct fscache_object
*object
;
1064 _enter(",%p", page
);
1066 ASSERTCMP(cookie
->def
->type
, !=, FSCACHE_COOKIE_TYPE_INDEX
);
1067 ASSERTCMP(page
, !=, NULL
);
1069 fscache_stat(&fscache_n_uncaches
);
1071 /* cache withdrawal may beat us to it */
1072 if (!PageFsCache(page
))
1075 /* get the object */
1076 spin_lock(&cookie
->lock
);
1078 if (hlist_empty(&cookie
->backing_objects
)) {
1079 ClearPageFsCache(page
);
1083 object
= hlist_entry(cookie
->backing_objects
.first
,
1084 struct fscache_object
, cookie_link
);
1086 /* there might now be stuff on disk we could read */
1087 clear_bit(FSCACHE_COOKIE_NO_DATA_YET
, &cookie
->flags
);
1089 /* only invoke the cache backend if we managed to mark the page
1090 * uncached here; this deals with synchronisation vs withdrawal */
1091 if (TestClearPageFsCache(page
) &&
1092 object
->cache
->ops
->uncache_page
) {
1093 /* the cache backend releases the cookie lock */
1094 fscache_stat(&fscache_n_cop_uncache_page
);
1095 object
->cache
->ops
->uncache_page(object
, page
);
1096 fscache_stat_d(&fscache_n_cop_uncache_page
);
1101 spin_unlock(&cookie
->lock
);
1105 EXPORT_SYMBOL(__fscache_uncache_page
);
1108 * fscache_mark_page_cached - Mark a page as being cached
1109 * @op: The retrieval op pages are being marked for
1110 * @page: The page to be marked
1112 * Mark a netfs page as being cached. After this is called, the netfs
1113 * must call fscache_uncache_page() to remove the mark.
1115 void fscache_mark_page_cached(struct fscache_retrieval
*op
, struct page
*page
)
1117 struct fscache_cookie
*cookie
= op
->op
.object
->cookie
;
1119 #ifdef CONFIG_FSCACHE_STATS
1120 atomic_inc(&fscache_n_marks
);
1123 _debug("- mark %p{%lx}", page
, page
->index
);
1124 if (TestSetPageFsCache(page
)) {
1125 static bool once_only
;
1128 pr_warn("Cookie type %s marked page %lx multiple times\n",
1129 cookie
->def
->name
, page
->index
);
1133 if (cookie
->def
->mark_page_cached
)
1134 cookie
->def
->mark_page_cached(cookie
->netfs_data
,
1137 EXPORT_SYMBOL(fscache_mark_page_cached
);
1140 * fscache_mark_pages_cached - Mark pages as being cached
1141 * @op: The retrieval op pages are being marked for
1142 * @pagevec: The pages to be marked
1144 * Mark a bunch of netfs pages as being cached. After this is called,
1145 * the netfs must call fscache_uncache_page() to remove the mark.
1147 void fscache_mark_pages_cached(struct fscache_retrieval
*op
,
1148 struct pagevec
*pagevec
)
1152 for (loop
= 0; loop
< pagevec
->nr
; loop
++)
1153 fscache_mark_page_cached(op
, pagevec
->pages
[loop
]);
1155 pagevec_reinit(pagevec
);
1157 EXPORT_SYMBOL(fscache_mark_pages_cached
);
1160 * Uncache all the pages in an inode that are marked PG_fscache, assuming them
1161 * to be associated with the given cookie.
1163 void __fscache_uncache_all_inode_pages(struct fscache_cookie
*cookie
,
1164 struct inode
*inode
)
1166 struct address_space
*mapping
= inode
->i_mapping
;
1167 struct pagevec pvec
;
1171 _enter("%p,%p", cookie
, inode
);
1173 if (!mapping
|| mapping
->nrpages
== 0) {
1174 _leave(" [no pages]");
1178 pagevec_init(&pvec
, 0);
1181 if (!pagevec_lookup(&pvec
, mapping
, next
, PAGEVEC_SIZE
))
1183 for (i
= 0; i
< pagevec_count(&pvec
); i
++) {
1184 struct page
*page
= pvec
.pages
[i
];
1186 if (PageFsCache(page
)) {
1187 __fscache_wait_on_page_write(cookie
, page
);
1188 __fscache_uncache_page(cookie
, page
);
1191 pagevec_release(&pvec
);
1197 EXPORT_SYMBOL(__fscache_uncache_all_inode_pages
);