/* FS-Cache worker operation management routines
 *
 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * See Documentation/filesystems/caching/operations.txt
 */

#define FSCACHE_DEBUG_LEVEL OPERATION
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "internal.h"
atomic_t fscache_op_debug_id;
EXPORT_SYMBOL(fscache_op_debug_id);
/**
 * fscache_enqueue_operation - Enqueue an operation for processing
 * @op: The operation to enqueue
 *
 * Enqueue an operation for processing by the FS-Cache thread pool.
 *
 * This will get its own ref on the object.
 */
void fscache_enqueue_operation(struct fscache_operation *op)
{
	_enter("{OBJ%x OP%x,%u}",
	       op->object->debug_id, op->debug_id, atomic_read(&op->usage));

	ASSERT(list_empty(&op->pend_link));
	ASSERT(op->processor != NULL);
	ASSERT(fscache_object_is_available(op->object));
	ASSERTCMP(atomic_read(&op->usage), >, 0);
	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);

	fscache_stat(&fscache_n_op_enqueue);
	switch (op->flags & FSCACHE_OP_TYPE) {
	case FSCACHE_OP_ASYNC:
		_debug("queue async");
		atomic_inc(&op->usage);
		if (!queue_work(fscache_op_wq, &op->work))
			fscache_put_operation(op);
		break;
	case FSCACHE_OP_MYTHREAD:
		_debug("queue for caller's attention");
		break;
	default:
		pr_err("Unexpected op type %lx", op->flags);
		BUG();
		break;
	}
}
EXPORT_SYMBOL(fscache_enqueue_operation);
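/*
 * Illustrative sketch, not part of the original file: an op marked
 * FSCACHE_OP_ASYNC is queued on fscache_op_wq above, and the thread pool
 * later invokes op->processor via fscache_op_work_func().  A hypothetical
 * processor might look like this (do_the_work() is made up):
 *
 *	static void my_op_processor(struct fscache_operation *op)
 *	{
 *		do_the_work(op);		// the deferred work itself
 *		fscache_op_complete(op, false);	// mark the op finished
 *	}
 */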
/*
 * start an op running
 */
static void fscache_run_op(struct fscache_object *object,
			   struct fscache_operation *op)
{
	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);

	op->state = FSCACHE_OP_ST_IN_PROGRESS;
	object->n_in_progress++;
	if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
		wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
	if (op->processor)
		fscache_enqueue_operation(op);
	fscache_stat(&fscache_n_op_run);
}
/*
 * report an unexpected submission
 */
static void fscache_report_unexpected_submission(struct fscache_object *object,
						 struct fscache_operation *op,
						 const struct fscache_state *ostate)
{
	static bool once_only;
	struct fscache_operation *p;
	unsigned n;

	if (once_only)
		return;
	once_only = true;

	kdebug("unexpected submission OP%x [OBJ%x %s]",
	       op->debug_id, object->debug_id, object->state->name);
	kdebug("objstate=%s [%s]", object->state->name, ostate->name);
	kdebug("objflags=%lx", object->flags);
	kdebug("objevent=%lx [%lx]", object->events, object->event_mask);
	kdebug("ops=%u inp=%u exc=%u",
	       object->n_ops, object->n_in_progress, object->n_exclusive);

	if (!list_empty(&object->pending_ops)) {
		n = 0;
		list_for_each_entry(p, &object->pending_ops, pend_link) {
			ASSERTCMP(p->object, ==, object);
			kdebug("%p %p", op->processor, op->release);
			n++;
		}

		kdebug("n=%u", n);
	}

	dump_stack();
}
/*
 * submit an exclusive operation for an object
 * - other ops are excluded from running simultaneously with this one
 * - this gets any extra refs it needs on an op
 */
int fscache_submit_exclusive_op(struct fscache_object *object,
				struct fscache_operation *op)
{
	const struct fscache_state *ostate;
	unsigned long flags;
	int ret;

	_enter("{OBJ%x OP%x},", object->debug_id, op->debug_id);

	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_INITIALISED);
	ASSERTCMP(atomic_read(&op->usage), >, 0);

	spin_lock(&object->lock);
	ASSERTCMP(object->n_ops, >=, object->n_in_progress);
	ASSERTCMP(object->n_ops, >=, object->n_exclusive);
	ASSERT(list_empty(&op->pend_link));

	ostate = object->state;
	smp_rmb();

	op->state = FSCACHE_OP_ST_PENDING;
	flags = READ_ONCE(object->flags);
	if (unlikely(!(flags & BIT(FSCACHE_OBJECT_IS_LIVE)))) {
		fscache_stat(&fscache_n_op_rejected);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	} else if (unlikely(fscache_cache_is_broken(object))) {
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -EIO;
	} else if (flags & BIT(FSCACHE_OBJECT_IS_AVAILABLE)) {
		op->object = object;
		object->n_ops++;
		object->n_exclusive++;	/* reads and writes must wait */

		if (object->n_in_progress > 0) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
		} else if (!list_empty(&object->pending_ops)) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
			fscache_start_operations(object);
		} else {
			ASSERTCMP(object->n_in_progress, ==, 0);
			fscache_run_op(object, op);
		}

		/* need to issue a new write op after this */
		clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
		ret = 0;
	} else if (flags & BIT(FSCACHE_OBJECT_IS_LOOKED_UP)) {
		op->object = object;
		object->n_ops++;
		object->n_exclusive++;	/* reads and writes must wait */
		atomic_inc(&op->usage);
		list_add_tail(&op->pend_link, &object->pending_ops);
		fscache_stat(&fscache_n_op_pend);
		ret = 0;
	} else if (flags & BIT(FSCACHE_OBJECT_KILLED_BY_CACHE)) {
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	} else {
		fscache_report_unexpected_submission(object, op, ostate);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	}

	spin_unlock(&object->lock);
	return ret;
}
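/*
 * Illustrative sketch, not part of the original file: a typical caller
 * checks the result and drops its own ref on failure ("op" is assumed to
 * be initialised but not yet submitted):
 *
 *	ret = fscache_submit_exclusive_op(object, op);
 *	if (ret < 0)
 *		fscache_put_operation(op);	// -ENOBUFS or -EIO
 */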
/*
 * submit an operation for an object
 * - objects may be submitted only in the following states:
 *   - during object creation (write ops may be submitted)
 *   - whilst the object is active
 *   - after an I/O error incurred in one of the two above states (op rejected)
 * - this gets any extra refs it needs on an op
 */
int fscache_submit_op(struct fscache_object *object,
		      struct fscache_operation *op)
{
	const struct fscache_state *ostate;
	unsigned long flags;
	int ret;

	_enter("{OBJ%x OP%x},{%u}",
	       object->debug_id, op->debug_id, atomic_read(&op->usage));

	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_INITIALISED);
	ASSERTCMP(atomic_read(&op->usage), >, 0);

	spin_lock(&object->lock);
	ASSERTCMP(object->n_ops, >=, object->n_in_progress);
	ASSERTCMP(object->n_ops, >=, object->n_exclusive);
	ASSERT(list_empty(&op->pend_link));

	ostate = object->state;
	smp_rmb();

	op->state = FSCACHE_OP_ST_PENDING;
	flags = READ_ONCE(object->flags);
	if (unlikely(!(flags & BIT(FSCACHE_OBJECT_IS_LIVE)))) {
		fscache_stat(&fscache_n_op_rejected);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	} else if (unlikely(fscache_cache_is_broken(object))) {
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -EIO;
	} else if (flags & BIT(FSCACHE_OBJECT_IS_AVAILABLE)) {
		op->object = object;
		object->n_ops++;

		if (object->n_exclusive > 0) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
		} else if (!list_empty(&object->pending_ops)) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
			fscache_start_operations(object);
		} else {
			ASSERTCMP(object->n_exclusive, ==, 0);
			fscache_run_op(object, op);
		}
		ret = 0;
	} else if (flags & BIT(FSCACHE_OBJECT_IS_LOOKED_UP)) {
		op->object = object;
		object->n_ops++;
		atomic_inc(&op->usage);
		list_add_tail(&op->pend_link, &object->pending_ops);
		fscache_stat(&fscache_n_op_pend);
		ret = 0;
	} else if (flags & BIT(FSCACHE_OBJECT_KILLED_BY_CACHE)) {
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	} else {
		fscache_report_unexpected_submission(object, op, ostate);
		ASSERT(!fscache_object_is_active(object));
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	}

	spin_unlock(&object->lock);
	return ret;
}
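/*
 * Illustrative sketch, not part of the original file: read/write paths
 * submit non-exclusive ops the same way; a submitted op either runs at
 * once, queues behind exclusive ops, or is rejected if the object is dying:
 *
 *	if (fscache_submit_op(object, op) < 0)
 *		goto nobufs;		// hypothetical error label
 */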
/*
 * queue an object for withdrawal on error, aborting all following asynchronous
 * operations
 */
void fscache_abort_object(struct fscache_object *object)
{
	_enter("{OBJ%x}", object->debug_id);

	fscache_raise_event(object, FSCACHE_OBJECT_EV_ERROR);
}
/*
 * Jump start the operation processing on an object.  The caller must hold
 * object->lock.
 */
void fscache_start_operations(struct fscache_object *object)
{
	struct fscache_operation *op;
	bool stop = false;

	while (!list_empty(&object->pending_ops) && !stop) {
		op = list_entry(object->pending_ops.next,
				struct fscache_operation, pend_link);

		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) {
			if (object->n_in_progress > 0)
				break;
			stop = true;
		}
		list_del_init(&op->pend_link);
		fscache_run_op(object, op);

		/* the pending queue was holding a ref on the object */
		fscache_put_operation(op);
	}

	ASSERTCMP(object->n_in_progress, <=, object->n_ops);

	_debug("woke %d ops on OBJ%x",
	       object->n_in_progress, object->debug_id);
}
/*
 * cancel an operation that's pending on an object
 */
int fscache_cancel_op(struct fscache_operation *op,
		      void (*do_cancel)(struct fscache_operation *),
		      bool cancel_in_progress_op)
{
	struct fscache_object *object = op->object;
	bool put = false;
	int ret;

	_enter("OBJ%x OP%x}", op->object->debug_id, op->debug_id);

	ASSERTCMP(op->state, >=, FSCACHE_OP_ST_PENDING);
	ASSERTCMP(op->state, !=, FSCACHE_OP_ST_CANCELLED);
	ASSERTCMP(atomic_read(&op->usage), >, 0);

	spin_lock(&object->lock);

	ret = -EBUSY;
	if (op->state == FSCACHE_OP_ST_PENDING) {
		ASSERT(!list_empty(&op->pend_link));
		list_del_init(&op->pend_link);
		put = true;
		fscache_stat(&fscache_n_op_cancelled);
		if (do_cancel)
			do_cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
			object->n_exclusive--;
		if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
			wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
		ret = 0;
	} else if (op->state == FSCACHE_OP_ST_IN_PROGRESS && cancel_in_progress_op) {
		fscache_stat(&fscache_n_op_cancelled);
		if (do_cancel)
			do_cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
			object->n_exclusive--;
		if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
			wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
		ret = 0;
	}

	if (put)
		fscache_put_operation(op);
	spin_unlock(&object->lock);
	_leave(" = %d", ret);
	return ret;
}
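/*
 * Illustrative sketch, not part of the original file: a waiter that gives
 * up on an op may cancel it.  do_cancel is an optional callback to undo
 * caller-side accounting; passing true as the last argument also permits
 * cancelling an op that has already started:
 *
 *	if (fscache_cancel_op(op, NULL, false) == 0)
 *		...			// op was still pending; now cancelled
 *	// -EBUSY: the op had already started (or completed)
 */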
/*
 * Cancel all pending operations on an object
 */
void fscache_cancel_all_ops(struct fscache_object *object)
{
	struct fscache_operation *op;

	_enter("OBJ%x", object->debug_id);

	spin_lock(&object->lock);

	while (!list_empty(&object->pending_ops)) {
		op = list_entry(object->pending_ops.next,
				struct fscache_operation, pend_link);
		fscache_stat(&fscache_n_op_cancelled);
		list_del_init(&op->pend_link);

		ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
		op->state = FSCACHE_OP_ST_CANCELLED;

		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
			object->n_exclusive--;
		if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
			wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
		fscache_put_operation(op);
		cond_resched_lock(&object->lock);
	}

	spin_unlock(&object->lock);
	_leave("");
}
/*
 * Record the completion or cancellation of an in-progress operation.
 */
void fscache_op_complete(struct fscache_operation *op, bool cancelled)
{
	struct fscache_object *object = op->object;

	_enter("OBJ%x", object->debug_id);

	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
	ASSERTCMP(object->n_in_progress, >, 0);
	ASSERTIFCMP(test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags),
		    object->n_exclusive, >, 0);
	ASSERTIFCMP(test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags),
		    object->n_in_progress, ==, 1);

	spin_lock(&object->lock);

	op->state = cancelled ?
		FSCACHE_OP_ST_CANCELLED : FSCACHE_OP_ST_COMPLETE;

	if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
		object->n_exclusive--;
	object->n_in_progress--;
	if (object->n_in_progress == 0)
		fscache_start_operations(object);

	spin_unlock(&object->lock);
	_leave("");
}
EXPORT_SYMBOL(fscache_op_complete);
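/*
 * Illustrative sketch, not part of the original file: whoever finishes the
 * op's work is expected to call fscache_op_complete() exactly once, after
 * which the reference is dropped:
 *
 *	fscache_op_complete(op, was_cancelled);	// was_cancelled: made-up flag
 *	fscache_put_operation(op);	// done by whoever holds the ref; for
 *					// async ops fscache_op_work_func()
 *					// below performs this put
 */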
/*
 * release an operation
 * - queues pending ops if this is the last in-progress op
 */
void fscache_put_operation(struct fscache_operation *op)
{
	struct fscache_object *object;
	struct fscache_cache *cache;

	_enter("{OBJ%x OP%x,%d}",
	       op->object->debug_id, op->debug_id, atomic_read(&op->usage));

	ASSERTCMP(atomic_read(&op->usage), >, 0);

	if (!atomic_dec_and_test(&op->usage))
		return;

	_debug("PUT OP");
	ASSERTIFCMP(op->state != FSCACHE_OP_ST_COMPLETE,
		    op->state, ==, FSCACHE_OP_ST_CANCELLED);
	op->state = FSCACHE_OP_ST_DEAD;

	fscache_stat(&fscache_n_op_release);

	if (op->release) {
		op->release(op);
		op->release = NULL;
	}

	object = op->object;
	if (likely(object)) {
		if (test_bit(FSCACHE_OP_DEC_READ_CNT, &op->flags))
			atomic_dec(&object->n_reads);
		if (test_bit(FSCACHE_OP_UNUSE_COOKIE, &op->flags))
			fscache_unuse_cookie(object);

		/* now... we may get called with the object spinlock held, so we
		 * complete the cleanup here only if we can immediately acquire the
		 * lock, and defer it otherwise */
		if (!spin_trylock(&object->lock)) {
			_debug("defer put");
			fscache_stat(&fscache_n_op_deferred_release);

			cache = object->cache;
			spin_lock(&cache->op_gc_list_lock);
			list_add_tail(&op->pend_link, &cache->op_gc_list);
			spin_unlock(&cache->op_gc_list_lock);
			schedule_work(&cache->op_gc);
			_leave(" [defer]");
			return;
		}

		ASSERTCMP(object->n_ops, >, 0);
		object->n_ops--;
		if (object->n_ops == 0)
			fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED);

		spin_unlock(&object->lock);
	}

	kfree(op);
}
EXPORT_SYMBOL(fscache_put_operation);
/*
 * garbage collect operations that have had their release deferred
 */
void fscache_operation_gc(struct work_struct *work)
{
	struct fscache_operation *op;
	struct fscache_object *object;
	struct fscache_cache *cache =
		container_of(work, struct fscache_cache, op_gc);
	int count = 0;

	_enter("");

	do {
		spin_lock(&cache->op_gc_list_lock);
		if (list_empty(&cache->op_gc_list)) {
			spin_unlock(&cache->op_gc_list_lock);
			break;
		}

		op = list_entry(cache->op_gc_list.next,
				struct fscache_operation, pend_link);
		list_del(&op->pend_link);
		spin_unlock(&cache->op_gc_list_lock);

		object = op->object;
		spin_lock(&object->lock);

		_debug("GC DEFERRED REL OBJ%x OP%x",
		       object->debug_id, op->debug_id);
		fscache_stat(&fscache_n_op_gc);

		ASSERTCMP(atomic_read(&op->usage), ==, 0);
		ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);

		ASSERTCMP(object->n_ops, >, 0);
		object->n_ops--;
		if (object->n_ops == 0)
			fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED);

		spin_unlock(&object->lock);
		kfree(op);

	} while (count++ < 20);

	if (!list_empty(&cache->op_gc_list))
		schedule_work(&cache->op_gc);

	_leave("");
}
/*
 * execute an operation using fs_op_wq to provide processing context -
 * the caller holds a ref to this object, so we don't need to hold one
 */
void fscache_op_work_func(struct work_struct *work)
{
	struct fscache_operation *op =
		container_of(work, struct fscache_operation, work);
	unsigned long start;

	_enter("{OBJ%x OP%x,%d}",
	       op->object->debug_id, op->debug_id, atomic_read(&op->usage));

	ASSERT(op->processor != NULL);
	start = jiffies;
	op->processor(op);
	fscache_hist(fscache_ops_histogram, start);
	fscache_put_operation(op);

	_leave("");
}