/* FS-Cache worker operation management routines
 *
 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * See Documentation/filesystems/caching/operations.txt
 */

#define FSCACHE_DEBUG_LEVEL OPERATION
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "internal.h"

atomic_t fscache_op_debug_id;
EXPORT_SYMBOL(fscache_op_debug_id);

/**
 * fscache_enqueue_operation - Enqueue an operation for processing
 * @op: The operation to enqueue
 *
 * Enqueue an operation for processing by the FS-Cache thread pool.
 *
 * This will get its own ref on the operation.
 */
void fscache_enqueue_operation(struct fscache_operation *op)
{
	_enter("{OBJ%x OP%x,%u}",
	       op->object->debug_id, op->debug_id, atomic_read(&op->usage));

	fscache_set_op_state(op, "EnQ");

	ASSERT(list_empty(&op->pend_link));
	ASSERT(op->processor != NULL);
	ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
	ASSERTCMP(atomic_read(&op->usage), >, 0);

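	/* the op type field selects an execution context: the shared keventd
	 * workqueue for fast ops, the slow-work pool for slow ops, or the
	 * submitting thread itself for FSCACHE_OP_MYTHREAD */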
	fscache_stat(&fscache_n_op_enqueue);
	switch (op->flags & FSCACHE_OP_TYPE) {
	case FSCACHE_OP_FAST:
		_debug("queue fast");
		atomic_inc(&op->usage);
		if (!schedule_work(&op->fast_work))
			fscache_put_operation(op);
		break;
	case FSCACHE_OP_SLOW:
		_debug("queue slow");
		slow_work_enqueue(&op->slow_work);
		break;
	case FSCACHE_OP_MYTHREAD:
		_debug("queue for caller's attention");
		break;
	default:
		printk(KERN_ERR "FS-Cache: Unexpected op type %lx",
		       op->flags);
		BUG();
		break;
	}
}
EXPORT_SYMBOL(fscache_enqueue_operation);
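
/*
 * Illustrative sketch, not part of the original file: a typical caller is
 * assumed to allocate an op and initialise it with the
 * fscache_operation_init() and fscache_operation_init_slow() helpers from
 * fscache-cache.h, then hand it to one of the submission routines below.
 * my_op_release() and my_op_processor() are hypothetical callbacks.
 *
 *	struct fscache_operation *op;
 *
 *	op = kzalloc(sizeof(*op), GFP_KERNEL);
 *	if (!op)
 *		return -ENOMEM;
 *	fscache_operation_init(op, my_op_release);
 *	fscache_operation_init_slow(op, my_op_processor);
 *	op->flags = FSCACHE_OP_SLOW;
 *	if (fscache_submit_op(object, op) < 0) {
 *		fscache_put_operation(op);
 *		return -ENOBUFS;
 *	}
 */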

/*
 * start an op running
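 * - caller must hold object->lock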
 */
static void fscache_run_op(struct fscache_object *object,
			   struct fscache_operation *op)
{
	fscache_set_op_state(op, "Run");

	object->n_in_progress++;
	if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
		wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
	if (op->processor)
		fscache_enqueue_operation(op);
	fscache_stat(&fscache_n_op_run);
}

/*
 * submit an exclusive operation for an object
 * - other ops are excluded from running simultaneously with this one
 * - this gets any extra refs it needs on an op
 */
int fscache_submit_exclusive_op(struct fscache_object *object,
				struct fscache_operation *op)
{
	int ret;

	_enter("{OBJ%x OP%x},", object->debug_id, op->debug_id);

	fscache_set_op_state(op, "SubmitX");

	spin_lock(&object->lock);
	ASSERTCMP(object->n_ops, >=, object->n_in_progress);
	ASSERTCMP(object->n_ops, >=, object->n_exclusive);
	ASSERT(list_empty(&op->pend_link));

	ret = -ENOBUFS;
	if (fscache_object_is_active(object)) {
		op->object = object;
		object->n_ops++;
		object->n_exclusive++;	/* reads and writes must wait */

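		/* n_ops was just incremented, so a value greater than one
		 * means other ops are already outstanding and this one must
		 * queue behind them */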
		if (object->n_ops > 1) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
		} else if (!list_empty(&object->pending_ops)) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
			fscache_start_operations(object);
		} else {
			ASSERTCMP(object->n_in_progress, ==, 0);
			fscache_run_op(object, op);
		}

		/* need to issue a new write op after this */
		clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
		ret = 0;
	} else if (object->state == FSCACHE_OBJECT_CREATING) {
		op->object = object;
		object->n_ops++;
		object->n_exclusive++;	/* reads and writes must wait */
		atomic_inc(&op->usage);
		list_add_tail(&op->pend_link, &object->pending_ops);
		fscache_stat(&fscache_n_op_pend);
		ret = 0;
	} else {
		/* not allowed to submit ops in any other state */
		BUG();
	}

	spin_unlock(&object->lock);
	return ret;
}

/*
 * report an unexpected submission
 */
static void fscache_report_unexpected_submission(struct fscache_object *object,
						 struct fscache_operation *op,
						 unsigned long ostate)
{
	static bool once_only;
	struct fscache_operation *p;
	unsigned n;

	if (once_only)
		return;
	once_only = true;

	kdebug("unexpected submission OP%x [OBJ%x %s]",
	       op->debug_id, object->debug_id,
	       fscache_object_states[object->state]);
	kdebug("objstate=%s [%s]",
	       fscache_object_states[object->state],
	       fscache_object_states[ostate]);
	kdebug("objflags=%lx", object->flags);
	kdebug("objevent=%lx [%lx]", object->events, object->event_mask);
	kdebug("ops=%u inp=%u exc=%u",
	       object->n_ops, object->n_in_progress, object->n_exclusive);

	if (!list_empty(&object->pending_ops)) {
		n = 0;
		list_for_each_entry(p, &object->pending_ops, pend_link) {
			ASSERTCMP(p->object, ==, object);
			kdebug("%p %p", p->processor, p->release);
			n++;
		}

		kdebug("n=%u", n);
	}

	dump_stack();
}

/*
 * submit an operation for an object
 * - ops may only be submitted in the following object states:
 *   - during object creation (write ops may be submitted)
 *   - whilst the object is active
 *   - after an I/O error incurred in one of the two above states (op rejected)
 * - this gets any extra refs it needs on an op
 */
int fscache_submit_op(struct fscache_object *object,
		      struct fscache_operation *op)
{
	unsigned long ostate;
	int ret;

	_enter("{OBJ%x OP%x},{%u}",
	       object->debug_id, op->debug_id, atomic_read(&op->usage));

	ASSERTCMP(atomic_read(&op->usage), >, 0);

	fscache_set_op_state(op, "Submit");

	spin_lock(&object->lock);
	ASSERTCMP(object->n_ops, >=, object->n_in_progress);
	ASSERTCMP(object->n_ops, >=, object->n_exclusive);
	ASSERT(list_empty(&op->pend_link));

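	/* take a snapshot of the object state before testing it, so that any
	 * unexpected submission is reported against the state the object was
	 * seen in; the read barrier orders the read of ->state before the
	 * checks that follow */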
	ostate = object->state;
	smp_rmb();

	if (fscache_object_is_active(object)) {
		op->object = object;
		object->n_ops++;

		if (object->n_exclusive > 0) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
		} else if (!list_empty(&object->pending_ops)) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
			fscache_start_operations(object);
		} else {
			ASSERTCMP(object->n_exclusive, ==, 0);
			fscache_run_op(object, op);
		}
		ret = 0;
	} else if (object->state == FSCACHE_OBJECT_CREATING) {
		op->object = object;
		object->n_ops++;
		atomic_inc(&op->usage);
		list_add_tail(&op->pend_link, &object->pending_ops);
		fscache_stat(&fscache_n_op_pend);
		ret = 0;
	} else if (object->state == FSCACHE_OBJECT_DYING ||
		   object->state == FSCACHE_OBJECT_LC_DYING ||
		   object->state == FSCACHE_OBJECT_WITHDRAWING) {
		fscache_stat(&fscache_n_op_rejected);
		ret = -ENOBUFS;
	} else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
		fscache_report_unexpected_submission(object, op, ostate);
		ASSERT(!fscache_object_is_active(object));
		ret = -ENOBUFS;
	} else {
		ret = -ENOBUFS;
	}

	spin_unlock(&object->lock);
	return ret;
}

/*
 * queue an object for withdrawal on error, aborting all following asynchronous
 * operations
 */
void fscache_abort_object(struct fscache_object *object)
{
	_enter("{OBJ%x}", object->debug_id);

	fscache_raise_event(object, FSCACHE_OBJECT_EV_ERROR);
}

/*
 * jump start the operation processing on an object
 * - caller must hold object->lock
 */
void fscache_start_operations(struct fscache_object *object)
{
	struct fscache_operation *op;
	bool stop = false;

	while (!list_empty(&object->pending_ops) && !stop) {
		op = list_entry(object->pending_ops.next,
				struct fscache_operation, pend_link);

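		/* an exclusive op may only start once all ops ahead of it
		 * have finished, and once started nothing else may run until
		 * it completes */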
		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) {
			if (object->n_in_progress > 0)
				break;
			stop = true;
		}

		list_del_init(&op->pend_link);
		fscache_run_op(object, op);

		/* the pending queue was holding a ref on the op */
		fscache_put_operation(op);
	}

	ASSERTCMP(object->n_in_progress, <=, object->n_ops);

	_debug("woke %d ops on OBJ%x",
	       object->n_in_progress, object->debug_id);
}

/*
 * cancel an operation that's pending on an object
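 * - returns 0 if the op was cancelled, or -EBUSY if it was no longer pending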
 */
int fscache_cancel_op(struct fscache_operation *op)
{
	struct fscache_object *object = op->object;
	int ret;

	_enter("{OBJ%x OP%x}", op->object->debug_id, op->debug_id);

	spin_lock(&object->lock);

	ret = -EBUSY;
	if (!list_empty(&op->pend_link)) {
		fscache_stat(&fscache_n_op_cancelled);
		list_del_init(&op->pend_link);
		object->n_ops--;
		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
			object->n_exclusive--;
		if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
			wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
		fscache_put_operation(op);
		ret = 0;
	}

	spin_unlock(&object->lock);
	_leave(" = %d", ret);
	return ret;
}

/*
 * release an operation
 * - queues pending ops if this is the last in-progress op
 */
void fscache_put_operation(struct fscache_operation *op)
{
	struct fscache_object *object;
	struct fscache_cache *cache;

	_enter("{OBJ%x OP%x,%d}",
	       op->object->debug_id, op->debug_id, atomic_read(&op->usage));

	ASSERTCMP(atomic_read(&op->usage), >, 0);

	if (!atomic_dec_and_test(&op->usage))
		return;

	fscache_set_op_state(op, "Put");

	_debug("PUT OP");
	if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
		BUG();

	fscache_stat(&fscache_n_op_release);

	if (op->release) {
		op->release(op);
		op->release = NULL;
	}

	object = op->object;

	if (test_bit(FSCACHE_OP_DEC_READ_CNT, &op->flags))
		atomic_dec(&object->n_reads);

	/* now... we may get called with the object spinlock held, so we
	 * complete the cleanup here only if we can immediately acquire the
	 * lock, and defer it otherwise */
	if (!spin_trylock(&object->lock)) {
		_debug("defer put");
		fscache_stat(&fscache_n_op_deferred_release);

		cache = object->cache;
		spin_lock(&cache->op_gc_list_lock);
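		/* the op's usage count is zero here, so it is off all pending
		 * lists and pend_link can be reused to chain it onto the
		 * cache's deferred-release list */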
		list_add_tail(&op->pend_link, &cache->op_gc_list);
		spin_unlock(&cache->op_gc_list_lock);
		schedule_work(&cache->op_gc);
		_leave(" [defer]");
		return;
	}

	if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) {
		ASSERTCMP(object->n_exclusive, >, 0);
		object->n_exclusive--;
	}

	ASSERTCMP(object->n_in_progress, >, 0);
	object->n_in_progress--;
	if (object->n_in_progress == 0)
		fscache_start_operations(object);

	ASSERTCMP(object->n_ops, >, 0);
	object->n_ops--;
	if (object->n_ops == 0)
		fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED);

	spin_unlock(&object->lock);

	kfree(op);
	_leave(" [done]");
}
EXPORT_SYMBOL(fscache_put_operation);

/*
 * garbage collect operations that have had their release deferred
 */
void fscache_operation_gc(struct work_struct *work)
{
	struct fscache_operation *op;
	struct fscache_object *object;
	struct fscache_cache *cache =
		container_of(work, struct fscache_cache, op_gc);
	int count = 0;

	_enter("");

	do {
		spin_lock(&cache->op_gc_list_lock);
		if (list_empty(&cache->op_gc_list)) {
			spin_unlock(&cache->op_gc_list_lock);
			break;
		}

		op = list_entry(cache->op_gc_list.next,
				struct fscache_operation, pend_link);
		list_del(&op->pend_link);
		spin_unlock(&cache->op_gc_list_lock);

		object = op->object;

		_debug("GC DEFERRED REL OBJ%x OP%x",
		       object->debug_id, op->debug_id);
		fscache_stat(&fscache_n_op_gc);

		ASSERTCMP(atomic_read(&op->usage), ==, 0);

		spin_lock(&object->lock);
		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) {
			ASSERTCMP(object->n_exclusive, >, 0);
			object->n_exclusive--;
		}

		ASSERTCMP(object->n_in_progress, >, 0);
		object->n_in_progress--;
		if (object->n_in_progress == 0)
			fscache_start_operations(object);

		ASSERTCMP(object->n_ops, >, 0);
		object->n_ops--;
		if (object->n_ops == 0)
			fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED);

		spin_unlock(&object->lock);
		kfree(op);

	} while (count++ < 20);

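	/* anything left over after the 20-op batch is picked up by a
	 * rescheduled work item rather than hogging this one */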
	if (!list_empty(&cache->op_gc_list))
		schedule_work(&cache->op_gc);

	_leave("");
}

/*
 * allow the slow work item processor to get a ref on an operation
 */
static int fscache_op_get_ref(struct slow_work *work)
{
	struct fscache_operation *op =
		container_of(work, struct fscache_operation, slow_work);

	atomic_inc(&op->usage);
	return 0;
}

/*
 * allow the slow work item processor to discard a ref on an operation
 */
static void fscache_op_put_ref(struct slow_work *work)
{
	struct fscache_operation *op =
		container_of(work, struct fscache_operation, slow_work);

	fscache_put_operation(op);
}

/*
 * execute an operation using the slow thread pool to provide processing context
 * - the caller holds a ref to this object, so we don't need to hold one
 */
static void fscache_op_execute(struct slow_work *work)
{
	struct fscache_operation *op =
		container_of(work, struct fscache_operation, slow_work);
	unsigned long start;

	_enter("{OBJ%x OP%x,%d}",
	       op->object->debug_id, op->debug_id, atomic_read(&op->usage));

	ASSERT(op->processor != NULL);
	start = jiffies;
	op->processor(op);
	fscache_hist(fscache_ops_histogram, start);

	_leave("");
}

/*
 * describe an operation for slow-work debugging
 */
#ifdef CONFIG_SLOW_WORK_DEBUG
static void fscache_op_desc(struct slow_work *work, struct seq_file *m)
{
	struct fscache_operation *op =
		container_of(work, struct fscache_operation, slow_work);

	seq_printf(m, "FSC: OBJ%x OP%x: %s/%s fl=%lx",
		   op->object->debug_id, op->debug_id,
		   op->name, op->state, op->flags);
}
#endif

const struct slow_work_ops fscache_op_slow_work_ops = {
	.owner		= THIS_MODULE,
	.get_ref	= fscache_op_get_ref,
	.put_ref	= fscache_op_put_ref,
	.execute	= fscache_op_execute,
#ifdef CONFIG_SLOW_WORK_DEBUG
	.desc		= fscache_op_desc,
#endif
};
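
/*
 * Editorial note, not part of the original file: the slow-work facility pins
 * an item through these callbacks - ->get_ref is invoked when the item is
 * enqueued and ->put_ref once the facility is finished with it - so an op
 * queued with slow_work_enqueue() cannot be freed whilst queued or executing.
 * ->execute runs op->processor() in slow-work thread context, which is what
 * FSCACHE_OP_SLOW ops rely on to do blocking work.
 */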