fs/fscache/operation.c
/* FS-Cache worker operation management routines
 *
 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * See Documentation/filesystems/caching/operations.txt
 */

#define FSCACHE_DEBUG_LEVEL OPERATION
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "internal.h"

atomic_t fscache_op_debug_id;
EXPORT_SYMBOL(fscache_op_debug_id);

static void fscache_operation_dummy_cancel(struct fscache_operation *op)
{
}

/**
 * fscache_operation_init - Do basic initialisation of an operation
 * @op: The operation to initialise
 * @processor: The function that will process the operation, if any
 * @cancel: The function to call if the operation is cancelled, or NULL
 * @release: The release function to assign
 *
 * Do basic initialisation of an operation.  The caller must still set flags
 * and the object as needed.
 */
void fscache_operation_init(struct fscache_operation *op,
			    fscache_operation_processor_t processor,
			    fscache_operation_cancel_t cancel,
			    fscache_operation_release_t release)
{
	INIT_WORK(&op->work, fscache_op_work_func);
	atomic_set(&op->usage, 1);
	op->state = FSCACHE_OP_ST_INITIALISED;
	op->debug_id = atomic_inc_return(&fscache_op_debug_id);
	op->processor = processor;
	op->cancel = cancel ?: fscache_operation_dummy_cancel;
	op->release = release;
	INIT_LIST_HEAD(&op->pend_link);
	fscache_stat(&fscache_n_op_initialised);
}
EXPORT_SYMBOL(fscache_operation_init);
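
/*
 * An illustrative usage sketch (the example_* names are hypothetical, not
 * fscache API): a caller typically allocates a struct fscache_operation, or
 * embeds it in a larger op struct, initialises it with its callbacks and then
 * picks the dispatch type and auxiliary flag bits before submission.
 */
static void example_op_processor(struct fscache_operation *op)
{
	/* do the actual work here, then record completion so that the
	 * object's in-progress count drops and queued ops can start */
	fscache_op_complete(op, false);
}

static struct fscache_operation *example_alloc_op(void)
{
	struct fscache_operation *op;

	op = kzalloc(sizeof(*op), GFP_KERNEL);
	if (!op)
		return NULL;

	fscache_operation_init(op, example_op_processor, NULL, NULL);
	/* ASYNC: the thread pool runs example_op_processor once the op is
	 * started; EXCLUSIVE: earlier ops must drain first, later ones wait */
	op->flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_EXCLUSIVE);
	return op;
}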

/**
 * fscache_enqueue_operation - Enqueue an operation for processing
 * @op: The operation to enqueue
 *
 * Enqueue an operation for processing by the FS-Cache thread pool.
 *
 * This will get its own ref on the operation.
 */
void fscache_enqueue_operation(struct fscache_operation *op)
{
	_enter("{OBJ%x OP%x,%u}",
	       op->object->debug_id, op->debug_id, atomic_read(&op->usage));

	ASSERT(list_empty(&op->pend_link));
	ASSERT(op->processor != NULL);
	ASSERT(fscache_object_is_available(op->object));
	ASSERTCMP(atomic_read(&op->usage), >, 0);
	ASSERTIFCMP(op->state != FSCACHE_OP_ST_IN_PROGRESS,
		    op->state, ==, FSCACHE_OP_ST_CANCELLED);

	fscache_stat(&fscache_n_op_enqueue);
	switch (op->flags & FSCACHE_OP_TYPE) {
	case FSCACHE_OP_ASYNC:
		_debug("queue async");
		atomic_inc(&op->usage);
		if (!queue_work(fscache_op_wq, &op->work))
			fscache_put_operation(op);
		break;
	case FSCACHE_OP_MYTHREAD:
		_debug("queue for caller's attention");
		break;
	default:
		pr_err("Unexpected op type %lx", op->flags);
		BUG();
		break;
	}
}
EXPORT_SYMBOL(fscache_enqueue_operation);

/*
 * start an op running
 */
static void fscache_run_op(struct fscache_object *object,
			   struct fscache_operation *op)
{
	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);

	op->state = FSCACHE_OP_ST_IN_PROGRESS;
	object->n_in_progress++;
	if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
		wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
	if (op->processor)
		fscache_enqueue_operation(op);
	fscache_stat(&fscache_n_op_run);
}

/*
 * report an unexpected submission
 */
static void fscache_report_unexpected_submission(struct fscache_object *object,
						 struct fscache_operation *op,
						 const struct fscache_state *ostate)
{
	static bool once_only;
	struct fscache_operation *p;
	unsigned n;

	if (once_only)
		return;
	once_only = true;

	kdebug("unexpected submission OP%x [OBJ%x %s]",
	       op->debug_id, object->debug_id, object->state->name);
	kdebug("objstate=%s [%s]", object->state->name, ostate->name);
	kdebug("objflags=%lx", object->flags);
	kdebug("objevent=%lx [%lx]", object->events, object->event_mask);
	kdebug("ops=%u inp=%u exc=%u",
	       object->n_ops, object->n_in_progress, object->n_exclusive);

	if (!list_empty(&object->pending_ops)) {
		n = 0;
		list_for_each_entry(p, &object->pending_ops, pend_link) {
			ASSERTCMP(p->object, ==, object);
			kdebug("%p %p", p->processor, p->release);
			n++;
		}

		kdebug("n=%u", n);
	}

	dump_stack();
}

/*
 * submit an exclusive operation for an object
 * - other ops are excluded from running simultaneously with this one
 * - this gets any extra refs it needs on an op
 */
int fscache_submit_exclusive_op(struct fscache_object *object,
				struct fscache_operation *op)
{
	const struct fscache_state *ostate;
	unsigned long flags;
	int ret;

	_enter("{OBJ%x OP%x},", object->debug_id, op->debug_id);

	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_INITIALISED);
	ASSERTCMP(atomic_read(&op->usage), >, 0);

	spin_lock(&object->lock);
	ASSERTCMP(object->n_ops, >=, object->n_in_progress);
	ASSERTCMP(object->n_ops, >=, object->n_exclusive);
	ASSERT(list_empty(&op->pend_link));

	ostate = object->state;
	smp_rmb();

	op->state = FSCACHE_OP_ST_PENDING;
	flags = READ_ONCE(object->flags);
	if (unlikely(!(flags & BIT(FSCACHE_OBJECT_IS_LIVE)))) {
		fscache_stat(&fscache_n_op_rejected);
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	} else if (unlikely(fscache_cache_is_broken(object))) {
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -EIO;
	} else if (flags & BIT(FSCACHE_OBJECT_IS_AVAILABLE)) {
		op->object = object;
		object->n_ops++;
		object->n_exclusive++;	/* reads and writes must wait */

		if (object->n_in_progress > 0) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
		} else if (!list_empty(&object->pending_ops)) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
			fscache_start_operations(object);
		} else {
			ASSERTCMP(object->n_in_progress, ==, 0);
			fscache_run_op(object, op);
		}

		/* need to issue a new write op after this */
		clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
		ret = 0;
	} else if (flags & BIT(FSCACHE_OBJECT_IS_LOOKED_UP)) {
		op->object = object;
		object->n_ops++;
		object->n_exclusive++;	/* reads and writes must wait */
		atomic_inc(&op->usage);
		list_add_tail(&op->pend_link, &object->pending_ops);
		fscache_stat(&fscache_n_op_pend);
		ret = 0;
	} else if (flags & BIT(FSCACHE_OBJECT_KILLED_BY_CACHE)) {
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	} else {
		fscache_report_unexpected_submission(object, op, ostate);
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	}

	spin_unlock(&object->lock);
	return ret;
}
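
/*
 * An illustrative caller-side sketch for exclusive submission, loosely
 * modelled on fscache's attribute-change path (the example_* names are
 * hypothetical).  Finding @object - normally the first entry on
 * cookie->backing_objects, looked up under the cookie lock - is glossed
 * over here, as is statistics accounting.
 */
static int example_submit_exclusive(struct fscache_object *object)
{
	struct fscache_operation *op;
	int ret;

	op = example_alloc_op();	/* see the sketch above */
	if (!op)
		return -ENOMEM;

	ret = fscache_submit_exclusive_op(object, op);
	if (ret < 0) {
		fscache_put_operation(op);
		return ret;
	}

	/* submission took its own references where needed; drop ours */
	fscache_put_operation(op);
	return 0;
}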

/*
 * submit an operation for an object
 * - ops may be submitted only while the object is in one of the following
 *   states:
 *   - during object creation (write ops may be submitted)
 *   - whilst the object is active
 *   - after an I/O error incurred in one of the two above states (op rejected)
 * - this gets any extra refs it needs on an op
 */
int fscache_submit_op(struct fscache_object *object,
		      struct fscache_operation *op)
{
	const struct fscache_state *ostate;
	unsigned long flags;
	int ret;

	_enter("{OBJ%x OP%x},{%u}",
	       object->debug_id, op->debug_id, atomic_read(&op->usage));

	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_INITIALISED);
	ASSERTCMP(atomic_read(&op->usage), >, 0);

	spin_lock(&object->lock);
	ASSERTCMP(object->n_ops, >=, object->n_in_progress);
	ASSERTCMP(object->n_ops, >=, object->n_exclusive);
	ASSERT(list_empty(&op->pend_link));

	ostate = object->state;
	smp_rmb();

	op->state = FSCACHE_OP_ST_PENDING;
	flags = READ_ONCE(object->flags);
	if (unlikely(!(flags & BIT(FSCACHE_OBJECT_IS_LIVE)))) {
		fscache_stat(&fscache_n_op_rejected);
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	} else if (unlikely(fscache_cache_is_broken(object))) {
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -EIO;
	} else if (flags & BIT(FSCACHE_OBJECT_IS_AVAILABLE)) {
		op->object = object;
		object->n_ops++;

		if (object->n_exclusive > 0) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
		} else if (!list_empty(&object->pending_ops)) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
			fscache_start_operations(object);
		} else {
			ASSERTCMP(object->n_exclusive, ==, 0);
			fscache_run_op(object, op);
		}
		ret = 0;
	} else if (flags & BIT(FSCACHE_OBJECT_IS_LOOKED_UP)) {
		op->object = object;
		object->n_ops++;
		atomic_inc(&op->usage);
		list_add_tail(&op->pend_link, &object->pending_ops);
		fscache_stat(&fscache_n_op_pend);
		ret = 0;
	} else if (flags & BIT(FSCACHE_OBJECT_KILLED_BY_CACHE)) {
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	} else {
		fscache_report_unexpected_submission(object, op, ostate);
		ASSERT(!fscache_object_is_active(object));
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	}

	spin_unlock(&object->lock);
	return ret;
}
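
/*
 * An illustrative sketch of the FSCACHE_OP_MYTHREAD side (hypothetical
 * example_* name; cookie locking and interruptible-wait handling are
 * omitted): the submitting thread flags the op as waiting, submits it,
 * sleeps until fscache_run_op() clears FSCACHE_OP_WAITING and then does
 * the work itself rather than going through the thread pool.  The op is
 * assumed to have been initialised with a NULL processor.
 */
static int example_do_op_in_this_thread(struct fscache_object *object,
					struct fscache_operation *op)
{
	int ret;

	op->flags = FSCACHE_OP_MYTHREAD | (1UL << FSCACHE_OP_WAITING);

	ret = fscache_submit_op(object, op);
	if (ret < 0) {
		fscache_put_operation(op);
		return ret;
	}

	/* wait for the op to be started or cancelled */
	wait_on_bit(&op->flags, FSCACHE_OP_WAITING, TASK_UNINTERRUPTIBLE);
	if (op->state == FSCACHE_OP_ST_CANCELLED) {
		fscache_put_operation(op);
		return -ENOBUFS;
	}

	/* ... perform the I/O here ... */

	fscache_op_complete(op, false);
	fscache_put_operation(op);
	return 0;
}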

/*
 * queue an object for withdrawal on error, aborting all following asynchronous
 * operations
 */
void fscache_abort_object(struct fscache_object *object)
{
	_enter("{OBJ%x}", object->debug_id);

	fscache_raise_event(object, FSCACHE_OBJECT_EV_ERROR);
}

/*
 * Jump start the operation processing on an object.  The caller must hold
 * object->lock.
 */
void fscache_start_operations(struct fscache_object *object)
{
	struct fscache_operation *op;
	bool stop = false;

	while (!list_empty(&object->pending_ops) && !stop) {
		op = list_entry(object->pending_ops.next,
				struct fscache_operation, pend_link);

		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) {
			if (object->n_in_progress > 0)
				break;
			stop = true;
		}
		list_del_init(&op->pend_link);
		fscache_run_op(object, op);

		/* the pending queue was holding a ref on the object */
		fscache_put_operation(op);
	}

	ASSERTCMP(object->n_in_progress, <=, object->n_ops);

	_debug("woke %d ops on OBJ%x",
	       object->n_in_progress, object->debug_id);
}

/*
 * cancel an operation that's pending on an object
 */
int fscache_cancel_op(struct fscache_operation *op,
		      bool cancel_in_progress_op)
{
	struct fscache_object *object = op->object;
	bool put = false;
	int ret;

	_enter("{OBJ%x OP%x}", op->object->debug_id, op->debug_id);

	ASSERTCMP(op->state, >=, FSCACHE_OP_ST_PENDING);
	ASSERTCMP(op->state, !=, FSCACHE_OP_ST_CANCELLED);
	ASSERTCMP(atomic_read(&op->usage), >, 0);

	spin_lock(&object->lock);

	ret = -EBUSY;
	if (op->state == FSCACHE_OP_ST_PENDING) {
		ASSERT(!list_empty(&op->pend_link));
		list_del_init(&op->pend_link);
		put = true;

		fscache_stat(&fscache_n_op_cancelled);
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
			object->n_exclusive--;
		if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
			wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
		ret = 0;
	} else if (op->state == FSCACHE_OP_ST_IN_PROGRESS && cancel_in_progress_op) {
		ASSERTCMP(object->n_in_progress, >, 0);
		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
			object->n_exclusive--;
		object->n_in_progress--;
		if (object->n_in_progress == 0)
			fscache_start_operations(object);

		fscache_stat(&fscache_n_op_cancelled);
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
			wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
		ret = 0;
	}

	if (put)
		fscache_put_operation(op);
	spin_unlock(&object->lock);
	_leave(" = %d", ret);
	return ret;
}

/*
 * Cancel all pending operations on an object
 */
void fscache_cancel_all_ops(struct fscache_object *object)
{
	struct fscache_operation *op;

	_enter("OBJ%x", object->debug_id);

	spin_lock(&object->lock);

	while (!list_empty(&object->pending_ops)) {
		op = list_entry(object->pending_ops.next,
				struct fscache_operation, pend_link);
		fscache_stat(&fscache_n_op_cancelled);
		list_del_init(&op->pend_link);

		ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;

		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
			object->n_exclusive--;
		if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
			wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
		fscache_put_operation(op);
		cond_resched_lock(&object->lock);
	}

	spin_unlock(&object->lock);
	_leave("");
}

/*
 * Record the completion or cancellation of an in-progress operation.
 */
void fscache_op_complete(struct fscache_operation *op, bool cancelled)
{
	struct fscache_object *object = op->object;

	_enter("OBJ%x", object->debug_id);

	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
	ASSERTCMP(object->n_in_progress, >, 0);
	ASSERTIFCMP(test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags),
		    object->n_exclusive, >, 0);
	ASSERTIFCMP(test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags),
		    object->n_in_progress, ==, 1);

	spin_lock(&object->lock);

	if (!cancelled) {
		op->state = FSCACHE_OP_ST_COMPLETE;
	} else {
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
	}

	if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
		object->n_exclusive--;
	object->n_in_progress--;
	if (object->n_in_progress == 0)
		fscache_start_operations(object);

	spin_unlock(&object->lock);
	_leave("");
}
EXPORT_SYMBOL(fscache_op_complete);

/*
 * release an operation
 * - queues pending ops if this is the last in-progress op
 */
void fscache_put_operation(struct fscache_operation *op)
{
	struct fscache_object *object;
	struct fscache_cache *cache;

	_enter("{OBJ%x OP%x,%d}",
	       op->object ? op->object->debug_id : 0,
	       op->debug_id, atomic_read(&op->usage));

	ASSERTCMP(atomic_read(&op->usage), >, 0);

	if (!atomic_dec_and_test(&op->usage))
		return;

	_debug("PUT OP");
	ASSERTIFCMP(op->state != FSCACHE_OP_ST_INITIALISED &&
		    op->state != FSCACHE_OP_ST_COMPLETE,
		    op->state, ==, FSCACHE_OP_ST_CANCELLED);

	fscache_stat(&fscache_n_op_release);

	if (op->release) {
		op->release(op);
		op->release = NULL;
	}
	op->state = FSCACHE_OP_ST_DEAD;

	object = op->object;
	if (likely(object)) {
		if (test_bit(FSCACHE_OP_DEC_READ_CNT, &op->flags))
			atomic_dec(&object->n_reads);
		if (test_bit(FSCACHE_OP_UNUSE_COOKIE, &op->flags))
			fscache_unuse_cookie(object);

		/* now... we may get called with the object spinlock held, so we
		 * complete the cleanup here only if we can immediately acquire the
		 * lock, and defer it otherwise */
		if (!spin_trylock(&object->lock)) {
			_debug("defer put");
			fscache_stat(&fscache_n_op_deferred_release);

			cache = object->cache;
			spin_lock(&cache->op_gc_list_lock);
			list_add_tail(&op->pend_link, &cache->op_gc_list);
			spin_unlock(&cache->op_gc_list_lock);
			schedule_work(&cache->op_gc);
			_leave(" [defer]");
			return;
		}

		ASSERTCMP(object->n_ops, >, 0);
		object->n_ops--;
		if (object->n_ops == 0)
			fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED);

		spin_unlock(&object->lock);
	}

	kfree(op);
	_leave(" [done]");
}
EXPORT_SYMBOL(fscache_put_operation);

/*
 * garbage collect operations that have had their release deferred
 */
void fscache_operation_gc(struct work_struct *work)
{
	struct fscache_operation *op;
	struct fscache_object *object;
	struct fscache_cache *cache =
		container_of(work, struct fscache_cache, op_gc);
	int count = 0;

	_enter("");

	do {
		spin_lock(&cache->op_gc_list_lock);
		if (list_empty(&cache->op_gc_list)) {
			spin_unlock(&cache->op_gc_list_lock);
			break;
		}

		op = list_entry(cache->op_gc_list.next,
				struct fscache_operation, pend_link);
		list_del(&op->pend_link);
		spin_unlock(&cache->op_gc_list_lock);

		object = op->object;
		spin_lock(&object->lock);

		_debug("GC DEFERRED REL OBJ%x OP%x",
		       object->debug_id, op->debug_id);
		fscache_stat(&fscache_n_op_gc);

		ASSERTCMP(atomic_read(&op->usage), ==, 0);
		ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);

		ASSERTCMP(object->n_ops, >, 0);
		object->n_ops--;
		if (object->n_ops == 0)
			fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED);

		spin_unlock(&object->lock);
		kfree(op);

	} while (count++ < 20);

	if (!list_empty(&cache->op_gc_list))
		schedule_work(&cache->op_gc);

	_leave("");
}

/*
 * execute an operation using fscache_op_wq to provide processing context -
 * the work item carries the ref on the operation taken in
 * fscache_enqueue_operation(), which pins the object and is dropped here
 * once the processor has run
 */
void fscache_op_work_func(struct work_struct *work)
{
	struct fscache_operation *op =
		container_of(work, struct fscache_operation, work);
	unsigned long start;

	_enter("{OBJ%x OP%x,%d}",
	       op->object->debug_id, op->debug_id, atomic_read(&op->usage));

	ASSERT(op->processor != NULL);
	start = jiffies;
	op->processor(op);
	fscache_hist(fscache_ops_histogram, start);
	fscache_put_operation(op);

	_leave("");
}