/*
 * Copyright (C) 2011 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */
#include "dm-block-manager.h"
#include "dm-persistent-data-internal.h"
#include "../dm-bufio.h"

#include <linux/crc32c.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/rwsem.h>
#include <linux/device-mapper.h>
#include <linux/stacktrace.h>

#define DM_MSG_PREFIX "block manager"

/*----------------------------------------------------------------*/

/*
 * This is a read/write semaphore with a couple of differences.
 *
 * i) There is a restriction on the number of concurrent read locks that
 * may be held at once.  This is just an implementation detail.
 *
 * ii) Recursive locking attempts are detected and return -EINVAL.  A stack
 * trace is also emitted for the previous lock acquisition.
 *
 * iii) Priority is given to write locks.
 */
#define MAX_HOLDERS 4
#define MAX_STACK 10

typedef unsigned long stack_entries[MAX_STACK];

struct block_lock {
	spinlock_t lock;
	__s32 count;
	struct list_head waiters;
	struct task_struct *holders[MAX_HOLDERS];

#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	struct stack_trace traces[MAX_HOLDERS];
	stack_entries entries[MAX_HOLDERS];
#endif
};

struct waiter {
	struct list_head list;
	struct task_struct *task;
	int wants_write;
};

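/*
 * Returns the index of the slot occupied by @task; callers pass NULL to
 * find a free slot.
 */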
static unsigned __find_holder(struct block_lock *lock,
			      struct task_struct *task)
{
	unsigned i;

	for (i = 0; i < MAX_HOLDERS; i++)
		if (lock->holders[i] == task)
			break;

	BUG_ON(i == MAX_HOLDERS);
	return i;
}

/* call this *after* you increment lock->count */
static void __add_holder(struct block_lock *lock, struct task_struct *task)
{
	unsigned h = __find_holder(lock, NULL);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	struct stack_trace *t;
#endif

	get_task_struct(task);
	lock->holders[h] = task;

#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	t = lock->traces + h;
	t->nr_entries = 0;
	t->max_entries = MAX_STACK;
	t->entries = lock->entries[h];
	t->skip = 2;
	save_stack_trace(t);
#endif
}

/* call this *before* you decrement lock->count */
static void __del_holder(struct block_lock *lock, struct task_struct *task)
{
	unsigned h = __find_holder(lock, task);
	lock->holders[h] = NULL;
	put_task_struct(task);
}

static int __check_holder(struct block_lock *lock)
{
	unsigned i;
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	static struct stack_trace t;
	static stack_entries entries;
#endif

	for (i = 0; i < MAX_HOLDERS; i++) {
		if (lock->holders[i] == current) {
			DMERR("recursive lock detected in pool metadata");
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
			DMERR("previously held here:");
			print_stack_trace(lock->traces + i, 4);

			DMERR("subsequent acquisition attempted here:");
			t.nr_entries = 0;
			t.max_entries = MAX_STACK;
			t.entries = entries;
			t.skip = 3;
			save_stack_trace(&t);
			print_stack_trace(&t, 4);
#endif
			return -EINVAL;
		}
	}

	return 0;
}

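/*
 * Classic wait loop: the task marks itself TASK_UNINTERRUPTIBLE *before*
 * testing w->task, so a wake up landing between the test and schedule()
 * is not lost.
 */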
static void __wait(struct waiter *w)
{
	for (;;) {
		set_task_state(current, TASK_UNINTERRUPTIBLE);

		if (!w->task)
			break;

		schedule();
	}

	set_task_state(current, TASK_RUNNING);
}

static void __wake_waiter(struct waiter *w)
{
	struct task_struct *task;

	list_del(&w->list);
	task = w->task;
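	/*
	 * The waiter may return and reuse the stack frame holding @w as
	 * soon as it sees w->task == NULL, so take our copy of the task
	 * pointer before publishing the NULL.
	 */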
	smp_mb();
	w->task = NULL;
	wake_up_process(task);
}

/*
 * We either wake a few readers or a single writer.
 */
static void __wake_many(struct block_lock *lock)
{
	struct waiter *w, *tmp;

	BUG_ON(lock->count < 0);
	list_for_each_entry_safe(w, tmp, &lock->waiters, list) {
		if (lock->count >= MAX_HOLDERS)
			return;

		if (w->wants_write) {
			if (lock->count > 0)
				return; /* still read locked */

			lock->count = -1;
			__add_holder(lock, w->task);
			__wake_waiter(w);
			return;
		}

		lock->count++;
		__add_holder(lock, w->task);
		__wake_waiter(w);
	}
}

static void bl_init(struct block_lock *lock)
{
	int i;

	spin_lock_init(&lock->lock);
	lock->count = 0;
	INIT_LIST_HEAD(&lock->waiters);
	for (i = 0; i < MAX_HOLDERS; i++)
		lock->holders[i] = NULL;
}

static int __available_for_read(struct block_lock *lock)
{
	return lock->count >= 0 &&
		lock->count < MAX_HOLDERS &&
		list_empty(&lock->waiters);
}

static int bl_down_read(struct block_lock *lock)
{
	int r;
	struct waiter w;

	spin_lock(&lock->lock);
	r = __check_holder(lock);
	if (r) {
		spin_unlock(&lock->lock);
		return r;
	}

	if (__available_for_read(lock)) {
		lock->count++;
		__add_holder(lock, current);
		spin_unlock(&lock->lock);
		return 0;
	}

	get_task_struct(current);

	w.task = current;
	w.wants_write = 0;
	list_add_tail(&w.list, &lock->waiters);
	spin_unlock(&lock->lock);

	__wait(&w);
	put_task_struct(current);
	return 0;
}

static int bl_down_read_nonblock(struct block_lock *lock)
{
	int r;

	spin_lock(&lock->lock);
	r = __check_holder(lock);
	if (r)
		goto out;

	if (__available_for_read(lock)) {
		lock->count++;
		__add_holder(lock, current);
		r = 0;
	} else
		r = -EWOULDBLOCK;

out:
	spin_unlock(&lock->lock);
	return r;
}

static void bl_up_read(struct block_lock *lock)
{
	spin_lock(&lock->lock);
	BUG_ON(lock->count <= 0);
	__del_holder(lock, current);
	--lock->count;
	if (!list_empty(&lock->waiters))
		__wake_many(lock);
	spin_unlock(&lock->lock);
}

static int bl_down_write(struct block_lock *lock)
{
	int r;
	struct waiter w;

	spin_lock(&lock->lock);
	r = __check_holder(lock);
	if (r) {
		spin_unlock(&lock->lock);
		return r;
	}

	if (lock->count == 0 && list_empty(&lock->waiters)) {
		lock->count = -1;
		__add_holder(lock, current);
		spin_unlock(&lock->lock);
		return 0;
	}

	get_task_struct(current);
	w.task = current;
	w.wants_write = 1;

	/*
	 * Writers are given priority by being added at the head of the
	 * queue.  We know there's only one mutator in the system, so the
	 * resulting ordering reversal is harmless.
	 */
	list_add(&w.list, &lock->waiters);
	spin_unlock(&lock->lock);

	__wait(&w);
	put_task_struct(current);

	return 0;
}

static void bl_up_write(struct block_lock *lock)
{
	spin_lock(&lock->lock);
	__del_holder(lock, current);
	lock->count = 0;
	if (!list_empty(&lock->waiters))
		__wake_many(lock);
	spin_unlock(&lock->lock);
}
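
/*
 * A minimal usage sketch of the primitives above (illustrative only;
 * this pairing is not called anywhere in this file):
 *
 *	struct block_lock bl;
 *	bl_init(&bl);
 *
 *	if (!bl_down_read(&bl)) {	(shared: up to MAX_HOLDERS readers)
 *		... read the protected data ...
 *		bl_up_read(&bl);
 *	}
 *
 *	if (!bl_down_write(&bl)) {	(exclusive: count becomes -1)
 *		... modify the protected data ...
 *		bl_up_write(&bl);
 *	}
 */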

static void report_recursive_bug(dm_block_t b, int r)
{
	if (r == -EINVAL)
		DMERR("recursive acquisition of block %llu requested.",
		      (unsigned long long) b);
}

/*----------------------------------------------------------------*/

/*
 * The block manager is currently implemented on top of dm-bufio.  struct
 * dm_block_manager and struct dm_block map directly onto a couple of
 * structs in the bufio interface.  I want to retain the freedom to move
 * away from bufio in the future, so these structs are cast only within
 * this .c file rather than being exposed through the public interface.
 */
static struct dm_buffer *to_buffer(struct dm_block *b)
{
	return (struct dm_buffer *) b;
}

dm_block_t dm_block_location(struct dm_block *b)
{
	return dm_bufio_get_block_number(to_buffer(b));
}
EXPORT_SYMBOL_GPL(dm_block_location);

void *dm_block_data(struct dm_block *b)
{
	return dm_bufio_get_block_data(to_buffer(b));
}
EXPORT_SYMBOL_GPL(dm_block_data);

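/*
 * Per-buffer bookkeeping; dm-bufio allocates one of these alongside each
 * buffer (note the aux size passed to dm_bufio_client_create() below).
 */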
struct buffer_aux {
	struct dm_block_validator *validator;
	struct block_lock lock;
	int write_locked;
};

static void dm_block_manager_alloc_callback(struct dm_buffer *buf)
{
	struct buffer_aux *aux = dm_bufio_get_aux_data(buf);
	aux->validator = NULL;
	bl_init(&aux->lock);
}

static void dm_block_manager_write_callback(struct dm_buffer *buf)
{
	struct buffer_aux *aux = dm_bufio_get_aux_data(buf);
	if (aux->validator) {
		aux->validator->prepare_for_write(aux->validator, (struct dm_block *) buf,
			 dm_bufio_get_block_size(dm_bufio_get_client(buf)));
	}
}

/*----------------------------------------------------------------
 * Public interface
 *--------------------------------------------------------------*/
struct dm_block_manager {
	struct dm_bufio_client *bufio;
};

struct dm_block_manager *dm_block_manager_create(struct block_device *bdev,
						 unsigned block_size,
						 unsigned cache_size,
						 unsigned max_held_per_thread)
{
	int r;
	struct dm_block_manager *bm;

	bm = kmalloc(sizeof(*bm), GFP_KERNEL);
	if (!bm) {
		r = -ENOMEM;
		goto bad;
	}

	bm->bufio = dm_bufio_client_create(bdev, block_size, max_held_per_thread,
					   sizeof(struct buffer_aux),
					   dm_block_manager_alloc_callback,
					   dm_block_manager_write_callback);
	if (IS_ERR(bm->bufio)) {
		r = PTR_ERR(bm->bufio);
		kfree(bm);
		goto bad;
	}

	return bm;

bad:
	return ERR_PTR(r);
}
EXPORT_SYMBOL_GPL(dm_block_manager_create);
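
/*
 * A minimal lifecycle sketch (hypothetical caller; the block size, cache
 * size and block number are illustrative):
 *
 *	struct dm_block_manager *bm;
 *	struct dm_block *b;
 *	int r;
 *
 *	bm = dm_block_manager_create(bdev, 4096, 0, 1);
 *	if (IS_ERR(bm))
 *		return PTR_ERR(bm);
 *
 *	r = dm_bm_read_lock(bm, 0, NULL, &b);
 *	if (!r) {
 *		... inspect dm_block_data(b) ...
 *		dm_bm_unlock(b);
 *	}
 *
 *	dm_block_manager_destroy(bm);
 */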

void dm_block_manager_destroy(struct dm_block_manager *bm)
{
	dm_bufio_client_destroy(bm->bufio);
	kfree(bm);
}
EXPORT_SYMBOL_GPL(dm_block_manager_destroy);

unsigned dm_bm_block_size(struct dm_block_manager *bm)
{
	return dm_bufio_get_block_size(bm->bufio);
}
EXPORT_SYMBOL_GPL(dm_bm_block_size);

dm_block_t dm_bm_nr_blocks(struct dm_block_manager *bm)
{
	return dm_bufio_get_device_size(bm->bufio);
}

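/*
 * A buffer's validator is set by the first locker to supply one: its
 * check() is run and, on success, the validator is attached to the
 * buffer.  Every later locker of the buffer must supply that same
 * validator.
 */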
static int dm_bm_validate_buffer(struct dm_block_manager *bm,
				 struct dm_buffer *buf,
				 struct buffer_aux *aux,
				 struct dm_block_validator *v)
{
	if (unlikely(!aux->validator)) {
		int r;
		if (!v)
			return 0;
		r = v->check(v, (struct dm_block *) buf, dm_bufio_get_block_size(bm->bufio));
		if (unlikely(r))
			return r;
		aux->validator = v;
	} else {
		if (unlikely(aux->validator != v)) {
			DMERR("validator mismatch (old=%s vs new=%s) for block %llu",
			      aux->validator->name, v ? v->name : "NULL",
			      (unsigned long long)
			      dm_bufio_get_block_number(buf));
			return -EINVAL;
		}
	}

	return 0;
}

int dm_bm_read_lock(struct dm_block_manager *bm, dm_block_t b,
		    struct dm_block_validator *v,
		    struct dm_block **result)
{
	struct buffer_aux *aux;
	void *p;
	int r;

	p = dm_bufio_read(bm->bufio, b, (struct dm_buffer **) result);
	if (unlikely(IS_ERR(p)))
		return PTR_ERR(p);

	aux = dm_bufio_get_aux_data(to_buffer(*result));
	r = bl_down_read(&aux->lock);
	if (unlikely(r)) {
		dm_bufio_release(to_buffer(*result));
		report_recursive_bug(b, r);
		return r;
	}

	aux->write_locked = 0;

	r = dm_bm_validate_buffer(bm, to_buffer(*result), aux, v);
	if (unlikely(r)) {
		bl_up_read(&aux->lock);
		dm_bufio_release(to_buffer(*result));
		return r;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dm_bm_read_lock);

int dm_bm_write_lock(struct dm_block_manager *bm,
		     dm_block_t b, struct dm_block_validator *v,
		     struct dm_block **result)
{
	struct buffer_aux *aux;
	void *p;
	int r;

	p = dm_bufio_read(bm->bufio, b, (struct dm_buffer **) result);
	if (unlikely(IS_ERR(p)))
		return PTR_ERR(p);

	aux = dm_bufio_get_aux_data(to_buffer(*result));
	r = bl_down_write(&aux->lock);
	if (r) {
		dm_bufio_release(to_buffer(*result));
		report_recursive_bug(b, r);
		return r;
	}

	aux->write_locked = 1;

	r = dm_bm_validate_buffer(bm, to_buffer(*result), aux, v);
	if (unlikely(r)) {
		bl_up_write(&aux->lock);
		dm_bufio_release(to_buffer(*result));
		return r;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dm_bm_write_lock);

int dm_bm_read_try_lock(struct dm_block_manager *bm,
			dm_block_t b, struct dm_block_validator *v,
			struct dm_block **result)
{
	struct buffer_aux *aux;
	void *p;
	int r;

	p = dm_bufio_get(bm->bufio, b, (struct dm_buffer **) result);
	if (unlikely(IS_ERR(p)))
		return PTR_ERR(p);
	if (unlikely(!p))
		return -EWOULDBLOCK;

	aux = dm_bufio_get_aux_data(to_buffer(*result));
	r = bl_down_read_nonblock(&aux->lock);
	if (r < 0) {
		dm_bufio_release(to_buffer(*result));
		report_recursive_bug(b, r);
		return r;
	}
	aux->write_locked = 0;

	r = dm_bm_validate_buffer(bm, to_buffer(*result), aux, v);
	if (unlikely(r)) {
		bl_up_read(&aux->lock);
		dm_bufio_release(to_buffer(*result));
		return r;
	}

	return 0;
}

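/*
 * The new block is zeroed before it is handed out, so there is nothing to
 * validate; the validator is simply attached, ready for the eventual
 * writeback.
 */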
int dm_bm_write_lock_zero(struct dm_block_manager *bm,
			  dm_block_t b, struct dm_block_validator *v,
			  struct dm_block **result)
{
	int r;
	struct buffer_aux *aux;
	void *p;

	p = dm_bufio_new(bm->bufio, b, (struct dm_buffer **) result);
	if (unlikely(IS_ERR(p)))
		return PTR_ERR(p);

	memset(p, 0, dm_bm_block_size(bm));

	aux = dm_bufio_get_aux_data(to_buffer(*result));
	r = bl_down_write(&aux->lock);
	if (r) {
		dm_bufio_release(to_buffer(*result));
		return r;
	}

	aux->write_locked = 1;
	aux->validator = v;

	return 0;
}

int dm_bm_unlock(struct dm_block *b)
{
	struct buffer_aux *aux;
	aux = dm_bufio_get_aux_data(to_buffer(b));

	if (aux->write_locked) {
		dm_bufio_mark_buffer_dirty(to_buffer(b));
		bl_up_write(&aux->lock);
	} else
		bl_up_read(&aux->lock);

	dm_bufio_release(to_buffer(b));

	return 0;
}
EXPORT_SYMBOL_GPL(dm_bm_unlock);

int dm_bm_unlock_move(struct dm_block *b, dm_block_t n)
{
	struct buffer_aux *aux;

	aux = dm_bufio_get_aux_data(to_buffer(b));

	if (aux->write_locked) {
		dm_bufio_mark_buffer_dirty(to_buffer(b));
		bl_up_write(&aux->lock);
	} else
		bl_up_read(&aux->lock);

	dm_bufio_release_move(to_buffer(b), n);
	return 0;
}

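/*
 * Everything except the still-locked superblock is flushed first; the
 * unlock then marks the superblock dirty, and the second flush writes it,
 * so the superblock only reaches the disk after the blocks it references.
 */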
int dm_bm_flush_and_unlock(struct dm_block_manager *bm,
			   struct dm_block *superblock)
{
	int r;

	r = dm_bufio_write_dirty_buffers(bm->bufio);
	if (unlikely(r)) {
		dm_bm_unlock(superblock);
		return r;
	}

	dm_bm_unlock(superblock);

	return dm_bufio_write_dirty_buffers(bm->bufio);
}

u32 dm_bm_checksum(const void *data, size_t len, u32 init_xor)
{
	return crc32c(~(u32) 0, data, len) ^ init_xor;
}
EXPORT_SYMBOL_GPL(dm_bm_checksum);
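
/*
 * A typical validator checksums everything after the csum field itself,
 * e.g. (illustrative only -- CSUM_SEED and the on-disk layout shown are
 * hypothetical):
 *
 *	disk->csum = cpu_to_le32(dm_bm_checksum(&disk->flags,
 *						block_size - sizeof(__le32),
 *						CSUM_SEED));
 */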

/*----------------------------------------------------------------*/

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_DESCRIPTION("Immutable metadata library for dm");

/*----------------------------------------------------------------*/