/*
 * Copyright (C) 2012-2017 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-prison-v2.h"

#include <linux/spinlock.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/rwsem.h>

/*----------------------------------------------------------------*/

#define MIN_CELLS 1024

struct dm_bio_prison_v2 {
	struct workqueue_struct *wq;

	spinlock_t lock;
	mempool_t *cell_pool;
	struct rb_root cells;
};
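
/*
 * Locking note: prison->lock guards both the rb-tree of cells and the
 * per-cell state (shared_count, exclusive_lock, the deferred bio list).
 * Every entry point takes it with irqsave, which suggests the prison may
 * be used from interrupt context (e.g. bio completion) as well as from
 * process context.
 */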

static struct kmem_cache *_cell_cache;

/*----------------------------------------------------------------*/

/*
 * The cell mempool is created with MIN_CELLS entries, which should be read
 * as the number of cells in use _concurrently_.  Don't confuse it with the
 * number of distinct keys.
 */
struct dm_bio_prison_v2 *dm_bio_prison_create_v2(struct workqueue_struct *wq)
{
	struct dm_bio_prison_v2 *prison = kmalloc(sizeof(*prison), GFP_KERNEL);

	if (!prison)
		return NULL;

	prison->wq = wq;
	spin_lock_init(&prison->lock);

	prison->cell_pool = mempool_create_slab_pool(MIN_CELLS, _cell_cache);
	if (!prison->cell_pool) {
		kfree(prison);
		return NULL;
	}

	prison->cells = RB_ROOT;

	return prison;
}
EXPORT_SYMBOL_GPL(dm_bio_prison_create_v2);

void dm_bio_prison_destroy_v2(struct dm_bio_prison_v2 *prison)
{
	mempool_destroy(prison->cell_pool);
	kfree(prison);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_destroy_v2);

struct dm_bio_prison_cell_v2 *dm_bio_prison_alloc_cell_v2(struct dm_bio_prison_v2 *prison, gfp_t gfp)
{
	return mempool_alloc(prison->cell_pool, gfp);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_alloc_cell_v2);

void dm_bio_prison_free_cell_v2(struct dm_bio_prison_v2 *prison,
				struct dm_bio_prison_cell_v2 *cell)
{
	mempool_free(cell, prison->cell_pool);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_free_cell_v2);
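
/*
 * Typical lifecycle (a sketch, not taken from any real target): the caller
 * owns the workqueue and preallocates cells outside the prison lock, then
 * hands back whatever ends up unused.
 *
 *	struct workqueue_struct *wq = alloc_workqueue("example", 0, 0);
 *	struct dm_bio_prison_v2 *prison = dm_bio_prison_create_v2(wq);
 *	struct dm_bio_prison_cell_v2 *cell =
 *		dm_bio_prison_alloc_cell_v2(prison, GFP_NOIO);
 *	...
 *	dm_bio_prison_free_cell_v2(prison, cell);
 *	dm_bio_prison_destroy_v2(prison);
 *	destroy_workqueue(wq);
 */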

static void __setup_new_cell(struct dm_cell_key_v2 *key,
			     struct dm_bio_prison_cell_v2 *cell)
{
	memset(cell, 0, sizeof(*cell));
	memcpy(&cell->key, key, sizeof(cell->key));
	bio_list_init(&cell->bios);
}

static int cmp_keys(struct dm_cell_key_v2 *lhs,
		    struct dm_cell_key_v2 *rhs)
{
	if (lhs->virtual < rhs->virtual)
		return -1;

	if (lhs->virtual > rhs->virtual)
		return 1;

	if (lhs->dev < rhs->dev)
		return -1;

	if (lhs->dev > rhs->dev)
		return 1;

	if (lhs->block_end <= rhs->block_begin)
		return -1;

	if (lhs->block_begin >= rhs->block_end)
		return 1;

	return 0;
}
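
/*
 * Note that two keys on the same (virtual, dev) compare equal whenever
 * their block ranges merely overlap, so a cell guards a range rather than
 * a single block.  For example, [0, 8) and [4, 12) collide: neither of the
 * block_end/block_begin tests fires, cmp_keys() returns 0, and the second
 * key finds the first key's cell.
 */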

/*
 * Returns true if node found, otherwise it inserts a new one.
 */
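/*
 * cell_prealloc is allocated by the caller before prison->lock is taken,
 * so this insert path never allocates under the spinlock.  If the key is
 * already present, the preallocated cell is left untouched for the caller
 * to reuse or free.
 */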
static bool __find_or_insert(struct dm_bio_prison_v2 *prison,
			     struct dm_cell_key_v2 *key,
			     struct dm_bio_prison_cell_v2 *cell_prealloc,
			     struct dm_bio_prison_cell_v2 **result)
{
	int r;
	struct rb_node **new = &prison->cells.rb_node, *parent = NULL;

	while (*new) {
		struct dm_bio_prison_cell_v2 *cell =
			rb_entry(*new, struct dm_bio_prison_cell_v2, node);

		r = cmp_keys(key, &cell->key);

		parent = *new;
		if (r < 0)
			new = &((*new)->rb_left);

		else if (r > 0)
			new = &((*new)->rb_right);

		else {
			*result = cell;
			return true;
		}
	}

	__setup_new_cell(key, cell_prealloc);
	*result = cell_prealloc;
	rb_link_node(&cell_prealloc->node, parent, new);
	rb_insert_color(&cell_prealloc->node, &prison->cells);

	return false;
}

static bool __get(struct dm_bio_prison_v2 *prison,
		  struct dm_cell_key_v2 *key,
		  unsigned lock_level,
		  struct bio *inmate,
		  struct dm_bio_prison_cell_v2 *cell_prealloc,
		  struct dm_bio_prison_cell_v2 **cell)
{
	if (__find_or_insert(prison, key, cell_prealloc, cell)) {
		if ((*cell)->exclusive_lock) {
			if (lock_level <= (*cell)->exclusive_level) {
				bio_list_add(&(*cell)->bios, inmate);
				return false;
			}
		}

		(*cell)->shared_count++;

	} else
		(*cell)->shared_count = 1;

	return true;
}
bool dm_cell_get_v2(struct dm_bio_prison_v2 *prison,
		    struct dm_cell_key_v2 *key,
		    unsigned lock_level,
		    struct bio *inmate,
		    struct dm_bio_prison_cell_v2 *cell_prealloc,
		    struct dm_bio_prison_cell_v2 **cell_result)
{
	bool r;
	unsigned long flags;

	spin_lock_irqsave(&prison->lock, flags);
	r = __get(prison, key, lock_level, inmate, cell_prealloc, cell_result);
	spin_unlock_irqrestore(&prison->lock, flags);

	return r;
}
EXPORT_SYMBOL_GPL(dm_cell_get_v2);
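
/*
 * Sketch of a caller (hypothetical; issue_bio() is illustrative).  A false
 * return means the bio was added to the cell's deferred list and must not
 * be issued now; it is handed back later by dm_cell_unlock_v2().
 *
 *	struct dm_bio_prison_cell_v2 *prealloc, *cell;
 *
 *	prealloc = dm_bio_prison_alloc_cell_v2(prison, GFP_NOIO);
 *	if (dm_cell_get_v2(prison, &key, lock_level, bio, prealloc, &cell))
 *		issue_bio(bio);		// shared access granted
 *	if (cell != prealloc)		// prealloc wasn't inserted; hand it back
 *		dm_bio_prison_free_cell_v2(prison, prealloc);
 */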

static bool __put(struct dm_bio_prison_v2 *prison,
		  struct dm_bio_prison_cell_v2 *cell)
{
	BUG_ON(!cell->shared_count);
	cell->shared_count--;

	// FIXME: shared locks granted above the lock level could starve this
	if (!cell->shared_count) {
		if (cell->exclusive_lock) {
			if (cell->quiesce_continuation) {
				queue_work(prison->wq, cell->quiesce_continuation);
				cell->quiesce_continuation = NULL;
			}
		} else {
			rb_erase(&cell->node, &prison->cells);
			return true;
		}
	}

	return false;
}

bool dm_cell_put_v2(struct dm_bio_prison_v2 *prison,
		    struct dm_bio_prison_cell_v2 *cell)
{
	bool r;
	unsigned long flags;

	spin_lock_irqsave(&prison->lock, flags);
	r = __put(prison, cell);
	spin_unlock_irqrestore(&prison->lock, flags);

	return r;
}
EXPORT_SYMBOL_GPL(dm_cell_put_v2);
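
/*
 * A true return means the last shared holder dropped out and the cell was
 * removed from the tree, so the caller now owns it and should free it with
 * dm_bio_prison_free_cell_v2().  When an exclusive lock is pending, the
 * final put instead kicks the quiesce continuation on prison->wq.
 */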

static int __lock(struct dm_bio_prison_v2 *prison,
		  struct dm_cell_key_v2 *key,
		  unsigned lock_level,
		  struct dm_bio_prison_cell_v2 *cell_prealloc,
		  struct dm_bio_prison_cell_v2 **cell_result)
{
	struct dm_bio_prison_cell_v2 *cell;

	if (__find_or_insert(prison, key, cell_prealloc, &cell)) {
		if (cell->exclusive_lock)
			return -EBUSY;

		cell->exclusive_lock = true;
		cell->exclusive_level = lock_level;
		*cell_result = cell;

		// FIXME: we don't yet know what level these shared locks
		// were taken at, so have to quiesce them all.
		return cell->shared_count > 0;

	} else {
		cell = cell_prealloc;
		cell->shared_count = 0;
		cell->exclusive_lock = true;
		cell->exclusive_level = lock_level;
		*cell_result = cell;
	}

	return 0;
}

int dm_cell_lock_v2(struct dm_bio_prison_v2 *prison,
		    struct dm_cell_key_v2 *key,
		    unsigned lock_level,
		    struct dm_bio_prison_cell_v2 *cell_prealloc,
		    struct dm_bio_prison_cell_v2 **cell_result)
{
	int r;
	unsigned long flags;

	spin_lock_irqsave(&prison->lock, flags);
	r = __lock(prison, key, lock_level, cell_prealloc, cell_result);
	spin_unlock_irqrestore(&prison->lock, flags);

	return r;
}
EXPORT_SYMBOL_GPL(dm_cell_lock_v2);
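
/*
 * Illustrative locking sequence (hypothetical caller; my_work is a
 * work_struct the caller has set up with INIT_WORK, do_exclusive_work()
 * is illustrative).  A return of 1 means shared holders still exist, so
 * the caller parks a continuation with dm_cell_quiesce_v2() and resumes
 * from the workqueue once the last holder calls dm_cell_put_v2(); a
 * return of 0 means the exclusive lock is held immediately.
 *
 *	r = dm_cell_lock_v2(prison, &key, lock_level, prealloc, &cell);
 *	if (r < 0)
 *		return r;	// -EBUSY: already exclusively locked
 *	if (r)
 *		dm_cell_quiesce_v2(prison, cell, &my_work);
 *	else
 *		do_exclusive_work(cell);
 */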

static void __quiesce(struct dm_bio_prison_v2 *prison,
		      struct dm_bio_prison_cell_v2 *cell,
		      struct work_struct *continuation)
{
	if (!cell->shared_count)
		queue_work(prison->wq, continuation);
	else
		cell->quiesce_continuation = continuation;
}

void dm_cell_quiesce_v2(struct dm_bio_prison_v2 *prison,
			struct dm_bio_prison_cell_v2 *cell,
			struct work_struct *continuation)
{
	unsigned long flags;

	spin_lock_irqsave(&prison->lock, flags);
	__quiesce(prison, cell, continuation);
	spin_unlock_irqrestore(&prison->lock, flags);
}
EXPORT_SYMBOL_GPL(dm_cell_quiesce_v2);

static int __promote(struct dm_bio_prison_v2 *prison,
		     struct dm_bio_prison_cell_v2 *cell,
		     unsigned new_lock_level)
{
	if (!cell->exclusive_lock)
		return -EINVAL;

	cell->exclusive_level = new_lock_level;
	return cell->shared_count > 0;
}

int dm_cell_lock_promote_v2(struct dm_bio_prison_v2 *prison,
			    struct dm_bio_prison_cell_v2 *cell,
			    unsigned new_lock_level)
{
	int r;
	unsigned long flags;

	spin_lock_irqsave(&prison->lock, flags);
	r = __promote(prison, cell, new_lock_level);
	spin_unlock_irqrestore(&prison->lock, flags);

	return r;
}
EXPORT_SYMBOL_GPL(dm_cell_lock_promote_v2);
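
/*
 * Promotion raises exclusive_level in place, which widens the set of
 * shared getters that __get() will defer (those with lock_level <= the
 * new exclusive_level).  As with dm_cell_lock_v2(), a return of 1 asks
 * the caller to quiesce the remaining shared holders before proceeding.
 */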

static bool __unlock(struct dm_bio_prison_v2 *prison,
		     struct dm_bio_prison_cell_v2 *cell,
		     struct bio_list *bios)
{
	BUG_ON(!cell->exclusive_lock);

	bio_list_merge(bios, &cell->bios);
	bio_list_init(&cell->bios);

	if (cell->shared_count) {
		cell->exclusive_lock = false;
		return false;
	}

	rb_erase(&cell->node, &prison->cells);
	return true;
}

bool dm_cell_unlock_v2(struct dm_bio_prison_v2 *prison,
		       struct dm_bio_prison_cell_v2 *cell,
		       struct bio_list *bios)
{
	bool r;
	unsigned long flags;

	spin_lock_irqsave(&prison->lock, flags);
	r = __unlock(prison, cell, bios);
	spin_unlock_irqrestore(&prison->lock, flags);

	return r;
}
EXPORT_SYMBOL_GPL(dm_cell_unlock_v2);
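
/*
 * Sketch of releasing the exclusive lock (hypothetical caller): bios that
 * were deferred by __get() come back on the caller's list and should be
 * resubmitted; if the cell dropped out of the tree (true return) the
 * caller frees it.
 *
 *	struct bio_list bios;
 *	struct bio *bio;
 *
 *	bio_list_init(&bios);
 *	if (dm_cell_unlock_v2(prison, cell, &bios))
 *		dm_bio_prison_free_cell_v2(prison, cell);
 *	while ((bio = bio_list_pop(&bios)))
 *		submit_bio(bio);	// or requeue via the target's deferred path
 */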

/*----------------------------------------------------------------*/

int __init dm_bio_prison_init_v2(void)
{
	_cell_cache = KMEM_CACHE(dm_bio_prison_cell_v2, 0);
	if (!_cell_cache)
		return -ENOMEM;

	return 0;
}

void dm_bio_prison_exit_v2(void)
{
	kmem_cache_destroy(_cell_cache);
	_cell_cache = NULL;
}