module/zfs/space_map.c
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/zio.h>
#include <sys/space_map.h>

static kmem_cache_t *space_seg_cache;

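/*
 * Kmem cache for the space_seg_t nodes that make up the in-core AVL trees;
 * created once at module init and destroyed at module fini.
 */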
void
space_map_init(void)
{
	ASSERT(space_seg_cache == NULL);
	space_seg_cache = kmem_cache_create("space_seg_cache",
	    sizeof (space_seg_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
}

void
space_map_fini(void)
{
	kmem_cache_destroy(space_seg_cache);
	space_seg_cache = NULL;
}

/*
 * Space map routines.
 * NOTE: caller is responsible for all locking.
 */
static int
space_map_seg_compare(const void *x1, const void *x2)
{
	const space_seg_t *s1 = x1;
	const space_seg_t *s2 = x2;

	if (s1->ss_start < s2->ss_start) {
		if (s1->ss_end > s2->ss_start)
			return (0);
		return (-1);
	}
	if (s1->ss_start > s2->ss_start) {
		if (s1->ss_start < s2->ss_end)
			return (0);
		return (1);
	}
	return (0);
}

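/*
 * Initialize an empty in-core space map covering [start, start + size).
 * All segment offsets and sizes must be multiples of 1 << shift, and the
 * caller-supplied lock (lp) protects the map.
 */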
void
space_map_create(space_map_t *sm, uint64_t start, uint64_t size, uint8_t shift,
    kmutex_t *lp)
{
	bzero(sm, sizeof (*sm));

	cv_init(&sm->sm_load_cv, NULL, CV_DEFAULT, NULL);

	avl_create(&sm->sm_root, space_map_seg_compare,
	    sizeof (space_seg_t), offsetof(struct space_seg, ss_node));

	sm->sm_start = start;
	sm->sm_size = size;
	sm->sm_shift = shift;
	sm->sm_lock = lp;
}

void
space_map_destroy(space_map_t *sm)
{
	ASSERT(!sm->sm_loaded && !sm->sm_loading);
	VERIFY3U(sm->sm_space, ==, 0);
	avl_destroy(&sm->sm_root);
	cv_destroy(&sm->sm_load_cv);
}

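/*
 * Add the segment [start, start + size) to the space map, merging it with
 * any adjacent segments.  Adding an already-present range triggers
 * zfs_panic_recover().  The caller must hold sm_lock.
 */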
void
space_map_add(space_map_t *sm, uint64_t start, uint64_t size)
{
	avl_index_t where;
	space_seg_t ssearch, *ss_before, *ss_after, *ss;
	uint64_t end = start + size;
	int merge_before, merge_after;

	ASSERT(MUTEX_HELD(sm->sm_lock));
	VERIFY(size != 0);
	VERIFY3U(start, >=, sm->sm_start);
	VERIFY3U(end, <=, sm->sm_start + sm->sm_size);
	VERIFY(sm->sm_space + size <= sm->sm_size);
	VERIFY(P2PHASE(start, 1ULL << sm->sm_shift) == 0);
	VERIFY(P2PHASE(size, 1ULL << sm->sm_shift) == 0);

	ssearch.ss_start = start;
	ssearch.ss_end = end;
	ss = avl_find(&sm->sm_root, &ssearch, &where);

	if (ss != NULL && ss->ss_start <= start && ss->ss_end >= end) {
		zfs_panic_recover("zfs: allocating allocated segment "
		    "(offset=%llu size=%llu)\n",
		    (longlong_t)start, (longlong_t)size);
		return;
	}

	/* Make sure we don't overlap with either of our neighbors */
	VERIFY(ss == NULL);

	ss_before = avl_nearest(&sm->sm_root, where, AVL_BEFORE);
	ss_after = avl_nearest(&sm->sm_root, where, AVL_AFTER);

	merge_before = (ss_before != NULL && ss_before->ss_end == start);
	merge_after = (ss_after != NULL && ss_after->ss_start == end);

	if (merge_before && merge_after) {
		avl_remove(&sm->sm_root, ss_before);
		if (sm->sm_pp_root) {
			avl_remove(sm->sm_pp_root, ss_before);
			avl_remove(sm->sm_pp_root, ss_after);
		}
		ss_after->ss_start = ss_before->ss_start;
		kmem_cache_free(space_seg_cache, ss_before);
		ss = ss_after;
	} else if (merge_before) {
		ss_before->ss_end = end;
		if (sm->sm_pp_root)
			avl_remove(sm->sm_pp_root, ss_before);
		ss = ss_before;
	} else if (merge_after) {
		ss_after->ss_start = start;
		if (sm->sm_pp_root)
			avl_remove(sm->sm_pp_root, ss_after);
		ss = ss_after;
	} else {
		ss = kmem_cache_alloc(space_seg_cache, KM_PUSHPAGE);
		ss->ss_start = start;
		ss->ss_end = end;
		avl_insert(&sm->sm_root, ss, where);
	}

	if (sm->sm_pp_root)
		avl_add(sm->sm_pp_root, ss);

	sm->sm_space += size;
}

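/*
 * Remove the segment [start, start + size) from the space map.  The range
 * must be entirely contained in an existing segment, which is trimmed or
 * split as needed; freeing a range that is not in the map triggers
 * zfs_panic_recover().  The caller must hold sm_lock.
 */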
void
space_map_remove(space_map_t *sm, uint64_t start, uint64_t size)
{
	avl_index_t where;
	space_seg_t ssearch, *ss, *newseg;
	uint64_t end = start + size;
	int left_over, right_over;

	ASSERT(MUTEX_HELD(sm->sm_lock));
	VERIFY(size != 0);
	VERIFY(P2PHASE(start, 1ULL << sm->sm_shift) == 0);
	VERIFY(P2PHASE(size, 1ULL << sm->sm_shift) == 0);

	ssearch.ss_start = start;
	ssearch.ss_end = end;
	ss = avl_find(&sm->sm_root, &ssearch, &where);

	/* Make sure we completely overlap with someone */
	if (ss == NULL) {
		zfs_panic_recover("zfs: freeing free segment "
		    "(offset=%llu size=%llu)",
		    (longlong_t)start, (longlong_t)size);
		return;
	}
	VERIFY3U(ss->ss_start, <=, start);
	VERIFY3U(ss->ss_end, >=, end);
	VERIFY(sm->sm_space - size <= sm->sm_size);

	left_over = (ss->ss_start != start);
	right_over = (ss->ss_end != end);

	if (sm->sm_pp_root)
		avl_remove(sm->sm_pp_root, ss);

	if (left_over && right_over) {
		newseg = kmem_cache_alloc(space_seg_cache, KM_PUSHPAGE);
		newseg->ss_start = end;
		newseg->ss_end = ss->ss_end;
		ss->ss_end = start;
		avl_insert_here(&sm->sm_root, newseg, ss, AVL_AFTER);
		if (sm->sm_pp_root)
			avl_add(sm->sm_pp_root, newseg);
	} else if (left_over) {
		ss->ss_end = start;
	} else if (right_over) {
		ss->ss_start = end;
	} else {
		avl_remove(&sm->sm_root, ss);
		kmem_cache_free(space_seg_cache, ss);
		ss = NULL;
	}

	if (sm->sm_pp_root && ss != NULL)
		avl_add(sm->sm_pp_root, ss);

	sm->sm_space -= size;
}

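/*
 * Return B_TRUE if [start, start + size) lies entirely within a single
 * segment of the space map.  The caller must hold sm_lock.
 */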
boolean_t
space_map_contains(space_map_t *sm, uint64_t start, uint64_t size)
{
	avl_index_t where;
	space_seg_t ssearch, *ss;
	uint64_t end = start + size;

	ASSERT(MUTEX_HELD(sm->sm_lock));
	VERIFY(size != 0);
	VERIFY(P2PHASE(start, 1ULL << sm->sm_shift) == 0);
	VERIFY(P2PHASE(size, 1ULL << sm->sm_shift) == 0);

	ssearch.ss_start = start;
	ssearch.ss_end = end;
	ss = avl_find(&sm->sm_root, &ssearch, &where);

	return (ss != NULL && ss->ss_start <= start && ss->ss_end >= end);
}

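/*
 * Empty the space map, invoking func(mdest, start, size) on each segment
 * as it is removed (if func is non-NULL) and freeing the segment nodes.
 * The caller must hold sm_lock.
 */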
void
space_map_vacate(space_map_t *sm, space_map_func_t *func, space_map_t *mdest)
{
	space_seg_t *ss;
	void *cookie = NULL;

	ASSERT(MUTEX_HELD(sm->sm_lock));

	while ((ss = avl_destroy_nodes(&sm->sm_root, &cookie)) != NULL) {
		if (func != NULL)
			func(mdest, ss->ss_start, ss->ss_end - ss->ss_start);
		kmem_cache_free(space_seg_cache, ss);
	}
	sm->sm_space = 0;
}

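/*
 * Invoke func(mdest, start, size) on every segment in the space map,
 * leaving the map intact.  The caller must hold sm_lock.
 */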
void
space_map_walk(space_map_t *sm, space_map_func_t *func, space_map_t *mdest)
{
	space_seg_t *ss;

	ASSERT(MUTEX_HELD(sm->sm_lock));

	for (ss = avl_first(&sm->sm_root); ss; ss = AVL_NEXT(&sm->sm_root, ss))
		func(mdest, ss->ss_start, ss->ss_end - ss->ss_start);
}

/*
 * Wait for any in-progress space_map_load() to complete.
 */
void
space_map_load_wait(space_map_t *sm)
{
	ASSERT(MUTEX_HELD(sm->sm_lock));

	while (sm->sm_loading) {
		ASSERT(!sm->sm_loaded);
		cv_wait(&sm->sm_load_cv, sm->sm_lock);
	}
}

/*
 * Note: space_map_load() will drop sm_lock across dmu_read() calls.
 * The caller must be OK with this.
 */
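/*
 * Read the on-disk space map object (smo) in blocks of
 * 1 << SPACE_MAP_BLOCKSHIFT bytes and replay each entry into the in-core
 * map: entries matching maptype are added, all others are removed.  When
 * loading as SM_FREE, the map is first seeded with the entire range so
 * that replaying the log yields the current free space.
 */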
int
space_map_load(space_map_t *sm, space_map_ops_t *ops, uint8_t maptype,
    space_map_obj_t *smo, objset_t *os)
{
	uint64_t *entry, *entry_map, *entry_map_end;
	uint64_t bufsize, size, offset, end, space;
	uint64_t mapstart = sm->sm_start;
	int error = 0;

	ASSERT(MUTEX_HELD(sm->sm_lock));
	ASSERT(!sm->sm_loaded);
	ASSERT(!sm->sm_loading);

	sm->sm_loading = B_TRUE;
	end = smo->smo_objsize;
	space = smo->smo_alloc;

	ASSERT(sm->sm_ops == NULL);
	VERIFY3U(sm->sm_space, ==, 0);

	if (maptype == SM_FREE) {
		space_map_add(sm, sm->sm_start, sm->sm_size);
		space = sm->sm_size - space;
	}

	bufsize = 1ULL << SPACE_MAP_BLOCKSHIFT;
	entry_map = zio_buf_alloc(bufsize);

	mutex_exit(sm->sm_lock);
	if (end > bufsize)
		dmu_prefetch(os, smo->smo_object, bufsize, end - bufsize);
	mutex_enter(sm->sm_lock);

	for (offset = 0; offset < end; offset += bufsize) {
		size = MIN(end - offset, bufsize);
		VERIFY(P2PHASE(size, sizeof (uint64_t)) == 0);
		VERIFY(size != 0);

		dprintf("object=%llu offset=%llx size=%llx\n",
		    smo->smo_object, offset, size);

		mutex_exit(sm->sm_lock);
		error = dmu_read(os, smo->smo_object, offset, size, entry_map,
		    DMU_READ_PREFETCH);
		mutex_enter(sm->sm_lock);
		if (error != 0)
			break;

		entry_map_end = entry_map + (size / sizeof (uint64_t));
		for (entry = entry_map; entry < entry_map_end; entry++) {
			uint64_t e = *entry;

			if (SM_DEBUG_DECODE(e))	/* Skip debug entries */
				continue;

			(SM_TYPE_DECODE(e) == maptype ?
			    space_map_add : space_map_remove)(sm,
			    (SM_OFFSET_DECODE(e) << sm->sm_shift) + mapstart,
			    SM_RUN_DECODE(e) << sm->sm_shift);
		}
	}

	if (error == 0) {
		VERIFY3U(sm->sm_space, ==, space);

		sm->sm_loaded = B_TRUE;
		sm->sm_ops = ops;
		if (ops != NULL)
			ops->smop_load(sm);
	} else {
		space_map_vacate(sm, NULL, NULL);
	}

	zio_buf_free(entry_map, bufsize);

	sm->sm_loading = B_FALSE;

	cv_broadcast(&sm->sm_load_cv);

	return (error);
}

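/*
 * Unload the space map: notify the ops vector (if any), clear the loaded
 * state, and discard all in-core segments.  The caller must hold sm_lock.
 */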
void
space_map_unload(space_map_t *sm)
{
	ASSERT(MUTEX_HELD(sm->sm_lock));

	if (sm->sm_loaded && sm->sm_ops != NULL)
		sm->sm_ops->smop_unload(sm);

	sm->sm_loaded = B_FALSE;
	sm->sm_ops = NULL;

	space_map_vacate(sm, NULL, NULL);
}

uint64_t
space_map_maxsize(space_map_t *sm)
{
	ASSERT(sm->sm_ops != NULL);
	return (sm->sm_ops->smop_max(sm));
}

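/*
 * Allocation wrappers: defer the placement decision to the sm_ops vector
 * and keep the in-core map consistent by removing (alloc, claim) or adding
 * (free) the range.  space_map_alloc() returns -1ULL if no suitable region
 * is found.
 */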
uint64_t
space_map_alloc(space_map_t *sm, uint64_t size)
{
	uint64_t start;

	start = sm->sm_ops->smop_alloc(sm, size);
	if (start != -1ULL)
		space_map_remove(sm, start, size);
	return (start);
}

void
space_map_claim(space_map_t *sm, uint64_t start, uint64_t size)
{
	sm->sm_ops->smop_claim(sm, start, size);
	space_map_remove(sm, start, size);
}

void
space_map_free(space_map_t *sm, uint64_t start, uint64_t size)
{
	space_map_add(sm, start, size);
	sm->sm_ops->smop_free(sm, start, size);
}

/*
 * Note: space_map_sync() will drop sm_lock across dmu_write() calls.
 */
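/*
 * Each on-disk entry is a single uint64_t encoding an offset (relative to
 * sm_start, in units of 1 << sm_shift), the map type, and a run length of
 * at most SM_RUN_MAX.  A debug entry recording the maptype, sync pass, and
 * txg is written at the start of each sync.
 */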
void
space_map_sync(space_map_t *sm, uint8_t maptype,
    space_map_obj_t *smo, objset_t *os, dmu_tx_t *tx)
{
	spa_t *spa = dmu_objset_spa(os);
	void *cookie = NULL;
	space_seg_t *ss;
	uint64_t bufsize, start, size, run_len, delta, sm_space;
	uint64_t *entry, *entry_map, *entry_map_end;

	ASSERT(MUTEX_HELD(sm->sm_lock));

	if (sm->sm_space == 0)
		return;

	dprintf("object %4llu, txg %llu, pass %d, %c, count %lu, space %llx\n",
	    smo->smo_object, dmu_tx_get_txg(tx), spa_sync_pass(spa),
	    maptype == SM_ALLOC ? 'A' : 'F', avl_numnodes(&sm->sm_root),
	    sm->sm_space);

	if (maptype == SM_ALLOC)
		smo->smo_alloc += sm->sm_space;
	else
		smo->smo_alloc -= sm->sm_space;

	bufsize = (8 + avl_numnodes(&sm->sm_root)) * sizeof (uint64_t);
	bufsize = MIN(bufsize, 1ULL << SPACE_MAP_BLOCKSHIFT);
	entry_map = zio_buf_alloc(bufsize);
	entry_map_end = entry_map + (bufsize / sizeof (uint64_t));
	entry = entry_map;

	*entry++ = SM_DEBUG_ENCODE(1) |
	    SM_DEBUG_ACTION_ENCODE(maptype) |
	    SM_DEBUG_SYNCPASS_ENCODE(spa_sync_pass(spa)) |
	    SM_DEBUG_TXG_ENCODE(dmu_tx_get_txg(tx));

	delta = 0;
	sm_space = sm->sm_space;
	while ((ss = avl_destroy_nodes(&sm->sm_root, &cookie)) != NULL) {
		size = ss->ss_end - ss->ss_start;
		start = (ss->ss_start - sm->sm_start) >> sm->sm_shift;

		delta += size;
		size >>= sm->sm_shift;

		while (size) {
			run_len = MIN(size, SM_RUN_MAX);

			if (entry == entry_map_end) {
				mutex_exit(sm->sm_lock);
				dmu_write(os, smo->smo_object, smo->smo_objsize,
				    bufsize, entry_map, tx);
				mutex_enter(sm->sm_lock);
				smo->smo_objsize += bufsize;
				entry = entry_map;
			}

			*entry++ = SM_OFFSET_ENCODE(start) |
			    SM_TYPE_ENCODE(maptype) |
			    SM_RUN_ENCODE(run_len);

			start += run_len;
			size -= run_len;
		}
		kmem_cache_free(space_seg_cache, ss);
	}

	if (entry != entry_map) {
		size = (entry - entry_map) * sizeof (uint64_t);
		mutex_exit(sm->sm_lock);
		dmu_write(os, smo->smo_object, smo->smo_objsize,
		    size, entry_map, tx);
		mutex_enter(sm->sm_lock);
		smo->smo_objsize += size;
	}

	/*
	 * Ensure that the space_map's accounting wasn't changed
	 * while we were in the middle of writing it out.
	 */
	VERIFY3U(sm->sm_space, ==, sm_space);

	zio_buf_free(entry_map, bufsize);

	sm->sm_space -= delta;
	VERIFY3U(sm->sm_space, ==, 0);
}

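/*
 * Reset the on-disk space map object to empty by freeing its entire
 * contents and zeroing its size and allocation counters.
 */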
void
space_map_truncate(space_map_obj_t *smo, objset_t *os, dmu_tx_t *tx)
{
	VERIFY(dmu_free_range(os, smo->smo_object, 0, -1ULL, tx) == 0);

	smo->smo_objsize = 0;
	smo->smo_alloc = 0;
}

/*
 * Space map reference trees.
 *
 * A space map is a collection of integers. Every integer is either
 * in the map, or it's not. A space map reference tree generalizes
 * the idea: it allows its members to have arbitrary reference counts,
 * as opposed to the implicit reference count of 0 or 1 in a space map.
 * This representation comes in handy when computing the union or
 * intersection of multiple space maps. For example, the union of
 * N space maps is the subset of the reference tree with refcnt >= 1.
 * The intersection of N space maps is the subset with refcnt >= N.
 *
 * [It's very much like a Fourier transform. Unions and intersections
 * are hard to perform in the 'space map domain', so we convert the maps
 * into the 'reference count domain', where it's trivial, then invert.]
 *
 * vdev_dtl_reassess() uses computations of this form to determine
 * DTL_MISSING and DTL_OUTAGE for interior vdevs -- e.g. a RAID-Z vdev
 * has an outage wherever refcnt >= vdev_nparity + 1, and a mirror vdev
 * has an outage wherever refcnt >= vdev_children.
 */
static int
space_map_ref_compare(const void *x1, const void *x2)
{
	const space_ref_t *sr1 = x1;
	const space_ref_t *sr2 = x2;

	if (sr1->sr_offset < sr2->sr_offset)
		return (-1);
	if (sr1->sr_offset > sr2->sr_offset)
		return (1);

	if (sr1 < sr2)
		return (-1);
	if (sr1 > sr2)
		return (1);

	return (0);
}

void
space_map_ref_create(avl_tree_t *t)
{
	avl_create(t, space_map_ref_compare,
	    sizeof (space_ref_t), offsetof(space_ref_t, sr_node));
}

void
space_map_ref_destroy(avl_tree_t *t)
{
	space_ref_t *sr;
	void *cookie = NULL;

	while ((sr = avl_destroy_nodes(t, &cookie)) != NULL)
		kmem_free(sr, sizeof (*sr));

	avl_destroy(t);
}

static void
space_map_ref_add_node(avl_tree_t *t, uint64_t offset, int64_t refcnt)
{
	space_ref_t *sr;

	sr = kmem_alloc(sizeof (*sr), KM_PUSHPAGE);
	sr->sr_offset = offset;
	sr->sr_refcnt = refcnt;

	avl_add(t, sr);
}

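/*
 * Record a segment [start, end) with the given reference count delta by
 * adding +refcnt at start and -refcnt at end.  Walking the tree in offset
 * order and summing the deltas then yields the reference count at any
 * point, which is how space_map_ref_generate_map() reconstructs segments.
 */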
void
space_map_ref_add_seg(avl_tree_t *t, uint64_t start, uint64_t end,
    int64_t refcnt)
{
	space_map_ref_add_node(t, start, refcnt);
	space_map_ref_add_node(t, end, -refcnt);
}

/*
 * Convert (or add) a space map into a reference tree.
 */
void
space_map_ref_add_map(avl_tree_t *t, space_map_t *sm, int64_t refcnt)
{
	space_seg_t *ss;

	ASSERT(MUTEX_HELD(sm->sm_lock));

	for (ss = avl_first(&sm->sm_root); ss; ss = AVL_NEXT(&sm->sm_root, ss))
		space_map_ref_add_seg(t, ss->ss_start, ss->ss_end, refcnt);
}

/*
 * Convert a reference tree into a space map. The space map will contain
 * all members of the reference tree for which refcnt >= minref.
 */
void
space_map_ref_generate_map(avl_tree_t *t, space_map_t *sm, int64_t minref)
{
	uint64_t start = -1ULL;
	int64_t refcnt = 0;
	space_ref_t *sr;

	ASSERT(MUTEX_HELD(sm->sm_lock));

	space_map_vacate(sm, NULL, NULL);

	for (sr = avl_first(t); sr != NULL; sr = AVL_NEXT(t, sr)) {
		refcnt += sr->sr_refcnt;
		if (refcnt >= minref) {
			if (start == -1ULL) {
				start = sr->sr_offset;
			}
		} else {
			if (start != -1ULL) {
				uint64_t end = sr->sr_offset;
				ASSERT(start <= end);
				if (end > start)
					space_map_add(sm, start, end - start);
				start = -1ULL;
			}
		}
	}
	ASSERT(refcnt == 0);
	ASSERT(start == -1ULL);
}