/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2012, 2016 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dnode.h>
#include <sys/dsl_pool.h>
#include <sys/zio.h>
#include <sys/space_map.h>
#include <sys/refcount.h>
#include <sys/zfeature.h>

/*
 * The data for a given space map can be kept on blocks of any size.
 * Larger blocks entail fewer I/O operations, but they also cause the
 * DMU to keep more data in core, and to waste more I/O bandwidth
 * when only a few blocks have changed since the last transaction group.
 */
int space_map_blksz = (1 << 12);

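/*
 * Illustrative arithmetic (not part of the original file): at the 4K
 * default, each block holds 4096 / sizeof (uint64_t) = 512 one-word
 * entries, so a 1M space map occupies 256 blocks and a full load via
 * space_map_iterate() below proceeds in 4K reads.
 */
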
/*
 * Iterate through the space map, invoking the callback on each (non-debug)
 * space map entry.
 */
int
space_map_iterate(space_map_t *sm, sm_cb_t callback, void *arg)
{
	uint64_t *entry, *entry_map, *entry_map_end;
	uint64_t bufsize, size, offset, end;
	int error = 0;

	end = space_map_length(sm);

	bufsize = MAX(sm->sm_blksz, SPA_MINBLOCKSIZE);
	entry_map = vmem_alloc(bufsize, KM_SLEEP);

	if (end > bufsize) {
		dmu_prefetch(sm->sm_os, space_map_object(sm), 0, bufsize,
		    end - bufsize, ZIO_PRIORITY_SYNC_READ);
	}

	for (offset = 0; offset < end && error == 0; offset += bufsize) {
		size = MIN(end - offset, bufsize);
		VERIFY(P2PHASE(size, sizeof (uint64_t)) == 0);
		VERIFY(size != 0);
		ASSERT3U(sm->sm_blksz, !=, 0);

		dprintf("object=%llu offset=%llx size=%llx\n",
		    space_map_object(sm), offset, size);

		error = dmu_read(sm->sm_os, space_map_object(sm), offset, size,
		    entry_map, DMU_READ_PREFETCH);
		if (error != 0)
			break;

		entry_map_end = entry_map + (size / sizeof (uint64_t));
		for (entry = entry_map; entry < entry_map_end && error == 0;
		    entry++) {
			uint64_t e = *entry;
			uint64_t offset, size;

			if (SM_DEBUG_DECODE(e))	/* Skip debug entries */
				continue;

			offset = (SM_OFFSET_DECODE(e) << sm->sm_shift) +
			    sm->sm_start;
			size = SM_RUN_DECODE(e) << sm->sm_shift;

			VERIFY0(P2PHASE(offset, 1ULL << sm->sm_shift));
			VERIFY0(P2PHASE(size, 1ULL << sm->sm_shift));
			VERIFY3U(offset, >=, sm->sm_start);
			VERIFY3U(offset + size, <=, sm->sm_start + sm->sm_size);
			error = callback(SM_TYPE_DECODE(e), offset, size, arg);
		}
	}

	vmem_free(entry_map, bufsize);
	return (error);
}

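/*
 * Usage sketch for space_map_iterate() (illustrative only; "sum_cb" and
 * "sum" are hypothetical names, not part of this file). The callback
 * sees each decoded entry's type, absolute offset, and size in bytes:
 *
 *	static int
 *	sum_cb(maptype_t type, uint64_t offset, uint64_t size, void *arg)
 *	{
 *		uint64_t *sum = arg;
 *
 *		if (type == SM_ALLOC)
 *			*sum += size;
 *		return (0);
 *	}
 *
 *	uint64_t sum = 0;
 *	int err = space_map_iterate(sm, sum_cb, &sum);
 */
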
typedef struct space_map_load_arg {
	space_map_t	*smla_sm;
	range_tree_t	*smla_rt;
	maptype_t	smla_type;
} space_map_load_arg_t;

static int
space_map_load_callback(maptype_t type, uint64_t offset, uint64_t size,
    void *arg)
{
	space_map_load_arg_t *smla = arg;
	if (type == smla->smla_type) {
		VERIFY3U(range_tree_space(smla->smla_rt) + size, <=,
		    smla->smla_sm->sm_size);
		range_tree_add(smla->smla_rt, offset, size);
	} else {
		range_tree_remove(smla->smla_rt, offset, size);
	}

	return (0);
}

/*
 * Load the space map from disk into the specified range tree. Segments of
 * maptype are added to the range tree; all other segments are removed.
 */
int
space_map_load(space_map_t *sm, range_tree_t *rt, maptype_t maptype)
{
	uint64_t space;
	int err;
	space_map_load_arg_t smla;

	VERIFY0(range_tree_space(rt));
	space = space_map_allocated(sm);

	if (maptype == SM_FREE) {
		range_tree_add(rt, sm->sm_start, sm->sm_size);
		space = sm->sm_size - space;
	}

	smla.smla_rt = rt;
	smla.smla_sm = sm;
	smla.smla_type = maptype;
	err = space_map_iterate(sm, space_map_load_callback, &smla);

	if (err == 0) {
		VERIFY3U(range_tree_space(rt), ==, space);
	} else {
		range_tree_vacate(rt, NULL, NULL);
	}

	return (err);
}

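/*
 * Usage sketch for space_map_load() (illustrative, not from this file):
 * with SM_FREE, the entire [sm_start, sm_start + sm_size) region is
 * seeded into the tree and SM_ALLOC entries are then removed from it,
 * so on success the tree holds exactly the free segments:
 *
 *	range_tree_t *rt = ...;		hypothetical empty range tree
 *	int err = space_map_load(sm, rt, SM_FREE);
 *	if (err != 0)
 *		the tree has already been vacated by space_map_load()
 */
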
void
space_map_histogram_clear(space_map_t *sm)
{
	if (sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
		return;

	bzero(sm->sm_phys->smp_histogram, sizeof (sm->sm_phys->smp_histogram));
}

boolean_t
space_map_histogram_verify(space_map_t *sm, range_tree_t *rt)
{
	/*
	 * Verify that the in-core range tree does not have any
	 * ranges smaller than our sm_shift size.
	 */
	for (int i = 0; i < sm->sm_shift; i++) {
		if (rt->rt_histogram[i] != 0)
			return (B_FALSE);
	}
	return (B_TRUE);
}

void
space_map_histogram_add(space_map_t *sm, range_tree_t *rt, dmu_tx_t *tx)
{
	int idx = 0;

	ASSERT(dmu_tx_is_syncing(tx));
	VERIFY3U(space_map_object(sm), !=, 0);

	if (sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
		return;

	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	ASSERT(space_map_histogram_verify(sm, rt));
	/*
	 * Transfer the contents of the range tree histogram to the space
	 * map histogram. The space map histogram contains 32 buckets ranging
	 * from 2^sm_shift to 2^(32+sm_shift-1). The range tree, however,
	 * can represent ranges from 2^0 to 2^63. Since the space map only
	 * cares about allocatable blocks (a minimum of 2^sm_shift), we can
	 * safely ignore all ranges in the range tree smaller than 2^sm_shift.
	 */
	for (int i = sm->sm_shift; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {

		/*
		 * Since the largest histogram bucket in the space map is
		 * 2^(32+sm_shift-1), we need to normalize the values in
		 * the range tree for any bucket larger than that size. For
		 * example, given an sm_shift of 9, ranges larger than 2^40
		 * would get normalized as if they were 1TB ranges. Assuming
		 * the range tree had a count of 5 in the 2^44 (16TB) bucket,
		 * the calculation below would add 5 * 2^4 = 80 to the last
		 * bucket, since each 16TB range counts as sixteen 1TB ranges.
		 */
		ASSERT3U(i, >=, idx + sm->sm_shift);
		sm->sm_phys->smp_histogram[idx] +=
		    rt->rt_histogram[i] << (i - idx - sm->sm_shift);

		/*
		 * Increment the space map's index as long as we haven't
		 * reached the maximum bucket size. Accumulate all ranges
		 * larger than the max bucket size into the last bucket.
		 */
		if (idx < SPACE_MAP_HISTOGRAM_SIZE - 1) {
			ASSERT3U(idx + sm->sm_shift, ==, i);
			idx++;
			ASSERT3U(idx, <, SPACE_MAP_HISTOGRAM_SIZE);
		}
	}
}

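/*
 * Worked example of the transfer above (illustrative, assuming
 * sm_shift == 9): range tree bucket i feeds space map bucket idx with
 * its count scaled by 2^(i - idx - sm_shift).
 *
 *	i = 9  (512B ranges)	-> idx = 0,  scale = 1
 *	i = 10 (1K ranges)	-> idx = 1,  scale = 1
 *	...
 *	i = 40 (1TB ranges)	-> idx = 31, scale = 1
 *	i = 44 (16TB ranges)	-> idx = 31, scale = 2^4 = 16
 */
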
uint64_t
space_map_entries(space_map_t *sm, range_tree_t *rt)
{
	avl_tree_t *t = &rt->rt_root;
	range_seg_t *rs;
	uint64_t size, entries;

	/*
	 * Every space map has a debug entry, so account for it here.
	 */
	entries = 1;

	/*
	 * Traverse the range tree and calculate the number of space map
	 * entries that would be required to write out the range tree.
	 */
	for (rs = avl_first(t); rs != NULL; rs = AVL_NEXT(t, rs)) {
		size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
		entries += howmany(size, SM_RUN_MAX);
	}
	return (entries);
}

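/*
 * Worked example (illustrative; assumes SM_RUN_MAX == 32768, i.e. the
 * 15-bit run field of the on-disk entry format): with sm_shift == 9, a
 * 64M segment spans 64M >> 9 = 131072 units and therefore costs
 * howmany(131072, 32768) = 4 entries, in addition to the one debug
 * entry counted above.
 */
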
void
space_map_write(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
    dmu_tx_t *tx)
{
	objset_t *os = sm->sm_os;
	spa_t *spa = dmu_objset_spa(os);
	avl_tree_t *t = &rt->rt_root;
	range_seg_t *rs;
	uint64_t size, total, rt_space, nodes;
	uint64_t *entry, *entry_map, *entry_map_end;
	uint64_t expected_entries, actual_entries = 1;

	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
	VERIFY3U(space_map_object(sm), !=, 0);
	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	/*
	 * This field is no longer necessary since the in-core space map
	 * now contains the object number, but it is maintained for
	 * backwards compatibility.
	 */
	sm->sm_phys->smp_object = sm->sm_object;

	if (range_tree_space(rt) == 0) {
		VERIFY3U(sm->sm_object, ==, sm->sm_phys->smp_object);
		return;
	}

	if (maptype == SM_ALLOC)
		sm->sm_phys->smp_alloc += range_tree_space(rt);
	else
		sm->sm_phys->smp_alloc -= range_tree_space(rt);

	expected_entries = space_map_entries(sm, rt);

	entry_map = vmem_alloc(sm->sm_blksz, KM_SLEEP);
	entry_map_end = entry_map + (sm->sm_blksz / sizeof (uint64_t));
	entry = entry_map;

	*entry++ = SM_DEBUG_ENCODE(1) |
	    SM_DEBUG_ACTION_ENCODE(maptype) |
	    SM_DEBUG_SYNCPASS_ENCODE(spa_sync_pass(spa)) |
	    SM_DEBUG_TXG_ENCODE(dmu_tx_get_txg(tx));

	total = 0;
	nodes = avl_numnodes(&rt->rt_root);
	rt_space = range_tree_space(rt);
	for (rs = avl_first(t); rs != NULL; rs = AVL_NEXT(t, rs)) {
		uint64_t start;

		size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
		start = (rs->rs_start - sm->sm_start) >> sm->sm_shift;

		total += size << sm->sm_shift;

		while (size != 0) {
			uint64_t run_len;

			run_len = MIN(size, SM_RUN_MAX);

			if (entry == entry_map_end) {
				dmu_write(os, space_map_object(sm),
				    sm->sm_phys->smp_objsize, sm->sm_blksz,
				    entry_map, tx);
				sm->sm_phys->smp_objsize += sm->sm_blksz;
				entry = entry_map;
			}

			*entry++ = SM_OFFSET_ENCODE(start) |
			    SM_TYPE_ENCODE(maptype) |
			    SM_RUN_ENCODE(run_len);

			start += run_len;
			size -= run_len;
			actual_entries++;
		}
	}

	if (entry != entry_map) {
		size = (entry - entry_map) * sizeof (uint64_t);
		dmu_write(os, space_map_object(sm), sm->sm_phys->smp_objsize,
		    size, entry_map, tx);
		sm->sm_phys->smp_objsize += size;
	}
	ASSERT3U(expected_entries, ==, actual_entries);

	/*
	 * Ensure that the space_map's accounting wasn't changed
	 * while we were in the middle of writing it out.
	 */
	VERIFY3U(nodes, ==, avl_numnodes(&rt->rt_root));
	VERIFY3U(range_tree_space(rt), ==, rt_space);
	VERIFY3U(range_tree_space(rt), ==, total);

	vmem_free(entry_map, sm->sm_blksz);
}

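/*
 * Usage sketch for space_map_write() (illustrative, not from this
 * file): in sync context a caller typically appends the txg's
 * allocations and frees, then refreshes the in-core view. "allocs"
 * and "frees" are hypothetical range trees.
 *
 *	space_map_write(sm, allocs, SM_ALLOC, tx);
 *	space_map_write(sm, frees, SM_FREE, tx);
 *	space_map_update(sm);
 */
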
static int
space_map_open_impl(space_map_t *sm)
{
	int error;
	u_longlong_t blocks;

	error = dmu_bonus_hold(sm->sm_os, sm->sm_object, sm, &sm->sm_dbuf);
	if (error)
		return (error);

	dmu_object_size_from_db(sm->sm_dbuf, &sm->sm_blksz, &blocks);
	sm->sm_phys = sm->sm_dbuf->db_data;
	return (0);
}

int
space_map_open(space_map_t **smp, objset_t *os, uint64_t object,
    uint64_t start, uint64_t size, uint8_t shift)
{
	space_map_t *sm;
	int error;

	ASSERT(*smp == NULL);
	ASSERT(os != NULL);
	ASSERT(object != 0);

	sm = kmem_alloc(sizeof (space_map_t), KM_SLEEP);

	sm->sm_start = start;
	sm->sm_size = size;
	sm->sm_shift = shift;
	sm->sm_os = os;
	sm->sm_object = object;
	sm->sm_length = 0;
	sm->sm_alloc = 0;
	sm->sm_blksz = 0;
	sm->sm_dbuf = NULL;
	sm->sm_phys = NULL;

	error = space_map_open_impl(sm);
	if (error != 0) {
		space_map_close(sm);
		return (error);
	}

	*smp = sm;

	return (0);
}

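/*
 * Life-cycle sketch (illustrative; "os", "object", "start", "size" and
 * "shift" are assumed to come from the caller):
 *
 *	space_map_t *sm = NULL;
 *	VERIFY0(space_map_open(&sm, os, object, start, size, shift));
 *	space_map_update(sm);	capture the synced length and alloc
 *	...			e.g. space_map_load() / space_map_write()
 *	space_map_close(sm);
 */
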
void
space_map_close(space_map_t *sm)
{
	if (sm == NULL)
		return;

	if (sm->sm_dbuf != NULL)
		dmu_buf_rele(sm->sm_dbuf, sm);
	sm->sm_dbuf = NULL;
	sm->sm_phys = NULL;

	kmem_free(sm, sizeof (*sm));
}

void
space_map_truncate(space_map_t *sm, dmu_tx_t *tx)
{
	objset_t *os = sm->sm_os;
	spa_t *spa = dmu_objset_spa(os);
	dmu_object_info_t doi;

	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
	ASSERT(dmu_tx_is_syncing(tx));
	VERIFY3U(dmu_tx_get_txg(tx), <=, spa_final_dirty_txg(spa));

	dmu_object_info_from_db(sm->sm_dbuf, &doi);

	/*
	 * If the space map has the wrong bonus size (because
	 * SPA_FEATURE_SPACEMAP_HISTOGRAM has recently been enabled), or
	 * the wrong block size (because space_map_blksz has changed),
	 * free and re-allocate its object with the updated sizes.
	 *
	 * Otherwise, just truncate the current object.
	 */
	if ((spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
	    doi.doi_bonus_size != sizeof (space_map_phys_t)) ||
	    doi.doi_data_block_size != space_map_blksz) {
		zfs_dbgmsg("txg %llu, spa %s, sm %p, reallocating "
		    "object[%llu]: old bonus %u, old blocksz %u",
		    dmu_tx_get_txg(tx), spa_name(spa), sm, sm->sm_object,
		    doi.doi_bonus_size, doi.doi_data_block_size);

		space_map_free(sm, tx);
		dmu_buf_rele(sm->sm_dbuf, sm);

		sm->sm_object = space_map_alloc(sm->sm_os, tx);
		VERIFY0(space_map_open_impl(sm));
	} else {
		VERIFY0(dmu_free_range(os, space_map_object(sm), 0, -1ULL, tx));

		/*
		 * If the spacemap is reallocated, its histogram
		 * will be reset. Do the same in the common case so that
		 * bugs related to the uncommon case do not go unnoticed.
		 */
		bzero(sm->sm_phys->smp_histogram,
		    sizeof (sm->sm_phys->smp_histogram));
	}

	dmu_buf_will_dirty(sm->sm_dbuf, tx);
	sm->sm_phys->smp_objsize = 0;
	sm->sm_phys->smp_alloc = 0;
}

/*
 * Update the in-core space_map allocation and length values.
 */
void
space_map_update(space_map_t *sm)
{
	if (sm == NULL)
		return;

	sm->sm_alloc = sm->sm_phys->smp_alloc;
	sm->sm_length = sm->sm_phys->smp_objsize;
}

uint64_t
space_map_alloc(objset_t *os, dmu_tx_t *tx)
{
	spa_t *spa = dmu_objset_spa(os);
	uint64_t object;
	int bonuslen;

	if (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
		spa_feature_incr(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM, tx);
		bonuslen = sizeof (space_map_phys_t);
		ASSERT3U(bonuslen, <=, dmu_bonus_max());
	} else {
		bonuslen = SPACE_MAP_SIZE_V0;
	}

	object = dmu_object_alloc(os,
	    DMU_OT_SPACE_MAP, space_map_blksz,
	    DMU_OT_SPACE_MAP_HEADER, bonuslen, tx);

	return (object);
}

void
space_map_free_obj(objset_t *os, uint64_t smobj, dmu_tx_t *tx)
{
	spa_t *spa = dmu_objset_spa(os);
	if (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
		dmu_object_info_t doi;

		VERIFY0(dmu_object_info(os, smobj, &doi));
		if (doi.doi_bonus_size != SPACE_MAP_SIZE_V0) {
			spa_feature_decr(spa,
			    SPA_FEATURE_SPACEMAP_HISTOGRAM, tx);
		}
	}

	VERIFY0(dmu_object_free(os, smobj, tx));
}

void
space_map_free(space_map_t *sm, dmu_tx_t *tx)
{
	if (sm == NULL)
		return;

	space_map_free_obj(sm->sm_os, space_map_object(sm), tx);
	sm->sm_object = 0;
}

uint64_t
space_map_object(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_object : 0);
}

/*
 * Returns the already synced, on-disk allocated space.
 */
uint64_t
space_map_allocated(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_alloc : 0);
}

/*
 * Returns the already synced, on-disk length.
 */
uint64_t
space_map_length(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_length : 0);
}

/*
 * Returns the allocated space that is currently syncing.
 */
int64_t
space_map_alloc_delta(space_map_t *sm)
{
	if (sm == NULL)
		return (0);
	ASSERT(sm->sm_dbuf != NULL);
	return (sm->sm_phys->smp_alloc - space_map_allocated(sm));
}