/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dnode.h>
#include <sys/dsl_pool.h>
#include <sys/zio.h>
#include <sys/space_map.h>
#include <sys/refcount.h>
#include <sys/zfeature.h>

/*
 * The data for a given space map can be kept in blocks of any size.
 * Larger blocks entail fewer i/o operations, but they also cause the
 * DMU to keep more data in-core and to waste more i/o bandwidth
 * when only a few blocks have changed since the last transaction group.
 */
int space_map_blksz = (1 << 12);

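/*
 * For orientation, a sketch of the on-disk entry encoding that
 * space_map_load() decodes and space_map_write() emits below. The
 * authoritative bit-field macros (SM_DEBUG_*, SM_OFFSET_*, SM_TYPE_*,
 * SM_RUN_*) live in sys/space_map.h; treat the layout shown here as
 * illustrative rather than normative:
 *
 *	non-debug entry (bit 63 clear):
 *
 *	     63       62..16        15       14..0
 *	    +---+----------------+------+------------+
 *	    | 0 |  offset (47b)  | type |  run (15b) |
 *	    +---+----------------+------+------------+
 *
 *	debug entry (bit 63 set): records the action (alloc/free), the
 *	sync pass, and the txg of the write that produced it.
 *
 * Offsets and run lengths are expressed in units of (1 << sm_shift)
 * relative to sm_start. Each entry is a uint64_t, so the default 4K
 * block above holds (1 << 12) / sizeof (uint64_t) == 512 entries.
 */
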
/*
 * Load the space map from disk into the specified range tree. Segments of
 * maptype are added to the range tree; all other segment types are removed.
 *
 * Note: space_map_load() will drop sm_lock across dmu_read() calls.
 * The caller must be OK with this.
 */
int
space_map_load(space_map_t *sm, range_tree_t *rt, maptype_t maptype)
{
	uint64_t *entry, *entry_map, *entry_map_end;
	uint64_t bufsize, size, offset, end, space;
	int error = 0;

	ASSERT(MUTEX_HELD(sm->sm_lock));

	end = space_map_length(sm);
	space = space_map_allocated(sm);

	VERIFY0(range_tree_space(rt));

	if (maptype == SM_FREE) {
		range_tree_add(rt, sm->sm_start, sm->sm_size);
		space = sm->sm_size - space;
	}

	bufsize = MAX(sm->sm_blksz, SPA_MINBLOCKSIZE);
	entry_map = zio_buf_alloc(bufsize);

	mutex_exit(sm->sm_lock);
	if (end > bufsize) {
		dmu_prefetch(sm->sm_os, space_map_object(sm), bufsize,
		    end - bufsize);
	}
	mutex_enter(sm->sm_lock);

	for (offset = 0; offset < end; offset += bufsize) {
		size = MIN(end - offset, bufsize);
		VERIFY(P2PHASE(size, sizeof (uint64_t)) == 0);
		VERIFY(size != 0);
		ASSERT3U(sm->sm_blksz, !=, 0);

		dprintf("object=%llu offset=%llx size=%llx\n",
		    space_map_object(sm), offset, size);

		mutex_exit(sm->sm_lock);
		error = dmu_read(sm->sm_os, space_map_object(sm), offset, size,
		    entry_map, DMU_READ_PREFETCH);
		mutex_enter(sm->sm_lock);
		if (error != 0)
			break;

		entry_map_end = entry_map + (size / sizeof (uint64_t));
		for (entry = entry_map; entry < entry_map_end; entry++) {
			uint64_t e = *entry;
			uint64_t offset, size;

			if (SM_DEBUG_DECODE(e))	/* Skip debug entries */
				continue;

			offset = (SM_OFFSET_DECODE(e) << sm->sm_shift) +
			    sm->sm_start;
			size = SM_RUN_DECODE(e) << sm->sm_shift;

			VERIFY0(P2PHASE(offset, 1ULL << sm->sm_shift));
			VERIFY0(P2PHASE(size, 1ULL << sm->sm_shift));
			VERIFY3U(offset, >=, sm->sm_start);
			VERIFY3U(offset + size, <=, sm->sm_start + sm->sm_size);
			if (SM_TYPE_DECODE(e) == maptype) {
				VERIFY3U(range_tree_space(rt) + size, <=,
				    sm->sm_size);
				range_tree_add(rt, offset, size);
			} else {
				range_tree_remove(rt, offset, size);
			}
		}
	}

	if (error == 0)
		VERIFY3U(range_tree_space(rt), ==, space);
	else
		range_tree_vacate(rt, NULL, NULL);

	zio_buf_free(entry_map, bufsize);
	return (error);
}

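/*
 * A minimal usage sketch (hypothetical caller, not taken from this
 * file): callers hold sm_lock across the call and must tolerate it
 * being dropped and reacquired around the dmu_read()s above.
 *
 *	mutex_enter(sm->sm_lock);
 *	error = space_map_load(sm, rt, SM_FREE);
 *	mutex_exit(sm->sm_lock);
 */
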
void
space_map_histogram_clear(space_map_t *sm)
{
	if (sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
		return;

	bzero(sm->sm_phys->smp_histogram, sizeof (sm->sm_phys->smp_histogram));
}

boolean_t
space_map_histogram_verify(space_map_t *sm, range_tree_t *rt)
{
	int i;

	/*
	 * Verify that the in-core range tree does not have any
	 * ranges smaller than our minimum allocatable size,
	 * 2^sm_shift.
	 */
	for (i = 0; i < sm->sm_shift; i++) {
		if (rt->rt_histogram[i] != 0)
			return (B_FALSE);
	}
	return (B_TRUE);
}

void
space_map_histogram_add(space_map_t *sm, range_tree_t *rt, dmu_tx_t *tx)
{
	int idx = 0;
	int i;

	ASSERT(MUTEX_HELD(rt->rt_lock));
	ASSERT(dmu_tx_is_syncing(tx));
	VERIFY3U(space_map_object(sm), !=, 0);

	if (sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
		return;

	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	ASSERT(space_map_histogram_verify(sm, rt));

	/*
	 * Transfer the contents of the range tree histogram to the space
	 * map histogram. The space map histogram contains 32 buckets ranging
	 * from 2^sm_shift to 2^(32+sm_shift-1). The range tree, however,
	 * can represent ranges from 2^0 to 2^63. Since the space map only
	 * cares about allocatable blocks (minimum of sm_shift), we can
	 * safely ignore all ranges in the range tree smaller than 2^sm_shift.
	 */
	for (i = sm->sm_shift; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {

		/*
		 * Since the largest histogram bucket in the space map is
		 * 2^(32+sm_shift-1), we need to normalize the values in
		 * the range tree for any bucket larger than that size. For
		 * example, given an sm_shift of 9, ranges larger than 2^40
		 * would get normalized as if they were 1TB ranges. Assume
		 * the range tree had a count of 5 in the 2^44 (16TB) bucket;
		 * the calculation below would normalize this to
		 * 5 * 2^4 == 80 one-terabyte ranges.
		 */
		ASSERT3U(i, >=, idx + sm->sm_shift);
		sm->sm_phys->smp_histogram[idx] +=
		    rt->rt_histogram[i] << (i - idx - sm->sm_shift);

		/*
		 * Increment the space map's index as long as we haven't
		 * reached the maximum bucket size. Accumulate all ranges
		 * larger than the max bucket size into the last bucket.
		 */
		if (idx < SPACE_MAP_HISTOGRAM_SIZE - 1) {
			ASSERT3U(idx + sm->sm_shift, ==, i);
			idx++;
			ASSERT3U(idx, <, SPACE_MAP_HISTOGRAM_SIZE);
		}
	}
}

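/*
 * Worked example of the folding above, assuming sm_shift == 9:
 * range tree bucket i == 9 feeds space map bucket idx == 0, i == 10
 * feeds idx == 1, and so on up to i == 40 feeding the last bucket,
 * idx == 31. Every bucket with i > 40 also folds into idx == 31,
 * weighted by 2^(i - 40); that weight is exactly the
 * (i - idx - sm_shift) shift applied above.
 */
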
uint64_t
space_map_entries(space_map_t *sm, range_tree_t *rt)
{
	avl_tree_t *t = &rt->rt_root;
	range_seg_t *rs;
	uint64_t size, entries;

	/*
	 * Every space map write includes a single debug entry, so
	 * account for it here.
	 */
	entries = 1;

	/*
	 * Traverse the range tree and calculate the number of space map
	 * entries that would be required to write out the range tree.
	 */
	for (rs = avl_first(t); rs != NULL; rs = AVL_NEXT(t, rs)) {
		size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
		entries += howmany(size, SM_RUN_MAX);
	}
	return (entries);
}

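/*
 * For example, assuming sm_shift == 9 and the traditional 15-bit run
 * field (SM_RUN_MAX == 2^15 - 1 == 32767): a single 32M segment spans
 * (1 << 25) >> 9 == 65536 units, so it costs howmany(65536, 32767)
 * == 3 entries, for a total of 4 once the debug entry is counted.
 */
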
/*
 * Note: space_map_write() will drop the range tree's rt_lock across
 * dmu_write() calls.
 */
void
space_map_write(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
    dmu_tx_t *tx)
{
	objset_t *os = sm->sm_os;
	spa_t *spa = dmu_objset_spa(os);
	avl_tree_t *t = &rt->rt_root;
	range_seg_t *rs;
	uint64_t size, total, rt_space, nodes;
	uint64_t *entry, *entry_map, *entry_map_end;
	uint64_t expected_entries, actual_entries = 1;

	ASSERT(MUTEX_HELD(rt->rt_lock));
	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
	VERIFY3U(space_map_object(sm), !=, 0);
	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	/*
	 * This field is no longer necessary since the in-core space map
	 * now contains the object number but is maintained for backwards
	 * compatibility.
	 */
	sm->sm_phys->smp_object = sm->sm_object;

	if (range_tree_space(rt) == 0) {
		VERIFY3U(sm->sm_object, ==, sm->sm_phys->smp_object);
		return;
	}

	if (maptype == SM_ALLOC)
		sm->sm_phys->smp_alloc += range_tree_space(rt);
	else
		sm->sm_phys->smp_alloc -= range_tree_space(rt);

	expected_entries = space_map_entries(sm, rt);

	entry_map = zio_buf_alloc(sm->sm_blksz);
	entry_map_end = entry_map + (sm->sm_blksz / sizeof (uint64_t));
	entry = entry_map;

	*entry++ = SM_DEBUG_ENCODE(1) |
	    SM_DEBUG_ACTION_ENCODE(maptype) |
	    SM_DEBUG_SYNCPASS_ENCODE(spa_sync_pass(spa)) |
	    SM_DEBUG_TXG_ENCODE(dmu_tx_get_txg(tx));

	total = 0;
	nodes = avl_numnodes(&rt->rt_root);
	rt_space = range_tree_space(rt);
	for (rs = avl_first(t); rs != NULL; rs = AVL_NEXT(t, rs)) {
		uint64_t start;

		size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
		start = (rs->rs_start - sm->sm_start) >> sm->sm_shift;

		total += size << sm->sm_shift;

		while (size != 0) {
			uint64_t run_len;

			run_len = MIN(size, SM_RUN_MAX);

			if (entry == entry_map_end) {
				mutex_exit(rt->rt_lock);
				dmu_write(os, space_map_object(sm),
				    sm->sm_phys->smp_objsize, sm->sm_blksz,
				    entry_map, tx);
				mutex_enter(rt->rt_lock);
				sm->sm_phys->smp_objsize += sm->sm_blksz;
				entry = entry_map;
			}

			*entry++ = SM_OFFSET_ENCODE(start) |
			    SM_TYPE_ENCODE(maptype) |
			    SM_RUN_ENCODE(run_len);

			start += run_len;
			size -= run_len;
			actual_entries++;
		}
	}

	if (entry != entry_map) {
		size = (entry - entry_map) * sizeof (uint64_t);
		mutex_exit(rt->rt_lock);
		dmu_write(os, space_map_object(sm), sm->sm_phys->smp_objsize,
		    size, entry_map, tx);
		mutex_enter(rt->rt_lock);
		sm->sm_phys->smp_objsize += size;
	}
	ASSERT3U(expected_entries, ==, actual_entries);

	/*
	 * Ensure that the space_map's accounting wasn't changed
	 * while we were in the middle of writing it out.
	 */
	VERIFY3U(nodes, ==, avl_numnodes(&rt->rt_root));
	VERIFY3U(range_tree_space(rt), ==, rt_space);
	VERIFY3U(range_tree_space(rt), ==, total);

	zio_buf_free(entry_map, sm->sm_blksz);
}

static int
space_map_open_impl(space_map_t *sm)
{
	int error;
	u_longlong_t blocks;

	error = dmu_bonus_hold(sm->sm_os, sm->sm_object, sm, &sm->sm_dbuf);
	if (error)
		return (error);

	dmu_object_size_from_db(sm->sm_dbuf, &sm->sm_blksz, &blocks);
	sm->sm_phys = sm->sm_dbuf->db_data;
	return (0);
}

int
space_map_open(space_map_t **smp, objset_t *os, uint64_t object,
    uint64_t start, uint64_t size, uint8_t shift, kmutex_t *lp)
{
	space_map_t *sm;
	int error;

	ASSERT(*smp == NULL);
	ASSERT(os != NULL);
	ASSERT(object != 0);

	sm = kmem_alloc(sizeof (space_map_t), KM_SLEEP);

	sm->sm_start = start;
	sm->sm_size = size;
	sm->sm_shift = shift;
	sm->sm_lock = lp;
	sm->sm_os = os;
	sm->sm_object = object;
	sm->sm_length = 0;
	sm->sm_alloc = 0;
	sm->sm_blksz = 0;
	sm->sm_dbuf = NULL;
	sm->sm_phys = NULL;

	error = space_map_open_impl(sm);
	if (error != 0) {
		space_map_close(sm);
		return (error);
	}

	*smp = sm;

	return (0);
}

void
space_map_close(space_map_t *sm)
{
	if (sm == NULL)
		return;

	if (sm->sm_dbuf != NULL)
		dmu_buf_rele(sm->sm_dbuf, sm);
	sm->sm_dbuf = NULL;
	sm->sm_phys = NULL;

	kmem_free(sm, sizeof (*sm));
}

void
space_map_truncate(space_map_t *sm, dmu_tx_t *tx)
{
	objset_t *os = sm->sm_os;
	spa_t *spa = dmu_objset_spa(os);
	dmu_object_info_t doi;

	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
	ASSERT(dmu_tx_is_syncing(tx));

	dmu_object_info_from_db(sm->sm_dbuf, &doi);

	/*
	 * If the space map has the wrong bonus size (because
	 * SPA_FEATURE_SPACEMAP_HISTOGRAM has recently been enabled), or
	 * the wrong block size (because space_map_blksz has changed),
	 * free and re-allocate its object with the updated sizes.
	 *
	 * Otherwise, just truncate the current object.
	 */
	if ((spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
	    doi.doi_bonus_size != sizeof (space_map_phys_t)) ||
	    doi.doi_data_block_size != space_map_blksz) {
		zfs_dbgmsg("txg %llu, spa %s, reallocating: "
		    "old bonus %llu, old blocksz %u", dmu_tx_get_txg(tx),
		    spa_name(spa), doi.doi_bonus_size, doi.doi_data_block_size);

		space_map_free(sm, tx);
		dmu_buf_rele(sm->sm_dbuf, sm);

		sm->sm_object = space_map_alloc(sm->sm_os, tx);
		VERIFY0(space_map_open_impl(sm));
	} else {
		VERIFY0(dmu_free_range(os, space_map_object(sm), 0, -1ULL, tx));

		/*
		 * If the space map is reallocated, its histogram
		 * will be reset. Do the same in the common case so that
		 * bugs related to the uncommon case do not go unnoticed.
		 */
		bzero(sm->sm_phys->smp_histogram,
		    sizeof (sm->sm_phys->smp_histogram));
	}

	dmu_buf_will_dirty(sm->sm_dbuf, tx);
	sm->sm_phys->smp_objsize = 0;
	sm->sm_phys->smp_alloc = 0;
}

/*
 * Update the in-core space_map allocation and length values.
 */
void
space_map_update(space_map_t *sm)
{
	if (sm == NULL)
		return;

	ASSERT(MUTEX_HELD(sm->sm_lock));

	sm->sm_alloc = sm->sm_phys->smp_alloc;
	sm->sm_length = sm->sm_phys->smp_objsize;
}

uint64_t
space_map_alloc(objset_t *os, dmu_tx_t *tx)
{
	spa_t *spa = dmu_objset_spa(os);
	uint64_t object;
	int bonuslen;

	if (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
		spa_feature_incr(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM, tx);
		bonuslen = sizeof (space_map_phys_t);
		ASSERT3U(bonuslen, <=, dmu_bonus_max());
	} else {
		bonuslen = SPACE_MAP_SIZE_V0;
	}

	object = dmu_object_alloc(os,
	    DMU_OT_SPACE_MAP, space_map_blksz,
	    DMU_OT_SPACE_MAP_HEADER, bonuslen, tx);

	return (object);
}

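/*
 * A hypothetical pairing of the two entry points above (illustrative
 * only; real callers such as the metaslab code add their own
 * bookkeeping and error handling):
 *
 *	object = space_map_alloc(os, tx);
 *	error = space_map_open(&sm, os, object, start, size, shift, lp);
 */
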
void
space_map_free(space_map_t *sm, dmu_tx_t *tx)
{
	spa_t *spa;

	if (sm == NULL)
		return;

	spa = dmu_objset_spa(sm->sm_os);
	if (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
		dmu_object_info_t doi;

		dmu_object_info_from_db(sm->sm_dbuf, &doi);
		if (doi.doi_bonus_size != SPACE_MAP_SIZE_V0) {
			VERIFY(spa_feature_is_active(spa,
			    SPA_FEATURE_SPACEMAP_HISTOGRAM));
			spa_feature_decr(spa,
			    SPA_FEATURE_SPACEMAP_HISTOGRAM, tx);
		}
	}

	VERIFY3U(dmu_object_free(sm->sm_os, space_map_object(sm), tx), ==, 0);
	sm->sm_object = 0;
}

uint64_t
space_map_object(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_object : 0);
}

/*
 * Returns the already synced, on-disk allocated space.
 */
uint64_t
space_map_allocated(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_alloc : 0);
}

/*
 * Returns the already synced, on-disk length.
 */
uint64_t
space_map_length(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_length : 0);
}

/*
 * Returns the allocated space that is currently syncing.
 */
int64_t
space_map_alloc_delta(space_map_t *sm)
{
	if (sm == NULL)
		return (0);
	ASSERT(sm->sm_dbuf != NULL);
	return (sm->sm_phys->smp_alloc - space_map_allocated(sm));
}