/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2012, 2016 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dnode.h>
#include <sys/dsl_pool.h>
#include <sys/zio.h>
#include <sys/space_map.h>
#include <sys/refcount.h>
#include <sys/zfeature.h>

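/*
 * On-disk format sketch (see sys/space_map.h for the authoritative
 * definitions; the widths below reflect the one-word format this file
 * reads and writes). Every entry is a single uint64_t:
 *
 *	debug entry:	bit 63 = 1, bits 62-60 = action (maptype),
 *			bits 59-50 = sync pass, bits 49-0 = txg
 *	normal entry:	bit 63 = 0, bits 62-16 = offset, bit 15 = type
 *			(SM_ALLOC or SM_FREE), bits 14-0 = run length - 1
 *
 * Offsets and run lengths are expressed in units of 2^sm_shift bytes,
 * with offsets relative to sm_start, which is how a 47-bit field can
 * cover very large vdevs.
 */
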
/*
 * The data for a given space map can be kept on blocks of any size.
 * Larger blocks entail fewer i/o operations, but they also cause the
 * DMU to keep more data in-core, and also to waste more i/o bandwidth
 * when only a few blocks have changed since the last transaction group.
 */
int space_map_blksz = (1 << 12);
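/*
 * For a sense of scale: at the default 1 << 12 (4K) block size, each
 * block holds 4096 / sizeof (uint64_t) = 512 entries, so a range tree
 * with a few thousand segments syncs out in only a handful of blocks.
 */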

/*
 * Load the space map from disk into the specified range tree. Segments
 * of maptype are added to the range tree; other segment types are
 * removed.
 *
 * Note: space_map_load() will drop sm_lock across dmu_read() calls.
 * The caller must be OK with this.
 */
int
space_map_load(space_map_t *sm, range_tree_t *rt, maptype_t maptype)
{
        uint64_t *entry, *entry_map, *entry_map_end;
        uint64_t bufsize, size, offset, end, space;
        int error = 0;

        ASSERT(MUTEX_HELD(sm->sm_lock));

        end = space_map_length(sm);
        space = space_map_allocated(sm);

        VERIFY0(range_tree_space(rt));

        /*
         * To load free space, start from a full range and let the
         * SM_ALLOC entries below carve out the allocated pieces.
         */
        if (maptype == SM_FREE) {
                range_tree_add(rt, sm->sm_start, sm->sm_size);
                space = sm->sm_size - space;
        }

        bufsize = MAX(sm->sm_blksz, SPA_MINBLOCKSIZE);
        entry_map = vmem_alloc(bufsize, KM_SLEEP);

        mutex_exit(sm->sm_lock);
        if (end > bufsize) {
                dmu_prefetch(sm->sm_os, space_map_object(sm), 0, bufsize,
                    end - bufsize, ZIO_PRIORITY_SYNC_READ);
        }
        mutex_enter(sm->sm_lock);

        for (offset = 0; offset < end; offset += bufsize) {
                size = MIN(end - offset, bufsize);
                VERIFY(P2PHASE(size, sizeof (uint64_t)) == 0);
                VERIFY(size != 0);
                ASSERT3U(sm->sm_blksz, !=, 0);

                dprintf("object=%llu offset=%llx size=%llx\n",
                    space_map_object(sm), offset, size);

                mutex_exit(sm->sm_lock);
                error = dmu_read(sm->sm_os, space_map_object(sm), offset, size,
                    entry_map, DMU_READ_PREFETCH);
                mutex_enter(sm->sm_lock);
                if (error != 0)
                        break;

                entry_map_end = entry_map + (size / sizeof (uint64_t));
                for (entry = entry_map; entry < entry_map_end; entry++) {
                        uint64_t e = *entry;
                        uint64_t offset, size;

                        if (SM_DEBUG_DECODE(e))	/* Skip debug entries */
                                continue;

                        offset = (SM_OFFSET_DECODE(e) << sm->sm_shift) +
                            sm->sm_start;
                        size = SM_RUN_DECODE(e) << sm->sm_shift;

                        VERIFY0(P2PHASE(offset, 1ULL << sm->sm_shift));
                        VERIFY0(P2PHASE(size, 1ULL << sm->sm_shift));
                        VERIFY3U(offset, >=, sm->sm_start);
                        VERIFY3U(offset + size, <=, sm->sm_start + sm->sm_size);
                        if (SM_TYPE_DECODE(e) == maptype) {
                                VERIFY3U(range_tree_space(rt) + size, <=,
                                    sm->sm_size);
                                range_tree_add(rt, offset, size);
                        } else {
                                range_tree_remove(rt, offset, size);
                        }
                }
        }

        if (error == 0)
                VERIFY3U(range_tree_space(rt), ==, space);
        else
                range_tree_vacate(rt, NULL, NULL);

        vmem_free(entry_map, bufsize);
        return (error);
}

void
space_map_histogram_clear(space_map_t *sm)
{
        if (sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
                return;

        bzero(sm->sm_phys->smp_histogram, sizeof (sm->sm_phys->smp_histogram));
}

boolean_t
space_map_histogram_verify(space_map_t *sm, range_tree_t *rt)
{
        /*
         * Verify that the in-core range tree does not have any
         * ranges smaller than our sm_shift size.
         */
        for (int i = 0; i < sm->sm_shift; i++) {
                if (rt->rt_histogram[i] != 0)
                        return (B_FALSE);
        }
        return (B_TRUE);
}

void
space_map_histogram_add(space_map_t *sm, range_tree_t *rt, dmu_tx_t *tx)
{
        int idx = 0;

        ASSERT(MUTEX_HELD(rt->rt_lock));
        ASSERT(dmu_tx_is_syncing(tx));
        VERIFY3U(space_map_object(sm), !=, 0);

        if (sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
                return;

        dmu_buf_will_dirty(sm->sm_dbuf, tx);

        ASSERT(space_map_histogram_verify(sm, rt));
        /*
         * Transfer the content of the range tree histogram to the space
         * map histogram. The space map histogram contains 32 buckets
         * ranging from 2^sm_shift to 2^(32+sm_shift-1). The range tree,
         * however, can represent ranges from 2^0 to 2^63. Since the space
         * map only cares about allocatable blocks (minimum of sm_shift)
         * we can safely ignore all ranges in the range tree smaller than
         * sm_shift.
         */
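        /*
         * Concretely (assuming sm_shift == 9): range tree bucket i == 9
         * (512-byte ranges) feeds space map bucket idx == 0, i == 40
         * feeds idx == 31, and every bucket past i == 40 also folds into
         * idx == 31, scaled by the shift in the loop below.
         */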
        for (int i = sm->sm_shift; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {

                /*
                 * Since the largest histogram bucket in the space map is
                 * 2^(32+sm_shift-1), we need to normalize the values in
                 * the range tree for any bucket larger than that size. For
                 * example, given an sm_shift of 9, ranges larger than 2^40
                 * would get normalized as if they were 1TB ranges. If the
                 * range tree had a count of 5 in the 2^44 (16TB) bucket,
                 * the calculation below would normalize it to 5 * 2^4 (80).
                 */
                ASSERT3U(i, >=, idx + sm->sm_shift);
                sm->sm_phys->smp_histogram[idx] +=
                    rt->rt_histogram[i] << (i - idx - sm->sm_shift);

                /*
                 * Increment the space map's index as long as we haven't
                 * reached the maximum bucket size. Accumulate all ranges
                 * larger than the max bucket size into the last bucket.
                 */
                if (idx < SPACE_MAP_HISTOGRAM_SIZE - 1) {
                        ASSERT3U(idx + sm->sm_shift, ==, i);
                        idx++;
                        ASSERT3U(idx, <, SPACE_MAP_HISTOGRAM_SIZE);
                }
        }
}

uint64_t
space_map_entries(space_map_t *sm, range_tree_t *rt)
{
        avl_tree_t *t = &rt->rt_root;
        range_seg_t *rs;
        uint64_t size, entries;

        /*
         * All space maps always have a debug entry, so account for it here.
         */
        entries = 1;

        /*
         * Traverse the range tree and calculate the number of space map
         * entries that would be required to write out the range tree.
         */
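        /*
         * A worked example under the one-word format, where SM_RUN_MAX
         * is 2^15 units: with sm_shift == 9, a single 1GB segment spans
         * 2^30 >> 9 == 2^21 units, so howmany(2^21, 2^15) == 64 entries.
         */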
        for (rs = avl_first(t); rs != NULL; rs = AVL_NEXT(t, rs)) {
                size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
                entries += howmany(size, SM_RUN_MAX);
        }
        return (entries);
}

/*
 * Note: space_map_write() will drop sm_lock across dmu_write() calls.
 */
void
space_map_write(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
    dmu_tx_t *tx)
{
        objset_t *os = sm->sm_os;
        spa_t *spa = dmu_objset_spa(os);
        avl_tree_t *t = &rt->rt_root;
        range_seg_t *rs;
        uint64_t size, total, rt_space, nodes;
        uint64_t *entry, *entry_map, *entry_map_end;
        uint64_t expected_entries, actual_entries = 1;	/* debug entry */

        ASSERT(MUTEX_HELD(rt->rt_lock));
        ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
        VERIFY3U(space_map_object(sm), !=, 0);
        dmu_buf_will_dirty(sm->sm_dbuf, tx);

        /*
         * This field is no longer necessary since the in-core space map
         * now contains the object number, but it is maintained for
         * backwards compatibility.
         */
        sm->sm_phys->smp_object = sm->sm_object;

        if (range_tree_space(rt) == 0) {
                VERIFY3U(sm->sm_object, ==, sm->sm_phys->smp_object);
                return;
        }

        if (maptype == SM_ALLOC)
                sm->sm_phys->smp_alloc += range_tree_space(rt);
        else
                sm->sm_phys->smp_alloc -= range_tree_space(rt);

        expected_entries = space_map_entries(sm, rt);

        entry_map = vmem_alloc(sm->sm_blksz, KM_SLEEP);
        entry_map_end = entry_map + (sm->sm_blksz / sizeof (uint64_t));
        entry = entry_map;

        *entry++ = SM_DEBUG_ENCODE(1) |
            SM_DEBUG_ACTION_ENCODE(maptype) |
            SM_DEBUG_SYNCPASS_ENCODE(spa_sync_pass(spa)) |
            SM_DEBUG_TXG_ENCODE(dmu_tx_get_txg(tx));
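        /*
         * The debug entry above is informational only: it records the
         * kind of write (alloc/free), the sync pass, and the txg.
         * space_map_load() recognizes it via SM_DEBUG_DECODE() and
         * skips it.
         */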

        total = 0;
        nodes = avl_numnodes(&rt->rt_root);
        rt_space = range_tree_space(rt);
        for (rs = avl_first(t); rs != NULL; rs = AVL_NEXT(t, rs)) {
                uint64_t start;

                size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
                start = (rs->rs_start - sm->sm_start) >> sm->sm_shift;

                total += size << sm->sm_shift;

                while (size != 0) {
                        uint64_t run_len;

                        run_len = MIN(size, SM_RUN_MAX);

                        /* The buffer is full; write it out and reuse it. */
                        if (entry == entry_map_end) {
                                mutex_exit(rt->rt_lock);
                                dmu_write(os, space_map_object(sm),
                                    sm->sm_phys->smp_objsize, sm->sm_blksz,
                                    entry_map, tx);
                                mutex_enter(rt->rt_lock);
                                sm->sm_phys->smp_objsize += sm->sm_blksz;
                                entry = entry_map;
                        }

                        *entry++ = SM_OFFSET_ENCODE(start) |
                            SM_TYPE_ENCODE(maptype) |
                            SM_RUN_ENCODE(run_len);

                        start += run_len;
                        size -= run_len;
                        actual_entries++;
                }
        }

        /* Flush any partially filled final block. */
        if (entry != entry_map) {
                size = (entry - entry_map) * sizeof (uint64_t);
                mutex_exit(rt->rt_lock);
                dmu_write(os, space_map_object(sm), sm->sm_phys->smp_objsize,
                    size, entry_map, tx);
                mutex_enter(rt->rt_lock);
                sm->sm_phys->smp_objsize += size;
        }
        ASSERT3U(expected_entries, ==, actual_entries);

        /*
         * Ensure that the space_map's accounting wasn't changed
         * while we were in the middle of writing it out.
         */
        VERIFY3U(nodes, ==, avl_numnodes(&rt->rt_root));
        VERIFY3U(range_tree_space(rt), ==, rt_space);
        VERIFY3U(range_tree_space(rt), ==, total);

        vmem_free(entry_map, sm->sm_blksz);
}

static int
space_map_open_impl(space_map_t *sm)
{
        int error;
        u_longlong_t blocks;

        error = dmu_bonus_hold(sm->sm_os, sm->sm_object, sm, &sm->sm_dbuf);
        if (error)
                return (error);

        dmu_object_size_from_db(sm->sm_dbuf, &sm->sm_blksz, &blocks);
        sm->sm_phys = sm->sm_dbuf->db_data;
        return (0);
}

int
space_map_open(space_map_t **smp, objset_t *os, uint64_t object,
    uint64_t start, uint64_t size, uint8_t shift, kmutex_t *lp)
{
        space_map_t *sm;
        int error;

        ASSERT(*smp == NULL);
        ASSERT(os != NULL);
        ASSERT(object != 0);

        sm = kmem_alloc(sizeof (space_map_t), KM_SLEEP);

        sm->sm_start = start;
        sm->sm_size = size;
        sm->sm_shift = shift;
        sm->sm_lock = lp;
        sm->sm_os = os;
        sm->sm_object = object;
        sm->sm_length = 0;
        sm->sm_alloc = 0;
        sm->sm_blksz = 0;
        sm->sm_dbuf = NULL;
        sm->sm_phys = NULL;

        error = space_map_open_impl(sm);
        if (error != 0) {
                space_map_close(sm);
                return (error);
        }

        *smp = sm;

        return (0);
}

void
space_map_close(space_map_t *sm)
{
        if (sm == NULL)
                return;

        if (sm->sm_dbuf != NULL)
                dmu_buf_rele(sm->sm_dbuf, sm);
        sm->sm_dbuf = NULL;
        sm->sm_phys = NULL;

        kmem_free(sm, sizeof (*sm));
}

void
space_map_truncate(space_map_t *sm, dmu_tx_t *tx)
{
        objset_t *os = sm->sm_os;
        spa_t *spa = dmu_objset_spa(os);
        dmu_object_info_t doi;

        ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
        ASSERT(dmu_tx_is_syncing(tx));
        VERIFY3U(dmu_tx_get_txg(tx), <=, spa_final_dirty_txg(spa));

        dmu_object_info_from_db(sm->sm_dbuf, &doi);

        /*
         * If the space map has the wrong bonus size (because
         * SPA_FEATURE_SPACEMAP_HISTOGRAM has recently been enabled), or
         * the wrong block size (because space_map_blksz has changed),
         * free and re-allocate its object with the updated sizes.
         *
         * Otherwise, just truncate the current object.
         */
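        /*
         * For example, a space map created before
         * SPA_FEATURE_SPACEMAP_HISTOGRAM was enabled has only a
         * SPACE_MAP_SIZE_V0 bonus buffer; the first truncate after the
         * feature is enabled takes the reallocation path below so the
         * object gains room for the full space_map_phys_t.
         */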
        if ((spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
            doi.doi_bonus_size != sizeof (space_map_phys_t)) ||
            doi.doi_data_block_size != space_map_blksz) {
                zfs_dbgmsg("txg %llu, spa %s, sm %p, reallocating "
                    "object[%llu]: old bonus %u, old blocksz %u",
                    dmu_tx_get_txg(tx), spa_name(spa), sm, sm->sm_object,
                    doi.doi_bonus_size, doi.doi_data_block_size);

                space_map_free(sm, tx);
                dmu_buf_rele(sm->sm_dbuf, sm);

                sm->sm_object = space_map_alloc(sm->sm_os, tx);
                VERIFY0(space_map_open_impl(sm));
        } else {
                VERIFY0(dmu_free_range(os, space_map_object(sm), 0, -1ULL, tx));

                /*
                 * If the space map is reallocated, its histogram
                 * will be reset. Do the same in the common case so that
                 * bugs related to the uncommon case do not go unnoticed.
                 */
                bzero(sm->sm_phys->smp_histogram,
                    sizeof (sm->sm_phys->smp_histogram));
        }

        dmu_buf_will_dirty(sm->sm_dbuf, tx);
        sm->sm_phys->smp_objsize = 0;
        sm->sm_phys->smp_alloc = 0;
}

/*
 * Update the in-core space_map allocation and length values.
 */
void
space_map_update(space_map_t *sm)
{
        if (sm == NULL)
                return;

        ASSERT(MUTEX_HELD(sm->sm_lock));

        sm->sm_alloc = sm->sm_phys->smp_alloc;
        sm->sm_length = sm->sm_phys->smp_objsize;
}

uint64_t
space_map_alloc(objset_t *os, dmu_tx_t *tx)
{
        spa_t *spa = dmu_objset_spa(os);
        uint64_t object;
        int bonuslen;

        if (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
                spa_feature_incr(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM, tx);
                bonuslen = sizeof (space_map_phys_t);
                ASSERT3U(bonuslen, <=, dmu_bonus_max());
        } else {
                bonuslen = SPACE_MAP_SIZE_V0;
        }

        object = dmu_object_alloc(os,
            DMU_OT_SPACE_MAP, space_map_blksz,
            DMU_OT_SPACE_MAP_HEADER, bonuslen, tx);

        return (object);
}

void
space_map_free(space_map_t *sm, dmu_tx_t *tx)
{
        spa_t *spa;

        if (sm == NULL)
                return;

        spa = dmu_objset_spa(sm->sm_os);
        if (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
                dmu_object_info_t doi;

                dmu_object_info_from_db(sm->sm_dbuf, &doi);
                if (doi.doi_bonus_size != SPACE_MAP_SIZE_V0) {
                        VERIFY(spa_feature_is_active(spa,
                            SPA_FEATURE_SPACEMAP_HISTOGRAM));
                        spa_feature_decr(spa,
                            SPA_FEATURE_SPACEMAP_HISTOGRAM, tx);
                }
        }

        VERIFY3U(dmu_object_free(sm->sm_os, space_map_object(sm), tx), ==, 0);
        sm->sm_object = 0;
}

uint64_t
space_map_object(space_map_t *sm)
{
        return (sm != NULL ? sm->sm_object : 0);
}

/*
 * Returns the already synced, on-disk allocated space.
 */
uint64_t
space_map_allocated(space_map_t *sm)
{
        return (sm != NULL ? sm->sm_alloc : 0);
}

/*
 * Returns the already synced, on-disk length.
 */
uint64_t
space_map_length(space_map_t *sm)
{
        return (sm != NULL ? sm->sm_length : 0);
}

/*
 * Returns the allocated space that is currently syncing.
 */
int64_t
space_map_alloc_delta(space_map_t *sm)
{
        if (sm == NULL)
                return (0);
        ASSERT(sm->sm_dbuf != NULL);
        return (sm->sm_phys->smp_alloc - space_map_allocated(sm));
}
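
/*
 * Illustrative lifecycle (not part of this module; the locals below are
 * hypothetical). A consumer such as the metaslab code typically drives
 * this API as follows:
 *
 *	space_map_t *sm = NULL;
 *	VERIFY0(space_map_open(&sm, os, object, start, size, shift, lock));
 *	mutex_enter(lock);
 *	VERIFY0(space_map_load(sm, rt, SM_FREE));  <- may drop/retake lock
 *	space_map_update(sm);                      <- refresh in-core values
 *	mutex_exit(lock);
 *	...
 *	space_map_close(sm);
 */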