/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/ddt.h>
#include <sys/zap.h>
#include <sys/dmu_tx.h>
#include <sys/arc.h>
#include <sys/dsl_pool.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dsl_scan.h>

/*
 * Enable/disable prefetching of dedup-ed blocks which are going to be freed.
 */
int zfs_dedup_prefetch = 1;

static const ddt_ops_t *ddt_ops[DDT_TYPES] = {
	&ddt_zap_ops,
};

static const char *ddt_class_name[DDT_CLASSES] = {
	"ditto",
	"duplicate",
	"unique",
};

static void
ddt_object_create(ddt_t *ddt, enum ddt_type type, enum ddt_class class,
    dmu_tx_t *tx)
{
	spa_t *spa = ddt->ddt_spa;
	objset_t *os = ddt->ddt_os;
	uint64_t *objectp = &ddt->ddt_object[type][class];
	boolean_t prehash = zio_checksum_table[ddt->ddt_checksum].ci_dedup;
	char name[DDT_NAMELEN];

	ddt_object_name(ddt, type, class, name);

	ASSERT(*objectp == 0);
	VERIFY(ddt_ops[type]->ddt_op_create(os, objectp, tx, prehash) == 0);
	ASSERT(*objectp != 0);

	VERIFY(zap_add(os, DMU_POOL_DIRECTORY_OBJECT, name,
	    sizeof (uint64_t), 1, objectp, tx) == 0);

	VERIFY(zap_add(os, spa->spa_ddt_stat_object, name,
	    sizeof (uint64_t), sizeof (ddt_histogram_t) / sizeof (uint64_t),
	    &ddt->ddt_histogram[type][class], tx) == 0);
}

static void
ddt_object_destroy(ddt_t *ddt, enum ddt_type type, enum ddt_class class,
    dmu_tx_t *tx)
{
	spa_t *spa = ddt->ddt_spa;
	objset_t *os = ddt->ddt_os;
	uint64_t *objectp = &ddt->ddt_object[type][class];
	uint64_t count;
	char name[DDT_NAMELEN];

	ddt_object_name(ddt, type, class, name);

	ASSERT(*objectp != 0);
	ASSERT(ddt_histogram_empty(&ddt->ddt_histogram[type][class]));
	VERIFY(ddt_object_count(ddt, type, class, &count) == 0 && count == 0);
	VERIFY(zap_remove(os, DMU_POOL_DIRECTORY_OBJECT, name, tx) == 0);
	VERIFY(zap_remove(os, spa->spa_ddt_stat_object, name, tx) == 0);
	VERIFY(ddt_ops[type]->ddt_op_destroy(os, *objectp, tx) == 0);
	bzero(&ddt->ddt_object_stats[type][class], sizeof (ddt_object_t));

	*objectp = 0;
}

static int
ddt_object_load(ddt_t *ddt, enum ddt_type type, enum ddt_class class)
{
	ddt_object_t *ddo = &ddt->ddt_object_stats[type][class];
	dmu_object_info_t doi;
	uint64_t count;
	char name[DDT_NAMELEN];
	int error;

	ddt_object_name(ddt, type, class, name);

	error = zap_lookup(ddt->ddt_os, DMU_POOL_DIRECTORY_OBJECT, name,
	    sizeof (uint64_t), 1, &ddt->ddt_object[type][class]);
	if (error)
		return (error);

	error = zap_lookup(ddt->ddt_os, ddt->ddt_spa->spa_ddt_stat_object, name,
	    sizeof (uint64_t), sizeof (ddt_histogram_t) / sizeof (uint64_t),
	    &ddt->ddt_histogram[type][class]);
	if (error)
		return (error);

	/*
	 * Seed the cached statistics.
	 */
	error = ddt_object_info(ddt, type, class, &doi);
	if (error)
		return (error);

	error = ddt_object_count(ddt, type, class, &count);
	if (error)
		return (error);

	ddo->ddo_count = count;
	ddo->ddo_dspace = doi.doi_physical_blocks_512 << 9;
	ddo->ddo_mspace = doi.doi_fill_count * doi.doi_data_block_size;

	return (0);
}

static void
ddt_object_sync(ddt_t *ddt, enum ddt_type type, enum ddt_class class,
    dmu_tx_t *tx)
{
	ddt_object_t *ddo = &ddt->ddt_object_stats[type][class];
	dmu_object_info_t doi;
	uint64_t count;
	char name[DDT_NAMELEN];

	ddt_object_name(ddt, type, class, name);

	VERIFY(zap_update(ddt->ddt_os, ddt->ddt_spa->spa_ddt_stat_object, name,
	    sizeof (uint64_t), sizeof (ddt_histogram_t) / sizeof (uint64_t),
	    &ddt->ddt_histogram[type][class], tx) == 0);

	/*
	 * Cache DDT statistics; this is the only time they'll change.
	 */
	VERIFY(ddt_object_info(ddt, type, class, &doi) == 0);
	VERIFY(ddt_object_count(ddt, type, class, &count) == 0);

	ddo->ddo_count = count;
	ddo->ddo_dspace = doi.doi_physical_blocks_512 << 9;
	ddo->ddo_mspace = doi.doi_fill_count * doi.doi_data_block_size;
}

static int
ddt_object_lookup(ddt_t *ddt, enum ddt_type type, enum ddt_class class,
    ddt_entry_t *dde)
{
	if (!ddt_object_exists(ddt, type, class))
		return (ENOENT);

	return (ddt_ops[type]->ddt_op_lookup(ddt->ddt_os,
	    ddt->ddt_object[type][class], dde));
}

static void
ddt_object_prefetch(ddt_t *ddt, enum ddt_type type, enum ddt_class class,
    ddt_entry_t *dde)
{
	if (!ddt_object_exists(ddt, type, class))
		return;

	ddt_ops[type]->ddt_op_prefetch(ddt->ddt_os,
	    ddt->ddt_object[type][class], dde);
}

int
ddt_object_update(ddt_t *ddt, enum ddt_type type, enum ddt_class class,
    ddt_entry_t *dde, dmu_tx_t *tx)
{
	ASSERT(ddt_object_exists(ddt, type, class));

	return (ddt_ops[type]->ddt_op_update(ddt->ddt_os,
	    ddt->ddt_object[type][class], dde, tx));
}

static int
ddt_object_remove(ddt_t *ddt, enum ddt_type type, enum ddt_class class,
    ddt_entry_t *dde, dmu_tx_t *tx)
{
	ASSERT(ddt_object_exists(ddt, type, class));

	return (ddt_ops[type]->ddt_op_remove(ddt->ddt_os,
	    ddt->ddt_object[type][class], dde, tx));
}

int
ddt_object_walk(ddt_t *ddt, enum ddt_type type, enum ddt_class class,
    uint64_t *walk, ddt_entry_t *dde)
{
	ASSERT(ddt_object_exists(ddt, type, class));

	return (ddt_ops[type]->ddt_op_walk(ddt->ddt_os,
	    ddt->ddt_object[type][class], dde, walk));
}

int
ddt_object_count(ddt_t *ddt, enum ddt_type type, enum ddt_class class,
    uint64_t *count)
{
	ASSERT(ddt_object_exists(ddt, type, class));

	return (ddt_ops[type]->ddt_op_count(ddt->ddt_os,
	    ddt->ddt_object[type][class], count));
}

int
ddt_object_info(ddt_t *ddt, enum ddt_type type, enum ddt_class class,
    dmu_object_info_t *doi)
{
	if (!ddt_object_exists(ddt, type, class))
		return (ENOENT);

	return (dmu_object_info(ddt->ddt_os, ddt->ddt_object[type][class],
	    doi));
}

boolean_t
ddt_object_exists(ddt_t *ddt, enum ddt_type type, enum ddt_class class)
{
	return (!!ddt->ddt_object[type][class]);
}

void
ddt_object_name(ddt_t *ddt, enum ddt_type type, enum ddt_class class,
    char *name)
{
	(void) sprintf(name, DMU_POOL_DDT,
	    zio_checksum_table[ddt->ddt_checksum].ci_name,
	    ddt_ops[type]->ddt_op_name, ddt_class_name[class]);
}

void
ddt_bp_fill(const ddt_phys_t *ddp, blkptr_t *bp, uint64_t txg)
{
	int d;

	ASSERT(txg != 0);

	for (d = 0; d < SPA_DVAS_PER_BP; d++)
		bp->blk_dva[d] = ddp->ddp_dva[d];
	BP_SET_BIRTH(bp, txg, ddp->ddp_phys_birth);
}

void
ddt_bp_create(enum zio_checksum checksum,
    const ddt_key_t *ddk, const ddt_phys_t *ddp, blkptr_t *bp)
{
	BP_ZERO(bp);

	if (ddp != NULL)
		ddt_bp_fill(ddp, bp, ddp->ddp_phys_birth);

	bp->blk_cksum = ddk->ddk_cksum;
	bp->blk_fill = 1;

	BP_SET_LSIZE(bp, DDK_GET_LSIZE(ddk));
	BP_SET_PSIZE(bp, DDK_GET_PSIZE(ddk));
	BP_SET_COMPRESS(bp, DDK_GET_COMPRESS(ddk));
	BP_SET_CHECKSUM(bp, checksum);
	BP_SET_TYPE(bp, DMU_OT_DEDUP);
	BP_SET_LEVEL(bp, 0);
	BP_SET_DEDUP(bp, 0);
	BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
}

void
ddt_key_fill(ddt_key_t *ddk, const blkptr_t *bp)
{
	ddk->ddk_cksum = bp->blk_cksum;
	ddk->ddk_prop = 0;

	DDK_SET_LSIZE(ddk, BP_GET_LSIZE(bp));
	DDK_SET_PSIZE(ddk, BP_GET_PSIZE(bp));
	DDK_SET_COMPRESS(ddk, BP_GET_COMPRESS(bp));
}

void
ddt_phys_fill(ddt_phys_t *ddp, const blkptr_t *bp)
{
	int d;

	ASSERT(ddp->ddp_phys_birth == 0);

	for (d = 0; d < SPA_DVAS_PER_BP; d++)
		ddp->ddp_dva[d] = bp->blk_dva[d];
	ddp->ddp_phys_birth = BP_PHYSICAL_BIRTH(bp);
}

void
ddt_phys_clear(ddt_phys_t *ddp)
{
	bzero(ddp, sizeof (*ddp));
}

void
ddt_phys_addref(ddt_phys_t *ddp)
{
	ddp->ddp_refcnt++;
}

void
ddt_phys_decref(ddt_phys_t *ddp)
{
	ASSERT((int64_t)ddp->ddp_refcnt > 0);
	ddp->ddp_refcnt--;
}

void
ddt_phys_free(ddt_t *ddt, ddt_key_t *ddk, ddt_phys_t *ddp, uint64_t txg)
{
	blkptr_t blk;

	ddt_bp_create(ddt->ddt_checksum, ddk, ddp, &blk);
	ddt_phys_clear(ddp);
	zio_free(ddt->ddt_spa, txg, &blk);
}

ddt_phys_t *
ddt_phys_select(const ddt_entry_t *dde, const blkptr_t *bp)
{
	ddt_phys_t *ddp = (ddt_phys_t *)dde->dde_phys;
	int p;

	for (p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
		if (DVA_EQUAL(BP_IDENTITY(bp), &ddp->ddp_dva[0]) &&
		    BP_PHYSICAL_BIRTH(bp) == ddp->ddp_phys_birth)
			return (ddp);
	}

	return (NULL);
}

uint64_t
ddt_phys_total_refcnt(const ddt_entry_t *dde)
{
	uint64_t refcnt = 0;
	int p;

	for (p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++)
		refcnt += dde->dde_phys[p].ddp_refcnt;

	return (refcnt);
}

static void
ddt_stat_generate(ddt_t *ddt, ddt_entry_t *dde, ddt_stat_t *dds)
{
	spa_t *spa = ddt->ddt_spa;
	ddt_phys_t *ddp = dde->dde_phys;
	ddt_key_t *ddk = &dde->dde_key;
	uint64_t lsize = DDK_GET_LSIZE(ddk);
	uint64_t psize = DDK_GET_PSIZE(ddk);
	int p, d;

	bzero(dds, sizeof (*dds));

	for (p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
		uint64_t dsize = 0;
		uint64_t refcnt = ddp->ddp_refcnt;

		if (ddp->ddp_phys_birth == 0)
			continue;

		for (d = 0; d < SPA_DVAS_PER_BP; d++)
			dsize += dva_get_dsize_sync(spa, &ddp->ddp_dva[d]);

		dds->dds_blocks += 1;
		dds->dds_lsize += lsize;
		dds->dds_psize += psize;
		dds->dds_dsize += dsize;

		dds->dds_ref_blocks += refcnt;
		dds->dds_ref_lsize += lsize * refcnt;
		dds->dds_ref_psize += psize * refcnt;
		dds->dds_ref_dsize += dsize * refcnt;
	}
}
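
/*
 * Add (neg == 0) or subtract (neg == -1ULL) one stat structure from
 * another, field by field.  The XOR/subtract below is two's-complement
 * negation: (x ^ -1ULL) - (-1ULL) == ~x + 1 == -x, so a single loop
 * handles both directions.
 */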
void
ddt_stat_add(ddt_stat_t *dst, const ddt_stat_t *src, uint64_t neg)
{
	const uint64_t *s = (const uint64_t *)src;
	uint64_t *d = (uint64_t *)dst;
	uint64_t *d_end = (uint64_t *)(dst + 1);

	ASSERT(neg == 0 || neg == -1ULL);	/* add or subtract */

	while (d < d_end)
		*d++ += (*s++ ^ neg) - neg;
}
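
/*
 * The histogram slot is the floor of log2 of the entry's total
 * reference count (highbit() is 1-based), so ddh_stat[b] aggregates
 * entries referenced between 2^b and 2^(b+1) - 1 times.
 */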
static void
ddt_stat_update(ddt_t *ddt, ddt_entry_t *dde, uint64_t neg)
{
	ddt_stat_t dds;
	ddt_histogram_t *ddh;
	int bucket;

	ddt_stat_generate(ddt, dde, &dds);

	bucket = highbit(dds.dds_ref_blocks) - 1;
	ASSERT(bucket >= 0);

	ddh = &ddt->ddt_histogram[dde->dde_type][dde->dde_class];

	ddt_stat_add(&ddh->ddh_stat[bucket], &dds, neg);
}

void
ddt_histogram_add(ddt_histogram_t *dst, const ddt_histogram_t *src)
{
	int h;

	for (h = 0; h < 64; h++)
		ddt_stat_add(&dst->ddh_stat[h], &src->ddh_stat[h], 0);
}

void
ddt_histogram_stat(ddt_stat_t *dds, const ddt_histogram_t *ddh)
{
	int h;

	bzero(dds, sizeof (*dds));

	for (h = 0; h < 64; h++)
		ddt_stat_add(dds, &ddh->ddh_stat[h], 0);
}

boolean_t
ddt_histogram_empty(const ddt_histogram_t *ddh)
{
	const uint64_t *s = (const uint64_t *)ddh;
	const uint64_t *s_end = (const uint64_t *)(ddh + 1);

	while (s < s_end)
		if (*s++ != 0)
			return (B_FALSE);

	return (B_TRUE);
}

void
ddt_get_dedup_object_stats(spa_t *spa, ddt_object_t *ddo_total)
{
	enum zio_checksum c;
	enum ddt_type type;
	enum ddt_class class;

	/* Sum the statistics we cached in ddt_object_sync(). */
	for (c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
		ddt_t *ddt = spa->spa_ddt[c];
		for (type = 0; type < DDT_TYPES; type++) {
			for (class = 0; class < DDT_CLASSES;
			    class++) {
				ddt_object_t *ddo =
				    &ddt->ddt_object_stats[type][class];
				ddo_total->ddo_count += ddo->ddo_count;
				ddo_total->ddo_dspace += ddo->ddo_dspace;
				ddo_total->ddo_mspace += ddo->ddo_mspace;
			}
		}
	}

	/* ... and compute the averages. */
	if (ddo_total->ddo_count != 0) {
		ddo_total->ddo_dspace /= ddo_total->ddo_count;
		ddo_total->ddo_mspace /= ddo_total->ddo_count;
	}
}

void
ddt_get_dedup_histogram(spa_t *spa, ddt_histogram_t *ddh)
{
	enum zio_checksum c;
	enum ddt_type type;
	enum ddt_class class;

	for (c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
		ddt_t *ddt = spa->spa_ddt[c];
		for (type = 0; type < DDT_TYPES; type++) {
			for (class = 0; class < DDT_CLASSES;
			    class++) {
				ddt_histogram_add(ddh,
				    &ddt->ddt_histogram_cache[type][class]);
			}
		}
	}
}

void
ddt_get_dedup_stats(spa_t *spa, ddt_stat_t *dds_total)
{
	ddt_histogram_t *ddh_total;

	/* XXX: Move to a slab */
	ddh_total = kmem_zalloc(sizeof (ddt_histogram_t), KM_PUSHPAGE);
	ddt_get_dedup_histogram(spa, ddh_total);
	ddt_histogram_stat(dds_total, ddh_total);
	kmem_free(ddh_total, sizeof (ddt_histogram_t));
}

uint64_t
ddt_get_dedup_dspace(spa_t *spa)
{
	ddt_stat_t dds_total = { 0 };

	ddt_get_dedup_stats(spa, &dds_total);
	return (dds_total.dds_ref_dsize - dds_total.dds_dsize);
}
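
/*
 * The ratio returned below is scaled by 100 (e.g. 250 for a 2.50x
 * dedup ratio); consumers such as the dedupratio pool property divide
 * it back out for display.
 */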
uint64_t
ddt_get_pool_dedup_ratio(spa_t *spa)
{
	ddt_stat_t dds_total = { 0 };

	ddt_get_dedup_stats(spa, &dds_total);
	if (dds_total.dds_dsize == 0)
		return (100);

	return (dds_total.dds_ref_dsize * 100 / dds_total.dds_dsize);
}
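
/*
 * Decide how many copies of a dedup-ed block should exist, based on
 * the pool's dedupditto threshold: one copy is always wanted, a second
 * once the block reaches spa_dedup_ditto references, and a third at
 * ditto squared.  Returns the number of additional copies needed.
 */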
int
ddt_ditto_copies_needed(ddt_t *ddt, ddt_entry_t *dde, ddt_phys_t *ddp_willref)
{
	spa_t *spa = ddt->ddt_spa;
	uint64_t total_refcnt = 0;
	uint64_t ditto = spa->spa_dedup_ditto;
	int total_copies = 0;
	int desired_copies = 0;
	int p;

	for (p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) {
		ddt_phys_t *ddp = &dde->dde_phys[p];
		zio_t *zio = dde->dde_lead_zio[p];
		uint64_t refcnt = ddp->ddp_refcnt;	/* committed refs */
		if (zio != NULL)
			refcnt += zio->io_parent_count;	/* pending refs */
		if (ddp == ddp_willref)
			refcnt++;			/* caller's ref */
		if (refcnt != 0) {
			total_refcnt += refcnt;
			total_copies += p;
		}
	}

	if (ditto == 0 || ditto > UINT32_MAX)
		ditto = UINT32_MAX;

	if (total_refcnt >= 1)
		desired_copies++;
	if (total_refcnt >= ditto)
		desired_copies++;
	if (total_refcnt >= ditto * ditto)
		desired_copies++;

	return (MAX(desired_copies, total_copies) - total_copies);
}

int
ddt_ditto_copies_present(ddt_entry_t *dde)
{
	ddt_phys_t *ddp = &dde->dde_phys[DDT_PHYS_DITTO];
	dva_t *dva = ddp->ddp_dva;
	int copies = 0 - DVA_GET_GANG(dva);
	int d;

	for (d = 0; d < SPA_DVAS_PER_BP; d++, dva++)
		if (DVA_IS_VALID(dva))
			copies++;

	ASSERT(copies >= 0 && copies < SPA_DVAS_PER_BP);

	return (copies);
}
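
/*
 * On-disk DDT entries carry a one-byte header ahead of the payload:
 * the low bits record the compression function used (ZLE, or OFF if
 * ZLE did not shrink the data) and DDT_COMPRESS_BYTEORDER_MASK records
 * the writer's byte order so ddt_decompress() can byteswap on import.
 */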
size_t
ddt_compress(void *src, uchar_t *dst, size_t s_len, size_t d_len)
{
	uchar_t *version = dst++;
	int cpfunc = ZIO_COMPRESS_ZLE;
	zio_compress_info_t *ci = &zio_compress_table[cpfunc];
	size_t c_len;

	ASSERT(d_len >= s_len + 1);	/* no compression plus version byte */

	c_len = ci->ci_compress(src, dst, s_len, d_len - 1, ci->ci_level);

	if (c_len == s_len) {
		cpfunc = ZIO_COMPRESS_OFF;
		bcopy(src, dst, s_len);
	}

	*version = (ZFS_HOST_BYTEORDER & DDT_COMPRESS_BYTEORDER_MASK) | cpfunc;

	return (c_len + 1);
}

void
ddt_decompress(uchar_t *src, void *dst, size_t s_len, size_t d_len)
{
	uchar_t version = *src++;
	int cpfunc = version & DDT_COMPRESS_FUNCTION_MASK;
	zio_compress_info_t *ci = &zio_compress_table[cpfunc];

	if (ci->ci_decompress != NULL)
		(void) ci->ci_decompress(src, dst, s_len, d_len, ci->ci_level);
	else
		bcopy(src, dst, d_len);

	if ((version ^ ZFS_HOST_BYTEORDER) & DDT_COMPRESS_BYTEORDER_MASK)
		byteswap_uint64_array(dst, d_len);
}

ddt_t *
ddt_select_by_checksum(spa_t *spa, enum zio_checksum c)
{
	return (spa->spa_ddt[c]);
}

ddt_t *
ddt_select(spa_t *spa, const blkptr_t *bp)
{
	return (spa->spa_ddt[BP_GET_CHECKSUM(bp)]);
}

void
ddt_enter(ddt_t *ddt)
{
	mutex_enter(&ddt->ddt_lock);
}

void
ddt_exit(ddt_t *ddt)
{
	mutex_exit(&ddt->ddt_lock);
}

ddt_entry_t *
ddt_alloc(const ddt_key_t *ddk)
{
	ddt_entry_t *dde;

	/* XXX: Move to a slab */
	dde = kmem_zalloc(sizeof (ddt_entry_t), KM_PUSHPAGE);
	cv_init(&dde->dde_cv, NULL, CV_DEFAULT, NULL);

	dde->dde_key = *ddk;

	return (dde);
}

void
ddt_free(ddt_entry_t *dde)
{
	int p;

	ASSERT(!dde->dde_loading);

	for (p = 0; p < DDT_PHYS_TYPES; p++)
		ASSERT(dde->dde_lead_zio[p] == NULL);

	if (dde->dde_repair_data != NULL)
		zio_buf_free(dde->dde_repair_data,
		    DDK_GET_PSIZE(&dde->dde_key));

	cv_destroy(&dde->dde_cv);
	kmem_free(dde, sizeof (*dde));
}

void
ddt_remove(ddt_t *ddt, ddt_entry_t *dde)
{
	ASSERT(MUTEX_HELD(&ddt->ddt_lock));

	avl_remove(&ddt->ddt_tree, dde);
	ddt_free(dde);
}
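
/*
 * Look up an entry by block pointer, instantiating it in the in-core
 * tree if necessary.  The first thread to miss inserts a stub, marks
 * it dde_loading, and probes the on-disk tables with the DDT lock
 * dropped; concurrent lookups of the same key wait on dde_cv until
 * dde_loaded is set.
 */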
ddt_entry_t *
ddt_lookup(ddt_t *ddt, const blkptr_t *bp, boolean_t add)
{
	ddt_entry_t *dde, dde_search;
	enum ddt_type type;
	enum ddt_class class;
	avl_index_t where;
	int error;

	ASSERT(MUTEX_HELD(&ddt->ddt_lock));

	ddt_key_fill(&dde_search.dde_key, bp);

	dde = avl_find(&ddt->ddt_tree, &dde_search, &where);
	if (dde == NULL) {
		if (!add)
			return (NULL);
		dde = ddt_alloc(&dde_search.dde_key);
		avl_insert(&ddt->ddt_tree, dde, where);
	}

	while (dde->dde_loading)
		cv_wait(&dde->dde_cv, &ddt->ddt_lock);

	if (dde->dde_loaded)
		return (dde);

	dde->dde_loading = B_TRUE;

	ddt_exit(ddt);

	error = 0;

	for (type = 0; type < DDT_TYPES; type++) {
		for (class = 0; class < DDT_CLASSES; class++) {
			error = ddt_object_lookup(ddt, type, class, dde);
			if (error != ENOENT)
				break;
		}
		if (error != ENOENT)
			break;
	}

	ASSERT(error == 0 || error == ENOENT);

	ddt_enter(ddt);

	ASSERT(dde->dde_loaded == B_FALSE);
	ASSERT(dde->dde_loading == B_TRUE);

	dde->dde_type = type;	/* will be DDT_TYPES if no entry found */
	dde->dde_class = class;	/* will be DDT_CLASSES if no entry found */
	dde->dde_loaded = B_TRUE;
	dde->dde_loading = B_FALSE;

	if (error == 0)
		ddt_stat_update(ddt, dde, -1ULL);

	cv_broadcast(&dde->dde_cv);

	return (dde);
}

void
ddt_prefetch(spa_t *spa, const blkptr_t *bp)
{
	ddt_t *ddt;
	ddt_entry_t dde;
	enum ddt_type type;
	enum ddt_class class;

	if (!zfs_dedup_prefetch || bp == NULL || !BP_GET_DEDUP(bp))
		return;

	/*
	 * We only remove the DDT once all tables are empty and only
	 * prefetch dedup blocks when there are entries in the DDT.
	 * Thus no locking is required as the DDT can't disappear on us.
	 */
	ddt = ddt_select(spa, bp);
	ddt_key_fill(&dde.dde_key, bp);

	for (type = 0; type < DDT_TYPES; type++) {
		for (class = 0; class < DDT_CLASSES; class++) {
			ddt_object_prefetch(ddt, type, class, &dde);
		}
	}
}

static int
ddt_entry_compare(const void *x1, const void *x2)
{
	const ddt_entry_t *dde1 = x1;
	const ddt_entry_t *dde2 = x2;
	const uint64_t *u1 = (const uint64_t *)&dde1->dde_key;
	const uint64_t *u2 = (const uint64_t *)&dde2->dde_key;
	int i;

	for (i = 0; i < DDT_KEY_WORDS; i++) {
		if (u1[i] < u2[i])
			return (-1);
		if (u1[i] > u2[i])
			return (1);
	}

	return (0);
}

static ddt_t *
ddt_table_alloc(spa_t *spa, enum zio_checksum c)
{
	ddt_t *ddt;

	/* XXX: Move to a slab */
	ddt = kmem_zalloc(sizeof (*ddt), KM_PUSHPAGE | KM_NODEBUG);

	mutex_init(&ddt->ddt_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&ddt->ddt_tree, ddt_entry_compare,
	    sizeof (ddt_entry_t), offsetof(ddt_entry_t, dde_node));
	avl_create(&ddt->ddt_repair_tree, ddt_entry_compare,
	    sizeof (ddt_entry_t), offsetof(ddt_entry_t, dde_node));
	ddt->ddt_checksum = c;
	ddt->ddt_spa = spa;
	ddt->ddt_os = spa->spa_meta_objset;

	return (ddt);
}

static void
ddt_table_free(ddt_t *ddt)
{
	ASSERT(avl_numnodes(&ddt->ddt_tree) == 0);
	ASSERT(avl_numnodes(&ddt->ddt_repair_tree) == 0);
	avl_destroy(&ddt->ddt_tree);
	avl_destroy(&ddt->ddt_repair_tree);
	mutex_destroy(&ddt->ddt_lock);
	kmem_free(ddt, sizeof (*ddt));
}

void
ddt_create(spa_t *spa)
{
	enum zio_checksum c;

	spa->spa_dedup_checksum = ZIO_DEDUPCHECKSUM;

	for (c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++)
		spa->spa_ddt[c] = ddt_table_alloc(spa, c);
}

int
ddt_load(spa_t *spa)
{
	enum zio_checksum c;
	enum ddt_type type;
	enum ddt_class class;
	int error;

	ddt_create(spa);

	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_DDT_STATS, sizeof (uint64_t), 1,
	    &spa->spa_ddt_stat_object);
	if (error)
		return (error == ENOENT ? 0 : error);

	for (c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
		ddt_t *ddt = spa->spa_ddt[c];
		for (type = 0; type < DDT_TYPES; type++) {
			for (class = 0; class < DDT_CLASSES;
			    class++) {
				error = ddt_object_load(ddt, type, class);
				if (error != 0 && error != ENOENT)
					return (error);
			}
		}

		/*
		 * Seed the cached histograms.
		 */
		bcopy(ddt->ddt_histogram, &ddt->ddt_histogram_cache,
		    sizeof (ddt->ddt_histogram));
	}

	return (0);
}

void
ddt_unload(spa_t *spa)
{
	enum zio_checksum c;

	for (c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
		if (spa->spa_ddt[c]) {
			ddt_table_free(spa->spa_ddt[c]);
			spa->spa_ddt[c] = NULL;
		}
	}
}

boolean_t
ddt_class_contains(spa_t *spa, enum ddt_class max_class, const blkptr_t *bp)
{
	ddt_t *ddt;
	ddt_entry_t *dde;
	enum ddt_type type;
	enum ddt_class class;

	if (!BP_GET_DEDUP(bp))
		return (B_FALSE);

	if (max_class == DDT_CLASS_UNIQUE)
		return (B_TRUE);

	ddt = spa->spa_ddt[BP_GET_CHECKSUM(bp)];
	dde = kmem_alloc(sizeof (ddt_entry_t), KM_PUSHPAGE);

	ddt_key_fill(&(dde->dde_key), bp);

	for (type = 0; type < DDT_TYPES; type++) {
		for (class = 0; class <= max_class; class++) {
			if (ddt_object_lookup(ddt, type, class, dde) == 0) {
				kmem_free(dde, sizeof (ddt_entry_t));
				return (B_TRUE);
			}
		}
	}

	kmem_free(dde, sizeof (ddt_entry_t));

	return (B_FALSE);
}

ddt_entry_t *
ddt_repair_start(ddt_t *ddt, const blkptr_t *bp)
{
	ddt_key_t ddk;
	ddt_entry_t *dde;
	enum ddt_type type;
	enum ddt_class class;

	ddt_key_fill(&ddk, bp);

	dde = ddt_alloc(&ddk);

	for (type = 0; type < DDT_TYPES; type++) {
		for (class = 0; class < DDT_CLASSES; class++) {
			/*
			 * We can only do repair if there are multiple copies
			 * of the block.  For anything in the UNIQUE class,
			 * there's definitely only one copy, so don't even try.
			 */
			if (class != DDT_CLASS_UNIQUE &&
			    ddt_object_lookup(ddt, type, class, dde) == 0)
				return (dde);
		}
	}

	bzero(dde->dde_phys, sizeof (dde->dde_phys));

	return (dde);
}

void
ddt_repair_done(ddt_t *ddt, ddt_entry_t *dde)
{
	avl_index_t where;

	ddt_enter(ddt);

	if (dde->dde_repair_data != NULL && spa_writeable(ddt->ddt_spa) &&
	    avl_find(&ddt->ddt_repair_tree, dde, &where) == NULL)
		avl_insert(&ddt->ddt_repair_tree, dde, where);
	else
		ddt_free(dde);

	ddt_exit(ddt);
}

static void
ddt_repair_entry_done(zio_t *zio)
{
	ddt_entry_t *rdde = zio->io_private;

	ddt_free(rdde);
}

static void
ddt_repair_entry(ddt_t *ddt, ddt_entry_t *dde, ddt_entry_t *rdde, zio_t *rio)
{
	ddt_phys_t *ddp = dde->dde_phys;
	ddt_phys_t *rddp = rdde->dde_phys;
	ddt_key_t *ddk = &dde->dde_key;
	ddt_key_t *rddk = &rdde->dde_key;
	zio_t *zio;
	blkptr_t blk;
	int p;

	zio = zio_null(rio, rio->io_spa, NULL,
	    ddt_repair_entry_done, rdde, rio->io_flags);

	for (p = 0; p < DDT_PHYS_TYPES; p++, ddp++, rddp++) {
		if (ddp->ddp_phys_birth == 0 ||
		    ddp->ddp_phys_birth != rddp->ddp_phys_birth ||
		    bcmp(ddp->ddp_dva, rddp->ddp_dva, sizeof (ddp->ddp_dva)))
			continue;
		ddt_bp_create(ddt->ddt_checksum, ddk, ddp, &blk);
		zio_nowait(zio_rewrite(zio, zio->io_spa, 0, &blk,
		    rdde->dde_repair_data, DDK_GET_PSIZE(rddk), NULL, NULL,
		    ZIO_PRIORITY_SYNC_WRITE, ZIO_DDT_CHILD_FLAGS(zio), NULL));
	}

	zio_nowait(zio);
}

static void
ddt_repair_table(ddt_t *ddt, zio_t *rio)
{
	spa_t *spa = ddt->ddt_spa;
	ddt_entry_t *dde, *rdde_next, *rdde;
	avl_tree_t *t = &ddt->ddt_repair_tree;
	blkptr_t blk;

	if (spa_sync_pass(spa) > 1)
		return;

	ddt_enter(ddt);
	for (rdde = avl_first(t); rdde != NULL; rdde = rdde_next) {
		rdde_next = AVL_NEXT(t, rdde);
		avl_remove(&ddt->ddt_repair_tree, rdde);
		ddt_exit(ddt);
		ddt_bp_create(ddt->ddt_checksum, &rdde->dde_key, NULL, &blk);
		dde = ddt_repair_start(ddt, &blk);
		ddt_repair_entry(ddt, dde, rdde, rio);
		ddt_repair_done(ddt, dde);
		ddt_enter(ddt);
	}
	ddt_exit(ddt);
}

static void
ddt_sync_entry(ddt_t *ddt, ddt_entry_t *dde, dmu_tx_t *tx, uint64_t txg)
{
	dsl_pool_t *dp = ddt->ddt_spa->spa_dsl_pool;
	ddt_phys_t *ddp = dde->dde_phys;
	ddt_key_t *ddk = &dde->dde_key;
	enum ddt_type otype = dde->dde_type;
	enum ddt_type ntype = DDT_TYPE_CURRENT;
	enum ddt_class oclass = dde->dde_class;
	enum ddt_class nclass;
	uint64_t total_refcnt = 0;
	int p;

	ASSERT(dde->dde_loaded);
	ASSERT(!dde->dde_loading);

	for (p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
		ASSERT(dde->dde_lead_zio[p] == NULL);
		ASSERT((int64_t)ddp->ddp_refcnt >= 0);
		if (ddp->ddp_phys_birth == 0) {
			ASSERT(ddp->ddp_refcnt == 0);
			continue;
		}
		if (p == DDT_PHYS_DITTO) {
			if (ddt_ditto_copies_needed(ddt, dde, NULL) == 0)
				ddt_phys_free(ddt, ddk, ddp, txg);
			continue;
		}
		if (ddp->ddp_refcnt == 0)
			ddt_phys_free(ddt, ddk, ddp, txg);
		total_refcnt += ddp->ddp_refcnt;
	}

	if (dde->dde_phys[DDT_PHYS_DITTO].ddp_phys_birth != 0)
		nclass = DDT_CLASS_DITTO;
	else if (total_refcnt > 1)
		nclass = DDT_CLASS_DUPLICATE;
	else
		nclass = DDT_CLASS_UNIQUE;

	if (otype != DDT_TYPES &&
	    (otype != ntype || oclass != nclass || total_refcnt == 0)) {
		VERIFY(ddt_object_remove(ddt, otype, oclass, dde, tx) == 0);
		ASSERT(ddt_object_lookup(ddt, otype, oclass, dde) == ENOENT);
	}

	if (total_refcnt != 0) {
		dde->dde_type = ntype;
		dde->dde_class = nclass;
		ddt_stat_update(ddt, dde, 0);
		if (!ddt_object_exists(ddt, ntype, nclass))
			ddt_object_create(ddt, ntype, nclass, tx);
		VERIFY(ddt_object_update(ddt, ntype, nclass, dde, tx) == 0);

		/*
		 * If the class changes, the order that we scan this bp
		 * changes.  If it decreases, we could miss it, so
		 * scan it right now.  (This covers both class changing
		 * while we are doing ddt_walk(), and when we are
		 * traversing.)
		 */
		if (nclass < oclass) {
			dsl_scan_ddt_entry(dp->dp_scan,
			    ddt->ddt_checksum, dde, tx);
		}
	}
}

static void
ddt_sync_table(ddt_t *ddt, dmu_tx_t *tx, uint64_t txg)
{
	spa_t *spa = ddt->ddt_spa;
	ddt_entry_t *dde;
	void *cookie = NULL;
	enum ddt_type type;
	enum ddt_class class;

	if (avl_numnodes(&ddt->ddt_tree) == 0)
		return;

	ASSERT(spa->spa_uberblock.ub_version >= SPA_VERSION_DEDUP);

	if (spa->spa_ddt_stat_object == 0) {
		spa->spa_ddt_stat_object = zap_create_link(ddt->ddt_os,
		    DMU_OT_DDT_STATS, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_DDT_STATS, tx);
	}

	while ((dde = avl_destroy_nodes(&ddt->ddt_tree, &cookie)) != NULL) {
		ddt_sync_entry(ddt, dde, tx, txg);
		ddt_free(dde);
	}

	for (type = 0; type < DDT_TYPES; type++) {
		uint64_t add, count = 0;
		for (class = 0; class < DDT_CLASSES; class++) {
			if (ddt_object_exists(ddt, type, class)) {
				ddt_object_sync(ddt, type, class, tx);
				VERIFY(ddt_object_count(ddt, type, class,
				    &add) == 0);
				count += add;
			}
		}
		for (class = 0; class < DDT_CLASSES; class++) {
			if (count == 0 && ddt_object_exists(ddt, type, class))
				ddt_object_destroy(ddt, type, class, tx);
		}
	}

	bcopy(ddt->ddt_histogram, &ddt->ddt_histogram_cache,
	    sizeof (ddt->ddt_histogram));
}

void
ddt_sync(spa_t *spa, uint64_t txg)
{
	dmu_tx_t *tx;
	zio_t *rio = zio_root(spa, NULL, NULL,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);
	enum zio_checksum c;

	ASSERT(spa_syncing_txg(spa) == txg);

	tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);

	for (c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
		ddt_t *ddt = spa->spa_ddt[c];
		if (ddt == NULL)
			continue;
		ddt_sync_table(ddt, tx, txg);
		ddt_repair_table(ddt, rio);
	}

	(void) zio_wait(rio);

	dmu_tx_commit(tx);
}
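
/*
 * Walk every entry in every DDT object in bookmark order: all
 * checksums and types within a class are exhausted before the class
 * advances, so scans see ditto blocks first, then duplicates, then
 * uniques.  The bookmark records the position so a walk can resume.
 */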
int
ddt_walk(spa_t *spa, ddt_bookmark_t *ddb, ddt_entry_t *dde)
{
	do {
		do {
			do {
				ddt_t *ddt = spa->spa_ddt[ddb->ddb_checksum];
				int error = ENOENT;
				if (ddt_object_exists(ddt, ddb->ddb_type,
				    ddb->ddb_class)) {
					error = ddt_object_walk(ddt,
					    ddb->ddb_type, ddb->ddb_class,
					    &ddb->ddb_cursor, dde);
				}
				dde->dde_type = ddb->ddb_type;
				dde->dde_class = ddb->ddb_class;
				if (error == 0)
					return (0);
				if (error != ENOENT)
					return (error);
				ddb->ddb_cursor = 0;
			} while (++ddb->ddb_checksum < ZIO_CHECKSUM_FUNCTIONS);
			ddb->ddb_checksum = 0;
		} while (++ddb->ddb_type < DDT_TYPES);
		ddb->ddb_type = 0;
	} while (++ddb->ddb_class < DDT_CLASSES);

	return (ENOENT);
}

#if defined(_KERNEL) && defined(HAVE_SPL)
module_param(zfs_dedup_prefetch, int, 0644);
MODULE_PARM_DESC(zfs_dedup_prefetch, "Enable prefetching dedup-ed blks");
#endif