* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
+ * or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/brt.h>
+#include <sys/brt_impl.h>
#include <sys/ddt.h>
#include <sys/bitmap.h>
#include <sys/zap.h>
* (copying the file content to the new dataset and removing the source file).
* In that case Block Cloning will only be used briefly, because the BRT entries
* will be removed when the source is removed.
- * Note: currently it is not possible to clone blocks between encrypted
- * datasets, even if those datasets use the same encryption key (this includes
- * snapshots of encrypted datasets). Cloning blocks between datasets that use
- * the same keys should be possible and should be implemented in the future.
+ * Block Cloning across encrypted datasets is supported as long as both
+ * datasets share the same master key (e.g. snapshots and clones).
*
* Block Cloning flow through ZFS layers.
*
* size_t len, unsigned int flags);
*
* Even though offsets and length represent bytes, they have to be
- * block-aligned or we will return the EXDEV error so the upper layer can
+ * block-aligned or we will return an error so the upper layer can
* fallback to the generic mechanism that will just copy the data.
* Using copy_file_range(2) will call OS-independent zfs_clone_range() function.
* This function was implemented based on zfs_write(), but instead of writing
* Some special cases to consider and how we address them:
* - The block we want to clone may have been created within the same
* transaction group that we are trying to clone. Such block has no BP
- * allocated yet, so cannot be immediately cloned. We return EXDEV.
+ * allocated yet, so cannot be immediately cloned. We return EAGAIN.
* - The block we want to clone may have been modified within the same
- * transaction group. We return EXDEV.
+ * transaction group. We return EAGAIN.
* - A block may be cloned multiple times during one transaction group (that's
* why pending list is actually a tree and not an append-only list - this
* way we can figure out faster if this block is cloned for the first time
* destination dataset is mounted and its ZIL replayed.
* To address this situation we leverage zil_claim() mechanism where ZFS will
* parse all the ZILs on pool import. When we come across TX_CLONE_RANGE
- * entries, we will bump reference counters for their BPs in the BRT and then
- * on mount and ZIL replay we will just attach BPs to the file without
- * bumping reference counters.
- * Note it is still possible that after zil_claim() we never mount the
- * destination, so we never replay its ZIL and we destroy it. This way we would
- * end up with leaked references in BRT. We address that too as ZFS gives us
- * a chance to clean this up on dataset destroy (see zil_free_clone_range()).
+ * entries, we will bump reference counters for their BPs in the BRT. Then
+ * on mount and ZIL replay we bump the reference counters once more, while the
+ * first references are dropped during ZIL destroy by zil_free_clone_range().
+ * It is possible that after zil_claim() we never mount the destination, so
+ * we never replay its ZIL and just destroy it. In this case the only
+ * references taken will be dropped by zil_free_clone_range(), since the
+ * cloning is never going to take place.
*/
-/*
- * BRT - Block Reference Table.
- */
-#define BRT_OBJECT_VDEV_PREFIX "com.fudosecurity:brt:vdev:"
-
-/*
- * We divide each VDEV into 16MB chunks. Each chunk is represented in memory
- * by a 16bit counter, thus 1TB VDEV requires 128kB of memory: (1TB / 16MB) * 2B
- * Each element in this array represents how many BRT entries do we have in this
- * chunk of storage. We always load this entire array into memory and update as
- * needed. By having it in memory we can quickly tell (during zio_free()) if
- * there are any BRT entries that we might need to update.
- *
- * This value cannot be larger than 16MB, at least as long as we support
- * 512 byte block sizes. With 512 byte block size we can have exactly
- * 32768 blocks in 16MB. In 32MB we could have 65536 blocks, which is one too
- * many for a 16bit counter.
- */
-#define BRT_RANGESIZE (16 * 1024 * 1024)
-_Static_assert(BRT_RANGESIZE / SPA_MINBLOCKSIZE <= UINT16_MAX,
- "BRT_RANGESIZE is too large.");
-/*
- * We don't want to update the whole structure every time. Maintain bitmap
- * of dirty blocks within the regions, so that a single bit represents a
- * block size of entcounts. For example if we have a 1PB vdev then all
- * entcounts take 128MB of memory ((64TB / 16MB) * 2B). We can divide this
- * 128MB array of entcounts into 32kB disk blocks, as we don't want to update
- * the whole 128MB on disk when we have updated only a single entcount.
- * We maintain a bitmap where each 32kB disk block within 128MB entcounts array
- * is represented by a single bit. This gives us 4096 bits. A set bit in the
- * bitmap means that we had a change in at least one of the 16384 entcounts
- * that reside on a 32kB disk block (32kB / sizeof (uint16_t)).
- */
-#define BRT_BLOCKSIZE (32 * 1024)
-#define BRT_RANGESIZE_TO_NBLOCKS(size) \
- (((size) - 1) / BRT_BLOCKSIZE / sizeof (uint16_t) + 1)
-
-#define BRT_LITTLE_ENDIAN 0
-#define BRT_BIG_ENDIAN 1
-#ifdef _ZFS_LITTLE_ENDIAN
-#define BRT_NATIVE_BYTEORDER BRT_LITTLE_ENDIAN
-#define BRT_NON_NATIVE_BYTEORDER BRT_BIG_ENDIAN
-#else
-#define BRT_NATIVE_BYTEORDER BRT_BIG_ENDIAN
-#define BRT_NON_NATIVE_BYTEORDER BRT_LITTLE_ENDIAN
-#endif
-
-typedef struct brt_vdev_phys {
- uint64_t bvp_mos_entries;
- uint64_t bvp_size;
- uint64_t bvp_byteorder;
- uint64_t bvp_totalcount;
- uint64_t bvp_rangesize;
- uint64_t bvp_usedspace;
- uint64_t bvp_savedspace;
-} brt_vdev_phys_t;
-
-typedef struct brt_vdev {
- /*
- * VDEV id.
- */
- uint64_t bv_vdevid;
- /*
- * Is the structure initiated?
- * (bv_entcount and bv_bitmap are allocated?)
- */
- boolean_t bv_initiated;
- /*
- * Object number in the MOS for the entcount array and brt_vdev_phys.
- */
- uint64_t bv_mos_brtvdev;
- /*
- * Object number in the MOS for the entries table.
- */
- uint64_t bv_mos_entries;
- /*
- * Entries to sync.
- */
- avl_tree_t bv_tree;
- /*
- * Does the bv_entcount[] array needs byte swapping?
- */
- boolean_t bv_need_byteswap;
- /*
- * Number of entries in the bv_entcount[] array.
- */
- uint64_t bv_size;
- /*
- * This is the array with BRT entry count per BRT_RANGESIZE.
- */
- uint16_t *bv_entcount;
- /*
- * Sum of all bv_entcount[]s.
- */
- uint64_t bv_totalcount;
- /*
- * Space on disk occupied by cloned blocks (without compression).
- */
- uint64_t bv_usedspace;
- /*
- * How much additional space would be occupied without block cloning.
- */
- uint64_t bv_savedspace;
- /*
- * brt_vdev_phys needs updating on disk.
- */
- boolean_t bv_meta_dirty;
- /*
- * bv_entcount[] needs updating on disk.
- */
- boolean_t bv_entcount_dirty;
- /*
- * bv_entcount[] potentially can be a bit too big to sychronize it all
- * when we just changed few entcounts. The fields below allow us to
- * track updates to bv_entcount[] array since the last sync.
- * A single bit in the bv_bitmap represents as many entcounts as can
- * fit into a single BRT_BLOCKSIZE.
- * For example we have 65536 entcounts in the bv_entcount array
- * (so the whole array is 128kB). We updated bv_entcount[2] and
- * bv_entcount[5]. In that case only first bit in the bv_bitmap will
- * be set and we will write only first BRT_BLOCKSIZE out of 128kB.
- */
- ulong_t *bv_bitmap;
- uint64_t bv_nblocks;
-} brt_vdev_t;
-
-/*
- * In-core brt
- */
-typedef struct brt {
- krwlock_t brt_lock;
- spa_t *brt_spa;
-#define brt_mos brt_spa->spa_meta_objset
- uint64_t brt_rangesize;
- uint64_t brt_usedspace;
- uint64_t brt_savedspace;
- avl_tree_t brt_pending_tree[TXG_SIZE];
- kmutex_t brt_pending_lock[TXG_SIZE];
- /* Sum of all entries across all bv_trees. */
- uint64_t brt_nentries;
- brt_vdev_t *brt_vdevs;
- uint64_t brt_nvdevs;
-} brt_t;
-
-/* Size of bre_offset / sizeof (uint64_t). */
-#define BRT_KEY_WORDS (1)
-
-/*
- * In-core brt entry.
- * On-disk we use bre_offset as the key and bre_refcount as the value.
- */
-typedef struct brt_entry {
- uint64_t bre_offset;
- uint64_t bre_refcount;
- avl_node_t bre_node;
-} brt_entry_t;
-
-typedef struct brt_pending_entry {
- blkptr_t bpe_bp;
- int bpe_count;
- avl_node_t bpe_node;
-} brt_pending_entry_t;
-
static kmem_cache_t *brt_entry_cache;
static kmem_cache_t *brt_pending_entry_cache;
size = (vdev_get_min_asize(vd) - 1) / brt->brt_rangesize + 1;
spa_config_exit(brt->brt_spa, SCL_VDEV, FTAG);
- entcount = kmem_zalloc(sizeof (entcount[0]) * size, KM_SLEEP);
+ entcount = vmem_zalloc(sizeof (entcount[0]) * size, KM_SLEEP);
nblocks = BRT_RANGESIZE_TO_NBLOCKS(size);
bitmap = kmem_zalloc(BT_SIZEOFMAP(nblocks), KM_SLEEP);
sizeof (entcount[0]) * MIN(size, brtvd->bv_size));
memcpy(bitmap, brtvd->bv_bitmap, MIN(BT_SIZEOFMAP(nblocks),
BT_SIZEOFMAP(brtvd->bv_nblocks)));
- kmem_free(brtvd->bv_entcount,
+ vmem_free(brtvd->bv_entcount,
sizeof (entcount[0]) * brtvd->bv_size);
kmem_free(brtvd->bv_bitmap, BT_SIZEOFMAP(brtvd->bv_nblocks));
}
ASSERT(RW_WRITE_HELD(&brt->brt_lock));
ASSERT(brtvd->bv_initiated);
- kmem_free(brtvd->bv_entcount, sizeof (uint16_t) * brtvd->bv_size);
+ vmem_free(brtvd->bv_entcount, sizeof (uint16_t) * brtvd->bv_size);
brtvd->bv_entcount = NULL;
kmem_free(brtvd->bv_bitmap, BT_SIZEOFMAP(brtvd->bv_nblocks));
brtvd->bv_bitmap = NULL;
return (B_FALSE);
}
+/*
+ * Return the current BRT reference count for the block pointed to by bp,
+ * or 0 if the block has no BRT entry (i.e. it was never cloned).
+ */
+uint64_t
+brt_entry_get_refcount(spa_t *spa, const blkptr_t *bp)
+{
+	brt_t *brt = spa->spa_brt;
+	brt_vdev_t *brtvd;
+	brt_entry_t bre_search, *bre;
+	uint64_t vdevid, refcnt;
+	int error;
+
+	/* Build the lookup key (vdev id + offset) from the block pointer. */
+	brt_entry_fill(bp, &bre_search, &vdevid);
+
+	brt_rlock(brt);
+
+	brtvd = brt_vdev(brt, vdevid);
+	ASSERT(brtvd != NULL);
+
+	/*
+	 * Consult the in-core tree of not-yet-synced entries first; if the
+	 * entry is not there, fall back to the on-disk entries table.
+	 */
+	bre = avl_find(&brtvd->bv_tree, &bre_search, NULL);
+	if (bre == NULL) {
+		error = brt_entry_lookup(brt, brtvd, &bre_search);
+		/* ENOENT simply means the block has no BRT entry. */
+		ASSERT(error == 0 || error == ENOENT);
+		if (error == ENOENT)
+			refcnt = 0;
+		else
+			refcnt = bre_search.bre_refcount;
+	} else
+		refcnt = bre->bre_refcount;
+
+	brt_unlock(brt);
+	return (refcnt);
+}
+
static void
brt_prefetch(brt_t *brt, const blkptr_t *bp)
{