From 2a16d4cfaf791ba2a6f61b29d1e3f2e7b675f913 Mon Sep 17 00:00:00 2001 From: Sara Hartse Date: Thu, 31 May 2018 10:36:37 -0700 Subject: [PATCH] zpool reopen should detect expanded devices Update bdev_capacity to have wholedisk vdevs query the size of the underlying block device (correcting for the size of the efi partition and partition alignment) and therefore detect expanded space. Correct vdev_get_stats_ex so that the expandsize is aligned to metaslab size and new space is only reported if it is large enough for a new metaslab. Reviewed by: Don Brady Reviewed-by: Brian Behlendorf Reviewed by: George Wilson Reviewed-by: Matthew Ahrens Reviewed by: John Wren Kennedy Signed-off-by: sara hartse External-issue: LX-165 Closes #7546 Issue #7582 --- include/sys/vdev_disk.h | 12 +++++ lib/libefi/rdwr_efi.c | 20 ++++++- lib/libzfs/libzfs_pool.c | 14 +---- module/zfs/vdev.c | 3 +- module/zfs/vdev_disk.c | 46 ++++++++++++---- .../zpool_expand/zpool_expand_002_pos.ksh | 54 +++++++++++++------ 6 files changed, 107 insertions(+), 42 deletions(-) diff --git a/include/sys/vdev_disk.h b/include/sys/vdev_disk.h index 15570b105..b8a32b316 100644 --- a/include/sys/vdev_disk.h +++ b/include/sys/vdev_disk.h @@ -23,11 +23,23 @@ * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Brian Behlendorf . * LLNL-CODE-403049. + * Copyright (c) 2018 by Delphix. All rights reserved. */ #ifndef _SYS_VDEV_DISK_H #define _SYS_VDEV_DISK_H +/* + * Don't start the slice at the default block of 34; many storage + * devices will use a stripe width of 128k, other vendors prefer a 1m + * alignment. It is best to play it safe and ensure a 1m alignment + * given 512B blocks. When the block size is larger by a power of 2 + * we will still be 1m aligned. Some devices are sensitive to the + * partition ending alignment as well. 
+ */ +#define NEW_START_BLOCK 2048 +#define PARTITION_END_ALIGNMENT 2048 + #ifdef _KERNEL #include diff --git a/lib/libefi/rdwr_efi.c b/lib/libefi/rdwr_efi.c index 7935047eb..19cb17e5a 100644 --- a/lib/libefi/rdwr_efi.c +++ b/lib/libefi/rdwr_efi.c @@ -22,6 +22,7 @@ /* * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright 2012 Nexenta Systems, Inc. All rights reserved. + * Copyright (c) 2018 by Delphix. All rights reserved. */ #include @@ -1153,7 +1154,7 @@ efi_use_whole_disk(int fd) /* * Find the last physically non-zero partition. - * This is the reserved partition. + * This should be the reserved partition. */ for (i = 0; i < efi_label->efi_nparts; i ++) { if (resv_start < efi_label->efi_parts[i].p_start) { @@ -1162,6 +1163,23 @@ } } + /* + * Verify that we've found the reserved partition by checking + * that it looks the way it did when we created it in zpool_label_disk. + * If we've found the incorrect partition, then we know that this + * device was reformatted and no longer is solely used by ZFS. + */ + if ((efi_label->efi_parts[resv_index].p_size != EFI_MIN_RESV_SIZE) || + (efi_label->efi_parts[resv_index].p_tag != V_RESERVED) || + (resv_index != 8)) { + if (efi_debug) { + (void) fprintf(stderr, + "efi_use_whole_disk: wholedisk not available\n"); + } + efi_free(efi_label); + return (VT_ENOSPC); + } + /* * Find the last physically non-zero partition before that. * This is the data partition. diff --git a/lib/libzfs/libzfs_pool.c b/lib/libzfs/libzfs_pool.c index e00d5f51d..53bc50340 100644 --- a/lib/libzfs/libzfs_pool.c +++ b/lib/libzfs/libzfs_pool.c @@ -22,7 +22,7 @@ /* * Copyright 2015 Nexenta Systems, Inc. All rights reserved. * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2011, 2014 by Delphix. All rights reserved. + * Copyright (c) 2011, 2018 by Delphix. All rights reserved. * Copyright 2016 Igor Kozhukhov * Copyright (c) 2017 Datto Inc. 
*/ @@ -42,6 +42,7 @@ #include #include #include +#include #include #include "zfs_namecheck.h" @@ -912,17 +913,6 @@ zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf, return (0); } -/* - * Don't start the slice at the default block of 34; many storage - * devices will use a stripe width of 128k, other vendors prefer a 1m - * alignment. It is best to play it safe and ensure a 1m alignment - * given 512B blocks. When the block size is larger by a power of 2 - * we will still be 1m aligned. Some devices are sensitive to the - * partition ending alignment as well. - */ -#define NEW_START_BLOCK 2048 -#define PARTITION_END_ALIGNMENT 2048 - /* * Validate the given pool name, optionally putting an extended error message in * 'buf'. diff --git a/module/zfs/vdev.c b/module/zfs/vdev.c index acac2a973..b643bd354 100644 --- a/module/zfs/vdev.c +++ b/module/zfs/vdev.c @@ -21,7 +21,7 @@ /* * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2011, 2015 by Delphix. All rights reserved. + * Copyright (c) 2011, 2018 by Delphix. All rights reserved. * Copyright 2017 Nexenta Systems, Inc. * Copyright (c) 2014 Integros [integros.com] * Copyright 2016 Toomas Soome @@ -3039,7 +3039,6 @@ vdev_get_stats_ex(vdev_t *vd, vdev_stat_t *vs, vdev_stat_ex_t *vsx) vd->vdev_max_asize - vd->vdev_asize, 1ULL << tvd->vdev_ms_shift); } - vs->vs_esize = vd->vdev_max_asize - vd->vdev_asize; if (vd->vdev_aux == NULL && vd == vd->vdev_top && !vd->vdev_ishole) { vs->vs_fragmentation = vd->vdev_mg->mg_fragmentation; diff --git a/module/zfs/vdev_disk.c b/module/zfs/vdev_disk.c index 6761e755a..6dc0544fc 100644 --- a/module/zfs/vdev_disk.c +++ b/module/zfs/vdev_disk.c @@ -23,7 +23,7 @@ * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Rewritten for Linux by Brian Behlendorf . * LLNL-CODE-403049. - * Copyright (c) 2012, 2015 by Delphix. All rights reserved. + * Copyright (c) 2012, 2018 by Delphix. All rights reserved. 
*/ #include @@ -35,10 +35,14 @@ #include #include #include +#include char *zfs_vdev_scheduler = VDEV_SCHEDULER; static void *zfs_vdev_holder = VDEV_HOLDER; +/* size of the "reserved" partition, in blocks */ +#define EFI_MIN_RESV_SIZE (16 * 1024) + /* * Virtual device vector for disks. */ @@ -82,17 +86,39 @@ vdev_bdev_mode(int smode) } #endif /* HAVE_OPEN_BDEV_EXCLUSIVE */ +/* The capacity (in bytes) of a bdev that is available to be used by a vdev */ static uint64_t -bdev_capacity(struct block_device *bdev) +bdev_capacity(struct block_device *bdev, boolean_t wholedisk) { struct hd_struct *part = bdev->bd_part; + uint64_t sectors = get_capacity(bdev->bd_disk); + /* If there are no partitions, return the entire device capacity */ + if (part == NULL) + return (sectors << SECTOR_BITS); - /* The partition capacity referenced by the block device */ - if (part) - return (part->nr_sects << 9); - - /* Otherwise assume the full device capacity */ - return (get_capacity(bdev->bd_disk) << 9); + /* + * If there are partitions, decide if we are using a `wholedisk` + * layout (composed of part1 and part9) or just a single partition. + */ + if (wholedisk) { + /* Verify the expected device layout */ + ASSERT3P(bdev, !=, bdev->bd_contains); + /* + * Sectors used by the EFI partition (part9) as well as + * partition alignment. + */ + uint64_t used = EFI_MIN_RESV_SIZE + NEW_START_BLOCK + + PARTITION_END_ALIGNMENT; + + /* Space available to the vdev, i.e. 
the size of part1 */ + if (sectors <= used) + return (0); + uint64_t available = sectors - used; + return (available << SECTOR_BITS); + } else { + /* The partition capacity referenced by the block device */ + return (part->nr_sects << SECTOR_BITS); + } } static void @@ -328,9 +354,7 @@ skip_open: v->vdev_nonrot = blk_queue_nonrot(bdev_get_queue(vd->vd_bdev)); /* Physical volume size in bytes */ - *psize = bdev_capacity(vd->vd_bdev); - - /* TODO: report possible expansion size */ + *psize = bdev_capacity(vd->vd_bdev, v->vdev_wholedisk); *max_psize = *psize; /* Based on the minimum sector size set the block size */ diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_expand/zpool_expand_002_pos.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_expand/zpool_expand_002_pos.ksh index d578ae602..66b6969db 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zpool_expand/zpool_expand_002_pos.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zpool_expand/zpool_expand_002_pos.ksh @@ -26,7 +26,7 @@ # # -# Copyright (c) 2012, 2016 by Delphix. All rights reserved. +# Copyright (c) 2012, 2018 by Delphix. All rights reserved. # Copyright (c) 2017 Lawrence Livermore National Security, LLC. 
# @@ -43,8 +43,9 @@ # 1) Create 3 files # 2) Create a pool backed by the files # 3) Expand the files' size with truncate -# 4) Use zpool online -e to online the vdevs -# 5) Check that the pool size was expanded +# 4) Use zpool reopen to check the expandsize +# 5) Use zpool online -e to online the vdevs +# 6) Check that the pool size was expanded # verify_runnable "global" @@ -64,8 +65,8 @@ log_onexit cleanup log_assert "zpool can expand after zpool online -e zvol vdevs on LUN expansion" - for type in " " mirror raidz raidz2; do + # Initialize the file devices and the pool for i in 1 2 3; do log_must truncate -s $org_size ${TEMPFILE}.$i done @@ -80,13 +81,35 @@ for type in " " mirror raidz raidz2; do "$autoexp" fi typeset prev_size=$(get_pool_prop size $TESTPOOL1) - typeset zfs_prev_size=$(zfs get -p avail $TESTPOOL1 | tail -1 | \ - awk '{print $3}') + typeset zfs_prev_size=$(get_prop avail $TESTPOOL1) + # Increase the size of the file devices for i in 1 2 3; do log_must truncate -s $exp_size ${TEMPFILE}.$i done + # Reopen the pool and check that the `expandsize` property is set + log_must zpool reopen $TESTPOOL1 + typeset zpool_expandsize=$(get_pool_prop expandsize $TESTPOOL1) + + if [[ $type == "mirror" ]]; then + typeset expected_zpool_expandsize=$(($exp_size-$org_size)) + else + typeset expected_zpool_expandsize=$((3*($exp_size-$org_size))) + fi + + if [[ "$zpool_expandsize" = "-" ]]; then + log_fail "pool $TESTPOOL1 did not detect any " \ + "expandsize after reopen" + fi + + if [[ $zpool_expandsize -ne $expected_zpool_expandsize ]]; then + log_fail "pool $TESTPOOL1 did not detect correct " \ + "expandsize after reopen: found $zpool_expandsize," \ + "expected $expected_zpool_expandsize" + fi + + # Online the devices to add the new space to the pool for i in 1 2 3; do log_must zpool online -e $TESTPOOL1 ${TEMPFILE}.$i done @@ -96,8 +119,7 @@ for type in " " mirror raidz raidz2; do sync typeset expand_size=$(get_pool_prop size $TESTPOOL1) - typeset 
zfs_expand_size=$(zfs get -p avail $TESTPOOL1 | tail -1 | \ - awk '{print $3}') + typeset zfs_expand_size=$(get_prop avail $TESTPOOL1) log_note "$TESTPOOL1 $type has previous size: $prev_size and " \ "expanded size: $expand_size" @@ -112,8 +134,8 @@ for type in " " mirror raidz raidz2; do grep "(+${expansion_size}" | wc -l) if [[ $size_addition -ne $i ]]; then - log_fail "pool $TESTPOOL1 is not autoexpand " \ - "after LUN expansion" + log_fail "pool $TESTPOOL1 did not expand " \ + "after LUN expansion and zpool online -e" fi elif [[ $type == "mirror" ]]; then typeset expansion_size=$(($exp_size-$org_size)) @@ -123,8 +145,8 @@ for type in " " mirror raidz raidz2; do grep "(+${expansion_size})" >/dev/null 2>&1 if [[ $? -ne 0 ]]; then - log_fail "pool $TESTPOOL1 is not autoexpand " \ - "after LUN expansion" + log_fail "pool $TESTPOOL1 did not expand " \ + "after LUN expansion and zpool online -e" fi else typeset expansion_size=$((3*($exp_size-$org_size))) @@ -134,13 +156,13 @@ for type in " " mirror raidz raidz2; do grep "(+${expansion_size})" >/dev/null 2>&1 if [[ $? -ne 0 ]] ; then - log_fail "pool $TESTPOOL1 is not autoexpand " \ - "after LUN expansion" + log_fail "pool $TESTPOOL1 did not expand " \ + "after LUN expansion and zpool online -e" fi fi else - log_fail "pool $TESTPOOL1 is not autoexpanded after LUN " \ - "expansion" + log_fail "pool $TESTPOOL1 did not expand after LUN expansion " \ + "and zpool online -e" fi log_must zpool destroy $TESTPOOL1 done -- 2.39.5