return (strcmp(propb, propa));
}
-/*ARGSUSED*/
static int
compare_mountpoints(const void *a, const void *b, void *unused)
{
* hierarchy that is different from the dataset hierarchy, and still
* allow it to be changed.
*/
+ (void) unused;
return (compare_props(a, b, ZFS_PROP_MOUNTPOINT));
}
-/*ARGSUSED*/
static int
compare_dataset_names(const void *a, const void *b, void *unused)
{
+ (void) unused;
return (compare_props(a, b, ZFS_PROP_NAME));
}
uu_avl_node_t cn_avl;
} config_node_t;
-/* ARGSUSED */
static int
config_node_compare(const void *a, const void *b, void *unused)
{
- int ret;
-
+ (void) unused;
const config_node_t *ca = (config_node_t *)a;
const config_node_t *cb = (config_node_t *)b;
- ret = strcmp(ca->cn_name, cb->cn_name);
+ int ret = strcmp(ca->cn_name, cb->cn_name);
if (ret < 0)
return (-1);
const char *fsname, zfs_keyformat_t keyformat, boolean_t newkey,
uint8_t **restrict buf, size_t *restrict len_out)
{
+ (void) fsname, (void) newkey;
FILE *f = NULL;
int ret = 0;
const char *fsname, zfs_keyformat_t keyformat, boolean_t newkey,
uint8_t **restrict buf, size_t *restrict len_out)
{
+ (void) fsname, (void) newkey;
int ret = 0;
FILE *key = NULL;
boolean_t is_http = strncmp(uri, "http:", strlen("http:")) == 0;
static int
derive_key(libzfs_handle_t *hdl, zfs_keyformat_t format, uint64_t iters,
- uint8_t *key_material, size_t key_material_len, uint64_t salt,
+ uint8_t *key_material, uint64_t salt,
uint8_t **key_out)
{
int ret;
}
/* derive a key from the key material */
- ret = derive_key(hdl, keyformat, iters, key_material, key_material_len,
- salt, &key_data);
+ ret = derive_key(hdl, keyformat, iters, key_material, salt, &key_data);
if (ret != 0)
goto error;
zfs_crypto_clone_check(libzfs_handle_t *hdl, zfs_handle_t *origin_zhp,
char *parent_name, nvlist_t *props)
{
+ (void) origin_zhp, (void) parent_name;
char errbuf[1024];
(void) snprintf(errbuf, sizeof (errbuf),
goto error;
/* derive a key from the key material */
- ret = derive_key(zhp->zfs_hdl, keyformat, iters, key_material,
- key_material_len, salt, &key_data);
+ ret = derive_key(zhp->zfs_hdl, keyformat, iters, key_material, salt,
+ &key_data);
if (ret != 0)
goto error;
if (errno != 0 || *end != '\0')
return (EINVAL);
#else
+ (void) domainlen;
return (ENOSYS);
#endif /* HAVE_IDMAP */
} else {
#include "libzfs_impl.h"
static int
-zfs_iter_clones(zfs_handle_t *zhp, int flags, zfs_iter_f func, void *data)
+zfs_iter_clones(zfs_handle_t *zhp, zfs_iter_f func, void *data)
{
nvlist_t *nvl = zfs_get_clones_nvl(zhp);
nvpair_t *pair;
int
zfs_iter_bookmarks(zfs_handle_t *zhp, int flags, zfs_iter_f func, void *data)
{
+ (void) flags;
zfs_handle_t *nzhp;
nvlist_t *props = NULL;
nvlist_t *bmarks = NULL;
ida->first = B_FALSE;
if (zhp->zfs_type == ZFS_TYPE_SNAPSHOT) {
- err = zfs_iter_clones(zhp, ida->flags, iter_dependents_cb, ida);
+ err = zfs_iter_clones(zhp, iter_dependents_cb, ida);
} else if (zhp->zfs_type != ZFS_TYPE_BOOKMARK) {
iter_stack_frame_t isf;
iter_stack_frame_t *f;
* Checks any higher order concerns about whether the given dataset is
* mountable, false otherwise. zfs_is_mountable_internal specifically assumes
* that the caller has verified the sanity of mounting the dataset at
- * mountpoint to the extent the caller wants.
+ * its mountpoint to the extent the caller wants.
*/
static boolean_t
-zfs_is_mountable_internal(zfs_handle_t *zhp, const char *mountpoint)
+zfs_is_mountable_internal(zfs_handle_t *zhp)
{
-
if (zfs_prop_get_int(zhp, ZFS_PROP_ZONED) &&
getzoneid() == GLOBAL_ZONEID)
return (B_FALSE);
if (zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT) == ZFS_CANMOUNT_OFF)
return (B_FALSE);
- if (!zfs_is_mountable_internal(zhp, buf))
+ if (!zfs_is_mountable_internal(zhp))
return (B_FALSE);
if (zfs_prop_get_int(zhp, ZFS_PROP_REDACTED) && !(flags & MS_FORCE))
remount = 1;
/* Potentially duplicates some checks if invoked by zfs_mount(). */
- if (!zfs_is_mountable_internal(zhp, mountpoint))
+ if (!zfs_is_mountable_internal(zhp))
return (0);
/*
#define NMSGID (sizeof (zfs_msgid_table) / sizeof (zfs_msgid_table[0]))
-/* ARGSUSED */
static int
vdev_missing(vdev_stat_t *vs, uint_t vsc)
{
+ (void) vsc;
return (vs->vs_state == VDEV_STATE_CANT_OPEN &&
vs->vs_aux == VDEV_AUX_OPEN_FAILED);
}
-/* ARGSUSED */
static int
vdev_faulted(vdev_stat_t *vs, uint_t vsc)
{
+ (void) vsc;
return (vs->vs_state == VDEV_STATE_FAULTED);
}
-/* ARGSUSED */
static int
vdev_errors(vdev_stat_t *vs, uint_t vsc)
{
+ (void) vsc;
return (vs->vs_state == VDEV_STATE_DEGRADED ||
vs->vs_read_errors != 0 || vs->vs_write_errors != 0 ||
vs->vs_checksum_errors != 0);
}
-/* ARGSUSED */
static int
vdev_broken(vdev_stat_t *vs, uint_t vsc)
{
+ (void) vsc;
return (vs->vs_state == VDEV_STATE_CANT_OPEN);
}
-/* ARGSUSED */
static int
vdev_offlined(vdev_stat_t *vs, uint_t vsc)
{
+ (void) vsc;
return (vs->vs_state == VDEV_STATE_OFFLINE);
}
-/* ARGSUSED */
static int
vdev_removed(vdev_stat_t *vs, uint_t vsc)
{
+ (void) vsc;
return (vs->vs_state == VDEV_STATE_REMOVED);
}
/*
 * Unmount the filesystem at mntpt via unmount(2), passing flags
 * through.  Returns 0 on success, or errno on failure.  The handle is
 * unused on this platform but kept for interface parity with other
 * OS implementations.
 */
int
do_unmount(zfs_handle_t *zhp, const char *mntpt, int flags)
{
	(void) zhp;
	if (unmount(mntpt, flags) < 0)
		return (errno);
	return (0);
}
/*
 * OS-specific hook invoked when disabling a pool's datasets; this
 * platform needs no extra work, so the implementation is empty.
 */
void
zpool_disable_datasets_os(zpool_handle_t *zhp, boolean_t force)
{
	(void) zhp, (void) force;
}
/*
 * Called from the tail end of zfs_unmount().  OS-specific hook for
 * tearing down a volume; this platform needs no extra work.
 */
void
zpool_disable_volume_os(const char *name)
{
	(void) name;
}
int
do_unmount(zfs_handle_t *zhp, const char *mntpt, int flags)
{
+ (void) zhp;
+
if (!libzfs_envvar_is_set("ZFS_MOUNT_HELPER")) {
int rv = umount2(mntpt, flags);
/*
 * OS-specific hook invoked when disabling a pool's datasets; this
 * platform needs no extra work, so the implementation is empty.
 */
void
zpool_disable_datasets_os(zpool_handle_t *zhp, boolean_t force)
{
	(void) zhp, (void) force;
}
/*
 * Called from the tail end of zfs_unmount().  OS-specific hook for
 * tearing down a volume; this platform needs no extra work.
 */
void
zpool_disable_volume_os(const char *name)
{
	(void) name;
}
/*
 * OS-specific hook for destroying a batch of snapshots; this platform
 * requires no extra work, so always succeed.  Returns 0.
 */
int
zfs_destroy_snaps_nvl_os(libzfs_handle_t *hdl, nvlist_t *snaps)
{
	(void) hdl, (void) snaps;
	return (0);
}
/* Indicate that benchmark has been completed */
static boolean_t fletcher_4_initialized = B_FALSE;
-/*ARGSUSED*/
void
fletcher_init(zio_cksum_t *zcp)
{
return (0);
}
-/*ARGSUSED*/
void
fletcher_2_native(const void *buf, uint64_t size,
const void *ctx_template, zio_cksum_t *zcp)
{
+ (void) ctx_template;
fletcher_init(zcp);
(void) fletcher_2_incremental_native((void *) buf, size, zcp);
}
return (0);
}
-/*ARGSUSED*/
void
fletcher_2_byteswap(const void *buf, uint64_t size,
const void *ctx_template, zio_cksum_t *zcp)
{
+ (void) ctx_template;
fletcher_init(zcp);
(void) fletcher_2_incremental_byteswap((void *) buf, size, zcp);
}
ops->fini_native(&ctx, zcp);
}
-/*ARGSUSED*/
void
fletcher_4_native(const void *buf, uint64_t size,
const void *ctx_template, zio_cksum_t *zcp)
{
+ (void) ctx_template;
const uint64_t p2size = P2ALIGN(size, FLETCHER_MIN_SIMD_SIZE);
ASSERT(IS_P2ALIGNED(size, sizeof (uint32_t)));
ops->fini_byteswap(&ctx, zcp);
}
-/*ARGSUSED*/
void
fletcher_4_byteswap(const void *buf, uint64_t size,
const void *ctx_template, zio_cksum_t *zcp)
{
+ (void) ctx_template;
const uint64_t p2size = P2ALIGN(size, FLETCHER_MIN_SIMD_SIZE);
ASSERT(IS_P2ALIGNED(size, sizeof (uint32_t)));