TQ_SLEEP), !=, 0);
}
-/* ARGSUSED */
static void
vdev_file_io_done(zio_t *zio)
{
+ (void) zio;
}
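
This first hunk is representative of the whole change: the lint-era /* ARGSUSED */ annotation is dropped, and each unused parameter is instead read once through a (void) cast, which both documents the intent in the code itself and silences -Wunused-parameter. A minimal standalone sketch of the idiom, using hypothetical demo_* names that are not part of this change:

#include <stdio.h>

/* An interface type that fixes the callback signature. */
typedef void (*io_done_cb_t)(int status, void *arg);

/*
 * Under the old convention this function would carry an ARGSUSED comment,
 * which only lint(1) understood.  The explicit casts below compile to
 * nothing, silence -Wunused-parameter, and keep the signature that the
 * interface requires.
 */
static void
demo_io_done(int status, void *arg)
{
	(void) status;
	(void) arg;
}

int
main(void)
{
	io_done_cb_t cb = demo_io_done;

	cb(0, NULL);
	(void) printf("callback dispatched\n");
	return (0);
}
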
vdev_ops_t vdev_file_ops = {
racct_add_force(curproc, RACCT_READIOPS, iops);
PROC_UNLOCK(curproc);
}
+#else
+ (void) size;
#endif /* RACCT */
}
racct_add_force(curproc, RACCT_WRITEIOPS, iops);
PROC_UNLOCK(curproc);
}
+#else
+ (void) size;
#endif /* RACCT */
}
* and le_bswap indicates whether a byteswap is needed to get this block
* into little endian format.
*/
-/* ARGSUSED */
int
zio_crypt_do_objset_hmacs(zio_crypt_key_t *key, void *data, uint_t datalen,
boolean_t should_bswap, uint8_t *portable_mac, uint8_t *local_mac)
* It also means we'll only return one zfs_uio_t.
*/
-/* ARGSUSED */
static int
zio_crypt_init_uios_zil(boolean_t encrypt, uint8_t *plainbuf,
uint8_t *cipherbuf, uint_t datalen, boolean_t byteswap, zfs_uio_t *puio,
zfs_uio_t *out_uio, uint_t *enc_len, uint8_t **authbuf, uint_t *auth_len,
boolean_t *no_crypt)
{
+ (void) puio;
uint8_t *aadbuf = zio_buf_alloc(datalen);
uint8_t *src, *dst, *slrp, *dlrp, *blkend, *aadp;
iovec_t *dst_iovecs;
return (0);
}
-/* ARGSUSED */
static int
zio_crypt_init_uios_normal(boolean_t encrypt, uint8_t *plainbuf,
uint8_t *cipherbuf, uint_t datalen, zfs_uio_t *puio, zfs_uio_t *out_uio,
uint_t *enc_len)
{
+ (void) puio;
int ret;
uint_t nr_plain = 1, nr_cipher = 2;
iovec_t *plain_iovecs = NULL, *cipher_iovecs = NULL;
TQ_SLEEP), !=, TASKQID_INVALID);
}
-/* ARGSUSED */
static void
vdev_file_io_done(zio_t *zio)
{
+ (void) zio;
}
vdev_ops_t vdev_file_ops = {
void
zfs_racct_read(uint64_t size, uint64_t iops)
{
+ (void) size, (void) iops;
}
void
zfs_racct_write(uint64_t size, uint64_t iops)
{
+ (void) size, (void) iops;
}
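
The userland zfs_racct stubs fold several casts into one statement: (void) size, (void) iops; is a single expression statement built with the comma operator, equivalent to two separate cast statements. A hedged sketch, with a hypothetical demo_racct_read() that is not part of this change:

#include <stdint.h>

/* Userland stub: resource accounting only exists in the kernel build. */
static void
demo_racct_read(uint64_t size, uint64_t iops)
{
	/* One statement, two discarded reads, via the comma operator. */
	(void) size, (void) iops;
}

int
main(void)
{
	demo_racct_read(4096, 1);
	return (0);
}
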
uint8_t *cipherbuf, uint_t datalen, zfs_uio_t *puio, zfs_uio_t *cuio,
uint_t *enc_len)
{
+ (void) encrypt;
int ret;
uint_t nr_plain = 1, nr_cipher = 2;
iovec_t *plain_iovecs = NULL, *cipher_iovecs = NULL;
&ba_ptr);
}
-/*ARGSUSED*/
static int
abd_zero_off_cb(void *buf, size_t size, void *private)
{
+ (void) private;
(void) memset(buf, 0, size);
return (0);
}
return (ret);
}
-/*ARGSUSED*/
static int
abd_copy_off_cb(void *dbuf, void *sbuf, size_t size, void *private)
{
+ (void) private;
(void) memcpy(dbuf, sbuf, size);
return (0);
}
abd_copy_off_cb, NULL);
}
-/*ARGSUSED*/
static int
abd_cmp_cb(void *bufa, void *bufb, size_t size, void *private)
{
+ (void) private;
return (memcmp(bufa, bufb, size));
}
static void
buf_fini(void)
{
- int i;
-
#if defined(_KERNEL)
/*
* Large allocations which do not require contiguous pages
kmem_free(buf_hash_table.ht_table,
(buf_hash_table.ht_mask + 1) * sizeof (void *));
#endif
- for (i = 0; i < BUF_LOCKS; i++)
+ for (int i = 0; i < BUF_LOCKS; i++)
mutex_destroy(BUF_HASH_LOCK(i));
kmem_cache_destroy(hdr_full_cache);
kmem_cache_destroy(hdr_full_crypt_cache);
* Constructor callback - called when the cache is empty
* and a new buf is requested.
*/
-/* ARGSUSED */
static int
hdr_full_cons(void *vbuf, void *unused, int kmflag)
{
+ (void) unused, (void) kmflag;
arc_buf_hdr_t *hdr = vbuf;
bzero(hdr, HDR_FULL_SIZE);
return (0);
}
-/* ARGSUSED */
static int
hdr_full_crypt_cons(void *vbuf, void *unused, int kmflag)
{
arc_buf_hdr_t *hdr = vbuf;
hdr_full_cons(vbuf, unused, kmflag);
return (0);
}
-/* ARGSUSED */
static int
hdr_l2only_cons(void *vbuf, void *unused, int kmflag)
{
+ (void) unused, (void) kmflag;
arc_buf_hdr_t *hdr = vbuf;
bzero(hdr, HDR_L2ONLY_SIZE);
return (0);
}
-/* ARGSUSED */
static int
buf_cons(void *vbuf, void *unused, int kmflag)
{
+ (void) unused, (void) kmflag;
arc_buf_t *buf = vbuf;
bzero(buf, sizeof (arc_buf_t));
* Destructor callback - called when a cached buf is
* no longer required.
*/
-/* ARGSUSED */
static void
hdr_full_dest(void *vbuf, void *unused)
{
+ (void) unused;
arc_buf_hdr_t *hdr = vbuf;
ASSERT(HDR_EMPTY(hdr));
arc_space_return(HDR_FULL_SIZE, ARC_SPACE_HDRS);
}
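
The constructor and destructor callbacks above follow the kmem_cache contract: the cache hands every constructor the raw buffer, a private argument, and an allocation flag, and every destructor the buffer and private argument, so callbacks that need none of them must still accept them. A hedged user-space model of that contract, with hypothetical demo_* names (the real kmem_cache API is kernel-only):

#include <stdlib.h>
#include <string.h>

typedef int (*cache_cons_t)(void *buf, void *priv, int kmflag);
typedef void (*cache_dest_t)(void *buf, void *priv);

typedef struct demo_cache {
	size_t		dc_size;
	cache_cons_t	dc_cons;
	cache_dest_t	dc_dest;
} demo_cache_t;

/* Constructor: runs when the cache is empty and a new object is built. */
static int
demo_cons(void *buf, void *priv, int kmflag)
{
	(void) priv, (void) kmflag;
	(void) memset(buf, 0, sizeof (int));
	return (0);
}

/* Destructor: runs when a cached object is finally torn down. */
static void
demo_dest(void *buf, void *priv)
{
	(void) buf, (void) priv;
}

static void *
demo_cache_alloc(demo_cache_t *dc)
{
	void *buf = malloc(dc->dc_size);

	if (buf != NULL && dc->dc_cons(buf, NULL, 0) != 0) {
		free(buf);
		return (NULL);
	}
	return (buf);
}

int
main(void)
{
	demo_cache_t dc = { sizeof (int), demo_cons, demo_dest };
	void *obj = demo_cache_alloc(&dc);

	if (obj != NULL) {
		dc.dc_dest(obj, NULL);
		free(obj);
	}
	return (0);
}
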
-/* ARGSUSED */
static void
hdr_full_crypt_dest(void *vbuf, void *unused)
{
arc_buf_hdr_t *hdr = vbuf;
hdr_full_dest(vbuf, unused);
arc_space_return(sizeof (hdr->b_crypt_hdr), ARC_SPACE_HDRS);
}
-/* ARGSUSED */
static void
hdr_l2only_dest(void *vbuf, void *unused)
{
- arc_buf_hdr_t *hdr __maybe_unused = vbuf;
+ (void) unused;
+ arc_buf_hdr_t *hdr = vbuf;
ASSERT(HDR_EMPTY(hdr));
arc_space_return(HDR_L2ONLY_SIZE, ARC_SPACE_L2HDRS);
}
-/* ARGSUSED */
static void
buf_dest(void *vbuf, void *unused)
{
+ (void) unused;
arc_buf_t *buf = vbuf;
mutex_destroy(&buf->b_evict_lock);
void
arc_buf_sigsegv(int sig, siginfo_t *si, void *unused)
{
+ (void) sig, (void) unused;
panic("Got SIGSEGV at address: 0x%lx\n", (long)si->si_addr);
}
#endif
-/* ARGSUSED */
static void
arc_buf_unwatch(arc_buf_t *buf)
{
ASSERT0(mprotect(buf->b_data, arc_buf_size(buf),
PROT_READ | PROT_WRITE));
}
+#else
+ (void) buf;
#endif
}
-/* ARGSUSED */
static void
arc_buf_watch(arc_buf_t *buf)
{
if (arc_watch)
ASSERT0(mprotect(buf->b_data, arc_buf_size(buf),
PROT_READ));
+#else
+ (void) buf;
#endif
}
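
When a parameter is consumed in only one preprocessor branch, as in arc_buf_watch() above, the (void) cast belongs in the other branch so that every configuration compiles without warnings. A hedged sketch of the shape, with a hypothetical DEMO_WATCH macro:

#include <stdio.h>

/* Build with -DDEMO_WATCH to exercise the "used" branch. */
static void
demo_buf_watch(char *buf, size_t len)
{
#ifdef DEMO_WATCH
	(void) printf("watching %zu bytes at %p\n", len, (void *)buf);
#else
	/* Parameters are only consumed above; keep this branch quiet too. */
	(void) buf, (void) len;
#endif
}

int
main(void)
{
	char data[16];

	demo_buf_watch(data, sizeof (data));
	return (0);
}
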
* arc_buf_fill().
*/
static void
-arc_buf_untransform_in_place(arc_buf_t *buf, kmutex_t *hash_lock)
+arc_buf_untransform_in_place(arc_buf_t *buf)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
if (hash_lock != NULL)
mutex_enter(hash_lock);
- arc_buf_untransform_in_place(buf, hash_lock);
+ arc_buf_untransform_in_place(buf);
if (hash_lock != NULL)
mutex_exit(hash_lock);
void
arc_buf_info(arc_buf_t *ab, arc_buf_info_t *abi, int state_index)
{
+ (void) state_index;
arc_buf_hdr_t *hdr = ab->b_hdr;
l1arc_buf_hdr_t *l1hdr = NULL;
l2arc_buf_hdr_t *l2hdr = NULL;
abd_cache_reap_now();
}
-/* ARGSUSED */
static boolean_t
arc_evict_cb_check(void *arg, zthr_t *zthr)
{
+ (void) arg, (void) zthr;
+
#ifdef ZFS_DEBUG
/*
* This is necessary in order to keep the kstat information
* Keep arc_size under arc_c by running arc_evict which evicts data
* from the ARC.
*/
-/* ARGSUSED */
static void
arc_evict_cb(void *arg, zthr_t *zthr)
{
+ (void) arg, (void) zthr;
+
uint64_t evicted = 0;
fstrans_cookie_t cookie = spl_fstrans_mark();
spl_fstrans_unmark(cookie);
}
-/* ARGSUSED */
static boolean_t
arc_reap_cb_check(void *arg, zthr_t *zthr)
{
+ (void) arg, (void) zthr;
+
int64_t free_memory = arc_available_memory();
static int reap_cb_check_counter = 0;
* target size of the cache (arc_c), causing the arc_evict_cb()
* to free more buffers.
*/
-/* ARGSUSED */
static void
arc_reap_cb(void *arg, zthr_t *zthr)
{
+ (void) arg, (void) zthr;
+
int64_t free_memory;
fstrans_cookie_t cookie = spl_fstrans_mark();
}
/* a generic arc_read_done_func_t which you can use */
-/* ARGSUSED */
void
arc_bcopy_func(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
arc_buf_t *buf, void *arg)
{
+ (void) zio, (void) zb, (void) bp;
+
if (buf == NULL)
return;
}
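
arc_bcopy_func() and arc_getbuf_func() (below) are the two canonical completion styles: copy the data out into a caller-supplied buffer, or hand the buffer pointer back through arg. A hedged user-space analogue of the pair, with hypothetical demo_* names:

#include <stdio.h>
#include <string.h>

typedef void (*read_done_t)(char *buf, size_t len, void *arg);

/* Style 1: copy the result into the caller's buffer (cf. arc_bcopy_func). */
static void
demo_copy_done(char *buf, size_t len, void *arg)
{
	if (buf == NULL)
		return;
	(void) memcpy(arg, buf, len);
}

/* Style 2: hand the buffer itself back through arg (cf. arc_getbuf_func). */
static void
demo_getbuf_done(char *buf, size_t len, void *arg)
{
	char **bufp = arg;

	(void) len;
	*bufp = buf;
}

int
main(void)
{
	char data[] = "payload";
	char copy[sizeof (data)];
	char *ref = NULL;
	read_done_t cb = demo_copy_done;

	cb(data, sizeof (data), copy);
	cb = demo_getbuf_done;
	cb(data, sizeof (data), &ref);
	(void) printf("%s / %s\n", copy, ref);
	return (0);
}
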
/* a generic arc_read_done_func_t */
-/* ARGSUSED */
void
arc_getbuf_func(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
arc_buf_t *buf, void *arg)
{
+ (void) zb, (void) bp;
arc_buf_t **bufp = arg;
if (buf == NULL) {
* This thread feeds the L2ARC at regular intervals. This is the beating
* heart of the L2ARC.
*/
-/* ARGSUSED */
static void
l2arc_feed_thread(void *unused)
{
+ (void) unused;
callb_cpr_t cpr;
l2arc_dev_t *dev;
spa_t *spa;
uint64_t uncomp;
};
-/* ARGSUSED */
static int
space_range_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed, dmu_tx_t *tx)
{
+ (void) bp_freed, (void) tx;
struct space_range_arg *sra = arg;
if (bp->blk_birth > sra->mintxg && bp->blk_birth <= sra->maxtxg) {
* bpobj are designated as free or allocated that information is not preserved
* in bplists.
*/
-/* ARGSUSED */
int
bplist_append_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
+ (void) bp_freed, (void) tx;
bplist_t *bpl = arg;
bplist_append(bpl, bp);
return (0);
dmu_buf_rele(db, FTAG);
}
-/* ARGSUSED */
static int
bptree_visit_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
+ (void) zilog, (void) dnp;
int err;
struct bptree_args *ba = arg;
uint_t dbuf_cache_hiwater_pct = 10;
uint_t dbuf_cache_lowater_pct = 10;
-/* ARGSUSED */
static int
dbuf_cons(void *vdb, void *unused, int kmflag)
{
+ (void) unused, (void) kmflag;
dmu_buf_impl_t *db = vdb;
bzero(db, sizeof (dmu_buf_impl_t));
return (0);
}
-/* ARGSUSED */
static void
dbuf_dest(void *vdb, void *unused)
{
+ (void) unused;
dmu_buf_impl_t *db = vdb;
mutex_destroy(&db->db_mtx);
rw_destroy(&db->db_rwlock);
* of the dbuf cache is at or below the maximum size. Once the dbuf is aged
* out of the cache it is destroyed and becomes eligible for arc eviction.
*/
-/* ARGSUSED */
static void
dbuf_evict_thread(void *unused)
{
+ (void) unused;
callb_cpr_t cpr;
CALLB_CPR_INIT(&cpr, &dbuf_evict_lock, callb_generic_cpr, FTAG);
dbuf_read_done(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
arc_buf_t *buf, void *vdb)
{
+ (void) zb, (void) bp;
dmu_buf_impl_t *db = vdb;
mutex_enter(&db->db_mtx);
* was taken, ENOENT if no action was taken.
*/
static int
-dbuf_read_hole(dmu_buf_impl_t *db, dnode_t *dn, uint32_t flags)
+dbuf_read_hole(dmu_buf_impl_t *db, dnode_t *dn)
{
ASSERT(MUTEX_HELD(&db->db_mtx));
goto early_unlock;
}
- err = dbuf_read_hole(db, dn, flags);
+ err = dbuf_read_hole(db, dn);
if (err == 0)
goto early_unlock;
dl->dr_overridden_by.blk_birth = dr->dr_txg;
}
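
Note the two strategies in play: internal helpers such as dbuf_read_hole() (above) and arc_buf_untransform_in_place() simply lose their dead parameter, with every call site updated, while callbacks whose signature is fixed by an interface must keep the parameter and cast it. A hedged illustration of the distinction, with hypothetical demo_* names:

#include <stdio.h>

/*
 * Internal helper: nothing constrains the signature, so an unused
 * "flags" parameter can be removed outright and the callers updated.
 */
static int
demo_read_hole(int blkid)
{
	return (blkid == 0);
}

/* Interface-bound callback: the signature is fixed, so cast instead. */
typedef int (*check_cb_t)(int blkid, unsigned flags);

static int
demo_check_cb(int blkid, unsigned flags)
{
	(void) flags;
	return (demo_read_hole(blkid));
}

int
main(void)
{
	check_cb_t cb = demo_check_cb;

	(void) printf("%d %d\n", demo_read_hole(0), cb(1, 0));
	return (0);
}
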
-/* ARGSUSED */
void
dmu_buf_fill_done(dmu_buf_t *dbuf, dmu_tx_t *tx)
{
+ (void) tx;
dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
dbuf_states_t old_state;
mutex_enter(&db->db_mtx);
dbuf_issue_final_prefetch_done(zio_t *zio, const zbookmark_phys_t *zb,
const blkptr_t *iobp, arc_buf_t *abuf, void *private)
{
+ (void) zio, (void) zb, (void) iobp;
dbuf_prefetch_arg_t *dpa = private;
dbuf_prefetch_fini(dpa, B_TRUE);
dbuf_prefetch_indirect_done(zio_t *zio, const zbookmark_phys_t *zb,
const blkptr_t *iobp, arc_buf_t *abuf, void *private)
{
+ (void) zb, (void) iobp;
dbuf_prefetch_arg_t *dpa = private;
ASSERT3S(dpa->dpa_zb.zb_level, <, dpa->dpa_curlevel);
}
}
-/* ARGSUSED */
static void
dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
{
+ (void) buf;
dmu_buf_impl_t *db = vdb;
dnode_t *dn;
blkptr_t *bp = zio->io_bp;
dmu_buf_unlock_parent(db, dblt, FTAG);
}
-/* ARGSUSED */
/*
* This function gets called just prior to running through the compression
* stage of the zio pipeline. If we're an indirect block comprised of only
static void
dbuf_write_children_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
{
+ (void) zio, (void) buf;
dmu_buf_impl_t *db = vdb;
dnode_t *dn;
blkptr_t *bp;
* so this callback allows us to retire dirty space gradually, as the physical
* i/os complete.
*/
-/* ARGSUSED */
static void
dbuf_write_physdone(zio_t *zio, arc_buf_t *buf, void *arg)
{
+ (void) buf;
dmu_buf_impl_t *db = arg;
objset_t *os = db->db_objset;
dsl_pool_t *dp = dmu_objset_pool(os);
dsl_pool_undirty_space(dp, delta, zio->io_txg);
}
-/* ARGSUSED */
static void
dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
{
+ (void) buf;
dmu_buf_impl_t *db = vdb;
blkptr_t *bp_orig = &zio->io_bp_orig;
blkptr_t *bp = db->db_blkptr;
* otherwise return false.
* Used below in dmu_free_long_range_impl() to enable abort when unmounting
*/
-/*ARGSUSED*/
static boolean_t
dmu_objset_zfs_unmounting(objset_t *os)
{
#ifdef _KERNEL
if (dmu_objset_type(os) == DMU_OST_ZFS)
return (zfs_get_vfs_flag_unmounted(os));
+#else
+ (void) os;
#endif
return (B_FALSE);
}
dmu_tx_t *dsa_tx;
} dmu_sync_arg_t;
-/* ARGSUSED */
static void
dmu_sync_ready(zio_t *zio, arc_buf_t *buf, void *varg)
{
+ (void) buf;
dmu_sync_arg_t *dsa = varg;
dmu_buf_t *db = dsa->dsa_zgd->zgd_db;
blkptr_t *bp = zio->io_bp;
dmu_sync_ready(zio, NULL, zio->io_private);
}
-/* ARGSUSED */
static void
dmu_sync_done(zio_t *zio, arc_buf_t *buf, void *varg)
{
+ (void) buf;
dmu_sync_arg_t *dsa = varg;
dbuf_dirty_record_t *dr = dsa->dsa_dr;
dmu_buf_impl_t *db = dr->dr_dbuf;
buf[i] = BSWAP_16(buf[i]);
}
-/* ARGSUSED */
void
byteswap_uint8_array(void *vbuf, size_t size)
{
+ (void) vbuf, (void) size;
}
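
byteswap_uint8_array() is deliberately empty: single bytes have no byte order, but the function must exist so that every object type can share one byteswap function table. A hedged sketch of such a table, with hypothetical demo_* names:

#include <stdint.h>
#include <stdio.h>

typedef void (*bswap_func_t)(void *vbuf, size_t size);

static void
demo_bswap_u16(void *vbuf, size_t size)
{
	uint16_t *buf = vbuf;

	for (size_t i = 0; i < size / sizeof (uint16_t); i++)
		buf[i] = (uint16_t)((buf[i] << 8) | (buf[i] >> 8));
}

/* Bytes have no byte order, but the table slot still needs a function. */
static void
demo_bswap_u8(void *vbuf, size_t size)
{
	(void) vbuf, (void) size;
}

static bswap_func_t demo_bswap_table[] = { demo_bswap_u8, demo_bswap_u16 };

int
main(void)
{
	uint16_t v = 0x1234;

	demo_bswap_table[1](&v, sizeof (v));
	(void) printf("0x%04x\n", (unsigned)v);	/* prints 0x3412 */
	return (0);
}
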
void
(((uint64_t)dnp->dn_datablkszsec) << (SPA_MINBLOCKSHIFT + \
(level) * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT)))
-/* ARGSUSED */
static int
diff_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
+ (void) zilog;
dmu_diffarg_t *da = arg;
int err = 0;
dmu_objset_own_impl(dsl_dataset_t *ds, dmu_objset_type_t type,
boolean_t readonly, boolean_t decrypt, void *tag, objset_t **osp)
{
-	int err;
+	(void) tag;

-	err = dmu_objset_from_ds(ds, osp);
+	int err = dmu_objset_from_ds(ds, osp);
if (err != 0) {
return (err);
} else if (type != DMU_OST_ANY && type != (*osp)->os_phys->os_type) {
dsl_crypto_params_t *doca_dcp;
} dmu_objset_create_arg_t;
-/*ARGSUSED*/
static int
dmu_objset_create_check(void *arg, dmu_tx_t *tx)
{
proc_t *doca_proc;
} dmu_objset_clone_arg_t;
-/*ARGSUSED*/
static int
dmu_objset_clone_check(void *arg, dmu_tx_t *tx)
{
}
}
-/* ARGSUSED */
static void
dmu_objset_write_ready(zio_t *zio, arc_buf_t *abuf, void *arg)
{
+ (void) abuf;
blkptr_t *bp = zio->io_bp;
objset_t *os = arg;
dnode_phys_t *dnp = &os->os_phys->os_meta_dnode;
rrw_exit(&os->os_dsl_dataset->ds_bp_rwlock, FTAG);
}
-/* ARGSUSED */
static void
dmu_objset_write_done(zio_t *zio, arc_buf_t *abuf, void *arg)
{
+ (void) abuf;
blkptr_t *bp = zio->io_bp;
blkptr_t *bp_orig = &zio->io_bp_orig;
objset_t *os = arg;
* Third, if there is a deleted object, we need to create a redaction record for
* all of the blocks in that object.
*/
-/*ARGSUSED*/
static int
redact_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
const zbookmark_phys_t *zb, const struct dnode_phys *dnp, void *arg)
{
+ (void) spa, (void) zilog;
struct redact_thread_arg *rta = arg;
struct redact_record *record;
* This is the callback function to traverse_dataset that acts as a worker
* thread for dmu_send_impl.
*/
-/*ARGSUSED*/
static int
send_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
const zbookmark_phys_t *zb, const struct dnode_phys *dnp, void *arg)
{
+ (void) zilog;
struct send_thread_arg *sta = arg;
struct send_range *record;
struct send_merge_thread_arg *smt_arg, boolean_t resuming, objset_t *os,
redaction_list_t *redact_rl, nvlist_t *nvl)
{
+ (void) smt_arg;
dsl_dataset_t *to_ds = dspp->to_ds;
int err = 0;
return (err);
}
-/* ARGSUSED */
static int
traverse_prefetcher(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
+ (void) zilog, (void) dnp;
prefetch_data_t *pfd = arg;
int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE;
arc_flags_t aflags = ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH |
static void
dmu_zfetch_stream_done(void *arg, boolean_t io_issued)
{
+ (void) io_issued;
zstream_t *zs = arg;
if (zfs_refcount_remove(&zs->zs_refs, NULL) == 0)
return (TREE_PCMP(d1, d2));
}
-/* ARGSUSED */
static int
dnode_cons(void *arg, void *unused, int kmflag)
{
+ (void) unused, (void) kmflag;
dnode_t *dn = arg;
- int i;
rw_init(&dn->dn_struct_rwlock, NULL, RW_NOLOCKDEP, NULL);
mutex_init(&dn->dn_mtx, NULL, MUTEX_DEFAULT, NULL);
bzero(&dn->dn_next_blksz[0], sizeof (dn->dn_next_blksz));
bzero(&dn->dn_next_maxblkid[0], sizeof (dn->dn_next_maxblkid));
- for (i = 0; i < TXG_SIZE; i++) {
+ for (int i = 0; i < TXG_SIZE; i++) {
multilist_link_init(&dn->dn_dirty_link[i]);
dn->dn_free_ranges[i] = NULL;
list_create(&dn->dn_dirty_records[i],
return (0);
}
-/* ARGSUSED */
static void
dnode_dest(void *arg, void *unused)
{
- int i;
+ (void) unused;
dnode_t *dn = arg;
rw_destroy(&dn->dn_struct_rwlock);
zfs_refcount_destroy(&dn->dn_tx_holds);
ASSERT(!list_link_active(&dn->dn_link));
- for (i = 0; i < TXG_SIZE; i++) {
+ for (int i = 0; i < TXG_SIZE; i++) {
ASSERT(!multilist_link_active(&dn->dn_dirty_link[i]));
ASSERT3P(dn->dn_free_ranges[i], ==, NULL);
list_destroy(&dn->dn_dirty_records[i]);
odn->dn_moved = (uint8_t)-1;
}
-/*ARGSUSED*/
static kmem_cbrc_t
dnode_move(void *buf, void *newbuf, size_t size, void *arg)
{
(void) zfs_refcount_remove(&rl->rl_longholds, tag);
}
-/* ARGSUSED */
static void
redaction_list_evict_sync(void *rlu)
{
* Adjust the FBN of any bookmarks that reference this block, whose "next"
* is the head dataset.
*/
-/* ARGSUSED */
void
dsl_bookmark_block_killed(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx)
{
+ (void) tx;
+
/*
* Iterate over bookmarks whose "next" is the head dataset.
*/
dmu_tx_t *ddrsa_tx;
} dsl_dataset_rename_snapshot_arg_t;
-/* ARGSUSED */
static int
dsl_dataset_rename_snapshot_check_impl(dsl_pool_t *dp,
dsl_dataset_t *hds, void *arg)
{
+ (void) dp;
dsl_dataset_rename_snapshot_arg_t *ddrsa = arg;
int error;
uint64_t val;
} dsl_dataset_set_qr_arg_t;
-/* ARGSUSED */
static int
dsl_dataset_set_refquota_check(void *arg, dmu_tx_t *tx)
{
uint64_t ddsca_value;
} dsl_dataset_set_compression_arg_t;
-/* ARGSUSED */
static int
dsl_dataset_set_compression_check(void *arg, dmu_tx_t *tx)
{
dmu_tx_t *tx;
};
-/* ARGSUSED */
static int
kill_blkptr(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
+ (void) spa, (void) dnp;
struct killarg *ka = arg;
dmu_tx_t *tx = ka->tx;
* inconsistent datasets, even if we encounter an error trying to
* process one of them.
*/
-/* ARGSUSED */
int
dsl_destroy_inconsistent(const char *dsname, void *arg)
{
+ (void) arg;
objset_t *os;
if (dmu_objset_hold(dsname, FTAG, &os) == 0) {
*/
if (secpolicy_zfs_proc(cr, proc) == 0)
return (ENFORCE_NEVER);
+#else
+ (void) proc;
#endif
if ((obj = dsl_dir_phys(dd)->dd_head_dataset_obj) == 0)
int nest_delta;
} dsl_valid_rename_arg_t;
-/* ARGSUSED */
static int
dsl_valid_rename(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
+ (void) dp;
dsl_valid_rename_arg_t *dvra = arg;
char namebuf[ZFS_MAX_DATASET_NAME_LEN];
* The delete queue is ZPL specific, and libzpool doesn't have
* it. It doesn't make sense to wait for it.
*/
+ (void) ds;
*in_progress = B_FALSE;
break;
#endif
}
dsl_pool_t *
-dsl_pool_create(spa_t *spa, nvlist_t *zplprops, dsl_crypto_params_t *dcp,
- uint64_t txg)
+dsl_pool_create(spa_t *spa, nvlist_t *zplprops __attribute__((unused)),
+ dsl_crypto_params_t *dcp, uint64_t txg)
{
int err;
dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);
return (!list_is_empty(&ds->ds_prop_cbs));
}
-/* ARGSUSED */
static int
dsl_prop_notify_all_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
+ (void) arg;
dsl_dir_t *dd = ds->ds_dir;
dsl_prop_record_t *pr;
dsl_prop_cb_record_t *cbr;
}
}
-/* ARGSUSED */
int
dsl_scan_setup_check(void *arg, dmu_tx_t *tx)
{
+ (void) arg;
dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev;
dsl_scan_setup_sync, &func, 0, ZFS_SPACE_CHECK_EXTRA_RESERVED));
}
-/* ARGSUSED */
static void
dsl_scan_done(dsl_scan_t *scn, boolean_t complete, dmu_tx_t *tx)
{
ASSERT(!dsl_scan_is_running(scn));
}
-/* ARGSUSED */
static int
dsl_scan_cancel_check(void *arg, dmu_tx_t *tx)
{
+ (void) arg;
dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
if (!dsl_scan_is_running(scn))
return (0);
}
-/* ARGSUSED */
static void
dsl_scan_cancel_sync(void *arg, dmu_tx_t *tx)
{
+ (void) arg;
dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
dsl_scan_done(scn, B_FALSE, tx);
zil_header_t *zsa_zh;
} zil_scan_arg_t;
-/* ARGSUSED */
static int
dsl_scan_zil_block(zilog_t *zilog, const blkptr_t *bp, void *arg,
uint64_t claim_txg)
{
+ (void) zilog;
zil_scan_arg_t *zsa = arg;
dsl_pool_t *dp = zsa->zsa_dp;
dsl_scan_t *scn = dp->dp_scan;
return (0);
}
-/* ARGSUSED */
static int
dsl_scan_zil_record(zilog_t *zilog, const lr_t *lrc, void *arg,
uint64_t claim_txg)
{
+ (void) zilog;
if (lrc->lrc_txtype == TX_WRITE) {
zil_scan_arg_t *zsa = arg;
dsl_pool_t *dp = zsa->zsa_dp;
dsl_scan_prefetch_cb(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
arc_buf_t *buf, void *private)
{
+ (void) zio;
scan_prefetch_ctx_t *spc = private;
dsl_scan_t *scn = spc->spc_scn;
spa_t *spa = scn->scn_dp->dp_spa;
scan_prefetch_ctx_rele(spc, scn);
}
-/* ARGSUSED */
static void
dsl_scan_prefetch_thread(void *arg)
{
dsl_scan_sync_state(scn, tx, SYNC_CACHED);
}
-/* ARGSUSED */
static int
enqueue_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
dsl_dataset_rele(ds, FTAG);
}
-/* ARGSUSED */
static int
enqueue_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
+ (void) arg;
dsl_dataset_t *ds;
int err;
dsl_scan_t *scn = dp->dp_scan;
return (0);
}
-/* ARGSUSED */
void
dsl_scan_ddt_entry(dsl_scan_t *scn, enum zio_checksum checksum,
ddt_entry_t *dde, dmu_tx_t *tx)
{
+ (void) tx;
const ddt_key_t *ddk = &dde->dde_key;
ddt_phys_t *ddp = dde->dde_phys;
blkptr_t bp;
zbookmark_phys_t zb = { 0 };
- int p;
if (!dsl_scan_is_running(scn))
return;
if (scn->scn_done_txg != 0)
return;
- for (p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
+ for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
if (ddp->ddp_phys_birth == 0 ||
ddp->ddp_phys_birth > scn->scn_phys.scn_max_txg)
continue;
#define DST_AVG_BLKSHIFT 14
-/* ARGSUSED */
static int
dsl_null_checkfunc(void *arg, dmu_tx_t *tx)
{
+ (void) arg, (void) tx;
return (0);
}
/*
* Wrappers for FM nvlist allocators
*/
-/* ARGSUSED */
static void *
i_fm_alloc(nv_alloc_t *nva, size_t size)
{
+ (void) nva;
return (kmem_zalloc(size, KM_SLEEP));
}
-/* ARGSUSED */
static void
i_fm_free(nv_alloc_t *nva, void *buf, size_t size)
{
+ (void) nva;
kmem_free(buf, size);
}
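
i_fm_alloc() and i_fm_free() adapt the general-purpose kmem routines to an allocator vtable whose handle argument (nva) carries nothing they need. A hedged user-space analogue, with hypothetical demo_* names:

#include <stdlib.h>

/* A pluggable-allocator vtable, loosely modeled on nv_alloc_t. */
typedef struct demo_alloc_ops {
	void *(*ao_alloc)(void *hdl, size_t size);
	void (*ao_free)(void *hdl, void *buf, size_t size);
} demo_alloc_ops_t;

/* Wrappers around the default heap: the handle carries no state. */
static void *
demo_alloc(void *hdl, size_t size)
{
	(void) hdl;
	return (calloc(1, size));
}

static void
demo_free(void *hdl, void *buf, size_t size)
{
	(void) hdl, (void) size;
	free(buf);
}

static const demo_alloc_ops_t demo_heap_ops = { demo_alloc, demo_free };

int
main(void)
{
	void *buf = demo_heap_ops.ao_alloc(NULL, 64);

	demo_heap_ops.ao_free(NULL, buf, 64);
	return (0);
}
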
return ((size_t)dstlen);
}
-/*ARGSUSED*/
int
gzip_decompress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
{
+ (void) n;
zlen_t dstlen = d_len;
ASSERT(d_len >= s_len);
static kmem_cache_t *lz4_cache;
-/*ARGSUSED*/
size_t
lz4_compress_zfs(void *s_start, void *d_start, size_t s_len,
size_t d_len, int n)
{
+ (void) n;
uint32_t bufsiz;
char *dest = d_start;
return (bufsiz + sizeof (bufsiz));
}
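
gzip_decompress(), lz4_compress_zfs(), and lzjb_compress() all share one compress/decompress signature so they can sit in a common function table; codecs without a tunable level simply discard the trailing n argument. A hedged sketch of that dispatch shape, with hypothetical demo_* names and a trivial copy codec standing in for a real one:

#include <string.h>

/* Common signature: returns compressed length, or 0 when it can't shrink. */
typedef size_t (*compress_func_t)(void *src, void *dst, size_t s_len,
    size_t d_len, int level);

/* A stand-in codec with no level knob: the argument is discarded. */
static size_t
demo_copy_compress(void *src, void *dst, size_t s_len, size_t d_len,
    int level)
{
	(void) level;
	if (d_len < s_len)
		return (0);
	(void) memcpy(dst, src, s_len);
	return (s_len);
}

int
main(void)
{
	char in[] = "hello", out[sizeof (in)];
	compress_func_t fn = demo_copy_compress;

	return (fn(in, out, sizeof (in), sizeof (out), 6) == 0);
}
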
-/*ARGSUSED*/
int
lz4_decompress_zfs(void *s_start, void *d_start, size_t s_len,
size_t d_len, int n)
{
+ (void) n;
const char *src = s_start;
uint32_t bufsiz = BE_IN32(src);
/* Compression functions */
-/*ARGSUSED*/
static int
LZ4_compressCtx(void *ctx, const char *source, char *dest, int isize,
int osize)
HASHLOG64K))
#define LZ4_HASH64K_VALUE(p) LZ4_HASH64K_FUNCTION(A32(p))
-/*ARGSUSED*/
static int
LZ4_compress64kCtx(void *ctx, const char *source, char *dest, int isize,
int osize)
#define OFFSET_MASK ((1 << (16 - MATCH_BITS)) - 1)
#define LEMPEL_SIZE 1024
-/*ARGSUSED*/
size_t
lzjb_compress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
{
+ (void) n;
uchar_t *src = s_start;
uchar_t *dst = d_start;
uchar_t *cpy;
return (dst - (uchar_t *)d_start);
}
-/*ARGSUSED*/
int
lzjb_decompress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
{
+ (void) s_len, (void) n;
uchar_t *src = s_start;
uchar_t *dst = d_start;
uchar_t *d_end = (uchar_t *)d_start + d_len;
* Create any block allocator specific components. The current allocators
* rely on using both a size-ordered range_tree_t and an array of uint64_t's.
*/
-/* ARGSUSED */
static void
metaslab_rt_create(range_tree_t *rt, void *arg)
{
mrap->mra_floor_shift = metaslab_by_size_min_shift;
}
-/* ARGSUSED */
static void
metaslab_rt_destroy(range_tree_t *rt, void *arg)
{
+ (void) rt;
metaslab_rt_arg_t *mrap = arg;
zfs_btree_t *size_tree = mrap->mra_bt;
kmem_free(mrap, sizeof (*mrap));
}
-/* ARGSUSED */
static void
metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg)
{
zfs_btree_add(size_tree, rs);
}
-/* ARGSUSED */
static void
metaslab_rt_remove(range_tree_t *rt, range_seg_t *rs, void *arg)
{
zfs_btree_remove(size_tree, rs);
}
-/* ARGSUSED */
static void
metaslab_rt_vacate(range_tree_t *rt, void *arg)
{
inuse = spl_kmem_cache_inuse(zfs_btree_leaf_cache);
}
}
+#else
+ (void) mc;
#endif
}
}
}
-/* ARGSUSED */
static uint64_t
metaslab_group_alloc_normal(metaslab_group_t *mg, zio_alloc_list_t *zal,
uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva, int d,
mutex_exit(&msp->ms_lock);
}
-/* ARGSUSED */
void
metaslab_free_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
uint64_t size, void *arg)
{
+ (void) inner_offset;
boolean_t *checkpoint = arg;
ASSERT3P(checkpoint, !=, NULL);
int mcca_error;
} metaslab_claim_cb_arg_t;
-/* ARGSUSED */
static void
metaslab_claim_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
uint64_t size, void *arg)
{
+ (void) inner_offset;
metaslab_claim_cb_arg_t *mcca_arg = arg;
if (mcca_arg->mcca_error == 0) {
spa_config_exit(spa, SCL_VDEV, FTAG);
}
-/* ARGSUSED */
static void
metaslab_check_free_impl_cb(uint64_t inner, vdev_t *vd, uint64_t offset,
uint64_t size, void *arg)
{
+ (void) inner, (void) arg;
+
if (vd->vdev_ops == &vdev_indirect_ops)
return;
return (range_tree_space(rt) == 0);
}
-/* ARGSUSED */
void
rt_btree_create(range_tree_t *rt, void *arg)
{
zfs_btree_create(size_tree, rt->rt_btree_compare, size);
}
-/* ARGSUSED */
void
rt_btree_destroy(range_tree_t *rt, void *arg)
{
+ (void) rt;
zfs_btree_t *size_tree = arg;
ASSERT0(zfs_btree_numnodes(size_tree));
zfs_btree_destroy(size_tree);
}
-/* ARGSUSED */
void
rt_btree_add(range_tree_t *rt, range_seg_t *rs, void *arg)
{
+ (void) rt;
zfs_btree_t *size_tree = arg;
zfs_btree_add(size_tree, rs);
}
-/* ARGSUSED */
void
rt_btree_remove(range_tree_t *rt, range_seg_t *rs, void *arg)
{
+ (void) rt;
zfs_btree_t *size_tree = arg;
zfs_btree_remove(size_tree, rs);
}
-/* ARGSUSED */
void
rt_btree_vacate(range_tree_t *rt, void *arg)
{
static int sa_legacy_attr_count = ARRAY_SIZE(sa_legacy_attrs);
static kmem_cache_t *sa_cache = NULL;
-/*ARGSUSED*/
static int
sa_cache_constructor(void *buf, void *unused, int kmflag)
{
+ (void) unused, (void) kmflag;
sa_handle_t *hdl = buf;
mutex_init(&hdl->sa_lock, NULL, MUTEX_DEFAULT, NULL);
return (0);
}
-/*ARGSUSED*/
static void
sa_cache_destructor(void *buf, void *unused)
{
+ (void) unused;
sa_handle_t *hdl = buf;
mutex_destroy(&hdl->sa_lock);
}
}
}
-/*ARGSUSED*/
static void
sa_byteswap_cb(void *hdr, void *attr_addr, sa_attr_type_t attr,
uint16_t length, int length_idx, boolean_t variable_length, void *userp)
{
+ (void) hdr, (void) length_idx, (void) variable_length;
sa_handle_t *hdl = userp;
sa_os_t *sa = hdl->sa_os->os_sa;
return (0);
}
-/*ARGSUSED*/
static void
sa_evict_sync(void *dbu)
{
+ (void) dbu;
panic("evicting sa dbuf\n");
}
return (0);
}
-/*ARGSUSED*/
void
abd_checksum_SHA256(abd_t *abd, uint64_t size,
const void *ctx_template, zio_cksum_t *zcp)
{
+ (void) ctx_template;
int ret;
SHA2_CTX ctx;
zio_cksum_t tmp;
zcp->zc_word[3] = BE_64(tmp.zc_word[3]);
}
-/*ARGSUSED*/
void
abd_checksum_SHA512_native(abd_t *abd, uint64_t size,
const void *ctx_template, zio_cksum_t *zcp)
{
+ (void) ctx_template;
SHA2_CTX ctx;
SHA2Init(SHA512_256, &ctx);
SHA2Final(zcp, &ctx);
}
-/*ARGSUSED*/
void
abd_checksum_SHA512_byteswap(abd_t *abd, uint64_t size,
const void *ctx_template, zio_cksum_t *zcp)
}
}
-/*ARGSUSED*/
static int
spa_change_guid_check(void *arg, dmu_tx_t *tx)
{
int spa_load_verify_metadata = B_TRUE;
int spa_load_verify_data = B_TRUE;
-/*ARGSUSED*/
static int
spa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
+ (void) zilog, (void) dnp;
+
if (zb->zb_level == ZB_DNODE_LEVEL || BP_IS_HOLE(bp) ||
BP_IS_EMBEDDED(bp) || BP_IS_REDACTED(bp))
return (0);
return (0);
}
-/* ARGSUSED */
static int
verify_dataset_name_len(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
+ (void) dp, (void) arg;
+
if (dsl_dataset_namelen(ds) >= ZFS_MAX_DATASET_NAME_LEN)
return (SET_ERROR(ENAMETOOLONG));
return (spa->spa_livelists_to_delete != 0);
}
-/* ARGSUSED */
static boolean_t
spa_livelist_delete_cb_check(void *arg, zthr_t *z)
{
+ (void) z;
spa_t *spa = arg;
return (spa_livelist_delete_check(spa));
}
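
The paired *_cb_check()/*_cb() functions here implement the zthr contract: a cheap predicate reporting whether work exists, and a worker that performs it, with the zthr core waking periodically, consulting the predicate, and running the worker only when it returns true. A hedged single-threaded model of that loop, with hypothetical demo_* names:

#include <stdio.h>

typedef int (*zthr_check_t)(void *arg);
typedef void (*zthr_func_t)(void *arg);

static int
demo_delete_check(void *arg)
{
	int *pending = arg;

	return (*pending > 0);
}

static void
demo_delete_func(void *arg)
{
	int *pending = arg;

	(*pending)--;
}

int
main(void)
{
	int pending = 2;
	zthr_check_t check = demo_delete_check;
	zthr_func_t func = demo_delete_func;

	/* The zthr core: run the worker only while the predicate holds. */
	while (check(&pending))
		func(&pending);
	(void) printf("done, %d pending\n", pending);
	return (0);
}
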
* be freed. Then, call a synctask which performs the actual frees and updates
* the pool-wide livelist data.
*/
-/* ARGSUSED */
static void
spa_livelist_delete_cb(void *arg, zthr_t *z)
{
zfs_livelist_condense_zthr_cancel++;
}
-/* ARGSUSED */
/*
* Check that there is something to condense but that a condense is not
* already in progress and that condensing has not been cancelled.
static boolean_t
spa_livelist_condense_cb_check(void *arg, zthr_t *z)
{
+ (void) z;
spa_t *spa = arg;
if ((spa->spa_to_condense.ds != NULL) &&
(spa->spa_to_condense.syncing == B_FALSE) &&
static boolean_t
spa_has_aux_vdev(spa_t *spa, uint64_t guid, spa_aux_vdev_t *sav)
{
+ (void) spa;
int i;
uint64_t vdev_guid;
ev = kmem_alloc(sizeof (sysevent_t), KM_SLEEP);
ev->resource = resource;
}
+#else
+ (void) spa, (void) vd, (void) hist_nvl, (void) name;
#endif
return (ev);
}
zfs_zevent_post(ev->resource, NULL, zfs_zevent_post_cb);
kmem_free(ev, sizeof (*ev));
}
+#else
+ (void) ev;
#endif
}
return (B_TRUE);
}
-/* ARGSUSED */
boolean_t
spa_checkpoint_discard_thread_check(void *arg, zthr_t *zthr)
{
+ (void) zthr;
spa_t *spa = arg;
if (!spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT))
}
-/* ARGSUSED */
static int
spa_checkpoint_check(void *arg, dmu_tx_t *tx)
{
+ (void) arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
if (!spa_feature_is_enabled(spa, SPA_FEATURE_POOL_CHECKPOINT))
return (0);
}
-/* ARGSUSED */
static void
spa_checkpoint_sync(void *arg, dmu_tx_t *tx)
{
+ (void) arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
spa_t *spa = dp->dp_spa;
uberblock_t checkpoint = spa->spa_ubsync;
return (error);
}
-/* ARGSUSED */
static int
spa_checkpoint_discard_check(void *arg, dmu_tx_t *tx)
{
+ (void) arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
if (!spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT))
return (0);
}
-/* ARGSUSED */
static void
spa_checkpoint_discard_sync(void *arg, dmu_tx_t *tx)
{
+ (void) arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
VERIFY0(zap_remove(spa_meta_objset(spa), DMU_POOL_DIRECTORY_OBJECT,
mutex_exit(&spa->spa_errlist_lock);
mutex_exit(&spa->spa_errlog_lock);
+#else
+ (void) spa, (void) uaddr, (void) count;
#endif
return (ret);
void
spa_config_enter(spa_t *spa, int locks, const void *tag, krw_t rw)
{
+ (void) tag;
int wlocks_held = 0;
ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY);
void
spa_config_exit(spa_t *spa, int locks, const void *tag)
{
+ (void) tag;
for (int i = SCL_LOCKS - 1; i >= 0; i--) {
spa_config_lock_t *scl = &spa->spa_config_lock[i];
if (!(locks & (1 << i)))
return (vd->vdev_mg);
}
-/* ARGSUSED */
void
vdev_default_xlate(vdev_t *vd, const range_seg64_t *logical_rs,
range_seg64_t *physical_rs, range_seg64_t *remain_rs)
{
+ (void) vd, (void) remain_rs;
+
physical_rs->rs_start = logical_rs->rs_start;
physical_rs->rs_end = logical_rs->rs_end;
}
static boolean_t
vdev_default_open_children_func(vdev_t *vd)
{
+ (void) vd;
return (B_TRUE);
}
vdev_default_need_resilver(vdev_t *vd, const dva_t *dva, size_t psize,
uint64_t phys_birth)
{
+ (void) dva, (void) psize;
+
/* Set by sequential resilver. */
if (phys_birth == TXG_UNKNOWN)
return (B_TRUE);
static void
vdev_get_child_stat_ex(vdev_t *cvd, vdev_stat_ex_t *vsx, vdev_stat_ex_t *cvsx)
{
+ (void) cvd;
+
int t, b;
for (t = 0; t < ZIO_TYPES; t++) {
for (b = 0; b < ARRAY_SIZE(vsx->vsx_disk_histo[0]); b++)
vdev_space_update(vdev_t *vd, int64_t alloc_delta, int64_t defer_delta,
int64_t space_delta)
{
+ (void) defer_delta;
int64_t dspace_delta;
spa_t *spa = vd->vdev_spa;
vdev_t *rvd = spa->spa_root_vdev;
static int
vdev_draid_init(spa_t *spa, nvlist_t *nv, void **tsd)
{
+ (void) spa;
uint64_t ndata, nparity, nspares, ngroups;
int error;
return (cvd);
}
-/* ARGSUSED */
static void
vdev_draid_spare_close(vdev_t *vd)
{
zio_execute(zio);
}
-/* ARGSUSED */
static void
vdev_draid_spare_io_done(zio_t *zio)
{
+ (void) zio;
}
/*
}
}
-/* ARGSUSED */
static boolean_t
spa_condense_indirect_thread_check(void *arg, zthr_t *zthr)
{
+ (void) zthr;
spa_t *spa = arg;
return (spa->spa_condensing_indirect != NULL);
}
-/* ARGSUSED */
static void
spa_condense_indirect_thread(void *arg, zthr_t *zthr)
{
return (error);
}
-/* ARGSUSED */
static void
vdev_indirect_close(vdev_t *vd)
{
+ (void) vd;
}
-/* ARGSUSED */
static int
vdev_indirect_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
uint64_t *logical_ashift, uint64_t *physical_ashift)
* divisible by sizeof (uint64_t), and buf must be 8-byte aligned. The ABD
* allocation will guarantee these for us.
*/
-/* ARGSUSED */
static int
vdev_initialize_block_fill(void *buf, size_t len, void *unused)
{
+ (void) unused;
+
ASSERT0(len % sizeof (uint64_t));
#ifdef _ILP32
for (uint64_t i = 0; i < len; i += sizeof (uint32_t)) {
void
vdev_initialize_stop_wait(spa_t *spa, list_t *vd_list)
{
+ (void) spa;
vdev_t *vd;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
vdev_mirror_rebuild_asize(vdev_t *vd, uint64_t start, uint64_t asize,
uint64_t max_segment)
{
+ (void) start;
+
uint64_t psize = MIN(P2ROUNDUP(max_segment, 1 << vd->vdev_ashift),
SPA_MAXBLOCKSIZE);
#include <sys/fs/zfs.h>
#include <sys/zio.h>
-/* ARGSUSED */
static int
vdev_missing_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
uint64_t *ashift, uint64_t *pshift)
* VDEV_AUX_BAD_GUID_SUM. So we pretend to succeed, knowing that we
* will fail the GUID sum check before ever trying to open the pool.
*/
+ (void) vd;
*psize = 0;
*max_psize = 0;
*ashift = 0;
return (0);
}
-/* ARGSUSED */
static void
vdev_missing_close(vdev_t *vd)
{
+ (void) vd;
}
-/* ARGSUSED */
static void
vdev_missing_io_start(zio_t *zio)
{
zio_execute(zio);
}
-/* ARGSUSED */
static void
vdev_missing_io_done(zio_t *zio)
{
+ (void) zio;
}
vdev_ops_t vdev_missing_ops = {
}
}
-/* ARGSUSED */
static int
vdev_raidz_reconst_p_func(void *dbuf, void *sbuf, size_t size, void *private)
{
+ (void) private;
uint64_t *dst = dbuf;
uint64_t *src = sbuf;
int cnt = size / sizeof (src[0]);
return (0);
}
-/* ARGSUSED */
static int
vdev_raidz_reconst_q_pre_func(void *dbuf, void *sbuf, size_t size,
void *private)
{
+ (void) private;
uint64_t *dst = dbuf;
uint64_t *src = sbuf;
uint64_t mask;
return (0);
}
-/* ARGSUSED */
static int
vdev_raidz_reconst_q_pre_tail_func(void *buf, size_t size, void *private)
{
+ (void) private;
uint64_t *dst = buf;
uint64_t mask;
int cnt = size / sizeof (dst[0]);
vdev_raidz_xlate(vdev_t *cvd, const range_seg64_t *logical_rs,
range_seg64_t *physical_rs, range_seg64_t *remain_rs)
{
+ (void) remain_rs;
+
vdev_t *raidvd = cvd->vdev_parent;
ASSERT(raidvd->vdev_ops == &vdev_raidz_ops);
return (allocating == 0);
}
-/* ARGSUSED */
static int
spa_vdev_remove_cancel_check(void *arg, dmu_tx_t *tx)
{
+ (void) arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
if (spa->spa_vdev_removal == NULL)
* Cancel a removal by freeing all entries from the partial mapping
* and marking the vdev as no longer being removing.
*/
-/* ARGSUSED */
static void
spa_vdev_remove_cancel_sync(void *arg, dmu_tx_t *tx)
{
+ (void) arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
spa_vdev_removal_t *svr = spa->spa_vdev_removal;
vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);
void
vdev_trim_stop_wait(spa_t *spa, list_t *vd_list)
{
+ (void) spa;
vdev_t *vd;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
}
}
-/* ARGSUSED */
static void
zcp_lua_counthook(lua_State *state, lua_Debug *ar)
{
+ (void) ar;
lua_getfield(state, LUA_REGISTRYINDEX, ZCP_RUN_INFO_KEY);
zcp_run_info_t *ri = lua_touserdata(state, -1);
* The txg_wait_synced_sig will continue to wait for the txg to complete
* after calling this callback.
*/
-/* ARGSUSED */
static void
zcp_eval_sig(void *arg, dmu_tx_t *tx)
{
+ (void) tx;
zcp_run_info_t *ri = arg;
ri->zri_canceled = B_TRUE;
.blocks_modified = 0
};
-/* ARGSUSED */
static int
zcp_synctask_destroy(lua_State *state, boolean_t sync, nvlist_t *err_details)
{
+ (void) err_details;
int err;
const char *dsname = lua_tostring(state, 1);
.blocks_modified = 3
};
-/* ARGSUSED */
static int
zcp_synctask_snapshot(lua_State *state, boolean_t sync, nvlist_t *err_details)
{
+ (void) err_details;
int err;
dsl_dataset_snapshot_arg_t ddsa = { 0 };
const char *dsname = lua_tostring(state, 1);
zcp_synctask_inherit_prop(lua_State *state, boolean_t sync,
nvlist_t *err_details)
{
+ (void) err_details;
int err;
zcp_inherit_prop_arg_t zipa = { 0 };
dsl_props_set_arg_t *dpsa = &zipa.zipa_dpsa;
.blocks_modified = 1,
};
-/* ARGSUSED */
static int
zcp_synctask_bookmark(lua_State *state, boolean_t sync, nvlist_t *err_details)
{
+ (void) err_details;
int err;
const char *source = lua_tostring(state, 1);
const char *new = lua_tostring(state, 2);
static int
zcp_synctask_set_prop(lua_State *state, boolean_t sync, nvlist_t *err_details)
{
+ (void) err_details;
int err;
zcp_set_prop_arg_t args = { 0 };
* read I/Os, there are basically three 'types' of I/O, which form a roughly
* layered diagram:
*
- * +---------------+
+ * +---------------+
* | Aggregate I/O | No associated logical data or device
* +---------------+
* |
/*
* background task to clean stale recent event nodes.
*/
-/*ARGSUSED*/
static void
zfs_ereport_cleaner(void *arg)
{
return (eip);
}
#else
-/*ARGSUSED*/
void
zfs_ereport_clear(spa_t *spa, vdev_t *vd)
{
+ (void) spa, (void) vd;
}
#endif
(zio != NULL) && (!zio->io_timestamp)) {
return (B_FALSE);
}
+#else
+ (void) subclass, (void) spa, (void) vd, (void) zio;
#endif
return (B_TRUE);
}
/* Cleanup is handled by the callback function */
rc = zfs_zevent_post(ereport, detector, zfs_zevent_post_cb);
+#else
+ (void) subclass, (void) spa, (void) vd, (void) zb, (void) zio,
+ (void) state;
#endif
return (rc);
}
if (zfs_is_ratelimiting_event(FM_EREPORT_ZFS_CHECKSUM, vd))
return (SET_ERROR(EBUSY));
+#else
+ (void) zb, (void) offset;
#endif
report = kmem_zalloc(sizeof (*report), KM_SLEEP);
report->zcr_ereport = report->zcr_detector = NULL;
if (info != NULL)
kmem_free(info, sizeof (*info));
+#else
+ (void) report, (void) good_data, (void) bad_data,
+ (void) drop_if_identical;
#endif
}
rc = zfs_zevent_post(ereport, detector, zfs_zevent_post_cb);
kmem_free(info, sizeof (*info));
}
+#else
+ (void) spa, (void) vd, (void) zb, (void) zio, (void) offset,
+ (void) length, (void) good_data, (void) bad_data, (void) zbc;
#endif
return (rc);
}
while ((elem = nvlist_next_nvpair(aux, elem)) != NULL)
(void) nvlist_add_nvpair(resource, elem);
}
-
+#else
+ (void) spa, (void) vd, (void) type, (void) name, (void) aux;
#endif
return (resource);
}
resource = zfs_event_create(spa, vd, type, name, aux);
if (resource)
zfs_zevent_post(resource, NULL, zfs_zevent_post_cb);
+#else
+ (void) spa, (void) vd, (void) type, (void) name, (void) aux;
#endif
}
if (aux)
fm_nvlist_destroy(aux, FM_NVA_FREE);
+#else
+ (void) spa, (void) vd, (void) laststate;
#endif
}
return (error);
}
-/* ARGSUSED */
static int
zil_clear_log_block(zilog_t *zilog, const blkptr_t *bp, void *tx,
uint64_t first_txg)
{
+ (void) tx;
ASSERT(!BP_IS_HOLE(bp));
/*
return (0);
}
-/* ARGSUSED */
static int
zil_noop_log_record(zilog_t *zilog, const lr_t *lrc, void *tx,
uint64_t first_txg)
{
+ (void) zilog, (void) lrc, (void) tx, (void) first_txg;
return (0);
}
return (zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg));
}
-/* ARGSUSED */
static int
zil_free_log_block(zilog_t *zilog, const blkptr_t *bp, void *tx,
uint64_t claim_txg)
{
+ (void) claim_txg;
+
zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp);
return (0);
* Checksum errors are ok as they indicate the end of the chain.
* Any other error (no device or read failure) returns an error.
*/
-/* ARGSUSED */
int
zil_check_log_chain(dsl_pool_t *dp, dsl_dataset_t *ds, void *tx)
{
+ (void) dp;
zilog_t *zilog;
objset_t *os;
blkptr_t *bp;
mutex_exit(&zilog->zl_lock);
}
-/* ARGSUSED */
static int
zil_lwb_cons(void *vbuf, void *unused, int kmflag)
{
+ (void) unused, (void) kmflag;
lwb_t *lwb = vbuf;
list_create(&lwb->lwb_itxs, sizeof (itx_t), offsetof(itx_t, itx_node));
list_create(&lwb->lwb_waiters, sizeof (zil_commit_waiter_t),
return (0);
}
-/* ARGSUSED */
static void
zil_lwb_dest(void *vbuf, void *unused)
{
+ (void) unused;
lwb_t *lwb = vbuf;
mutex_destroy(&lwb->lwb_vdev_lock);
avl_destroy(&lwb->lwb_vdev_tree);
return (0);
}
-/* ARGSUSED */
static int
zil_incr_blks(zilog_t *zilog, const blkptr_t *bp, void *arg, uint64_t claim_txg)
{
+ (void) bp, (void) arg, (void) claim_txg;
+
zilog->zl_replay_blks++;
return (0);
return (B_FALSE);
}
-/* ARGSUSED */
int
zil_reset(const char *osname, void *arg)
{
-	int error;
+	(void) arg;

-	error = zil_suspend(osname, NULL);
+	int error = zil_suspend(osname, NULL);
/* EACCES means crypto key not loaded */
if ((error == EACCES) || (error == EBUSY))
return (SET_ERROR(error));
static void
zio_abd_free(void *abd, size_t size)
{
+ (void) size;
abd_free((abd_t *)abd);
}
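
zio_abd_free() has to match a generic void (*)(void *, size_t) free callback, but an ABD records its own length, so the size argument is discarded. A hedged sketch of the same shape, with hypothetical demo_* names: a self-describing allocation whose sized-free wrapper ignores the redundant argument.

#include <stdlib.h>

typedef void (*free_func_t)(void *buf, size_t size);

/* A self-describing buffer: it records its own length at allocation. */
typedef struct demo_buf {
	size_t	db_size;
} demo_buf_t;

static void *
demo_buf_alloc(size_t size)
{
	demo_buf_t *db = malloc(sizeof (demo_buf_t) + size);

	if (db != NULL)
		db->db_size = size;
	return (db);
}

/* The size argument is redundant here, exactly as in zio_abd_free(). */
static void
demo_buf_free(void *buf, size_t size)
{
	(void) size;
	free(buf);
}

int
main(void)
{
	free_func_t ff = demo_buf_free;
	void *buf = demo_buf_alloc(128);

	ff(buf, 128);
	return (0);
}
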
boolean_t
zfs_dva_valid(spa_t *spa, const dva_t *dva, const blkptr_t *bp)
{
+ (void) bp;
uint64_t vdevid = DVA_GET_VDEV(dva);
if (vdevid >= spa->spa_root_vdev->vdev_children)
!zio_taskq_member(zio, ZIO_TASKQ_ISSUE) &&
!zio_taskq_member(zio, ZIO_TASKQ_ISSUE_HIGH))
return (B_TRUE);
+#else
+ (void) zio;
#endif /* HAVE_LARGE_STACKS */
return (B_FALSE);
return (zio);
}
-/* ARGSUSED */
static zio_t *
zio_free_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
uint64_t offset)
{
+ (void) gn, (void) data, (void) offset;
+
zio_t *zio = zio_free_sync(pio, pio->io_spa, pio->io_txg, bp,
ZIO_GANG_CHILD_FLAGS(pio));
if (zio == NULL) {
return (zio);
}
-/* ARGSUSED */
static zio_t *
zio_claim_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
uint64_t offset)
{
+ (void) gn, (void) data, (void) offset;
return (zio_claim(pio, pio->io_spa, pio->io_txg, bp,
NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio)));
}
zfs_ereport_finish_checksum(zcr, good_buf, zcr->zcr_cbdata, B_FALSE);
}
-/*ARGSUSED*/
void
zio_vsd_default_cksum_report(zio_t *zio, zio_cksum_report_t *zcr)
{
* invocation and passed to the checksum function.
*/
-/*ARGSUSED*/
static void
abd_checksum_off(abd_t *abd, uint64_t size,
const void *ctx_template, zio_cksum_t *zcp)
{
+ (void) abd, (void) size, (void) ctx_template;
ZIO_SET_CHECKSUM(zcp, 0, 0, 0, 0);
}
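
abd_checksum_off() fills the "checksum disabled" slot of the checksum function table: it ignores its inputs and emits the constant zero checksum, so later verification trivially succeeds. A hedged miniature of such a table, with hypothetical demo_* names:

#include <stdint.h>
#include <stdio.h>

typedef struct demo_cksum {
	uint64_t	zc_word[4];
} demo_cksum_t;

typedef void (*cksum_func_t)(const void *buf, uint64_t size,
    demo_cksum_t *zcp);

/* A real (if simple) checksum: 64-bit additive over whole words. */
static void
demo_cksum_sum(const void *buf, uint64_t size, demo_cksum_t *zcp)
{
	const uint64_t *word = buf;

	zcp->zc_word[0] = 0;
	for (uint64_t i = 0; i < size / sizeof (uint64_t); i++)
		zcp->zc_word[0] += word[i];
	zcp->zc_word[1] = zcp->zc_word[2] = zcp->zc_word[3] = 0;
}

/* "Checksum off": inputs ignored, constant result. */
static void
demo_cksum_off(const void *buf, uint64_t size, demo_cksum_t *zcp)
{
	(void) buf, (void) size;
	zcp->zc_word[0] = zcp->zc_word[1] = 0;
	zcp->zc_word[2] = zcp->zc_word[3] = 0;
}

int
main(void)
{
	uint64_t data[2] = { 3, 4 };
	demo_cksum_t zc;
	cksum_func_t table[] = { demo_cksum_off, demo_cksum_sum };

	table[1](data, sizeof (data), &zc);
	(void) printf("sum=%llu\n", (unsigned long long)zc.zc_word[0]);
	return (0);
}
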
-/*ARGSUSED*/
static void
abd_fletcher_2_native(abd_t *abd, uint64_t size,
const void *ctx_template, zio_cksum_t *zcp)
{
+ (void) ctx_template;
fletcher_init(zcp);
(void) abd_iterate_func(abd, 0, size,
fletcher_2_incremental_native, zcp);
}
-/*ARGSUSED*/
static void
abd_fletcher_2_byteswap(abd_t *abd, uint64_t size,
const void *ctx_template, zio_cksum_t *zcp)
{
+ (void) ctx_template;
fletcher_init(zcp);
(void) abd_iterate_func(abd, 0, size,
fletcher_2_incremental_byteswap, zcp);
fletcher_4_abd_ops.acf_fini(acdp);
}
-/*ARGSUSED*/
void
abd_fletcher_4_native(abd_t *abd, uint64_t size,
const void *ctx_template, zio_cksum_t *zcp)
{
+ (void) ctx_template;
fletcher_4_ctx_t ctx;
zio_abd_checksum_data_t acd = {
}
-/*ARGSUSED*/
void
abd_fletcher_4_byteswap(abd_t *abd, uint64_t size,
const void *ctx_template, zio_cksum_t *zcp)
{
+ (void) ctx_template;
fletcher_4_ctx_t ctx;
zio_abd_checksum_data_t acd = {
zio_complevel_select(spa_t *spa, enum zio_compress compress, uint8_t child,
uint8_t parent)
{
+ (void) spa;
uint8_t result;
if (!ZIO_COMPRESS_HASLEVEL(compress))
return (result);
}
-/*ARGSUSED*/
static int
zio_compress_zeroed_cb(void *data, size_t len, void *private)
{
+ (void) private;
+
uint64_t *end = (uint64_t *)((char *)data + len);
for (uint64_t *word = (uint64_t *)data; word < end; word++)
if (*word != 0)