*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016 Actifio, Inc. All rights reserved.
*/
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <zlib.h>
+#include <libgen.h>
#include <sys/signal.h>
#include <sys/spa.h>
#include <sys/stat.h>
#include <sys/processor.h>
#include <sys/zfs_context.h>
+#include <sys/rrwlock.h>
#include <sys/utsname.h>
#include <sys/time.h>
#include <sys/systeminfo.h>
+#include <zfs_fletcher.h>
+#include <sys/crypto/icp.h>
/*
* Emulation of kernel services in userland.
uint64_t physmem;
vnode_t *rootdir = (vnode_t *)0xabcd1234;
char hw_serial[HW_HOSTID_LEN];
+struct utsname hw_utsname;
+vmem_t *zio_arena = NULL;
-struct utsname utsname = {
- "userland", "libzpool", "1", "1", "na"
-};
+/* If set, all blocks read will be copied to the specified directory. */
+char *vn_dumpdir = NULL;
/* this only exists to have its address taken */
struct proc p0;
* =========================================================================
* threads
* =========================================================================
+ *
+ * TS_STACK_MIN is dictated by the minimum allowed pthread stack size. While
+ * TS_STACK_MAX is somewhat arbitrary, it was selected to be large enough for
+ * the expected stack depth while small enough to avoid exhausting address
+ * space with high thread counts.
*/
+#define TS_STACK_MIN MAX(PTHREAD_STACK_MIN, 32768)
+#define TS_STACK_MAX (256 * 1024)
-pthread_cond_t kthread_cond = PTHREAD_COND_INITIALIZER;
-pthread_mutex_t kthread_lock = PTHREAD_MUTEX_INITIALIZER;
-pthread_key_t kthread_key;
-int kthread_nr = 0;
-
-static void
-thread_init(void)
-{
- kthread_t *kt;
-
- VERIFY3S(pthread_key_create(&kthread_key, NULL), ==, 0);
-
- /* Create entry for primary kthread */
- kt = umem_zalloc(sizeof(kthread_t), UMEM_NOFAIL);
- kt->t_tid = pthread_self();
- kt->t_func = NULL;
-
- VERIFY3S(pthread_setspecific(kthread_key, kt), ==, 0);
-
- /* Only the main thread should be running at the moment */
- ASSERT3S(kthread_nr, ==, 0);
- kthread_nr = 1;
-}
-
-static void
-thread_fini(void)
-{
- kthread_t *kt = curthread;
-
- ASSERT(pthread_equal(kt->t_tid, pthread_self()));
- ASSERT3P(kt->t_func, ==, NULL);
-
- umem_free(kt, sizeof(kthread_t));
-
- /* Wait for all threads to exit via thread_exit() */
- VERIFY3S(pthread_mutex_lock(&kthread_lock), ==, 0);
-
- kthread_nr--; /* Main thread is exiting */
-
- while (kthread_nr > 0)
- VERIFY3S(pthread_cond_wait(&kthread_cond, &kthread_lock), ==,
- 0);
-
- ASSERT3S(kthread_nr, ==, 0);
- VERIFY3S(pthread_mutex_unlock(&kthread_lock), ==, 0);
-
- VERIFY3S(pthread_key_delete(kthread_key), ==, 0);
-}
-
-kthread_t *
-zk_thread_current(void)
-{
- kthread_t *kt = pthread_getspecific(kthread_key);
-
- ASSERT3P(kt, !=, NULL);
-
- return kt;
-}
-
-void *
-zk_thread_helper(void *arg)
-{
- kthread_t *kt = (kthread_t *) arg;
-
- VERIFY3S(pthread_setspecific(kthread_key, kt), ==, 0);
-
- VERIFY3S(pthread_mutex_lock(&kthread_lock), ==, 0);
- kthread_nr++;
- VERIFY3S(pthread_mutex_unlock(&kthread_lock), ==, 0);
-
- kt->t_tid = pthread_self();
- ((thread_func_arg_t) kt->t_func)(kt->t_arg);
-
- /* Unreachable, thread must exit with thread_exit() */
- abort();
-
- return NULL;
-}
-
+/*ARGSUSED*/
kthread_t *
-zk_thread_create(caddr_t stk, size_t stksize, thread_func_t func, void *arg,
-    size_t len, proc_t *pp, int state, pri_t pri, int detachstate)
+zk_thread_create(void (*func)(void *), void *arg, size_t stksize, int state)
{
-	kthread_t *kt;
	pthread_attr_t attr;
-	size_t stack;
+	pthread_t tid;
+	char *stkstr;
+	int detachstate = PTHREAD_CREATE_DETACHED;
+	/* Threads detach by default; TS_JOINABLE requests a joinable thread. */
-	ASSERT3S(state & ~TS_RUN, ==, 0);
+	VERIFY0(pthread_attr_init(&attr));
-	kt = umem_zalloc(sizeof(kthread_t), UMEM_NOFAIL);
-	kt->t_func = func;
-	kt->t_arg = arg;
+	if (state & TS_JOINABLE)
+		detachstate = PTHREAD_CREATE_JOINABLE;
+
+	VERIFY0(pthread_attr_setdetachstate(&attr, detachstate));
	/*
-	 * The Solaris kernel stack size is 24k for x86/x86_64.
-	 * The Linux kernel stack size is 8k for x86/x86_64.
-	 *
-	 * We reduce the default stack size in userspace, to ensure
-	 * we observe stack overruns in user space as well as in
-	 * kernel space. In practice we can't set the userspace stack
-	 * size to 8k because differences in stack usage between kernel
-	 * space and userspace could lead to spurious stack overflows
-	 * (especially when debugging is enabled). Nevertheless, we try
-	 * to set it to the lowest value that works (currently 8k*4).
-	 * PTHREAD_STACK_MIN is the minimum stack required for a NULL
-	 * procedure in user space and is added in to the stack
-	 * requirements.
-	 *
-	 * Some buggy NPTL threading implementations include the
-	 * guard area within the stack size allocations. In
-	 * this case we allocate an extra page to account for the
-	 * guard area since we only have two pages of usable stack
-	 * on Linux.
+	 * We allow the default stack size in user space to be specified by
+	 * setting the ZFS_STACK_SIZE environment variable. This allows us
+	 * the convenience of observing and debugging stack overruns in
+	 * user space. Explicitly specified stack sizes will be honored.
+	 * The usage of ZFS_STACK_SIZE is discussed further in the
+	 * ENVIRONMENT VARIABLES sections of the ztest(1) man page.
	 */
+	if (stksize == 0) {
+		stkstr = getenv("ZFS_STACK_SIZE");
-	stack = PTHREAD_STACK_MIN + MAX(stksize, STACK_SIZE) * 4;
-
-	VERIFY3S(pthread_attr_init(&attr), ==, 0);
-	VERIFY3S(pthread_attr_setstacksize(&attr, stack), ==, 0);
-	VERIFY3S(pthread_attr_setguardsize(&attr, PAGESIZE), ==, 0);
-	VERIFY3S(pthread_attr_setdetachstate(&attr, detachstate), ==, 0);
-
-	VERIFY3S(pthread_create(&kt->t_tid, &attr, &zk_thread_helper, kt),
-	    ==, 0);
-
-	VERIFY3S(pthread_attr_destroy(&attr), ==, 0);
-
-	return kt;
-}
-
-void
-zk_thread_exit(void)
-{
-	kthread_t *kt = curthread;
-
-	ASSERT(pthread_equal(kt->t_tid, pthread_self()));
-
-	umem_free(kt, sizeof (kthread_t));
+		if (stkstr == NULL)
+			stksize = TS_STACK_MAX;
+		else
+			stksize = MAX(atoi(stkstr), TS_STACK_MIN);
+	}
-	pthread_mutex_lock(&kthread_lock);
-	kthread_nr--;
-	pthread_mutex_unlock(&kthread_lock);
+	VERIFY3S(stksize, >, 0);
+	stksize = P2ROUNDUP(MAX(stksize, TS_STACK_MIN), PAGESIZE);
-	pthread_cond_broadcast(&kthread_cond);
-	pthread_exit((void *)TS_MAGIC);
-}
+	/*
+	 * If this ever fails, it may be because the stack size is not a
+	 * multiple of system page size.
+	 */
+	VERIFY0(pthread_attr_setstacksize(&attr, stksize));
+	VERIFY0(pthread_attr_setguardsize(&attr, PAGESIZE));
-void
-zk_thread_join(kt_did_t tid)
-{
-	void *ret;
+	VERIFY0(pthread_create(&tid, &attr, (void *(*)(void *))func, arg));
+	VERIFY0(pthread_attr_destroy(&attr));
-	pthread_join((pthread_t)tid, &ret);
-	VERIFY3P(ret, ==, (void *)TS_MAGIC);
+	/* The pthread id itself serves as the opaque kthread_t pointer. */
+	return ((void *)(uintptr_t)tid);
}
/*
*/
/*ARGSUSED*/
kstat_t *
-kstat_create(char *module, int instance, char *name, char *class,
-    uchar_t type, ulong_t ndata, uchar_t ks_flag)
+kstat_create(const char *module, int instance, const char *name,
+    const char *class, uchar_t type, ulong_t ndata, uchar_t ks_flag)
{
+	/* No kstat framework in userland; callers must tolerate NULL. */
	return (NULL);
}
kstat_delete(kstat_t *ksp)
{}
+/*
+ * kstat I/O queue accounting: all no-ops in the userland build, since
+ * kstat_create() above never hands out a real kstat.
+ */
+/*ARGSUSED*/
+void
+kstat_waitq_enter(kstat_io_t *kiop)
+{}
+
+/*ARGSUSED*/
+void
+kstat_waitq_exit(kstat_io_t *kiop)
+{}
+
+/*ARGSUSED*/
+void
+kstat_runq_enter(kstat_io_t *kiop)
+{}
+
+/*ARGSUSED*/
+void
+kstat_runq_exit(kstat_io_t *kiop)
+{}
+
+/*ARGSUSED*/
+void
+kstat_waitq_to_runq(kstat_io_t *kiop)
+{}
+
+/*ARGSUSED*/
+void
+kstat_runq_back_to_waitq(kstat_io_t *kiop)
+{}
+
+void
+kstat_set_raw_ops(kstat_t *ksp,
+    int (*headers)(char *buf, size_t size),
+    int (*data)(char *buf, size_t size, void *data),
+    void *(*addr)(kstat_t *ksp, loff_t index))
+{}
+
+
/*
* =========================================================================
* mutexes
void
mutex_init(kmutex_t *mp, char *name, int type, void *cookie)
{
+	/* name/type/cookie exist only for kernel API compatibility. */
-	ASSERT3S(type, ==, MUTEX_DEFAULT);
-	ASSERT3P(cookie, ==, NULL);
-	mp->m_owner = MTX_INIT;
-	mp->m_magic = MTX_MAGIC;
-	VERIFY3S(pthread_mutex_init(&mp->m_lock, NULL), ==, 0);
+	VERIFY0(pthread_mutex_init(&mp->m_lock, NULL));
+	memset(&mp->m_owner, 0, sizeof (pthread_t));
}
void
mutex_destroy(kmutex_t *mp)
{
-	ASSERT3U(mp->m_magic, ==, MTX_MAGIC);
-	ASSERT3P(mp->m_owner, ==, MTX_INIT);
-	VERIFY3S(pthread_mutex_destroy(&(mp)->m_lock), ==, 0);
-	mp->m_owner = MTX_DEST;
-	mp->m_magic = 0;
+	VERIFY0(pthread_mutex_destroy(&mp->m_lock));
}
void
mutex_enter(kmutex_t *mp)
{
-	ASSERT3U(mp->m_magic, ==, MTX_MAGIC);
-	ASSERT3P(mp->m_owner, !=, MTX_DEST);
-	ASSERT3P(mp->m_owner, !=, curthread);
-	VERIFY3S(pthread_mutex_lock(&mp->m_lock), ==, 0);
-	ASSERT3P(mp->m_owner, ==, MTX_INIT);
-	mp->m_owner = curthread;
+	VERIFY0(pthread_mutex_lock(&mp->m_lock));
+	mp->m_owner = pthread_self();
}
int
mutex_tryenter(kmutex_t *mp)
{
-	ASSERT3U(mp->m_magic, ==, MTX_MAGIC);
-	ASSERT3P(mp->m_owner, !=, MTX_DEST);
-	if (0 == pthread_mutex_trylock(&mp->m_lock)) {
-		ASSERT3P(mp->m_owner, ==, MTX_INIT);
-		mp->m_owner = curthread;
+	int error;
+
+	error = pthread_mutex_trylock(&mp->m_lock);
+	if (error == 0) {
+		mp->m_owner = pthread_self();
		return (1);
	} else {
+		/* Any failure other than contention indicates a bug. */
+		VERIFY3S(error, ==, EBUSY);
		return (0);
	}
}
void
mutex_exit(kmutex_t *mp)
{
+	/* Clear the recorded owner before actually releasing the lock. */
-	ASSERT3U(mp->m_magic, ==, MTX_MAGIC);
-	ASSERT3P(mutex_owner(mp), ==, curthread);
-	mp->m_owner = MTX_INIT;
-	VERIFY3S(pthread_mutex_unlock(&mp->m_lock), ==, 0);
-}
-
-void *
-mutex_owner(kmutex_t *mp)
-{
-	ASSERT3U(mp->m_magic, ==, MTX_MAGIC);
-	return (mp->m_owner);
-}
-
-int
-mutex_held(kmutex_t *mp)
-{
-	return (mp->m_owner == curthread);
+	memset(&mp->m_owner, 0, sizeof (pthread_t));
+	VERIFY0(pthread_mutex_unlock(&mp->m_lock));
}
/*
void
rw_init(krwlock_t *rwlp, char *name, int type, void *arg)
{
+	/* Only a writer records rw_owner; readers are merely counted. */
-	ASSERT3S(type, ==, RW_DEFAULT);
-	ASSERT3P(arg, ==, NULL);
-	VERIFY3S(pthread_rwlock_init(&rwlp->rw_lock, NULL), ==, 0);
-	rwlp->rw_owner = RW_INIT;
-	rwlp->rw_wr_owner = RW_INIT;
+	VERIFY0(pthread_rwlock_init(&rwlp->rw_lock, NULL));
	rwlp->rw_readers = 0;
-	rwlp->rw_magic = RW_MAGIC;
+	rwlp->rw_owner = 0;
}
void
rw_destroy(krwlock_t *rwlp)
{
-	ASSERT3U(rwlp->rw_magic, ==, RW_MAGIC);
-
-	VERIFY3S(pthread_rwlock_destroy(&rwlp->rw_lock), ==, 0);
-	rwlp->rw_magic = 0;
+	VERIFY0(pthread_rwlock_destroy(&rwlp->rw_lock));
}
void
rw_enter(krwlock_t *rwlp, krw_t rw)
{
-	ASSERT3U(rwlp->rw_magic, ==, RW_MAGIC);
-	ASSERT3P(rwlp->rw_owner, !=, curthread);
-	ASSERT3P(rwlp->rw_wr_owner, !=, curthread);
-
	if (rw == RW_READER) {
-		VERIFY3S(pthread_rwlock_rdlock(&rwlp->rw_lock), ==, 0);
-		ASSERT3P(rwlp->rw_wr_owner, ==, RW_INIT);
-
+		VERIFY0(pthread_rwlock_rdlock(&rwlp->rw_lock));
		atomic_inc_uint(&rwlp->rw_readers);
	} else {
-		VERIFY3S(pthread_rwlock_wrlock(&rwlp->rw_lock), ==, 0);
-		ASSERT3P(rwlp->rw_wr_owner, ==, RW_INIT);
-		ASSERT3U(rwlp->rw_readers, ==, 0);
-
-		rwlp->rw_wr_owner = curthread;
+		VERIFY0(pthread_rwlock_wrlock(&rwlp->rw_lock));
+		rwlp->rw_owner = pthread_self();
	}
-
-	rwlp->rw_owner = curthread;
}
void
rw_exit(krwlock_t *rwlp)
{
+	/* RW_READ_HELD distinguishes a reader release from a writer release. */
-	ASSERT3U(rwlp->rw_magic, ==, RW_MAGIC);
-	ASSERT(RW_LOCK_HELD(rwlp));
-
	if (RW_READ_HELD(rwlp))
		atomic_dec_uint(&rwlp->rw_readers);
	else
-		rwlp->rw_wr_owner = RW_INIT;
+		rwlp->rw_owner = 0;
-	rwlp->rw_owner = RW_INIT;
-	VERIFY3S(pthread_rwlock_unlock(&rwlp->rw_lock), ==, 0);
+	VERIFY0(pthread_rwlock_unlock(&rwlp->rw_lock));
}
int
rw_tryenter(krwlock_t *rwlp, krw_t rw)
{
-	int rv;
-
-	ASSERT3U(rwlp->rw_magic, ==, RW_MAGIC);
+	int error;
	if (rw == RW_READER)
-		rv = pthread_rwlock_tryrdlock(&rwlp->rw_lock);
+		error = pthread_rwlock_tryrdlock(&rwlp->rw_lock);
	else
-		rv = pthread_rwlock_trywrlock(&rwlp->rw_lock);
-
-	if (rv == 0) {
-		ASSERT3P(rwlp->rw_wr_owner, ==, RW_INIT);
+		error = pthread_rwlock_trywrlock(&rwlp->rw_lock);
+	if (error == 0) {
		if (rw == RW_READER)
			atomic_inc_uint(&rwlp->rw_readers);
-		else {
-			ASSERT3U(rwlp->rw_readers, ==, 0);
-			rwlp->rw_wr_owner = curthread;
-		}
+		else
+			rwlp->rw_owner = pthread_self();
-		rwlp->rw_owner = curthread;
		return (1);
	}
+	/* Any failure other than contention indicates a bug. */
-	VERIFY3S(rv, ==, EBUSY);
+	VERIFY3S(error, ==, EBUSY);
	return (0);
}
+/* ARGSUSED */
+uint32_t
+zone_get_hostid(void *zonep)
+{
+	/* zonep is ignored; there are no zones in the userland build. */
+	/*
+	 * We're emulating the system's hostid in userland.
+	 */
+	return (strtoul(hw_serial, NULL, 10));
+}
+
+
int
rw_tryupgrade(krwlock_t *rwlp)
{
-	ASSERT3U(rwlp->rw_magic, ==, RW_MAGIC);
-
+	/* pthread rwlocks cannot be upgraded; always report failure. */
	return (0);
}
void
cv_init(kcondvar_t *cv, char *name, int type, void *arg)
{
+	/* kcondvar_t is now a bare pthread_cond_t; name/type/arg unused. */
-	ASSERT3S(type, ==, CV_DEFAULT);
-	cv->cv_magic = CV_MAGIC;
-	VERIFY3S(pthread_cond_init(&cv->cv, NULL), ==, 0);
+	VERIFY0(pthread_cond_init(cv, NULL));
}
void
cv_destroy(kcondvar_t *cv)
{
-	ASSERT3U(cv->cv_magic, ==, CV_MAGIC);
-	VERIFY3S(pthread_cond_destroy(&cv->cv), ==, 0);
-	cv->cv_magic = 0;
+	VERIFY0(pthread_cond_destroy(cv));
}
void
cv_wait(kcondvar_t *cv, kmutex_t *mp)
{
+	/* Drop recorded mutex ownership across the wait, reclaim on wakeup. */
-	ASSERT3U(cv->cv_magic, ==, CV_MAGIC);
-	ASSERT3P(mutex_owner(mp), ==, curthread);
-	mp->m_owner = MTX_INIT;
-	int ret = pthread_cond_wait(&cv->cv, &mp->m_lock);
-	if (ret != 0)
-		VERIFY3S(ret, ==, EINTR);
-	mp->m_owner = curthread;
+	memset(&mp->m_owner, 0, sizeof (pthread_t));
+	VERIFY0(pthread_cond_wait(cv, &mp->m_lock));
+	mp->m_owner = pthread_self();
}
clock_t
{
int error;
struct timeval tv;
- timestruc_t ts;
+ struct timespec ts;
clock_t delta;
- ASSERT3U(cv->cv_magic, ==, CV_MAGIC);
-
-top:
delta = abstime - ddi_get_lbolt();
if (delta <= 0)
return (-1);
VERIFY(gettimeofday(&tv, NULL) == 0);
ts.tv_sec = tv.tv_sec + delta / hz;
- ts.tv_nsec = tv.tv_usec * 1000 + (delta % hz) * (NANOSEC / hz);
+ ts.tv_nsec = tv.tv_usec * NSEC_PER_USEC + (delta % hz) * (NANOSEC / hz);
if (ts.tv_nsec >= NANOSEC) {
ts.tv_sec++;
ts.tv_nsec -= NANOSEC;
}
- ASSERT3P(mutex_owner(mp), ==, curthread);
- mp->m_owner = MTX_INIT;
- error = pthread_cond_timedwait(&cv->cv, &mp->m_lock, &ts);
- mp->m_owner = curthread;
+ memset(&mp->m_owner, 0, sizeof (pthread_t));
+ error = pthread_cond_timedwait(cv, &mp->m_lock, &ts);
+ mp->m_owner = pthread_self();
if (error == ETIMEDOUT)
return (-1);
- if (error == EINTR)
- goto top;
+ VERIFY0(error);
+
+ return (1);
+}
+
+/*ARGSUSED*/
+clock_t
+cv_timedwait_hires(kcondvar_t *cv, kmutex_t *mp, hrtime_t tim, hrtime_t res,
+    int flag)
+{
+	int error;
+	struct timeval tv;
+	struct timespec ts;
+	hrtime_t delta;
+
+	ASSERT(flag == 0 || flag == CALLOUT_FLAG_ABSOLUTE);
+	/* CALLOUT_FLAG_ABSOLUTE: tim is an absolute gethrtime() deadline. */
-	VERIFY3S(error, ==, 0);
+	delta = tim;
+	if (flag & CALLOUT_FLAG_ABSOLUTE)
+		delta -= gethrtime();
+
+	if (delta <= 0)
+		return (-1);
+
+	VERIFY0(gettimeofday(&tv, NULL));
+
+	ts.tv_sec = tv.tv_sec + delta / NANOSEC;
+	ts.tv_nsec = tv.tv_usec * NSEC_PER_USEC + (delta % NANOSEC);
+	if (ts.tv_nsec >= NANOSEC) {
+		ts.tv_sec++;
+		ts.tv_nsec -= NANOSEC;
+	}
+
+	/* Drop recorded mutex ownership across the wait, reclaim on wakeup. */
+	memset(&mp->m_owner, 0, sizeof (pthread_t));
+	error = pthread_cond_timedwait(cv, &mp->m_lock, &ts);
+	mp->m_owner = pthread_self();
+
+	if (error == ETIMEDOUT)
+		return (-1);
+
+	VERIFY0(error);
	return (1);
}
void
cv_signal(kcondvar_t *cv)
{
+	/* kcondvar_t is now a bare pthread_cond_t. */
-	ASSERT3U(cv->cv_magic, ==, CV_MAGIC);
-	VERIFY3S(pthread_cond_signal(&cv->cv), ==, 0);
+	VERIFY0(pthread_cond_signal(cv));
}
void
cv_broadcast(kcondvar_t *cv)
{
-	ASSERT3U(cv->cv_magic, ==, CV_MAGIC);
-	VERIFY3S(pthread_cond_broadcast(&cv->cv), ==, 0);
+	VERIFY0(pthread_cond_broadcast(cv));
+}
+
+/*
+ * =========================================================================
+ * procfs list
+ * =========================================================================
+ */
+
+void
+seq_printf(struct seq_file *m, const char *fmt, ...)
+{}
+
+void
+procfs_list_install(const char *module,
+    const char *name,
+    procfs_list_t *procfs_list,
+    int (*show)(struct seq_file *f, void *p),
+    int (*show_header)(struct seq_file *f),
+    int (*clear)(procfs_list_t *procfs_list),
+    size_t procfs_list_node_off)
+{
+	/* No procfs in userland; only the backing list and lock are kept. */
+	mutex_init(&procfs_list->pl_lock, NULL, MUTEX_DEFAULT, NULL);
+	list_create(&procfs_list->pl_list,
+	    procfs_list_node_off + sizeof (procfs_list_node_t),
+	    procfs_list_node_off + offsetof(procfs_list_node_t, pln_link));
+	procfs_list->pl_next_id = 1;
+	procfs_list->pl_node_offset = procfs_list_node_off;
+}
+
+void
+procfs_list_uninstall(procfs_list_t *procfs_list)
+{}
+
+void
+procfs_list_destroy(procfs_list_t *procfs_list)
+{
+	ASSERT(list_is_empty(&procfs_list->pl_list));
+	list_destroy(&procfs_list->pl_list);
+	mutex_destroy(&procfs_list->pl_lock);
+}
+
+/* Locate the embedded procfs_list_node_t's id within an enclosing object. */
+#define NODE_ID(procfs_list, obj) \
+	(((procfs_list_node_t *)(((char *)obj) + \
+	(procfs_list)->pl_node_offset))->pln_id)
+
+void
+procfs_list_add(procfs_list_t *procfs_list, void *p)
+{
+	ASSERT(MUTEX_HELD(&procfs_list->pl_lock));
+	NODE_ID(procfs_list, p) = procfs_list->pl_next_id++;
+	list_insert_tail(&procfs_list->pl_list, p);
}
/*
int
vn_open(char *path, int x1, int flags, int mode, vnode_t **vpp, int x2, int x3)
{
- int fd;
+ int fd = -1;
+ int dump_fd = -1;
vnode_t *vp;
int old_umask = 0;
char *realpath;
#ifdef __linux__
flags |= O_DIRECT;
#endif
- /* We shouldn't be writing to block devices in userspace */
- VERIFY(!(flags & FWRITE));
}
if (flags & FCREAT)
* FREAD and FWRITE to the corresponding O_RDONLY, O_WRONLY, and O_RDWR.
*/
fd = open64(realpath, flags - FREAD, mode);
- free(realpath);
+ if (fd == -1) {
+ err = errno;
+ free(realpath);
+ return (err);
+ }
if (flags & FCREAT)
(void) umask(old_umask);
- if (fd == -1)
- return (errno);
+ if (vn_dumpdir != NULL) {
+ char *dumppath = umem_zalloc(MAXPATHLEN, UMEM_NOFAIL);
+ (void) snprintf(dumppath, MAXPATHLEN,
+ "%s/%s", vn_dumpdir, basename(realpath));
+ dump_fd = open64(dumppath, O_CREAT | O_WRONLY, 0666);
+ umem_free(dumppath, MAXPATHLEN);
+ if (dump_fd == -1) {
+ err = errno;
+ free(realpath);
+ close(fd);
+ return (err);
+ }
+ } else {
+ dump_fd = -1;
+ }
+
+ free(realpath);
if (fstat64_blk(fd, &st) == -1) {
err = errno;
close(fd);
+ if (dump_fd != -1)
+ close(dump_fd);
return (err);
}
vp->v_fd = fd;
vp->v_size = st.st_size;
vp->v_path = spa_strdup(path);
+ vp->v_dump_fd = dump_fd;
return (0);
}
/*ARGSUSED*/
int
vn_rdwr(int uio, vnode_t *vp, void *addr, ssize_t len, offset_t offset,
- int x1, int x2, rlim64_t x3, void *x4, ssize_t *residp)
+ int x1, int x2, rlim64_t x3, void *x4, ssize_t *residp)
{
ssize_t rc, done = 0, split;
if (uio == UIO_READ) {
rc = pread64(vp->v_fd, addr, len, offset);
+ if (vp->v_dump_fd != -1 && rc != -1) {
+ int status;
+ status = pwrite64(vp->v_dump_fd, addr, rc, offset);
+ ASSERT(status != -1);
+ }
} else {
/*
* To simulate partial disk writes, we split writes into two
* (memory or disk) due to O_DIRECT, so we abort() in order to
* catch the offender.
*/
- abort();
+ abort();
}
#endif
if (rc == -1)
vn_close(vnode_t *vp)
{
close(vp->v_fd);
+ if (vp->v_dump_fd != -1)
+ close(vp->v_dump_fd);
spa_strfree(vp->v_path);
umem_free(vp, sizeof (vnode_t));
}
*/
if (dprintf_find_string("on"))
dprintf_print_all = 1;
+
+ if (dprintf_string != NULL)
+ zfs_flags |= ZFS_DEBUG_DPRINTF;
}
/*
if (dprintf_find_string("pid"))
(void) printf("%d ", getpid());
if (dprintf_find_string("tid"))
- (void) printf("%u ", (uint_t) pthread_self());
+ (void) printf("%u ", (uint_t)pthread_self());
if (dprintf_find_string("cpu"))
(void) printf("%u ", getcpuid());
if (dprintf_find_string("time"))
int
kobj_read_file(struct _buf *file, char *buf, unsigned size, unsigned off)
{
+	/* Returns bytes read, or -1 if the underlying vn_rdwr() fails. */
-	ssize_t resid;
+	ssize_t resid = 0;
-	vn_rdwr(UIO_READ, (vnode_t *)file->_fd, buf, size, (offset_t)off,
-	    UIO_SYSSPACE, 0, 0, 0, &resid);
+	if (vn_rdwr(UIO_READ, (vnode_t *)file->_fd, buf, size, (offset_t)off,
+	    UIO_SYSSPACE, 0, 0, 0, &resid) != 0)
+		return (-1);
	return (size - resid);
}
void
delay(clock_t ticks)
{
+	/* Sleep for 'ticks' clock ticks, using poll() as a portable nap. */
-	poll(0, 0, ticks * (1000 / hz));
+	(void) poll(0, 0, ticks * (1000 / hz));
}
/*
* Find highest one bit set.
- * Returns bit number + 1 of highest bit that is set, otherwise returns 0.
- * High order bit is 31 (or 63 in _LP64 kernel).
+ * Returns bit number + 1 of highest bit that is set, otherwise returns 0.
+ * The __builtin_clzll() function is supported by both GCC and Clang.
*/
int
-highbit(ulong_t i)
+highbit64(uint64_t i)
{
-	register int h = 1;
+	/* __builtin_clzll(0) is undefined, hence the explicit zero check. */
+	if (i == 0)
+		return (0);
+
+	return (NBBY * sizeof (uint64_t) - __builtin_clzll(i));
+}
+/*
+ * Find lowest one bit set.
+ * Returns bit number + 1 of lowest bit that is set, otherwise returns 0.
+ * The __builtin_ffsll() function is supported by both GCC and Clang.
+ */
+int
+lowbit64(uint64_t i)
+{
	if (i == 0)
		return (0);
-#ifdef _LP64
-	if (i & 0xffffffff00000000ul) {
-		h += 32; i >>= 32;
-	}
-#endif
-	if (i & 0xffff0000) {
-		h += 16; i >>= 16;
-	}
-	if (i & 0xff00) {
-		h += 8; i >>= 8;
-	}
-	if (i & 0xf0) {
-		h += 4; i >>= 4;
-	}
-	if (i & 0xc) {
-		h += 2; i >>= 2;
-	}
-	if (i & 0x2) {
-		h += 1;
-	}
-	return (h);
+
+	return (__builtin_ffsll(i));
}
+/* Entropy device paths; mutable globals, presumably so callers can override
+ * them before random_init() — TODO confirm against users. */
+char *random_path = "/dev/random";
+char *urandom_path = "/dev/urandom";
static int random_fd = -1, urandom_fd = -1;
+/* Open both entropy devices once at startup; aborts if either is missing. */
+void
+random_init(void)
+{
+	VERIFY((random_fd = open(random_path, O_RDONLY)) != -1);
+	VERIFY((urandom_fd = open(urandom_path, O_RDONLY)) != -1);
+}
+
+void
+random_fini(void)
+{
+	close(random_fd);
+	close(urandom_fd);
+
+	random_fd = -1;
+	urandom_fd = -1;
+}
+
+
static int
random_get_bytes_common(uint8_t *ptr, size_t len, int fd)
{
return (0);
}
+/* Return the cached uname(2) data populated during kernel_init(). */
+utsname_t *
+utsname(void)
+{
+	return (&hw_utsname);
+}
+
/*
* =========================================================================
* kernel emulation setup & teardown
void
kernel_init(int mode)
{
+ extern uint_t rrw_tsd_key;
+
umem_nofail_callback(umem_out_of_memory);
physmem = sysconf(_SC_PHYS_PAGES);
(double)physmem * sysconf(_SC_PAGE_SIZE) / (1ULL << 30));
(void) snprintf(hw_serial, sizeof (hw_serial), "%ld",
- (mode & FWRITE) ? gethostid() : 0);
+ (mode & FWRITE) ? get_system_hostid() : 0);
+
+ random_init();
- VERIFY((random_fd = open("/dev/random", O_RDONLY)) != -1);
- VERIFY((urandom_fd = open("/dev/urandom", O_RDONLY)) != -1);
+ VERIFY0(uname(&hw_utsname));
- thread_init();
system_taskq_init();
+ icp_init();
spa_init(mode);
+
+ fletcher_4_init();
+
+ tsd_create(&rrw_tsd_key, rrw_tsd_destroy);
}
void
kernel_fini(void)
{
+	/* Tear down subsystems in roughly the reverse order of kernel_init(). */
+	fletcher_4_fini();
	spa_fini();
+	icp_fini();
	system_taskq_fini();
-	thread_fini();
-	close(random_fd);
-	close(urandom_fd);
-
-	random_fd = -1;
-	urandom_fd = -1;
+	random_fini();
}
uid_t
return (0);
}
+/* Policy stub: always grants permission (returns 0) in userland. */
+int
+secpolicy_zfs(const cred_t *cr)
+{
+	return (0);
+}
+
ksiddomain_t *
ksid_lookupdomain(const char *dom)
{
{
return (0);
}
+
+/* Filesystem-transaction marking is a kernel concept; all no-ops here. */
+fstrans_cookie_t
+spl_fstrans_mark(void)
+{
+	return ((fstrans_cookie_t)0);
+}
+
+void
+spl_fstrans_unmark(fstrans_cookie_t cookie)
+{
+}
+
+int
+__spl_pf_fstrans_check(void)
+{
+	return (0);
+}
+
+/* zvol minor-node management does not exist in userland; all stubs. */
+void *zvol_tag = "zvol_tag";
+
+void
+zvol_create_minors(spa_t *spa, const char *name, boolean_t async)
+{
+}
+
+void
+zvol_remove_minor(spa_t *spa, const char *name, boolean_t async)
+{
+}
+
+void
+zvol_remove_minors(spa_t *spa, const char *name, boolean_t async)
+{
+}
+
+void
+zvol_rename_minors(spa_t *spa, const char *oldname, const char *newname,
+    boolean_t async)
+{
+}