+DEFAULT_INCLUDES = -I. -I.. -I../lib
AM_CFLAGS = -g -O2 -W -Wall -Wstrict-prototypes -Wshadow
-INCLUDES = -I$(top_srcdir)/include
+
sbin_PROGRAMS = splat
splat_SOURCES = splat.c
splat_LDFLAGS = $(top_builddir)/lib/libcommon.la
+
EXTRA_DIST = splat.h
#define _SPLAT_H
#include "list.h"
-#include "splat-ctl.h"
+#include "../include/splat-ctl.h"
#define DEV_NAME "/dev/splatctl"
#define COLOR_BLACK "\033[0;30m"
AC_CANONICAL_SYSTEM
AM_INIT_AUTOMAKE(spl, 0.0.1)
-AC_CONFIG_HEADERS([include/config.h])
+AC_CONFIG_HEADERS([config.h])
AC_PROG_INSTALL
AC_PROG_CC
modules/spl/Makefile
modules/splat/Makefile
include/Makefile
+ include/sys/Makefile
scripts/Makefile
scripts/spl.spec
])
-EXTRA_DIST = spl.h
-EXTRA_DIST += spl-condvar.h spl-kmem.h spl-random.h spl-thread.h
-EXTRA_DIST += spl-types.h spl-cred.h spl-kstat.h spl-rwlock.h
-EXTRA_DIST += spl-time.h spl-callb.h spl-generic.h spl-mutex.h
-EXTRA_DIST += spl-taskq.h spl-timer.h
-EXTRA_DIST += splat-ctl.h
-EXTRA_DIST += list.h
+SUBDIRS = sys
+
+EXTRA_DIST = splat-ctl.h
+++ /dev/null
-/*****************************************************************************
- * $Id: list.h 2899 2002-12-11 19:00:36Z dun $
- *****************************************************************************
- * Copyright (C) 2001-2002 The Regents of the University of California.
- * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- * Written by Chris Dunlap <cdunlap@llnl.gov>.
- *
- * This file is from LSD-Tools, the LLNL Software Development Toolbox.
- *
- * LSD-Tools is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * LSD-Tools is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with LSD-Tools; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *****************************************************************************/
-
-
-#ifndef LSD_LIST_H
-#define LSD_LIST_H
-
-
-/***********
- * Notes *
- ***********/
-/*
- * If NDEBUG is not defined, internal debug code will be enabled. This is
- * intended for development use only and production code should define NDEBUG.
- *
- * If WITH_LSD_FATAL_ERROR_FUNC is defined, the linker will expect to
- * find an external lsd_fatal_error(file,line,mesg) function. By default,
- * lsd_fatal_error(file,line,mesg) is a macro definition that outputs an
- * error message to stderr. This macro may be redefined to invoke another
- * routine instead.
- *
- * If WITH_LSD_NOMEM_ERROR_FUNC is defined, the linker will expect to
- * find an external lsd_nomem_error(file,line,mesg) function. By default,
- * lsd_nomem_error(file,line,mesg) is a macro definition that returns NULL.
- * This macro may be redefined to invoke another routine instead.
- *
- * If WITH_PTHREADS is defined, these routines will be thread-safe.
- */
-
-
-/****************
- * Data Types *
- ****************/
-
-typedef struct list * List;
-/*
- * List opaque data type.
- */
-
-typedef struct listIterator * ListIterator;
-/*
- * List Iterator opaque data type.
- */
-
-typedef void (*ListDelF) (void *x);
-/*
- * Function prototype to deallocate data stored in a list.
- * This function is responsible for freeing all memory associated
- * with an item, including all subordinate items (if applicable).
- */
-
-typedef int (*ListCmpF) (void *x, void *y);
-/*
- * Function prototype for comparing two items in a list.
- * Returns less-than-zero if (x<y), zero if (x==y), and
- * greather-than-zero if (x>y).
- */
-
-typedef int (*ListFindF) (void *x, void *key);
-/*
- * Function prototype for matching items in a list.
- * Returns non-zero if (x==key); o/w returns zero.
- */
-
-typedef int (*ListForF) (void *x, void *arg);
-/*
- * Function prototype for operating on each item in a list.
- * Returns less-than-zero on error.
- */
-
-
-/*******************************
- * General-Purpose Functions *
- *******************************/
-
-List list_create (ListDelF f);
-/*
- * Creates and returns a new empty list, or lsd_nomem_error() on failure.
- * The deletion function [f] is used to deallocate memory used by items
- * in the list; if this is NULL, memory associated with these items
- * will not be freed when the list is destroyed.
- * Note: Abandoning a list without calling list_destroy() will result
- * in a memory leak.
- */
-
-void list_destroy (List l);
-/*
- * Destroys list [l], freeing memory used for list iterators and the
- * list itself; if a deletion function was specified when the list
- * was created, it will be called for each item in the list.
- */
-
-int list_is_empty (List l);
-/*
- * Returns non-zero if list [l] is empty; o/w returns zero.
- */
-
-int list_count (List l);
-/*
- * Returns the number of items in list [l].
- */
-
-
-/***************************
- * List Access Functions *
- ***************************/
-
-void * list_append (List l, void *x);
-/*
- * Inserts data [x] at the end of list [l].
- * Returns the data's ptr, or lsd_nomem_error() if insertion failed.
- */
-
-void * list_prepend (List l, void *x);
-/*
- * Inserts data [x] at the beginning of list [l].
- * Returns the data's ptr, or lsd_nomem_error() if insertion failed.
- */
-
-void * list_find_first (List l, ListFindF f, void *key);
-/*
- * Traverses list [l] using [f] to match each item with [key].
- * Returns a ptr to the first item for which the function [f]
- * returns non-zero, or NULL if no such item is found.
- * Note: This function differs from list_find() in that it does not require
- * a list iterator; it should only be used when all list items are known
- * to be unique (according to the function [f]).
- */
-
-int list_delete_all (List l, ListFindF f, void *key);
-/*
- * Traverses list [l] using [f] to match each item with [key].
- * Removes all items from the list for which the function [f] returns
- * non-zero; if a deletion function was specified when the list was
- * created, it will be called to deallocate each item being removed.
- * Returns a count of the number of items removed from the list.
- */
-
-int list_for_each (List l, ListForF f, void *arg);
-/*
- * For each item in list [l], invokes the function [f] with [arg].
- * Returns a count of the number of items on which [f] was invoked.
- * If [f] returns <0 for a given item, the iteration is aborted and the
- * function returns the negative of that item's position in the list.
- */
-
-void list_sort (List l, ListCmpF f);
-/*
- * Sorts list [l] into ascending order according to the function [f].
- * Note: Sorting a list resets all iterators associated with the list.
- * Note: The sort algorithm is stable.
- */
-
-
-/****************************
- * Stack Access Functions *
- ****************************/
-
-void * list_push (List l, void *x);
-/*
- * Pushes data [x] onto the top of stack [l].
- * Returns the data's ptr, or lsd_nomem_error() if insertion failed.
- */
-
-void * list_pop (List l);
-/*
- * Pops the data item at the top of the stack [l].
- * Returns the data's ptr, or NULL if the stack is empty.
- */
-
-void * list_peek (List l);
-/*
- * Peeks at the data item at the top of the stack (or head of the queue) [l].
- * Returns the data's ptr, or NULL if the stack (or queue) is empty.
- * Note: The item is not removed from the list.
- */
-
-
-/****************************
- * Queue Access Functions *
- ****************************/
-
-void * list_enqueue (List l, void *x);
-/*
- * Enqueues data [x] at the tail of queue [l].
- * Returns the data's ptr, or lsd_nomem_error() if insertion failed.
- */
-
-void * list_dequeue (List l);
-/*
- * Dequeues the data item at the head of the queue [l].
- * Returns the data's ptr, or NULL if the queue is empty.
- */
-
-
-/*****************************
- * List Iterator Functions *
- *****************************/
-
-ListIterator list_iterator_create (List l);
-/*
- * Creates and returns a list iterator for non-destructively traversing
- * list [l], or lsd_nomem_error() on failure.
- */
-
-void list_iterator_reset (ListIterator i);
-/*
- * Resets the list iterator [i] to start traversal at the beginning
- * of the list.
- */
-
-void list_iterator_destroy (ListIterator i);
-/*
- * Destroys the list iterator [i]; list iterators not explicitly destroyed
- * in this manner will be destroyed when the list is deallocated via
- * list_destroy().
- */
-
-void * list_next (ListIterator i);
-/*
- * Returns a ptr to the next item's data,
- * or NULL once the end of the list is reached.
- * Example: i=list_iterator_create(i); while ((x=list_next(i))) {...}
- */
-
-void * list_insert (ListIterator i, void *x);
-/*
- * Inserts data [x] immediately before the last item returned via list
- * iterator [i]; once the list iterator reaches the end of the list,
- * insertion is made at the list's end.
- * Returns the data's ptr, or lsd_nomem_error() if insertion failed.
- */
-
-void * list_find (ListIterator i, ListFindF f, void *key);
-/*
- * Traverses the list from the point of the list iterator [i]
- * using [f] to match each item with [key].
- * Returns a ptr to the next item for which the function [f]
- * returns non-zero, or NULL once the end of the list is reached.
- * Example: i=list_iterator_reset(i); while ((x=list_find(i,f,k))) {...}
- */
-
-void * list_remove (ListIterator i);
-/*
- * Removes from the list the last item returned via list iterator [i]
- * and returns the data's ptr.
- * Note: The client is responsible for freeing the returned data.
- */
-
-int list_delete (ListIterator i);
-/*
- * Removes from the list the last item returned via list iterator [i];
- * if a deletion function was specified when the list was created,
- * it will be called to deallocate the item being removed.
- * Returns a count of the number of items removed from the list
- * (ie, '1' if the item was removed, and '0' otherwise).
- */
-
-
-#endif /* !LSD_LIST_H */
+++ /dev/null
-#ifndef _SPL_CALLB_H
-#define _SPL_CALLB_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <linux/module.h>
-#include "spl-mutex.h"
-
-#define DEBUG_CALLB
-
-#ifndef DEBUG_CALLB
-#define CALLB_CPR_ASSERT(cp) BUG_ON(!(MUTEX_HELD((cp)->cc_lockp)));
-#else
-#define CALLB_CPR_ASSERT(cp)
-#endif
-
-
-typedef struct callb_cpr {
- kmutex_t *cc_lockp;
-} callb_cpr_t;
-
-#define CALLB_CPR_INIT(cp, lockp, func, name) { \
- (cp)->cc_lockp = lockp; \
-}
-
-#define CALLB_CPR_SAFE_BEGIN(cp) { \
- CALLB_CPR_ASSERT(cp); \
-}
-
-#define CALLB_CPR_SAFE_END(cp, lockp) { \
- CALLB_CPR_ASSERT(cp); \
-}
-
-#define CALLB_CPR_EXIT(cp) { \
- ASSERT(MUTEX_HELD((cp)->cc_lockp)); \
- mutex_exit((cp)->cc_lockp); \
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _SPL_CALLB_H */
-
+++ /dev/null
-#ifndef _SPL_CONDVAR_H
-#define _SPL_CONDVAR_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <linux/module.h>
-#include <linux/wait.h>
-
-/* The kcondvar_t struct is protected by mutex taken externally before
- * calling any of the wait/signal funs, and passed into the wait funs.
- */
-#define CV_MAGIC 0x346545f4
-#define CV_POISON 0x95
-
-typedef struct {
- int cv_magic;
- char *cv_name;
- wait_queue_head_t cv_event;
- atomic_t cv_waiters;
- kmutex_t *cv_mutex; /* only for verification purposes */
-} kcondvar_t;
-
-typedef enum { CV_DEFAULT=0, CV_DRIVER } kcv_type_t;
-
-static __inline__ void
-cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg)
-{
- BUG_ON(cvp == NULL);
- BUG_ON(type != CV_DEFAULT);
- BUG_ON(arg != NULL);
-
- cvp->cv_magic = CV_MAGIC;
- init_waitqueue_head(&cvp->cv_event);
- atomic_set(&cvp->cv_waiters, 0);
- cvp->cv_mutex = NULL;
- cvp->cv_name = NULL;
-
- if (name) {
- cvp->cv_name = kmalloc(strlen(name) + 1, GFP_KERNEL);
- if (cvp->cv_name)
- strcpy(cvp->cv_name, name);
- }
-}
-
-static __inline__ void
-cv_destroy(kcondvar_t *cvp)
-{
- BUG_ON(cvp == NULL);
- BUG_ON(cvp->cv_magic != CV_MAGIC);
- BUG_ON(atomic_read(&cvp->cv_waiters) != 0);
- BUG_ON(waitqueue_active(&cvp->cv_event));
-
- if (cvp->cv_name)
- kfree(cvp->cv_name);
-
- memset(cvp, CV_POISON, sizeof(*cvp));
-}
-
-static __inline__ void
-cv_wait(kcondvar_t *cvp, kmutex_t *mtx)
-{
- DEFINE_WAIT(wait);
- int flag = 1;
-
- BUG_ON(cvp == NULL || mtx == NULL);
- BUG_ON(cvp->cv_magic != CV_MAGIC);
- BUG_ON(!mutex_owned(mtx));
-
- if (cvp->cv_mutex == NULL)
- cvp->cv_mutex = mtx;
-
- /* Ensure the same mutex is used by all callers */
- BUG_ON(cvp->cv_mutex != mtx);
-
- for (;;) {
- prepare_to_wait_exclusive(&cvp->cv_event, &wait,
- TASK_INTERRUPTIBLE);
- /* Must occur after we are added to the list but only once */
- if (flag) {
- atomic_inc(&cvp->cv_waiters);
- flag = 0;
- }
-
- /* XXX - The correct thing to do here may be to wake up and
- * force the caller to handle the signal. Spurious wakeups
- * should already be safely handled by the caller. */
- if (signal_pending(current))
- flush_signals(current);
-
- /* Mutex should be dropped after prepare_to_wait() this
- * ensures we're linked in to the waiters list and avoids the
- * race where 'cvp->cv_waiters > 0' but the list is empty. */
- mutex_exit(mtx);
- schedule();
- mutex_enter(mtx);
-
- /* XXX - The correct thing to do here may be to wake up and
- * force the caller to handle the signal. Spurious wakeups
- * should already be safely handled by the caller. */
- if (signal_pending(current))
- continue;
-
- break;
- }
-
- atomic_dec(&cvp->cv_waiters);
- finish_wait(&cvp->cv_event, &wait);
-}
-
-/* 'expire_time' argument is an absolute wall clock time in jiffies.
- * Return value is time left (expire_time - now) or -1 if timeout occurred.
- */
-static __inline__ clock_t
-cv_timedwait(kcondvar_t *cvp, kmutex_t *mtx, clock_t expire_time)
-{
- DEFINE_WAIT(wait);
- clock_t time_left;
- int flag = 1;
-
- BUG_ON(cvp == NULL || mtx == NULL);
- BUG_ON(cvp->cv_magic != CV_MAGIC);
- BUG_ON(!mutex_owned(mtx));
-
- if (cvp->cv_mutex == NULL)
- cvp->cv_mutex = mtx;
-
- /* XXX - Does not handle jiffie wrap properly */
- time_left = expire_time - jiffies;
- if (time_left <= 0)
- return -1;
-
- /* Ensure the same mutex is used by all callers */
- BUG_ON(cvp->cv_mutex != mtx);
-
- for (;;) {
- prepare_to_wait_exclusive(&cvp->cv_event, &wait,
- TASK_INTERRUPTIBLE);
- if (flag) {
- atomic_inc(&cvp->cv_waiters);
- flag = 0;
- }
-
- /* XXX - The correct thing to do here may be to wake up and
- * force the caller to handle the signal. Spurious wakeups
- * should already be safely handled by the caller. */
- if (signal_pending(current))
- flush_signals(current);
-
- /* Mutex should be dropped after prepare_to_wait() this
- * ensures we're linked in to the waiters list and avoids the
- * race where 'cvp->cv_waiters > 0' but the list is empty. */
- mutex_exit(mtx);
- time_left = schedule_timeout(time_left);
- mutex_enter(mtx);
-
- /* XXX - The correct thing to do here may be to wake up and
- * force the caller to handle the signal. Spurious wakeups
- * should already be safely handled by the caller. */
- if (signal_pending(current)) {
- if (time_left > 0)
- continue;
-
- flush_signals(current);
- }
-
- break;
- }
-
- atomic_dec(&cvp->cv_waiters);
- finish_wait(&cvp->cv_event, &wait);
-
- return (time_left > 0 ? time_left : -1);
-}
-
-static __inline__ void
-cv_signal(kcondvar_t *cvp)
-{
- BUG_ON(cvp == NULL);
- BUG_ON(cvp->cv_magic != CV_MAGIC);
-
- /* All waiters are added with WQ_FLAG_EXCLUSIVE so only one
- * waiter will be set runable with each call to wake_up().
- * Additionally wake_up() holds a spin_lock assoicated with
- * the wait queue to ensure we don't race waking up processes. */
- if (atomic_read(&cvp->cv_waiters) > 0)
- wake_up(&cvp->cv_event);
-}
-
-static __inline__ void
-cv_broadcast(kcondvar_t *cvp)
-{
- BUG_ON(cvp == NULL);
- BUG_ON(cvp->cv_magic != CV_MAGIC);
-
- /* Wake_up_all() will wake up all waiters even those which
- * have the WQ_FLAG_EXCLUSIVE flag set. */
- if (atomic_read(&cvp->cv_waiters) > 0)
- wake_up_all(&cvp->cv_event);
-}
-#endif /* _SPL_CONDVAR_H */
+++ /dev/null
-#ifndef _SPL_CRED_H
-#define _SPL_CRED_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <linux/module.h>
-#include <linux/types.h>
-
-/* XXX - Portions commented out because we really just want to have the type
- * defined and the contents aren't nearly so important at the moment. */
-typedef struct cred {
- uint_t cr_ref; /* reference count */
- uid_t cr_uid; /* effective user id */
- gid_t cr_gid; /* effective group id */
- uid_t cr_ruid; /* real user id */
- gid_t cr_rgid; /* real group id */
- uid_t cr_suid; /* "saved" user id (from exec) */
- gid_t cr_sgid; /* "saved" group id (from exec) */
- uint_t cr_ngroups; /* number of groups returned by */
- /* crgroups() */
-#if 0
- cred_priv_t cr_priv; /* privileges */
- projid_t cr_projid; /* project */
- struct zone *cr_zone; /* pointer to per-zone structure */
- struct ts_label_s *cr_label; /* pointer to the effective label */
- credsid_t *cr_ksid; /* pointer to SIDs */
-#endif
- gid_t cr_groups[1]; /* cr_groups size not fixed */
- /* audit info is defined dynamically */
- /* and valid only when audit enabled */
- /* auditinfo_addr_t cr_auinfo; audit info */
-} cred_t;
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _SPL_CRED_H */
-
+++ /dev/null
-#ifndef _SPL_GENERIC_H
-#define _SPL_GENERIC_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <linux/module.h>
-
-/* Missing defines.
- */
-#define INT32_MAX INT_MAX
-#define UINT64_MAX (~0ULL)
-#define NBBY 8
-#define ENOTSUP ENOTSUPP
-#define MAXNAMELEN 256
-#define MAXPATHLEN PATH_MAX
-#define __va_list va_list
-#define _KERNEL __KERNEL__
-#define max_ncpus 64
-
-/* 0..MAX_PRIO-1: Process priority
- * 0..MAX_RT_PRIO-1: RT priority tasks
- * MAX_RT_PRIO..MAX_PRIO-1: SCHED_NORMAL tasks
- *
- * Treat shim tasks as SCHED_NORMAL tasks
- */
-#define minclsyspri (MAX_RT_PRIO)
-#define maxclsyspri (MAX_PRIO-1)
-
-#define NICE_TO_PRIO(nice) (MAX_RT_PRIO + (nice) + 20)
-#define PRIO_TO_NICE(prio) ((prio) - MAX_RT_PRIO - 20)
-
-#define kred NULL
-
-#define FREAD 1
-#define FWRITE 2
-#define FCREAT O_CREAT
-#define FTRUNC O_TRUNC
-#define FOFFMAX O_LARGEFILE
-#define FSYNC O_SYNC
-#define FDSYNC O_DSYNC
-#define FRSYNC O_RSYNC
-#define FEXCL O_EXCL
-
-#define FNODSYNC 0x10000 /* fsync pseudo flag */
-#define FNOFOLLOW 0x20000 /* don't follow symlinks */
-
-/* Missing macros
- */
-#define PAGESIZE PAGE_SIZE
-
-/* from Solaris sys/byteorder.h */
-#define BSWAP_8(x) ((x) & 0xff)
-#define BSWAP_16(x) ((BSWAP_8(x) << 8) | BSWAP_8((x) >> 8))
-#define BSWAP_32(x) ((BSWAP_16(x) << 16) | BSWAP_16((x) >> 16))
-#define BSWAP_64(x) ((BSWAP_32(x) << 32) | BSWAP_32((x) >> 32))
-
-/* Map some simple functions.
- */
-#define bzero(ptr,size) memset(ptr,0,size)
-#define bcopy(src,dest,size) memcpy(dest,src,size)
-#define ASSERT(x) BUG_ON(!(x))
-#define ASSERT3U(left,OP,right) BUG_ON(!((left) OP (right)))
-
-/* Missing globals
- */
-extern int p0;
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _SPL_GENERIC_H */
+++ /dev/null
-#ifndef _SPL_KMEM_H
-#define _SPL_KMEM_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#undef DEBUG_KMEM
-#undef DEBUG_KMEM_UNIMPLEMENTED
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/spinlock.h>
-/*
- * Memory allocation interfaces
- */
-#define KM_SLEEP GFP_KERNEL
-#define KM_NOSLEEP GFP_ATOMIC
-#undef KM_PANIC /* No linux analog */
-#define KM_PUSHPAGE (GFP_KERNEL | GFP_HIGH)
-#define KM_VMFLAGS GFP_LEVEL_MASK
-#define KM_FLAGS __GFP_BITS_MASK
-
-#ifdef DEBUG_KMEM
-/* Shim layer memory accounting */
-extern atomic_t kmem_alloc_used;
-extern unsigned int kmem_alloc_max;
-#endif
-
-#ifdef DEBUG_KMEM
-#define __kmem_alloc(size, flags, allocator) \
-({ void *_ptr_; \
- \
- /* Marked unlikely because we should never be doing this */ \
- if (unlikely((size) > (PAGE_SIZE * 2))) \
- printk("Warning: kmem_alloc(%d, 0x%x) large alloc at %s:%d " \
- "(%d/%d)\n", (int)(size), (int)(flags), \
- __FILE__, __LINE__, \
- atomic_read(&kmem_alloc_used), kmem_alloc_max); \
- \
- _ptr_ = (void *)allocator((size), (flags)); \
- if (_ptr_ == NULL) { \
- printk("Warning: kmem_alloc(%d, 0x%x) failed at %s:%d " \
- "(%d/%d)\n", (int)(size), (int)(flags), \
- __FILE__, __LINE__, \
- atomic_read(&kmem_alloc_used), kmem_alloc_max); \
- atomic_add((size), &kmem_alloc_used); \
- if (unlikely(atomic_read(&kmem_alloc_used) > kmem_alloc_max)) \
- kmem_alloc_max = atomic_read(&kmem_alloc_used); \
- } \
- \
- _ptr_; \
-})
-
-#define kmem_alloc(size, flags) __kmem_alloc(size, flags, kmalloc)
-#define kmem_zalloc(size, flags) __kmem_alloc(size, flags, kzalloc)
-
-#define kmem_free(ptr, size) \
-({ \
- BUG_ON(!ptr); \
- atomic_sub((size), &kmem_alloc_used); \
- memset(ptr, 0x5a, (size)); /* Poison */ \
- kfree(ptr); \
- (ptr) = (void *)0xdeadbeef; \
-})
-
-
-#else
-
-#define kmem_alloc(size, flags) kmalloc(size, flags)
-#define kmem_zalloc(size, flags) kzalloc(size, flags)
-#define kmem_free(ptr, size) kfree(ptr)
-
-#endif /* DEBUG_KMEM */
-
-
-#ifdef DEBUG_KMEM_UNIMPLEMENTED
-static __inline__ void *
-kmem_alloc_tryhard(size_t size, size_t *alloc_size, int kmflags)
-{
-#error "kmem_alloc_tryhard() not implemented"
-}
-#endif /* DEBUG_KMEM_UNIMPLEMENTED */
-
-/*
- * Slab allocation interfaces
- */
-#undef KMC_NOTOUCH /* No linux analog */
-#define KMC_NODEBUG 0x00000000 /* Default beahvior */
-#define KMC_NOMAGAZINE /* No linux analog */
-#define KMC_NOHASH /* No linux analog */
-#define KMC_QCACHE /* No linux analog */
-
-#define KMC_REAP_CHUNK 256
-#define KMC_DEFAULT_SEEKS DEFAULT_SEEKS
-
-/* Defined by linux slab.h
- * typedef struct kmem_cache_s kmem_cache_t;
- */
-
-/* No linux analog
- * extern int kmem_ready;
- * extern pgcnt_t kmem_reapahead;
- */
-
-#ifdef DEBUG_KMEM_UNIMPLEMENTED
-static __inline__ void kmem_init(void) {
-#error "kmem_init() not implemented"
-}
-
-static __inline__ void kmem_thread_init(void) {
-#error "kmem_thread_init() not implemented"
-}
-
-static __inline__ void kmem_mp_init(void) {
-#error "kmem_mp_init() not implemented"
-}
-
-static __inline__ void kmem_reap_idspace(void) {
-#error "kmem_reap_idspace() not implemented"
-}
-
-static __inline__ size_t kmem_avail(void) {
-#error "kmem_avail() not implemented"
-}
-
-static __inline__ size_t kmem_maxavail(void) {
-#error "kmem_maxavail() not implemented"
-}
-
-static __inline__ uint64_t kmem_cache_stat(kmem_cache_t *cache) {
-#error "kmem_cache_stat() not implemented"
-}
-#endif /* DEBUG_KMEM_UNIMPLEMENTED */
-
-/* XXX - Used by arc.c to adjust its memory footprint. We may want
- * to use this hook in the future to adjust behavior based on
- * debug levels. For now it's safe to always return 0.
- */
-static __inline__ int
-kmem_debugging(void)
-{
- return 0;
-}
-
-typedef int (*kmem_constructor_t)(void *, void *, int);
-typedef void (*kmem_destructor_t)(void *, void *);
-typedef void (*kmem_reclaim_t)(void *);
-
-extern kmem_cache_t *
-__kmem_cache_create(char *name, size_t size, size_t align,
- kmem_constructor_t constructor,
- kmem_destructor_t destructor,
- kmem_reclaim_t reclaim,
- void *priv, void *vmp, int flags);
-
-void
-extern __kmem_cache_destroy(kmem_cache_t *cache);
-
-void
-extern __kmem_reap(void);
-
-#define kmem_cache_create(name,size,align,ctor,dtor,rclm,priv,vmp,flags) \
- __kmem_cache_create(name,size,align,ctor,dtor,rclm,priv,vmp,flags)
-#define kmem_cache_destroy(cache) __kmem_cache_destroy(cache)
-#define kmem_cache_alloc(cache, flags) kmem_cache_alloc(cache, flags)
-#define kmem_cache_free(cache, ptr) kmem_cache_free(cache, ptr)
-#define kmem_cache_reap_now(cache) kmem_cache_shrink(cache)
-#define kmem_reap() __kmem_reap()
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _SPL_KMEM_H */
+++ /dev/null
-#ifndef _SPL_KSTAT_H
-#define _SPL_KSTAT_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <linux/module.h>
-#include "spl-types.h"
-#include "spl-time.h"
-
-/* XXX - The minimum functionality here is stubbed out but nothing works. */
-
-#define KSTAT_STRLEN 31 /* 30 chars + NULL; must be 16 * n - 1 */
-
-#define KSTAT_TYPE_RAW 0 /* can be anything */
- /* ks_ndata >= 1 */
-#define KSTAT_TYPE_NAMED 1 /* name/value pair */
- /* ks_ndata >= 1 */
-#define KSTAT_TYPE_INTR 2 /* interrupt statistics */
- /* ks_ndata == 1 */
-#define KSTAT_TYPE_IO 3 /* I/O statistics */
- /* ks_ndata == 1 */
-#define KSTAT_TYPE_TIMER 4 /* event timer */
- /* ks_ndata >= 1 */
-
-#define KSTAT_NUM_TYPES 5
-
-
-#define KSTAT_DATA_CHAR 0
-#define KSTAT_DATA_INT32 1
-#define KSTAT_DATA_UINT32 2
-#define KSTAT_DATA_INT64 3
-#define KSTAT_DATA_UINT64 4
-
-
-#define KSTAT_FLAG_VIRTUAL 0x01
-#define KSTAT_FLAG_VAR_SIZE 0x02
-#define KSTAT_FLAG_WRITABLE 0x04
-#define KSTAT_FLAG_PERSISTENT 0x08
-#define KSTAT_FLAG_DORMANT 0x10
-#define KSTAT_FLAG_INVALID 0x2
-
-
-typedef int kid_t; /* unique kstat id */
-
-typedef struct kstat_s {
- /*
- * Fields relevant to both kernel and user
- */
- hrtime_t ks_crtime; /* creation time (from gethrtime()) */
- struct kstat_s *ks_next; /* kstat chain linkage */
- kid_t ks_kid; /* unique kstat ID */
- char ks_module[KSTAT_STRLEN]; /* provider module name */
- uchar_t ks_resv; /* reserved, currently just padding */
- int ks_instance; /* provider module's instance */
- char ks_name[KSTAT_STRLEN]; /* kstat name */
- uchar_t ks_type; /* kstat data type */
- char ks_class[KSTAT_STRLEN]; /* kstat class */
- uchar_t ks_flags; /* kstat flags */
- void *ks_data; /* kstat type-specific data */
- uint_t ks_ndata; /* # of type-specific data records */
- size_t ks_data_size; /* total size of kstat data section */
- hrtime_t ks_snaptime; /* time of last data shapshot */
- /*
- * Fields relevant to kernel only
- */
- int (*ks_update)(struct kstat *, int); /* dynamic update */
- void *ks_private; /* arbitrary provider-private data */
- int (*ks_snapshot)(struct kstat *, void *, int);
- void *ks_lock; /* protects this kstat's data */
-} kstat_t;
-
-typedef struct kstat_named_s {
- char name[KSTAT_STRLEN]; /* name of counter */
- uchar_t data_type; /* data type */
- union {
- char c[16]; /* enough for 128-bit ints */
- int32_t i32;
- uint32_t ui32;
- struct {
- union {
- char *ptr; /* NULL-term string */
- char __pad[8]; /* 64-bit padding */
- } addr;
- uint32_t len; /* # bytes for strlen + '\0' */
- } str;
-/*
- * The int64_t and uint64_t types are not valid for a maximally conformant
- * 32-bit compilation environment (cc -Xc) using compilers prior to the
- * introduction of C99 conforming compiler (reference ISO/IEC 9899:1990).
- * In these cases, the visibility of i64 and ui64 is only permitted for
- * 64-bit compilation environments or 32-bit non-maximally conformant
- * C89 or C90 ANSI C compilation environments (cc -Xt and cc -Xa). In the
- * C99 ANSI C compilation environment, the long long type is supported.
- * The _INT64_TYPE is defined by the implementation (see sys/int_types.h).
- */
- int64_t i64;
- uint64_t ui64;
- long l;
- ulong_t ul;
-
- /* These structure members are obsolete */
-
- longlong_t ll;
- u_longlong_t ull;
- float f;
- double d;
- } value; /* value of counter */
-} kstat_named_t;
-
-
-static __inline__ kstat_t *
-kstat_create(const char *ks_module, int ks_instance, const char *ks_name,
- const char *ks_class, uchar_t ks_type, uint_t ks_ndata,
- uchar_t ks_flags)
-{
- return NULL;
-}
-
-static __inline__ void
-kstat_install(kstat_t *ksp)
-{
- return;
-}
-
-static __inline__ void
-kstat_delete(kstat_t *ksp)
-{
- return;
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _SPL_KSTAT_H */
-
+++ /dev/null
-#ifndef _SPL_MUTEX_H
-#define _SPL_MUTEX_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <linux/module.h>
-#include "spl-types.h"
-
-/* See the "Big Theory Statement" in solaris mutex.c.
- *
- * Spin mutexes apparently aren't needed by zfs so we assert
- * if ibc is non-zero.
- *
- * Our impementation of adaptive mutexes aren't really adaptive.
- * They go to sleep every time.
- */
-
-#define MUTEX_DEFAULT 0
-#define MUTEX_HELD(x) (mutex_owned(x))
-
-#define KM_MAGIC 0x42424242
-#define KM_POISON 0x84
-
-typedef struct {
- int km_magic;
- char *km_name;
- struct task_struct *km_owner;
- struct semaphore km_sem;
-} kmutex_t;
-
-#undef mutex_init
-static __inline__ void
-mutex_init(kmutex_t *mp, char *name, int type, void *ibc)
-{
- BUG_ON(ibc != NULL); /* XXX - Spin mutexes not needed? */
- BUG_ON(type != MUTEX_DEFAULT); /* XXX - Only default type supported? */
-
- mp->km_magic = KM_MAGIC;
- sema_init(&mp->km_sem, 1);
- mp->km_owner = NULL;
- mp->km_name = NULL;
-
- if (name) {
- mp->km_name = kmalloc(strlen(name) + 1, GFP_KERNEL);
- if (mp->km_name)
- strcpy(mp->km_name, name);
- }
-}
-
-#undef mutex_destroy
-static __inline__ void
-mutex_destroy(kmutex_t *mp)
-{
- BUG_ON(mp->km_magic != KM_MAGIC);
-
- if (mp->km_name)
- kfree(mp->km_name);
-
- memset(mp, KM_POISON, sizeof(*mp));
-}
-
-static __inline__ void
-mutex_enter(kmutex_t *mp)
-{
- BUG_ON(mp->km_magic != KM_MAGIC);
- down(&mp->km_sem); /* Will check in_atomic() for us */
- BUG_ON(mp->km_owner != NULL);
- mp->km_owner = current;
-}
-
-/* Return 1 if we acquired the mutex, else zero.
- */
-static __inline__ int
-mutex_tryenter(kmutex_t *mp)
-{
- int result;
-
- BUG_ON(mp->km_magic != KM_MAGIC);
- result = down_trylock(&mp->km_sem); /* returns 0 if acquired */
- if (result == 0) {
- BUG_ON(mp->km_owner != NULL);
- mp->km_owner = current;
- return 1;
- }
- return 0;
-}
-
-static __inline__ void
-mutex_exit(kmutex_t *mp)
-{
- BUG_ON(mp->km_magic != KM_MAGIC);
- BUG_ON(mp->km_owner != current);
- mp->km_owner = NULL;
- up(&mp->km_sem);
-}
-
-/* Return 1 if mutex is held by current process, else zero.
- */
-static __inline__ int
-mutex_owned(kmutex_t *mp)
-{
- BUG_ON(mp->km_magic != KM_MAGIC);
- return (mp->km_owner == current);
-}
-
-/* Return owner if mutex is owned, else NULL.
- */
-static __inline__ kthread_t *
-mutex_owner(kmutex_t *mp)
-{
- BUG_ON(mp->km_magic != KM_MAGIC);
- return mp->km_owner;
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _SPL_MUTEX_H */
+++ /dev/null
-#ifndef _SPL_RANDOM_H
-#define _SPL_RANDOM_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <linux/module.h>
-#include <linux/random.h>
-
-/* FIXME:
- * Should add support for blocking in the future to
- * ensure that proper entopy is collected. ZFS doesn't
- * use it at the moment so this is good enough for now.
- * Always will succeed by returning 0.
- */
-static __inline__ int
-random_get_bytes(uint8_t *ptr, size_t len)
-{
- BUG_ON(len < 0);
- get_random_bytes((void *)ptr,(int)len);
- return 0;
-}
-
- /* Always will succeed by returning 0. */
-static __inline__ int
-random_get_pseudo_bytes(uint8_t *ptr, size_t len)
-{
- BUG_ON(len < 0);
- get_random_bytes((void *)ptr,(int)len);
- return 0;
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _SPL_RANDOM_H */
+++ /dev/null
-#ifndef _SPL_RWLOCK_H
-#define _SPL_RWLOCK_H
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/rwsem.h>
-#include <asm/current.h>
-#include "spl-types.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-typedef enum {
- RW_DRIVER = 2, /* driver (DDI) rwlock */
- RW_DEFAULT = 4 /* kernel default rwlock */
-} krw_type_t;
-
-typedef enum {
- RW_WRITER,
- RW_READER
-} krw_t;
-
-#define RW_READ_HELD(x) (rw_read_held((x)))
-#define RW_WRITE_HELD(x) (rw_write_held((x)))
-#define RW_LOCK_HELD(x) (rw_lock_held((x)))
-#define RW_ISWRITER(x) (rw_iswriter(x))
-
-#define RW_MAGIC 0x3423645a
-#define RW_POISON 0xa6
-
-typedef struct {
- int rw_magic;
- char *rw_name;
- struct rw_semaphore rw_sem;
- struct task_struct *rw_owner; /* holder of the write lock */
-} krwlock_t;
-
-static __inline__ void
-rw_init(krwlock_t *rwlp, char *name, krw_type_t type, void *arg)
-{
- BUG_ON(type != RW_DEFAULT); /* XXX no irq handler use */
- BUG_ON(arg != NULL); /* XXX no irq handler use */
- rwlp->rw_magic = RW_MAGIC;
- rwlp->rw_owner = NULL; /* no one holds the write lock yet */
- init_rwsem(&rwlp->rw_sem);
- rwlp->rw_name = NULL;
-
- if (name) {
- rwlp->rw_name = kmalloc(strlen(name) + 1, GFP_KERNEL);
- if (rwlp->rw_name)
- strcpy(rwlp->rw_name, name);
- }
-}
-
-static __inline__ void
-rw_destroy(krwlock_t *rwlp)
-{
- BUG_ON(rwlp == NULL);
- BUG_ON(rwlp->rw_magic != RW_MAGIC);
- BUG_ON(rwlp->rw_owner != NULL);
- spin_lock(&rwlp->rw_sem.wait_lock);
- BUG_ON(!list_empty(&rwlp->rw_sem.wait_list));
- spin_unlock(&rwlp->rw_sem.wait_lock);
-
- if (rwlp->rw_name)
- kfree(rwlp->rw_name);
-
- memset(rwlp, RW_POISON, sizeof(krwlock_t));
-}
-
-/* Return 0 if the lock could not be obtained without blocking.
- */
-static __inline__ int
-rw_tryenter(krwlock_t *rwlp, krw_t rw)
-{
- int result;
-
- BUG_ON(rwlp->rw_magic != RW_MAGIC);
- switch (rw) {
- /* these functions return 1 if success, 0 if contention */
- case RW_READER:
- /* Here the Solaris code would return 0
- * if there were any write waiters. Specifically
- * thinking about the case where readers may have
- * the lock and we would also allow this thread
- * to grab the read lock with a writer waiting in the
- * queue. This doesn't seem like a correctness
- * issue, so just call down_read_trylock()
- * for the test. We may have to revisit this if
- * it becomes an issue */
- result = down_read_trylock(&rwlp->rw_sem);
- break;
- case RW_WRITER:
- result = down_write_trylock(&rwlp->rw_sem);
- if (result) {
- /* there better not be anyone else
- * holding the write lock here */
- BUG_ON(rwlp->rw_owner != NULL);
- rwlp->rw_owner = current;
- }
- break;
- }
-
- return result;
-}
-
-static __inline__ void
-rw_enter(krwlock_t *rwlp, krw_t rw)
-{
- BUG_ON(rwlp->rw_magic != RW_MAGIC);
- switch (rw) {
- case RW_READER:
- /* Here the Solaris code would block
- * if there were any write waiters. Specifically
- * thinking about the case where readers may have
- * the lock and we would also allow this thread
- * to grab the read lock with a writer waiting in the
- * queue. This doesn't seem like a correctness
- * issue, so just call down_read()
- * for the test. We may have to revisit this if
- * it becomes an issue */
- down_read(&rwlp->rw_sem);
- break;
- case RW_WRITER:
- down_write(&rwlp->rw_sem);
-
- /* there better not be anyone else
- * holding the write lock here */
- BUG_ON(rwlp->rw_owner != NULL);
- rwlp->rw_owner = current;
- break;
- }
-}
-
-static __inline__ void
-rw_exit(krwlock_t *rwlp)
-{
- BUG_ON(rwlp->rw_magic != RW_MAGIC);
-
- /* rw_owner is held by current
- * thread iff it is a writer */
- if (rwlp->rw_owner == current) {
- rwlp->rw_owner = NULL;
- up_write(&rwlp->rw_sem);
- } else {
- up_read(&rwlp->rw_sem);
- }
-}
-
-static __inline__ void
-rw_downgrade(krwlock_t *rwlp)
-{
- BUG_ON(rwlp->rw_magic != RW_MAGIC);
- BUG_ON(rwlp->rw_owner != current);
- rwlp->rw_owner = NULL;
- downgrade_write(&rwlp->rw_sem);
-}
-
-/* Return 0 if unable to perform the upgrade.
- * Might be wise to fix the caller
- * to acquire the write lock first?
- */
-static __inline__ int
-rw_tryupgrade(krwlock_t *rwlp)
-{
- int result;
- BUG_ON(rwlp->rw_magic != RW_MAGIC);
-
- spin_lock(&rwlp->rw_sem.wait_lock);
-
- /* Check if there is anyone waiting for the
- * lock. If there is, then we know we should
- * not try to upgrade the lock */
- if (!list_empty(&rwlp->rw_sem.wait_list)) {
- printk(KERN_WARNING "There are threads waiting\n");
- spin_unlock(&rwlp->rw_sem.wait_lock);
- return 0;
- }
-#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
- /* Note that activity is protected by
- * the wait_lock. Don't try to upgrade
- * if there are multiple readers currently
- * holding the lock */
- if (rwlp->rw_sem.activity > 1) {
-#else
- /* Don't try to upgrade
- * if there are multiple readers currently
- * holding the lock */
- if ((rwlp->rw_sem.count & RWSEM_ACTIVE_MASK) > 1) {
-#endif
- spin_unlock(&rwlp->rw_sem.wait_lock);
- return 0;
- }
-
- /* Here it should be safe to drop the
- * read lock and reacquire it for writing since
- * we know there are no waiters */
- up_read(&rwlp->rw_sem);
-
- /* returns 1 if success, 0 if contention */
- result = down_write_trylock(&rwlp->rw_sem);
-
- /* Check if upgrade failed. Should not ever happen
- * if we got to this point */
- BUG_ON(!result);
- BUG_ON(rwlp->rw_owner != NULL);
- rwlp->rw_owner = current;
- spin_unlock(&rwlp->rw_sem.wait_lock);
- return 1;
-}
-
-static __inline__ kthread_t *
-rw_owner(krwlock_t *rwlp)
-{
- BUG_ON(rwlp->rw_magic != RW_MAGIC);
- return rwlp->rw_owner;
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _SPL_RWLOCK_H */
+++ /dev/null
-#ifndef _SPL_TASKQ_H
-#define _SPL_TASKQ_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/*
- * Task Queues - As of linux 2.6.x task queues have been replaced by a
- * similar construct called work queues. The big difference on the linux
- * side is that functions called from work queues run in process context
- * and not interrupt context.
- *
- * One nice feature of Solaris which does not exist in linux work
- * queues in the notion of a dynamic work queue. Rather than implementing
- * this in the shim layer I'm hardcoding one-thread per work queue.
- *
- * XXX - This may end up being a significant performance penalty which
- * forces us to implement dynamic workqueues. Which is all very doable
- * with a little effort.
- */
-#include <linux/module.h>
-#include <linux/workqueue.h>
-#include <linux/gfp.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include "spl-types.h"
-
-#undef DEBUG_TASKQ_UNIMPLEMENTED
-
-#define TASKQ_NAMELEN 31
-#define taskq_t workq_t
-
-typedef struct workqueue_struct workq_t;
-typedef unsigned long taskqid_t;
-typedef void (*task_func_t)(void *);
-
-/*
- * Public flags for taskq_create(): bit range 0-15
- */
-#define TASKQ_PREPOPULATE 0x0000 /* XXX - Workqueues fully populate */
-#define TASKQ_CPR_SAFE 0x0000 /* XXX - No analog */
-#define TASKQ_DYNAMIC 0x0000 /* XXX - Worksqueues not dynamic */
-
-/*
- * Flags for taskq_dispatch. TQ_SLEEP/TQ_NOSLEEP should be same as
- * KM_SLEEP/KM_NOSLEEP.
- */
-#define TQ_SLEEP 0x00 /* XXX - Workqueues don't support */
-#define TQ_NOSLEEP 0x00 /* these sorts of flags. They */
-#define TQ_NOQUEUE 0x00 /* always run in application */
-#define TQ_NOALLOC 0x00 /* context and can sleep. */
-
-
-#ifdef DEBUG_TASKQ_UNIMPLEMENTED
-static __inline__ void taskq_init(void) {
-#error "taskq_init() not implemented"
-}
-
-static __inline__ taskq_t *
-taskq_create_instance(const char *, int, int, pri_t, int, int, uint_t) {
-#error "taskq_create_instance() not implemented"
-}
-
-extern void nulltask(void *);
-extern void taskq_suspend(taskq_t *);
-extern int taskq_suspended(taskq_t *);
-extern void taskq_resume(taskq_t *);
-
-#endif /* DEBUG_TASKQ_UNIMPLEMENTED */
-
-extern taskqid_t __taskq_dispatch(taskq_t *, task_func_t, void *, uint_t);
-extern taskq_t *__taskq_create(const char *, int, pri_t, int, int, uint_t);
-
-#define taskq_create(name, thr, pri, min, max, flags) \
- __taskq_create(name, thr, pri, min, max, flags)
-#define taskq_dispatch(tq, func, priv, flags) \
- __taskq_dispatch(tq, func, priv, flags)
-#define taskq_destory(tq) destroy_workqueue(tq)
-#define taskq_wait(tq) flush_workqueue(tq)
-#define taskq_member(tq, kthr) 1 /* XXX -Just be true */
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _SPL_TASKQ_H */
+++ /dev/null
-#ifndef _SPL_THREAD_H
-#define _SPL_THREAD_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/spinlock.h>
-#include "spl-types.h"
-#include "spl-generic.h"
-
-/*
- * Thread interfaces
- */
-#define TP_MAGIC 0x53535353
-
-#define TS_SLEEP TASK_INTERRUPTIBLE
-#define TS_RUN TASK_RUNNING
-#define TS_ZOMB EXIT_ZOMBIE
-#define TS_STOPPED TASK_STOPPED
-#if 0
-#define TS_FREE 0x00 /* No clean linux mapping */
-#define TS_ONPROC 0x04 /* No clean linux mapping */
-#define TS_WAIT 0x20 /* No clean linux mapping */
-#endif
-
-#define thread_create(stk, stksize, func, arg, len, pp, state, pri) \
- __thread_create(stk, stksize, func, arg, len, pp, state, pri)
-#define thread_exit() __thread_exit()
-#define curthread get_current()
-
-/* We just need a valid type to pass around, it's unused */
-typedef struct proc_s {
- int foo;
-} proc_t;
-
-extern kthread_t *__thread_create(caddr_t stk, size_t stksize,
- void (*proc)(void *), void *args,
- size_t len, proc_t *pp, int state,
- pri_t pri);
-extern void __thread_exit(void);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _SPL_THREAD_H */
-
+++ /dev/null
-#ifndef _SPL_TIME_H
-#define _SPL_TIME_H
-
-/*
- * Structure returned by gettimeofday(2) system call,
- * and used in other calls.
- */
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <linux/module.h>
-#include <linux/time.h>
-#include "spl-types.h"
-
-extern unsigned long long monotonic_clock(void);
-typedef struct timespec timestruc_t; /* definition per SVr4 */
-typedef longlong_t hrtime_t;
-
-#define TIME32_MAX INT32_MAX
-#define TIME32_MIN INT32_MIN
-
-#define SEC 1
-#define MILLISEC 1000
-#define MICROSEC 1000000
-#define NANOSEC 1000000000
-
-#define hz \
-({ \
- BUG_ON(HZ < 100 || HZ > MICROSEC); \
- HZ; \
-})
-
-#define gethrestime(ts) getnstimeofday((ts))
-
-static __inline__ hrtime_t
-gethrtime(void) {
- /* BUG_ON(cur_timer == timer_none); */
-
- /* Solaris expects a long long here but monotonic_clock() returns an
- * unsigned long long. Note that monotonic_clock() returns the number
- * of nanoseconds passed since kernel initialization. Even for a signed
- * long long this will not "go negative" for ~292 years.
- */
- return monotonic_clock();
-}
-
-static __inline__ time_t
-gethrestime_sec(void)
-{
- timestruc_t now;
-
- gethrestime(&now);
- return (now.tv_sec);
-}
-
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _SPL_TIME_H */
+++ /dev/null
-#ifndef _SPL_TIMER_H
-#define _SPL_TIMER_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/timer.h>
-
-#define lbolt ((clock_t)jiffies)
-#define lbolt64 ((int64_t)get_jiffies_64())
-
-#define delay(ticks) schedule_timeout((long timeout)(ticks))
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _SPL_TIMER_H */
-
+++ /dev/null
-#ifndef _SPL_TYPES_H
-#define _SPL_TYPES_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-typedef enum { B_FALSE=0, B_TRUE=1 } boolean_t;
-typedef unsigned long uintptr_t;
-typedef unsigned long intptr_t;
-typedef unsigned long ulong_t;
-typedef unsigned int uint_t;
-typedef unsigned char uchar_t;
-typedef unsigned long long u_longlong_t;
-typedef unsigned long long u_offset_t;
-typedef unsigned long long rlim64_t;
-typedef long long longlong_t;
-typedef long long offset_t;
-typedef struct task_struct kthread_t;
-typedef struct vmem { } vmem_t;
-typedef short pri_t;
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _SPL_TYPES_H */
+++ /dev/null
-#ifndef _SPL_H
-#define _SPL_H
-
-#include "spl-callb.h"
-#include "spl-condvar.h"
-#include "spl-cred.h"
-#include "spl-generic.h"
-#include "spl-kmem.h"
-#include "spl-kstat.h"
-#include "spl-mutex.h"
-#include "spl-random.h"
-#include "spl-rwlock.h"
-#include "spl-taskq.h"
-#include "spl-thread.h"
-#include "spl-time.h"
-#include "spl-timer.h"
-#include "spl-types.h"
-
-#endif /* _SPL_H */
+++ /dev/null
-#ifndef _SPLAT_CTL_H
-#define _SPLAT_CTL_H
-
-/* Contains shared definitions which both the userspace
- * and kernelspace portions of splat must agree on.
- */
-
-#define SPLAT_MAJOR 229 /* XXX - Arbitrary */
-#define SPLAT_MINORS 1
-#define SPLAT_DEV "/dev/splatctl"
-
-#define SPLAT_NAME_SIZE 12
-#define SPLAT_DESC_SIZE 60
-
-typedef struct splat_user {
- char name[SPLAT_NAME_SIZE]; /* short name */
- char desc[SPLAT_DESC_SIZE]; /* short description */
- int id; /* unique numeric id */
-} splat_user_t;
-
-#define SPLAT_CFG_MAGIC 0x15263748U
-typedef struct splat_cfg {
- unsigned int cfg_magic; /* Unique magic */
- int cfg_cmd; /* Config command */
- int cfg_arg1; /* Config command arg 1 */
- int cfg_rc1; /* Config response 1 */
- union {
- struct {
- int size;
- splat_user_t descs[0];
- } splat_subsystems;
- struct {
- int size;
- splat_user_t descs[0];
- } splat_tests;
- } cfg_data;
-} splat_cfg_t;
-
-#define SPLAT_CMD_MAGIC 0x9daebfc0U
-typedef struct splat_cmd {
- unsigned int cmd_magic; /* Unique magic */
- int cmd_subsystem; /* Target subsystem */
- int cmd_test; /* Subsystem test */
- int cmd_data_size; /* Extra opaque data */
- char cmd_data_str[0]; /* Opaque data region */
-} splat_cmd_t;
-
-/* Valid ioctls */
-#define SPLAT_CFG _IOWR('f', 101, long)
-#define SPLAT_CMD _IOWR('f', 102, long)
-
-/* Valid configuration commands */
-#define SPLAT_CFG_BUFFER_CLEAR 0x001 /* Clear text buffer */
-#define SPLAT_CFG_BUFFER_SIZE 0x002 /* Resize text buffer */
-#define SPLAT_CFG_SUBSYSTEM_COUNT 0x101 /* Number of subsystem */
-#define SPLAT_CFG_SUBSYSTEM_LIST 0x102 /* List of N subsystems */
-#define SPLAT_CFG_TEST_COUNT 0x201 /* Number of tests */
-#define SPLAT_CFG_TEST_LIST 0x202 /* List of N tests */
-
-/* Valid subsystem and test commands defined in each subsystem, we do
- * need to be careful to avoid colisions. That alone may argue to define
- * them all here, for now we just define the global error codes.
- */
-#define SPLAT_SUBSYSTEM_UNKNOWN 0xF00
-#define SPLAT_TEST_UNKNOWN 0xFFF
-
-#endif /* _SPLAT_CTL_H */
--- /dev/null
+EXTRA_DIST = callb.h cmn_err.h condvar.h cred.h
+EXTRA_DIST += debug.h generic.h kmem.h kstat.h
+EXTRA_DIST += mutex.h param.h random.h rwlock.h
+EXTRA_DIST += spl.h taskq.h thread.h time.h
+EXTRA_DIST += timer.h types.h
--- /dev/null
+#ifndef _SPL_CALLB_H
+#define _SPL_CALLB_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <linux/module.h>
+#include <sys/mutex.h>
+
+#define DEBUG_CALLB
+
+/* With DEBUG_CALLB defined the assertion must be compiled in; the original
+ * '#ifndef' inverted the sense and compiled it out.  CALLB_CPR_EXIT below
+ * asserts MUTEX_HELD unconditionally, so enabling it here is consistent.
+ * No trailing semicolon so the macro composes cleanly as a statement. */
+#ifdef DEBUG_CALLB
+#define CALLB_CPR_ASSERT(cp)	BUG_ON(!(MUTEX_HELD((cp)->cc_lockp)))
+#else
+#define CALLB_CPR_ASSERT(cp)
+#endif
+
+
+typedef struct callb_cpr {
+ kmutex_t *cc_lockp;
+} callb_cpr_t;
+
+#define CALLB_CPR_INIT(cp, lockp, func, name) { \
+ (cp)->cc_lockp = lockp; \
+}
+
+#define CALLB_CPR_SAFE_BEGIN(cp) { \
+ CALLB_CPR_ASSERT(cp); \
+}
+
+#define CALLB_CPR_SAFE_END(cp, lockp) { \
+ CALLB_CPR_ASSERT(cp); \
+}
+
+#define CALLB_CPR_EXIT(cp) { \
+ ASSERT(MUTEX_HELD((cp)->cc_lockp)); \
+ mutex_exit((cp)->cc_lockp); \
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SPL_CALLB_H */
+
--- /dev/null
+#ifndef _SPL_CMN_ERR_H
+#define _SPL_CMN_ERR_H
+
+#endif /* _SPL_CMN_ERR_H */
--- /dev/null
+#ifndef _SPL_CONDVAR_H
+#define _SPL_CONDVAR_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <linux/module.h>
+#include <linux/wait.h>
+
+/* The kcondvar_t struct is protected by mutex taken externally before
+ * calling any of the wait/signal funs, and passed into the wait funs.
+ */
+#define CV_MAGIC 0x346545f4
+#define CV_POISON 0x95
+
+typedef struct {
+ int cv_magic;
+ char *cv_name;
+ wait_queue_head_t cv_event;
+ atomic_t cv_waiters;
+ kmutex_t *cv_mutex; /* only for verification purposes */
+} kcondvar_t;
+
+typedef enum { CV_DEFAULT=0, CV_DRIVER } kcv_type_t;
+
+static __inline__ void
+cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg)
+{
+ BUG_ON(cvp == NULL);
+ BUG_ON(type != CV_DEFAULT);
+ BUG_ON(arg != NULL);
+
+ cvp->cv_magic = CV_MAGIC;
+ init_waitqueue_head(&cvp->cv_event);
+ atomic_set(&cvp->cv_waiters, 0);
+ cvp->cv_mutex = NULL;
+ cvp->cv_name = NULL;
+
+ if (name) {
+ cvp->cv_name = kmalloc(strlen(name) + 1, GFP_KERNEL);
+ if (cvp->cv_name)
+ strcpy(cvp->cv_name, name);
+ }
+}
+
+static __inline__ void
+cv_destroy(kcondvar_t *cvp)
+{
+ BUG_ON(cvp == NULL);
+ BUG_ON(cvp->cv_magic != CV_MAGIC);
+ BUG_ON(atomic_read(&cvp->cv_waiters) != 0);
+ BUG_ON(waitqueue_active(&cvp->cv_event));
+
+ if (cvp->cv_name)
+ kfree(cvp->cv_name);
+
+ memset(cvp, CV_POISON, sizeof(*cvp));
+}
+
+static __inline__ void
+cv_wait(kcondvar_t *cvp, kmutex_t *mtx)
+{
+ DEFINE_WAIT(wait);
+ int flag = 1;
+
+ BUG_ON(cvp == NULL || mtx == NULL);
+ BUG_ON(cvp->cv_magic != CV_MAGIC);
+ BUG_ON(!mutex_owned(mtx));
+
+ if (cvp->cv_mutex == NULL)
+ cvp->cv_mutex = mtx;
+
+ /* Ensure the same mutex is used by all callers */
+ BUG_ON(cvp->cv_mutex != mtx);
+
+ for (;;) {
+ prepare_to_wait_exclusive(&cvp->cv_event, &wait,
+ TASK_INTERRUPTIBLE);
+ /* Must occur after we are added to the list but only once */
+ if (flag) {
+ atomic_inc(&cvp->cv_waiters);
+ flag = 0;
+ }
+
+ /* XXX - The correct thing to do here may be to wake up and
+ * force the caller to handle the signal. Spurious wakeups
+ * should already be safely handled by the caller. */
+ if (signal_pending(current))
+ flush_signals(current);
+
+ /* Mutex should be dropped after prepare_to_wait() this
+ * ensures we're linked in to the waiters list and avoids the
+ * race where 'cvp->cv_waiters > 0' but the list is empty. */
+ mutex_exit(mtx);
+ schedule();
+ mutex_enter(mtx);
+
+ /* XXX - The correct thing to do here may be to wake up and
+ * force the caller to handle the signal. Spurious wakeups
+ * should already be safely handled by the caller. */
+ if (signal_pending(current))
+ continue;
+
+ break;
+ }
+
+ atomic_dec(&cvp->cv_waiters);
+ finish_wait(&cvp->cv_event, &wait);
+}
+
+/* 'expire_time' argument is an absolute wall clock time in jiffies.
+ * Return value is time left (expire_time - now) or -1 if timeout occurred.
+ */
+static __inline__ clock_t
+cv_timedwait(kcondvar_t *cvp, kmutex_t *mtx, clock_t expire_time)
+{
+ DEFINE_WAIT(wait);
+ clock_t time_left;
+ int flag = 1;
+
+ BUG_ON(cvp == NULL || mtx == NULL);
+ BUG_ON(cvp->cv_magic != CV_MAGIC);
+ BUG_ON(!mutex_owned(mtx));
+
+ if (cvp->cv_mutex == NULL)
+ cvp->cv_mutex = mtx;
+
+ /* XXX - Does not handle jiffie wrap properly */
+ time_left = expire_time - jiffies;
+ if (time_left <= 0)
+ return -1;
+
+ /* Ensure the same mutex is used by all callers */
+ BUG_ON(cvp->cv_mutex != mtx);
+
+ for (;;) {
+ prepare_to_wait_exclusive(&cvp->cv_event, &wait,
+ TASK_INTERRUPTIBLE);
+ if (flag) {
+ atomic_inc(&cvp->cv_waiters);
+ flag = 0;
+ }
+
+ /* XXX - The correct thing to do here may be to wake up and
+ * force the caller to handle the signal. Spurious wakeups
+ * should already be safely handled by the caller. */
+ if (signal_pending(current))
+ flush_signals(current);
+
+ /* Mutex should be dropped after prepare_to_wait() this
+ * ensures we're linked in to the waiters list and avoids the
+ * race where 'cvp->cv_waiters > 0' but the list is empty. */
+ mutex_exit(mtx);
+ time_left = schedule_timeout(time_left);
+ mutex_enter(mtx);
+
+ /* XXX - The correct thing to do here may be to wake up and
+ * force the caller to handle the signal. Spurious wakeups
+ * should already be safely handled by the caller. */
+ if (signal_pending(current)) {
+ if (time_left > 0)
+ continue;
+
+ flush_signals(current);
+ }
+
+ break;
+ }
+
+ atomic_dec(&cvp->cv_waiters);
+ finish_wait(&cvp->cv_event, &wait);
+
+ return (time_left > 0 ? time_left : -1);
+}
+
+static __inline__ void
+cv_signal(kcondvar_t *cvp)
+{
+ BUG_ON(cvp == NULL);
+ BUG_ON(cvp->cv_magic != CV_MAGIC);
+
+	/* All waiters are added with WQ_FLAG_EXCLUSIVE so only one
+	 * waiter will be set runnable with each call to wake_up().
+	 * Additionally wake_up() holds a spin_lock associated with
+	 * the wait queue to ensure we don't race waking up processes. */
+ if (atomic_read(&cvp->cv_waiters) > 0)
+ wake_up(&cvp->cv_event);
+}
+
+static __inline__ void
+cv_broadcast(kcondvar_t *cvp)
+{
+	BUG_ON(cvp == NULL);
+	BUG_ON(cvp->cv_magic != CV_MAGIC);
+
+	/* wake_up_all() will wake up all waiters even those which
+	 * have the WQ_FLAG_EXCLUSIVE flag set. */
+	if (atomic_read(&cvp->cv_waiters) > 0)
+		wake_up_all(&cvp->cv_event);
+}
+
+/* Close the 'extern "C" {' opened at the top of this header; the original
+ * left it unbalanced, which breaks any C++ translation unit including it. */
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SPL_CONDVAR_H */
--- /dev/null
+#ifndef _SPL_CRED_H
+#define _SPL_CRED_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <linux/module.h>
+#include <linux/types.h>
+
+/* XXX - Portions commented out because we really just want to have the type
+ * defined and the contents aren't nearly so important at the moment. */
+typedef struct cred {
+ uint_t cr_ref; /* reference count */
+ uid_t cr_uid; /* effective user id */
+ gid_t cr_gid; /* effective group id */
+ uid_t cr_ruid; /* real user id */
+ gid_t cr_rgid; /* real group id */
+ uid_t cr_suid; /* "saved" user id (from exec) */
+ gid_t cr_sgid; /* "saved" group id (from exec) */
+ uint_t cr_ngroups; /* number of groups returned by */
+ /* crgroups() */
+#if 0
+ cred_priv_t cr_priv; /* privileges */
+ projid_t cr_projid; /* project */
+ struct zone *cr_zone; /* pointer to per-zone structure */
+ struct ts_label_s *cr_label; /* pointer to the effective label */
+ credsid_t *cr_ksid; /* pointer to SIDs */
+#endif
+ gid_t cr_groups[1]; /* cr_groups size not fixed */
+ /* audit info is defined dynamically */
+ /* and valid only when audit enabled */
+ /* auditinfo_addr_t cr_auinfo; audit info */
+} cred_t;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SPL_CRED_H */
+
--- /dev/null
+#ifndef _SPL_DEBUG_H
+#define _SPL_DEBUG_H
+
+#endif /* _SPL_DEBUG_H */
--- /dev/null
+#ifndef _SPL_GENERIC_H
+#define _SPL_GENERIC_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <linux/module.h>
+
+/* Missing defines.
+ */
+#define INT32_MAX INT_MAX
+#define UINT64_MAX (~0ULL)
+#define NBBY 8
+#define ENOTSUP ENOTSUPP
+#define MAXNAMELEN 256
+#define MAXPATHLEN PATH_MAX
+#define __va_list va_list
+#define _KERNEL __KERNEL__
+#define max_ncpus 64
+
+/* 0..MAX_PRIO-1: Process priority
+ * 0..MAX_RT_PRIO-1: RT priority tasks
+ * MAX_RT_PRIO..MAX_PRIO-1: SCHED_NORMAL tasks
+ *
+ * Treat shim tasks as SCHED_NORMAL tasks
+ */
+#define minclsyspri (MAX_RT_PRIO)
+#define maxclsyspri (MAX_PRIO-1)
+
+#define NICE_TO_PRIO(nice) (MAX_RT_PRIO + (nice) + 20)
+#define PRIO_TO_NICE(prio) ((prio) - MAX_RT_PRIO - 20)
+
+#define kred NULL
+
+#define FREAD 1
+#define FWRITE 2
+#define FCREAT O_CREAT
+#define FTRUNC O_TRUNC
+#define FOFFMAX O_LARGEFILE
+#define FSYNC O_SYNC
+#define FDSYNC O_DSYNC
+#define FRSYNC O_RSYNC
+#define FEXCL O_EXCL
+
+#define FNODSYNC 0x10000 /* fsync pseudo flag */
+#define FNOFOLLOW 0x20000 /* don't follow symlinks */
+
+/* Missing macros
+ */
+#define PAGESIZE PAGE_SIZE
+
+/* from Solaris sys/byteorder.h */
+#define BSWAP_8(x)	((x) & 0xff)
+#define BSWAP_16(x)	((BSWAP_8(x) << 8) | BSWAP_8((x) >> 8))
+#define BSWAP_32(x)	((BSWAP_16(x) << 16) | BSWAP_16((x) >> 16))
+/* Widen to 64 bits before shifting; left-shifting a 32-bit value by 32
+ * is undefined behavior in C. */
+#define BSWAP_64(x)	(((uint64_t)BSWAP_32(x) << 32) | BSWAP_32((x) >> 32))
+
+/* Map some simple functions.
+ */
+#define bzero(ptr,size) memset(ptr,0,size)
+#define bcopy(src,dest,size) memcpy(dest,src,size)
+#define ASSERT(x) BUG_ON(!(x))
+#define ASSERT3U(left,OP,right) BUG_ON(!((left) OP (right)))
+
+/* Missing globals
+ */
+extern int p0;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SPL_GENERIC_H */
--- /dev/null
+#ifndef _SPL_KMEM_H
+#define _SPL_KMEM_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#undef DEBUG_KMEM
+#undef DEBUG_KMEM_UNIMPLEMENTED
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/spinlock.h>
+/*
+ * Memory allocation interfaces
+ */
+#define KM_SLEEP	GFP_KERNEL
+#define KM_NOSLEEP	GFP_ATOMIC
+#undef  KM_PANIC			/* No linux analog */
+/* The kernel flag is __GFP_HIGH; plain GFP_HIGH does not exist. */
+#define KM_PUSHPAGE	(GFP_KERNEL | __GFP_HIGH)
+#define KM_VMFLAGS	GFP_LEVEL_MASK
+#define KM_FLAGS	__GFP_BITS_MASK
+
+#ifdef DEBUG_KMEM
+/* Shim layer memory accounting */
+extern atomic_t kmem_alloc_used;
+extern unsigned int kmem_alloc_max;
+#endif
+
+#ifdef DEBUG_KMEM
+#define __kmem_alloc(size, flags, allocator)                                  \
+({	void *_ptr_;                                                          \
+                                                                              \
+	/* Marked unlikely because we should never be doing this */           \
+	if (unlikely((size) > (PAGE_SIZE * 2)))                               \
+		printk("Warning: kmem_alloc(%d, 0x%x) large alloc at %s:%d "  \
+		       "(%d/%d)\n", (int)(size), (int)(flags),                \
+		       __FILE__, __LINE__,                                    \
+		       atomic_read(&kmem_alloc_used), kmem_alloc_max);        \
+                                                                              \
+	_ptr_ = (void *)allocator((size), (flags));                           \
+	if (_ptr_ == NULL) {                                                  \
+		printk("Warning: kmem_alloc(%d, 0x%x) failed at %s:%d "       \
+		       "(%d/%d)\n", (int)(size), (int)(flags),                \
+		       __FILE__, __LINE__,                                    \
+		       atomic_read(&kmem_alloc_used), kmem_alloc_max);        \
+	} else {                                                              \
+		/* Account the allocation only on success; the original       \
+		 * charged the counters inside the failure branch. */         \
+		atomic_add((size), &kmem_alloc_used);                         \
+		if (unlikely(atomic_read(&kmem_alloc_used) > kmem_alloc_max)) \
+			kmem_alloc_max = atomic_read(&kmem_alloc_used);       \
+	}                                                                     \
+                                                                              \
+	_ptr_;                                                                \
+})
+
+#define kmem_alloc(size, flags) __kmem_alloc(size, flags, kmalloc)
+#define kmem_zalloc(size, flags) __kmem_alloc(size, flags, kzalloc)
+
+#define kmem_free(ptr, size) \
+({ \
+ BUG_ON(!ptr); \
+ atomic_sub((size), &kmem_alloc_used); \
+ memset(ptr, 0x5a, (size)); /* Poison */ \
+ kfree(ptr); \
+ (ptr) = (void *)0xdeadbeef; \
+})
+
+
+#else
+
+#define kmem_alloc(size, flags) kmalloc(size, flags)
+#define kmem_zalloc(size, flags) kzalloc(size, flags)
+#define kmem_free(ptr, size) kfree(ptr)
+
+#endif /* DEBUG_KMEM */
+
+
+#ifdef DEBUG_KMEM_UNIMPLEMENTED
+static __inline__ void *
+kmem_alloc_tryhard(size_t size, size_t *alloc_size, int kmflags)
+{
+#error "kmem_alloc_tryhard() not implemented"
+}
+#endif /* DEBUG_KMEM_UNIMPLEMENTED */
+
+/*
+ * Slab allocation interfaces
+ */
+#undef KMC_NOTOUCH /* No linux analog */
+#define KMC_NODEBUG 0x00000000 /* Default beahvior */
+#define KMC_NOMAGAZINE /* No linux analog */
+#define KMC_NOHASH /* No linux analog */
+#define KMC_QCACHE /* No linux analog */
+
+#define KMC_REAP_CHUNK 256
+#define KMC_DEFAULT_SEEKS DEFAULT_SEEKS
+
+/* Defined by linux slab.h
+ * typedef struct kmem_cache_s kmem_cache_t;
+ */
+
+/* No linux analog
+ * extern int kmem_ready;
+ * extern pgcnt_t kmem_reapahead;
+ */
+
+#ifdef DEBUG_KMEM_UNIMPLEMENTED
+static __inline__ void kmem_init(void) {
+#error "kmem_init() not implemented"
+}
+
+static __inline__ void kmem_thread_init(void) {
+#error "kmem_thread_init() not implemented"
+}
+
+static __inline__ void kmem_mp_init(void) {
+#error "kmem_mp_init() not implemented"
+}
+
+static __inline__ void kmem_reap_idspace(void) {
+#error "kmem_reap_idspace() not implemented"
+}
+
+static __inline__ size_t kmem_avail(void) {
+#error "kmem_avail() not implemented"
+}
+
+static __inline__ size_t kmem_maxavail(void) {
+#error "kmem_maxavail() not implemented"
+}
+
+static __inline__ uint64_t kmem_cache_stat(kmem_cache_t *cache) {
+#error "kmem_cache_stat() not implemented"
+}
+#endif /* DEBUG_KMEM_UNIMPLEMENTED */
+
+/* XXX - Used by arc.c to adjust its memory footprint. We may want
+ * to use this hook in the future to adjust behavior based on
+ * debug levels. For now it's safe to always return 0.
+ */
+static __inline__ int
+kmem_debugging(void)
+{
+ return 0;
+}
+
+typedef int (*kmem_constructor_t)(void *, void *, int);
+typedef void (*kmem_destructor_t)(void *, void *);
+typedef void (*kmem_reclaim_t)(void *);
+
+extern kmem_cache_t *
+__kmem_cache_create(char *name, size_t size, size_t align,
+ kmem_constructor_t constructor,
+ kmem_destructor_t destructor,
+ kmem_reclaim_t reclaim,
+ void *priv, void *vmp, int flags);
+
+/* Storage-class specifier must precede the type ('extern void', not
+ * 'void extern'); the latter ordering is obsolescent and rejected by
+ * modern compilers. */
+extern void __kmem_cache_destroy(kmem_cache_t *cache);
+
+extern void __kmem_reap(void);
+
+#define kmem_cache_create(name,size,align,ctor,dtor,rclm,priv,vmp,flags) \
+ __kmem_cache_create(name,size,align,ctor,dtor,rclm,priv,vmp,flags)
+#define kmem_cache_destroy(cache) __kmem_cache_destroy(cache)
+#define kmem_cache_alloc(cache, flags) kmem_cache_alloc(cache, flags)
+#define kmem_cache_free(cache, ptr) kmem_cache_free(cache, ptr)
+#define kmem_cache_reap_now(cache) kmem_cache_shrink(cache)
+#define kmem_reap() __kmem_reap()
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SPL_KMEM_H */
--- /dev/null
+#ifndef _SPL_KSTAT_H
+#define _SPL_KSTAT_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <linux/module.h>
+#include <sys/types.h>
+#include <sys/time.h>
+
+/* XXX - The minimum functionality here is stubbed out but nothing works. */
+
+#define KSTAT_STRLEN 31 /* 30 chars + NULL; must be 16 * n - 1 */
+
+#define KSTAT_TYPE_RAW 0 /* can be anything */
+ /* ks_ndata >= 1 */
+#define KSTAT_TYPE_NAMED 1 /* name/value pair */
+ /* ks_ndata >= 1 */
+#define KSTAT_TYPE_INTR 2 /* interrupt statistics */
+ /* ks_ndata == 1 */
+#define KSTAT_TYPE_IO 3 /* I/O statistics */
+ /* ks_ndata == 1 */
+#define KSTAT_TYPE_TIMER 4 /* event timer */
+ /* ks_ndata >= 1 */
+
+#define KSTAT_NUM_TYPES 5
+
+
+#define KSTAT_DATA_CHAR 0
+#define KSTAT_DATA_INT32 1
+#define KSTAT_DATA_UINT32 2
+#define KSTAT_DATA_INT64 3
+#define KSTAT_DATA_UINT64 4
+
+
+#define KSTAT_FLAG_VIRTUAL	0x01
+#define KSTAT_FLAG_VAR_SIZE	0x02
+#define KSTAT_FLAG_WRITABLE	0x04
+#define KSTAT_FLAG_PERSISTENT	0x08
+#define KSTAT_FLAG_DORMANT	0x10
+/* 0x20 per Solaris; the original 0x2 collided with KSTAT_FLAG_VAR_SIZE. */
+#define KSTAT_FLAG_INVALID	0x20
+
+
+typedef int kid_t; /* unique kstat id */
+
+typedef struct kstat_s {
+	/*
+	 * Fields relevant to both kernel and user
+	 */
+	hrtime_t	ks_crtime;	/* creation time (from gethrtime()) */
+	struct kstat_s	*ks_next;	/* kstat chain linkage */
+	kid_t		ks_kid;		/* unique kstat ID */
+	char		ks_module[KSTAT_STRLEN]; /* provider module name */
+	uchar_t		ks_resv;	/* reserved, currently just padding */
+	int		ks_instance;	/* provider module's instance */
+	char		ks_name[KSTAT_STRLEN]; /* kstat name */
+	uchar_t		ks_type;	/* kstat data type */
+	char		ks_class[KSTAT_STRLEN]; /* kstat class */
+	uchar_t		ks_flags;	/* kstat flags */
+	void		*ks_data;	/* kstat type-specific data */
+	uint_t		ks_ndata;	/* # of type-specific data records */
+	size_t		ks_data_size;	/* total size of kstat data section */
+	hrtime_t	ks_snaptime;	/* time of last data snapshot */
+	/*
+	 * Fields relevant to kernel only.  Callback prototypes must use the
+	 * declared tag 'struct kstat_s'; the original 'struct kstat' named an
+	 * unrelated incomplete type, so real callbacks would not match.
+	 */
+	int (*ks_update)(struct kstat_s *, int);	/* dynamic update */
+	void		*ks_private;	/* arbitrary provider-private data */
+	int (*ks_snapshot)(struct kstat_s *, void *, int);
+	void		*ks_lock;	/* protects this kstat's data */
+} kstat_t;
+
+typedef struct kstat_named_s {
+ char name[KSTAT_STRLEN]; /* name of counter */
+ uchar_t data_type; /* data type */
+ union {
+ char c[16]; /* enough for 128-bit ints */
+ int32_t i32;
+ uint32_t ui32;
+ struct {
+ union {
+ char *ptr; /* NULL-term string */
+ char __pad[8]; /* 64-bit padding */
+ } addr;
+ uint32_t len; /* # bytes for strlen + '\0' */
+ } str;
+/*
+ * The int64_t and uint64_t types are not valid for a maximally conformant
+ * 32-bit compilation environment (cc -Xc) using compilers prior to the
+ * introduction of C99 conforming compiler (reference ISO/IEC 9899:1990).
+ * In these cases, the visibility of i64 and ui64 is only permitted for
+ * 64-bit compilation environments or 32-bit non-maximally conformant
+ * C89 or C90 ANSI C compilation environments (cc -Xt and cc -Xa). In the
+ * C99 ANSI C compilation environment, the long long type is supported.
+ * The _INT64_TYPE is defined by the implementation (see sys/int_types.h).
+ */
+ int64_t i64;
+ uint64_t ui64;
+ long l;
+ ulong_t ul;
+
+ /* These structure members are obsolete */
+
+ longlong_t ll;
+ u_longlong_t ull;
+ float f;
+ double d;
+ } value; /* value of counter */
+} kstat_named_t;
+
+
+static __inline__ kstat_t *
+kstat_create(const char *ks_module, int ks_instance, const char *ks_name,
+ const char *ks_class, uchar_t ks_type, uint_t ks_ndata,
+ uchar_t ks_flags)
+{
+ return NULL;
+}
+
+static __inline__ void
+kstat_install(kstat_t *ksp)
+{
+ return;
+}
+
+static __inline__ void
+kstat_delete(kstat_t *ksp)
+{
+ return;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SPL_KSTAT_H */
+
--- /dev/null
+#ifndef _SPL_MUTEX_H
+#define _SPL_MUTEX_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <linux/module.h>
+#include <sys/types.h>
+
+/* See the "Big Theory Statement" in solaris mutex.c.
+ *
+ * Spin mutexes apparently aren't needed by zfs so we assert
+ * if ibc is non-zero.
+ *
+ * Our implementation of adaptive mutexes isn't really adaptive.
+ * They go to sleep every time.
+ */
+
+#define MUTEX_DEFAULT 0
+#define MUTEX_HELD(x) (mutex_owned(x))
+
+#define KM_MAGIC 0x42424242
+#define KM_POISON 0x84
+
+typedef struct {
+ int km_magic;
+ char *km_name;
+ struct task_struct *km_owner;
+ struct semaphore km_sem;
+} kmutex_t;
+
+#undef mutex_init
+static __inline__ void
+mutex_init(kmutex_t *mp, char *name, int type, void *ibc)
+{
+ BUG_ON(ibc != NULL); /* XXX - Spin mutexes not needed? */
+ BUG_ON(type != MUTEX_DEFAULT); /* XXX - Only default type supported? */
+
+ mp->km_magic = KM_MAGIC;
+ sema_init(&mp->km_sem, 1);
+ mp->km_owner = NULL;
+ mp->km_name = NULL;
+
+ if (name) {
+ mp->km_name = kmalloc(strlen(name) + 1, GFP_KERNEL);
+ if (mp->km_name)
+ strcpy(mp->km_name, name);
+ }
+}
+
+#undef mutex_destroy
+static __inline__ void
+mutex_destroy(kmutex_t *mp)
+{
+ BUG_ON(mp->km_magic != KM_MAGIC);
+
+ if (mp->km_name)
+ kfree(mp->km_name);
+
+ memset(mp, KM_POISON, sizeof(*mp));
+}
+
+static __inline__ void
+mutex_enter(kmutex_t *mp)
+{
+ BUG_ON(mp->km_magic != KM_MAGIC);
+ down(&mp->km_sem); /* Will check in_atomic() for us */
+ BUG_ON(mp->km_owner != NULL);
+ mp->km_owner = current;
+}
+
+/* Return 1 if we acquired the mutex, else zero.
+ */
+static __inline__ int
+mutex_tryenter(kmutex_t *mp)
+{
+ int result;
+
+ BUG_ON(mp->km_magic != KM_MAGIC);
+ result = down_trylock(&mp->km_sem); /* returns 0 if acquired */
+ if (result == 0) {
+ BUG_ON(mp->km_owner != NULL);
+ mp->km_owner = current;
+ return 1;
+ }
+ return 0;
+}
+
+static __inline__ void
+mutex_exit(kmutex_t *mp)
+{
+ BUG_ON(mp->km_magic != KM_MAGIC);
+ BUG_ON(mp->km_owner != current);
+ mp->km_owner = NULL;
+ up(&mp->km_sem);
+}
+
+/* Return 1 if mutex is held by current process, else zero.
+ */
+static __inline__ int
+mutex_owned(kmutex_t *mp)
+{
+ BUG_ON(mp->km_magic != KM_MAGIC);
+ return (mp->km_owner == current);
+}
+
+/* Return owner if mutex is owned, else NULL.
+ */
+static __inline__ kthread_t *
+mutex_owner(kmutex_t *mp)
+{
+ BUG_ON(mp->km_magic != KM_MAGIC);
+ return mp->km_owner;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SPL_MUTEX_H */
--- /dev/null
+#ifndef _SPL_PARAM_H
+#define _SPL_PARAM_H
+
+#endif /* _SPL_PARAM_H */
--- /dev/null
+#ifndef _SPL_RANDOM_H
+#define _SPL_RANDOM_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <linux/module.h>
+#include <linux/random.h>
+
+/* FIXME:
+ * Should add support for blocking in the future to
+ * ensure that proper entropy is collected.  ZFS doesn't
+ * use it at the moment so this is good enough for now.
+ * Always will succeed by returning 0.
+ *
+ * Note: len is a size_t and therefore unsigned, so a
+ * negative-length BUG_ON() would be tautologically false
+ * (and triggers -W warnings); no such check is made.
+ */
+static __inline__ int
+random_get_bytes(uint8_t *ptr, size_t len)
+{
+	get_random_bytes((void *)ptr,(int)len);
+	return 0;
+}
+
+ /* Always will succeed by returning 0. */
+static __inline__ int
+random_get_pseudo_bytes(uint8_t *ptr, size_t len)
+{
+	get_random_bytes((void *)ptr,(int)len);
+	return 0;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SPL_RANDOM_H */
--- /dev/null
+#ifndef _SPL_RWLOCK_H
+#define _SPL_RWLOCK_H
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/rwsem.h>
+#include <asm/current.h>
+#include <sys/types.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef enum {
+ RW_DRIVER = 2, /* driver (DDI) rwlock */
+ RW_DEFAULT = 4 /* kernel default rwlock */
+} krw_type_t;
+
+typedef enum {
+ RW_WRITER,
+ RW_READER
+} krw_t;
+
+#define RW_READ_HELD(x) (rw_read_held((x)))
+#define RW_WRITE_HELD(x) (rw_write_held((x)))
+#define RW_LOCK_HELD(x) (rw_lock_held((x)))
+#define RW_ISWRITER(x) (rw_iswriter(x))
+
+#define RW_MAGIC 0x3423645a
+#define RW_POISON 0xa6
+
+typedef struct {
+ int rw_magic;
+ char *rw_name;
+ struct rw_semaphore rw_sem;
+ struct task_struct *rw_owner; /* holder of the write lock */
+} krwlock_t;
+
+static __inline__ void
+rw_init(krwlock_t *rwlp, char *name, krw_type_t type, void *arg)
+{
+ BUG_ON(type != RW_DEFAULT); /* XXX no irq handler use */
+ BUG_ON(arg != NULL); /* XXX no irq handler use */
+ rwlp->rw_magic = RW_MAGIC;
+ rwlp->rw_owner = NULL; /* no one holds the write lock yet */
+ init_rwsem(&rwlp->rw_sem);
+ rwlp->rw_name = NULL;
+
+ if (name) {
+ rwlp->rw_name = kmalloc(strlen(name) + 1, GFP_KERNEL);
+ if (rwlp->rw_name)
+ strcpy(rwlp->rw_name, name);
+ }
+}
+
+static __inline__ void
+rw_destroy(krwlock_t *rwlp)
+{
+ BUG_ON(rwlp == NULL);
+ BUG_ON(rwlp->rw_magic != RW_MAGIC);
+ BUG_ON(rwlp->rw_owner != NULL);
+ spin_lock(&rwlp->rw_sem.wait_lock);
+ BUG_ON(!list_empty(&rwlp->rw_sem.wait_list));
+ spin_unlock(&rwlp->rw_sem.wait_lock);
+
+ if (rwlp->rw_name)
+ kfree(rwlp->rw_name);
+
+ memset(rwlp, RW_POISON, sizeof(krwlock_t));
+}
+
+/* Return 0 if the lock could not be obtained without blocking.
+ */
+static __inline__ int
+rw_tryenter(krwlock_t *rwlp, krw_t rw)
+{
+	/* Initialized so an unexpected krw_t value cannot leave
+	 * result uninitialized (the switch has no default case). */
+	int result = 0;
+
+	BUG_ON(rwlp->rw_magic != RW_MAGIC);
+	switch (rw) {
+		/* these functions return 1 if success, 0 if contention */
+		case RW_READER:
+			/* Here the Solaris code would return 0
+			 * if there were any write waiters.  Specifically
+			 * thinking about the case where readers may have
+			 * the lock and we would also allow this thread
+			 * to grab the read lock with a writer waiting in the
+			 * queue.  This doesn't seem like a correctness
+			 * issue, so just call down_read_trylock()
+			 * for the test.  We may have to revisit this if
+			 * it becomes an issue */
+			result = down_read_trylock(&rwlp->rw_sem);
+			break;
+		case RW_WRITER:
+			result = down_write_trylock(&rwlp->rw_sem);
+			if (result) {
+				/* there better not be anyone else
+				 * holding the write lock here */
+				BUG_ON(rwlp->rw_owner != NULL);
+				rwlp->rw_owner = current;
+			}
+			break;
+		default:
+			/* invalid lock type requested */
+			BUG();
+	}
+
+	return result;
+}
+
+static __inline__ void
+rw_enter(krwlock_t *rwlp, krw_t rw)
+{
+ BUG_ON(rwlp->rw_magic != RW_MAGIC);
+ switch (rw) {
+ case RW_READER:
+ /* Here the Solaris code would block
+ * if there were any write waiters. Specifically
+ * thinking about the case where readers may have
+ * the lock and we would also allow this thread
+ * to grab the read lock with a writer waiting in the
+ * queue. This doesn't seem like a correctness
+ * issue, so just call down_read()
+ * for the test. We may have to revisit this if
+ * it becomes an issue */
+ down_read(&rwlp->rw_sem);
+ break;
+ case RW_WRITER:
+ down_write(&rwlp->rw_sem);
+
+ /* there better not be anyone else
+ * holding the write lock here */
+ BUG_ON(rwlp->rw_owner != NULL);
+ rwlp->rw_owner = current;
+ break;
+ }
+}
+
+static __inline__ void
+rw_exit(krwlock_t *rwlp)
+{
+ BUG_ON(rwlp->rw_magic != RW_MAGIC);
+
+ /* rw_owner is held by current
+ * thread iff it is a writer */
+ if (rwlp->rw_owner == current) {
+ rwlp->rw_owner = NULL;
+ up_write(&rwlp->rw_sem);
+ } else {
+ up_read(&rwlp->rw_sem);
+ }
+}
+
+static __inline__ void
+rw_downgrade(krwlock_t *rwlp)
+{
+ BUG_ON(rwlp->rw_magic != RW_MAGIC);
+ BUG_ON(rwlp->rw_owner != current);
+ rwlp->rw_owner = NULL;
+ downgrade_write(&rwlp->rw_sem);
+}
+
+/* Return 0 if unable to perform the upgrade.
+ * Might be wise to fix the caller
+ * to acquire the write lock first?
+ */
+static __inline__ int
+rw_tryupgrade(krwlock_t *rwlp)
+{
+ int result;
+ BUG_ON(rwlp->rw_magic != RW_MAGIC);
+
+ spin_lock(&rwlp->rw_sem.wait_lock);
+
+ /* Check if there is anyone waiting for the
+ * lock. If there is, then we know we should
+ * not try to upgrade the lock */
+ if (!list_empty(&rwlp->rw_sem.wait_list)) {
+ printk(KERN_WARNING "There are threads waiting\n");
+ spin_unlock(&rwlp->rw_sem.wait_lock);
+ return 0;
+ }
+#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
+ /* Note that activity is protected by
+ * the wait_lock. Don't try to upgrade
+ * if there are multiple readers currently
+ * holding the lock */
+ if (rwlp->rw_sem.activity > 1) {
+#else
+ /* Don't try to upgrade
+ * if there are multiple readers currently
+ * holding the lock */
+ if ((rwlp->rw_sem.count & RWSEM_ACTIVE_MASK) > 1) {
+#endif
+ spin_unlock(&rwlp->rw_sem.wait_lock);
+ return 0;
+ }
+
+ /* Here it should be safe to drop the
+ * read lock and reacquire it for writing since
+ * we know there are no waiters */
+ up_read(&rwlp->rw_sem);
+
+ /* returns 1 if success, 0 if contention */
+ result = down_write_trylock(&rwlp->rw_sem);
+
+ /* Check if upgrade failed. Should not ever happen
+ * if we got to this point */
+ BUG_ON(!result);
+ BUG_ON(rwlp->rw_owner != NULL);
+ rwlp->rw_owner = current;
+ spin_unlock(&rwlp->rw_sem.wait_lock);
+ return 1;
+}
+
+static __inline__ kthread_t *
+rw_owner(krwlock_t *rwlp)
+{
+ BUG_ON(rwlp->rw_magic != RW_MAGIC);
+ return rwlp->rw_owner;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SPL_RWLOCK_H */
--- /dev/null
+#ifndef _SPL_H
+#define _SPL_H
+
+#include <sys/callb.h>
+#include <sys/condvar.h>
+#include <sys/cred.h>
+#include <sys/generic.h>
+#include <sys/kmem.h>
+#include <sys/kstat.h>
+#include <sys/mutex.h>
+#include <sys/random.h>
+#include <sys/rwlock.h>
+#include <sys/taskq.h>
+#include <sys/thread.h>
+#include <sys/time.h>
+#include <sys/timer.h>
+#include <sys/types.h>
+
+#endif /* _SPL_H */
--- /dev/null
+#ifndef _SPL_TASKQ_H
+#define _SPL_TASKQ_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Task Queues - As of linux 2.6.x task queues have been replaced by a
+ * similar construct called work queues. The big difference on the linux
+ * side is that functions called from work queues run in process context
+ * and not interrupt context.
+ *
+ * One nice feature of Solaris which does not exist in linux work
+ * queues is the notion of a dynamic work queue.  Rather than implementing
+ * this in the shim layer I'm hardcoding one-thread per work queue.
+ *
+ * XXX - This may end up being a significant performance penalty which
+ * forces us to implement dynamic workqueues. Which is all very doable
+ * with a little effort.
+ */
+#include <linux/module.h>
+#include <linux/workqueue.h>
+#include <linux/gfp.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <sys/types.h>
+
+#undef DEBUG_TASKQ_UNIMPLEMENTED
+
+#define TASKQ_NAMELEN 31
+#define taskq_t workq_t
+
+typedef struct workqueue_struct workq_t;
+typedef unsigned long taskqid_t;
+typedef void (*task_func_t)(void *);
+
+/*
+ * Public flags for taskq_create(): bit range 0-15
+ */
+#define TASKQ_PREPOPULATE 0x0000 /* XXX - Workqueues fully populate */
+#define TASKQ_CPR_SAFE 0x0000 /* XXX - No analog */
+#define TASKQ_DYNAMIC 0x0000 /* XXX - Workqueues not dynamic */
+
+/*
+ * Flags for taskq_dispatch. TQ_SLEEP/TQ_NOSLEEP should be same as
+ * KM_SLEEP/KM_NOSLEEP.
+ */
+#define TQ_SLEEP 0x00 /* XXX - Workqueues don't support */
+#define TQ_NOSLEEP 0x00 /* these sorts of flags. They */
+#define TQ_NOQUEUE 0x00 /* always run in application */
+#define TQ_NOALLOC 0x00 /* context and can sleep. */
+
+
+#ifdef DEBUG_TASKQ_UNIMPLEMENTED
+static __inline__ void taskq_init(void) {
+#error "taskq_init() not implemented"
+}
+
+static __inline__ taskq_t *
+taskq_create_instance(const char *, int, int, pri_t, int, int, uint_t) {
+#error "taskq_create_instance() not implemented"
+}
+
+extern void nulltask(void *);
+extern void taskq_suspend(taskq_t *);
+extern int taskq_suspended(taskq_t *);
+extern void taskq_resume(taskq_t *);
+
+#endif /* DEBUG_TASKQ_UNIMPLEMENTED */
+
+extern taskqid_t __taskq_dispatch(taskq_t *, task_func_t, void *, uint_t);
+extern taskq_t *__taskq_create(const char *, int, pri_t, int, int, uint_t);
+
+#define taskq_create(name, thr, pri, min, max, flags) \
+ __taskq_create(name, thr, pri, min, max, flags)
+#define taskq_dispatch(tq, func, priv, flags) \
+ __taskq_dispatch(tq, func, priv, flags)
+#define taskq_destory(tq) destroy_workqueue(tq)
+#define taskq_wait(tq) flush_workqueue(tq)
+#define taskq_member(tq, kthr) 1 /* XXX -Just be true */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SPL_TASKQ_H */
--- /dev/null
+#ifndef _SPL_THREAD_H
+#define _SPL_THREAD_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/spinlock.h>
+#include <sys/types.h>
+#include <sys/generic.h>
+
+/*
+ * Thread interfaces
+ */
+#define TP_MAGIC 0x53535353
+
+#define TS_SLEEP TASK_INTERRUPTIBLE
+#define TS_RUN TASK_RUNNING
+#define TS_ZOMB EXIT_ZOMBIE
+#define TS_STOPPED TASK_STOPPED
+#if 0
+#define TS_FREE 0x00 /* No clean linux mapping */
+#define TS_ONPROC 0x04 /* No clean linux mapping */
+#define TS_WAIT 0x20 /* No clean linux mapping */
+#endif
+
+#define thread_create(stk, stksize, func, arg, len, pp, state, pri) \
+ __thread_create(stk, stksize, func, arg, len, pp, state, pri)
+#define thread_exit() __thread_exit()
+#define curthread get_current()
+
+/* We just need a valid type to pass around, it's unused */
+typedef struct proc_s {
+ int foo;
+} proc_t;
+
+extern kthread_t *__thread_create(caddr_t stk, size_t stksize,
+ void (*proc)(void *), void *args,
+ size_t len, proc_t *pp, int state,
+ pri_t pri);
+extern void __thread_exit(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SPL_THREAD_H */
+
--- /dev/null
+#ifndef _SPL_TIME_H
+#define _SPL_TIME_H
+
+/*
+ * Structure returned by gettimeofday(2) system call,
+ * and used in other calls.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <linux/module.h>
+#include <linux/time.h>
+#include <sys/types.h>
+
+extern unsigned long long monotonic_clock(void);
+typedef struct timespec timestruc_t; /* definition per SVr4 */
+typedef longlong_t hrtime_t;
+
+#define TIME32_MAX INT32_MAX
+#define TIME32_MIN INT32_MIN
+
+#define SEC 1
+#define MILLISEC 1000
+#define MICROSEC 1000000
+#define NANOSEC 1000000000
+
+#define hz \
+({ \
+ BUG_ON(HZ < 100 || HZ > MICROSEC); \
+ HZ; \
+})
+
+#define gethrestime(ts) getnstimeofday((ts))
+
+static __inline__ hrtime_t
+gethrtime(void) {
+ /* BUG_ON(cur_timer == timer_none); */
+
+ /* Solaris expects a long long here but monotonic_clock() returns an
+ * unsigned long long. Note that monotonic_clock() returns the number
+ * of nanoseconds passed since kernel initialization. Even for a signed
+ * long long this will not "go negative" for ~292 years.
+ */
+ return monotonic_clock();
+}
+
+static __inline__ time_t
+gethrestime_sec(void)
+{
+ timestruc_t now;
+
+ gethrestime(&now);
+ return (now.tv_sec);
+}
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SPL_TIME_H */
--- /dev/null
+#ifndef _SPL_TIMER_H
+#define _SPL_TIMER_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+
+/* Solaris-style tick counters, expressed in jiffies */
+#define lbolt ((clock_t)jiffies)
+#define lbolt64 ((int64_t)get_jiffies_64())
+
+/* Sleep for 'ticks' clock ticks.  schedule_timeout() takes a plain
+ * long, so cast with (long) -- "(long timeout)" is not a valid cast. */
+#define delay(ticks) schedule_timeout((long)(ticks))
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SPL_TIMER_H */
+
--- /dev/null
+#ifndef _SPL_TYPES_H
+#define _SPL_TYPES_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef enum { B_FALSE=0, B_TRUE=1 } boolean_t;
+typedef unsigned long uintptr_t;
+typedef long intptr_t; /* intptr_t is a *signed* pointer-sized int */
+typedef unsigned long ulong_t;
+typedef unsigned int uint_t;
+typedef unsigned char uchar_t;
+typedef unsigned long long u_longlong_t;
+typedef unsigned long long u_offset_t;
+typedef unsigned long long rlim64_t;
+typedef long long longlong_t;
+typedef long long offset_t;
+typedef struct task_struct kthread_t;
+typedef struct vmem { } vmem_t;
+typedef short pri_t;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SPL_TYPES_H */
-INCLUDES = -I$(top_srcdir)/include
+DEFAULT_INCLUDES = -I. -I..
+AM_CFLAGS = -g -O2 -W -Wall -Wstrict-prototypes -Wshadow
+
noinst_LTLIBRARIES = libcommon.la
libcommon_la_SOURCES = list.c
+
+EXTRA_DIST = list.h
void **plast;
assert(sizeof(char) == 1);
- assert(size >= sizeof(void *));
+ assert(size >= (int)sizeof(void *));
assert(pfreelist != NULL);
assert(LIST_ALLOC > 0);
list_mutex_lock(&list_free_lock);
--- /dev/null
+/*****************************************************************************
+ * $Id: list.h 2899 2002-12-11 19:00:36Z dun $
+ *****************************************************************************
+ * Copyright (C) 2001-2002 The Regents of the University of California.
+ * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ * Written by Chris Dunlap <cdunlap@llnl.gov>.
+ *
+ * This file is from LSD-Tools, the LLNL Software Development Toolbox.
+ *
+ * LSD-Tools is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * LSD-Tools is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with LSD-Tools; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *****************************************************************************/
+
+
+#ifndef LSD_LIST_H
+#define LSD_LIST_H
+
+
+/***********
+ * Notes *
+ ***********/
+/*
+ * If NDEBUG is not defined, internal debug code will be enabled. This is
+ * intended for development use only and production code should define NDEBUG.
+ *
+ * If WITH_LSD_FATAL_ERROR_FUNC is defined, the linker will expect to
+ * find an external lsd_fatal_error(file,line,mesg) function. By default,
+ * lsd_fatal_error(file,line,mesg) is a macro definition that outputs an
+ * error message to stderr. This macro may be redefined to invoke another
+ * routine instead.
+ *
+ * If WITH_LSD_NOMEM_ERROR_FUNC is defined, the linker will expect to
+ * find an external lsd_nomem_error(file,line,mesg) function. By default,
+ * lsd_nomem_error(file,line,mesg) is a macro definition that returns NULL.
+ * This macro may be redefined to invoke another routine instead.
+ *
+ * If WITH_PTHREADS is defined, these routines will be thread-safe.
+ */
+
+
+/****************
+ * Data Types *
+ ****************/
+
+typedef struct list * List;
+/*
+ * List opaque data type.
+ */
+
+typedef struct listIterator * ListIterator;
+/*
+ * List Iterator opaque data type.
+ */
+
+typedef void (*ListDelF) (void *x);
+/*
+ * Function prototype to deallocate data stored in a list.
+ * This function is responsible for freeing all memory associated
+ * with an item, including all subordinate items (if applicable).
+ */
+
+typedef int (*ListCmpF) (void *x, void *y);
+/*
+ * Function prototype for comparing two items in a list.
+ * Returns less-than-zero if (x<y), zero if (x==y), and
+ * greather-than-zero if (x>y).
+ */
+
+typedef int (*ListFindF) (void *x, void *key);
+/*
+ * Function prototype for matching items in a list.
+ * Returns non-zero if (x==key); o/w returns zero.
+ */
+
+typedef int (*ListForF) (void *x, void *arg);
+/*
+ * Function prototype for operating on each item in a list.
+ * Returns less-than-zero on error.
+ */
+
+
+/*******************************
+ * General-Purpose Functions *
+ *******************************/
+
+List list_create (ListDelF f);
+/*
+ * Creates and returns a new empty list, or lsd_nomem_error() on failure.
+ * The deletion function [f] is used to deallocate memory used by items
+ * in the list; if this is NULL, memory associated with these items
+ * will not be freed when the list is destroyed.
+ * Note: Abandoning a list without calling list_destroy() will result
+ * in a memory leak.
+ */
+
+void list_destroy (List l);
+/*
+ * Destroys list [l], freeing memory used for list iterators and the
+ * list itself; if a deletion function was specified when the list
+ * was created, it will be called for each item in the list.
+ */
+
+int list_is_empty (List l);
+/*
+ * Returns non-zero if list [l] is empty; o/w returns zero.
+ */
+
+int list_count (List l);
+/*
+ * Returns the number of items in list [l].
+ */
+
+
+/***************************
+ * List Access Functions *
+ ***************************/
+
+void * list_append (List l, void *x);
+/*
+ * Inserts data [x] at the end of list [l].
+ * Returns the data's ptr, or lsd_nomem_error() if insertion failed.
+ */
+
+void * list_prepend (List l, void *x);
+/*
+ * Inserts data [x] at the beginning of list [l].
+ * Returns the data's ptr, or lsd_nomem_error() if insertion failed.
+ */
+
+void * list_find_first (List l, ListFindF f, void *key);
+/*
+ * Traverses list [l] using [f] to match each item with [key].
+ * Returns a ptr to the first item for which the function [f]
+ * returns non-zero, or NULL if no such item is found.
+ * Note: This function differs from list_find() in that it does not require
+ * a list iterator; it should only be used when all list items are known
+ * to be unique (according to the function [f]).
+ */
+
+int list_delete_all (List l, ListFindF f, void *key);
+/*
+ * Traverses list [l] using [f] to match each item with [key].
+ * Removes all items from the list for which the function [f] returns
+ * non-zero; if a deletion function was specified when the list was
+ * created, it will be called to deallocate each item being removed.
+ * Returns a count of the number of items removed from the list.
+ */
+
+int list_for_each (List l, ListForF f, void *arg);
+/*
+ * For each item in list [l], invokes the function [f] with [arg].
+ * Returns a count of the number of items on which [f] was invoked.
+ * If [f] returns <0 for a given item, the iteration is aborted and the
+ * function returns the negative of that item's position in the list.
+ */
+
+void list_sort (List l, ListCmpF f);
+/*
+ * Sorts list [l] into ascending order according to the function [f].
+ * Note: Sorting a list resets all iterators associated with the list.
+ * Note: The sort algorithm is stable.
+ */
+
+
+/****************************
+ * Stack Access Functions *
+ ****************************/
+
+void * list_push (List l, void *x);
+/*
+ * Pushes data [x] onto the top of stack [l].
+ * Returns the data's ptr, or lsd_nomem_error() if insertion failed.
+ */
+
+void * list_pop (List l);
+/*
+ * Pops the data item at the top of the stack [l].
+ * Returns the data's ptr, or NULL if the stack is empty.
+ */
+
+void * list_peek (List l);
+/*
+ * Peeks at the data item at the top of the stack (or head of the queue) [l].
+ * Returns the data's ptr, or NULL if the stack (or queue) is empty.
+ * Note: The item is not removed from the list.
+ */
+
+
+/****************************
+ * Queue Access Functions *
+ ****************************/
+
+void * list_enqueue (List l, void *x);
+/*
+ * Enqueues data [x] at the tail of queue [l].
+ * Returns the data's ptr, or lsd_nomem_error() if insertion failed.
+ */
+
+void * list_dequeue (List l);
+/*
+ * Dequeues the data item at the head of the queue [l].
+ * Returns the data's ptr, or NULL if the queue is empty.
+ */
+
+
+/*****************************
+ * List Iterator Functions *
+ *****************************/
+
+ListIterator list_iterator_create (List l);
+/*
+ * Creates and returns a list iterator for non-destructively traversing
+ * list [l], or lsd_nomem_error() on failure.
+ */
+
+void list_iterator_reset (ListIterator i);
+/*
+ * Resets the list iterator [i] to start traversal at the beginning
+ * of the list.
+ */
+
+void list_iterator_destroy (ListIterator i);
+/*
+ * Destroys the list iterator [i]; list iterators not explicitly destroyed
+ * in this manner will be destroyed when the list is deallocated via
+ * list_destroy().
+ */
+
+void * list_next (ListIterator i);
+/*
+ * Returns a ptr to the next item's data,
+ * or NULL once the end of the list is reached.
+ * Example: i=list_iterator_create(i); while ((x=list_next(i))) {...}
+ */
+
+void * list_insert (ListIterator i, void *x);
+/*
+ * Inserts data [x] immediately before the last item returned via list
+ * iterator [i]; once the list iterator reaches the end of the list,
+ * insertion is made at the list's end.
+ * Returns the data's ptr, or lsd_nomem_error() if insertion failed.
+ */
+
+void * list_find (ListIterator i, ListFindF f, void *key);
+/*
+ * Traverses the list from the point of the list iterator [i]
+ * using [f] to match each item with [key].
+ * Returns a ptr to the next item for which the function [f]
+ * returns non-zero, or NULL once the end of the list is reached.
+ * Example: i=list_iterator_reset(i); while ((x=list_find(i,f,k))) {...}
+ */
+
+void * list_remove (ListIterator i);
+/*
+ * Removes from the list the last item returned via list iterator [i]
+ * and returns the data's ptr.
+ * Note: The client is responsible for freeing the returned data.
+ */
+
+int list_delete (ListIterator i);
+/*
+ * Removes from the list the last item returned via list iterator [i];
+ * if a deletion function was specified when the list was created,
+ * it will be called to deallocate the item being removed.
+ * Returns a count of the number of items removed from the list
+ * (ie, '1' if the item was removed, and '0' otherwise).
+ */
+
+
+#endif /* !LSD_LIST_H */
-#include "spl-generic.h"
+#include <sys/generic.h>
#include "config.h"
/*
-#include "spl-kmem.h"
+#include <sys/kmem.h>
/*
* Memory allocation interfaces
-#include <spl-rwlock.h>
+#include <sys/rwlock.h>
int
rw_lock_held(krwlock_t *rwlp)
-#include <spl-taskq.h>
+#include <sys/taskq.h>
/*
* Task queue interface
-#include <spl-thread.h>
+#include <sys/thread.h>
/*
* Thread interfaces
#include <asm/uaccess.h>
#include <stdarg.h>
-#include "spl.h"
+#include <sys/spl.h>
#include "splat-ctl.h"
#define SPLAT_SUBSYSTEM_INIT(type) \