/*
 * Imported from git.proxmox.com mirror_spl.git
 * ("Prepare SPL repo to merge with ZFS repo"): module/spl/spl-generic.c
 */
1 /*
2 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
3 * Copyright (C) 2007 The Regents of the University of California.
4 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
5 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
6 * UCRL-CODE-235197
7 *
8 * This file is part of the SPL, Solaris Porting Layer.
9 * For details, see <http://zfsonlinux.org/>.
10 *
11 * The SPL is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 *
16 * The SPL is distributed in the hope that it will be useful, but WITHOUT
17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 * for more details.
20 *
21 * You should have received a copy of the GNU General Public License along
22 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
23 *
24 * Solaris Porting Layer (SPL) Generic Implementation.
25 */
26
27 #include <sys/sysmacros.h>
28 #include <sys/systeminfo.h>
29 #include <sys/vmsystm.h>
30 #include <sys/kobj.h>
31 #include <sys/kmem.h>
32 #include <sys/kmem_cache.h>
33 #include <sys/vmem.h>
34 #include <sys/mutex.h>
35 #include <sys/rwlock.h>
36 #include <sys/taskq.h>
37 #include <sys/tsd.h>
38 #include <sys/zmod.h>
39 #include <sys/debug.h>
40 #include <sys/proc.h>
41 #include <sys/kstat.h>
42 #include <sys/file.h>
43 #include <linux/ctype.h>
44 #include <sys/disp.h>
45 #include <sys/random.h>
46 #include <sys/strings.h>
47 #include <linux/kmod.h>
48
49 char spl_version[32] = "SPL v" SPL_META_VERSION "-" SPL_META_RELEASE;
50 EXPORT_SYMBOL(spl_version);
51
52 /* BEGIN CSTYLED */
53 unsigned long spl_hostid = 0;
54 EXPORT_SYMBOL(spl_hostid);
55 module_param(spl_hostid, ulong, 0644);
56 MODULE_PARM_DESC(spl_hostid, "The system hostid.");
57 /* END CSTYLED */
58
59 proc_t p0;
60 EXPORT_SYMBOL(p0);
61
62 /*
63 * Xorshift Pseudo Random Number Generator based on work by Sebastiano Vigna
64 *
65 * "Further scramblings of Marsaglia's xorshift generators"
66 * http://vigna.di.unimi.it/ftp/papers/xorshiftplus.pdf
67 *
68 * random_get_pseudo_bytes() is an API function on Illumos whose sole purpose
69 * is to provide bytes containing random numbers. It is mapped to /dev/urandom
70 * on Illumos, which uses a "FIPS 186-2 algorithm". No user of the SPL's
71 * random_get_pseudo_bytes() needs bytes that are of cryptographic quality, so
72 * we can implement it using a fast PRNG that we seed using Linux' actual
73 * equivalent to random_get_pseudo_bytes(). We do this by providing each CPU
74 * with an independent seed so that all calls to random_get_pseudo_bytes() are
75 * free of atomic instructions.
76 *
77 * A consequence of using a fast PRNG is that using random_get_pseudo_bytes()
78 * to generate words larger than 128 bits will paradoxically be limited to
79 * `2^128 - 1` possibilities. This is because we have a sequence of `2^128 - 1`
80 * 128-bit words and selecting the first will implicitly select the second. If
 * a caller finds this behavior undesirable, random_get_bytes() should be used
82 * instead.
83 *
84 * XXX: Linux interrupt handlers that trigger within the critical section
85 * formed by `s[1] = xp[1];` and `xp[0] = s[0];` and call this function will
86 * see the same numbers. Nothing in the code currently calls this in an
87 * interrupt handler, so this is considered to be okay. If that becomes a
88 * problem, we could create a set of per-cpu variables for interrupt handlers
89 * and use them when in_interrupt() from linux/preempt_mask.h evaluates to
90 * true.
91 */
92 static DEFINE_PER_CPU(uint64_t[2], spl_pseudo_entropy);
93
94 /*
95 * spl_rand_next()/spl_rand_jump() are copied from the following CC-0 licensed
96 * file:
97 *
98 * http://xorshift.di.unimi.it/xorshift128plus.c
99 */
100
/*
 * Advance the xorshift128+ state 's' by one step and return the next
 * 64-bit pseudo-random value (the sum of the two state words).
 */
static inline uint64_t
spl_rand_next(uint64_t *s)
{
	const uint64_t low = s[1];
	uint64_t high = s[0];

	s[0] = low;
	high ^= high << 23;			/* a */
	high ^= high >> 18;			/* b */
	high ^= low ^ (low >> 5);		/* c */
	s[1] = high;

	return (low + high);
}
111
/*
 * Advance the xorshift128+ state 's' by 2^64 single steps in O(128)
 * work, using the generator's precomputed jump polynomial. Used to
 * hand each CPU a provably non-overlapping subsequence.
 */
static inline void
spl_rand_jump(uint64_t *s)
{
	static const uint64_t JUMP[] =
	    { 0x8a5cd789635d2dff, 0x121fd2155c472f96 };

	uint64_t next0 = 0;
	uint64_t next1 = 0;
	size_t word;
	int bit;

	for (word = 0; word < sizeof (JUMP) / sizeof (*JUMP); word++) {
		for (bit = 0; bit < 64; bit++) {
			/* Accumulate states selected by the jump mask. */
			if (JUMP[word] & (1ULL << bit)) {
				next0 ^= s[0];
				next1 ^= s[1];
			}
			(void) spl_rand_next(s);
		}
	}

	s[0] = next0;
	s[1] = next1;
}
133
134 int
135 random_get_pseudo_bytes(uint8_t *ptr, size_t len)
136 {
137 uint64_t *xp, s[2];
138
139 ASSERT(ptr);
140
141 xp = get_cpu_var(spl_pseudo_entropy);
142
143 s[0] = xp[0];
144 s[1] = xp[1];
145
146 while (len) {
147 union {
148 uint64_t ui64;
149 uint8_t byte[sizeof (uint64_t)];
150 }entropy;
151 int i = MIN(len, sizeof (uint64_t));
152
153 len -= i;
154 entropy.ui64 = spl_rand_next(s);
155
156 while (i--)
157 *ptr++ = entropy.byte[i];
158 }
159
160 xp[0] = s[0];
161 xp[1] = s[1];
162
163 put_cpu_var(spl_pseudo_entropy);
164
165 return (0);
166 }
167
168
169 EXPORT_SYMBOL(random_get_pseudo_bytes);
170
171 #if BITS_PER_LONG == 32
172 /*
173 * Support 64/64 => 64 division on a 32-bit platform. While the kernel
174 * provides a div64_u64() function for this we do not use it because the
175 * implementation is flawed. There are cases which return incorrect
176 * results as late as linux-2.6.35. Until this is fixed upstream the
177 * spl must provide its own implementation.
178 *
179 * This implementation is a slightly modified version of the algorithm
180 * proposed by the book 'Hacker's Delight'. The original source can be
181 * found here and is available for use without restriction.
182 *
183 * http://www.hackersdelight.org/HDcode/newCode/divDouble.c
184 */
185
/*
 * Count the number of leading zero bits in a 64-bit value.
 * Returns 64 for an input of zero.
 */
static int
nlz64(uint64_t x)
{
	int zeros = 0;
	int shift;

	if (x == 0)
		return (64);

	/*
	 * Binary search: if the top 'shift' bits are all clear, record
	 * them and slide the value up, halving the probe width each pass.
	 */
	for (shift = 32; shift > 0; shift >>= 1) {
		if (x <= (~0ULL >> shift)) {
			zeros += shift;
			x <<= shift;
		}
	}

	return (zeros);
}
206
/*
 * Newer kernels have a div_u64() function but we define our own
 * to simplify portability between kernel versions.
 *
 * Divide a 64-bit dividend by a 32-bit divisor and return the quotient.
 */
static inline uint64_t
__div_u64(uint64_t u, uint32_t v)
{
	/*
	 * do_div() divides in place: 'u' becomes the quotient and the
	 * macro's value is the remainder, which is deliberately dropped.
	 */
	(void) do_div(u, v);
	return (u);
}
217
/*
 * Implementation of 64-bit unsigned division for 32-bit machines.
 *
 * First the procedure takes care of the case in which the divisor is a
 * 32-bit quantity. There are two subcases: (1) If the left half of the
 * dividend is less than the divisor, one execution of do_div() is all that
 * is required (overflow is not possible). (2) Otherwise it does two
 * divisions, using the grade school method.
 *
 * The v >= 2**32 branch follows the divDouble() estimate-and-correct
 * scheme from Hacker's Delight: divide a normalized guess, which is at
 * most one too large, then fix it up against the true remainder.
 */
uint64_t
__udivdi3(uint64_t u, uint64_t v)
{
	uint64_t u0, u1, v1, q0, q1, k;
	int n;

	if (v >> 32 == 0) {			// If v < 2**32:
		if (u >> 32 < v) {		// If u/v cannot overflow,
			return (__div_u64(u, v)); // just do one division.
		} else {			// If u/v would overflow:
			u1 = u >> 32;		// Break u into two halves.
			u0 = u & 0xFFFFFFFF;
			q1 = __div_u64(u1, v);	// First quotient digit.
			k = u1 - q1 * v;	// First remainder, < v.
			u0 += (k << 32);
			q0 = __div_u64(u0, v);	// Second quotient digit.
			return ((q1 << 32) + q0);
		}
	} else {				// If v >= 2**32:
		n = nlz64(v);			// 0 <= n <= 31.
		v1 = (v << n) >> 32;		// Normalize divisor, MSB is 1.
		u1 = u >> 1;			// To ensure no overflow.
		q1 = __div_u64(u1, v1);		// Get quotient from
		q0 = (q1 << n) >> 31;		// Undo normalization and
						// division of u by 2.
		if (q0 != 0)			// Make q0 correct or
			q0 = q0 - 1;		// too small by 1.
		if ((u - q0 * v) >= v)
			q0 = q0 + 1;		// Now q0 is correct.

		return (q0);
	}
}
EXPORT_SYMBOL(__udivdi3);
261
/* BEGIN CSTYLED */
#ifndef abs64
/*
 * Branch-free 64-bit absolute value: 't' becomes an all-ones mask when
 * x is negative (arithmetic shift of the sign bit) and all-zeros when
 * non-negative, so (x ^ t) - t conditionally negates x.
 * NOTE(review): relies on arithmetic >> of a negative value and wraps
 * for INT64_MIN, mirroring libgcc's own helpers -- confirm if reused
 * outside this file.
 */
#define abs64(x)	({ uint64_t t = (x) >> 63; ((x) ^ t) - t; })
#endif
/* END CSTYLED */

/*
 * Implementation of 64-bit signed division for 32-bit machines.
 * Divides the magnitudes with __udivdi3(), then restores the sign.
 */
int64_t
__divdi3(int64_t u, int64_t v)
{
	int64_t q, t;
	q = __udivdi3(abs64(u), abs64(v));
	t = (u ^ v) >> 63;	// If u, v have different
	return ((q ^ t) - t);	// signs, negate q.
}
EXPORT_SYMBOL(__divdi3);
280
/*
 * Implementation of 64-bit unsigned modulo for 32-bit machines.
 * Derived from the quotient: n mod d == n - d * (n / d).
 */
uint64_t
__umoddi3(uint64_t dividend, uint64_t divisor)
{
	return (dividend - (divisor * __udivdi3(dividend, divisor)));
}
EXPORT_SYMBOL(__umoddi3);
290
291 /*
292 * Implementation of 64-bit unsigned division/modulo for 32-bit machines.
293 */
/*
 * Combined 64-bit unsigned division and modulo for 32-bit machines.
 * Returns n / d and, when 'r' is non-NULL, stores n % d through it.
 */
uint64_t
__udivmoddi4(uint64_t n, uint64_t d, uint64_t *r)
{
	uint64_t quot;

	quot = __udivdi3(n, d);

	/* Remainder falls out of the quotient for free. */
	if (r != NULL)
		*r = n - (d * quot);

	return (quot);
}
302 EXPORT_SYMBOL(__udivmoddi4);
303
/*
 * Implementation of 64-bit signed division/modulo for 32-bit machines.
 * Follows C truncated-division semantics: the quotient is negated when
 * the operand signs differ and the remainder takes the dividend's sign.
 */
int64_t
__divmoddi4(int64_t n, int64_t d, int64_t *r)
{
	int64_t q, rr;
	boolean_t nn = B_FALSE;	/* dividend was negative */
	boolean_t nd = B_FALSE;	/* divisor was negative */
	if (n < 0) {
		nn = B_TRUE;
		n = -n;	/* NOTE(review): wraps for INT64_MIN, like libgcc */
	}
	if (d < 0) {
		nd = B_TRUE;
		d = -d;
	}

	/* Both operands are now non-negative magnitudes. */
	q = __udivmoddi4(n, d, (uint64_t *)&rr);

	if (nn != nd)
		q = -q;
	if (nn)
		rr = -rr;
	if (r)
		*r = rr;
	return (q);
}
332 EXPORT_SYMBOL(__divmoddi4);
333
334 #if defined(__arm) || defined(__arm__)
335 /*
336 * Implementation of 64-bit (un)signed division for 32-bit arm machines.
337 *
338 * Run-time ABI for the ARM Architecture (page 20). A pair of (unsigned)
339 * long longs is returned in {{r0, r1}, {r2,r3}}, the quotient in {r0, r1},
340 * and the remainder in {r2, r3}. The return type is specifically left
341 * set to 'void' to ensure the compiler does not overwrite these registers
342 * during the return. All results are in registers as per ABI
343 */
/*
 * AEABI helper: unsigned 64/64 divide returning the quotient in
 * {r0, r1} and the remainder in {r2, r3}, per the ARM run-time ABI.
 */
void
__aeabi_uldivmod(uint64_t u, uint64_t v)
{
	uint64_t res;
	uint64_t mod;

	res = __udivdi3(u, v);
	mod = __umoddi3(u, v);
	{
		/*
		 * Pin the result halves to r0-r3; the empty volatile asm
		 * keeps the compiler from clobbering them before the
		 * (deliberately void) return hands them to the caller.
		 */
		register uint32_t r0 asm("r0") = (res & 0xFFFFFFFF);
		register uint32_t r1 asm("r1") = (res >> 32);
		register uint32_t r2 asm("r2") = (mod & 0xFFFFFFFF);
		register uint32_t r3 asm("r3") = (mod >> 32);

		/* BEGIN CSTYLED */
		asm volatile(""
		    : "+r"(r0), "+r"(r1), "+r"(r2),"+r"(r3)	/* output */
		    : "r"(r0), "r"(r1), "r"(r2), "r"(r3));	/* input */
		/* END CSTYLED */

		return; /* r0; */
	}
}
367 EXPORT_SYMBOL(__aeabi_uldivmod);
368
369 void
370 __aeabi_ldivmod(int64_t u, int64_t v)
371 {
372 int64_t res;
373 uint64_t mod;
374
375 res = __divdi3(u, v);
376 mod = __umoddi3(u, v);
377 {
378 register uint32_t r0 asm("r0") = (res & 0xFFFFFFFF);
379 register uint32_t r1 asm("r1") = (res >> 32);
380 register uint32_t r2 asm("r2") = (mod & 0xFFFFFFFF);
381 register uint32_t r3 asm("r3") = (mod >> 32);
382
383 /* BEGIN CSTYLED */
384 asm volatile(""
385 : "+r"(r0), "+r"(r1), "+r"(r2),"+r"(r3) /* output */
386 : "r"(r0), "r"(r1), "r"(r2), "r"(r3)); /* input */
387 /* END CSTYLED */
388
389 return; /* r0; */
390 }
391 }
392 EXPORT_SYMBOL(__aeabi_ldivmod);
393 #endif /* __arm || __arm__ */
394 #endif /* BITS_PER_LONG */
395
396 /*
397 * NOTE: The strtoxx behavior is solely based on my reading of the Solaris
398 * ddi_strtol(9F) man page. I have not verified the behavior of these
399 * functions against their Solaris counterparts. It is possible that I
400 * may have misinterpreted the man page or the man page is incorrect.
401 */
402 int ddi_strtoul(const char *, char **, int, unsigned long *);
403 int ddi_strtol(const char *, char **, int, long *);
404 int ddi_strtoull(const char *, char **, int, unsigned long long *);
405 int ddi_strtoll(const char *, char **, int, long long *);
406
/*
 * Parse an unsigned number from 'str' in 'base' (0 = auto-detect from a
 * "0x" hex or "0" octal prefix) into *result. Returns 0 on success,
 * EINVAL for an empty or unrecognized prefix, ERANGE on overflow. When
 * 'endptr' is non-NULL it is set to the first unconsumed character.
 *
 * Fixes vs. the original: the octal test compared the character to the
 * integer 8 ("str[1] < 8"), which is always false for digit characters,
 * so every "0nnn" input -- and the bare string "0" -- returned EINVAL
 * under base auto-detection. Compare against '7' and accept "0".
 */
#define	define_ddi_strtoux(type, valtype)				\
int ddi_strtou##type(const char *str, char **endptr,			\
    int base, valtype *result)						\
{									\
	valtype last_value, value = 0;					\
	char *ptr = (char *)str;					\
	int flag = 1, digit;						\
									\
	if (strlen(ptr) == 0)						\
		return (EINVAL);					\
									\
	/* Auto-detect base based on prefix */				\
	if (!base) {							\
		if (str[0] == '0') {					\
			if (tolower(str[1]) == 'x' && isxdigit(str[2])) { \
				base = 16; /* hex */			\
				ptr += 2;				\
			} else if (str[1] >= '0' && str[1] <= '7') {	\
				base = 8; /* octal */			\
				ptr += 1;				\
			} else if (str[1] == '\0') {			\
				base = 10; /* plain "0" */		\
			} else {					\
				return (EINVAL);			\
			}						\
		} else {						\
			base = 10; /* decimal */			\
		}							\
	}								\
									\
	while (1) {							\
		if (isdigit(*ptr))					\
			digit = *ptr - '0';				\
		else if (isalpha(*ptr))					\
			digit = tolower(*ptr) - 'a' + 10;		\
		else							\
			break;						\
									\
		if (digit >= base)					\
			break;						\
									\
		last_value = value;					\
		value = value * base + digit;				\
		if (last_value > value) /* Overflow */			\
			return (ERANGE);				\
									\
		flag = 1;						\
		ptr++;							\
	}								\
									\
	if (flag)							\
		*result = value;					\
									\
	if (endptr)							\
		*endptr = (char *)(flag ? ptr : str);			\
									\
	return (0);							\
}

/*
 * Signed wrapper: strip an optional leading '-', parse the magnitude
 * with the unsigned parser, then negate. If nothing was consumed after
 * a '-', *endptr is rewound to 'str' (no conversion).
 *
 * Fixes vs. the original: 'result' is now cast to the matching unsigned
 * pointer type (valtype is a plain keyword sequence, so "unsigned
 * valtype *" is well-formed), avoiding an incompatible-pointer-type
 * diagnostic that newer compilers treat as an error; and *endptr is no
 * longer dereferenced when the caller passed endptr == NULL.
 */
#define	define_ddi_strtox(type, valtype)				\
int ddi_strto##type(const char *str, char **endptr,			\
    int base, valtype *result)						\
{									\
	int rc;								\
									\
	if (*str == '-') {						\
		rc = ddi_strtou##type(str + 1, endptr, base,		\
		    (unsigned valtype *)result);			\
		if (!rc) {						\
			if (endptr != NULL && *endptr == str + 1)	\
				*endptr = (char *)str;			\
			else						\
				*result = -*result;			\
		}							\
	} else {							\
		rc = ddi_strtou##type(str, endptr, base,		\
		    (unsigned valtype *)result);			\
	}								\
									\
	return (rc);							\
}

define_ddi_strtoux(l, unsigned long)
define_ddi_strtox(l, long)
define_ddi_strtoux(ll, unsigned long long)
define_ddi_strtox(ll, long long)
489
490 EXPORT_SYMBOL(ddi_strtoul);
491 EXPORT_SYMBOL(ddi_strtol);
492 EXPORT_SYMBOL(ddi_strtoll);
493 EXPORT_SYMBOL(ddi_strtoull);
494
495 int
496 ddi_copyin(const void *from, void *to, size_t len, int flags)
497 {
498 /* Fake ioctl() issued by kernel, 'from' is a kernel address */
499 if (flags & FKIOCTL) {
500 memcpy(to, from, len);
501 return (0);
502 }
503
504 return (copyin(from, to, len));
505 }
506 EXPORT_SYMBOL(ddi_copyin);
507
508 int
509 ddi_copyout(const void *from, void *to, size_t len, int flags)
510 {
511 /* Fake ioctl() issued by kernel, 'from' is a kernel address */
512 if (flags & FKIOCTL) {
513 memcpy(to, from, len);
514 return (0);
515 }
516
517 return (copyout(from, to, len));
518 }
519 EXPORT_SYMBOL(ddi_copyout);
520
521 /*
522 * Read the unique system identifier from the /etc/hostid file.
523 *
524 * The behavior of /usr/bin/hostid on Linux systems with the
525 * regular eglibc and coreutils is:
526 *
527 * 1. Generate the value if the /etc/hostid file does not exist
528 * or if the /etc/hostid file is less than four bytes in size.
529 *
530 * 2. If the /etc/hostid file is at least 4 bytes, then return
531 * the first four bytes [0..3] in native endian order.
532 *
533 * 3. Always ignore bytes [4..] if they exist in the file.
534 *
535 * Only the first four bytes are significant, even on systems that
536 * have a 64-bit word size.
537 *
538 * See:
539 *
540 * eglibc: sysdeps/unix/sysv/linux/gethostid.c
541 * coreutils: src/hostid.c
542 *
543 * Notes:
544 *
545 * The /etc/hostid file on Solaris is a text file that often reads:
546 *
547 * # DO NOT EDIT
548 * "0123456789"
549 *
550 * Directly copying this file to Linux results in a constant
551 * hostid of 4f442023 because the default comment constitutes
552 * the first four bytes of the file.
553 *
554 */
555
556 char *spl_hostid_path = HW_HOSTID_PATH;
557 module_param(spl_hostid_path, charp, 0444);
558 MODULE_PARM_DESC(spl_hostid_path, "The system hostid file (/etc/hostid)");
559
/*
 * Read the first four bytes of spl_hostid_path into *hostid, mimicking
 * the eglibc/coreutils behavior described above. Returns 0 on success
 * or ENOENT/EINVAL/EIO on failure; *hostid is written only on success.
 */
static int
hostid_read(uint32_t *hostid)
{
	uint64_t size;
	struct _buf *file;
	uint32_t value = 0;
	int error;

	file = kobj_open_file(spl_hostid_path);
	if (file == (struct _buf *)-1)
		return (ENOENT);

	error = kobj_get_filesize(file, &size);
	if (error) {
		kobj_close_file(file);
		return (error);
	}

	/*
	 * Reject files too small to hold a hostid. sizeof (HW_HOSTID_MASK)
	 * is the width of the mask constant -- presumably 4 bytes, the
	 * significant portion of a hostid; confirm against the header.
	 */
	if (size < sizeof (HW_HOSTID_MASK)) {
		kobj_close_file(file);
		return (EINVAL);
	}

	/*
	 * Read directly into the variable like eglibc does.
	 * Short reads are okay; native behavior is preserved.
	 */
	error = kobj_read_file(file, (char *)&value, sizeof (value), 0);
	if (error < 0) {
		kobj_close_file(file);
		return (EIO);
	}

	/* Mask down to 32 bits like coreutils does. */
	*hostid = (value & HW_HOSTID_MASK);
	kobj_close_file(file);

	return (0);
}
599
600 /*
601 * Return the system hostid. Preferentially use the spl_hostid module option
602 * when set, otherwise use the value in the /etc/hostid file.
603 */
604 uint32_t
605 zone_get_hostid(void *zone)
606 {
607 uint32_t hostid;
608
609 ASSERT3P(zone, ==, NULL);
610
611 if (spl_hostid != 0)
612 return ((uint32_t)(spl_hostid & HW_HOSTID_MASK));
613
614 if (hostid_read(&hostid) == 0)
615 return (hostid);
616
617 return (0);
618 }
619 EXPORT_SYMBOL(zone_get_hostid);
620
/*
 * Bring up the kmem and vmem subsystems as a unit. On vmem failure the
 * already-initialized kmem layer is torn back down so the caller sees
 * all-or-nothing behavior. Returns 0 on success or the failing
 * subsystem's error code.
 */
static int
spl_kvmem_init(void)
{
	int error;

	error = spl_kmem_init();
	if (error != 0)
		return (error);

	error = spl_vmem_init();
	if (error != 0)
		spl_kmem_fini();

	return (error);
}
638
/*
 * We initialize the random number generator with 128 bits of entropy from the
 * system random number generator. In the improbable case that we have a zero
 * seed, we fallback to the system jiffies, unless it is also zero, in which
 * situation we use a preprogrammed seed. We step forward by 2^64 iterations to
 * initialize each of the per-cpu seeds so that the sequences generated on each
 * CPU are guaranteed to never overlap in practice.
 */
static void __init
spl_random_init(void)
{
	uint64_t s[2];
	int i;

	/* 128 bits of kernel entropy form the master seed. */
	get_random_bytes(s, sizeof (s));

	if (s[0] == 0 && s[1] == 0) {
		if (jiffies != 0) {
			s[0] = jiffies;
			s[1] = ~0 - jiffies;
		} else {
			/*
			 * "improbable seed" is 15 characters plus the NUL
			 * terminator: exactly the 16 bytes of s[].
			 */
			(void) memcpy(s, "improbable seed", sizeof (s));
		}
		/*
		 * NOTE(review): this printk() has no KERN_* level and no
		 * trailing newline -- consider KERN_WARNING "...\n".
		 */
		printk("SPL: get_random_bytes() returned 0 "
		    "when generating random seed. Setting initial seed to "
		    "0x%016llx%016llx.", cpu_to_be64(s[0]), cpu_to_be64(s[1]));
	}

	for_each_possible_cpu(i) {
		uint64_t *wordp = per_cpu(spl_pseudo_entropy, i);

		/* Each jump advances the master state by 2^64 draws. */
		spl_rand_jump(s);

		wordp[0] = s[0];
		wordp[1] = s[1];
	}
}
676
/* Tear down vmem then kmem -- the reverse of spl_kvmem_init(). */
static void
spl_kvmem_fini(void)
{
	spl_vmem_fini();
	spl_kmem_fini();
}
683
/*
 * Module entry point: bring up each SPL subsystem in dependency order.
 * On any failure, the subsystems already initialized are torn down in
 * reverse order through the cascading error labels, and the failing
 * subsystem's error code is returned to the module loader.
 */
static int __init
spl_init(void)
{
	int rc = 0;

	bzero(&p0, sizeof (proc_t));
	spl_random_init();

	if ((rc = spl_kvmem_init()))
		goto out1;

	if ((rc = spl_mutex_init()))
		goto out2;

	if ((rc = spl_rw_init()))
		goto out3;

	if ((rc = spl_tsd_init()))
		goto out4;

	if ((rc = spl_taskq_init()))
		goto out5;

	if ((rc = spl_kmem_cache_init()))
		goto out6;

	if ((rc = spl_vn_init()))
		goto out7;

	if ((rc = spl_proc_init()))
		goto out8;

	if ((rc = spl_kstat_init()))
		goto out9;

	if ((rc = spl_zlib_init()))
		goto out10;

	printk(KERN_NOTICE "SPL: Loaded module v%s-%s%s\n", SPL_META_VERSION,
	    SPL_META_RELEASE, SPL_DEBUG_STR);
	return (rc);

	/* Unwind: each label finalizes everything initialized after it. */
out10:
	spl_kstat_fini();
out9:
	spl_proc_fini();
out8:
	spl_vn_fini();
out7:
	spl_kmem_cache_fini();
out6:
	spl_taskq_fini();
out5:
	spl_tsd_fini();
out4:
	spl_rw_fini();
out3:
	spl_mutex_fini();
out2:
	spl_kvmem_fini();
out1:
	printk(KERN_NOTICE "SPL: Failed to Load Solaris Porting Layer "
	    "v%s-%s%s, rc = %d\n", SPL_META_VERSION, SPL_META_RELEASE,
	    SPL_DEBUG_STR, rc);

	return (rc);
}
751
/*
 * Module exit point: announce unload, then tear each subsystem down in
 * the exact reverse of the spl_init() bring-up order.
 */
static void __exit
spl_fini(void)
{
	printk(KERN_NOTICE "SPL: Unloaded module v%s-%s%s\n",
	    SPL_META_VERSION, SPL_META_RELEASE, SPL_DEBUG_STR);
	spl_zlib_fini();
	spl_kstat_fini();
	spl_proc_fini();
	spl_vn_fini();
	spl_kmem_cache_fini();
	spl_taskq_fini();
	spl_tsd_fini();
	spl_rw_fini();
	spl_mutex_fini();
	spl_kvmem_fini();
}
768
769 module_init(spl_init);
770 module_exit(spl_fini);
771
772 MODULE_DESCRIPTION("Solaris Porting Layer");
773 MODULE_AUTHOR(SPL_META_AUTHOR);
774 MODULE_LICENSE(SPL_META_LICENSE);
775 MODULE_VERSION(SPL_META_VERSION "-" SPL_META_RELEASE);