/*
 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 * Copyright (C) 2007 The Regents of the University of California.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 * UCRL-CODE-235197
 *
 * This file is part of the SPL, Solaris Porting Layer.
 * For details, see <http://zfsonlinux.org/>.
 *
 * The SPL is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * The SPL is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef _SPL_KMEM_H
#define _SPL_KMEM_H

#include <sys/debug.h>
#include <linux/slab.h>
#include <linux/sched.h>

extern int kmem_debugging(void);
extern char *kmem_vasprintf(const char *fmt, va_list ap);
extern char *kmem_asprintf(const char *fmt, ...);
extern char *strdup(const char *str);
extern void strfree(char *str);

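/*
 * Strings returned by kmem_asprintf(), kmem_vasprintf() and strdup() are
 * dynamically allocated and are expected to be released with strfree().
 * Illustrative sketch:
 *
 *      char *s = kmem_asprintf("name-%d", id);
 *      ...
 *      strfree(s);
 */
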
/*
 * Memory allocation interfaces
 */
#define KM_SLEEP        0x0000  /* can block for memory; success guaranteed */
#define KM_NOSLEEP      0x0001  /* cannot block for memory; may fail */
#define KM_PUSHPAGE     0x0004  /* can block for memory; may use reserve */
#define KM_ZERO         0x1000  /* zero the allocation */
#define KM_VMEM         0x2000  /* caller is vmem_* wrapper */

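/*
 * KM_PUBLIC_MASK covers the flags callers are expected to pass directly;
 * KM_ZERO and KM_VMEM are set internally by the *_zalloc() and vmem_*()
 * wrappers respectively (see the flag comments above).
 */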
#define KM_PUBLIC_MASK  (KM_SLEEP | KM_NOSLEEP | KM_PUSHPAGE)

/*
 * Convert a KM_* flags mask to its Linux GFP_* counterpart. The conversion
 * is context aware, which means that KM_SLEEP allocations can be used
 * safely in syncing contexts which have set PF_FSTRANS: for such tasks the
 * __GFP_IO and __GFP_FS bits are cleared so the allocation cannot recurse
 * back into the filesystem.
 */
static inline gfp_t
kmem_flags_convert(int flags)
{
        gfp_t lflags = __GFP_NOWARN | __GFP_COMP;

        if (flags & KM_NOSLEEP) {
                lflags |= GFP_ATOMIC | __GFP_NORETRY;
        } else {
                lflags |= GFP_KERNEL;
                if ((current->flags & PF_FSTRANS))
                        lflags &= ~(__GFP_IO|__GFP_FS);
        }

        if (flags & KM_PUSHPAGE)
                lflags |= __GFP_HIGH;

        if (flags & KM_ZERO)
                lflags |= __GFP_ZERO;

        return (lflags);
}

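/*
 * For reference, derived from kmem_flags_convert() above: KM_SLEEP maps to
 * GFP_KERNEL | __GFP_NOWARN | __GFP_COMP (minus __GFP_IO and __GFP_FS when
 * PF_FSTRANS is set), while KM_NOSLEEP maps to GFP_ATOMIC | __GFP_NORETRY |
 * __GFP_NOWARN | __GFP_COMP.
 */
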
typedef struct {
        struct task_struct *fstrans_thread;
        unsigned int saved_flags;
} fstrans_cookie_t;

static inline fstrans_cookie_t
spl_fstrans_mark(void)
{
        fstrans_cookie_t cookie;

        cookie.fstrans_thread = current;
        cookie.saved_flags = current->flags & PF_FSTRANS;
        current->flags |= PF_FSTRANS;

        return (cookie);
}

static inline void
spl_fstrans_unmark(fstrans_cookie_t cookie)
{
        ASSERT3P(cookie.fstrans_thread, ==, current);
        ASSERT(current->flags & PF_FSTRANS);

        current->flags &= ~(PF_FSTRANS);
        current->flags |= cookie.saved_flags;
}

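/*
 * Typical usage (illustrative sketch): mark the task before entering a
 * filesystem transaction so that KM_SLEEP allocations cannot recurse back
 * into the filesystem via direct reclaim, then restore the previous state:
 *
 *      fstrans_cookie_t cookie = spl_fstrans_mark();
 *      ... allocate and perform the transaction work ...
 *      spl_fstrans_unmark(cookie);
 */
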
static inline int
spl_fstrans_check(void)
{
        return (current->flags & PF_FSTRANS);
}

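/*
 * Counters used by the optional kmem allocation accounting: kmem_alloc_used
 * tracks the number of bytes currently allocated and kmem_alloc_max records
 * the observed high-water mark. On kernels without atomic64_t the counters
 * fall back to the 32-bit atomic_t operations below.
 */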
#ifdef HAVE_ATOMIC64_T
#define kmem_alloc_used_add(size)       atomic64_add(size, &kmem_alloc_used)
#define kmem_alloc_used_sub(size)       atomic64_sub(size, &kmem_alloc_used)
#define kmem_alloc_used_read()          atomic64_read(&kmem_alloc_used)
#define kmem_alloc_used_set(size)       atomic64_set(&kmem_alloc_used, size)
extern atomic64_t kmem_alloc_used;
extern unsigned long long kmem_alloc_max;
#else /* HAVE_ATOMIC64_T */
#define kmem_alloc_used_add(size)       atomic_add(size, &kmem_alloc_used)
#define kmem_alloc_used_sub(size)       atomic_sub(size, &kmem_alloc_used)
#define kmem_alloc_used_read()          atomic_read(&kmem_alloc_used)
#define kmem_alloc_used_set(size)       atomic_set(&kmem_alloc_used, size)
extern atomic_t kmem_alloc_used;
extern unsigned long long kmem_alloc_max;
#endif /* HAVE_ATOMIC64_T */

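/*
 * Tunables used to warn about and cap unusually large kmem allocations;
 * the exact policy lives in spl-kmem.c, where both values are also exposed
 * as module parameters.
 */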
extern unsigned int spl_kmem_alloc_warn;
extern unsigned int spl_kmem_alloc_max;

#define kmem_alloc(sz, fl)      spl_kmem_alloc((sz), (fl), __func__, __LINE__)
#define kmem_zalloc(sz, fl)     spl_kmem_zalloc((sz), (fl), __func__, __LINE__)
#define kmem_free(ptr, sz)      spl_kmem_free((ptr), (sz))

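/*
 * Example (illustrative): unlike kfree(), kmem_free() requires the caller
 * to pass the size of the original allocation.
 *
 *      void *buf = kmem_zalloc(size, KM_SLEEP);
 *      ...
 *      kmem_free(buf, size);
 */
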
extern void *spl_kmem_alloc(size_t sz, int fl, const char *func, int line);
extern void *spl_kmem_zalloc(size_t sz, int fl, const char *func, int line);
extern void spl_kmem_free(const void *ptr, size_t sz);

/*
 * The following functions are only available for internal use.
 */
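/*
 * Note: spl_kmem_alloc_impl() performs the underlying allocation, while the
 * _debug and _track variants are expected to layer allocation accounting and
 * per-allocation tracking on top of it when the SPL is built with kmem
 * debugging enabled (see spl-kmem.c for the details).
 */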
extern void *spl_kmem_alloc_impl(size_t size, int flags, int node);
extern void *spl_kmem_alloc_debug(size_t size, int flags, int node);
extern void *spl_kmem_alloc_track(size_t size, int flags,
    const char *func, int line, int node);
extern void spl_kmem_free_impl(const void *buf, size_t size);
extern void spl_kmem_free_debug(const void *buf, size_t size);
extern void spl_kmem_free_track(const void *buf, size_t size);

extern int spl_kmem_init(void);
extern void spl_kmem_fini(void);

#endif /* _SPL_KMEM_H */