]> git.proxmox.com Git - mirror_spl.git/blob - include/sys/kmem.h
Linux 4.9 compat: group_info changes
[mirror_spl.git] / include / sys / kmem.h
1 /*
2 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
3 * Copyright (C) 2007 The Regents of the University of California.
4 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
5 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
6 * UCRL-CODE-235197
7 *
8 * This file is part of the SPL, Solaris Porting Layer.
9 * For details, see <http://zfsonlinux.org/>.
10 *
11 * The SPL is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 *
16 * The SPL is distributed in the hope that it will be useful, but WITHOUT
17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 * for more details.
20 *
21 * You should have received a copy of the GNU General Public License along
22 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
23 */
24
25 #ifndef _SPL_KMEM_H
26 #define _SPL_KMEM_H
27
28 #include <sys/debug.h>
29 #include <linux/slab.h>
30 #include <linux/sched.h>
31
32 extern int kmem_debugging(void);
33 extern char *kmem_vasprintf(const char *fmt, va_list ap);
34 extern char *kmem_asprintf(const char *fmt, ...);
35 extern char *strdup(const char *str);
36 extern void strfree(char *str);
37
38 /*
39 * Memory allocation interfaces
40 */
41 #define KM_SLEEP 0x0000 /* can block for memory; success guaranteed */
42 #define KM_NOSLEEP 0x0001 /* cannot block for memory; may fail */
43 #define KM_PUSHPAGE 0x0004 /* can block for memory; may use reserve */
44 #define KM_ZERO 0x1000 /* zero the allocation */
45 #define KM_VMEM 0x2000 /* caller is vmem_* wrapper */
46
47 #define KM_PUBLIC_MASK (KM_SLEEP | KM_NOSLEEP | KM_PUSHPAGE)
48
49 /*
50 * Convert a KM_* flags mask to its Linux GFP_* counterpart. The conversion
51 * function is context aware which means that KM_SLEEP allocations can be
52 * safely used in syncing contexts which have set PF_FSTRANS.
53 */
54 static inline gfp_t
55 kmem_flags_convert(int flags)
56 {
57 gfp_t lflags = __GFP_NOWARN | __GFP_COMP;
58
59 if (flags & KM_NOSLEEP) {
60 lflags |= GFP_ATOMIC | __GFP_NORETRY;
61 } else {
62 lflags |= GFP_KERNEL;
63 if ((current->flags & PF_FSTRANS))
64 lflags &= ~(__GFP_IO|__GFP_FS);
65 }
66
67 if (flags & KM_PUSHPAGE)
68 lflags |= __GFP_HIGH;
69
70 if (flags & KM_ZERO)
71 lflags |= __GFP_ZERO;
72
73 return (lflags);
74 }
75
/*
 * Cookie returned by spl_fstrans_mark() and consumed by
 * spl_fstrans_unmark().  It records which task performed the mark and
 * which SPL_FSTRANS flag bits that task already had set, so the unmark
 * can restore the task's flags exactly.
 */
76 typedef struct {
77 struct task_struct *fstrans_thread; /* task that called mark */
78 unsigned int saved_flags; /* prior SPL_FSTRANS bits of that task */
79 } fstrans_cookie_t;
80
81 #ifdef PF_MEMALLOC_NOIO
82 #define SPL_FSTRANS (PF_FSTRANS|PF_MEMALLOC_NOIO)
83 #else
84 #define SPL_FSTRANS (PF_FSTRANS)
85 #endif
86
87 static inline fstrans_cookie_t
88 spl_fstrans_mark(void)
89 {
90 fstrans_cookie_t cookie;
91
92 cookie.fstrans_thread = current;
93 cookie.saved_flags = current->flags & SPL_FSTRANS;
94 current->flags |= SPL_FSTRANS;
95
96 return (cookie);
97 }
98
99 static inline void
100 spl_fstrans_unmark(fstrans_cookie_t cookie)
101 {
102 ASSERT3P(cookie.fstrans_thread, ==, current);
103 ASSERT((current->flags & SPL_FSTRANS) == SPL_FSTRANS);
104
105 current->flags &= ~SPL_FSTRANS;
106 current->flags |= cookie.saved_flags;
107 }
108
109 static inline int
110 spl_fstrans_check(void)
111 {
112 return (current->flags & PF_FSTRANS);
113 }
114
115 #ifdef HAVE_ATOMIC64_T
116 #define kmem_alloc_used_add(size) atomic64_add(size, &kmem_alloc_used)
117 #define kmem_alloc_used_sub(size) atomic64_sub(size, &kmem_alloc_used)
118 #define kmem_alloc_used_read() atomic64_read(&kmem_alloc_used)
119 #define kmem_alloc_used_set(size) atomic64_set(&kmem_alloc_used, size)
120 extern atomic64_t kmem_alloc_used;
121 extern unsigned long long kmem_alloc_max;
122 #else /* HAVE_ATOMIC64_T */
123 #define kmem_alloc_used_add(size) atomic_add(size, &kmem_alloc_used)
124 #define kmem_alloc_used_sub(size) atomic_sub(size, &kmem_alloc_used)
125 #define kmem_alloc_used_read() atomic_read(&kmem_alloc_used)
126 #define kmem_alloc_used_set(size) atomic_set(&kmem_alloc_used, size)
127 extern atomic_t kmem_alloc_used;
128 extern unsigned long long kmem_alloc_max;
129 #endif /* HAVE_ATOMIC64_T */
130
131 extern unsigned int spl_kmem_alloc_warn;
132 extern unsigned int spl_kmem_alloc_max;
133
134 #define kmem_alloc(sz, fl) spl_kmem_alloc((sz), (fl), __func__, __LINE__)
135 #define kmem_zalloc(sz, fl) spl_kmem_zalloc((sz), (fl), __func__, __LINE__)
136 #define kmem_free(ptr, sz) spl_kmem_free((ptr), (sz))
137
138 extern void *spl_kmem_alloc(size_t sz, int fl, const char *func, int line);
139 extern void *spl_kmem_zalloc(size_t sz, int fl, const char *func, int line);
140 extern void spl_kmem_free(const void *ptr, size_t sz);
141
142 /*
143 * The following functions are only available for internal use.
144 */
145 extern void *spl_kmem_alloc_impl(size_t size, int flags, int node);
146 extern void *spl_kmem_alloc_debug(size_t size, int flags, int node);
147 extern void *spl_kmem_alloc_track(size_t size, int flags,
148 const char *func, int line, int node);
149 extern void spl_kmem_free_impl(const void *buf, size_t size);
150 extern void spl_kmem_free_debug(const void *buf, size_t size);
151 extern void spl_kmem_free_track(const void *buf, size_t size);
152
153 extern int spl_kmem_init(void);
154 extern void spl_kmem_fini(void);
155
156 #endif /* _SPL_KMEM_H */