/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#ifndef __XFS_SUPPORT_KMEM_H__
#define __XFS_SUPPORT_KMEM_H__

#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>

/*
 * Process flags handling
 */

#define PFLAGS_TEST_NOIO()		(current->flags & PF_NOIO)
#define PFLAGS_TEST_FSTRANS()		(current->flags & PF_FSTRANS)

#define PFLAGS_SET_NOIO() do {		\
	current->flags |= PF_NOIO;	\
} while (0)

#define PFLAGS_CLEAR_NOIO() do {	\
	current->flags &= ~PF_NOIO;	\
} while (0)

/* these could be nested, so we save state */
#define PFLAGS_SET_FSTRANS(STATEP) do {	\
	*(STATEP) = current->flags;	\
	current->flags |= PF_FSTRANS;	\
} while (0)

#define PFLAGS_CLEAR_FSTRANS(STATEP) do { \
	*(STATEP) = current->flags;	\
	current->flags &= ~PF_FSTRANS;	\
} while (0)

/* Restore the PF_FSTRANS state to what was saved in STATEP */
#define PFLAGS_RESTORE_FSTRANS(STATEP) do {	\
	current->flags = ((current->flags & ~PF_FSTRANS) |	\
			  (*(STATEP) & PF_FSTRANS));		\
} while (0)

#define PFLAGS_DUP(OSTATEP, NSTATEP) do { \
	*(NSTATEP) = *(OSTATEP);	\
} while (0)

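/*
 * Usage sketch (illustrative only, not part of this header): transaction
 * contexts can nest, so callers save the previous flag word and restore
 * it afterwards rather than unconditionally clearing PF_FSTRANS:
 *
 *	unsigned long	pflags;		(matches the type of current->flags)
 *
 *	PFLAGS_SET_FSTRANS(&pflags);
 *	... allocations here get __GFP_FS stripped by kmem_flags_convert() ...
 *	PFLAGS_RESTORE_FSTRANS(&pflags);
 */
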
/*
 * General memory allocation interfaces
 */

#define KM_SLEEP	0x0001u
#define KM_NOSLEEP	0x0002u
#define KM_NOFS		0x0004u
#define KM_MAYFAIL	0x0008u

/*
 * We use a special process flag to avoid recursive callbacks into
 * the filesystem during transactions.  We will also issue our own
 * warnings, so we explicitly skip any generic ones (silly of us).
 */
static inline gfp_t
kmem_flags_convert(unsigned int __nocast flags)
{
	gfp_t	lflags;

	BUG_ON(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL));

	if (flags & KM_NOSLEEP) {
		lflags = GFP_ATOMIC | __GFP_NOWARN;
	} else {
		lflags = GFP_KERNEL | __GFP_NOWARN;
		if (PFLAGS_TEST_FSTRANS() || (flags & KM_NOFS))
			lflags &= ~__GFP_FS;
	}
	return lflags;
}

extern void *kmem_alloc(size_t, unsigned int __nocast);
extern void *kmem_realloc(void *, size_t, size_t, unsigned int __nocast);
extern void *kmem_zalloc(size_t, unsigned int __nocast);
extern void  kmem_free(void *, size_t);

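/*
 * Usage sketch (illustrative only, not part of this header): callers pass
 * KM_* flags, not GFP_* flags; kmem_flags_convert() above does the
 * translation.  The allocation size is passed back to kmem_free():
 *
 *	void	*buf;
 *
 *	buf = kmem_zalloc(size, KM_SLEEP | KM_MAYFAIL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	kmem_free(buf, size);
 */
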
/*
 * Zone interfaces
 */

#define KM_ZONE_HWALIGN	SLAB_HWCACHE_ALIGN
#define KM_ZONE_RECLAIM	SLAB_RECLAIM_ACCOUNT
#define KM_ZONE_SPREAD	0

#define kmem_zone	kmem_cache
#define kmem_zone_t	struct kmem_cache

static inline kmem_zone_t *
kmem_zone_init(int size, char *zone_name)
{
	return kmem_cache_create(zone_name, size, 0, 0, NULL, NULL);
}

static inline kmem_zone_t *
kmem_zone_init_flags(int size, char *zone_name, unsigned long flags,
		     void (*construct)(void *, kmem_zone_t *, unsigned long))
{
	return kmem_cache_create(zone_name, size, 0, flags, construct, NULL);
}

static inline void
kmem_zone_free(kmem_zone_t *zone, void *ptr)
{
	kmem_cache_free(zone, ptr);
}

static inline void
kmem_zone_destroy(kmem_zone_t *zone)
{
	if (zone && kmem_cache_destroy(zone))
		BUG();
}

extern void *kmem_zone_alloc(kmem_zone_t *, unsigned int __nocast);
extern void *kmem_zone_zalloc(kmem_zone_t *, unsigned int __nocast);

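/*
 * Usage sketch (illustrative only, not part of this header), using a
 * hypothetical "xfs_foo" structure to show the typical zone lifecycle:
 *
 *	kmem_zone_t	*foo_zone;
 *	struct xfs_foo	*fp;
 *
 *	foo_zone = kmem_zone_init(sizeof(struct xfs_foo), "xfs_foo");
 *	...
 *	fp = kmem_zone_zalloc(foo_zone, KM_SLEEP);
 *	...
 *	kmem_zone_free(foo_zone, fp);
 *	...
 *	kmem_zone_destroy(foo_zone);
 */
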
/*
 * Low memory cache shrinkers
 */

typedef struct shrinker *kmem_shaker_t;
typedef int (*kmem_shake_func_t)(int, gfp_t);

static inline kmem_shaker_t
kmem_shake_register(kmem_shake_func_t sfunc)
{
	return set_shrinker(DEFAULT_SEEKS, sfunc);
}

static inline void
kmem_shake_deregister(kmem_shaker_t shrinker)
{
	remove_shrinker(shrinker);
}

static inline int
kmem_shake_allow(gfp_t gfp_mask)
{
	return (gfp_mask & __GFP_WAIT);
}

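/*
 * Usage sketch (illustrative only, not part of this header): a shaker
 * callback has the kmem_shake_func_t signature; the hypothetical
 * xfs_foo_shake() below checks kmem_shake_allow() so it only does work
 * when the caller's gfp_mask permits blocking:
 *
 *	static int
 *	xfs_foo_shake(int nr_to_scan, gfp_t gfp_mask)
 *	{
 *		if (!kmem_shake_allow(gfp_mask))
 *			return 0;
 *		... trim up to nr_to_scan objects, return the count still cached ...
 *	}
 *
 *	kmem_shaker_t	shaker = kmem_shake_register(xfs_foo_shake);
 *	...
 *	kmem_shake_deregister(shaker);
 */
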
#endif /* __XFS_SUPPORT_KMEM_H__ */