/* NOTE(review): a git-blame table header ("Commit | Line | Data") was fused
 * into this file by a faulty extraction; removed as a non-source artifact. */
1 | #ifndef _LINUX_CPUSET_H |
2 | #define _LINUX_CPUSET_H | |
3 | /* | |
4 | * cpuset interface | |
5 | * | |
6 | * Copyright (C) 2003 BULL SA | |
825a46af | 7 | * Copyright (C) 2004-2006 Silicon Graphics, Inc. |
1da177e4 LT |
8 | * |
9 | */ | |
10 | ||
11 | #include <linux/sched.h> | |
12 | #include <linux/cpumask.h> | |
13 | #include <linux/nodemask.h> | |
a1bc5a4e | 14 | #include <linux/mm.h> |
1da177e4 LT |
15 | |
16 | #ifdef CONFIG_CPUSETS | |
17 | ||
202f72d5 PJ |
18 | extern int number_of_cpusets; /* How many cpusets are defined in system? */ |
19 | ||
1da177e4 LT |
20 | extern int cpuset_init(void); |
21 | extern void cpuset_init_smp(void); | |
7ddf96b0 | 22 | extern void cpuset_update_active_cpus(bool cpu_online); |
6af866af | 23 | extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask); |
2baab4e9 | 24 | extern void cpuset_cpus_allowed_fallback(struct task_struct *p); |
909d75a3 | 25 | extern nodemask_t cpuset_mems_allowed(struct task_struct *p); |
9276b1bc | 26 | #define cpuset_current_mems_allowed (current->mems_allowed) |
1da177e4 | 27 | void cpuset_init_current_mems_allowed(void); |
19770b32 | 28 | int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask); |
202f72d5 | 29 | |
a1bc5a4e DR |
30 | extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask); |
31 | extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask); | |
02a0e53d | 32 | |
/*
 * Softwall node-allowed check.  With at most the root cpuset present
 * (number_of_cpusets <= 1) every node is allowed, so the short-circuit
 * avoids the out-of-line __cpuset_node_allowed_softwall() helper entirely.
 */
static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	return number_of_cpusets <= 1 ||
		__cpuset_node_allowed_softwall(node, gfp_mask);
}
38 | ||
/*
 * Hardwall node-allowed check: same fast-path short-circuit as the
 * softwall variant, deferring to the out-of-line helper only when more
 * than the root cpuset exists.
 */
static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	return number_of_cpusets <= 1 ||
		__cpuset_node_allowed_hardwall(node, gfp_mask);
}
44 | ||
45 | static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask) | |
46 | { | |
47 | return cpuset_node_allowed_softwall(zone_to_nid(z), gfp_mask); | |
48 | } | |
49 | ||
50 | static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask) | |
51 | { | |
52 | return cpuset_node_allowed_hardwall(zone_to_nid(z), gfp_mask); | |
202f72d5 PJ |
53 | } |
54 | ||
bbe373f2 DR |
55 | extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1, |
56 | const struct task_struct *tsk2); | |
3e0d98b9 PJ |
57 | |
/*
 * Account one memory-pressure event against the current task's cpuset,
 * but only when the feature is enabled; the do/while(0) wrapper keeps
 * the macro safe to use as a single statement (e.g. in an unbraced if).
 */
#define cpuset_memory_pressure_bump() 				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
63 | extern int cpuset_memory_pressure_enabled; | |
64 | extern void __cpuset_memory_pressure_bump(void); | |
65 | ||
df5f8314 EB |
66 | extern void cpuset_task_status_allowed(struct seq_file *m, |
67 | struct task_struct *task); | |
8d8b97ba | 68 | extern int proc_cpuset_show(struct seq_file *, void *); |
1da177e4 | 69 | |
825a46af | 70 | extern int cpuset_mem_spread_node(void); |
6adef3eb | 71 | extern int cpuset_slab_spread_node(void); |
825a46af PJ |
72 | |
/* Nonzero iff PF_SPREAD_PAGE is set on the current task (page-spread
 * policy inherited from its cpuset). */
static inline int cpuset_do_page_mem_spread(void)
{
	return current->flags & PF_SPREAD_PAGE;
}

/* Nonzero iff PF_SPREAD_SLAB is set on the current task. */
static inline int cpuset_do_slab_mem_spread(void)
{
	return current->flags & PF_SPREAD_SLAB;
}
82 | ||
8793d854 PM |
83 | extern int current_cpuset_is_being_rebound(void); |
84 | ||
e761b772 MK |
85 | extern void rebuild_sched_domains(void); |
86 | ||
75aa1994 DR |
87 | extern void cpuset_print_task_mems_allowed(struct task_struct *p); |
88 | ||
c0ff7453 | 89 | /* |
d26914d1 MG |
90 | * read_mems_allowed_begin is required when making decisions involving |
91 | * mems_allowed such as during page allocation. mems_allowed can be updated in | |
92 | * parallel and depending on the new value an operation can fail potentially | |
93 | * causing process failure. A retry loop with read_mems_allowed_begin and | |
94 | * read_mems_allowed_retry prevents these artificial failures. | |
c0ff7453 | 95 | */ |
/*
 * Snapshot the mems_allowed seqcount for the current task.  Pair with
 * read_mems_allowed_retry() to detect a concurrent mems_allowed update
 * (see the retry-loop comment above).
 */
static inline unsigned int read_mems_allowed_begin(void)
{
	return read_seqcount_begin(&current->mems_allowed_seq);
}
100 | ||
cc9a6c87 | 101 | /* |
d26914d1 MG |
102 | * If this returns true, the operation that took place after |
103 | * read_mems_allowed_begin may have failed artificially due to a concurrent | |
104 | * update of mems_allowed. It is up to the caller to retry the operation if | |
cc9a6c87 MG |
105 | * appropriate. |
106 | */ | |
/*
 * True if mems_allowed changed since the matching
 * read_mems_allowed_begin(); the caller should then retry the operation
 * that may have failed artificially.
 */
static inline bool read_mems_allowed_retry(unsigned int seq)
{
	return read_seqcount_retry(&current->mems_allowed_seq, seq);
}
111 | ||
58568d2a MX |
/*
 * Install a new mems_allowed mask on the current task.
 *
 * The write is bracketed by task_lock(), local_irq_save() and the
 * mems_allowed_seq write section so that readers using
 * read_mems_allowed_begin()/read_mems_allowed_retry() observe a
 * consistent mask.  The acquisition order here is load-bearing —
 * do not reorder these calls.
 */
static inline void set_mems_allowed(nodemask_t nodemask)
{
	unsigned long flags;

	task_lock(current);
	local_irq_save(flags);
	write_seqcount_begin(&current->mems_allowed_seq);
	current->mems_allowed = nodemask;
	write_seqcount_end(&current->mems_allowed_seq);
	local_irq_restore(flags);
	task_unlock(current);
}
124 | ||
1da177e4 LT |
125 | #else /* !CONFIG_CPUSETS */ |
126 | ||
/* !CONFIG_CPUSETS: initialization is a no-op that reports success. */
static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}
1da177e4 | 129 | |
/* Without cpusets, a CPU hotplug event just rebuilds the single default
 * sched domain. */
static inline void cpuset_update_active_cpus(bool cpu_online)
{
	partition_sched_domains(1, NULL, NULL);
}
134 | ||
6af866af LZ |
/* Without cpusets, every task may run on any possible CPU. */
static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, cpu_possible_mask);
}
140 | ||
/* No cpuset-based fallback mask exists when cpusets are compiled out. */
static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
{
}
144 | ||
909d75a3 PJ |
/* Without cpusets, all possible memory nodes are allowed. */
static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}
149 | ||
/* Without cpusets, the allowed set is simply "every node with memory". */
#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

/* Any nodemask intersects the (unrestricted) allowed set. */
static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}
157 | ||
a1bc5a4e DR |
/*
 * !CONFIG_CPUSETS: no cpuset can restrict allocation, so every node and
 * zone passes both the softwall and the hardwall check.
 */
static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}
177 | ||
bbe373f2 DR |
/* Without cpusets, any two tasks' allowed node masks always intersect. */
static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}
183 | ||
3e0d98b9 PJ |
184 | static inline void cpuset_memory_pressure_bump(void) {} |
185 | ||
df5f8314 EB |
/* No cpuset status to emit when cpusets are compiled out. */
static inline void cpuset_task_status_allowed(struct seq_file *m,
					      struct task_struct *task)
{
}
190 | ||
825a46af PJ |
/*
 * !CONFIG_CPUSETS memory-spread stubs: spreading is disabled and the
 * spread node is always 0.  Written in the same one-line stub style the
 * file already uses for cpuset_init() above.
 */
static inline int cpuset_mem_spread_node(void) { return 0; }
static inline int cpuset_slab_spread_node(void) { return 0; }
static inline int cpuset_do_page_mem_spread(void) { return 0; }
static inline int cpuset_do_slab_mem_spread(void) { return 0; }
210 | ||
8793d854 PM |
/* A rebind can never be in progress when cpusets are compiled out. */
static inline int current_cpuset_is_being_rebound(void) { return 0; }
215 | ||
e761b772 MK |
/* Without cpusets, collapse to the single default sched domain. */
static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}
220 | ||
75aa1994 DR |
/* Nothing cpuset-related to print without cpusets. */
static inline void cpuset_print_task_mems_allowed(struct task_struct *p)
{
}
224 | ||
58568d2a MX |
/* mems_allowed is fixed without cpusets; nothing to store. */
static inline void set_mems_allowed(nodemask_t nodemask)
{
}
228 | ||
/*
 * Without cpusets mems_allowed never changes, so the seqcount protocol
 * degenerates: begin() hands back a dummy cookie and retry() never asks
 * the caller to loop.
 */
static inline unsigned int read_mems_allowed_begin(void) { return 0; }
static inline bool read_mems_allowed_retry(unsigned int seq) { return false; }
238 | ||
1da177e4 LT |
239 | #endif /* !CONFIG_CPUSETS */ |
240 | ||
241 | #endif /* _LINUX_CPUSET_H */ |