#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 * cpuset interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/cgroup.h>
#include <linux/mm.h>

#ifdef CONFIG_CPUSETS

extern int number_of_cpusets;	/* How many cpusets are defined in system? */

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern int cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask);
extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask);

static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	return number_of_cpusets <= 1 ||
		__cpuset_node_allowed_softwall(node, gfp_mask);
}

static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	return number_of_cpusets <= 1 ||
		__cpuset_node_allowed_hardwall(node, gfp_mask);
}

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed_softwall(zone_to_nid(z), gfp_mask);
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed_hardwall(zone_to_nid(z), gfp_mask);
}
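
/*
 * Illustrative sketch only (not part of this header): the wrappers above
 * short-circuit on number_of_cpusets <= 1, so a kernel with no
 * user-defined cpusets pays no out-of-line call. An allocator-style
 * zonelist scan would typically consult the softwall check per zone,
 * roughly:
 *
 *	struct zoneref *z;
 *	struct zone *zone;
 *
 *	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
 *		if (!cpuset_zone_allowed_softwall(zone, gfp_mask))
 *			continue;	// cpuset forbids this zone's node
 *		... try to allocate from this zone ...
 *	}
 *
 * See get_page_from_freelist() in mm/page_alloc.c for the real logic,
 * which additionally gates the check on an ALLOC_CPUSET flag.
 */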
54 | ||
55 | extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1, | |
56 | const struct task_struct *tsk2); | |
57 | ||
58 | #define cpuset_memory_pressure_bump() \ | |
59 | do { \ | |
60 | if (cpuset_memory_pressure_enabled) \ | |
61 | __cpuset_memory_pressure_bump(); \ | |
62 | } while (0) | |
63 | extern int cpuset_memory_pressure_enabled; | |
64 | extern void __cpuset_memory_pressure_bump(void); | |
65 | ||
66 | extern const struct file_operations proc_cpuset_operations; | |
67 | struct seq_file; | |
68 | extern void cpuset_task_status_allowed(struct seq_file *m, | |
69 | struct task_struct *task); | |
70 | ||
71 | extern int cpuset_mem_spread_node(void); | |
72 | ||
73 | static inline int cpuset_do_page_mem_spread(void) | |
74 | { | |
75 | return current->flags & PF_SPREAD_PAGE; | |
76 | } | |
77 | ||
78 | static inline int cpuset_do_slab_mem_spread(void) | |
79 | { | |
80 | return current->flags & PF_SPREAD_SLAB; | |
81 | } | |
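
/*
 * Illustrative sketch only, loosely modeled on __page_cache_alloc() in
 * mm/filemap.c: a caller that honors per-cpuset memory spreading asks
 * for the next spread node instead of allocating locally:
 *
 *	if (cpuset_do_page_mem_spread()) {
 *		int n = cpuset_mem_spread_node();
 *		return alloc_pages_exact_node(n, gfp, 0);
 *	}
 *	return alloc_pages(gfp, 0);
 */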
82 | ||
83 | extern int current_cpuset_is_being_rebound(void); | |
84 | ||
85 | extern void rebuild_sched_domains(void); | |
86 | ||
87 | extern void cpuset_print_task_mems_allowed(struct task_struct *p); | |
88 | ||
/*
 * Reading current's mems_allowed and mempolicy in the fast path must be
 * protected by get_mems_allowed().
 */
static inline void get_mems_allowed(void)
{
	current->mems_allowed_change_disable++;

	/*
	 * Ensure that reading mems_allowed and mempolicy happens after the
	 * update of ->mems_allowed_change_disable.
	 *
	 * If the write-side task finds ->mems_allowed_change_disable is not
	 * zero, it knows the read-side task is reading mems_allowed or
	 * mempolicy, so it will clear the old bits lazily.
	 */
	smp_mb();
}

static inline void put_mems_allowed(void)
{
	/*
	 * Ensure that reading mems_allowed and mempolicy happens before
	 * reducing mems_allowed_change_disable.
	 *
	 * The write-side task will then know that the read-side task is
	 * still reading mems_allowed or mempolicy, and won't clear the old
	 * bits in the nodemask.
	 */
	smp_mb();
	--ACCESS_ONCE(current->mems_allowed_change_disable);
}
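
/*
 * Illustrative sketch only: a fast-path reader brackets its use of
 * current->mems_allowed (or current->mempolicy) with the pair above,
 * e.g. when picking a node for an allocation:
 *
 *	get_mems_allowed();
 *	nid = cpuset_mem_spread_node();
 *	page = alloc_pages_exact_node(nid, gfp, order);
 *	put_mems_allowed();
 *
 * The writer updates the nodemask under task_lock() (see
 * set_mems_allowed() below) and, per the comments above, uses the
 * counter plus the barriers to decide whether it may clear the old
 * bits immediately or must leave them for the reader.
 */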
121 | ||
122 | static inline void set_mems_allowed(nodemask_t nodemask) | |
123 | { | |
124 | task_lock(current); | |
125 | current->mems_allowed = nodemask; | |
126 | task_unlock(current); | |
127 | } | |
128 | ||
129 | #else /* !CONFIG_CPUSETS */ | |
130 | ||
131 | static inline int cpuset_init(void) { return 0; } | |
132 | static inline void cpuset_init_smp(void) {} | |
133 | ||
134 | static inline void cpuset_cpus_allowed(struct task_struct *p, | |
135 | struct cpumask *mask) | |
136 | { | |
137 | cpumask_copy(mask, cpu_possible_mask); | |
138 | } | |
139 | ||
140 | static inline int cpuset_cpus_allowed_fallback(struct task_struct *p) | |
141 | { | |
142 | cpumask_copy(&p->cpus_allowed, cpu_possible_mask); | |
143 | return cpumask_any(cpu_active_mask); | |
144 | } | |
145 | ||
146 | static inline nodemask_t cpuset_mems_allowed(struct task_struct *p) | |
147 | { | |
148 | return node_possible_map; | |
149 | } | |
150 | ||
151 | #define cpuset_current_mems_allowed (node_states[N_HIGH_MEMORY]) | |
152 | static inline void cpuset_init_current_mems_allowed(void) {} | |
153 | ||
154 | static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask) | |
155 | { | |
156 | return 1; | |
157 | } | |
158 | ||
159 | static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask) | |
160 | { | |
161 | return 1; | |
162 | } | |
163 | ||
164 | static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask) | |
165 | { | |
166 | return 1; | |
167 | } | |
168 | ||
169 | static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask) | |
170 | { | |
171 | return 1; | |
172 | } | |
173 | ||
174 | static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask) | |
175 | { | |
176 | return 1; | |
177 | } | |
178 | ||
179 | static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1, | |
180 | const struct task_struct *tsk2) | |
181 | { | |
182 | return 1; | |
183 | } | |
184 | ||
185 | static inline void cpuset_memory_pressure_bump(void) {} | |
186 | ||
187 | static inline void cpuset_task_status_allowed(struct seq_file *m, | |
188 | struct task_struct *task) | |
189 | { | |
190 | } | |
191 | ||
192 | static inline int cpuset_mem_spread_node(void) | |
193 | { | |
194 | return 0; | |
195 | } | |
196 | ||
197 | static inline int cpuset_do_page_mem_spread(void) | |
198 | { | |
199 | return 0; | |
200 | } | |
201 | ||
202 | static inline int cpuset_do_slab_mem_spread(void) | |
203 | { | |
204 | return 0; | |
205 | } | |
206 | ||
207 | static inline int current_cpuset_is_being_rebound(void) | |
208 | { | |
209 | return 0; | |
210 | } | |
211 | ||
212 | static inline void rebuild_sched_domains(void) | |
213 | { | |
214 | partition_sched_domains(1, NULL, NULL); | |
215 | } | |
216 | ||
217 | static inline void cpuset_print_task_mems_allowed(struct task_struct *p) | |
218 | { | |
219 | } | |
220 | ||
221 | static inline void set_mems_allowed(nodemask_t nodemask) | |
222 | { | |
223 | } | |
224 | ||
225 | static inline void get_mems_allowed(void) | |
226 | { | |
227 | } | |
228 | ||
229 | static inline void put_mems_allowed(void) | |
230 | { | |
231 | } | |
232 | ||
233 | #endif /* !CONFIG_CPUSETS */ | |
234 | ||
235 | #endif /* _LINUX_CPUSET_H */ |