#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 * cpuset interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/sched/task.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/jump_label.h>

#ifdef CONFIG_CPUSETS

extern struct static_key_false cpusets_enabled_key;
static inline bool cpusets_enabled(void)
{
	return static_branch_unlikely(&cpusets_enabled_key);
}

static inline int nr_cpusets(void)
{
	/* jump label reference count + the top-level cpuset */
	return static_key_count(&cpusets_enabled_key.key) + 1;
}
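
/*
 * Worked example: cpuset_inc() below is bumped once per non-root cpuset,
 * so with two child cpusets static_key_count() is 2 and nr_cpusets()
 * returns 3 (the children plus the top-level cpuset).
 */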

static inline void cpuset_inc(void)
{
	static_branch_inc(&cpusets_enabled_key);
}

static inline void cpuset_dec(void)
{
	static_branch_dec(&cpusets_enabled_key);
}
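
/*
 * Usage note (a sketch of expected usage): the cpuset controller bumps
 * these as non-root cpusets are created and destroyed, so that
 * cpusets_enabled() stays a patched-out jump when no cpusets exist.
 */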

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_update_active_cpus(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern bool __cpuset_node_allowed(int node, gfp_t gfp_mask);

static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	if (cpusets_enabled())
		return __cpuset_node_allowed(node, gfp_mask);
	return true;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return __cpuset_node_allowed(zone_to_nid(z), gfp_mask);
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	if (cpusets_enabled())
		return __cpuset_zone_allowed(z, gfp_mask);
	return true;
}
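
/*
 * Usage sketch (hedged; loosely mirrors the page allocator's zonelist
 * walk, with details such as ALLOC_CPUSET elided):
 *
 *	for_each_zone_zonelist_nodemask(zone, z, zonelist, highidx, nodemask) {
 *		if (cpusets_enabled() &&
 *		    !__cpuset_zone_allowed(zone, gfp_mask))
 *			continue;
 *		... try to allocate from @zone ...
 *	}
 */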

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

#define cpuset_memory_pressure_bump()				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
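
/*
 * Note: because cpuset_memory_pressure_bump() is a macro, the check of
 * cpuset_memory_pressure_enabled is inlined at every call site, so the
 * (rarely enabled) pressure accounting costs only a load and a branch
 * when it is off.
 */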
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);

extern void cpuset_task_status_allowed(struct seq_file *m,
				       struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
			    struct pid *pid, struct task_struct *tsk);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return task_spread_page(current);
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return task_spread_slab(current);
}
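
/*
 * Usage note (hedged): task_spread_page()/task_spread_slab() test per-task
 * flags driven by the cpuset "memory_spread_page"/"memory_spread_slab"
 * files, so page cache and slab allocations can be spread evenly over the
 * cpuset's allowed nodes via cpuset_mem_spread_node() and
 * cpuset_slab_spread_node().
 */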

extern int current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_current_mems_allowed(void);

/*
 * read_mems_allowed_begin is required when making decisions involving
 * mems_allowed, such as during page allocation. mems_allowed can be updated
 * in parallel, and depending on the new value an operation can fail,
 * potentially causing process failure. A retry loop with
 * read_mems_allowed_begin and read_mems_allowed_retry prevents these
 * artificial failures.
 */
static inline unsigned int read_mems_allowed_begin(void)
{
	if (!cpusets_enabled())
		return 0;

	return read_seqcount_begin(&current->mems_allowed_seq);
}

/*
 * If this returns true, the operation that took place after
 * read_mems_allowed_begin may have failed artificially due to a concurrent
 * update of mems_allowed. It is up to the caller to retry the operation if
 * appropriate.
 */
static inline bool read_mems_allowed_retry(unsigned int seq)
{
	if (!cpusets_enabled())
		return false;

	return read_seqcount_retry(&current->mems_allowed_seq, seq);
}
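
/*
 * Usage sketch (illustrative; the allocation helper name is made up, only
 * the begin/retry API is real):
 *
 *	unsigned int cpuset_mems_cookie;
 *	struct page *page;
 *
 *	do {
 *		cpuset_mems_cookie = read_mems_allowed_begin();
 *		page = try_alloc_respecting_mems_allowed(...);
 *	} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
 */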

static inline void set_mems_allowed(nodemask_t nodemask)
{
	unsigned long flags;

	task_lock(current);
	local_irq_save(flags);
	write_seqcount_begin(&current->mems_allowed_seq);
	current->mems_allowed = nodemask;
	write_seqcount_end(&current->mems_allowed_seq);
	local_irq_restore(flags);
	task_unlock(current);
}
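
/*
 * Locking note (hedged): task_lock() serializes writers against each other,
 * and interrupts are disabled across the seqcount write section so that a
 * reader running in interrupt context on this CPU cannot spin forever on
 * an odd sequence count.
 */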

#else /* !CONFIG_CPUSETS */

static inline bool cpusets_enabled(void) { return false; }

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_update_active_cpus(void)
{
	partition_sched_domains(1, NULL, NULL);
}
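
/*
 * Note: with cpusets compiled out there is nothing to partition by, so CPU
 * hotplug simply rebuilds a single sched domain partition spanning all
 * active CPUs (partition_sched_domains(1, NULL, NULL)).
 */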

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, cpu_possible_mask);
}

static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
{
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	return true;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
					      struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_slab_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}

static inline int current_cpuset_is_being_rebound(void)
{
	return 0;
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_current_mems_allowed(void)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline unsigned int read_mems_allowed_begin(void)
{
	return 0;
}

static inline bool read_mems_allowed_retry(unsigned int seq)
{
	return false;
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */