#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 *  cpuset interface
 *
 *  Copyright (C) 2003 BULL SA
 *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/sched/task.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/jump_label.h>

#ifdef CONFIG_CPUSETS

extern struct static_key_false cpusets_enabled_key;

static inline bool cpusets_enabled(void)
{
	return static_branch_unlikely(&cpusets_enabled_key);
}

static inline int nr_cpusets(void)
{
	/* jump label reference count + the top-level cpuset */
	return static_key_count(&cpusets_enabled_key.key) + 1;
}

static inline void cpuset_inc(void)
{
	static_branch_inc(&cpusets_enabled_key);
}

static inline void cpuset_dec(void)
{
	static_branch_dec(&cpusets_enabled_key);
}
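
/*
 * Illustrative sketch (hypothetical caller, not part of this header):
 * cpuset_inc()/cpuset_dec() pair around the lifetime of a non-root
 * cpuset, so the cpusets_enabled() static branch stays patched out
 * until at least one such cpuset exists. A creation path would do:
 *
 *	cpuset_inc();
 *	if (cpusets_enabled())
 *		pr_debug("now tracking %d cpusets\n", nr_cpusets());
 *
 * and the matching destruction path calls cpuset_dec().
 */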

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_update_active_cpus(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern bool __cpuset_node_allowed(int node, gfp_t gfp_mask);

static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	if (cpusets_enabled())
		return __cpuset_node_allowed(node, gfp_mask);
	return true;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return __cpuset_node_allowed(zone_to_nid(z), gfp_mask);
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	if (cpusets_enabled())
		return __cpuset_zone_allowed(z, gfp_mask);
	return true;
}
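
/*
 * Usage sketch (hypothetical allocation loop; try_node() stands in
 * for a real per-node allocation attempt): allocation paths filter
 * candidate NUMA nodes through the current cpuset, and the check
 * costs nothing when no cpusets are configured:
 *
 *	int nid;
 *
 *	for_each_online_node(nid) {
 *		if (!cpuset_node_allowed(nid, GFP_KERNEL))
 *			continue;
 *		try_node(nid);
 *	}
 */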

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

#define cpuset_memory_pressure_bump()				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);
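
/*
 * Call-site sketch (hypothetical failure path; alloc_failed is an
 * assumed local): the macro compiles down to a flag test, so callers
 * can invoke it unconditionally where a cpuset-constrained task runs
 * into memory pressure:
 *
 *	if (alloc_failed)
 *		cpuset_memory_pressure_bump();
 */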

extern void cpuset_task_status_allowed(struct seq_file *m,
					struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
			    struct pid *pid, struct task_struct *tsk);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return task_spread_page(current);
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return task_spread_slab(current);
}

extern int current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_current_mems_allowed(void);

/*
 * read_mems_allowed_begin is required when making decisions involving
 * mems_allowed such as during page allocation. mems_allowed can be updated in
 * parallel and depending on the new value an operation can fail potentially
 * causing process failure. A retry loop with read_mems_allowed_begin and
 * read_mems_allowed_retry prevents these artificial failures (see the
 * example sketch after read_mems_allowed_retry below).
 */
static inline unsigned int read_mems_allowed_begin(void)
{
	if (!cpusets_enabled())
		return 0;

	return read_seqcount_begin(&current->mems_allowed_seq);
}

/*
 * If this returns true, the operation that took place after
 * read_mems_allowed_begin may have failed artificially due to a concurrent
 * update of mems_allowed. It is up to the caller to retry the operation if
 * appropriate.
 */
static inline bool read_mems_allowed_retry(unsigned int seq)
{
	if (!cpusets_enabled())
		return false;

	return read_seqcount_retry(&current->mems_allowed_seq, seq);
}
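
/*
 * Typical retry loop (illustrative sketch; alloc_from_mems() is a
 * hypothetical helper standing in for an allocation attempt against
 * cpuset_current_mems_allowed):
 *
 *	struct page *page;
 *	unsigned int cpuset_mems_cookie;
 *
 *	do {
 *		cpuset_mems_cookie = read_mems_allowed_begin();
 *		page = alloc_from_mems(gfp_mask, order);
 *	} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
 */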

static inline void set_mems_allowed(nodemask_t nodemask)
{
	unsigned long flags;

	task_lock(current);
	/*
	 * Keep interrupts disabled across the seqcount write section so
	 * a reader running in interrupt context on this CPU cannot spin
	 * forever on an odd sequence count.
	 */
	local_irq_save(flags);
	write_seqcount_begin(&current->mems_allowed_seq);
	current->mems_allowed = nodemask;
	write_seqcount_end(&current->mems_allowed_seq);
	local_irq_restore(flags);
	task_unlock(current);
}
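
/*
 * Write-side sketch (assumed caller context, e.g. cpuset rebind code):
 * publish a new mask so that concurrent read_mems_allowed_begin()/
 * read_mems_allowed_retry() readers observe the update and retry:
 *
 *	nodemask_t newmems;
 *
 *	nodes_clear(newmems);
 *	node_set(0, newmems);
 *	set_mems_allowed(newmems);
 */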

#else /* !CONFIG_CPUSETS */

static inline bool cpusets_enabled(void) { return false; }

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_update_active_cpus(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, cpu_possible_mask);
}

static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
{
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	return true;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
						struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_slab_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}

static inline int current_cpuset_is_being_rebound(void)
{
	return 0;
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_current_mems_allowed(void)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline unsigned int read_mems_allowed_begin(void)
{
	return 0;
}

static inline bool read_mems_allowed_retry(unsigned int seq)
{
	return false;
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */