#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 *  cpuset interface
 *
 *  Copyright (C) 2003 BULL SA
 *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */
#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/jump_label.h>
#ifdef CONFIG_CPUSETS

extern struct static_key_false cpusets_enabled_key;
static inline bool cpusets_enabled(void)
{
	return static_branch_unlikely(&cpusets_enabled_key);
}

static inline int nr_cpusets(void)
{
	/* jump label reference count + the top-level cpuset */
	return static_key_count(&cpusets_enabled_key.key) + 1;
}
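
/*
 * cpuset_inc()/cpuset_dec() below adjust the reference count behind
 * cpusets_enabled_key, so the cpusets_enabled() static branch flips on
 * when the count first becomes non-zero and back off when it returns to
 * zero (a summary of the jump-label semantics, not wording taken from
 * the cpuset core).
 */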
static inline void cpuset_inc(void)
{
	static_branch_inc(&cpusets_enabled_key);
}

static inline void cpuset_dec(void)
{
	static_branch_dec(&cpusets_enabled_key);
}

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_update_active_cpus(bool cpu_online);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern bool __cpuset_node_allowed(int node, gfp_t gfp_mask);

static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	if (cpusets_enabled())
		return __cpuset_node_allowed(node, gfp_mask);
	return true;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return __cpuset_node_allowed(zone_to_nid(z), gfp_mask);
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	if (cpusets_enabled())
		return __cpuset_zone_allowed(z, gfp_mask);
	return true;
}
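
/*
 * Typical use: filter NUMA placement decisions against the current
 * task's cpuset while walking zones. A minimal sketch, assuming the
 * caller has a gfp_mask in scope and a hypothetical consider_zone()
 * helper (neither is defined by this header):
 *
 *	struct zone *zone;
 *
 *	for_each_zone(zone) {
 *		if (!cpuset_zone_allowed(zone, gfp_mask))
 *			continue;
 *		consider_zone(zone);
 *	}
 */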

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

#define cpuset_memory_pressure_bump()				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);

extern void cpuset_task_status_allowed(struct seq_file *m,
					struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
			    struct pid *pid, struct task_struct *tsk);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return task_spread_page(current);
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return task_spread_slab(current);
}

extern int current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_current_mems_allowed(void);

/*
 * read_mems_allowed_begin is required when making decisions involving
 * mems_allowed such as during page allocation. mems_allowed can be updated in
 * parallel and depending on the new value an operation can fail potentially
 * causing process failure. A retry loop with read_mems_allowed_begin and
 * read_mems_allowed_retry prevents these artificial failures.
 */
static inline unsigned int read_mems_allowed_begin(void)
{
	if (!cpusets_enabled())
		return 0;

	return read_seqcount_begin(&current->mems_allowed_seq);
}

/*
 * If this returns true, the operation that took place after
 * read_mems_allowed_begin may have failed artificially due to a concurrent
 * update of mems_allowed. It is up to the caller to retry the operation if
 * appropriate.
 */
static inline bool read_mems_allowed_retry(unsigned int seq)
{
	if (!cpusets_enabled())
		return false;

	return read_seqcount_retry(&current->mems_allowed_seq, seq);
}
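
/*
 * The comment above describes a begin/retry loop; a minimal sketch of
 * that pattern, assuming a hypothetical try_to_allocate() helper that
 * can fail spuriously when mems_allowed changes underneath it:
 *
 *	unsigned int cookie;
 *	struct page *page;
 *
 *	do {
 *		cookie = read_mems_allowed_begin();
 *		page = try_to_allocate(gfp_mask, order);
 *	} while (!page && read_mems_allowed_retry(cookie));
 */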

static inline void set_mems_allowed(nodemask_t nodemask)
{
	unsigned long flags;

	task_lock(current);
	local_irq_save(flags);
	write_seqcount_begin(&current->mems_allowed_seq);
	current->mems_allowed = nodemask;
	write_seqcount_end(&current->mems_allowed_seq);
	local_irq_restore(flags);
	task_unlock(current);
}
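
/*
 * Example (a sketch; callers are normally the cpuset core itself):
 * rebinding the current task to a nodemask built by the caller:
 *
 *	nodemask_t newmems;
 *
 *	nodes_clear(newmems);
 *	node_set(0, newmems);
 *	set_mems_allowed(newmems);
 */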

#else /* !CONFIG_CPUSETS */

static inline bool cpusets_enabled(void) { return false; }

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_update_active_cpus(bool cpu_online)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, cpu_possible_mask);
}

static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
{
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	return true;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
						struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_slab_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}

static inline int current_cpuset_is_being_rebound(void)
{
	return 0;
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_current_mems_allowed(void)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline unsigned int read_mems_allowed_begin(void)
{
	return 0;
}

static inline bool read_mems_allowed_retry(unsigned int seq)
{
	return false;
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */