#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 *  cpuset interface
 *
 *  Copyright (C) 2003 BULL SA
 *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/jump_label.h>

#ifdef CONFIG_CPUSETS

extern struct static_key cpusets_enabled_key;
static inline bool cpusets_enabled(void)
{
	return static_key_false(&cpusets_enabled_key);
}

static inline int nr_cpusets(void)
{
	/* jump label reference count + the top-level cpuset */
	return static_key_count(&cpusets_enabled_key) + 1;
}

static inline void cpuset_inc(void)
{
	static_key_slow_inc(&cpusets_enabled_key);
}

static inline void cpuset_dec(void)
{
	static_key_slow_dec(&cpusets_enabled_key);
}
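
/*
 * Illustrative sketch (not part of this header's API): because
 * cpusets_enabled() is backed by a static key, callers can gate optional
 * cpuset work so the common "no cpusets configured" case costs only a
 * patched no-op branch:
 *
 *	if (cpusets_enabled())
 *		apply_cpuset_constraints(p);
 *
 * where apply_cpuset_constraints() is a hypothetical helper, not a
 * function declared here. cpuset_inc()/cpuset_dec() flip the key as
 * cpusets are created and destroyed.
 */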

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_update_active_cpus(bool cpu_online);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern int __cpuset_node_allowed(int node, gfp_t gfp_mask);

static inline int cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	return nr_cpusets() <= 1 || __cpuset_node_allowed(node, gfp_mask);
}

static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed(zone_to_nid(z), gfp_mask);
}
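
/*
 * Example (sketch, not kernel code): an allocation path can test a zone
 * cheaply before committing to it:
 *
 *	if (cpuset_zone_allowed(zone, gfp_mask))
 *		page = alloc_from_zone(zone);
 *
 * alloc_from_zone() is a hypothetical helper. When only the root cpuset
 * exists, nr_cpusets() <= 1 lets cpuset_node_allowed() return true
 * without calling __cpuset_node_allowed() at all.
 */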

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

#define cpuset_memory_pressure_bump()				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);
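
/*
 * Usage sketch (hypothetical call site, not declared here): code that
 * enters synchronous memory reclaim can record per-cpuset pressure with
 *
 *	cpuset_memory_pressure_bump();
 *
 * The macro expands to a cheap flag test; __cpuset_memory_pressure_bump()
 * runs only when cpuset_memory_pressure_enabled has been turned on via
 * the root cpuset's memory_pressure_enabled control file.
 */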

extern void cpuset_task_status_allowed(struct seq_file *m,
					struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
			    struct pid *pid, struct task_struct *tsk);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return task_spread_page(current);
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return task_spread_slab(current);
}

extern int current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_task_mems_allowed(struct task_struct *p);

/*
 * read_mems_allowed_begin is required when making decisions involving
 * mems_allowed such as during page allocation. mems_allowed can be updated in
 * parallel and depending on the new value an operation can fail potentially
 * causing process failure. A retry loop with read_mems_allowed_begin and
 * read_mems_allowed_retry prevents these artificial failures.
 */
static inline unsigned int read_mems_allowed_begin(void)
{
	return read_seqcount_begin(&current->mems_allowed_seq);
}

/*
 * If this returns true, the operation that took place after
 * read_mems_allowed_begin may have failed artificially due to a concurrent
 * update of mems_allowed. It is up to the caller to retry the operation if
 * appropriate.
 */
static inline bool read_mems_allowed_retry(unsigned int seq)
{
	return read_seqcount_retry(&current->mems_allowed_seq, seq);
}
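
/*
 * Typical reader pattern (sketch; try_alloc() is a hypothetical stand-in
 * for whatever operation depends on current->mems_allowed):
 *
 *	unsigned int seq;
 *	struct page *page;
 *
 *	do {
 *		seq = read_mems_allowed_begin();
 *		page = try_alloc(gfp_mask, order);
 *	} while (!page && read_mems_allowed_retry(seq));
 *
 * A failure observed while the sequence count has changed is retried
 * instead of being reported, hiding transient mems_allowed updates.
 */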

static inline void set_mems_allowed(nodemask_t nodemask)
{
	unsigned long flags;

	/*
	 * Writers take task_lock() to serialize against each other, and
	 * disable interrupts so a reader running on this CPU cannot spin
	 * on the seqcount while the write section is in progress.
	 */
	task_lock(current);
	local_irq_save(flags);
	write_seqcount_begin(&current->mems_allowed_seq);
	current->mems_allowed = nodemask;
	write_seqcount_end(&current->mems_allowed_seq);
	local_irq_restore(flags);
	task_unlock(current);
}

#else /* !CONFIG_CPUSETS */

static inline bool cpusets_enabled(void) { return false; }

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_update_active_cpus(bool cpu_online)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, cpu_possible_mask);
}

static inline void cpuset_cpus_allowed_fallback(struct task_struct *p) {}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline int cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
					      struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void) { return 0; }
static inline int cpuset_slab_spread_node(void) { return 0; }

static inline int cpuset_do_page_mem_spread(void) { return 0; }
static inline int cpuset_do_slab_mem_spread(void) { return 0; }

static inline int current_cpuset_is_being_rebound(void) { return 0; }

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_task_mems_allowed(struct task_struct *p) {}

static inline void set_mems_allowed(nodemask_t nodemask) {}

static inline unsigned int read_mems_allowed_begin(void) { return 0; }

static inline bool read_mems_allowed_retry(unsigned int seq) { return false; }

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */