#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>

#ifdef CONFIG_CPUSETS

extern int number_of_cpusets;	/* How many cpusets are defined in system? */

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_update_active_cpus(bool cpu_online);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);
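
/*
 * Note: __cpuset_node_allowed_softwall()/__cpuset_node_allowed_hardwall()
 * perform the full out-of-line policy check.  The inline wrappers below
 * short-circuit to "allowed" whenever at most one cpuset exists, so the
 * common single-cpuset case costs only a test of a global counter.
 * Broadly, the "hardwall" check confines allocations to the task's own
 * cpuset, while the "softwall" check may also permit falling back to an
 * ancestor cpuset for some kinds of allocation.
 */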

extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask);
extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask);

static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	return number_of_cpusets <= 1 ||
		__cpuset_node_allowed_softwall(node, gfp_mask);
}

static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	return number_of_cpusets <= 1 ||
		__cpuset_node_allowed_hardwall(node, gfp_mask);
}

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed_softwall(zone_to_nid(z), gfp_mask);
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed_hardwall(zone_to_nid(z), gfp_mask);
}
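
/*
 * Illustrative sketch (not part of this header): a caller walking
 * candidate zones might skip zones the current task's cpuset does not
 * allow before using them.  The iterator for_each_candidate_zone() is
 * hypothetical and stands in for whatever zone walk the caller already
 * performs.
 *
 *	struct zone *zone;
 *
 *	for_each_candidate_zone(zone) {
 *		if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
 *			continue;
 *		...use the zone...
 *	}
 */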

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);
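
/*
 * cpuset_memory_pressure_bump() updates the memory-pressure statistics of
 * current's cpuset (typically on entry to synchronous page reclaim), but
 * it only calls out of line when memory_pressure accounting has been
 * enabled; otherwise the macro reduces to a single flag test.
 */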
#define cpuset_memory_pressure_bump()				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);

extern const struct file_operations proc_cpuset_operations;
struct seq_file;
extern void cpuset_task_status_allowed(struct seq_file *m,
					struct task_struct *task);
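
/*
 * Memory spreading: when a cpuset enables its memory_spread_page or
 * memory_spread_slab flag, page-cache and slab allocations for tasks in
 * that cpuset are spread evenly over the nodes the task may use rather
 * than preferring the local node.  The spread-node helpers pick the next
 * node to allocate from, and the cpuset_do_*_mem_spread() tests report
 * whether current carries the corresponding PF_SPREAD_* flag.
 */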
extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return current->flags & PF_SPREAD_PAGE;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return current->flags & PF_SPREAD_SLAB;
}

extern int current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_task_mems_allowed(struct task_struct *p);

/*
 * get_mems_allowed is required when making decisions involving mems_allowed
 * such as during page allocation. mems_allowed can be updated in parallel
 * and depending on the new value an operation can fail potentially causing
 * process failure. A retry loop with get_mems_allowed and put_mems_allowed
 * prevents these artificial failures.
 */
static inline unsigned int get_mems_allowed(void)
{
	return read_seqcount_begin(&current->mems_allowed_seq);
}

/*
 * If this returns false, the operation that took place after get_mems_allowed
 * may have failed. It is up to the caller to retry the operation if
 * appropriate.
 */
static inline bool put_mems_allowed(unsigned int seq)
{
	return !read_seqcount_retry(&current->mems_allowed_seq, seq);
}
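
/*
 * Illustrative sketch of the retry loop described above (not part of this
 * header): sample the seqcount, perform the nodemask-dependent work, and
 * retry only if the work failed while mems_allowed was being rewritten
 * underneath us.  alloc_from_allowed_nodes() is a hypothetical helper that
 * stands in for the caller's real allocation step.
 *
 *	struct page *page;
 *	unsigned int cpuset_mems_cookie;
 *
 *	do {
 *		cpuset_mems_cookie = get_mems_allowed();
 *		page = alloc_from_allowed_nodes(gfp_mask, order);
 *	} while (!put_mems_allowed(cpuset_mems_cookie) && !page);
 *
 * A NULL result is treated as final only once put_mems_allowed() confirms
 * that mems_allowed did not change during the attempt.
 */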

static inline void set_mems_allowed(nodemask_t nodemask)
{
	task_lock(current);
	write_seqcount_begin(&current->mems_allowed_seq);
	current->mems_allowed = nodemask;
	write_seqcount_end(&current->mems_allowed_seq);
	task_unlock(current);
}

#else /* !CONFIG_CPUSETS */

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_update_active_cpus(bool cpu_online)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, cpu_possible_mask);
}

static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
{
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
						struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void) { return 0; }
static inline int cpuset_slab_spread_node(void) { return 0; }

static inline int cpuset_do_page_mem_spread(void) { return 0; }
static inline int cpuset_do_slab_mem_spread(void) { return 0; }

static inline int current_cpuset_is_being_rebound(void) { return 0; }

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_task_mems_allowed(struct task_struct *p)
{
}

static inline void set_mems_allowed(nodemask_t nodemask) {}

static inline unsigned int get_mems_allowed(void) { return 0; }

static inline bool put_mems_allowed(unsigned int seq) { return true; }

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */