cpusets: new round-robin rotor for SLAB allocations
include/linux/cpuset.h
#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 * cpuset interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/cgroup.h>
#include <linux/mm.h>

#ifdef CONFIG_CPUSETS

extern int number_of_cpusets;	/* How many cpusets are defined in the system? */

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern int cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask);
extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask);

/*
 * The inline wrappers below short-circuit the full cpuset check when at
 * most one cpuset exists (the common case), so the allocator fast path
 * avoids a function call.
 */
static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	return number_of_cpusets <= 1 ||
		__cpuset_node_allowed_softwall(node, gfp_mask);
}

static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	return number_of_cpusets <= 1 ||
		__cpuset_node_allowed_hardwall(node, gfp_mask);
}

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed_softwall(zone_to_nid(z), gfp_mask);
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed_hardwall(zone_to_nid(z), gfp_mask);
}
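
/*
 * Example (a minimal sketch, not the actual mm/page_alloc.c logic): a
 * zonelist scan that skips zones on nodes the current task's cpuset
 * forbids. Sleepable allocation paths would use the softwall variant;
 * atomic callers would use the hardwall one.
 * example_first_allowed_zone() is a hypothetical helper for
 * illustration only.
 */
static inline struct zone *example_first_allowed_zone(struct zonelist *zonelist,
						      gfp_t gfp_mask)
{
	struct zoneref *z;
	struct zone *zone;

	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask))
		if (cpuset_zone_allowed_softwall(zone, gfp_mask))
			return zone;
	return NULL;
}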

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

#define cpuset_memory_pressure_bump()				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);
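
/*
 * Example (a sketch): a caller in a reclaim path noting cpuset memory
 * pressure. The macro above keeps the common disabled case down to a
 * single test of a global flag. example_note_reclaim() is a
 * hypothetical helper, not an existing kernel function.
 */
static inline void example_note_reclaim(void)
{
	cpuset_memory_pressure_bump();
}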

extern const struct file_operations proc_cpuset_operations;
struct seq_file;
extern void cpuset_task_status_allowed(struct seq_file *m,
				       struct task_struct *task);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return current->flags & PF_SPREAD_PAGE;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return current->flags & PF_SPREAD_SLAB;
}
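
/*
 * Example (a sketch, not the actual mm/slab.c code): how a slab
 * allocator might pick the node for a fresh slab page. When the
 * current task's cpuset has memory_spread_slab enabled,
 * cpuset_slab_spread_node() rotates round-robin over the allowed
 * nodes; otherwise the local node is used. example_slab_node() is a
 * hypothetical helper; numa_node_id() comes from <linux/topology.h>.
 */
static inline int example_slab_node(void)
{
	if (cpuset_do_slab_mem_spread())
		return cpuset_slab_spread_node();
	return numa_node_id();		/* default: allocate locally */
}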

extern int current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_task_mems_allowed(struct task_struct *p);

/*
 * Reading current's mems_allowed and mempolicy in the fastpath must be
 * protected by get_mems_allowed().
 */
static inline void get_mems_allowed(void)
{
	current->mems_allowed_change_disable++;

	/*
	 * Ensure that reading mems_allowed and mempolicy happens after
	 * the update of ->mems_allowed_change_disable.
	 *
	 * When the write-side task finds ->mems_allowed_change_disable
	 * is non-zero, it knows the read-side task is reading
	 * mems_allowed or mempolicy, so it will clear old bits lazily.
	 */
	smp_mb();
}

static inline void put_mems_allowed(void)
{
	/*
	 * Ensure that reading mems_allowed and mempolicy completes
	 * before reducing mems_allowed_change_disable.
	 *
	 * The write-side task will then know that the read-side task
	 * is still reading mems_allowed or mempolicy, and will not
	 * clear old bits in the nodemask.
	 */
	smp_mb();
	--ACCESS_ONCE(current->mems_allowed_change_disable);
}
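
/*
 * Example read-side pairing (a sketch): take a stable view of the
 * current task's mems_allowed around a nodemask query. A concurrent
 * cpuset rebind sees the elevated mems_allowed_change_disable count
 * and defers clearing the old bits until the reader is done.
 * example_node_currently_allowed() is a hypothetical helper for
 * illustration only.
 */
static inline int example_node_currently_allowed(int nid)
{
	int allowed;

	get_mems_allowed();
	allowed = node_isset(nid, cpuset_current_mems_allowed);
	put_mems_allowed();

	return allowed;
}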

static inline void set_mems_allowed(nodemask_t nodemask)
{
	task_lock(current);
	current->mems_allowed = nodemask;
	task_unlock(current);
}

#else /* !CONFIG_CPUSETS */

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, cpu_possible_mask);
}

static inline int cpuset_cpus_allowed_fallback(struct task_struct *p)
{
	cpumask_copy(&p->cpus_allowed, cpu_possible_mask);
	return cpumask_any(cpu_active_mask);
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_HIGH_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
					      struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_slab_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}

static inline int current_cpuset_is_being_rebound(void)
{
	return 0;
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_task_mems_allowed(struct task_struct *p)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline void get_mems_allowed(void)
{
}

static inline void put_mems_allowed(void)
{
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */