#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 * cpuset interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/cgroup.h>
#include <linux/mm.h>

#ifdef CONFIG_CPUSETS

extern int number_of_cpusets;	/* How many cpusets are defined in system? */

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

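/*
 * Memory allocation permission checks.  The __cpuset_node_allowed_*()
 * helpers in kernel/cpuset.c do the full hierarchy walk; the inline
 * wrappers below short-circuit to "allowed" when only the root cpuset
 * exists.  The softwall variant is the more permissive check, the
 * hardwall variant the stricter one.
 */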
extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask);
extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask);

static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	return number_of_cpusets <= 1 ||
		__cpuset_node_allowed_softwall(node, gfp_mask);
}

static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	return number_of_cpusets <= 1 ||
		__cpuset_node_allowed_hardwall(node, gfp_mask);
}

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed_softwall(zone_to_nid(z), gfp_mask);
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed_hardwall(zone_to_nid(z), gfp_mask);
}

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

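/*
 * Only take the cost of calling __cpuset_memory_pressure_bump() when
 * the memory_pressure feature has been enabled; the common case is a
 * single test of a global flag.
 */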
#define cpuset_memory_pressure_bump()				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);

extern const struct file_operations proc_cpuset_operations;
struct seq_file;
extern void cpuset_task_status_allowed(struct seq_file *m,
					struct task_struct *task);

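/*
 * Memory spreading: when the task's cpuset enables it, page cache
 * (PF_SPREAD_PAGE) and slab (PF_SPREAD_SLAB) allocations are spread
 * evenly over the nodes the task is allowed to use.
 */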
extern int cpuset_mem_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return current->flags & PF_SPREAD_PAGE;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return current->flags & PF_SPREAD_SLAB;
}

extern int current_cpuset_is_being_rebound(void);

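/* Rebuild the scheduler domains to match the current cpuset CPU partitioning. */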
extern void rebuild_sched_domains(void);

extern void cpuset_print_task_mems_allowed(struct task_struct *p);

static inline void set_mems_allowed(nodemask_t nodemask)
{
	current->mems_allowed = nodemask;
}

#else /* !CONFIG_CPUSETS */

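/* With cpusets disabled, every check behaves as if a single all-encompassing cpuset existed. */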
static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, cpu_possible_mask);
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_HIGH_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
						struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}

static inline int current_cpuset_is_being_rebound(void)
{
	return 0;
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_task_mems_allowed(struct task_struct *p)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */