#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 * cpuset interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/jump_label.h>

#ifdef CONFIG_CPUSETS

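/*
 * A jump label tracks whether any cpuset beyond the top-level one exists.
 * While the count is zero, cpusets_enabled() compiles down to a patched
 * no-op branch, so hot paths such as the page allocator pay essentially
 * nothing for cpuset support that is not in use.
 */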
extern struct static_key cpusets_enabled_key;
static inline bool cpusets_enabled(void)
{
	return static_key_false(&cpusets_enabled_key);
}

static inline int nr_cpusets(void)
{
	/* jump label reference count + the top-level cpuset */
	return static_key_count(&cpusets_enabled_key) + 1;
}

static inline void cpuset_inc(void)
{
	static_key_slow_inc(&cpusets_enabled_key);
}

static inline void cpuset_dec(void)
{
	static_key_slow_dec(&cpusets_enabled_key);
}

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_update_active_cpus(bool cpu_online);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern int __cpuset_node_allowed(int node, gfp_t gfp_mask);

static inline int cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	return nr_cpusets() <= 1 || __cpuset_node_allowed(node, gfp_mask);
}

static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed(zone_to_nid(z), gfp_mask);
}
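
/*
 * Example (illustrative sketch only, simplified from allocator usage):
 * a zonelist walker would typically skip zones on nodes the current
 * task's cpuset forbids:
 *
 *	for_each_zone_zonelist(zone, z, zonelist, highidx) {
 *		if (cpusets_enabled() &&
 *		    !cpuset_zone_allowed(zone, gfp_mask))
 *			continue;
 *		... attempt allocation from this zone ...
 *	}
 *
 * The loop shape and "highidx" are assumptions for illustration; the
 * real allocator applies additional flags when deciding to consult
 * cpuset_zone_allowed().
 */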

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

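/*
 * Called when a task is about to enter synchronous (direct) page reclaim;
 * feeds the per-cpuset memory_pressure meter. The enabled check keeps this
 * a cheap no-op unless the meter has been turned on.
 */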
#define cpuset_memory_pressure_bump()				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);

extern void cpuset_task_status_allowed(struct seq_file *m,
					struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
			    struct pid *pid, struct task_struct *tsk);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return task_spread_page(current);
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return task_spread_slab(current);
}
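
/*
 * Example (illustrative sketch): when a cpuset's memory_spread_page flag
 * is set, page cache allocations rotate across the cpuset's nodes. A
 * caller along the lines of __page_cache_alloc() might do:
 *
 *	if (cpuset_do_page_mem_spread()) {
 *		int n = cpuset_mem_spread_node();
 *		return __alloc_pages_node(n, gfp, 0);
 *	}
 *	return alloc_pages(gfp, 0);
 *
 * This caller is a simplified assumption; real users also wrap the
 * attempt in the read_mems_allowed_begin()/retry() loop described below.
 */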

extern int current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_current_mems_allowed(void);

/*
 * read_mems_allowed_begin is required when making decisions involving
 * mems_allowed, such as during page allocation. mems_allowed can be
 * updated in parallel, and an allocation that races with such an update
 * may fail spuriously. A retry loop using read_mems_allowed_begin and
 * read_mems_allowed_retry prevents these artificial failures; a sketch
 * of the loop follows read_mems_allowed_retry() below.
 */
static inline unsigned int read_mems_allowed_begin(void)
{
	if (!cpusets_enabled())
		return 0;

	return read_seqcount_begin(&current->mems_allowed_seq);
}

/*
 * If this returns true, the operation that took place after
 * read_mems_allowed_begin may have failed artificially due to a concurrent
 * update of mems_allowed. It is up to the caller to retry the operation if
 * appropriate.
 */
static inline bool read_mems_allowed_retry(unsigned int seq)
{
	if (!cpusets_enabled())
		return false;

	return read_seqcount_retry(&current->mems_allowed_seq, seq);
}
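
/*
 * Example (illustrative sketch): callers bracket the allocation attempt
 * with the seqcount so that a concurrent mems_allowed update triggers a
 * retry rather than a spurious failure. The allocation helper named here
 * is an assumption for illustration only:
 *
 *	unsigned int cpuset_mems_cookie;
 *	struct page *page;
 *
 *	do {
 *		cpuset_mems_cookie = read_mems_allowed_begin();
 *		page = try_alloc(gfp_mask, order);
 *	} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
 */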

static inline void set_mems_allowed(nodemask_t nodemask)
{
	unsigned long flags;

	task_lock(current);
	/*
	 * Interrupts must be disabled around the seqcount write section:
	 * a reader that interrupted the writer on this CPU would
	 * otherwise spin in read_mems_allowed_begin() forever.
	 */
	local_irq_save(flags);
	write_seqcount_begin(&current->mems_allowed_seq);
	current->mems_allowed = nodemask;
	write_seqcount_end(&current->mems_allowed_seq);
	local_irq_restore(flags);
	task_unlock(current);
}

#else /* !CONFIG_CPUSETS */

static inline bool cpusets_enabled(void) { return false; }

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_update_active_cpus(bool cpu_online)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, cpu_possible_mask);
}

static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
{
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline int cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
					      struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_slab_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}

static inline int current_cpuset_is_being_rebound(void)
{
	return 0;
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_current_mems_allowed(void)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline unsigned int read_mems_allowed_begin(void)
{
	return 0;
}

static inline bool read_mems_allowed_retry(unsigned int seq)
{
	return false;
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */