#ifndef __LINUX_CPUMASK_H
#define __LINUX_CPUMASK_H

/*
 * Cpumasks provide a bitmap suitable for representing the
 * set of CPUs in a system, one bit position per CPU number.
 *
 * See detailed comments in the file linux/bitmap.h describing the
 * data type on which these cpumasks are based.
 *
 * For details of cpumask_scnprintf() and cpumask_parse_user(),
 * see bitmap_scnprintf() and bitmap_parse_user() in lib/bitmap.c.
 * For details of cpulist_scnprintf() and cpulist_parse(), see
 * bitmap_scnlistprintf() and bitmap_parselist(), also in bitmap.c.
 * For details of cpu_remap(), see bitmap_bitremap() in lib/bitmap.c.
 * For details of cpus_remap(), see bitmap_remap() in lib/bitmap.c.
 * For details of cpus_onto(), see bitmap_onto() in lib/bitmap.c.
 * For details of cpus_fold(), see bitmap_fold() in lib/bitmap.c.
 *
 * . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
 * Note: The alternate operations with the suffix "_nr" are used
 *       to limit the range of the loop to nr_cpu_ids instead of
 *       NR_CPUS when NR_CPUS > 64 for performance reasons.
 *       If NR_CPUS is <= 64 then most assembler bitmask
 *       operators execute faster with a constant range, so
 *       those operators continue to use NR_CPUS.
 *
 *       Another consideration is that nr_cpu_ids is initialized
 *       to NR_CPUS and isn't lowered until the possible cpus are
 *       discovered (including any disabled cpus).  So early uses
 *       will span the entire range of NR_CPUS.
 * . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
 *
 * The available cpumask operations are:
 *
 * void cpu_set(cpu, mask)              turn on bit 'cpu' in mask
 * void cpu_clear(cpu, mask)            turn off bit 'cpu' in mask
 * void cpus_setall(mask)               set all bits
 * void cpus_clear(mask)                clear all bits
 * int cpu_isset(cpu, mask)             true iff bit 'cpu' set in mask
 * int cpu_test_and_set(cpu, mask)      test and set bit 'cpu' in mask
 *
 * void cpus_and(dst, src1, src2)       dst = src1 & src2  [intersection]
 * void cpus_or(dst, src1, src2)        dst = src1 | src2  [union]
 * void cpus_xor(dst, src1, src2)       dst = src1 ^ src2
 * void cpus_andnot(dst, src1, src2)    dst = src1 & ~src2
 * void cpus_complement(dst, src)       dst = ~src
 *
 * int cpus_equal(mask1, mask2)         Does mask1 == mask2?
 * int cpus_intersects(mask1, mask2)    Do mask1 and mask2 intersect?
 * int cpus_subset(mask1, mask2)        Is mask1 a subset of mask2?
 * int cpus_empty(mask)                 Is mask empty (no bits set)?
 * int cpus_full(mask)                  Is mask full (all bits set)?
 * int cpus_weight(mask)                Hamming weight - number of set bits
 * int cpus_weight_nr(mask)             Same using nr_cpu_ids instead of NR_CPUS
 *
 * void cpus_shift_right(dst, src, n)   Shift right
 * void cpus_shift_left(dst, src, n)    Shift left
 *
 * int first_cpu(mask)                  Number lowest set bit, or NR_CPUS
 * int next_cpu(cpu, mask)              Next cpu past 'cpu', or NR_CPUS
 * int next_cpu_nr(cpu, mask)           Next cpu past 'cpu', or nr_cpu_ids
 *
 * cpumask_t cpumask_of_cpu(cpu)        Return cpumask with bit 'cpu' set
 * CPU_MASK_ALL                         Initializer - all bits set
 * CPU_MASK_NONE                        Initializer - no bits set
 * unsigned long *cpus_addr(mask)       Array of unsigned long's in mask
 *
 * int cpumask_scnprintf(buf, len, mask)     Format cpumask for printing
 * int cpumask_parse_user(ubuf, ulen, mask)  Parse ascii string as cpumask
 * int cpulist_scnprintf(buf, len, mask)     Format cpumask as list for printing
 * int cpulist_parse(buf, map)          Parse ascii string as cpulist
 * int cpu_remap(oldbit, old, new)      newbit = map(old, new)(oldbit)
 * void cpus_remap(dst, src, old, new)  *dst = map(old, new)(src)
 * void cpus_onto(dst, orig, relmap)    *dst = orig relative to relmap
 * void cpus_fold(dst, orig, sz)        dst bits = orig bits mod sz
 *
 * for_each_cpu_mask(cpu, mask)         for-loop cpu over mask using NR_CPUS
 * for_each_cpu_mask_nr(cpu, mask)      for-loop cpu over mask using nr_cpu_ids
 *
 * int num_online_cpus()                Number of online CPUs
 * int num_possible_cpus()              Number of all possible CPUs
 * int num_present_cpus()               Number of present CPUs
 *
 * int cpu_online(cpu)                  Is cpu 'cpu' online?
 * int cpu_possible(cpu)                Is cpu 'cpu' possible?
 * int cpu_present(cpu)                 Is cpu 'cpu' present (can schedule)?
 *
 * int any_online_cpu(mask)             First online cpu in mask
 *
 * for_each_possible_cpu(cpu)           for-loop cpu over cpu_possible_map
 * for_each_online_cpu(cpu)             for-loop cpu over cpu_online_map
 * for_each_present_cpu(cpu)            for-loop cpu over cpu_present_map
 *
 * Subtlety:
 * 1) The 'type-checked' form of cpu_isset() causes gcc (3.3.2, anyway)
 *    to generate slightly worse code.  Note for example the additional
 *    40 lines of assembly code compiling the "for each possible cpu"
 *    loops buried in the disk_stat_read() macro calls when compiling
 *    drivers/block/genhd.c (arch i386, CONFIG_SMP=y).  So use a simple
 *    one-line #define for cpu_isset(), instead of wrapping an inline
 *    inside a macro, the way we do the other calls.
 */
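
/*
 * A minimal usage sketch (illustration only, not part of this header;
 * 'housekeeping' and do_housekeeping() are hypothetical names):
 *
 *      cpumask_t housekeeping = CPU_MASK_NONE;
 *      int cpu;
 *
 *      cpu_set(0, housekeeping);
 *      cpu_set(2, housekeeping);
 *      if (!cpus_empty(housekeeping))
 *              for_each_cpu_mask_nr(cpu, housekeeping)
 *                      do_housekeeping(cpu);   (visits cpu 0, then cpu 2)
 */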

#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/bitmap.h>

typedef struct { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
extern cpumask_t _unused_cpumask_arg_;

#define cpu_set(cpu, dst) __cpu_set((cpu), &(dst))
static inline void __cpu_set(int cpu, volatile cpumask_t *dstp)
{
        set_bit(cpu, dstp->bits);
}

#define cpu_clear(cpu, dst) __cpu_clear((cpu), &(dst))
static inline void __cpu_clear(int cpu, volatile cpumask_t *dstp)
{
        clear_bit(cpu, dstp->bits);
}

#define cpus_setall(dst) __cpus_setall(&(dst), NR_CPUS)
static inline void __cpus_setall(cpumask_t *dstp, int nbits)
{
        bitmap_fill(dstp->bits, nbits);
}

#define cpus_clear(dst) __cpus_clear(&(dst), NR_CPUS)
static inline void __cpus_clear(cpumask_t *dstp, int nbits)
{
        bitmap_zero(dstp->bits, nbits);
}

/* No static inline type checking - see Subtlety (1) above. */
#define cpu_isset(cpu, cpumask) test_bit((cpu), (cpumask).bits)

#define cpu_test_and_set(cpu, cpumask) __cpu_test_and_set((cpu), &(cpumask))
static inline int __cpu_test_and_set(int cpu, cpumask_t *addr)
{
        return test_and_set_bit(cpu, addr->bits);
}
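
/*
 * Note: the single-bit operations above use the atomic bitops
 * (set_bit(), clear_bit(), test_and_set_bit()), so they are safe
 * against concurrent updaters of the same mask.  The multi-word
 * operations below (cpus_and() and friends) are not atomic and need
 * external locking if the masks can change underneath them.
 * A hedged claiming sketch ('claimed' is a hypothetical mask):
 *
 *      static cpumask_t claimed = CPU_MASK_NONE;
 *
 *      if (!cpu_test_and_set(cpu, claimed))
 *              (this caller now owns 'cpu')
 */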

#define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS)
static inline void __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
                                        const cpumask_t *src2p, int nbits)
{
        bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
}

#define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS)
static inline void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p,
                                        const cpumask_t *src2p, int nbits)
{
        bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
}

#define cpus_xor(dst, src1, src2) __cpus_xor(&(dst), &(src1), &(src2), NR_CPUS)
static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
                                        const cpumask_t *src2p, int nbits)
{
        bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
}

#define cpus_andnot(dst, src1, src2) \
                                __cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS)
static inline void __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
                                        const cpumask_t *src2p, int nbits)
{
        bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
}

#define cpus_complement(dst, src) __cpus_complement(&(dst), &(src), NR_CPUS)
static inline void __cpus_complement(cpumask_t *dstp,
                                        const cpumask_t *srcp, int nbits)
{
        bitmap_complement(dstp->bits, srcp->bits, nbits);
}

#define cpus_equal(src1, src2) __cpus_equal(&(src1), &(src2), NR_CPUS)
static inline int __cpus_equal(const cpumask_t *src1p,
                                        const cpumask_t *src2p, int nbits)
{
        return bitmap_equal(src1p->bits, src2p->bits, nbits);
}

#define cpus_intersects(src1, src2) __cpus_intersects(&(src1), &(src2), NR_CPUS)
static inline int __cpus_intersects(const cpumask_t *src1p,
                                        const cpumask_t *src2p, int nbits)
{
        return bitmap_intersects(src1p->bits, src2p->bits, nbits);
}

#define cpus_subset(src1, src2) __cpus_subset(&(src1), &(src2), NR_CPUS)
static inline int __cpus_subset(const cpumask_t *src1p,
                                        const cpumask_t *src2p, int nbits)
{
        return bitmap_subset(src1p->bits, src2p->bits, nbits);
}

#define cpus_empty(src) __cpus_empty(&(src), NR_CPUS)
static inline int __cpus_empty(const cpumask_t *srcp, int nbits)
{
        return bitmap_empty(srcp->bits, nbits);
}

#define cpus_full(cpumask) __cpus_full(&(cpumask), NR_CPUS)
static inline int __cpus_full(const cpumask_t *srcp, int nbits)
{
        return bitmap_full(srcp->bits, nbits);
}

#define cpus_weight(cpumask) __cpus_weight(&(cpumask), NR_CPUS)
static inline int __cpus_weight(const cpumask_t *srcp, int nbits)
{
        return bitmap_weight(srcp->bits, nbits);
}

#define cpus_shift_right(dst, src, n) \
                        __cpus_shift_right(&(dst), &(src), (n), NR_CPUS)
static inline void __cpus_shift_right(cpumask_t *dstp,
                                        const cpumask_t *srcp, int n, int nbits)
{
        bitmap_shift_right(dstp->bits, srcp->bits, n, nbits);
}

#define cpus_shift_left(dst, src, n) \
                        __cpus_shift_left(&(dst), &(src), (n), NR_CPUS)
static inline void __cpus_shift_left(cpumask_t *dstp,
                                        const cpumask_t *srcp, int n, int nbits)
{
        bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
}

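/*
 * Combining masks with the set operations above, sketched under
 * hypothetical names ('allowed' and 'policy_mask'):
 *
 *      cpumask_t allowed;
 *
 *      cpus_and(allowed, policy_mask, cpu_online_map);
 *      if (cpus_empty(allowed))
 *              allowed = cpu_online_map;   (fall back to all online cpus)
 */
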
#ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP
extern cpumask_t *cpumask_of_cpu_map;
#define cpumask_of_cpu(cpu)    (cpumask_of_cpu_map[cpu])

#else
#define cpumask_of_cpu(cpu)                                             \
(*({                                                                    \
        typeof(_unused_cpumask_arg_) m;                                 \
        if (sizeof(m) == sizeof(unsigned long)) {                       \
                m.bits[0] = 1UL<<(cpu);                                 \
        } else {                                                        \
                cpus_clear(m);                                          \
                cpu_set((cpu), m);                                      \
        }                                                               \
        &m;                                                             \
}))
#endif
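
/*
 * cpumask_of_cpu() is handy wherever a single-cpu mask is wanted as an
 * rvalue, e.g. (a sketch assuming the cpumask_t-by-value form of the
 * scheduler's set_cpus_allowed() from this kernel era):
 *
 *      set_cpus_allowed(current, cpumask_of_cpu(smp_processor_id()));
 */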

#define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)

#if NR_CPUS <= BITS_PER_LONG

#define CPU_MASK_ALL                                                    \
(cpumask_t) { {                                                         \
        [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD                 \
} }

#define CPU_MASK_ALL_PTR        (&CPU_MASK_ALL)

#else

#define CPU_MASK_ALL                                                    \
(cpumask_t) { {                                                         \
        [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL,                        \
        [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD                 \
} }

/* cpu_mask_all is in init/main.c */
extern cpumask_t cpu_mask_all;
#define CPU_MASK_ALL_PTR        (&cpu_mask_all)

#endif

#define CPU_MASK_NONE                                                   \
(cpumask_t) { {                                                         \
        [0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL                          \
} }

#define CPU_MASK_CPU0                                                   \
(cpumask_t) { {                                                         \
        [0] = 1UL                                                       \
} }
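
/*
 * The initializers above are meant for static masks, e.g. (sketch;
 * 'frozen_cpus' and 'boot_cpu_mask' are hypothetical names):
 *
 *      static cpumask_t frozen_cpus = CPU_MASK_NONE;
 *      static cpumask_t boot_cpu_mask = CPU_MASK_CPU0;
 */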

#define cpus_addr(src) ((src).bits)

#define cpumask_scnprintf(buf, len, src) \
                        __cpumask_scnprintf((buf), (len), &(src), NR_CPUS)
static inline int __cpumask_scnprintf(char *buf, int len,
                                        const cpumask_t *srcp, int nbits)
{
        return bitmap_scnprintf(buf, len, srcp->bits, nbits);
}

#define cpumask_parse_user(ubuf, ulen, dst) \
                        __cpumask_parse_user((ubuf), (ulen), &(dst), NR_CPUS)
static inline int __cpumask_parse_user(const char __user *buf, int len,
                                        cpumask_t *dstp, int nbits)
{
        return bitmap_parse_user(buf, len, dstp->bits, nbits);
}

#define cpulist_scnprintf(buf, len, src) \
                        __cpulist_scnprintf((buf), (len), &(src), NR_CPUS)
static inline int __cpulist_scnprintf(char *buf, int len,
                                        const cpumask_t *srcp, int nbits)
{
        return bitmap_scnlistprintf(buf, len, srcp->bits, nbits);
}

#define cpulist_parse(buf, dst) __cpulist_parse((buf), &(dst), NR_CPUS)
static inline int __cpulist_parse(const char *buf, cpumask_t *dstp, int nbits)
{
        return bitmap_parselist(buf, dstp->bits, nbits);
}
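
/*
 * The two text forms differ: cpumask_scnprintf() uses the hex-word
 * format (e.g. "0000000f"), cpulist_scnprintf() the range-list format
 * (e.g. "0-3").  A hedged round-trip sketch ('kbuf', 'new_mask',
 * 'ubuf' and 'ulen' are hypothetical):
 *
 *      char kbuf[128];
 *      cpumask_t new_mask;
 *
 *      if (cpumask_parse_user(ubuf, ulen, new_mask) == 0)
 *              cpulist_scnprintf(kbuf, sizeof(kbuf), new_mask);
 */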

#define cpu_remap(oldbit, old, new) \
                __cpu_remap((oldbit), &(old), &(new), NR_CPUS)
static inline int __cpu_remap(int oldbit,
                const cpumask_t *oldp, const cpumask_t *newp, int nbits)
{
        return bitmap_bitremap(oldbit, oldp->bits, newp->bits, nbits);
}

#define cpus_remap(dst, src, old, new) \
                __cpus_remap(&(dst), &(src), &(old), &(new), NR_CPUS)
static inline void __cpus_remap(cpumask_t *dstp, const cpumask_t *srcp,
                const cpumask_t *oldp, const cpumask_t *newp, int nbits)
{
        bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits);
}

#define cpus_onto(dst, orig, relmap) \
                __cpus_onto(&(dst), &(orig), &(relmap), NR_CPUS)
static inline void __cpus_onto(cpumask_t *dstp, const cpumask_t *origp,
                const cpumask_t *relmapp, int nbits)
{
        bitmap_onto(dstp->bits, origp->bits, relmapp->bits, nbits);
}

#define cpus_fold(dst, orig, sz) \
                __cpus_fold(&(dst), &(orig), sz, NR_CPUS)
static inline void __cpus_fold(cpumask_t *dstp, const cpumask_t *origp,
                int sz, int nbits)
{
        bitmap_fold(dstp->bits, origp->bits, sz, nbits);
}
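
/*
 * A small worked example of the relative-map helpers, assuming the
 * bitmap_bitremap()/bitmap_fold() semantics documented in lib/bitmap.c:
 *
 *      with old = cpus {0,1} and new = cpus {2,3},
 *      cpu_remap(1, old, new) returns 3 (old's 2nd set bit maps to
 *      new's 2nd set bit);
 *      cpus_fold(dst, orig, 4) sets dst bit (n % 4) for each orig bit n.
 */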

#if NR_CPUS == 1

#define nr_cpu_ids              1
#define first_cpu(src)          ({ (void)(src); 0; })
#define next_cpu(n, src)        ({ (void)(src); 1; })
#define any_online_cpu(mask)    0
#define for_each_cpu_mask(cpu, mask)    \
        for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)

#else /* NR_CPUS > 1 */

extern int nr_cpu_ids;
int __first_cpu(const cpumask_t *srcp);
int __next_cpu(int n, const cpumask_t *srcp);
int __any_online_cpu(const cpumask_t *mask);

#define first_cpu(src)          __first_cpu(&(src))
#define next_cpu(n, src)        __next_cpu((n), &(src))
#define any_online_cpu(mask)    __any_online_cpu(&(mask))
#define for_each_cpu_mask(cpu, mask)            \
        for ((cpu) = -1;                        \
                (cpu) = next_cpu((cpu), (mask)),        \
                (cpu) < NR_CPUS; )
#endif
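
/*
 * Open-coded iteration equivalent to for_each_cpu_mask() (sketch):
 *
 *      int cpu;
 *
 *      for (cpu = first_cpu(mask); cpu < NR_CPUS; cpu = next_cpu(cpu, mask))
 *              ...
 *
 * first_cpu()/next_cpu() return NR_CPUS when no (further) bit is set,
 * which is what terminates the loop.
 */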

#if NR_CPUS <= 64

#define next_cpu_nr(n, src)             next_cpu(n, src)
#define cpus_weight_nr(cpumask)         cpus_weight(cpumask)
#define for_each_cpu_mask_nr(cpu, mask) for_each_cpu_mask(cpu, mask)

#else /* NR_CPUS > 64 */

int __next_cpu_nr(int n, const cpumask_t *srcp);
#define next_cpu_nr(n, src)     __next_cpu_nr((n), &(src))
#define cpus_weight_nr(cpumask) __cpus_weight(&(cpumask), nr_cpu_ids)
#define for_each_cpu_mask_nr(cpu, mask)         \
        for ((cpu) = -1;                        \
                (cpu) = next_cpu_nr((cpu), (mask)),     \
                (cpu) < nr_cpu_ids; )

#endif /* NR_CPUS > 64 */

/*
 * The following particular system cpumasks and operations manage
 * possible, present and online cpus.  Each of them is a fixed size
 * bitmap of size NR_CPUS.
 *
 *  #ifdef CONFIG_HOTPLUG_CPU
 *     cpu_possible_map - has bit 'cpu' set iff cpu is populatable
 *     cpu_present_map  - has bit 'cpu' set iff cpu is populated
 *     cpu_online_map   - has bit 'cpu' set iff cpu available to scheduler
 *  #else
 *     cpu_possible_map - has bit 'cpu' set iff cpu is populated
 *     cpu_present_map  - copy of cpu_possible_map
 *     cpu_online_map   - has bit 'cpu' set iff cpu available to scheduler
 *  #endif
 *
 *  In either case, NR_CPUS is fixed at compile time, as the static
 *  size of these bitmaps.  The cpu_possible_map is fixed at boot
 *  time, as the set of CPU ids that might ever be plugged in at any
 *  time during the life of that system boot.  The cpu_present_map
 *  is dynamic(*), representing which CPUs are currently plugged in.
 *  And cpu_online_map is the dynamic subset of cpu_present_map,
 *  indicating those CPUs available for scheduling.
 *
 *  If HOTPLUG is enabled, then cpu_possible_map is forced to have
 *  all NR_CPUS bits set, otherwise it is just the set of CPUs that
 *  ACPI reports present at boot.
 *
 *  If HOTPLUG is enabled, then cpu_present_map varies dynamically,
 *  depending on what ACPI reports as currently plugged in, otherwise
 *  cpu_present_map is just a copy of cpu_possible_map.
 *
 *  (*) Well, cpu_present_map is dynamic in the hotplug case.  If not
 *      hotplug, it's a copy of cpu_possible_map, hence fixed at boot.
 *
 * Subtleties:
 * 1) UP architectures (NR_CPUS == 1, CONFIG_SMP not defined) hardcode
 *    the assumption that their single CPU is online.  The UP
 *    cpu_{online,possible,present}_maps are placebos.  Changing them
 *    will have no useful effect on the following num_*_cpus()
 *    and cpu_*() macros in the UP case.  This ugliness is a UP
 *    optimization - don't waste any instructions or memory references
 *    asking if you're online or how many CPUs there are if there is
 *    only one CPU.
 * 2) Most SMP architectures #define some of these maps to be some
 *    other map specific to that arch.  Therefore, the following
 *    must be #define macros, not inlines.  To see why, examine
 *    the assembly code produced by the following.  Note that
 *    set1() writes phys_x_map, but set2() writes x_map:
 *        int x_map, phys_x_map;
 *        #define set1(a) x_map = a
 *        inline void set2(int a) { x_map = a; }
 *        #define x_map phys_x_map
 *        main(){ set1(3); set2(5); }
 */

extern cpumask_t cpu_possible_map;
extern cpumask_t cpu_online_map;
extern cpumask_t cpu_present_map;

#if NR_CPUS > 1
#define num_online_cpus()       cpus_weight_nr(cpu_online_map)
#define num_possible_cpus()     cpus_weight_nr(cpu_possible_map)
#define num_present_cpus()      cpus_weight_nr(cpu_present_map)
#define cpu_online(cpu)         cpu_isset((cpu), cpu_online_map)
#define cpu_possible(cpu)       cpu_isset((cpu), cpu_possible_map)
#define cpu_present(cpu)        cpu_isset((cpu), cpu_present_map)
#else
#define num_online_cpus()       1
#define num_possible_cpus()     1
#define num_present_cpus()      1
#define cpu_online(cpu)         ((cpu) == 0)
#define cpu_possible(cpu)       ((cpu) == 0)
#define cpu_present(cpu)        ((cpu) == 0)
#endif

#define cpu_is_offline(cpu)     unlikely(!cpu_online(cpu))

#define for_each_possible_cpu(cpu)  for_each_cpu_mask_nr((cpu), cpu_possible_map)
#define for_each_online_cpu(cpu)    for_each_cpu_mask_nr((cpu), cpu_online_map)
#define for_each_present_cpu(cpu)   for_each_cpu_mask_nr((cpu), cpu_present_map)
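
/*
 * Typical use of the system maps (sketch; counts online cpus by hand
 * and checks the result against num_online_cpus(), assuming cpu
 * hotplug is excluded while the loop runs):
 *
 *      int cpu, n = 0;
 *
 *      for_each_online_cpu(cpu)
 *              n++;
 *      BUG_ON(n != num_online_cpus());
 */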

#endif /* __LINUX_CPUMASK_H */