8 #include <linux/bitmap.h>
/* Highest possible cpu number + 1 (1-based count), lazily set by set_max_cpu_num(). */
static int max_cpu_num;
/* Highest possible node number + 1 (1-based count), lazily set by set_max_node_num(). */
static int max_node_num;
/* cpu -> NUMA node lookup table, allocated by init_cpunode_map(). */
static int *cpunode_map;
15 static struct cpu_map
*cpu_map__default_new(void)
20 nr_cpus
= sysconf(_SC_NPROCESSORS_ONLN
);
24 cpus
= malloc(sizeof(*cpus
) + nr_cpus
* sizeof(int));
27 for (i
= 0; i
< nr_cpus
; ++i
)
31 atomic_set(&cpus
->refcnt
, 1);
37 static struct cpu_map
*cpu_map__trim_new(int nr_cpus
, int *tmp_cpus
)
39 size_t payload_size
= nr_cpus
* sizeof(int);
40 struct cpu_map
*cpus
= malloc(sizeof(*cpus
) + payload_size
);
44 memcpy(cpus
->map
, tmp_cpus
, payload_size
);
45 atomic_set(&cpus
->refcnt
, 1);
51 struct cpu_map
*cpu_map__read(FILE *file
)
53 struct cpu_map
*cpus
= NULL
;
55 int *tmp_cpus
= NULL
, *tmp
;
63 n
= fscanf(file
, "%u%c", &cpu
, &sep
);
67 int new_max
= nr_cpus
+ cpu
- prev
- 1;
69 if (new_max
>= max_entries
) {
70 max_entries
= new_max
+ MAX_NR_CPUS
/ 2;
71 tmp
= realloc(tmp_cpus
, max_entries
* sizeof(int));
78 tmp_cpus
[nr_cpus
++] = prev
;
80 if (nr_cpus
== max_entries
) {
81 max_entries
+= MAX_NR_CPUS
;
82 tmp
= realloc(tmp_cpus
, max_entries
* sizeof(int));
88 tmp_cpus
[nr_cpus
++] = cpu
;
89 if (n
== 2 && sep
== '-')
93 if (n
== 1 || sep
== '\n')
98 cpus
= cpu_map__trim_new(nr_cpus
, tmp_cpus
);
100 cpus
= cpu_map__default_new();
/*
 * Read the online cpu list from sysfs; fall back to the sysconf-based
 * default map when the file is unavailable.
 */
static struct cpu_map *cpu_map__read_all_cpu_map(void)
{
	struct cpu_map *cpus = NULL;
	FILE *onlnf;

	onlnf = fopen("/sys/devices/system/cpu/online", "r");
	if (!onlnf)
		return cpu_map__default_new();

	cpus = cpu_map__read(onlnf);
	fclose(onlnf);
	return cpus;
}
120 struct cpu_map
*cpu_map__new(const char *cpu_list
)
122 struct cpu_map
*cpus
= NULL
;
123 unsigned long start_cpu
, end_cpu
= 0;
126 int *tmp_cpus
= NULL
, *tmp
;
130 return cpu_map__read_all_cpu_map();
132 if (!isdigit(*cpu_list
))
135 while (isdigit(*cpu_list
)) {
137 start_cpu
= strtoul(cpu_list
, &p
, 0);
138 if (start_cpu
>= INT_MAX
139 || (*p
!= '\0' && *p
!= ',' && *p
!= '-'))
145 end_cpu
= strtoul(cpu_list
, &p
, 0);
147 if (end_cpu
>= INT_MAX
|| (*p
!= '\0' && *p
!= ','))
150 if (end_cpu
< start_cpu
)
156 for (; start_cpu
<= end_cpu
; start_cpu
++) {
157 /* check for duplicates */
158 for (i
= 0; i
< nr_cpus
; i
++)
159 if (tmp_cpus
[i
] == (int)start_cpu
)
162 if (nr_cpus
== max_entries
) {
163 max_entries
+= MAX_NR_CPUS
;
164 tmp
= realloc(tmp_cpus
, max_entries
* sizeof(int));
169 tmp_cpus
[nr_cpus
++] = (int)start_cpu
;
178 cpus
= cpu_map__trim_new(nr_cpus
, tmp_cpus
);
180 cpus
= cpu_map__default_new();
187 static struct cpu_map
*cpu_map__from_entries(struct cpu_map_entries
*cpus
)
191 map
= cpu_map__empty_new(cpus
->nr
);
195 for (i
= 0; i
< cpus
->nr
; i
++) {
197 * Special treatment for -1, which is not real cpu number,
198 * and we need to use (int) -1 to initialize map[i],
199 * otherwise it would become 65535.
201 if (cpus
->cpu
[i
] == (u16
) -1)
204 map
->map
[i
] = (int) cpus
->cpu
[i
];
211 static struct cpu_map
*cpu_map__from_mask(struct cpu_map_mask
*mask
)
214 int nr
, nbits
= mask
->nr
* mask
->long_size
* BITS_PER_BYTE
;
216 nr
= bitmap_weight(mask
->mask
, nbits
);
218 map
= cpu_map__empty_new(nr
);
222 for_each_set_bit(cpu
, mask
->mask
, nbits
)
229 struct cpu_map
*cpu_map__new_data(struct cpu_map_data
*data
)
231 if (data
->type
== PERF_CPU_MAP__CPUS
)
232 return cpu_map__from_entries((struct cpu_map_entries
*)data
->data
);
234 return cpu_map__from_mask((struct cpu_map_mask
*)data
->data
);
/* Print the map as a compact cpu list ("0-3,7") followed by a newline. */
size_t cpu_map__fprintf(struct cpu_map *map, FILE *fp)
{
#define BUFSIZE 1024
	char buf[BUFSIZE];

	cpu_map__snprint(map, buf, sizeof(buf));
	return fprintf(fp, "%s\n", buf);
#undef BUFSIZE
}
247 struct cpu_map
*cpu_map__dummy_new(void)
249 struct cpu_map
*cpus
= malloc(sizeof(*cpus
) + sizeof(int));
254 atomic_set(&cpus
->refcnt
, 1);
260 struct cpu_map
*cpu_map__empty_new(int nr
)
262 struct cpu_map
*cpus
= malloc(sizeof(*cpus
) + sizeof(int) * nr
);
268 for (i
= 0; i
< nr
; i
++)
271 atomic_set(&cpus
->refcnt
, 1);
277 static void cpu_map__delete(struct cpu_map
*map
)
280 WARN_ONCE(atomic_read(&map
->refcnt
) != 0,
281 "cpu_map refcnt unbalanced\n");
286 struct cpu_map
*cpu_map__get(struct cpu_map
*map
)
289 atomic_inc(&map
->refcnt
);
293 void cpu_map__put(struct cpu_map
*map
)
295 if (map
&& atomic_dec_and_test(&map
->refcnt
))
296 cpu_map__delete(map
);
299 static int cpu__get_topology_int(int cpu
, const char *name
, int *value
)
303 snprintf(path
, PATH_MAX
,
304 "devices/system/cpu/cpu%d/topology/%s", cpu
, name
);
306 return sysfs__read_int(path
, value
);
/* Return the physical package (socket) id of @cpu, or the sysfs error code. */
int cpu_map__get_socket_id(int cpu)
{
	int value, ret = cpu__get_topology_int(cpu, "physical_package_id", &value);
	return ret ?: value;
}
315 int cpu_map__get_socket(struct cpu_map
*map
, int idx
, void *data __maybe_unused
)
324 return cpu_map__get_socket_id(cpu
);
/* qsort comparator for ints: ascending order. */
static int cmp_ids(const void *a, const void *b)
{
	const int *lhs = a;
	const int *rhs = b;

	return *lhs - *rhs;
}
332 int cpu_map__build_map(struct cpu_map
*cpus
, struct cpu_map
**res
,
333 int (*f
)(struct cpu_map
*map
, int cpu
, void *data
),
340 /* allocate as much as possible */
341 c
= calloc(1, sizeof(*c
) + nr
* sizeof(int));
345 for (cpu
= 0; cpu
< nr
; cpu
++) {
346 s1
= f(cpus
, cpu
, data
);
347 for (s2
= 0; s2
< c
->nr
; s2
++) {
348 if (s1
== c
->map
[s2
])
356 /* ensure we process id in increasing order */
357 qsort(c
->map
, c
->nr
, sizeof(int), cmp_ids
);
359 atomic_set(&c
->refcnt
, 1);
/* Return the core id of @cpu, or the sysfs error code. */
int cpu_map__get_core_id(int cpu)
{
	int value, ret = cpu__get_topology_int(cpu, "core_id", &value);
	return ret ?: value;
}
370 int cpu_map__get_core(struct cpu_map
*map
, int idx
, void *data
)
379 cpu
= cpu_map__get_core_id(cpu
);
381 s
= cpu_map__get_socket(map
, idx
, data
);
386 * encode socket in upper 16 bits
387 * core_id is relative to socket, and
388 * we need a global id. So we combine
391 return (s
<< 16) | (cpu
& 0xffff);
394 int cpu_map__build_socket_map(struct cpu_map
*cpus
, struct cpu_map
**sockp
)
396 return cpu_map__build_map(cpus
, sockp
, cpu_map__get_socket
, NULL
);
399 int cpu_map__build_core_map(struct cpu_map
*cpus
, struct cpu_map
**corep
)
401 return cpu_map__build_map(cpus
, corep
, cpu_map__get_core
, NULL
);
/* setup simple routines to easily access node numbers given a cpu number */

/*
 * Parse the last (highest) number out of a sysfs range file such as
 * "possible" ("0-31" or "0,2-5") found at @path, store it + 1 in *max.
 * Returns 0 on success, -1 on read or parse failure.
 */
static int get_max_num(char *path, int *max)
{
	size_t num;
	char *buf;
	int err = 0;

	if (filename__read_str(path, &buf, &num))
		return -1;

	buf[num] = '\0';

	/* start on the right, to find highest node num */
	while (--num) {
		if ((buf[num] == ',') || (buf[num] == '-')) {
			num++;
			break;
		}
	}
	if (sscanf(&buf[num], "%d", max) < 1) {
		err = -1;
		goto out;
	}

	/* convert from 0-based to 1-based */
	(*max)++;

out:
	free(buf);
	return err;
}
436 /* Determine highest possible cpu in the system for sparse allocation */
437 static void set_max_cpu_num(void)
446 mnt
= sysfs__mountpoint();
450 /* get the highest possible cpu number for a sparse allocation */
451 ret
= snprintf(path
, PATH_MAX
, "%s/devices/system/cpu/possible", mnt
);
452 if (ret
== PATH_MAX
) {
453 pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX
);
457 ret
= get_max_num(path
, &max_cpu_num
);
461 pr_err("Failed to read max cpus, using default of %d\n", max_cpu_num
);
464 /* Determine highest possible node in the system for sparse allocation */
465 static void set_max_node_num(void)
474 mnt
= sysfs__mountpoint();
478 /* get the highest possible cpu number for a sparse allocation */
479 ret
= snprintf(path
, PATH_MAX
, "%s/devices/system/node/possible", mnt
);
480 if (ret
== PATH_MAX
) {
481 pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX
);
485 ret
= get_max_num(path
, &max_node_num
);
489 pr_err("Failed to read max nodes, using default of %d\n", max_node_num
);
492 int cpu__max_node(void)
494 if (unlikely(!max_node_num
))
500 int cpu__max_cpu(void)
502 if (unlikely(!max_cpu_num
))
508 int cpu__get_node(int cpu
)
510 if (unlikely(cpunode_map
== NULL
)) {
511 pr_debug("cpu_map not initialized\n");
515 return cpunode_map
[cpu
];
518 static int init_cpunode_map(void)
525 cpunode_map
= calloc(max_cpu_num
, sizeof(int));
527 pr_err("%s: calloc failed\n", __func__
);
531 for (i
= 0; i
< max_cpu_num
; i
++)
537 int cpu__setup_cpunode_map(void)
539 struct dirent
*dent1
, *dent2
;
541 unsigned int cpu
, mem
;
547 /* initialize globals */
548 if (init_cpunode_map())
551 mnt
= sysfs__mountpoint();
555 n
= snprintf(path
, PATH_MAX
, "%s/devices/system/node", mnt
);
557 pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX
);
561 dir1
= opendir(path
);
565 /* walk tree and setup map */
566 while ((dent1
= readdir(dir1
)) != NULL
) {
567 if (dent1
->d_type
!= DT_DIR
|| sscanf(dent1
->d_name
, "node%u", &mem
) < 1)
570 n
= snprintf(buf
, PATH_MAX
, "%s/%s", path
, dent1
->d_name
);
572 pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX
);
579 while ((dent2
= readdir(dir2
)) != NULL
) {
580 if (dent2
->d_type
!= DT_LNK
|| sscanf(dent2
->d_name
, "cpu%u", &cpu
) < 1)
582 cpunode_map
[cpu
] = mem
;
/* Return true when @cpu appears anywhere in @cpus. */
bool cpu_map__has(struct cpu_map *cpus, int cpu)
{
	return cpu_map__idx(cpus, cpu) != -1;
}
595 int cpu_map__idx(struct cpu_map
*cpus
, int cpu
)
599 for (i
= 0; i
< cpus
->nr
; ++i
) {
600 if (cpus
->map
[i
] == cpu
)
607 int cpu_map__cpu(struct cpu_map
*cpus
, int idx
)
609 return cpus
->map
[idx
];
612 size_t cpu_map__snprint(struct cpu_map
*map
, char *buf
, size_t size
)
614 int i
, cpu
, start
= -1;
618 #define COMMA first ? "" : ","
620 for (i
= 0; i
< map
->nr
+ 1; i
++) {
621 bool last
= i
== map
->nr
;
623 cpu
= last
? INT_MAX
: map
->map
[i
];
628 ret
+= snprintf(buf
+ ret
, size
- ret
,
632 } else if (((i
- start
) != (cpu
- map
->map
[start
])) || last
) {
636 ret
+= snprintf(buf
+ ret
, size
- ret
,
640 ret
+= snprintf(buf
+ ret
, size
- ret
,
642 map
->map
[start
], map
->map
[end
]);
651 pr_debug("cpumask list: %s\n", buf
);
/* (trailing web-viewer footer removed — not part of the source file) */