/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * This file contains NUMA specific variables and functions which can
 * be split away from DISCONTIGMEM and are used on NUMA machines with
 * contiguous memory.
 *
 * 2002/08/07 Erich Focht <efocht@ess.nec.de>
 *	Populate cpu entries in sysfs for non-numa systems as well
 *	Intel Corporation - Ashok Raj
 * 02/27/2006 Zhang, Yanmin
 *	Populate cpu cache entries in sysfs for cpu cache info
 */

#include <linux/config.h>
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/node.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/nodemask.h>
#include <linux/notifier.h>
#include <asm/mmzone.h>
#include <asm/numa.h>
#include <asm/cpu.h>

static struct ia64_cpu *sysfs_cpus;

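/*
 * register_cpu() hooks each logical CPU into the sysfs device tree;
 * on NUMA configurations the owning node device is passed as the
 * parent, so the CPU also appears under its node in sysfs.
 */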
int arch_register_cpu(int num)
{
	struct node *parent = NULL;

#ifdef CONFIG_NUMA
	parent = &node_devices[cpu_to_node(num)];
#endif /* CONFIG_NUMA */

#if defined (CONFIG_ACPI) && defined (CONFIG_HOTPLUG_CPU)
	/*
	 * If CPEI cannot be re-targetted, and this is
	 * CPEI target, then don't create the control file
	 */
	if (!can_cpei_retarget() && is_cpu_cpei_target(num))
		sysfs_cpus[num].cpu.no_control = 1;
#endif

	return register_cpu(&sysfs_cpus[num].cpu, num, parent);
}

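/*
 * The hotplug path below mirrors registration: the CPU is looked up
 * under its node parent (if any) and unregistered from sysfs.
 */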
#ifdef CONFIG_HOTPLUG_CPU

void arch_unregister_cpu(int num)
{
	struct node *parent = NULL;

#ifdef CONFIG_NUMA
	int node = cpu_to_node(num);
	parent = &node_devices[node];
#endif /* CONFIG_NUMA */

	return unregister_cpu(&sysfs_cpus[num].cpu, parent);
}
EXPORT_SYMBOL(arch_register_cpu);
EXPORT_SYMBOL(arch_unregister_cpu);
#endif /*CONFIG_HOTPLUG_CPU*/

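/*
 * Boot-time entry point: register every online node, allocate the
 * per-CPU sysfs bookkeeping, and register every present CPU.
 */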
static int __init topology_init(void)
{
	int i, err = 0;

#ifdef CONFIG_NUMA
	/*
	 * MCD - Do we want to register all ONLINE nodes, or all POSSIBLE nodes?
	 */
	for_each_online_node(i) {
		if ((err = register_one_node(i)))
			goto out;
	}
#endif

	sysfs_cpus = kzalloc(sizeof(struct ia64_cpu) * NR_CPUS, GFP_KERNEL);
	if (!sysfs_cpus)
		panic("kzalloc in topology_init failed - NR_CPUS too big?");

	for_each_present_cpu(i) {
		if ((err = arch_register_cpu(i)))
			goto out;
	}
out:
	return err;
}

subsys_initcall(topology_init);

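/*
 * subsys_initcall runs before device_initcall, so the CPU sysdev
 * objects created here already exist when cache_sysfs_init() (at the
 * bottom of this file) attaches per-CPU cache directories to them.
 */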
/*
 * Export cpu cache information through sysfs
 */

/*
 * A bunch of string arrays for pretty printing
 */
static const char *cache_types[] = {
	"",			/* not used */
	"Instruction",
	"Data",
	"Unified"		/* unified */
};

static const char *cache_mattrib[] = {
	"WriteThrough",
	"WriteBack",
	"",			/* reserved */
	""			/* reserved */
};

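/*
 * One cache_info per cache "leaf" (a level/type pair reported by
 * PAL); each leaf embeds its own kobject so it can be exported as an
 * indexN directory under the per-CPU cache directory.
 */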
struct cache_info {
	pal_cache_config_info_t cci;
	cpumask_t shared_cpu_map;
	int level;
	int type;
	struct kobject kobj;
};

struct cpu_cache_info {
	struct cache_info *cache_leaves;
	int num_cache_leaves;
	struct kobject kobj;
};

static struct cpu_cache_info all_cpu_cache_info[NR_CPUS];
#define LEAF_KOBJECT_PTR(x,y)	(&all_cpu_cache_info[x].cache_leaves[y])

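/*
 * On SMP, PAL is asked which logical CPUs share this cache leaf, and
 * shared_cpu_map is built from the (socket, core, thread) coordinates
 * it returns; the UP variant further down simply maps the CPU itself.
 */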
#ifdef CONFIG_SMP
static void cache_shared_cpu_map_setup(unsigned int cpu,
		struct cache_info * this_leaf)
{
	pal_cache_shared_info_t csi;
	int num_shared, i = 0;
	unsigned int j;

	if (cpu_data(cpu)->threads_per_core <= 1 &&
		cpu_data(cpu)->cores_per_socket <= 1) {
		cpu_set(cpu, this_leaf->shared_cpu_map);
		return;
	}

	if (ia64_pal_cache_shared_info(this_leaf->level,
					this_leaf->type,
					0,
					&csi) != PAL_STATUS_SUCCESS)
		return;

	num_shared = (int) csi.num_shared;
	do {
		for_each_possible_cpu(j)
			if (cpu_data(cpu)->socket_id == cpu_data(j)->socket_id
				&& cpu_data(j)->core_id == csi.log1_cid
				&& cpu_data(j)->thread_id == csi.log1_tid)
				cpu_set(j, this_leaf->shared_cpu_map);

		i++;
	} while (i < num_shared &&
		ia64_pal_cache_shared_info(this_leaf->level,
				this_leaf->type,
				i,
				&csi) == PAL_STATUS_SUCCESS);
}
#else
static void cache_shared_cpu_map_setup(unsigned int cpu,
		struct cache_info * this_leaf)
{
	cpu_set(cpu, this_leaf->shared_cpu_map);
	return;
}
#endif

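/*
 * The show_* helpers below each format one cache attribute; they are
 * bound to sysfs files of the same name via define_one_ro() further
 * down.
 */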
static ssize_t show_coherency_line_size(struct cache_info *this_leaf,
					char *buf)
{
	return sprintf(buf, "%u\n", 1 << this_leaf->cci.pcci_line_size);
}

static ssize_t show_ways_of_associativity(struct cache_info *this_leaf,
					char *buf)
{
	return sprintf(buf, "%u\n", this_leaf->cci.pcci_assoc);
}

static ssize_t show_attributes(struct cache_info *this_leaf, char *buf)
{
	return sprintf(buf,
			"%s\n",
			cache_mattrib[this_leaf->cci.pcci_cache_attr]);
}

static ssize_t show_size(struct cache_info *this_leaf, char *buf)
{
	return sprintf(buf, "%uK\n", this_leaf->cci.pcci_cache_size / 1024);
}

static ssize_t show_number_of_sets(struct cache_info *this_leaf, char *buf)
{
	unsigned number_of_sets = this_leaf->cci.pcci_cache_size;

	number_of_sets /= this_leaf->cci.pcci_assoc;
	number_of_sets /= 1 << this_leaf->cci.pcci_line_size;

	return sprintf(buf, "%u\n", number_of_sets);
}

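/*
 * Worked example (illustrative numbers, not from the source): for a
 * 256 KB, 8-way cache with 64-byte lines, PAL reports
 * pcci_cache_size = 262144, pcci_assoc = 8 and pcci_line_size = 6
 * (the line size is stored as a power of two), giving
 * 262144 / 8 / (1 << 6) = 512 sets.
 */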
static ssize_t show_shared_cpu_map(struct cache_info *this_leaf, char *buf)
{
	ssize_t len;
	cpumask_t shared_cpu_map;

	cpus_and(shared_cpu_map, this_leaf->shared_cpu_map, cpu_online_map);
	len = cpumask_scnprintf(buf, NR_CPUS+1, shared_cpu_map);
	len += sprintf(buf+len, "\n");
	return len;
}

static ssize_t show_type(struct cache_info *this_leaf, char *buf)
{
	int type = this_leaf->type + this_leaf->cci.pcci_unified;
	return sprintf(buf, "%s\n", cache_types[type]);
}

static ssize_t show_level(struct cache_info *this_leaf, char *buf)
{
	return sprintf(buf, "%u\n", this_leaf->level);
}

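/*
 * sysfs glue: each file is a cache_attr whose show handler receives
 * the containing cache_info; define_one_ro() stamps out the read-only
 * attribute definitions.
 */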
struct cache_attr {
	struct attribute attr;
	ssize_t (*show)(struct cache_info *, char *);
	ssize_t (*store)(struct cache_info *, const char *, size_t count);
};

#define define_one_ro(_name) \
	static struct cache_attr _name = \
		__ATTR(_name, 0444, show_##_name, NULL)

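/*
 * For illustration (not part of the original source):
 * define_one_ro(level) expands to roughly
 *
 *	static struct cache_attr level =
 *		__ATTR(level, 0444, show_level, NULL);
 *
 * i.e. a world-readable attribute named "level" backed by
 * show_level(), with no store handler.
 */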
define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(ways_of_associativity);
define_one_ro(size);
define_one_ro(number_of_sets);
define_one_ro(shared_cpu_map);
define_one_ro(attributes);

static struct attribute * cache_default_attrs[] = {
	&type.attr,
	&level.attr,
	&coherency_line_size.attr,
	&ways_of_associativity.attr,
	&attributes.attr,
	&size.attr,
	&number_of_sets.attr,
	&shared_cpu_map.attr,
	NULL
};

#define to_object(k) container_of(k, struct cache_info, kobj)
#define to_attr(a) container_of(a, struct cache_attr, attr)

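/*
 * Standard container_of() pattern: sysfs hands back pointers to the
 * embedded kobject/attribute, and these macros recover the enclosing
 * cache_info and cache_attr.
 */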
static ssize_t cache_show(struct kobject * kobj, struct attribute * attr, char * buf)
{
	struct cache_attr *fattr = to_attr(attr);
	struct cache_info *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->show ? fattr->show(this_leaf, buf) : 0;
	return ret;
}

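/*
 * Only .show is wired up below: all attributes are created with mode
 * 0444, and no store handler exists.
 */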
static struct sysfs_ops cache_sysfs_ops = {
	.show = cache_show
};

static struct kobj_type cache_ktype = {
	.sysfs_ops	= &cache_sysfs_ops,
	.default_attrs	= cache_default_attrs,
};

static struct kobj_type cache_ktype_percpu_entry = {
	.sysfs_ops	= &cache_sysfs_ops,
};

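/*
 * Two ktypes: cache_ktype carries the default attributes for each
 * indexN leaf directory, while cache_ktype_percpu_entry backs the
 * bare per-CPU "cache" directory, which exports no attributes itself.
 */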
static void __cpuinit cpu_cache_sysfs_exit(unsigned int cpu)
{
	kfree(all_cpu_cache_info[cpu].cache_leaves);
	all_cpu_cache_info[cpu].cache_leaves = NULL;
	all_cpu_cache_info[cpu].num_cache_leaves = 0;
	memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));
}

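/*
 * cpu_cache_sysfs_init() enumerates this CPU's caches via PAL: for
 * each level it probes type 2 (data/unified) and then type 1
 * (instruction), recording every configuration PAL reports as a
 * separate cache leaf.
 */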
static int __cpuinit cpu_cache_sysfs_init(unsigned int cpu)
{
	u64 i, levels, unique_caches;
	pal_cache_config_info_t cci;
	int j;
	s64 status;
	struct cache_info *this_cache;
	int num_cache_leaves = 0;

	if ((status = ia64_pal_cache_summary(&levels, &unique_caches)) != 0) {
		printk(KERN_ERR "ia64_pal_cache_summary=%ld\n", status);
		return -1;
	}

	this_cache = kzalloc(sizeof(struct cache_info) * unique_caches,
			GFP_KERNEL);
	if (this_cache == NULL)
		return -ENOMEM;

	for (i = 0; i < levels; i++) {
		for (j = 2; j > 0; j--) {
			if ((status = ia64_pal_cache_config_info(i, j, &cci)) !=
					PAL_STATUS_SUCCESS)
				continue;

			this_cache[num_cache_leaves].cci = cci;
			this_cache[num_cache_leaves].level = i + 1;
			this_cache[num_cache_leaves].type = j;

			cache_shared_cpu_map_setup(cpu,
					&this_cache[num_cache_leaves]);
			num_cache_leaves++;
		}
	}

	all_cpu_cache_info[cpu].cache_leaves = this_cache;
	all_cpu_cache_info[cpu].num_cache_leaves = num_cache_leaves;

	memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));

	return 0;
}

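/*
 * Note the affinity dance in cache_add_dev() below: PAL reports cache
 * information for the CPU it executes on, so the caller temporarily
 * pins itself to the target CPU around cpu_cache_sysfs_init() and
 * restores the old cpumask afterwards.
 */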
/* Add cache interface for CPU device */
static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i, j;
	struct cache_info *this_object;
	int retval = 0;
	cpumask_t oldmask;

	if (all_cpu_cache_info[cpu].kobj.parent)
		return 0;

	oldmask = current->cpus_allowed;
	retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
	if (unlikely(retval))
		return retval;

	retval = cpu_cache_sysfs_init(cpu);
	set_cpus_allowed(current, oldmask);
	if (unlikely(retval < 0))
		return retval;

	all_cpu_cache_info[cpu].kobj.parent = &sys_dev->kobj;
	kobject_set_name(&all_cpu_cache_info[cpu].kobj, "%s", "cache");
	all_cpu_cache_info[cpu].kobj.ktype = &cache_ktype_percpu_entry;
	retval = kobject_register(&all_cpu_cache_info[cpu].kobj);

	for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++) {
		this_object = LEAF_KOBJECT_PTR(cpu,i);
		this_object->kobj.parent = &all_cpu_cache_info[cpu].kobj;
		kobject_set_name(&(this_object->kobj), "index%1lu", i);
		this_object->kobj.ktype = &cache_ktype;
		retval = kobject_register(&(this_object->kobj));
		if (unlikely(retval)) {
			for (j = 0; j < i; j++) {
				kobject_unregister(
					&(LEAF_KOBJECT_PTR(cpu,j)->kobj));
			}
			kobject_unregister(&all_cpu_cache_info[cpu].kobj);
			cpu_cache_sysfs_exit(cpu);
			break;
		}
	}

	return retval;
}

/* Remove cache interface for CPU device */
static int __cpuinit cache_remove_dev(struct sys_device * sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i;

	for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++)
		kobject_unregister(&(LEAF_KOBJECT_PTR(cpu,i)->kobj));

	if (all_cpu_cache_info[cpu].kobj.parent) {
		kobject_unregister(&all_cpu_cache_info[cpu].kobj);
		memset(&all_cpu_cache_info[cpu].kobj,
			0,
			sizeof(struct kobject));
	}

	cpu_cache_sysfs_exit(cpu);

	return 0;
}

/*
 * When a cpu is hot-plugged, do a check and initiate
 * cache kobject if necessary
 */
static int cache_cpu_callback(struct notifier_block *nfb,
		unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct sys_device *sys_dev;

	sys_dev = get_cpu_sysdev(cpu);
	switch (action) {
	case CPU_ONLINE:
		cache_add_dev(sys_dev);
		break;
	case CPU_DEAD:
		cache_remove_dev(sys_dev);
		break;
	}
	return NOTIFY_OK;
}

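/*
 * The notifier keeps cache directories in sync with CPU hotplug;
 * cache_sysfs_init() also replays CPU_ONLINE for CPUs that were
 * already up before the notifier was registered.
 */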
static struct notifier_block cache_cpu_notifier =
{
	.notifier_call = cache_cpu_callback
};

static int __cpuinit cache_sysfs_init(void)
{
	int i;

	for_each_online_cpu(i) {
		cache_cpu_callback(&cache_cpu_notifier, CPU_ONLINE,
				(void *)(long)i);
	}

	register_cpu_notifier(&cache_cpu_notifier);

	return 0;
}

device_initcall(cache_sysfs_init);