arch/ia64/kernel/topology.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * This file contains NUMA specific variables and functions which can
 * be split away from DISCONTIGMEM and are used on NUMA machines with
 * contiguous memory.
 * 		2002/08/07 Erich Focht <efocht@ess.nec.de>
 * Populate cpu entries in sysfs for non-numa systems as well
 * 		Intel Corporation - Ashok Raj
 * 02/27/2006 Zhang, Yanmin
 *		Populate cpu cache entries in sysfs for cpu cache info
 */

#include <linux/config.h>
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/node.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/nodemask.h>
#include <linux/notifier.h>
#include <asm/mmzone.h>
#include <asm/numa.h>
#include <asm/cpu.h>

#ifdef CONFIG_NUMA
static struct node *sysfs_nodes;
#endif
static struct ia64_cpu *sysfs_cpus;

int arch_register_cpu(int num)
{
	struct node *parent = NULL;

#ifdef CONFIG_NUMA
	parent = &sysfs_nodes[cpu_to_node(num)];
#endif /* CONFIG_NUMA */

#if defined (CONFIG_ACPI) && defined (CONFIG_HOTPLUG_CPU)
	/*
	 * If CPEI cannot be re-targeted, and this cpu is
	 * the CPEI target, then don't create the control file.
	 */
	if (!can_cpei_retarget() && is_cpu_cpei_target(num))
		sysfs_cpus[num].cpu.no_control = 1;
#endif
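
	/*
	 * Illustrative note (not in the original source): with no_control
	 * set, register_cpu() is expected to skip creating the per-cpu
	 * "online" control file, so the CPEI target cpu cannot be taken
	 * offline through /sys/devices/system/cpu/cpuN/online.
	 */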

	return register_cpu(&sysfs_cpus[num].cpu, num, parent);
}

#ifdef CONFIG_HOTPLUG_CPU

void arch_unregister_cpu(int num)
{
	struct node *parent = NULL;

#ifdef CONFIG_NUMA
	int node = cpu_to_node(num);
	parent = &sysfs_nodes[node];
#endif /* CONFIG_NUMA */

	return unregister_cpu(&sysfs_cpus[num].cpu, parent);
}
EXPORT_SYMBOL(arch_register_cpu);
EXPORT_SYMBOL(arch_unregister_cpu);
#endif /* CONFIG_HOTPLUG_CPU */


static int __init topology_init(void)
{
	int i, err = 0;

#ifdef CONFIG_NUMA
	sysfs_nodes = kzalloc(sizeof(struct node) * MAX_NUMNODES, GFP_KERNEL);
	if (!sysfs_nodes) {
		err = -ENOMEM;
		goto out;
	}

	/*
	 * MCD - Do we want to register all ONLINE nodes, or all POSSIBLE nodes?
	 */
	for_each_online_node(i) {
		if ((err = register_node(&sysfs_nodes[i], i, 0)))
			goto out;
	}
#endif

	sysfs_cpus = kzalloc(sizeof(struct ia64_cpu) * NR_CPUS, GFP_KERNEL);
	if (!sysfs_cpus) {
		err = -ENOMEM;
		goto out;
	}

	for_each_present_cpu(i) {
		if ((err = arch_register_cpu(i)))
			goto out;
	}
out:
	return err;
}

subsys_initcall(topology_init);
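
/*
 * Note (added for clarity, not in the original): subsys_initcall() runs
 * earlier than device_initcall(), so the cpu sysdev objects registered
 * above already exist by the time cache_sysfs_init() below attaches the
 * per-cpu cache kobjects to them.
 */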

/*
 * Export cpu cache information through sysfs
 */

/*
 * String arrays for pretty printing
 */
static const char *cache_types[] = {
	"",			/* not used */
	"Instruction",
	"Data",
	"Unified"		/* unified */
};

static const char *cache_mattrib[] = {
	"WriteThrough",
	"WriteBack",
	"",			/* reserved */
	""			/* reserved */
};

struct cache_info {
	pal_cache_config_info_t cci;
	cpumask_t shared_cpu_map;
	int level;
	int type;
	struct kobject kobj;
};

struct cpu_cache_info {
	struct cache_info *cache_leaves;
	int num_cache_leaves;
	struct kobject kobj;
};

static struct cpu_cache_info all_cpu_cache_info[NR_CPUS];
#define LEAF_KOBJECT_PTR(x, y)	(&all_cpu_cache_info[x].cache_leaves[y])

#ifdef CONFIG_SMP
static void cache_shared_cpu_map_setup(unsigned int cpu,
		struct cache_info *this_leaf)
{
	pal_cache_shared_info_t	csi;
	int num_shared, i = 0;
	unsigned int j;

	if (cpu_data(cpu)->threads_per_core <= 1 &&
		cpu_data(cpu)->cores_per_socket <= 1) {
		cpu_set(cpu, this_leaf->shared_cpu_map);
		return;
	}

	if (ia64_pal_cache_shared_info(this_leaf->level,
					this_leaf->type,
					0,
					&csi) != PAL_STATUS_SUCCESS)
		return;

	num_shared = (int) csi.num_shared;
	do {
		for_each_possible_cpu(j)
			if (cpu_data(cpu)->socket_id == cpu_data(j)->socket_id
				&& cpu_data(j)->core_id == csi.log1_cid
				&& cpu_data(j)->thread_id == csi.log1_tid)
				cpu_set(j, this_leaf->shared_cpu_map);

		i++;
	} while (i < num_shared &&
		ia64_pal_cache_shared_info(this_leaf->level,
					this_leaf->type,
					i,
					&csi) == PAL_STATUS_SUCCESS);
}
#else
static void cache_shared_cpu_map_setup(unsigned int cpu,
		struct cache_info *this_leaf)
{
	cpu_set(cpu, this_leaf->shared_cpu_map);
	return;
}
#endif
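
/*
 * Sketch of the SMP variant above (comment added for clarity, not in the
 * original): each successful ia64_pal_cache_shared_info() call describes
 * one logical processor sharing the cache (log1_cid/log1_tid), so the
 * do/while loop queries proc_number 0 .. num_shared-1 and marks every cpu
 * on the same socket whose core_id/thread_id match the returned pair.
 */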

static ssize_t show_coherency_line_size(struct cache_info *this_leaf,
					char *buf)
{
	return sprintf(buf, "%u\n", 1 << this_leaf->cci.pcci_line_size);
}

static ssize_t show_ways_of_associativity(struct cache_info *this_leaf,
					char *buf)
{
	return sprintf(buf, "%u\n", this_leaf->cci.pcci_assoc);
}

static ssize_t show_attributes(struct cache_info *this_leaf, char *buf)
{
	return sprintf(buf,
			"%s\n",
			cache_mattrib[this_leaf->cci.pcci_cache_attr]);
}

static ssize_t show_size(struct cache_info *this_leaf, char *buf)
{
	return sprintf(buf, "%uK\n", this_leaf->cci.pcci_cache_size / 1024);
}

static ssize_t show_number_of_sets(struct cache_info *this_leaf, char *buf)
{
	unsigned number_of_sets = this_leaf->cci.pcci_cache_size;
	number_of_sets /= this_leaf->cci.pcci_assoc;
	number_of_sets /= 1 << this_leaf->cci.pcci_line_size;

	return sprintf(buf, "%u\n", number_of_sets);
}
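
/*
 * Worked example (illustrative, not in the original): pcci_cache_size is
 * in bytes and pcci_line_size is log2 of the line size, so a 256KB, 8-way
 * cache with 64-byte lines (pcci_line_size == 6) reports
 * 262144 / 8 / 64 == 512 sets.
 */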

static ssize_t show_shared_cpu_map(struct cache_info *this_leaf, char *buf)
{
	ssize_t	len;
	cpumask_t shared_cpu_map;

	cpus_and(shared_cpu_map, this_leaf->shared_cpu_map, cpu_online_map);
	len = cpumask_scnprintf(buf, NR_CPUS+1, shared_cpu_map);
	len += sprintf(buf+len, "\n");
	return len;
}
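
/*
 * Example output (illustrative): cpumask_scnprintf() prints the mask as
 * hex words, so cpus 0 and 1 sharing a cache would typically show up as
 * "00000003" (the exact field width depends on NR_CPUS).
 */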

static ssize_t show_type(struct cache_info *this_leaf, char *buf)
{
	int type = this_leaf->type + this_leaf->cci.pcci_unified;
	return sprintf(buf, "%s\n", cache_types[type]);
}

static ssize_t show_level(struct cache_info *this_leaf, char *buf)
{
	return sprintf(buf, "%u\n", this_leaf->level);
}

struct cache_attr {
	struct attribute attr;
	ssize_t (*show)(struct cache_info *, char *);
	ssize_t (*store)(struct cache_info *, const char *, size_t count);
};

#ifdef define_one_ro
	#undef define_one_ro
#endif
#define define_one_ro(_name) \
	static struct cache_attr _name = \
		__ATTR(_name, 0444, show_##_name, NULL)

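/*
 * For illustration (comment not in the original): define_one_ro(level)
 * expands to roughly
 *
 *	static struct cache_attr level =
 *		__ATTR(level, 0444, show_level, NULL);
 *
 * i.e. a read-only (mode 0444) sysfs attribute named "level" whose reads
 * are served by show_level().
 */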
define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(ways_of_associativity);
define_one_ro(size);
define_one_ro(number_of_sets);
define_one_ro(shared_cpu_map);
define_one_ro(attributes);

static struct attribute *cache_default_attrs[] = {
	&type.attr,
	&level.attr,
	&coherency_line_size.attr,
	&ways_of_associativity.attr,
	&attributes.attr,
	&size.attr,
	&number_of_sets.attr,
	&shared_cpu_map.attr,
	NULL
};

#define to_object(k)	container_of(k, struct cache_info, kobj)
#define to_attr(a)	container_of(a, struct cache_attr, attr)

static ssize_t cache_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cache_attr *fattr = to_attr(attr);
	struct cache_info *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->show ? fattr->show(this_leaf, buf) : 0;
	return ret;
}

static struct sysfs_ops cache_sysfs_ops = {
	.show = cache_show
};

static struct kobj_type cache_ktype = {
	.sysfs_ops = &cache_sysfs_ops,
	.default_attrs = cache_default_attrs,
};

static struct kobj_type cache_ktype_percpu_entry = {
	.sysfs_ops = &cache_sysfs_ops,
};

static void __cpuinit cpu_cache_sysfs_exit(unsigned int cpu)
{
	kfree(all_cpu_cache_info[cpu].cache_leaves);
	all_cpu_cache_info[cpu].cache_leaves = NULL;
	all_cpu_cache_info[cpu].num_cache_leaves = 0;
	memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));
	return;
}

static int __cpuinit cpu_cache_sysfs_init(unsigned int cpu)
{
	u64 i, levels, unique_caches;
	pal_cache_config_info_t cci;
	int j;
	s64 status;
	struct cache_info *this_cache;
	int num_cache_leaves = 0;

	if ((status = ia64_pal_cache_summary(&levels, &unique_caches)) != 0) {
		printk(KERN_ERR "ia64_pal_cache_summary=%ld\n", status);
		return -1;
	}

	this_cache = kzalloc(sizeof(struct cache_info) * unique_caches,
			GFP_KERNEL);
	if (this_cache == NULL)
		return -ENOMEM;

	for (i = 0; i < levels; i++) {
		for (j = 2; j > 0; j--) {
			if ((status = ia64_pal_cache_config_info(i, j, &cci)) !=
					PAL_STATUS_SUCCESS)
				continue;

			this_cache[num_cache_leaves].cci = cci;
			this_cache[num_cache_leaves].level = i + 1;
			this_cache[num_cache_leaves].type = j;

			cache_shared_cpu_map_setup(cpu,
					&this_cache[num_cache_leaves]);
			num_cache_leaves++;
		}
	}

	all_cpu_cache_info[cpu].cache_leaves = this_cache;
	all_cpu_cache_info[cpu].num_cache_leaves = num_cache_leaves;

	memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));

	return 0;
}

/* Add cache interface for CPU device */
static int __cpuinit cache_add_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i, j;
	struct cache_info *this_object;
	int retval = 0;
	cpumask_t oldmask;

	if (all_cpu_cache_info[cpu].kobj.parent)
		return 0;

	oldmask = current->cpus_allowed;
	retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
	if (unlikely(retval))
		return retval;

	retval = cpu_cache_sysfs_init(cpu);
	set_cpus_allowed(current, oldmask);
	if (unlikely(retval < 0))
		return retval;

	all_cpu_cache_info[cpu].kobj.parent = &sys_dev->kobj;
	kobject_set_name(&all_cpu_cache_info[cpu].kobj, "%s", "cache");
	all_cpu_cache_info[cpu].kobj.ktype = &cache_ktype_percpu_entry;
	retval = kobject_register(&all_cpu_cache_info[cpu].kobj);

	for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++) {
		this_object = LEAF_KOBJECT_PTR(cpu, i);
		this_object->kobj.parent = &all_cpu_cache_info[cpu].kobj;
		kobject_set_name(&(this_object->kobj), "index%1lu", i);
		this_object->kobj.ktype = &cache_ktype;
		retval = kobject_register(&(this_object->kobj));
		if (unlikely(retval)) {
			for (j = 0; j < i; j++) {
				kobject_unregister(
					&(LEAF_KOBJECT_PTR(cpu, j)->kobj));
			}
			kobject_unregister(&all_cpu_cache_info[cpu].kobj);
			cpu_cache_sysfs_exit(cpu);
			break;
		}
	}
	return retval;
}
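
/*
 * Resulting sysfs layout (sketch, assuming the standard sysdev cpu path;
 * comment not in the original):
 *
 *	/sys/devices/system/cpu/cpuN/cache/indexM/type
 *	/sys/devices/system/cpu/cpuN/cache/indexM/level
 *	/sys/devices/system/cpu/cpuN/cache/indexM/size
 *	...
 *
 * with one indexM directory per cache leaf found by cpu_cache_sysfs_init().
 */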

/* Remove cache interface for CPU device */
static int __cpuinit cache_remove_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i;

	for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++)
		kobject_unregister(&(LEAF_KOBJECT_PTR(cpu, i)->kobj));

	if (all_cpu_cache_info[cpu].kobj.parent) {
		kobject_unregister(&all_cpu_cache_info[cpu].kobj);
		memset(&all_cpu_cache_info[cpu].kobj,
			0,
			sizeof(struct kobject));
	}

	cpu_cache_sysfs_exit(cpu);

	return 0;
}

/*
 * When a cpu is hot-plugged, check and initialize the
 * cache kobject if necessary
 */
static int cache_cpu_callback(struct notifier_block *nfb,
		unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct sys_device *sys_dev;

	sys_dev = get_cpu_sysdev(cpu);
	switch (action) {
	case CPU_ONLINE:
		cache_add_dev(sys_dev);
		break;
	case CPU_DEAD:
		cache_remove_dev(sys_dev);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block cache_cpu_notifier =
{
	.notifier_call = cache_cpu_callback
};

static int __cpuinit cache_sysfs_init(void)
{
	int i;

	for_each_online_cpu(i) {
		cache_cpu_callback(&cache_cpu_notifier, CPU_ONLINE,
				(void *)(long)i);
	}

	register_cpu_notifier(&cache_cpu_notifier);

	return 0;
}

device_initcall(cache_sysfs_init);