powerpc/85xx: add sysfs for pw20 state and altivec idle
arch/powerpc/kernel/sysfs.c
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/cpumask.h>
#include <linux/notifier.h>

#include <asm/current.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/hvcall.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/smp.h>
#include <asm/pmc.h>
#include <asm/firmware.h>

#include "cacheinfo.h"

#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/lppaca.h>
#endif

static DEFINE_PER_CPU(struct cpu, cpu_devices);

/*
 * SMT snooze delay stuff, 64-bit only for now
 */

#ifdef CONFIG_PPC64

/* Time in microseconds we delay before sleeping in the idle loop */
DEFINE_PER_CPU(long, smt_snooze_delay) = { 100 };

static ssize_t store_smt_snooze_delay(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	ssize_t ret;
	long snooze;

	ret = sscanf(buf, "%ld", &snooze);
	if (ret != 1)
		return -EINVAL;

	per_cpu(smt_snooze_delay, cpu->dev.id) = snooze;
	update_smt_snooze_delay(cpu->dev.id, snooze);

	return count;
}

static ssize_t show_smt_snooze_delay(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);

	return sprintf(buf, "%ld\n", per_cpu(smt_snooze_delay, cpu->dev.id));
}

static DEVICE_ATTR(smt_snooze_delay, 0644, show_smt_snooze_delay,
		   store_smt_snooze_delay);

static int __init setup_smt_snooze_delay(char *str)
{
	unsigned int cpu;
	long snooze;

	if (!cpu_has_feature(CPU_FTR_SMT))
		return 1;

	snooze = simple_strtol(str, NULL, 10);
	for_each_possible_cpu(cpu)
		per_cpu(smt_snooze_delay, cpu) = snooze;

	return 1;
}
__setup("smt-snooze-delay=", setup_smt_snooze_delay);
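
/*
 * The delay can also be set for all CPUs at boot time via the
 * "smt-snooze-delay=<usecs>" kernel command-line parameter registered
 * above, e.g. smt-snooze-delay=100.
 */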

#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_FSL_BOOK3E
#define MAX_BIT		63

static u64 pw20_wt;
static u64 altivec_idle_wt;

static unsigned int get_idle_ticks_bit(u64 ns)
{
	u64 cycle;

	if (ns >= 10000)
		cycle = div_u64(ns + 500, 1000) * tb_ticks_per_usec;
	else
		cycle = div_u64(ns * tb_ticks_per_usec, 1000);

	if (!cycle)
		return 0;

	return ilog2(cycle);
}

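/*
 * Worked example (illustrative, assuming a 41 MHz timebase, i.e.
 * tb_ticks_per_usec == 41): a request of 1000ns takes the second branch
 * above, so cycle = (1000 * 41) / 1000 = 41 and ilog2(41) = 5, which
 * selects timebase bit TB[63 - 5] = TB[58] -- the 781~1560ns row of the
 * wait-time table documented further down.
 */

/*
 * PWRMGTCR0 is a per-core SPR, so it must be read and written on the
 * CPU in question; the helpers below run via smp_call_function_single().
 */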
static void do_show_pwrmgtcr0(void *val)
{
	u32 *value = val;

	*value = mfspr(SPRN_PWRMGTCR0);
}

static ssize_t show_pw20_state(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	u32 value;
	unsigned int cpu = dev->id;

	smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);

	value &= PWRMGTCR0_PW20_WAIT;

	return sprintf(buf, "%u\n", value ? 1 : 0);
}

static void do_store_pw20_state(void *val)
{
	u32 *value = val;
	u32 pw20_state;

	pw20_state = mfspr(SPRN_PWRMGTCR0);

	if (*value)
		pw20_state |= PWRMGTCR0_PW20_WAIT;
	else
		pw20_state &= ~PWRMGTCR0_PW20_WAIT;

	mtspr(SPRN_PWRMGTCR0, pw20_state);
}

static ssize_t store_pw20_state(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	u32 value;
	unsigned int cpu = dev->id;

	if (kstrtou32(buf, 0, &value))
		return -EINVAL;

	if (value > 1)
		return -EINVAL;

	smp_call_function_single(cpu, do_store_pw20_state, &value, 1);

	return count;
}

static ssize_t show_pw20_wait_time(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	u32 value;
	u64 tb_cycle = 1;
	u64 time;

	unsigned int cpu = dev->id;

	if (!pw20_wt) {
		smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);
		value = (value & PWRMGTCR0_PW20_ENT) >>
			PWRMGTCR0_PW20_ENT_SHIFT;

		tb_cycle = (tb_cycle << (MAX_BIT - value + 1));
		/* convert TB cycles to ns */
		if (tb_ticks_per_usec > 1000) {
			time = div_u64(tb_cycle, tb_ticks_per_usec / 1000);
		} else {
			u32 rem_us;

			time = div_u64_rem(tb_cycle, tb_ticks_per_usec,
					   &rem_us);
			time = time * 1000 + rem_us * 1000 / tb_ticks_per_usec;
		}
	} else {
		time = pw20_wt;
	}

	return sprintf(buf, "%llu\n", time > 0 ? time : 0);
}
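
/*
 * Both branches above compute tb_cycle * 1000 / tb_ticks_per_usec,
 * converting timebase ticks to nanoseconds; dividing before scaling
 * (presumably) avoids overflowing the u64 intermediate when tb_cycle
 * is large.
 */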

static void set_pw20_wait_entry_bit(void *val)
{
	u32 *value = val;
	u32 pw20_idle;

	pw20_idle = mfspr(SPRN_PWRMGTCR0);

	/* Set Automatic PW20 Core Idle Count */
	/* clear count */
	pw20_idle &= ~PWRMGTCR0_PW20_ENT;

	/* set count */
	pw20_idle |= ((MAX_BIT - *value) << PWRMGTCR0_PW20_ENT_SHIFT);

	mtspr(SPRN_PWRMGTCR0, pw20_idle);
}

static ssize_t store_pw20_wait_time(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	u32 entry_bit;
	u64 value;

	unsigned int cpu = dev->id;

	if (kstrtou64(buf, 0, &value))
		return -EINVAL;

	if (!value)
		return -EINVAL;

	entry_bit = get_idle_ticks_bit(value);
	if (entry_bit > MAX_BIT)
		return -EINVAL;

	pw20_wt = value;

	smp_call_function_single(cpu, set_pw20_wait_entry_bit,
				 &entry_bit, 1);

	return count;
}

static ssize_t show_altivec_idle(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	u32 value;
	unsigned int cpu = dev->id;

	smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);

	value &= PWRMGTCR0_AV_IDLE_PD_EN;

	return sprintf(buf, "%u\n", value ? 1 : 0);
}

static void do_store_altivec_idle(void *val)
{
	u32 *value = val;
	u32 altivec_idle;

	altivec_idle = mfspr(SPRN_PWRMGTCR0);

	if (*value)
		altivec_idle |= PWRMGTCR0_AV_IDLE_PD_EN;
	else
		altivec_idle &= ~PWRMGTCR0_AV_IDLE_PD_EN;

	mtspr(SPRN_PWRMGTCR0, altivec_idle);
}

static ssize_t store_altivec_idle(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	u32 value;
	unsigned int cpu = dev->id;

	if (kstrtou32(buf, 0, &value))
		return -EINVAL;

	if (value > 1)
		return -EINVAL;

	smp_call_function_single(cpu, do_store_altivec_idle, &value, 1);

	return count;
}

static ssize_t show_altivec_idle_wait_time(struct device *dev,
					   struct device_attribute *attr, char *buf)
{
	u32 value;
	u64 tb_cycle = 1;
	u64 time;

	unsigned int cpu = dev->id;

	if (!altivec_idle_wt) {
		smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);
		value = (value & PWRMGTCR0_AV_IDLE_CNT) >>
			PWRMGTCR0_AV_IDLE_CNT_SHIFT;

		tb_cycle = (tb_cycle << (MAX_BIT - value + 1));
		/* convert TB cycles to ns */
		if (tb_ticks_per_usec > 1000) {
			time = div_u64(tb_cycle, tb_ticks_per_usec / 1000);
		} else {
			u32 rem_us;

			time = div_u64_rem(tb_cycle, tb_ticks_per_usec,
					   &rem_us);
			time = time * 1000 + rem_us * 1000 / tb_ticks_per_usec;
		}
	} else {
		time = altivec_idle_wt;
	}

	return sprintf(buf, "%llu\n", time > 0 ? time : 0);
}

static void set_altivec_idle_wait_entry_bit(void *val)
{
	u32 *value = val;
	u32 altivec_idle;

	altivec_idle = mfspr(SPRN_PWRMGTCR0);

	/* Set Automatic AltiVec Idle Count */
	/* clear count */
	altivec_idle &= ~PWRMGTCR0_AV_IDLE_CNT;

	/* set count */
	altivec_idle |= ((MAX_BIT - *value) << PWRMGTCR0_AV_IDLE_CNT_SHIFT);

	mtspr(SPRN_PWRMGTCR0, altivec_idle);
}

static ssize_t store_altivec_idle_wait_time(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t count)
{
	u32 entry_bit;
	u64 value;

	unsigned int cpu = dev->id;

	if (kstrtou64(buf, 0, &value))
		return -EINVAL;

	if (!value)
		return -EINVAL;

	entry_bit = get_idle_ticks_bit(value);
	if (entry_bit > MAX_BIT)
		return -EINVAL;

	altivec_idle_wt = value;

	smp_call_function_single(cpu, set_altivec_idle_wait_entry_bit,
				 &entry_bit, 1);

	return count;
}

/*
 * Enable/disable interface:
 * 0 to disable, 1 to enable.
 */
static DEVICE_ATTR(pw20_state, 0600, show_pw20_state, store_pw20_state);
static DEVICE_ATTR(altivec_idle, 0600, show_altivec_idle, store_altivec_idle);
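
/*
 * Example usage from userspace (illustrative; the attributes appear in
 * the standard per-cpu sysfs directory):
 *   echo 1 > /sys/devices/system/cpu/cpu0/pw20_state
 *   cat /sys/devices/system/cpu/cpu0/altivec_idle
 */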

/*
 * Wait time interface (nanoseconds).
 * Example, based on a TB (timebase) frequency of 41 MHz:
 * 1~48(ns): TB[63]
 * 49~97(ns): TB[62]
 * 98~195(ns): TB[61]
 * 196~390(ns): TB[60]
 * 391~780(ns): TB[59]
 * 781~1560(ns): TB[58]
 * ...
 */
static DEVICE_ATTR(pw20_wait_time, 0600,
		   show_pw20_wait_time,
		   store_pw20_wait_time);
static DEVICE_ATTR(altivec_idle_wait_time, 0600,
		   show_altivec_idle_wait_time,
		   store_altivec_idle_wait_time);
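
/*
 * Example (illustrative): request roughly a 1us PW20 entry delay on cpu0:
 *   echo 1000 > /sys/devices/system/cpu/cpu0/pw20_wait_time
 * The hardware granularity is a power-of-two number of TB ticks, so the
 * actual delay is rounded as per the table above.
 */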
#endif /* CONFIG_PPC_FSL_BOOK3E */

/*
 * Enabling PMCs will slow partition context switch times, so we only do
 * it the first time we write to the PMCs.
 */

static DEFINE_PER_CPU(char, pmcs_enabled);

void ppc_enable_pmcs(void)
{
	ppc_set_pmu_inuse(1);

	/* Only need to enable them once */
	if (__get_cpu_var(pmcs_enabled))
		return;

	__get_cpu_var(pmcs_enabled) = 1;

	if (ppc_md.enable_pmcs)
		ppc_md.enable_pmcs();
}
EXPORT_SYMBOL(ppc_enable_pmcs);

#define __SYSFS_SPRSETUP(NAME, ADDRESS, EXTRA) \
static void read_##NAME(void *val) \
{ \
	*(unsigned long *)val = mfspr(ADDRESS);	\
} \
static void write_##NAME(void *val) \
{ \
	EXTRA; \
	mtspr(ADDRESS, *(unsigned long *)val);	\
} \
static ssize_t show_##NAME(struct device *dev, \
			struct device_attribute *attr, \
			char *buf) \
{ \
	struct cpu *cpu = container_of(dev, struct cpu, dev); \
	unsigned long val; \
	smp_call_function_single(cpu->dev.id, read_##NAME, &val, 1);	\
	return sprintf(buf, "%lx\n", val); \
} \
static ssize_t __used \
	store_##NAME(struct device *dev, struct device_attribute *attr, \
			const char *buf, size_t count) \
{ \
	struct cpu *cpu = container_of(dev, struct cpu, dev); \
	unsigned long val; \
	int ret = sscanf(buf, "%lx", &val); \
	if (ret != 1) \
		return -EINVAL; \
	smp_call_function_single(cpu->dev.id, write_##NAME, &val, 1); \
	return count; \
}

#define SYSFS_PMCSETUP(NAME, ADDRESS) \
	__SYSFS_SPRSETUP(NAME, ADDRESS, ppc_enable_pmcs())
#define SYSFS_SPRSETUP(NAME, ADDRESS) \
	__SYSFS_SPRSETUP(NAME, ADDRESS, )
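
/*
 * For example, SYSFS_SPRSETUP(purr, SPRN_PURR) below expands to
 * read_purr()/write_purr() helpers that run on the target CPU, plus
 * show_purr()/store_purr() sysfs accessors; the SYSFS_PMCSETUP() variant
 * additionally calls ppc_enable_pmcs() on writes (which enables the
 * PMCs only once).
 */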

/* Let's define all possible registers; we'll only hook up the ones
 * that are implemented on the current processor
 */

#if defined(CONFIG_PPC64)
#define HAS_PPC_PMC_CLASSIC	1
#define HAS_PPC_PMC_IBM		1
#define HAS_PPC_PMC_PA6T	1
#elif defined(CONFIG_6xx)
#define HAS_PPC_PMC_CLASSIC	1
#define HAS_PPC_PMC_IBM		1
#define HAS_PPC_PMC_G4		1
#endif


#ifdef HAS_PPC_PMC_CLASSIC
SYSFS_PMCSETUP(mmcr0, SPRN_MMCR0);
SYSFS_PMCSETUP(mmcr1, SPRN_MMCR1);
SYSFS_PMCSETUP(pmc1, SPRN_PMC1);
SYSFS_PMCSETUP(pmc2, SPRN_PMC2);
SYSFS_PMCSETUP(pmc3, SPRN_PMC3);
SYSFS_PMCSETUP(pmc4, SPRN_PMC4);
SYSFS_PMCSETUP(pmc5, SPRN_PMC5);
SYSFS_PMCSETUP(pmc6, SPRN_PMC6);

#ifdef HAS_PPC_PMC_G4
SYSFS_PMCSETUP(mmcr2, SPRN_MMCR2);
#endif

#ifdef CONFIG_PPC64
SYSFS_PMCSETUP(pmc7, SPRN_PMC7);
SYSFS_PMCSETUP(pmc8, SPRN_PMC8);

SYSFS_PMCSETUP(mmcra, SPRN_MMCRA);
SYSFS_SPRSETUP(purr, SPRN_PURR);
SYSFS_SPRSETUP(spurr, SPRN_SPURR);
SYSFS_SPRSETUP(dscr, SPRN_DSCR);
SYSFS_SPRSETUP(pir, SPRN_PIR);

/*
 * Let's only enable read for phyp resources, and enable write when
 * needed with a separate function.
 * Let's be conservative and default to pseries.
 */
static DEVICE_ATTR(mmcra, 0600, show_mmcra, store_mmcra);
static DEVICE_ATTR(spurr, 0400, show_spurr, NULL);
static DEVICE_ATTR(dscr, 0600, show_dscr, store_dscr);
static DEVICE_ATTR(purr, 0400, show_purr, store_purr);
static DEVICE_ATTR(pir, 0400, show_pir, NULL);

unsigned long dscr_default = 0;
EXPORT_SYMBOL(dscr_default);

static void add_write_permission_dev_attr(struct device_attribute *attr)
{
	attr->attr.mode |= 0200;
}

static ssize_t show_dscr_default(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%lx\n", dscr_default);
}

static void update_dscr(void *dummy)
{
	if (!current->thread.dscr_inherit) {
		current->thread.dscr = dscr_default;
		mtspr(SPRN_DSCR, dscr_default);
	}
}

static ssize_t __used store_dscr_default(struct device *dev,
		struct device_attribute *attr, const char *buf,
		size_t count)
{
	unsigned long val;
	int ret = 0;

	ret = sscanf(buf, "%lx", &val);
	if (ret != 1)
		return -EINVAL;
	dscr_default = val;

	on_each_cpu(update_dscr, NULL, 1);

	return count;
}

static DEVICE_ATTR(dscr_default, 0600,
		show_dscr_default, store_dscr_default);
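
/*
 * Example (illustrative): dscr_default lives at the cpu subsystem root,
 * not under an individual CPU, and is parsed and printed as hex:
 *   echo 10 > /sys/devices/system/cpu/dscr_default   (sets DSCR to 0x10)
 */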

static void sysfs_create_dscr_default(void)
{
	int err = 0;
	if (cpu_has_feature(CPU_FTR_DSCR))
		err = device_create_file(cpu_subsys.dev_root, &dev_attr_dscr_default);
}
#endif /* CONFIG_PPC64 */

#ifdef HAS_PPC_PMC_PA6T
SYSFS_PMCSETUP(pa6t_pmc0, SPRN_PA6T_PMC0);
SYSFS_PMCSETUP(pa6t_pmc1, SPRN_PA6T_PMC1);
SYSFS_PMCSETUP(pa6t_pmc2, SPRN_PA6T_PMC2);
SYSFS_PMCSETUP(pa6t_pmc3, SPRN_PA6T_PMC3);
SYSFS_PMCSETUP(pa6t_pmc4, SPRN_PA6T_PMC4);
SYSFS_PMCSETUP(pa6t_pmc5, SPRN_PA6T_PMC5);
#ifdef CONFIG_DEBUG_KERNEL
SYSFS_SPRSETUP(hid0, SPRN_HID0);
SYSFS_SPRSETUP(hid1, SPRN_HID1);
SYSFS_SPRSETUP(hid4, SPRN_HID4);
SYSFS_SPRSETUP(hid5, SPRN_HID5);
SYSFS_SPRSETUP(ima0, SPRN_PA6T_IMA0);
SYSFS_SPRSETUP(ima1, SPRN_PA6T_IMA1);
SYSFS_SPRSETUP(ima2, SPRN_PA6T_IMA2);
SYSFS_SPRSETUP(ima3, SPRN_PA6T_IMA3);
SYSFS_SPRSETUP(ima4, SPRN_PA6T_IMA4);
SYSFS_SPRSETUP(ima5, SPRN_PA6T_IMA5);
SYSFS_SPRSETUP(ima6, SPRN_PA6T_IMA6);
SYSFS_SPRSETUP(ima7, SPRN_PA6T_IMA7);
SYSFS_SPRSETUP(ima8, SPRN_PA6T_IMA8);
SYSFS_SPRSETUP(ima9, SPRN_PA6T_IMA9);
SYSFS_SPRSETUP(imaat, SPRN_PA6T_IMAAT);
SYSFS_SPRSETUP(btcr, SPRN_PA6T_BTCR);
SYSFS_SPRSETUP(pccr, SPRN_PA6T_PCCR);
SYSFS_SPRSETUP(rpccr, SPRN_PA6T_RPCCR);
SYSFS_SPRSETUP(der, SPRN_PA6T_DER);
SYSFS_SPRSETUP(mer, SPRN_PA6T_MER);
SYSFS_SPRSETUP(ber, SPRN_PA6T_BER);
SYSFS_SPRSETUP(ier, SPRN_PA6T_IER);
SYSFS_SPRSETUP(sier, SPRN_PA6T_SIER);
SYSFS_SPRSETUP(siar, SPRN_PA6T_SIAR);
SYSFS_SPRSETUP(tsr0, SPRN_PA6T_TSR0);
SYSFS_SPRSETUP(tsr1, SPRN_PA6T_TSR1);
SYSFS_SPRSETUP(tsr2, SPRN_PA6T_TSR2);
SYSFS_SPRSETUP(tsr3, SPRN_PA6T_TSR3);
#endif /* CONFIG_DEBUG_KERNEL */
#endif /* HAS_PPC_PMC_PA6T */

#ifdef HAS_PPC_PMC_IBM
static struct device_attribute ibm_common_attrs[] = {
	__ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
	__ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
};
#endif /* HAS_PPC_PMC_IBM */

#ifdef HAS_PPC_PMC_G4
static struct device_attribute g4_common_attrs[] = {
	__ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
	__ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
	__ATTR(mmcr2, 0600, show_mmcr2, store_mmcr2),
};
#endif /* HAS_PPC_PMC_G4 */

static struct device_attribute classic_pmc_attrs[] = {
	__ATTR(pmc1, 0600, show_pmc1, store_pmc1),
	__ATTR(pmc2, 0600, show_pmc2, store_pmc2),
	__ATTR(pmc3, 0600, show_pmc3, store_pmc3),
	__ATTR(pmc4, 0600, show_pmc4, store_pmc4),
	__ATTR(pmc5, 0600, show_pmc5, store_pmc5),
	__ATTR(pmc6, 0600, show_pmc6, store_pmc6),
#ifdef CONFIG_PPC64
	__ATTR(pmc7, 0600, show_pmc7, store_pmc7),
	__ATTR(pmc8, 0600, show_pmc8, store_pmc8),
#endif
};

#ifdef HAS_PPC_PMC_PA6T
static struct device_attribute pa6t_attrs[] = {
	__ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
	__ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
	__ATTR(pmc0, 0600, show_pa6t_pmc0, store_pa6t_pmc0),
	__ATTR(pmc1, 0600, show_pa6t_pmc1, store_pa6t_pmc1),
	__ATTR(pmc2, 0600, show_pa6t_pmc2, store_pa6t_pmc2),
	__ATTR(pmc3, 0600, show_pa6t_pmc3, store_pa6t_pmc3),
	__ATTR(pmc4, 0600, show_pa6t_pmc4, store_pa6t_pmc4),
	__ATTR(pmc5, 0600, show_pa6t_pmc5, store_pa6t_pmc5),
#ifdef CONFIG_DEBUG_KERNEL
	__ATTR(hid0, 0600, show_hid0, store_hid0),
	__ATTR(hid1, 0600, show_hid1, store_hid1),
	__ATTR(hid4, 0600, show_hid4, store_hid4),
	__ATTR(hid5, 0600, show_hid5, store_hid5),
	__ATTR(ima0, 0600, show_ima0, store_ima0),
	__ATTR(ima1, 0600, show_ima1, store_ima1),
	__ATTR(ima2, 0600, show_ima2, store_ima2),
	__ATTR(ima3, 0600, show_ima3, store_ima3),
	__ATTR(ima4, 0600, show_ima4, store_ima4),
	__ATTR(ima5, 0600, show_ima5, store_ima5),
	__ATTR(ima6, 0600, show_ima6, store_ima6),
	__ATTR(ima7, 0600, show_ima7, store_ima7),
	__ATTR(ima8, 0600, show_ima8, store_ima8),
	__ATTR(ima9, 0600, show_ima9, store_ima9),
	__ATTR(imaat, 0600, show_imaat, store_imaat),
	__ATTR(btcr, 0600, show_btcr, store_btcr),
	__ATTR(pccr, 0600, show_pccr, store_pccr),
	__ATTR(rpccr, 0600, show_rpccr, store_rpccr),
	__ATTR(der, 0600, show_der, store_der),
	__ATTR(mer, 0600, show_mer, store_mer),
	__ATTR(ber, 0600, show_ber, store_ber),
	__ATTR(ier, 0600, show_ier, store_ier),
	__ATTR(sier, 0600, show_sier, store_sier),
	__ATTR(siar, 0600, show_siar, store_siar),
	__ATTR(tsr0, 0600, show_tsr0, store_tsr0),
	__ATTR(tsr1, 0600, show_tsr1, store_tsr1),
	__ATTR(tsr2, 0600, show_tsr2, store_tsr2),
	__ATTR(tsr3, 0600, show_tsr3, store_tsr3),
#endif /* CONFIG_DEBUG_KERNEL */
};
#endif /* HAS_PPC_PMC_PA6T */
#endif /* HAS_PPC_PMC_CLASSIC */

static void register_cpu_online(unsigned int cpu)
{
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct device *s = &c->dev;
	struct device_attribute *attrs, *pmc_attrs;
	int i, nattrs;

#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_SMT))
		device_create_file(s, &dev_attr_smt_snooze_delay);
#endif

	/* PMC stuff */
	switch (cur_cpu_spec->pmc_type) {
#ifdef HAS_PPC_PMC_IBM
	case PPC_PMC_IBM:
		attrs = ibm_common_attrs;
		nattrs = ARRAY_SIZE(ibm_common_attrs);
		pmc_attrs = classic_pmc_attrs;
		break;
#endif /* HAS_PPC_PMC_IBM */
#ifdef HAS_PPC_PMC_G4
	case PPC_PMC_G4:
		attrs = g4_common_attrs;
		nattrs = ARRAY_SIZE(g4_common_attrs);
		pmc_attrs = classic_pmc_attrs;
		break;
#endif /* HAS_PPC_PMC_G4 */
#ifdef HAS_PPC_PMC_PA6T
	case PPC_PMC_PA6T:
		/* PA Semi starts counting at PMC0 */
		attrs = pa6t_attrs;
		nattrs = ARRAY_SIZE(pa6t_attrs);
		pmc_attrs = NULL;
		break;
#endif /* HAS_PPC_PMC_PA6T */
	default:
		attrs = NULL;
		nattrs = 0;
		pmc_attrs = NULL;
	}

	for (i = 0; i < nattrs; i++)
		device_create_file(s, &attrs[i]);

	if (pmc_attrs)
		for (i = 0; i < cur_cpu_spec->num_pmcs; i++)
			device_create_file(s, &pmc_attrs[i]);

#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_MMCRA))
		device_create_file(s, &dev_attr_mmcra);

	if (cpu_has_feature(CPU_FTR_PURR)) {
		if (!firmware_has_feature(FW_FEATURE_LPAR))
			add_write_permission_dev_attr(&dev_attr_purr);
		device_create_file(s, &dev_attr_purr);
	}

	if (cpu_has_feature(CPU_FTR_SPURR))
		device_create_file(s, &dev_attr_spurr);

	if (cpu_has_feature(CPU_FTR_DSCR))
		device_create_file(s, &dev_attr_dscr);

	if (cpu_has_feature(CPU_FTR_PPCAS_ARCH_V2))
		device_create_file(s, &dev_attr_pir);
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_FSL_BOOK3E
	if (PVR_VER(cur_cpu_spec->pvr_value) == PVR_VER_E6500) {
		device_create_file(s, &dev_attr_pw20_state);
		device_create_file(s, &dev_attr_pw20_wait_time);

		device_create_file(s, &dev_attr_altivec_idle);
		device_create_file(s, &dev_attr_altivec_idle_wait_time);
	}
#endif
	cacheinfo_cpu_online(cpu);
}

#ifdef CONFIG_HOTPLUG_CPU
static void unregister_cpu_online(unsigned int cpu)
{
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct device *s = &c->dev;
	struct device_attribute *attrs, *pmc_attrs;
	int i, nattrs;

	BUG_ON(!c->hotpluggable);

#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_SMT))
		device_remove_file(s, &dev_attr_smt_snooze_delay);
#endif

	/* PMC stuff */
	switch (cur_cpu_spec->pmc_type) {
#ifdef HAS_PPC_PMC_IBM
	case PPC_PMC_IBM:
		attrs = ibm_common_attrs;
		nattrs = ARRAY_SIZE(ibm_common_attrs);
		pmc_attrs = classic_pmc_attrs;
		break;
#endif /* HAS_PPC_PMC_IBM */
#ifdef HAS_PPC_PMC_G4
	case PPC_PMC_G4:
		attrs = g4_common_attrs;
		nattrs = ARRAY_SIZE(g4_common_attrs);
		pmc_attrs = classic_pmc_attrs;
		break;
#endif /* HAS_PPC_PMC_G4 */
#ifdef HAS_PPC_PMC_PA6T
	case PPC_PMC_PA6T:
		/* PA Semi starts counting at PMC0 */
		attrs = pa6t_attrs;
		nattrs = ARRAY_SIZE(pa6t_attrs);
		pmc_attrs = NULL;
		break;
#endif /* HAS_PPC_PMC_PA6T */
	default:
		attrs = NULL;
		nattrs = 0;
		pmc_attrs = NULL;
	}

	for (i = 0; i < nattrs; i++)
		device_remove_file(s, &attrs[i]);

	if (pmc_attrs)
		for (i = 0; i < cur_cpu_spec->num_pmcs; i++)
			device_remove_file(s, &pmc_attrs[i]);

#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_MMCRA))
		device_remove_file(s, &dev_attr_mmcra);

	if (cpu_has_feature(CPU_FTR_PURR))
		device_remove_file(s, &dev_attr_purr);

	if (cpu_has_feature(CPU_FTR_SPURR))
		device_remove_file(s, &dev_attr_spurr);

	if (cpu_has_feature(CPU_FTR_DSCR))
		device_remove_file(s, &dev_attr_dscr);

	if (cpu_has_feature(CPU_FTR_PPCAS_ARCH_V2))
		device_remove_file(s, &dev_attr_pir);
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_FSL_BOOK3E
	if (PVR_VER(cur_cpu_spec->pvr_value) == PVR_VER_E6500) {
		device_remove_file(s, &dev_attr_pw20_state);
		device_remove_file(s, &dev_attr_pw20_wait_time);

		device_remove_file(s, &dev_attr_altivec_idle);
		device_remove_file(s, &dev_attr_altivec_idle_wait_time);
	}
#endif
	cacheinfo_cpu_offline(cpu);
}

#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
ssize_t arch_cpu_probe(const char *buf, size_t count)
{
	if (ppc_md.cpu_probe)
		return ppc_md.cpu_probe(buf, count);

	return -EINVAL;
}

ssize_t arch_cpu_release(const char *buf, size_t count)
{
	if (ppc_md.cpu_release)
		return ppc_md.cpu_release(buf, count);

	return -EINVAL;
}
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */

#endif /* CONFIG_HOTPLUG_CPU */

static int sysfs_cpu_notify(struct notifier_block *self,
			    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		register_cpu_online(cpu);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		unregister_cpu_online(cpu);
		break;
#endif
	}
	return NOTIFY_OK;
}

static struct notifier_block sysfs_cpu_nb = {
	.notifier_call = sysfs_cpu_notify,
};

static DEFINE_MUTEX(cpu_mutex);

int cpu_add_dev_attr(struct device_attribute *attr)
{
	int cpu;

	mutex_lock(&cpu_mutex);

	for_each_possible_cpu(cpu) {
		device_create_file(get_cpu_device(cpu), attr);
	}

	mutex_unlock(&cpu_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(cpu_add_dev_attr);
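
/*
 * Example (illustrative; my_attr and show_my_attr are hypothetical names):
 * a platform driver could publish an extra read-only per-cpu attribute via
 *   static DEVICE_ATTR(my_attr, 0444, show_my_attr, NULL);
 *   cpu_add_dev_attr(&dev_attr_my_attr);
 */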

int cpu_add_dev_attr_group(struct attribute_group *attrs)
{
	int cpu;
	struct device *dev;
	int ret;

	mutex_lock(&cpu_mutex);

	for_each_possible_cpu(cpu) {
		dev = get_cpu_device(cpu);
		ret = sysfs_create_group(&dev->kobj, attrs);
		WARN_ON(ret != 0);
	}

	mutex_unlock(&cpu_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(cpu_add_dev_attr_group);


void cpu_remove_dev_attr(struct device_attribute *attr)
{
	int cpu;

	mutex_lock(&cpu_mutex);

	for_each_possible_cpu(cpu) {
		device_remove_file(get_cpu_device(cpu), attr);
	}

	mutex_unlock(&cpu_mutex);
}
EXPORT_SYMBOL_GPL(cpu_remove_dev_attr);

void cpu_remove_dev_attr_group(struct attribute_group *attrs)
{
	int cpu;
	struct device *dev;

	mutex_lock(&cpu_mutex);

	for_each_possible_cpu(cpu) {
		dev = get_cpu_device(cpu);
		sysfs_remove_group(&dev->kobj, attrs);
	}

	mutex_unlock(&cpu_mutex);
}
EXPORT_SYMBOL_GPL(cpu_remove_dev_attr_group);


/* NUMA stuff */

#ifdef CONFIG_NUMA
static void register_nodes(void)
{
	int i;

	for (i = 0; i < MAX_NUMNODES; i++)
		register_one_node(i);
}

int sysfs_add_device_to_node(struct device *dev, int nid)
{
	struct node *node = node_devices[nid];
	return sysfs_create_link(&node->dev.kobj, &dev->kobj,
				 kobject_name(&dev->kobj));
}
EXPORT_SYMBOL_GPL(sysfs_add_device_to_node);
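
/*
 * This creates a symlink named after the device under the node's sysfs
 * directory (normally /sys/devices/system/node/node<nid>/), pointing back
 * at the device's own directory.
 */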

void sysfs_remove_device_from_node(struct device *dev, int nid)
{
	struct node *node = node_devices[nid];
	sysfs_remove_link(&node->dev.kobj, kobject_name(&dev->kobj));
}
EXPORT_SYMBOL_GPL(sysfs_remove_device_from_node);

#else
static void register_nodes(void)
{
	return;
}

#endif

/* Only valid if CPU is present. */
static ssize_t show_physical_id(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);

	return sprintf(buf, "%d\n", get_hard_smp_processor_id(cpu->dev.id));
}
static DEVICE_ATTR(physical_id, 0444, show_physical_id, NULL);

static int __init topology_init(void)
{
	int cpu;

	register_nodes();
	register_cpu_notifier(&sysfs_cpu_nb);

	for_each_possible_cpu(cpu) {
		struct cpu *c = &per_cpu(cpu_devices, cpu);

		/*
		 * For now, we just see if the system supports making
		 * the RTAS calls for CPU hotplug. But, there may be a
		 * more comprehensive way to do this for an individual
		 * CPU. For instance, the boot cpu might never be valid
		 * for hotplugging.
		 */
		if (ppc_md.cpu_die)
			c->hotpluggable = 1;

		if (cpu_online(cpu) || c->hotpluggable) {
			register_cpu(c, cpu);

			device_create_file(&c->dev, &dev_attr_physical_id);
		}

		if (cpu_online(cpu))
			register_cpu_online(cpu);
	}
#ifdef CONFIG_PPC64
	sysfs_create_dscr_default();
#endif /* CONFIG_PPC64 */

	return 0;
}
subsys_initcall(topology_init);