/* arch/x86/kernel/cpu/proc.c */
#include <linux/smp.h>
#include <linux/timex.h>
#include <linux/string.h>
#include <linux/seq_file.h>
#include <linux/cpufreq.h>

/*
 * Get CPU information for use by the procfs.
 */
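/*
 * show_cpuinfo_core() and show_cpuinfo_misc() come in a 32-bit and a
 * 64-bit flavour; the 32-bit variant additionally reports the legacy
 * FPU presence and erratum flags (fdiv_bug, f00f_bug, ...).
 */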
#ifdef CONFIG_X86_32
static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c,
			      unsigned int cpu)
{
#ifdef CONFIG_X86_HT
	if (c->x86_max_cores * smp_num_siblings > 1) {
		seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
		seq_printf(m, "siblings\t: %d\n",
			   cpus_weight(per_cpu(cpu_core_map, cpu)));
		seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
		seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
		seq_printf(m, "apicid\t\t: %d\n", c->apicid);
		seq_printf(m, "initial apicid\t: %d\n", c->initial_apicid);
	}
#endif
}

static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
{
	/*
	 * We use exception 16 if we have hardware math and we've either seen
	 * it or the CPU claims it is internal
	 */
	int fpu_exception = c->hard_math && (ignore_fpu_irq || cpu_has_fpu);
	seq_printf(m,
		   "fdiv_bug\t: %s\n"
		   "hlt_bug\t\t: %s\n"
		   "f00f_bug\t: %s\n"
		   "coma_bug\t: %s\n"
		   "fpu\t\t: %s\n"
		   "fpu_exception\t: %s\n"
		   "cpuid level\t: %d\n"
		   "wp\t\t: %s\n",
		   c->fdiv_bug ? "yes" : "no",
		   c->hlt_works_ok ? "no" : "yes",
		   c->f00f_bug ? "yes" : "no",
		   c->coma_bug ? "yes" : "no",
		   c->hard_math ? "yes" : "no",
		   fpu_exception ? "yes" : "no",
		   c->cpuid_level,
		   c->wp_works_ok ? "yes" : "no");
}
#else
static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c,
			      unsigned int cpu)
{
#ifdef CONFIG_SMP
	if (c->x86_max_cores * smp_num_siblings > 1) {
		seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
		seq_printf(m, "siblings\t: %d\n",
			   cpus_weight(per_cpu(cpu_core_map, cpu)));
		seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
		seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
		seq_printf(m, "apicid\t\t: %d\n", c->apicid);
		seq_printf(m, "initial apicid\t: %d\n", c->initial_apicid);
	}
#endif
}

static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
{
	seq_printf(m,
		   "fpu\t\t: yes\n"
		   "fpu_exception\t: yes\n"
		   "cpuid level\t: %d\n"
		   "wp\t\t: yes\n",
		   c->cpuid_level);
}
#endif
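
/*
 * Top-level /proc/cpuinfo show routine: called once per online CPU.
 * Prints the fields common to all x86, then lets the helpers above add
 * the topology and 32/64-bit specific lines.
 */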
static int show_cpuinfo(struct seq_file *m, void *v)
{
	struct cpuinfo_x86 *c = v;
	unsigned int cpu = 0;
	int i;

#ifdef CONFIG_SMP
	cpu = c->cpu_index;
#endif
	seq_printf(m, "processor\t: %u\n"
		   "vendor_id\t: %s\n"
		   "cpu family\t: %d\n"
		   "model\t\t: %u\n"
		   "model name\t: %s\n",
		   cpu,
		   c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
		   c->x86,
		   c->x86_model,
		   c->x86_model_id[0] ? c->x86_model_id : "unknown");

	if (c->x86_mask || c->cpuid_level >= 0)
		seq_printf(m, "stepping\t: %d\n", c->x86_mask);
	else
		seq_printf(m, "stepping\t: unknown\n");

	if (cpu_has(c, X86_FEATURE_TSC)) {
		unsigned int freq = cpufreq_quick_get(cpu);

		if (!freq)
			freq = cpu_khz;
		seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
			   freq / 1000, (freq % 1000));
	}

	/* Cache size */
	if (c->x86_cache_size >= 0)
		seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);

	show_cpuinfo_core(m, c, cpu);
	show_cpuinfo_misc(m, c);

	seq_printf(m, "flags\t\t:");
	for (i = 0; i < 32*NCAPINTS; i++)
		if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
			seq_printf(m, " %s", x86_cap_flags[i]);

	seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
		   c->loops_per_jiffy/(500000/HZ),
		   (c->loops_per_jiffy/(5000/HZ)) % 100);

#ifdef CONFIG_X86_64
	if (c->x86_tlbsize > 0)
		seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
#endif
	seq_printf(m, "clflush size\t: %u\n", c->x86_clflush_size);
#ifdef CONFIG_X86_64
	seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);
	seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
		   c->x86_phys_bits, c->x86_virt_bits);
#endif

	seq_printf(m, "power management:");
	for (i = 0; i < 32; i++) {
		if (c->x86_power & (1 << i)) {
			if (i < ARRAY_SIZE(x86_power_flags) &&
			    x86_power_flags[i])
				seq_printf(m, "%s%s",
					   x86_power_flags[i][0] ? " " : "",
					   x86_power_flags[i]);
			else
				seq_printf(m, " [%d]", i);
		}
	}

	seq_printf(m, "\n\n");

	return 0;
}
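
/*
 * seq_file iterator callbacks: *pos walks the online CPU map, so each
 * record corresponds to one online CPU.  c_start() also copes with the
 * case where CPU 0 is not the first online CPU.
 */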
static void *c_start(struct seq_file *m, loff_t *pos)
{
	if (*pos == 0)	/* just in case, cpu 0 is not the first */
		*pos = first_cpu(cpu_online_map);
	else
		*pos = next_cpu_nr(*pos - 1, cpu_online_map);
	if ((*pos) < nr_cpu_ids)
		return &cpu_data(*pos);
	return NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}
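
/*
 * seq_file operations consumed by the procfs code that implements
 * /proc/cpuinfo.
 */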
const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= show_cpuinfo,
};