/*
 * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/oprofile.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/systemcfg.h>
#include <asm/rtas.h>

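/*
 * Debug output is stubbed out by default. A local tweak such as
 * "#define dbg(args...) printk(KERN_DEBUG args)" (hypothetical, not
 * part of this file) would enable it.
 */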
#define dbg(args...)

#include "op_impl.h"

static unsigned long reset_value[OP_MAX_COUNTER];

static int oprofile_running;
static int mmcra_has_sihv;

/* mmcr values are set in power4_reg_setup, used in power4_cpu_setup */
static u32 mmcr0_val;
static u64 mmcr1_val;
static u32 mmcra_val;

/*
 * Since we do not have an NMI, backtracing through spinlocks is
 * only a best guess. In light of this, allow it to be disabled at
 * runtime.
 */
static int backtrace_spinlocks;

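/*
 * Called at profiling setup time: latch the user-supplied MMCR values
 * and compute the per-counter reset values used on overflow.
 */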
static void power4_reg_setup(struct op_counter_config *ctr,
			     struct op_system_config *sys,
			     int num_ctrs)
{
	int i;

	/*
	 * SIHV / SIPR bits are only implemented on POWER4+ (GQ) and above.
	 * However, we leave it disabled on all POWER4 until we verify that
	 * it works (I was seeing some strange behaviour last time I tried).
	 *
	 * It has been verified to work on POWER5, so we enable it there.
	 */
	if (cpu_has_feature(CPU_FTR_MMCRA_SIHV))
		mmcra_has_sihv = 1;

	/*
	 * The performance counter event settings are given in the mmcr0,
	 * mmcr1 and mmcra values passed from the user in the
	 * op_system_config structure (sys variable).
	 */
	mmcr0_val = sys->mmcr0;
	mmcr1_val = sys->mmcr1;
	mmcra_val = sys->mmcra;

	backtrace_spinlocks = sys->backtrace_spinlocks;

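	/*
	 * The PMCs are 32-bit counters that raise a performance monitor
	 * exception once bit 0 (0x80000000) becomes set, so priming a
	 * counter with 0x80000000 - count makes it overflow after
	 * "count" events.
	 */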
	for (i = 0; i < cur_cpu_spec->num_pmcs; ++i)
		reset_value[i] = 0x80000000UL - ctr[i].count;

	/* setup user and kernel profiling */
	if (sys->enable_kernel)
		mmcr0_val &= ~MMCR0_KERNEL_DISABLE;
	else
		mmcr0_val |= MMCR0_KERNEL_DISABLE;

	if (sys->enable_user)
		mmcr0_val &= ~MMCR0_PROBLEM_DISABLE;
	else
		mmcr0_val |= MMCR0_PROBLEM_DISABLE;
}

extern void ppc64_enable_pmcs(void);

static void power4_cpu_setup(void *unused)
{
	unsigned int mmcr0 = mmcr0_val;
	unsigned long mmcra = mmcra_val;

	ppc64_enable_pmcs();

	/* set the freeze bit */
	mmcr0 |= MMCR0_FC;
	mtspr(SPRN_MMCR0, mmcr0);

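	/*
	 * FCM1 freezes the counters while MSR[PMM] is set, PMXE enables
	 * the performance monitor exception, and FCECE refreezes the
	 * counters when an enabled overflow condition occurs. PMC1CE and
	 * PMCjCE let PMC1 and the remaining PMCs raise that condition.
	 */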
	mmcr0 |= MMCR0_FCM1|MMCR0_PMXE|MMCR0_FCECE;
	mmcr0 |= MMCR0_PMC1CE|MMCR0_PMCjCE;
	mtspr(SPRN_MMCR0, mmcr0);

	mtspr(SPRN_MMCR1, mmcr1_val);

	mmcra |= MMCRA_SAMPLE_ENABLE;
	mtspr(SPRN_MMCRA, mmcra);

	dbg("setup on cpu %d, mmcr0 %lx\n", smp_processor_id(),
	    mfspr(SPRN_MMCR0));
	dbg("setup on cpu %d, mmcr1 %lx\n", smp_processor_id(),
	    mfspr(SPRN_MMCR1));
	dbg("setup on cpu %d, mmcra %lx\n", smp_processor_id(),
	    mfspr(SPRN_MMCRA));
}

static void power4_start(struct op_counter_config *ctr)
{
	int i;
	unsigned int mmcr0;

	/* set the PMM bit (see comment below) */
	mtmsrd(mfmsr() | MSR_PMM);

	for (i = 0; i < cur_cpu_spec->num_pmcs; ++i) {
		if (ctr[i].enabled) {
			ctr_write(i, reset_value[i]);
		} else {
			ctr_write(i, 0);
		}
	}

	mmcr0 = mfspr(SPRN_MMCR0);

	/*
	 * We must clear the PMAO bit on some (GQ) chips. Just do it
	 * all the time.
	 */
	mmcr0 &= ~MMCR0_PMAO;

	/*
	 * Now clear the freeze bit; counting will not start until we
	 * rfid from this exception, because only at that point will
	 * the PMM bit be cleared.
	 */
	mmcr0 &= ~MMCR0_FC;
	mtspr(SPRN_MMCR0, mmcr0);

	oprofile_running = 1;

	dbg("start on cpu %d, mmcr0 %x\n", smp_processor_id(), mmcr0);
}


static void power4_stop(void)
{
	unsigned int mmcr0;

	/* freeze counters */
	mmcr0 = mfspr(SPRN_MMCR0);
	mmcr0 |= MMCR0_FC;
	mtspr(SPRN_MMCR0, mmcr0);

	oprofile_running = 0;

	dbg("stop on cpu %d, mmcr0 %x\n", smp_processor_id(), mmcr0);

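	/* full barrier: order the oprofile_running store before later accesses */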
	mb();
}

/* Fake functions used by get_pc */
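/*
 * Samples that cannot be attributed to real code are credited to one of
 * these empty functions, so they show up as named buckets in the
 * profile. On ppc64, a function symbol names a function descriptor
 * whose first word is the entry point; get_pc() dereferences it to get
 * the text address used as the bucket PC ("function descriptor
 * madness" below).
 */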
static void __attribute_used__ hypervisor_bucket(void)
{
}

static void __attribute_used__ rtas_bucket(void)
{
}

static void __attribute_used__ kernel_unknown_bucket(void)
{
}

static unsigned long check_spinlock_pc(struct pt_regs *regs,
				       unsigned long profile_pc)
{
	unsigned long pc = instruction_pointer(regs);

	/*
	 * If both the SIAR (sampled instruction) and the perfmon exception
	 * occurred in a spinlock region, then we account the sample to the
	 * calling function. This isn't 100% correct; we really need soft
	 * IRQ disable so we always get the perfmon exception at the
	 * point at which the SIAR is set.
	 */
	if (backtrace_spinlocks && in_lock_functions(pc) &&
	    in_lock_functions(profile_pc))
		return regs->link;
	else
		return profile_pc;
}

/*
 * On GQ and newer, the MMCRA stores the HV and PR bits at the time
 * the SIAR was sampled. We use that to work out if the SIAR was sampled
 * in the hypervisor, our exception vectors or RTAS.
 */
static unsigned long get_pc(struct pt_regs *regs)
{
	unsigned long pc = mfspr(SPRN_SIAR);
	unsigned long mmcra;

	/* Can't do much about it without SIHV/SIPR */
	if (!mmcra_has_sihv)
		return check_spinlock_pc(regs, pc);

	mmcra = mfspr(SPRN_MMCRA);

	/* Were we in the hypervisor? */
	if ((systemcfg->platform == PLATFORM_PSERIES_LPAR) &&
	    (mmcra & MMCRA_SIHV))
		/* function descriptor madness */
		return *((unsigned long *)hypervisor_bucket);

	/* We were in userspace, nothing to do */
	if (mmcra & MMCRA_SIPR)
		return pc;

#ifdef CONFIG_PPC_RTAS
	/* Were we in RTAS? */
	if (pc >= rtas.base && pc < (rtas.base + rtas.size))
		/* function descriptor madness */
		return *((unsigned long *)rtas_bucket);
#endif

	/* Were we in our exception vectors or SLB real mode miss handler? */
	if (pc < 0x1000000UL)
		return (unsigned long)__va(pc);

	/* Not sure where we were */
	if (pc < KERNELBASE)
		/* function descriptor madness */
		return *((unsigned long *)kernel_unknown_bucket);

	return check_spinlock_pc(regs, pc);
}

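/*
 * Decide whether the sample belongs to kernel or user space: from the
 * recorded PR bit when MMCRA provides it, otherwise by comparing the
 * sampled address against KERNELBASE.
 */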
static int get_kernel(unsigned long pc)
{
	int is_kernel;

	if (!mmcra_has_sihv) {
		is_kernel = (pc >= KERNELBASE);
	} else {
		unsigned long mmcra = mfspr(SPRN_MMCRA);
		is_kernel = ((mmcra & MMCRA_SIPR) == 0);
	}

	return is_kernel;
}

static void power4_handle_interrupt(struct pt_regs *regs,
				    struct op_counter_config *ctr)
{
	unsigned long pc;
	int is_kernel;
	int val;
	int i;
	unsigned int mmcr0;

	pc = get_pc(regs);
	is_kernel = get_kernel(pc);

	/* set the PMM bit (see comment below) */
	mtmsrd(mfmsr() | MSR_PMM);

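	/*
	 * An overflowed counter has bit 0 set, so it reads back as a
	 * negative value when treated as a signed int.
	 */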
	for (i = 0; i < cur_cpu_spec->num_pmcs; ++i) {
		val = ctr_read(i);
		if (val < 0) {
			if (oprofile_running && ctr[i].enabled) {
				oprofile_add_pc(pc, is_kernel, i);
				ctr_write(i, reset_value[i]);
			} else {
				ctr_write(i, 0);
			}
		}
	}

	mmcr0 = mfspr(SPRN_MMCR0);

	/* reset the perfmon trigger */
	mmcr0 |= MMCR0_PMXE;

	/*
	 * We must clear the PMAO bit on some (GQ) chips. Just do it
	 * all the time.
	 */
	mmcr0 &= ~MMCR0_PMAO;

	/*
	 * Now clear the freeze bit; counting will not start until we
	 * rfid from this exception, because only at that point will
	 * the PMM bit be cleared.
	 */
	mmcr0 &= ~MMCR0_FC;
	mtspr(SPRN_MMCR0, mmcr0);
}

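/* PMU model hooks handed to the common ppc64 oprofile code (see op_impl.h) */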
struct op_ppc64_model op_model_power4 = {
	.reg_setup = power4_reg_setup,
	.cpu_setup = power4_cpu_setup,
	.start = power4_start,
	.stop = power4_stop,
	.handle_interrupt = power4_handle_interrupt,
};