/*
 * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/oprofile.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/systemcfg.h>
#include <asm/rtas.h>

/* debug output is compiled away by default */
#define dbg(args...)

/* local oprofile model definitions (op_*_config, ctr_read/ctr_write) */
#include "op_impl.h"
static unsigned long reset_value[OP_MAX_COUNTER];

static int oprofile_running;
static int mmcra_has_sihv;

/* mmcr values are set in power4_reg_setup, used in power4_cpu_setup */
static u32 mmcr0_val;
static u64 mmcr1_val;
static u32 mmcra_val;
/*
 * Since we do not have an NMI, backtracing through spinlocks is
 * only a best guess. In light of this, allow it to be disabled at
 * runtime.
 */
static int backtrace_spinlocks;

static void power4_reg_setup(struct op_counter_config *ctr,
			     struct op_system_config *sys,
			     int num_ctrs)
{
	int i;
	/*
	 * SIHV / SIPR bits are only implemented on POWER4+ (GQ) and above.
	 * However we disable it on all POWER4 until we verify it works
	 * (I was seeing some strange behaviour last time I tried).
	 *
	 * It has been verified to work on POWER5 so we enable it there.
	 */
	if (cpu_has_feature(CPU_FTR_MMCRA_SIHV))
		mmcra_has_sihv = 1;
	/*
	 * The performance counter event settings are given in the mmcr0,
	 * mmcr1 and mmcra values passed from the user in the
	 * op_system_config structure (sys variable).
	 */
	mmcr0_val = sys->mmcr0;
	mmcr1_val = sys->mmcr1;
	mmcra_val = sys->mmcra;

	backtrace_spinlocks = sys->backtrace_spinlocks;
	for (i = 0; i < cur_cpu_spec->num_pmcs; ++i)
		reset_value[i] = 0x80000000UL - ctr[i].count;
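
	/*
	 * Note: a PMC raises the performance monitor condition once its
	 * high (sign) bit becomes set, so priming a counter with
	 * 0x80000000 - count makes it trigger a sample after "count" events.
	 */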
	/* setup user and kernel profiling */
	if (sys->enable_kernel)
		mmcr0_val &= ~MMCR0_KERNEL_DISABLE;
	else
		mmcr0_val |= MMCR0_KERNEL_DISABLE;
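
	/*
	 * MMCR0_KERNEL_DISABLE and MMCR0_PROBLEM_DISABLE map to the MMCR0
	 * bits that freeze the counters in supervisor and problem (user)
	 * state respectively, which is how kernel-only or user-only
	 * profiling is selected here.
	 */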
	if (sys->enable_user)
		mmcr0_val &= ~MMCR0_PROBLEM_DISABLE;
	else
		mmcr0_val |= MMCR0_PROBLEM_DISABLE;
}

extern void ppc64_enable_pmcs(void);

static void power4_cpu_setup(void *unused)
{
	unsigned int mmcr0 = mmcr0_val;
	unsigned long mmcra = mmcra_val;

	ppc64_enable_pmcs();

	/* set the freeze bit */
	mmcr0 |= MMCR0_FC;
	mtspr(SPRN_MMCR0, mmcr0);

	mmcr0 |= MMCR0_FCM1|MMCR0_PMXE|MMCR0_FCECE;
	mmcr0 |= MMCR0_PMC1CE|MMCR0_PMCjCE;
	mtspr(SPRN_MMCR0, mmcr0);
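
	/*
	 * Rough meaning of the bits set above: FCM1 freezes the counters
	 * while MSR[PMM] is 1, PMXE enables the performance monitor
	 * exception, FCECE freezes the counters when an enabled condition
	 * or event occurs, and PMC1CE/PMCjCE let PMC1 and the remaining
	 * PMCs respectively raise that condition on overflow.
	 */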

	mtspr(SPRN_MMCR1, mmcr1_val);

	mmcra |= MMCRA_SAMPLE_ENABLE;
	mtspr(SPRN_MMCRA, mmcra);

	dbg("setup on cpu %d, mmcr0 %lx\n", smp_processor_id(),
	    mfspr(SPRN_MMCR0));
	dbg("setup on cpu %d, mmcr1 %lx\n", smp_processor_id(),
	    mfspr(SPRN_MMCR1));
	dbg("setup on cpu %d, mmcra %lx\n", smp_processor_id(),
	    mfspr(SPRN_MMCRA));
}

static void power4_start(struct op_counter_config *ctr)
{
	int i;
	unsigned int mmcr0;

	/* set the PMM bit (see comment below) */
	mtmsrd(mfmsr() | MSR_PMM);
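
	/*
	 * With MMCR0_FCM1 set in power4_cpu_setup, the counters stay frozen
	 * for as long as MSR[PMM] is 1, so nothing is counted while the
	 * PMCs are being loaded below.
	 */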

	for (i = 0; i < cur_cpu_spec->num_pmcs; ++i) {
		if (ctr[i].enabled) {
			ctr_write(i, reset_value[i]);
		} else {
			/* unused counter: park it at zero */
			ctr_write(i, 0);
		}
	}

	mmcr0 = mfspr(SPRN_MMCR0);

	/*
	 * We must clear the PMAO bit on some (GQ) chips. Just do it
	 * all the time
	 */
	mmcr0 &= ~MMCR0_PMAO;

	/*
	 * now clear the freeze bit, counting will not start until we
	 * rfid from this exception, because only at that point will
	 * the PMM bit be cleared
	 */
	mmcr0 &= ~MMCR0_FC;
	mtspr(SPRN_MMCR0, mmcr0);

	oprofile_running = 1;

	dbg("start on cpu %d, mmcr0 %x\n", smp_processor_id(), mmcr0);
}

static void power4_stop(void)
{
	unsigned int mmcr0;

	/* freeze counters */
	mmcr0 = mfspr(SPRN_MMCR0);
	mmcr0 |= MMCR0_FC;
	mtspr(SPRN_MMCR0, mmcr0);

	oprofile_running = 0;

	dbg("stop on cpu %d, mmcr0 %x\n", smp_processor_id(), mmcr0);
}

/* Fake functions used by canonicalize_pc */
static void __attribute_used__ hypervisor_bucket(void)
{
}

static void __attribute_used__ rtas_bucket(void)
{
}

static void __attribute_used__ kernel_unknown_bucket(void)
{
}

static unsigned long check_spinlock_pc(struct pt_regs *regs,
				       unsigned long profile_pc)
{
	unsigned long pc = instruction_pointer(regs);
	/*
	 * If both the SIAR (sampled instruction) and the perfmon exception
	 * occurred in a spinlock region then we account the sample to the
	 * calling function. This isn't 100% correct, we really need soft
	 * IRQ disable so we always get the perfmon exception at the
	 * point at which the SIAR is set.
	 */
	if (backtrace_spinlocks && in_lock_functions(pc) &&
			in_lock_functions(profile_pc))
		/* the saved link register points back into the caller */
		return regs->link;
	else
		return profile_pc;
}

/*
 * On GQ and newer the MMCRA stores the HV and PR bits at the time
 * the SIAR was sampled. We use that to work out if the SIAR was sampled in
 * the hypervisor, our exception vectors or RTAS.
 */
static unsigned long get_pc(struct pt_regs *regs)
{
	unsigned long pc = mfspr(SPRN_SIAR);
	unsigned long mmcra;

	/* Can't do much about it */
	if (!mmcra_has_sihv)
		return check_spinlock_pc(regs, pc);

	mmcra = mfspr(SPRN_MMCRA);

	/* Were we in the hypervisor? */
	if ((systemcfg->platform == PLATFORM_PSERIES_LPAR) &&
	    (mmcra & MMCRA_SIHV))
		/* function descriptor madness */
		return *((unsigned long *)hypervisor_bucket);
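
	/*
	 * "Function descriptor madness": a ppc64 function symbol points at
	 * a descriptor whose first word is the real entry address, so
	 * dereferencing the function pointer above yields a text address
	 * the sample can be attributed to.
	 */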

	/* We were in userspace, nothing to do */
	if (mmcra & MMCRA_SIPR)
		return pc;

#ifdef CONFIG_PPC_RTAS
	/* Were we in RTAS? */
	if (pc >= rtas.base && pc < (rtas.base + rtas.size))
		/* function descriptor madness */
		return *((unsigned long *)rtas_bucket);
#endif

	/* Were we in our exception vectors or SLB real mode miss handler? */
	if (pc < 0x1000000UL)
		return (unsigned long)__va(pc);
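
	/*
	 * That early code runs in real mode, so the SIAR holds a real
	 * address there; __va() maps it back into the kernel's linear
	 * virtual mapping before handing it to oprofile.
	 */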

	/* Not sure where we were */
	if (pc < KERNELBASE)
		/* function descriptor madness */
		return *((unsigned long *)kernel_unknown_bucket);

	return check_spinlock_pc(regs, pc);
}

static int get_kernel(unsigned long pc)
{
	int is_kernel;

	if (!mmcra_has_sihv) {
		is_kernel = (pc >= KERNELBASE);
	} else {
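		/*
		 * MMCRA_SIPR captures MSR[PR] at the time the SIAR was set,
		 * so a clear bit means the sampled instruction was running
		 * in privileged state.
		 */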
		unsigned long mmcra = mfspr(SPRN_MMCRA);
		is_kernel = ((mmcra & MMCRA_SIPR) == 0);
	}

	return is_kernel;
}

static void power4_handle_interrupt(struct pt_regs *regs,
				    struct op_counter_config *ctr)
{
	unsigned long pc;
	int is_kernel;
	int val;
	int i;
	unsigned int mmcr0;

	pc = get_pc(regs);
	is_kernel = get_kernel(pc);

	/* set the PMM bit (see comment below) */
	mtmsrd(mfmsr() | MSR_PMM);

	for (i = 0; i < cur_cpu_spec->num_pmcs; ++i) {
		/* only touch counters that have overflowed (gone negative) */
		val = ctr_read(i);
		if (val < 0) {
			if (oprofile_running && ctr[i].enabled) {
				oprofile_add_pc(pc, is_kernel, i);
				ctr_write(i, reset_value[i]);
			} else {
				ctr_write(i, 0);
			}
		}
	}

	mmcr0 = mfspr(SPRN_MMCR0);

	/* reset the perfmon trigger */
	mmcr0 |= MMCR0_PMXE;

	/*
	 * We must clear the PMAO bit on some (GQ) chips. Just do it
	 * all the time
	 */
	mmcr0 &= ~MMCR0_PMAO;

	/*
	 * now clear the freeze bit, counting will not start until we
	 * rfid from this exception, because only at that point will
	 * the PMM bit be cleared
	 */
	mmcr0 &= ~MMCR0_FC;
	mtspr(SPRN_MMCR0, mmcr0);
}

struct op_ppc64_model op_model_power4 = {
	.reg_setup		= power4_reg_setup,
	.cpu_setup		= power4_cpu_setup,
	.start			= power4_start,
	.stop			= power4_stop,
	.handle_interrupt	= power4_handle_interrupt,
};
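
/*
 * The hooks above are the model's entire interface: reg_setup runs once
 * with the user-supplied configuration, cpu_setup/start/stop run on each
 * CPU, and handle_interrupt services the performance monitor exception.
 * The ppc64 oprofile common code selects and drives this model on
 * POWER4/POWER5 class CPUs.
 */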