deliverable/linux.git: arch/powerpc/oprofile/op_model_fsl_emb.c
/*
 * Freescale Embedded oprofile support, based on ppc64 oprofile support
 * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * Copyright (c) 2004, 2010 Freescale Semiconductor, Inc
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala <galak@kernel.crashing.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/oprofile.h>
#include <linux/smp.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/reg_fsl_emb.h>
#include <asm/page.h>
#include <asm/pmc.h>
#include <asm/oprofile_impl.h>

static unsigned long reset_value[OP_MAX_COUNTER];

static int num_counters;
static int oprofile_running;

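/*
 * Each counter has its own "local control A" PMR (PMLCA0..PMLCA5);
 * map a counter index onto a read of the matching register.  An
 * out-of-range index is a driver bug, hence the panic().
 */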
static inline u32 get_pmlca(int ctr)
{
	u32 pmlca;

	switch (ctr) {
	case 0:
		pmlca = mfpmr(PMRN_PMLCA0);
		break;
	case 1:
		pmlca = mfpmr(PMRN_PMLCA1);
		break;
	case 2:
		pmlca = mfpmr(PMRN_PMLCA2);
		break;
	case 3:
		pmlca = mfpmr(PMRN_PMLCA3);
		break;
	case 4:
		pmlca = mfpmr(PMRN_PMLCA4);
		break;
	case 5:
		pmlca = mfpmr(PMRN_PMLCA5);
		break;
	default:
		panic("Bad ctr number\n");
	}

	return pmlca;
}

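/* Write back a counter's local control A register. */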
static inline void set_pmlca(int ctr, u32 pmlca)
{
	switch (ctr) {
	case 0:
		mtpmr(PMRN_PMLCA0, pmlca);
		break;
	case 1:
		mtpmr(PMRN_PMLCA1, pmlca);
		break;
	case 2:
		mtpmr(PMRN_PMLCA2, pmlca);
		break;
	case 3:
		mtpmr(PMRN_PMLCA3, pmlca);
		break;
	case 4:
		mtpmr(PMRN_PMLCA4, pmlca);
		break;
	case 5:
		mtpmr(PMRN_PMLCA5, pmlca);
		break;
	default:
		panic("Bad ctr number\n");
	}
}

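/*
 * Read the current value of performance counter i (PMC0..PMC5);
 * an invalid index reads as 0.
 */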
static inline unsigned int ctr_read(unsigned int i)
{
	switch (i) {
	case 0:
		return mfpmr(PMRN_PMC0);
	case 1:
		return mfpmr(PMRN_PMC1);
	case 2:
		return mfpmr(PMRN_PMC2);
	case 3:
		return mfpmr(PMRN_PMC3);
	case 4:
		return mfpmr(PMRN_PMC4);
	case 5:
		return mfpmr(PMRN_PMC5);
	default:
		return 0;
	}
}

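/*
 * Load performance counter i with val; writes to an invalid index
 * are silently dropped.
 */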
static inline void ctr_write(unsigned int i, unsigned int val)
{
	switch (i) {
	case 0:
		mtpmr(PMRN_PMC0, val);
		break;
	case 1:
		mtpmr(PMRN_PMC1, val);
		break;
	case 2:
		mtpmr(PMRN_PMC2, val);
		break;
	case 3:
		mtpmr(PMRN_PMC3, val);
		break;
	case 4:
		mtpmr(PMRN_PMC4, val);
		break;
	case 5:
		mtpmr(PMRN_PMC5, val);
		break;
	default:
		break;
	}
}

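/*
 * Put one counter into a fully frozen state: frozen outright (FC) and
 * in every qualifier state (FCS/FCU/FCM1/FCM0), with its local
 * control B register cleared.
 */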
static void init_pmc_stop(int ctr)
{
	u32 pmlca = (PMLCA_FC | PMLCA_FCS | PMLCA_FCU |
		     PMLCA_FCM1 | PMLCA_FCM0);
	u32 pmlcb = 0;

	switch (ctr) {
	case 0:
		mtpmr(PMRN_PMLCA0, pmlca);
		mtpmr(PMRN_PMLCB0, pmlcb);
		break;
	case 1:
		mtpmr(PMRN_PMLCA1, pmlca);
		mtpmr(PMRN_PMLCB1, pmlcb);
		break;
	case 2:
		mtpmr(PMRN_PMLCA2, pmlca);
		mtpmr(PMRN_PMLCB2, pmlcb);
		break;
	case 3:
		mtpmr(PMRN_PMLCA3, pmlca);
		mtpmr(PMRN_PMLCB3, pmlcb);
		break;
	case 4:
		mtpmr(PMRN_PMLCA4, pmlca);
		mtpmr(PMRN_PMLCB4, pmlcb);
		break;
	case 5:
		mtpmr(PMRN_PMLCA5, pmlca);
		mtpmr(PMRN_PMLCB5, pmlcb);
		break;
	default:
		panic("Bad ctr number!\n");
	}
}

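/* Program the event-select field of a counter's PMLCA. */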
static void set_pmc_event(int ctr, int event)
{
	u32 pmlca;

	pmlca = get_pmlca(ctr);

	pmlca = (pmlca & ~PMLCA_EVENT_MASK) |
		((event << PMLCA_EVENT_SHIFT) &
		 PMLCA_EVENT_MASK);

	set_pmlca(ctr, pmlca);
}

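/*
 * Enable or disable counting in user and supervisor state.  The
 * hardware bits are freeze bits (FCU/FCS), so enabling a mode means
 * clearing the corresponding bit.
 */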
static void set_pmc_user_kernel(int ctr, int user, int kernel)
{
	u32 pmlca;

	pmlca = get_pmlca(ctr);

	if (user)
		pmlca &= ~PMLCA_FCU;
	else
		pmlca |= PMLCA_FCU;

	if (kernel)
		pmlca &= ~PMLCA_FCS;
	else
		pmlca |= PMLCA_FCS;

	set_pmlca(ctr, pmlca);
}

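/* Likewise for the marked/unmarked qualifiers, via FCM0/FCM1. */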
static void set_pmc_marked(int ctr, int mark0, int mark1)
{
	u32 pmlca = get_pmlca(ctr);

	if (mark0)
		pmlca &= ~PMLCA_FCM0;
	else
		pmlca |= PMLCA_FCM0;

	if (mark1)
		pmlca &= ~PMLCA_FCM1;
	else
		pmlca |= PMLCA_FCM1;

	set_pmlca(ctr, pmlca);
}

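/*
 * Unfreeze a single counter and, if requested, arm its overflow
 * condition (CE) so it can raise a performance monitor interrupt.
 */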
static void pmc_start_ctr(int ctr, int enable)
{
	u32 pmlca = get_pmlca(ctr);

	pmlca &= ~PMLCA_FC;

	if (enable)
		pmlca |= PMLCA_CE;
	else
		pmlca &= ~PMLCA_CE;

	set_pmlca(ctr, pmlca);
}

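/*
 * Globally start the counters: clear the freeze-all bit (FAC), freeze
 * on an enabled condition or event (FCECE), and optionally enable the
 * performance monitor interrupt (PMIE).
 */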
static void pmc_start_ctrs(int enable)
{
	u32 pmgc0 = mfpmr(PMRN_PMGC0);

	pmgc0 &= ~PMGC0_FAC;
	pmgc0 |= PMGC0_FCECE;

	if (enable)
		pmgc0 |= PMGC0_PMIE;
	else
		pmgc0 &= ~PMGC0_PMIE;

	mtpmr(PMRN_PMGC0, pmgc0);
}

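/* Globally freeze the counters and disable the interrupt. */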
static void pmc_stop_ctrs(void)
{
	u32 pmgc0 = mfpmr(PMRN_PMGC0);

	pmgc0 |= PMGC0_FAC;

	pmgc0 &= ~(PMGC0_PMIE | PMGC0_FCECE);

	mtpmr(PMRN_PMGC0, pmgc0);
}

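/*
 * Per-CPU setup: freeze everything, then program each counter's event
 * and user/kernel filtering from the oprofile configuration.
 */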
static int fsl_emb_cpu_setup(struct op_counter_config *ctr)
{
	int i;

	/* freeze all counters */
	pmc_stop_ctrs();

	for (i = 0; i < num_counters; i++) {
		init_pmc_stop(i);

		set_pmc_event(i, ctr[i].event);

		set_pmc_user_kernel(i, ctr[i].user, ctr[i].kernel);
	}

	return 0;
}

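/*
 * Record the number of counters and precompute each counter's reset
 * value.  For example, with ctr[i].count == 100000 the counter is
 * loaded with 0x80000000 - 100000, so it overflows into the
 * performance monitor interrupt after exactly 100000 events.
 */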
static int fsl_emb_reg_setup(struct op_counter_config *ctr,
			     struct op_system_config *sys,
			     int num_ctrs)
{
	int i;

	num_counters = num_ctrs;

	/* Our counters count up, and "count" refers to how many events
	 * remain before the next interrupt; we interrupt on overflow.
	 * So we calculate the starting value which will give us "count"
	 * events until overflow.  Then we set the events on the enabled
	 * counters. */
	for (i = 0; i < num_counters; ++i)
		reset_value[i] = 0x80000000UL - ctr[i].count;

	return 0;
}

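/*
 * Start profiling: set MSR[PMM] (the counters are configured to freeze
 * while the mark bit is set), load each enabled counter with its reset
 * value, then unfreeze everything with the interrupt enabled.  Counting
 * begins once the rfi on the way back clears PMM.
 */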
static int fsl_emb_start(struct op_counter_config *ctr)
{
	int i;

	mtmsr(mfmsr() | MSR_PMM);

	for (i = 0; i < num_counters; ++i) {
		if (ctr[i].enabled) {
			ctr_write(i, reset_value[i]);
			/* Set each enabled counter to only
			 * count when the Mark bit is *not* set */
			set_pmc_marked(i, 1, 0);
			pmc_start_ctr(i, 1);
		} else {
			ctr_write(i, 0);

			/* Set the ctr to be stopped */
			pmc_start_ctr(i, 0);
		}
	}

	/* Clear the freeze bit, and enable the interrupt.
	 * The counters won't actually start until the rfi clears
	 * the PMM bit */
	pmc_start_ctrs(1);

	oprofile_running = 1;

	pr_debug("start on cpu %d, pmgc0 %x\n", smp_processor_id(),
		 mfpmr(PMRN_PMGC0));

	return 0;
}

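/* Stop profiling: freeze all counters. */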
static void fsl_emb_stop(void)
{
	/* freeze counters */
	pmc_stop_ctrs();

	oprofile_running = 0;

	pr_debug("stop on cpu %d, pmgc0 %x\n", smp_processor_id(),
		 mfpmr(PMRN_PMGC0));

	mb();
}

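/*
 * Performance monitor interrupt handler.  A counter whose most
 * significant bit is set reads back as negative and has overflowed:
 * record a sample against the interrupted PC, reload the counter,
 * then unfreeze and re-arm the interrupt.
 */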
static void fsl_emb_handle_interrupt(struct pt_regs *regs,
				     struct op_counter_config *ctr)
{
	unsigned long pc;
	int is_kernel;
	int val;
	int i;

	pc = regs->nip;
	is_kernel = is_kernel_addr(pc);

	for (i = 0; i < num_counters; ++i) {
		val = ctr_read(i);
		if (val < 0) {
			if (oprofile_running && ctr[i].enabled) {
				oprofile_add_ext_sample(pc, regs, i, is_kernel);
				ctr_write(i, reset_value[i]);
			} else {
				ctr_write(i, 0);
			}
		}
	}

	/* The freeze bit was set by the interrupt. */
	/* Clear the freeze bit, and reenable the interrupt.  The
	 * counters won't actually start until the rfi clears the PMM
	 * bit.  The PMM bit should not be set until after the interrupt
	 * is cleared to avoid it getting lost in some hypervisor
	 * environments.
	 */
	mtmsr(mfmsr() | MSR_PMM);
	pmc_start_ctrs(1);
}

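/* Operations exported to the common powerpc oprofile layer. */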
struct op_powerpc_model op_model_fsl_emb = {
	.reg_setup		= fsl_emb_reg_setup,
	.cpu_setup		= fsl_emb_cpu_setup,
	.start			= fsl_emb_start,
	.stop			= fsl_emb_stop,
	.handle_interrupt	= fsl_emb_handle_interrupt,
};