arch/powerpc/oprofile/op_model_cell.c
/*
 * Cell Broadband Engine OProfile Support
 *
 * (C) Copyright IBM Corporation 2006
 *
 * Author: David Erb (djerb@us.ibm.com)
 * Modifications:
 *	Carl Love <carll@us.ibm.com>
 *	Maynard Johnson <maynardj@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <linux/oprofile.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <asm/cell-pmu.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/io.h>
#include <asm/oprofile_impl.h>
#include <asm/processor.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/reg.h>
#include <asm/rtas.h>
#include <asm/system.h>
#include <asm/cell-regs.h>

#include "../platforms/cell/interrupt.h"
#include "cell/pr_util.h"

static void cell_global_stop_spu(void);

/*
 * spu_cycle_reset is the number of cycles between samples.
 * This variable is used for SPU profiling and should ONLY be set
 * at the beginning of cell_reg_setup; otherwise, it's read-only.
 */
static unsigned int spu_cycle_reset;

#define NUM_SPUS_PER_NODE 8
#define SPU_CYCLES_EVENT_NUM 2	/* event number for SPU_CYCLES */

#define PPU_CYCLES_EVENT_NUM 1	/* event number for CYCLES */
#define PPU_CYCLES_GRP_NUM 1	/* special group number for identifying
				 * PPU_CYCLES event
				 */
#define CBE_COUNT_ALL_CYCLES 0x42800000 /* PPU cycle event specifier */

#define NUM_THREADS 2		/* number of physical threads in
				 * physical processor
				 */
#define NUM_DEBUG_BUS_WORDS 4
#define NUM_INPUT_BUS_WORDS 2

#define MAX_SPU_COUNT 0xFFFFFF	/* maximum 24 bit LFSR value */

struct pmc_cntrl_data {
	unsigned long vcntr;
	unsigned long evnts;
	unsigned long masks;
	unsigned long enabled;
};

/*
 * ibm,cbe-perftools rtas parameters
 */
struct pm_signal {
	u16 cpu;		/* Processor to modify */
	u16 sub_unit;		/* hw subunit this applies to (if applicable)*/
	short int signal_group;	/* Signal Group to Enable/Disable */
	u8 bus_word;		/* Enable/Disable on this Trace/Trigger/Event
				 * Bus Word(s) (bitmask)
				 */
	u8 bit;			/* Trigger/Event bit (if applicable) */
};

/*
 * rtas call arguments
 */
enum {
	SUBFUNC_RESET = 1,
	SUBFUNC_ACTIVATE = 2,
	SUBFUNC_DEACTIVATE = 3,

	PASSTHRU_IGNORE = 0,
	PASSTHRU_ENABLE = 1,
	PASSTHRU_DISABLE = 2,
};

struct pm_cntrl {
	u16 enable;
	u16 stop_at_max;
	u16 trace_mode;
	u16 freeze;
	u16 count_mode;
};

static struct {
	u32 group_control;
	u32 debug_bus_control;
	struct pm_cntrl pm_cntrl;
	u32 pm07_cntrl[NR_PHYS_CTRS];
} pm_regs;

#define GET_SUB_UNIT(x) ((x & 0x0000f000) >> 12)
#define GET_BUS_WORD(x) ((x & 0x000000f0) >> 4)
#define GET_BUS_TYPE(x) ((x & 0x00000300) >> 8)
#define GET_POLARITY(x) ((x & 0x00000002) >> 1)
#define GET_COUNT_CYCLES(x) (x & 0x00000001)
#define GET_INPUT_CONTROL(x) ((x & 0x00000004) >> 2)
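
/*
 * Illustrative decode of the unit_mask layout above (hypothetical
 * value, not taken from any event file): for unit_mask = 0x00001013,
 *	GET_SUB_UNIT      = (0x1013 & 0x0000f000) >> 12 = 1
 *	GET_BUS_WORD      = (0x1013 & 0x000000f0) >> 4  = 1
 *	GET_BUS_TYPE      = (0x1013 & 0x00000300) >> 8  = 0
 *	GET_POLARITY      = (0x1013 & 0x00000002) >> 1  = 1
 *	GET_COUNT_CYCLES  =  0x1013 & 0x00000001        = 1
 *	GET_INPUT_CONTROL = (0x1013 & 0x00000004) >> 2  = 0
 */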

static DEFINE_PER_CPU(unsigned long[NR_PHYS_CTRS], pmc_values);

static struct pmc_cntrl_data pmc_cntrl[NUM_THREADS][NR_PHYS_CTRS];

/*
 * The CELL profiling code makes rtas calls to setup the debug bus to
 * route the performance signals. Additionally, SPU profiling requires
 * a second rtas call to setup the hardware to capture the SPU PCs.
 * The EIO error value is returned if the token lookups or the rtas
 * call fail. The EIO error number is the best choice of the existing
 * error numbers. The probability of an rtas-related error is very low.
 * But by returning EIO and printing additional information to dmesg,
 * the user will know that OProfile did not start and dmesg will tell
 * them why. OProfile does not support returning errors on Stop. Not a
 * huge issue since failure to reset the debug bus or stop the SPU PC
 * collection is not a fatal issue. Chances are if the Stop failed,
 * Start doesn't work either.
 */

/*
 * Interpretation of hdw_thread:
 * 0 - even virtual cpus 0, 2, 4,...
 * 1 - odd virtual cpus 1, 3, 5, ...
 *
 * FIXME: this is strictly wrong, we need to clean this up in a number
 * of places. It works for now. -arnd
 */
static u32 hdw_thread;

static u32 virt_cntr_inter_mask;
static struct timer_list timer_virt_cntr;

/*
 * pm_signal needs to be global since it is initialized in
 * cell_reg_setup at the time when the necessary information
 * is available.
 */
static struct pm_signal pm_signal[NR_PHYS_CTRS];
static int pm_rtas_token;	/* token for debug bus setup call */
static int spu_rtas_token;	/* token for SPU cycle profiling */

static u32 reset_value[NR_PHYS_CTRS];
static int num_counters;
static int oprofile_running;
static DEFINE_SPINLOCK(virt_cntr_lock);

static u32 ctr_enabled;

static unsigned char input_bus[NUM_INPUT_BUS_WORDS];

/*
 * Firmware interface functions
 */
static int
rtas_ibm_cbe_perftools(int subfunc, int passthru,
		       void *address, unsigned long length)
{
	u64 paddr = __pa(address);

	return rtas_call(pm_rtas_token, 5, 1, NULL, subfunc,
			 passthru, paddr >> 32, paddr & 0xffffffff, length);
}

static void pm_rtas_reset_signals(u32 node)
{
	int ret;
	struct pm_signal pm_signal_local;

	/*
	 * The debug bus is being set to the passthru disable state.
	 * However, the FW still expects at least one legal signal routing
	 * entry or it will return an error on the arguments. If we don't
	 * supply a valid entry, we must ignore all return values. Ignoring
	 * all return values means we might miss an error we should be
	 * concerned about.
	 */

	/* fw expects physical cpu #. */
	pm_signal_local.cpu = node;
	pm_signal_local.signal_group = 21;
	pm_signal_local.bus_word = 1;
	pm_signal_local.sub_unit = 0;
	pm_signal_local.bit = 0;

	ret = rtas_ibm_cbe_perftools(SUBFUNC_RESET, PASSTHRU_DISABLE,
				     &pm_signal_local,
				     sizeof(struct pm_signal));

	if (unlikely(ret))
		/*
		 * Not a fatal error. For OProfile stop, the oprofile
		 * functions do not support returning an error for
		 * failure to stop OProfile.
		 */
		printk(KERN_WARNING "%s: rtas returned: %d\n",
		       __func__, ret);
}

static int pm_rtas_activate_signals(u32 node, u32 count)
{
	int ret;
	int i, j;
	struct pm_signal pm_signal_local[NR_PHYS_CTRS];

	/*
	 * There is no debug setup required for the cycles event.
	 * Note that only events in the same group can be used.
	 * Otherwise, there will be conflicts in correctly routing
	 * the signals on the debug bus. It is the responsibility
	 * of the OProfile user tool to check the events are in
	 * the same group.
	 */
	i = 0;
	for (j = 0; j < count; j++) {
		if (pm_signal[j].signal_group != PPU_CYCLES_GRP_NUM) {

			/* fw expects physical cpu # */
			pm_signal_local[i].cpu = node;
			pm_signal_local[i].signal_group
				= pm_signal[j].signal_group;
			pm_signal_local[i].bus_word = pm_signal[j].bus_word;
			pm_signal_local[i].sub_unit = pm_signal[j].sub_unit;
			pm_signal_local[i].bit = pm_signal[j].bit;
			i++;
		}
	}

	if (i != 0) {
		ret = rtas_ibm_cbe_perftools(SUBFUNC_ACTIVATE, PASSTHRU_ENABLE,
					     pm_signal_local,
					     i * sizeof(struct pm_signal));

		if (unlikely(ret)) {
			printk(KERN_WARNING "%s: rtas returned: %d\n",
			       __func__, ret);
			return -EIO;
		}
	}

	return 0;
}

/*
 * PM Signal functions
 */
static void set_pm_event(u32 ctr, int event, u32 unit_mask)
{
	struct pm_signal *p;
	u32 signal_bit;
	u32 bus_word, bus_type, count_cycles, polarity, input_control;
	int j, i;

	if (event == PPU_CYCLES_EVENT_NUM) {
		/* Special Event: Count all cpu cycles */
		pm_regs.pm07_cntrl[ctr] = CBE_COUNT_ALL_CYCLES;
		p = &(pm_signal[ctr]);
		p->signal_group = PPU_CYCLES_GRP_NUM;
		p->bus_word = 1;
		p->sub_unit = 0;
		p->bit = 0;
		goto out;
	} else {
		pm_regs.pm07_cntrl[ctr] = 0;
	}

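	/*
	 * For all other events, the event number encodes the debug-bus
	 * routing: signal_group = event / 100 and the bit within the
	 * group = event % 100, as computed below (e.g. a hypothetical
	 * event 2119 would select signal group 21, bit 19).
	 */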
	bus_word = GET_BUS_WORD(unit_mask);
	bus_type = GET_BUS_TYPE(unit_mask);
	count_cycles = GET_COUNT_CYCLES(unit_mask);
	polarity = GET_POLARITY(unit_mask);
	input_control = GET_INPUT_CONTROL(unit_mask);
	signal_bit = (event % 100);

	p = &(pm_signal[ctr]);

	p->signal_group = event / 100;
	p->bus_word = bus_word;
	p->sub_unit = GET_SUB_UNIT(unit_mask);

	pm_regs.pm07_cntrl[ctr] = 0;
	pm_regs.pm07_cntrl[ctr] |= PM07_CTR_COUNT_CYCLES(count_cycles);
	pm_regs.pm07_cntrl[ctr] |= PM07_CTR_POLARITY(polarity);
	pm_regs.pm07_cntrl[ctr] |= PM07_CTR_INPUT_CONTROL(input_control);

	/*
	 * Some of the islands' signal selection is based on 64 bit words.
	 * The debug bus words are 32 bits, and the input words to the
	 * performance counters are defined as 32 bits. Need to convert
	 * the 64 bit island specification to the appropriate 32-bit input
	 * bit and bus word for the performance counter event selection.
	 * See the CELL Performance monitoring signals manual and the
	 * Perf cntr hardware descriptions for the details.
	 */
	if (input_control == 0) {
		if (signal_bit > 31) {
			signal_bit -= 32;
			if (bus_word == 0x3)
				bus_word = 0x2;
			else if (bus_word == 0xc)
				bus_word = 0x8;
		}

		if ((bus_type == 0) && p->signal_group >= 60)
			bus_type = 2;
		if ((bus_type == 1) && p->signal_group >= 50)
			bus_type = 0;

		pm_regs.pm07_cntrl[ctr] |= PM07_CTR_INPUT_MUX(signal_bit);
	} else {
		pm_regs.pm07_cntrl[ctr] = 0;
		p->bit = signal_bit;
	}

	for (i = 0; i < NUM_DEBUG_BUS_WORDS; i++) {
		if (bus_word & (1 << i)) {
			pm_regs.debug_bus_control |=
				(bus_type << (30 - (2 * i)));

			for (j = 0; j < NUM_INPUT_BUS_WORDS; j++) {
				if (input_bus[j] == 0xff) {
					input_bus[j] = i;
					pm_regs.group_control |=
						(i << (30 - (2 * j)));

					break;
				}
			}
		}
	}
out:
	;
}

static void write_pm_cntrl(int cpu)
{
	/*
	 * OProfile will use 32 bit counters, set bits 7:10 to 0.
	 * pm_regs.pm_cntrl is a global.
	 */

	u32 val = 0;

	if (pm_regs.pm_cntrl.enable == 1)
		val |= CBE_PM_ENABLE_PERF_MON;

	if (pm_regs.pm_cntrl.stop_at_max == 1)
		val |= CBE_PM_STOP_AT_MAX;

	if (pm_regs.pm_cntrl.trace_mode == 1)
		val |= CBE_PM_TRACE_MODE_SET(pm_regs.pm_cntrl.trace_mode);

	if (pm_regs.pm_cntrl.freeze == 1)
		val |= CBE_PM_FREEZE_ALL_CTRS;

	/*
	 * Routine set_count_mode must be called previously to set
	 * the count mode based on the user selection of user and kernel.
	 */
	val |= CBE_PM_COUNT_MODE_SET(pm_regs.pm_cntrl.count_mode);
	cbe_write_pm(cpu, pm_control, val);
}

static inline void
set_count_mode(u32 kernel, u32 user)
{
	/*
	 * The user must specify user and kernel if they want them. If
	 * neither is specified, OProfile will count in hypervisor mode.
	 * pm_regs.pm_cntrl is a global.
	 */
	if (kernel) {
		if (user)
			pm_regs.pm_cntrl.count_mode = CBE_COUNT_ALL_MODES;
		else
			pm_regs.pm_cntrl.count_mode =
				CBE_COUNT_SUPERVISOR_MODE;
	} else {
		if (user)
			pm_regs.pm_cntrl.count_mode = CBE_COUNT_PROBLEM_MODE;
		else
			pm_regs.pm_cntrl.count_mode =
				CBE_COUNT_HYPERVISOR_MODE;
	}
}

static inline void enable_ctr(u32 cpu, u32 ctr, u32 *pm07_cntrl)
{
	pm07_cntrl[ctr] |= CBE_PM_CTR_ENABLE;
	cbe_write_pm07_control(cpu, ctr, pm07_cntrl[ctr]);
}

/*
 * Oprofile is expected to collect data on all CPUs simultaneously.
 * However, there is one set of performance counters per node. There are
 * two hardware threads or virtual CPUs on each node. Hence, OProfile must
 * multiplex in time the performance counter collection on the two virtual
 * CPUs. The multiplexing of the performance counters is done by this
 * virtual counter routine.
 *
 * The pmc_values used below is defined as 'per-cpu' but its use is
 * more akin to 'per-node'. We need to store two sets of counter
 * values per node -- one for the previous run and one for the next.
 * The per-cpu[NR_PHYS_CTRS] gives us the storage we need. Each odd/even
 * pair of per-cpu arrays is used for storing the previous and next
 * pmc values for a given node.
 * NOTE: We use the per-cpu variable to improve cache performance.
 *
 * This routine will alternate loading the virtual counters for
 * the two virtual CPUs.
 */
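/*
 * Per-node storage sketch (illustrative, restating the comment above):
 * on a node whose two virtual CPUs are N (thread 0) and N+1 (thread 1),
 * per_cpu(pmc_values, N) holds the saved counts for thread 0 and
 * per_cpu(pmc_values, N + 1) those for thread 1; the routine below
 * indexes the pair via cpu + prev_hdw_thread and cpu + next_hdw_thread.
 */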
static void cell_virtual_cntr(unsigned long data)
{
	int i, prev_hdw_thread, next_hdw_thread;
	u32 cpu;
	unsigned long flags;

	/*
	 * Make sure that the interrupt handler and the virt counter are
	 * not both playing with the counters on the same node.
	 */

	spin_lock_irqsave(&virt_cntr_lock, flags);

	prev_hdw_thread = hdw_thread;

	/* switch the cpu handling the interrupts */
	hdw_thread = 1 ^ hdw_thread;
	next_hdw_thread = hdw_thread;

	pm_regs.group_control = 0;
	pm_regs.debug_bus_control = 0;

	for (i = 0; i < NUM_INPUT_BUS_WORDS; i++)
		input_bus[i] = 0xff;

	/*
	 * There are some per-thread events. Must do the
	 * set event for the thread that is being started.
	 */
	for (i = 0; i < num_counters; i++)
		set_pm_event(i,
			     pmc_cntrl[next_hdw_thread][i].evnts,
			     pmc_cntrl[next_hdw_thread][i].masks);

	/*
	 * The following is done only once per each node, but
	 * we need cpu #, not node #, to pass to the cbe_xxx functions.
	 */
	for_each_online_cpu(cpu) {
		if (cbe_get_hw_thread_id(cpu))
			continue;

		/*
		 * stop counters, save counter values, restore counts
		 * for previous thread
		 */
		cbe_disable_pm(cpu);
		cbe_disable_pm_interrupts(cpu);
		for (i = 0; i < num_counters; i++) {
			per_cpu(pmc_values, cpu + prev_hdw_thread)[i]
				= cbe_read_ctr(cpu, i);

			if (per_cpu(pmc_values, cpu + next_hdw_thread)[i]
			    == 0xFFFFFFFF)
				/* If the cntr value is 0xffffffff, we must
				 * reset that to 0xfffffff0 when the current
				 * thread is restarted. This will generate a
				 * new interrupt and make sure that we never
				 * restore the counters to the max value. If
				 * the counters were restored to the max value,
				 * they do not increment and no interrupts are
				 * generated. Hence no more samples will be
				 * collected on that cpu.
				 */
				cbe_write_ctr(cpu, i, 0xFFFFFFF0);
			else
				cbe_write_ctr(cpu, i,
					      per_cpu(pmc_values,
						      cpu +
						      next_hdw_thread)[i]);
		}

		/*
		 * Switch to the other thread. Change the interrupt
		 * and control regs to be scheduled on the CPU
		 * corresponding to the thread to execute.
		 */
		for (i = 0; i < num_counters; i++) {
			if (pmc_cntrl[next_hdw_thread][i].enabled) {
				/*
				 * There are some per-thread events.
				 * Must do the set event, enable_cntr
				 * for each cpu.
				 */
				enable_ctr(cpu, i,
					   pm_regs.pm07_cntrl);
			} else {
				cbe_write_pm07_control(cpu, i, 0);
			}
		}

		/* Enable interrupts on the CPU thread that is starting */
		cbe_enable_pm_interrupts(cpu, next_hdw_thread,
					 virt_cntr_inter_mask);
		cbe_enable_pm(cpu);
	}

	spin_unlock_irqrestore(&virt_cntr_lock, flags);

	mod_timer(&timer_virt_cntr, jiffies + HZ / 10);
}

static void start_virt_cntrs(void)
{
	init_timer(&timer_virt_cntr);
	timer_virt_cntr.function = cell_virtual_cntr;
	timer_virt_cntr.data = 0UL;
	timer_virt_cntr.expires = jiffies + HZ / 10;
	add_timer(&timer_virt_cntr);
}
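
/*
 * With the HZ / 10 expiry used above, the two hardware threads trade
 * ownership of the physical counters roughly ten times per second.
 */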

/* This function is called once for all cpus combined */
static int cell_reg_setup(struct op_counter_config *ctr,
			  struct op_system_config *sys, int num_ctrs)
{
	int i, j, cpu;

	spu_cycle_reset = 0;

	if (ctr[0].event == SPU_CYCLES_EVENT_NUM) {
		spu_cycle_reset = ctr[0].count;

		/*
		 * Each node will need to make the rtas call to start
		 * and stop SPU profiling. Get the token once and store it.
		 */
		spu_rtas_token = rtas_token("ibm,cbe-spu-perftools");

		if (unlikely(spu_rtas_token == RTAS_UNKNOWN_SERVICE)) {
			printk(KERN_ERR
			       "%s: rtas token ibm,cbe-spu-perftools unknown\n",
			       __func__);
			return -EIO;
		}
	}

	pm_rtas_token = rtas_token("ibm,cbe-perftools");

	/*
	 * For all events except PPU cycles, each node will need to make
	 * the rtas cbe-perftools call to setup and reset the debug bus.
	 * Make the token lookup call once and store it in the global
	 * variable pm_rtas_token.
	 */
	if (unlikely(pm_rtas_token == RTAS_UNKNOWN_SERVICE)) {
		printk(KERN_ERR
		       "%s: rtas token ibm,cbe-perftools unknown\n",
		       __func__);
		return -EIO;
	}

	num_counters = num_ctrs;

	if (unlikely(num_ctrs > NR_PHYS_CTRS)) {
		printk(KERN_ERR
		       "%s: OProfile, number of specified events " \
		       "exceeds number of physical counters\n",
		       __func__);
		return -EIO;
	}
	pm_regs.group_control = 0;
	pm_regs.debug_bus_control = 0;

	/* setup the pm_control register */
	memset(&pm_regs.pm_cntrl, 0, sizeof(struct pm_cntrl));
	pm_regs.pm_cntrl.stop_at_max = 1;
	pm_regs.pm_cntrl.trace_mode = 0;
	pm_regs.pm_cntrl.freeze = 1;

	set_count_mode(sys->enable_kernel, sys->enable_user);

	/* Setup the thread 0 events */
	for (i = 0; i < num_ctrs; ++i) {

		pmc_cntrl[0][i].evnts = ctr[i].event;
		pmc_cntrl[0][i].masks = ctr[i].unit_mask;
		pmc_cntrl[0][i].enabled = ctr[i].enabled;
		pmc_cntrl[0][i].vcntr = i;

		for_each_possible_cpu(j)
			per_cpu(pmc_values, j)[i] = 0;
	}

	/*
	 * Setup the thread 1 events, map the thread 0 event to the
	 * equivalent thread 1 event.
	 */
	for (i = 0; i < num_ctrs; ++i) {
		if ((ctr[i].event >= 2100) && (ctr[i].event <= 2111))
			pmc_cntrl[1][i].evnts = ctr[i].event + 19;
		else if (ctr[i].event == 2203)
			pmc_cntrl[1][i].evnts = ctr[i].event;
		else if ((ctr[i].event >= 2200) && (ctr[i].event <= 2215))
			pmc_cntrl[1][i].evnts = ctr[i].event + 16;
		else
			pmc_cntrl[1][i].evnts = ctr[i].event;

		pmc_cntrl[1][i].masks = ctr[i].unit_mask;
		pmc_cntrl[1][i].enabled = ctr[i].enabled;
		pmc_cntrl[1][i].vcntr = i;
	}
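
	/*
	 * Illustrative mapping from the ranges above: thread-0 event 2100
	 * becomes thread-1 event 2119 and event 2200 becomes 2216, while
	 * event 2203 is used unchanged by both threads.
	 */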

	for (i = 0; i < NUM_INPUT_BUS_WORDS; i++)
		input_bus[i] = 0xff;

	/*
	 * Our counters count up, and "count" refers to
	 * how much before the next interrupt, and we interrupt
	 * on overflow. So we calculate the starting value
	 * which will give us "count" until overflow.
	 * Then we set the events on the enabled counters.
	 */
	for (i = 0; i < num_counters; ++i) {
		/* start with virtual counter set 0 */
		if (pmc_cntrl[0][i].enabled) {
			/* Using 32bit counters, reset max - count */
			reset_value[i] = 0xFFFFFFFF - ctr[i].count;
			set_pm_event(i,
				     pmc_cntrl[0][i].evnts,
				     pmc_cntrl[0][i].masks);

			/* global, used by cell_cpu_setup */
			ctr_enabled |= (1 << i);
		}
	}

	/* initialize the previous counts for the virtual cntrs */
	for_each_online_cpu(cpu)
		for (i = 0; i < num_counters; ++i) {
			per_cpu(pmc_values, cpu)[i] = reset_value[i];
		}

	return 0;
}


/* This function is called once for each cpu */
static int cell_cpu_setup(struct op_counter_config *cntr)
{
	u32 cpu = smp_processor_id();
	u32 num_enabled = 0;
	int i;

	if (spu_cycle_reset)
		return 0;

	/* There is one performance monitor per processor chip (i.e. node),
	 * so we only need to perform this function once per node.
	 */
	if (cbe_get_hw_thread_id(cpu))
		return 0;

	/* Stop all counters */
	cbe_disable_pm(cpu);
	cbe_disable_pm_interrupts(cpu);

	cbe_write_pm(cpu, pm_interval, 0);
	cbe_write_pm(cpu, pm_start_stop, 0);
	cbe_write_pm(cpu, group_control, pm_regs.group_control);
	cbe_write_pm(cpu, debug_bus_control, pm_regs.debug_bus_control);
	write_pm_cntrl(cpu);

	for (i = 0; i < num_counters; ++i) {
		if (ctr_enabled & (1 << i)) {
			pm_signal[num_enabled].cpu = cbe_cpu_to_node(cpu);
			num_enabled++;
		}
	}

	/*
	 * The pm_rtas_activate_signals will return -EIO if the FW
	 * call failed.
	 */
	return pm_rtas_activate_signals(cbe_cpu_to_node(cpu), num_enabled);
}

#define ENTRIES	 303
#define MAXLFSR	 0xFFFFFF

/* precomputed table of 24 bit LFSR values */
static int initial_lfsr[] = {
 8221349, 12579195, 5379618, 10097839, 7512963, 7519310, 3955098, 10753424,
 15507573, 7458917, 285419, 2641121, 9780088, 3915503, 6668768, 1548716,
 4885000, 8774424, 9650099, 2044357, 2304411, 9326253, 10332526, 4421547,
 3440748, 10179459, 13332843, 10375561, 1313462, 8375100, 5198480, 6071392,
 9341783, 1526887, 3985002, 1439429, 13923762, 7010104, 11969769, 4547026,
 2040072, 4025602, 3437678, 7939992, 11444177, 4496094, 9803157, 10745556,
 3671780, 4257846, 5662259, 13196905, 3237343, 12077182, 16222879, 7587769,
 14706824, 2184640, 12591135, 10420257, 7406075, 3648978, 11042541, 15906893,
 11914928, 4732944, 10695697, 12928164, 11980531, 4430912, 11939291, 2917017,
 6119256, 4172004, 9373765, 8410071, 14788383, 5047459, 5474428, 1737756,
 15967514, 13351758, 6691285, 8034329, 2856544, 14394753, 11310160, 12149558,
 7487528, 7542781, 15668898, 12525138, 12790975, 3707933, 9106617, 1965401,
 16219109, 12801644, 2443203, 4909502, 8762329, 3120803, 6360315, 9309720,
 15164599, 10844842, 4456529, 6667610, 14924259, 884312, 6234963, 3326042,
 15973422, 13919464, 5272099, 6414643, 3909029, 2764324, 5237926, 4774955,
 10445906, 4955302, 5203726, 10798229, 11443419, 2303395, 333836, 9646934,
 3464726, 4159182, 568492, 995747, 10318756, 13299332, 4836017, 8237783,
 3878992, 2581665, 11394667, 5672745, 14412947, 3159169, 9094251, 16467278,
 8671392, 15230076, 4843545, 7009238, 15504095, 1494895, 9627886, 14485051,
 8304291, 252817, 12421642, 16085736, 4774072, 2456177, 4160695, 15409741,
 4902868, 5793091, 13162925, 16039714, 782255, 11347835, 14884586, 366972,
 16308990, 11913488, 13390465, 2958444, 10340278, 1177858, 1319431, 10426302,
 2868597, 126119, 5784857, 5245324, 10903900, 16436004, 3389013, 1742384,
 14674502, 10279218, 8536112, 10364279, 6877778, 14051163, 1025130, 6072469,
 1988305, 8354440, 8216060, 16342977, 13112639, 3976679, 5913576, 8816697,
 6879995, 14043764, 3339515, 9364420, 15808858, 12261651, 2141560, 5636398,
 10345425, 10414756, 781725, 6155650, 4746914, 5078683, 7469001, 6799140,
 10156444, 9667150, 10116470, 4133858, 2121972, 1124204, 1003577, 1611214,
 14304602, 16221850, 13878465, 13577744, 3629235, 8772583, 10881308, 2410386,
 7300044, 5378855, 9301235, 12755149, 4977682, 8083074, 10327581, 6395087,
 9155434, 15501696, 7514362, 14520507, 15808945, 3244584, 4741962, 9658130,
 14336147, 8654727, 7969093, 15759799, 14029445, 5038459, 9894848, 8659300,
 13699287, 8834306, 10712885, 14753895, 10410465, 3373251, 309501, 9561475,
 5526688, 14647426, 14209836, 5339224, 207299, 14069911, 8722990, 2290950,
 3258216, 12505185, 6007317, 9218111, 14661019, 10537428, 11731949, 9027003,
 6641507, 9490160, 200241, 9720425, 16277895, 10816638, 1554761, 10431375,
 7467528, 6790302, 3429078, 14633753, 14428997, 11463204, 3576212, 2003426,
 6123687, 820520, 9992513, 15784513, 5778891, 6428165, 8388607
};

/*
 * The hardware uses an LFSR counting sequence to determine when to capture
 * the SPU PCs. An LFSR sequence is like a pseudo-random number sequence
 * where each number occurs once in the sequence but the sequence is not in
 * numerical order. The SPU PC capture is done when the LFSR sequence reaches
 * the last value in the sequence. Hence the user specified value N
 * corresponds to the LFSR number that is N from the end of the sequence.
 *
 * To avoid the time to compute the LFSR, a lookup table is used. The 24 bit
 * LFSR sequence is broken into four ranges. The spacing of the precomputed
 * values is adjusted in each range so the error between the user specified
 * number (N) of events between samples and the actual number of events based
 * on the precomputed value will be less than about 6.2%. Note, if the user
 * specifies N < 2^16, the LFSR value that is 2^16 from the end will be used.
 * This is to prevent the loss of samples because the trace buffer is full.
 *
 *	User specified N		    Step between	Index in
 *					precomputed values	precomputed
 *								table
 * 0		     to 2^16-1			----		     0
 * 2^16		     to 2^16+2^19-1		2^12		  1 to 128
 * 2^16+2^19	     to 2^16+2^19+2^22-1	2^15		129 to 256
 * 2^16+2^19+2^22    to 2^24-1			2^18		257 to 302
 *
 *
 * For example, the LFSR values in the second range are computed for 2^16,
 * 2^16+2^12, ... , 2^19-2^16, 2^19 and stored in the table at indices
 * 1, 2,..., 127, 128.
 *
 * The 24 bit LFSR value for the nth number in the sequence can be
 * calculated using the following code:
 *
 * #define size 24
 * int calculate_lfsr(int n)
 * {
 *	int i;
 *	unsigned int newlfsr0;
 *	unsigned int lfsr = 0xFFFFFF;
 *	unsigned int howmany = n;
 *
 *	for (i = 2; i < howmany + 2; i++) {
 *		newlfsr0 = (((lfsr >> (size - 1 - 0)) & 1) ^
 *			    ((lfsr >> (size - 1 - 1)) & 1) ^
 *			    (((lfsr >> (size - 1 - 6)) & 1) ^
 *			     ((lfsr >> (size - 1 - 23)) & 1)));
 *
 *		lfsr >>= 1;
 *		lfsr = lfsr | (newlfsr0 << (size - 1));
 *	}
 *	return lfsr;
 * }
 */

#define V2_16 (0x1 << 16)
#define V2_19 (0x1 << 19)
#define V2_22 (0x1 << 22)

static int calculate_lfsr(int n)
{
	/*
	 * The ranges and steps are in powers of 2 so the calculations
	 * can be done using shifts rather than divide.
	 */
	int index;

	if ((n >> 16) == 0)
		index = 0;
	else if (((n - V2_16) >> 19) == 0)
		index = ((n - V2_16) >> 12) + 1;
	else if (((n - V2_16 - V2_19) >> 22) == 0)
		index = ((n - V2_16 - V2_19) >> 15) + 1 + 128;
	else if (((n - V2_16 - V2_19 - V2_22) >> 24) == 0)
		index = ((n - V2_16 - V2_19 - V2_22) >> 18) + 1 + 256;
	else
		index = ENTRIES - 1;

	/* make sure index is valid */
	if ((index >= ENTRIES) || (index < 0))
		index = ENTRIES - 1;

	return initial_lfsr[index];
}
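
/*
 * Worked example (hypothetical n, not taken from any profile): for
 * n = 100000, n >> 16 is non-zero, so the first range is skipped;
 * n - V2_16 = 34464 and 34464 >> 19 == 0, so index = (34464 >> 12) + 1 = 9
 * and initial_lfsr[9] is returned.
 */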

static int pm_rtas_activate_spu_profiling(u32 node)
{
	int ret, i;
	struct pm_signal pm_signal_local[NUM_SPUS_PER_NODE];

	/*
	 * Set up the rtas call to configure the debug bus to
	 * route the SPU PCs. Setup the pm_signal for each SPU.
	 */
	for (i = 0; i < ARRAY_SIZE(pm_signal_local); i++) {
		pm_signal_local[i].cpu = node;
		pm_signal_local[i].signal_group = 41;
		/* spu i on word (i/2) */
		pm_signal_local[i].bus_word = 1 << i / 2;
		/* spu i */
		pm_signal_local[i].sub_unit = i;
		pm_signal_local[i].bit = 63;
	}

	ret = rtas_ibm_cbe_perftools(SUBFUNC_ACTIVATE,
				     PASSTHRU_ENABLE, pm_signal_local,
				     (ARRAY_SIZE(pm_signal_local)
				      * sizeof(struct pm_signal)));

	if (unlikely(ret)) {
		printk(KERN_WARNING "%s: rtas returned: %d\n",
		       __func__, ret);
		return -EIO;
	}

	return 0;
}

#ifdef CONFIG_CPU_FREQ
static int
oprof_cpufreq_notify(struct notifier_block *nb, unsigned long val, void *data)
{
	int ret = 0;
	struct cpufreq_freqs *frq = data;

	if ((val == CPUFREQ_PRECHANGE && frq->old < frq->new) ||
	    (val == CPUFREQ_POSTCHANGE && frq->old > frq->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE))
		set_spu_profiling_frequency(frq->new, spu_cycle_reset);
	return ret;
}

static struct notifier_block cpu_freq_notifier_block = {
	.notifier_call = oprof_cpufreq_notify
};
#endif

static int cell_global_start_spu(struct op_counter_config *ctr)
{
	int subfunc;
	unsigned int lfsr_value;
	int cpu;
	int ret;
	int rtas_error;
	unsigned int cpu_khzfreq = 0;

	/* The SPU profiling uses time-based profiling based on
	 * cpu frequency, so if configured with the CPU_FREQ
	 * option, we should detect frequency changes and react
	 * accordingly.
	 */
#ifdef CONFIG_CPU_FREQ
	ret = cpufreq_register_notifier(&cpu_freq_notifier_block,
					CPUFREQ_TRANSITION_NOTIFIER);
	if (ret < 0)
		/* this is not a fatal error */
		printk(KERN_ERR "CPU freq change registration failed: %d\n",
		       ret);
	else
		cpu_khzfreq = cpufreq_quick_get(smp_processor_id());
#endif

	set_spu_profiling_frequency(cpu_khzfreq, spu_cycle_reset);

	for_each_online_cpu(cpu) {
		if (cbe_get_hw_thread_id(cpu))
			continue;

		/*
		 * Setup SPU cycle-based profiling.
		 * Set perf_mon_control bit 0 to a zero before
		 * enabling spu collection hardware.
		 */
		cbe_write_pm(cpu, pm_control, 0);

		if (spu_cycle_reset > MAX_SPU_COUNT)
			/* use largest possible value */
			lfsr_value = calculate_lfsr(MAX_SPU_COUNT - 1);
		else
			lfsr_value = calculate_lfsr(spu_cycle_reset);

		/* must use a non-zero value. Zero disables data collection. */
		if (lfsr_value == 0)
			lfsr_value = calculate_lfsr(1);

		lfsr_value = lfsr_value << 8; /* shift lfsr to correct
					       * register location
					       */

		/* debug bus setup */
		ret = pm_rtas_activate_spu_profiling(cbe_cpu_to_node(cpu));

		if (unlikely(ret)) {
			rtas_error = ret;
			goto out;
		}

		subfunc = 2;	/* 2 - activate SPU tracing, 3 - deactivate */

		/* start profiling */
		ret = rtas_call(spu_rtas_token, 3, 1, NULL, subfunc,
				cbe_cpu_to_node(cpu), lfsr_value);

		if (unlikely(ret != 0)) {
			printk(KERN_ERR
			       "%s: rtas call ibm,cbe-spu-perftools failed, return = %d\n",
			       __func__, ret);
			rtas_error = -EIO;
			goto out;
		}
	}

	rtas_error = start_spu_profiling(spu_cycle_reset);
	if (rtas_error)
		goto out_stop;

	oprofile_running = 1;
	return 0;

out_stop:
	cell_global_stop_spu();		/* clean up the PMU/debug bus */
out:
	return rtas_error;
}

static int cell_global_start_ppu(struct op_counter_config *ctr)
{
	u32 cpu, i;
	u32 interrupt_mask = 0;

	/* This routine gets called once for the system.
	 * There is one performance monitor per node, so we
	 * only need to perform this function once per node.
	 */
	for_each_online_cpu(cpu) {
		if (cbe_get_hw_thread_id(cpu))
			continue;

		interrupt_mask = 0;

		for (i = 0; i < num_counters; ++i) {
			if (ctr_enabled & (1 << i)) {
				cbe_write_ctr(cpu, i, reset_value[i]);
				enable_ctr(cpu, i, pm_regs.pm07_cntrl);
				interrupt_mask |=
					CBE_PM_CTR_OVERFLOW_INTR(i);
			} else {
				/* Disable counter */
				cbe_write_pm07_control(cpu, i, 0);
			}
		}

		cbe_get_and_clear_pm_interrupts(cpu);
		cbe_enable_pm_interrupts(cpu, hdw_thread, interrupt_mask);
		cbe_enable_pm(cpu);
	}

	virt_cntr_inter_mask = interrupt_mask;
	oprofile_running = 1;
	smp_wmb();

	/*
	 * NOTE: start_virt_cntrs will result in cell_virtual_cntr() being
	 * executed which manipulates the PMU. We start the "virtual counter"
	 * here so that we do not need to synchronize access to the PMU in
	 * the above for-loop.
	 */
	start_virt_cntrs();

	return 0;
}

static int cell_global_start(struct op_counter_config *ctr)
{
	if (spu_cycle_reset)
		return cell_global_start_spu(ctr);
	else
		return cell_global_start_ppu(ctr);
}

/*
 * Note the generic OProfile stop calls do not support returning
 * an error on stop. Hence, we will not return an error if the FW
 * calls fail on stop. Failure to reset the debug bus is not an issue.
 * Failure to disable the SPU profiling is not an issue. The FW calls
 * to enable the performance counters and debug bus will work even if
 * the hardware was not cleanly reset.
 */
1042{
1043 int subfunc, rtn_value;
1044 unsigned int lfsr_value;
1045 int cpu;
1046
1047 oprofile_running = 0;
1048
1049#ifdef CONFIG_CPU_FREQ
1050 cpufreq_unregister_notifier(&cpu_freq_notifier_block,
1051 CPUFREQ_TRANSITION_NOTIFIER);
1052#endif
1053
1054 for_each_online_cpu(cpu) {
1055 if (cbe_get_hw_thread_id(cpu))
1056 continue;
1057
1058 subfunc = 3; /*
1059 * 2 - activate SPU tracing,
1060 * 3 - deactivate
1061 */
1062 lfsr_value = 0x8f100000;
1063
1064 rtn_value = rtas_call(spu_rtas_token, 3, 1, NULL,
1065 subfunc, cbe_cpu_to_node(cpu),
1066 lfsr_value);
1067
1068 if (unlikely(rtn_value != 0)) {
1069 printk(KERN_ERR
1070 "%s: rtas call ibm,cbe-spu-perftools failed, return = %d\n",
e48b1b45 1071 __func__, rtn_value);
1474855d
BN
1072 }
1073
1074 /* Deactivate the signals */
1075 pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
1076 }
1077
1078 stop_spu_profiling();
1079}

static void cell_global_stop_ppu(void)
{
	int cpu;

	/*
	 * This routine will be called once for the system.
	 * There is one performance monitor per node, so we
	 * only need to perform this function once per node.
	 */
	del_timer_sync(&timer_virt_cntr);
	oprofile_running = 0;
	smp_wmb();

	for_each_online_cpu(cpu) {
		if (cbe_get_hw_thread_id(cpu))
			continue;

		cbe_sync_irq(cbe_cpu_to_node(cpu));
		/* Stop the counters */
		cbe_disable_pm(cpu);

		/* Deactivate the signals */
		pm_rtas_reset_signals(cbe_cpu_to_node(cpu));

		/* Deactivate interrupts */
		cbe_disable_pm_interrupts(cpu);
	}
}

static void cell_global_stop(void)
{
	if (spu_cycle_reset)
		cell_global_stop_spu();
	else
		cell_global_stop_ppu();
}

static void cell_handle_interrupt(struct pt_regs *regs,
				  struct op_counter_config *ctr)
{
	u32 cpu;
	u64 pc;
	int is_kernel;
	unsigned long flags = 0;
	u32 interrupt_mask;
	int i;

	cpu = smp_processor_id();

	/*
	 * Need to make sure the interrupt handler and the virt counter
	 * routine are not running at the same time. See the
	 * cell_virtual_cntr() routine for additional comments.
	 */
	spin_lock_irqsave(&virt_cntr_lock, flags);

	/*
	 * Need to disable and reenable the performance counters
	 * to get the desired behavior from the hardware. This
	 * is hardware specific.
	 */

	cbe_disable_pm(cpu);

	interrupt_mask = cbe_get_and_clear_pm_interrupts(cpu);

	/*
	 * If the interrupt mask has been cleared, then the virt cntr
	 * has cleared the interrupt. When the thread that generated
	 * the interrupt is restored, the data count will be restored to
	 * 0xFFFFFFF0 to cause the interrupt to be regenerated.
	 */

	if ((oprofile_running == 1) && (interrupt_mask != 0)) {
		pc = regs->nip;
		is_kernel = is_kernel_addr(pc);

		for (i = 0; i < num_counters; ++i) {
			if ((interrupt_mask & CBE_PM_CTR_OVERFLOW_INTR(i))
			    && ctr[i].enabled) {
				oprofile_add_ext_sample(pc, regs, i, is_kernel);
				cbe_write_ctr(cpu, i, reset_value[i]);
			}
		}

		/*
		 * The counters were frozen by the interrupt.
		 * Reenable the interrupt and restart the counters.
		 * If there was a race between the interrupt handler and
		 * the virtual counter routine, the virtual counter
		 * routine may have cleared the interrupts. Hence we must
		 * use the virt_cntr_inter_mask to re-enable the interrupts.
		 */
		cbe_enable_pm_interrupts(cpu, hdw_thread,
					 virt_cntr_inter_mask);

		/*
		 * The writes to the various performance counters only write
		 * to a latch. The new values (interrupt setting bits, reset
		 * counter value etc.) are not copied to the actual registers
		 * until the performance monitor is enabled. In order to get
		 * this to work as desired, the performance monitor needs to
		 * be disabled while writing to the latches. This is a
		 * HW design issue.
		 */
		cbe_enable_pm(cpu);
	}
	spin_unlock_irqrestore(&virt_cntr_lock, flags);
}

/*
 * This function is called from the generic OProfile
 * driver. When profiling PPUs, we need to do the
 * generic sync start; otherwise, do spu_sync_start.
 */
static int cell_sync_start(void)
{
	if (spu_cycle_reset)
		return spu_sync_start();
	else
		return DO_GENERIC_SYNC;
}

static int cell_sync_stop(void)
{
	if (spu_cycle_reset)
		return spu_sync_stop();
	else
		return 1;
}

struct op_powerpc_model op_model_cell = {
	.reg_setup = cell_reg_setup,
	.cpu_setup = cell_cpu_setup,
	.global_start = cell_global_start,
	.global_stop = cell_global_stop,
	.sync_start = cell_sync_start,
	.sync_stop = cell_sync_stop,
	.handle_interrupt = cell_handle_interrupt,
};