/*
 * Cell Broadband Engine OProfile Support
 *
 * (C) Copyright IBM Corporation 2006
 *
 * Authors: Maynard Johnson <maynardj@us.ibm.com>
 *          Carl Love <carll@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/hrtimer.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <asm/cell-pmu.h>
#include "pr_util.h"

#define TRACE_ARRAY_SIZE 1024
#define SCALE_SHIFT 14

static u32 *samples;

int spu_prof_running;
static unsigned int profiling_interval;

#define NUM_SPU_BITS_TRBUF 16
#define SPUS_PER_TB_ENTRY  4

#define SPU_PC_MASK 0xFFFF

static DEFINE_SPINLOCK(sample_array_lock);
unsigned long sample_array_lock_flags;

void set_spu_profiling_frequency(unsigned int freq_khz, unsigned int cycles_reset)
{
        unsigned long ns_per_cyc;

        if (!freq_khz)
                freq_khz = ppc_proc_freq/1000;

        /* To calculate a timeout in nanoseconds, the basic
         * formula is ns = cycles_reset * (NSEC_PER_SEC / cpu frequency).
         * To avoid floating point math, we use the scale math
         * technique as described in linux/jiffies.h.  We use
         * a scale factor of SCALE_SHIFT, which provides 4 decimal places
         * of precision.  This is close enough for the purpose at hand.
         *
         * The value of the timeout should be small enough that the hw
         * trace buffer will not get more than about 1/3 full for the
         * maximum user specified (the LFSR value) hw sampling frequency.
         * This is to ensure the trace buffer will never fill even if the
         * kernel thread scheduling varies under a heavy system load.
         */
        ns_per_cyc = (USEC_PER_SEC << SCALE_SHIFT)/freq_khz;
        profiling_interval = (ns_per_cyc * cycles_reset) >> SCALE_SHIFT;
}
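
/*
 * Worked example of the scale math above (the values are assumed for
 * illustration only; they are not from the original source).  On a
 * 3.2 GHz Cell, freq_khz = 3200000, so with SCALE_SHIFT = 14:
 *
 *     ns_per_cyc = (1000000 << 14) / 3200000 = 5120
 *                  (i.e. 5120 / 16384 ~= 0.3125 ns per cycle)
 *
 * For a hypothetical user-specified cycles_reset of 100000:
 *
 *     profiling_interval = (5120 * 100000) >> 14 = 31250 ns
 *
 * which matches the exact value: 100000 cycles / 3.2e9 Hz = 31.25 us.
 */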
62 | ||
63 | /* | |
64 | * Extract SPU PC from trace buffer entry | |
65 | */ | |
66 | static void spu_pc_extract(int cpu, int entry) | |
67 | { | |
68 | /* the trace buffer is 128 bits */ | |
69 | u64 trace_buffer[2]; | |
70 | u64 spu_mask; | |
71 | int spu; | |
72 | ||
73 | spu_mask = SPU_PC_MASK; | |
74 | ||
75 | /* Each SPU PC is 16 bits; hence, four spus in each of | |
76 | * the two 64-bit buffer entries that make up the | |
77 | * 128-bit trace_buffer entry. Process two 64-bit values | |
78 | * simultaneously. | |
79 | * trace[0] SPU PC contents are: 0 1 2 3 | |
80 | * trace[1] SPU PC contents are: 4 5 6 7 | |
81 | */ | |
82 | ||
83 | cbe_read_trace_buffer(cpu, trace_buffer); | |
84 | ||
85 | for (spu = SPUS_PER_TB_ENTRY-1; spu >= 0; spu--) { | |
86 | /* spu PC trace entry is upper 16 bits of the | |
87 | * 18 bit SPU program counter | |
88 | */ | |
89 | samples[spu * TRACE_ARRAY_SIZE + entry] | |
90 | = (spu_mask & trace_buffer[0]) << 2; | |
91 | samples[(spu + SPUS_PER_TB_ENTRY) * TRACE_ARRAY_SIZE + entry] | |
92 | = (spu_mask & trace_buffer[1]) << 2; | |
93 | ||
94 | trace_buffer[0] = trace_buffer[0] >> NUM_SPU_BITS_TRBUF; | |
95 | trace_buffer[1] = trace_buffer[1] >> NUM_SPU_BITS_TRBUF; | |
96 | } | |
97 | } | |
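
/*
 * Illustration of the extraction loop above, using hypothetical trace
 * buffer contents (for clarity only).  If trace_buffer[0] held
 * 0x1111222233334444, the loop would record for this entry:
 *
 *     SPU 0: 0x1111 << 2       SPU 2: 0x3333 << 2
 *     SPU 1: 0x2222 << 2       SPU 3: 0x4444 << 2
 *
 * The loop runs from spu 3 down to 0 because each right shift by
 * NUM_SPU_BITS_TRBUF exposes the next-lower-numbered SPU's PC in the
 * low 16 bits.  The << 2 restores the 16-bit trace value to its
 * position within the 18-bit PC; the two low-order bits are always
 * zero since SPU instructions are 4-byte aligned.
 */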
98 | ||
99 | static int cell_spu_pc_collection(int cpu) | |
100 | { | |
101 | u32 trace_addr; | |
102 | int entry; | |
103 | ||
104 | /* process the collected SPU PC for the node */ | |
105 | ||
106 | entry = 0; | |
107 | ||
108 | trace_addr = cbe_read_pm(cpu, trace_address); | |
109 | while (!(trace_addr & CBE_PM_TRACE_BUF_EMPTY)) { | |
110 | /* there is data in the trace buffer to process */ | |
111 | spu_pc_extract(cpu, entry); | |
112 | ||
113 | entry++; | |
114 | ||
115 | if (entry >= TRACE_ARRAY_SIZE) | |
116 | /* spu_samples is full */ | |
117 | break; | |
118 | ||
119 | trace_addr = cbe_read_pm(cpu, trace_address); | |
120 | } | |
121 | ||
122 | return entry; | |
123 | } | |
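
/*
 * Layout note (a sketch of the existing data structure, not new code):
 * the flat samples array behaves like samples[spu][entry], i.e.
 * SPUS_PER_NODE per-SPU rows of TRACE_ARRAY_SIZE entries each:
 *
 *     pc = samples[spu * TRACE_ARRAY_SIZE + entry];
 *
 * cell_spu_pc_collection() returns how many entries of each row were
 * filled; profile_spus() below hands each row to spu_sync_buffer().
 */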
124 | ||
125 | ||
126 | static enum hrtimer_restart profile_spus(struct hrtimer *timer) | |
127 | { | |
128 | ktime_t kt; | |
129 | int cpu, node, k, num_samples, spu_num; | |
130 | ||
131 | if (!spu_prof_running) | |
132 | goto stop; | |
133 | ||
134 | for_each_online_cpu(cpu) { | |
135 | if (cbe_get_hw_thread_id(cpu)) | |
136 | continue; | |
137 | ||
138 | node = cbe_cpu_to_node(cpu); | |
139 | ||
                /* There should only be one kernel thread at a time
                 * processing the samples.  In the very unlikely case
                 * that processing takes long enough for multiple
                 * kernel threads to be started, make sure only one
                 * kernel thread is working on the samples array at a
                 * time.  The sample array must be loaded and then
                 * processed for a given cpu; it is not per cpu.
                 */
                spin_lock_irqsave(&sample_array_lock,
                                  sample_array_lock_flags);
                num_samples = cell_spu_pc_collection(cpu);

                if (num_samples == 0) {
                        spin_unlock_irqrestore(&sample_array_lock,
                                               sample_array_lock_flags);
                        continue;
                }

                for (k = 0; k < SPUS_PER_NODE; k++) {
                        spu_num = k + (node * SPUS_PER_NODE);
                        spu_sync_buffer(spu_num,
                                        samples + (k * TRACE_ARRAY_SIZE),
                                        num_samples);
                }

                spin_unlock_irqrestore(&sample_array_lock,
                                       sample_array_lock_flags);
        }
        smp_wmb();      /* ensure spu event buffer updates are written */
                        /* don't want events intermingled... */
171 | ||
172 | kt = ktime_set(0, profiling_interval); | |
173 | if (!spu_prof_running) | |
174 | goto stop; | |
175 | hrtimer_forward(timer, timer->base->get_time(), kt); | |
176 | return HRTIMER_RESTART; | |
177 | ||
178 | stop: | |
179 | printk(KERN_INFO "SPU_PROF: spu-prof timer ending\n"); | |
180 | return HRTIMER_NORESTART; | |
181 | } | |
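
/*
 * Note on the rearm pattern above (explanatory, not in the original):
 * hrtimer_forward() advances the timer's expiry in multiples of kt
 * until it lies in the future, and returning HRTIMER_RESTART from the
 * callback re-queues the timer at that new expiry.  Together they give
 * a periodic timer that resynchronizes to the interval rather than
 * drifting when a callback runs late.
 */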
182 | ||
183 | static struct hrtimer timer; | |
184 | /* | |
185 | * Entry point for SPU profiling. | |
186 | * NOTE: SPU profiling is done system-wide, not per-CPU. | |
187 | * | |
188 | * cycles_reset is the count value specified by the user when | |
189 | * setting up OProfile to count SPU_CYCLES. | |
190 | */ | |
191 | int start_spu_profiling(unsigned int cycles_reset) | |
192 | { | |
193 | ktime_t kt; | |
194 | ||
195 | pr_debug("timer resolution: %lu\n", TICK_NSEC); | |
196 | kt = ktime_set(0, profiling_interval); | |
197 | hrtimer_init(&timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | |
23446d1d | 198 | hrtimer_set_expires(&timer, kt); |
1474855d BN |
199 | timer.function = profile_spus; |
200 | ||
201 | /* Allocate arrays for collecting SPU PC samples */ | |
202 | samples = kzalloc(SPUS_PER_NODE * | |
203 | TRACE_ARRAY_SIZE * sizeof(u32), GFP_KERNEL); | |
204 | ||
205 | if (!samples) | |
206 | return -ENOMEM; | |
207 | ||
208 | spu_prof_running = 1; | |
209 | hrtimer_start(&timer, kt, HRTIMER_MODE_REL); | |
a5598ca0 | 210 | schedule_delayed_work(&spu_work, DEFAULT_TIMER_EXPIRE); |
1474855d BN |
211 | |
212 | return 0; | |
213 | } | |
214 | ||
215 | void stop_spu_profiling(void) | |
216 | { | |
217 | spu_prof_running = 0; | |
218 | hrtimer_cancel(&timer); | |
219 | kfree(samples); | |
220 | pr_debug("SPU_PROF: stop_spu_profiling issued\n"); | |
221 | } |