/*
 * CCI cache coherent interconnect driver
 *
 * Copyright (C) 2013 ARM Ltd.
 * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/arm-cci.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <asm/cacheflush.h>
#include <asm/smp_plat.h>

static void __iomem *cci_ctrl_base;
static unsigned long cci_ctrl_phys;

#ifdef CONFIG_ARM_CCI400_PORT_CTRL
struct cci_nb_ports {
	unsigned int nb_ace;
	unsigned int nb_ace_lite;
};

static const struct cci_nb_ports cci400_ports = {
	.nb_ace = 2,
	.nb_ace_lite = 3
};

#define CCI400_PORTS_DATA	(&cci400_ports)
#else
#define CCI400_PORTS_DATA	(NULL)
#endif

static const struct of_device_id arm_cci_matches[] = {
#ifdef CONFIG_ARM_CCI400_COMMON
	{.compatible = "arm,cci-400", .data = CCI400_PORTS_DATA },
#endif
#ifdef CONFIG_ARM_CCI500_PMU
	{ .compatible = "arm,cci-500", },
#endif
	{},
};

#ifdef CONFIG_ARM_CCI_PMU

#define DRIVER_NAME		"ARM-CCI"
#define DRIVER_NAME_PMU		DRIVER_NAME " PMU"

#define CCI_PMCR		0x0100
#define CCI_PID2		0x0fe8

#define CCI_PMCR_CEN		0x00000001
#define CCI_PMCR_NCNT_MASK	0x0000f800
#define CCI_PMCR_NCNT_SHIFT	11

#define CCI_PID2_REV_MASK	0xf0
#define CCI_PID2_REV_SHIFT	4

#define CCI_PMU_EVT_SEL		0x000
#define CCI_PMU_CNTR		0x004
#define CCI_PMU_CNTR_CTRL	0x008
#define CCI_PMU_OVRFLW		0x00c

#define CCI_PMU_OVRFLW_FLAG	1

#define CCI_PMU_CNTR_SIZE(model)	((model)->cntr_size)
#define CCI_PMU_CNTR_BASE(model, idx)	((idx) * CCI_PMU_CNTR_SIZE(model))
#define CCI_PMU_CNTR_MASK		((1ULL << 32) - 1)
#define CCI_PMU_CNTR_LAST(cci_pmu)	(cci_pmu->num_cntrs - 1)

#define CCI_PMU_MAX_HW_CNTRS(model) \
	((model)->num_hw_cntrs + (model)->fixed_hw_cntrs)
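
/*
 * Illustrative note (not from the original source): each hardware counter
 * owns its own CCI_PMU_CNTR_SIZE(model)-byte register window, and the
 * CCI_PMU_* offsets above index into that window. Assuming a 4K counter
 * window (as on CCI-400), counter 2's event select register would sit at
 * base + 2 * 0x1000 + 0x000 and its overflow flag at
 * base + 2 * 0x1000 + 0x00c.
 */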

/* Types of interfaces that can generate events */
enum {
	CCI_IF_SLAVE,
	CCI_IF_MASTER,
#ifdef CONFIG_ARM_CCI500_PMU
	CCI_IF_GLOBAL,
#endif
	CCI_IF_MAX,
};

struct event_range {
	u32 min;
	u32 max;
};

struct cci_pmu_hw_events {
	struct perf_event **events;
	unsigned long *used_mask;
	raw_spinlock_t pmu_lock;
};

struct cci_pmu;
/*
 * struct cci_pmu_model:
 * @fixed_hw_cntrs - Number of fixed event counters
 * @num_hw_cntrs - Maximum number of programmable event counters
 * @cntr_size - Size of an event counter mapping
 */
struct cci_pmu_model {
	char *name;
	u32 fixed_hw_cntrs;
	u32 num_hw_cntrs;
	u32 cntr_size;
	struct attribute **format_attrs;
	struct attribute **event_attrs;
	struct event_range event_ranges[CCI_IF_MAX];
	int (*validate_hw_event)(struct cci_pmu *, unsigned long);
	int (*get_event_idx)(struct cci_pmu *, struct cci_pmu_hw_events *, unsigned long);
	void (*write_counters)(struct cci_pmu *, unsigned long *);
};

static struct cci_pmu_model cci_pmu_models[];

struct cci_pmu {
	void __iomem *base;
	struct pmu pmu;
	int nr_irqs;
	int *irqs;
	unsigned long active_irqs;
	const struct cci_pmu_model *model;
	struct cci_pmu_hw_events hw_events;
	struct platform_device *plat_device;
	int num_cntrs;
	atomic_t active_events;
	struct mutex reserve_mutex;
	struct notifier_block cpu_nb;
	cpumask_t cpus;
};

#define to_cci_pmu(c)	(container_of(c, struct cci_pmu, pmu))

enum cci_models {
#ifdef CONFIG_ARM_CCI400_PMU
	CCI400_R0,
	CCI400_R1,
#endif
#ifdef CONFIG_ARM_CCI500_PMU
	CCI500_R0,
#endif
	CCI_MODEL_MAX
};

static void pmu_write_counters(struct cci_pmu *cci_pmu,
				 unsigned long *mask);
static ssize_t cci_pmu_format_show(struct device *dev,
			struct device_attribute *attr, char *buf);
static ssize_t cci_pmu_event_show(struct device *dev,
			struct device_attribute *attr, char *buf);

#define CCI_EXT_ATTR_ENTRY(_name, _func, _config) \
	&((struct dev_ext_attribute[]) {				\
		{ __ATTR(_name, S_IRUGO, _func, NULL), (void *)_config }   \
	})[0].attr.attr

#define CCI_FORMAT_EXT_ATTR_ENTRY(_name, _config) \
	CCI_EXT_ATTR_ENTRY(_name, cci_pmu_format_show, (char *)_config)
#define CCI_EVENT_EXT_ATTR_ENTRY(_name, _config) \
	CCI_EXT_ATTR_ENTRY(_name, cci_pmu_event_show, (unsigned long)_config)

/* CCI400 PMU Specific definitions */

#ifdef CONFIG_ARM_CCI400_PMU

/* Port ids */
#define CCI400_PORT_S0		0
#define CCI400_PORT_S1		1
#define CCI400_PORT_S2		2
#define CCI400_PORT_S3		3
#define CCI400_PORT_S4		4
#define CCI400_PORT_M0		5
#define CCI400_PORT_M1		6
#define CCI400_PORT_M2		7

#define CCI400_R1_PX		5

/*
 * Instead of an event id to monitor CCI cycles, a dedicated counter is
 * provided. Use 0xff to represent CCI cycles and hope that no future revisions
 * make use of this event in hardware.
 */
enum cci400_perf_events {
	CCI400_PMU_CYCLES = 0xff
};

#define CCI400_PMU_CYCLE_CNTR_IDX	0
#define CCI400_PMU_CNTR0_IDX		1

/*
 * CCI PMU event id is an 8-bit value made of two parts - bits 7:5 for one of 8
 * ports and bits 4:0 are event codes. There are different event codes
 * associated with each port type.
 *
 * Additionally, the range of events associated with the port types changed
 * between Rev0 and Rev1.
 *
 * The constants below define the range of valid codes for each port type for
 * the different revisions and are used to validate the event to be monitored.
 */

#define CCI400_PMU_EVENT_MASK		0xffUL
#define CCI400_PMU_EVENT_SOURCE_SHIFT	5
#define CCI400_PMU_EVENT_SOURCE_MASK	0x7
#define CCI400_PMU_EVENT_CODE_SHIFT	0
#define CCI400_PMU_EVENT_CODE_MASK	0x1f
#define CCI400_PMU_EVENT_SOURCE(event) \
	((event >> CCI400_PMU_EVENT_SOURCE_SHIFT) & \
			CCI400_PMU_EVENT_SOURCE_MASK)
#define CCI400_PMU_EVENT_CODE(event) \
	((event >> CCI400_PMU_EVENT_CODE_SHIFT) & CCI400_PMU_EVENT_CODE_MASK)

#define CCI400_R0_SLAVE_PORT_MIN_EV	0x00
#define CCI400_R0_SLAVE_PORT_MAX_EV	0x13
#define CCI400_R0_MASTER_PORT_MIN_EV	0x14
#define CCI400_R0_MASTER_PORT_MAX_EV	0x1a

#define CCI400_R1_SLAVE_PORT_MIN_EV	0x00
#define CCI400_R1_SLAVE_PORT_MAX_EV	0x14
#define CCI400_R1_MASTER_PORT_MIN_EV	0x00
#define CCI400_R1_MASTER_PORT_MAX_EV	0x11
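
/*
 * Worked example (illustrative, not part of the original source): the
 * 8-bit id 0x6a decodes as source = (0x6a >> 5) & 0x7 = 0x3 (slave port
 * S3) and code = 0x6a & 0x1f = 0xa (si_r_data_last_hs_snoop). From
 * userspace the same event would typically be requested through the
 * format attributes below, e.g.:
 *	perf stat -a -e CCI_400/source=0x3,event=0xa/ -- sleep 1
 */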

#define CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(_name, _config) \
	CCI_EXT_ATTR_ENTRY(_name, cci400_pmu_cycle_event_show, \
					(unsigned long)_config)

static ssize_t cci400_pmu_cycle_event_show(struct device *dev,
			struct device_attribute *attr, char *buf);

static struct attribute *cci400_pmu_format_attrs[] = {
	CCI_FORMAT_EXT_ATTR_ENTRY(event, "config:0-4"),
	CCI_FORMAT_EXT_ATTR_ENTRY(source, "config:5-7"),
	NULL
};

static struct attribute *cci400_r0_pmu_event_attrs[] = {
	/* Slave events */
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_any, 0x0),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_device, 0x01),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_normal_or_nonshareable, 0x2),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_inner_or_outershareable, 0x3),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_cache_maintenance, 0x4),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_mem_barrier, 0x5),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_sync_barrier, 0x6),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg, 0x7),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg_sync, 0x8),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_tt_full, 0x9),
	CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_last_hs_snoop, 0xA),
	CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_stall_rvalids_h_rready_l, 0xB),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_any, 0xC),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_device, 0xD),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_normal_or_nonshareable, 0xE),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_inner_or_outershare_wback_wclean, 0xF),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_unique, 0x10),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_line_unique, 0x11),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_evict, 0x12),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_stall_tt_full, 0x13),
	/* Master events */
	CCI_EVENT_EXT_ATTR_ENTRY(mi_retry_speculative_fetch, 0x14),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_addr_hazard, 0x15),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_id_hazard, 0x16),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_tt_full, 0x17),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_barrier_hazard, 0x18),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_barrier_hazard, 0x19),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_tt_full, 0x1A),
	/* Special event for cycles counter */
	CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(cycles, 0xff),
	NULL
};

static struct attribute *cci400_r1_pmu_event_attrs[] = {
	/* Slave events */
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_any, 0x0),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_device, 0x01),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_normal_or_nonshareable, 0x2),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_inner_or_outershareable, 0x3),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_cache_maintenance, 0x4),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_mem_barrier, 0x5),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_sync_barrier, 0x6),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg, 0x7),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg_sync, 0x8),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_tt_full, 0x9),
	CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_last_hs_snoop, 0xA),
	CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_stall_rvalids_h_rready_l, 0xB),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_any, 0xC),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_device, 0xD),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_normal_or_nonshareable, 0xE),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_inner_or_outershare_wback_wclean, 0xF),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_unique, 0x10),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_line_unique, 0x11),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_evict, 0x12),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_stall_tt_full, 0x13),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_slave_id_hazard, 0x14),
	/* Master events */
	CCI_EVENT_EXT_ATTR_ENTRY(mi_retry_speculative_fetch, 0x0),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_stall_cycle_addr_hazard, 0x1),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_master_id_hazard, 0x2),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_hi_prio_rtq_full, 0x3),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_barrier_hazard, 0x4),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_barrier_hazard, 0x5),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_wtq_full, 0x6),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_low_prio_rtq_full, 0x7),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_mid_prio_rtq_full, 0x8),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn0, 0x9),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn1, 0xA),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn2, 0xB),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn3, 0xC),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn0, 0xD),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn1, 0xE),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn2, 0xF),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn3, 0x10),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_unique_or_line_unique_addr_hazard, 0x11),
	/* Special event for cycles counter */
	CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(cycles, 0xff),
	NULL
};

static ssize_t cci400_pmu_cycle_event_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct dev_ext_attribute *eattr = container_of(attr,
				struct dev_ext_attribute, attr);
	return snprintf(buf, PAGE_SIZE, "config=0x%lx\n", (unsigned long)eattr->var);
}

static int cci400_get_event_idx(struct cci_pmu *cci_pmu,
				struct cci_pmu_hw_events *hw,
				unsigned long cci_event)
{
	int idx;

	/* cycles event idx is fixed */
	if (cci_event == CCI400_PMU_CYCLES) {
		if (test_and_set_bit(CCI400_PMU_CYCLE_CNTR_IDX, hw->used_mask))
			return -EAGAIN;

		return CCI400_PMU_CYCLE_CNTR_IDX;
	}

	for (idx = CCI400_PMU_CNTR0_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); ++idx)
		if (!test_and_set_bit(idx, hw->used_mask))
			return idx;

	/* No counters available */
	return -EAGAIN;
}

static int cci400_validate_hw_event(struct cci_pmu *cci_pmu, unsigned long hw_event)
{
	u8 ev_source = CCI400_PMU_EVENT_SOURCE(hw_event);
	u8 ev_code = CCI400_PMU_EVENT_CODE(hw_event);
	int if_type;

	if (hw_event & ~CCI400_PMU_EVENT_MASK)
		return -ENOENT;

	if (hw_event == CCI400_PMU_CYCLES)
		return hw_event;

	switch (ev_source) {
	case CCI400_PORT_S0:
	case CCI400_PORT_S1:
	case CCI400_PORT_S2:
	case CCI400_PORT_S3:
	case CCI400_PORT_S4:
		/* Slave Interface */
		if_type = CCI_IF_SLAVE;
		break;
	case CCI400_PORT_M0:
	case CCI400_PORT_M1:
	case CCI400_PORT_M2:
		/* Master Interface */
		if_type = CCI_IF_MASTER;
		break;
	default:
		return -ENOENT;
	}

	if (ev_code >= cci_pmu->model->event_ranges[if_type].min &&
	    ev_code <= cci_pmu->model->event_ranges[if_type].max)
		return hw_event;

	return -ENOENT;
}

static int probe_cci400_revision(void)
{
	int rev;
	rev = readl_relaxed(cci_ctrl_base + CCI_PID2) & CCI_PID2_REV_MASK;
	rev >>= CCI_PID2_REV_SHIFT;

	if (rev < CCI400_R1_PX)
		return CCI400_R0;
	else
		return CCI400_R1;
}

static const struct cci_pmu_model *probe_cci_model(struct platform_device *pdev)
{
	if (platform_has_secure_cci_access())
		return &cci_pmu_models[probe_cci400_revision()];
	return NULL;
}
#else	/* !CONFIG_ARM_CCI400_PMU */
static inline const struct cci_pmu_model *probe_cci_model(struct platform_device *pdev)
{
	return NULL;
}
#endif	/* CONFIG_ARM_CCI400_PMU */

#ifdef CONFIG_ARM_CCI500_PMU

/*
 * CCI500 provides 8 independent event counters that can count
 * any of the events available.
 *
 * CCI500 PMU event id is a 9-bit value made of two parts.
 *	 bits [8:5] - Source for the event
 *		0x0-0x6 - Slave interfaces
 *		0x8-0xD - Master interfaces
 *		0xf     - Global Events
 *		0x7,0xe - Reserved
 *
 *	 bits [4:0] - Event code (specific to type of interface)
 */
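
/*
 * Worked example (illustrative, not part of the original source): the
 * 9-bit id 0x123 decodes as source = (0x123 >> 5) & 0xf = 0x9 (master
 * interface M1) and code = 0x123 & 0x1f = 0x3 (mi_r_data_stall in the
 * event list below).
 */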

/* Port ids */
#define CCI500_PORT_S0			0x0
#define CCI500_PORT_S1			0x1
#define CCI500_PORT_S2			0x2
#define CCI500_PORT_S3			0x3
#define CCI500_PORT_S4			0x4
#define CCI500_PORT_S5			0x5
#define CCI500_PORT_S6			0x6

#define CCI500_PORT_M0			0x8
#define CCI500_PORT_M1			0x9
#define CCI500_PORT_M2			0xa
#define CCI500_PORT_M3			0xb
#define CCI500_PORT_M4			0xc
#define CCI500_PORT_M5			0xd

#define CCI500_PORT_GLOBAL		0xf

#define CCI500_PMU_EVENT_MASK		0x1ffUL
#define CCI500_PMU_EVENT_SOURCE_SHIFT	0x5
#define CCI500_PMU_EVENT_SOURCE_MASK	0xf
#define CCI500_PMU_EVENT_CODE_SHIFT	0x0
#define CCI500_PMU_EVENT_CODE_MASK	0x1f

#define CCI500_PMU_EVENT_SOURCE(event)	\
	((event >> CCI500_PMU_EVENT_SOURCE_SHIFT) & CCI500_PMU_EVENT_SOURCE_MASK)
#define CCI500_PMU_EVENT_CODE(event)	\
	((event >> CCI500_PMU_EVENT_CODE_SHIFT) & CCI500_PMU_EVENT_CODE_MASK)

#define CCI500_SLAVE_PORT_MIN_EV	0x00
#define CCI500_SLAVE_PORT_MAX_EV	0x1f
#define CCI500_MASTER_PORT_MIN_EV	0x00
#define CCI500_MASTER_PORT_MAX_EV	0x06
#define CCI500_GLOBAL_PORT_MIN_EV	0x00
#define CCI500_GLOBAL_PORT_MAX_EV	0x0f


#define CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(_name, _config) \
	CCI_EXT_ATTR_ENTRY(_name, cci500_pmu_global_event_show, \
					(unsigned long) _config)

static ssize_t cci500_pmu_global_event_show(struct device *dev,
				struct device_attribute *attr, char *buf);

static struct attribute *cci500_pmu_format_attrs[] = {
	CCI_FORMAT_EXT_ATTR_ENTRY(event, "config:0-4"),
	CCI_FORMAT_EXT_ATTR_ENTRY(source, "config:5-8"),
	NULL,
};

static struct attribute *cci500_pmu_event_attrs[] = {
	/* Slave events */
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_arvalid, 0x0),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_dev, 0x1),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_nonshareable, 0x2),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_shareable_non_alloc, 0x3),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_shareable_alloc, 0x4),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_invalidate, 0x5),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_cache_maint, 0x6),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg, 0x7),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_rval, 0x8),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_rlast_snoop, 0x9),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_awalid, 0xA),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_dev, 0xB),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_non_shareable, 0xC),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_share_wb, 0xD),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_share_wlu, 0xE),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_share_wunique, 0xF),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_evict, 0x10),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_wrevict, 0x11),
	CCI_EVENT_EXT_ATTR_ENTRY(si_w_data_beat, 0x12),
	CCI_EVENT_EXT_ATTR_ENTRY(si_srq_acvalid, 0x13),
	CCI_EVENT_EXT_ATTR_ENTRY(si_srq_read, 0x14),
	CCI_EVENT_EXT_ATTR_ENTRY(si_srq_clean, 0x15),
	CCI_EVENT_EXT_ATTR_ENTRY(si_srq_data_transfer_low, 0x16),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_arvalid, 0x17),
	CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_stall, 0x18),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_stall, 0x19),
	CCI_EVENT_EXT_ATTR_ENTRY(si_w_data_stall, 0x1A),
	CCI_EVENT_EXT_ATTR_ENTRY(si_w_resp_stall, 0x1B),
	CCI_EVENT_EXT_ATTR_ENTRY(si_srq_stall, 0x1C),
	CCI_EVENT_EXT_ATTR_ENTRY(si_s_data_stall, 0x1D),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rq_stall_ot_limit, 0x1E),
	CCI_EVENT_EXT_ATTR_ENTRY(si_r_stall_arbit, 0x1F),

	/* Master events */
	CCI_EVENT_EXT_ATTR_ENTRY(mi_r_data_beat_any, 0x0),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_w_data_beat_any, 0x1),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall, 0x2),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_r_data_stall, 0x3),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall, 0x4),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_w_data_stall, 0x5),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_w_resp_stall, 0x6),

	/* Global events */
	CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_0_1, 0x0),
	CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_2_3, 0x1),
	CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_4_5, 0x2),
	CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_6_7, 0x3),
	CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_0_1, 0x4),
	CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_2_3, 0x5),
	CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_4_5, 0x6),
	CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_6_7, 0x7),
	CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_back_invalidation, 0x8),
	CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_stall_alloc_busy, 0x9),
	CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_stall_tt_full, 0xA),
	CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_wrq, 0xB),
	CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_cd_hs, 0xC),
	CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_rq_stall_addr_hazard, 0xD),
	CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snopp_rq_stall_tt_full, 0xE),
	CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_rq_tzmp1_prot, 0xF),
	NULL
};

static ssize_t cci500_pmu_global_event_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct dev_ext_attribute *eattr = container_of(attr,
				struct dev_ext_attribute, attr);
	/* Global events have single fixed source code */
	return snprintf(buf, PAGE_SIZE, "event=0x%lx,source=0x%x\n",
				(unsigned long)eattr->var, CCI500_PORT_GLOBAL);
}

static int cci500_validate_hw_event(struct cci_pmu *cci_pmu,
					unsigned long hw_event)
{
	u32 ev_source = CCI500_PMU_EVENT_SOURCE(hw_event);
	u32 ev_code = CCI500_PMU_EVENT_CODE(hw_event);
	int if_type;

	if (hw_event & ~CCI500_PMU_EVENT_MASK)
		return -ENOENT;

	switch (ev_source) {
	case CCI500_PORT_S0:
	case CCI500_PORT_S1:
	case CCI500_PORT_S2:
	case CCI500_PORT_S3:
	case CCI500_PORT_S4:
	case CCI500_PORT_S5:
	case CCI500_PORT_S6:
		if_type = CCI_IF_SLAVE;
		break;
	case CCI500_PORT_M0:
	case CCI500_PORT_M1:
	case CCI500_PORT_M2:
	case CCI500_PORT_M3:
	case CCI500_PORT_M4:
	case CCI500_PORT_M5:
		if_type = CCI_IF_MASTER;
		break;
	case CCI500_PORT_GLOBAL:
		if_type = CCI_IF_GLOBAL;
		break;
	default:
		return -ENOENT;
	}

	if (ev_code >= cci_pmu->model->event_ranges[if_type].min &&
	    ev_code <= cci_pmu->model->event_ranges[if_type].max)
		return hw_event;

	return -ENOENT;
}
#endif	/* CONFIG_ARM_CCI500_PMU */

/*
 * Program the CCI PMU counters which have PERF_HES_ARCH set
 * with the event period and mark them ready before we enable
 * the PMU.
 */
static void cci_pmu_sync_counters(struct cci_pmu *cci_pmu)
{
	int i;
	struct cci_pmu_hw_events *cci_hw = &cci_pmu->hw_events;

	DECLARE_BITMAP(mask, cci_pmu->num_cntrs);

	bitmap_zero(mask, cci_pmu->num_cntrs);
	for_each_set_bit(i, cci_pmu->hw_events.used_mask, cci_pmu->num_cntrs) {
		struct perf_event *event = cci_hw->events[i];

		if (WARN_ON(!event))
			continue;

		/* Leave the events which are not counting */
		if (event->hw.state & PERF_HES_STOPPED)
			continue;
		if (event->hw.state & PERF_HES_ARCH) {
			set_bit(i, mask);
			event->hw.state &= ~PERF_HES_ARCH;
		}
	}

	pmu_write_counters(cci_pmu, mask);
}

/* Should be called with cci_pmu->hw_events->pmu_lock held */
static void __cci_pmu_enable_nosync(struct cci_pmu *cci_pmu)
{
	u32 val;

	/* Enable all the PMU counters. */
	val = readl_relaxed(cci_ctrl_base + CCI_PMCR) | CCI_PMCR_CEN;
	writel(val, cci_ctrl_base + CCI_PMCR);
}

/* Should be called with cci_pmu->hw_events->pmu_lock held */
static void __cci_pmu_enable_sync(struct cci_pmu *cci_pmu)
{
	cci_pmu_sync_counters(cci_pmu);
	__cci_pmu_enable_nosync(cci_pmu);
}

/* Should be called with cci_pmu->hw_events->pmu_lock held */
static void __cci_pmu_disable(void)
{
	u32 val;

	/* Disable all the PMU counters. */
	val = readl_relaxed(cci_ctrl_base + CCI_PMCR) & ~CCI_PMCR_CEN;
	writel(val, cci_ctrl_base + CCI_PMCR);
}

static ssize_t cci_pmu_format_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct dev_ext_attribute *eattr = container_of(attr,
				struct dev_ext_attribute, attr);
	return snprintf(buf, PAGE_SIZE, "%s\n", (char *)eattr->var);
}

static ssize_t cci_pmu_event_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct dev_ext_attribute *eattr = container_of(attr,
				struct dev_ext_attribute, attr);
	/* source parameter is mandatory for normal PMU events */
	return snprintf(buf, PAGE_SIZE, "source=?,event=0x%lx\n",
			(unsigned long)eattr->var);
}

static int pmu_is_valid_counter(struct cci_pmu *cci_pmu, int idx)
{
	return 0 <= idx && idx <= CCI_PMU_CNTR_LAST(cci_pmu);
}

static u32 pmu_read_register(struct cci_pmu *cci_pmu, int idx, unsigned int offset)
{
	return readl_relaxed(cci_pmu->base +
			     CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset);
}

static void pmu_write_register(struct cci_pmu *cci_pmu, u32 value,
			       int idx, unsigned int offset)
{
	return writel_relaxed(value, cci_pmu->base +
			      CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset);
}

static void pmu_disable_counter(struct cci_pmu *cci_pmu, int idx)
{
	pmu_write_register(cci_pmu, 0, idx, CCI_PMU_CNTR_CTRL);
}

static void pmu_enable_counter(struct cci_pmu *cci_pmu, int idx)
{
	pmu_write_register(cci_pmu, 1, idx, CCI_PMU_CNTR_CTRL);
}

static bool __maybe_unused
pmu_counter_is_enabled(struct cci_pmu *cci_pmu, int idx)
{
	return (pmu_read_register(cci_pmu, idx, CCI_PMU_CNTR_CTRL) & 0x1) != 0;
}

static void pmu_set_event(struct cci_pmu *cci_pmu, int idx, unsigned long event)
{
	pmu_write_register(cci_pmu, event, idx, CCI_PMU_EVT_SEL);
}

/*
 * For all counters on the CCI-PMU, disable any 'enabled' counters,
 * saving the changed counters in the mask, so that we can restore
 * it later using pmu_restore_counters. The mask is private to the
 * caller. We cannot rely on the used_mask maintained by the CCI_PMU
 * as it only tells us if the counter is assigned to a perf_event or not.
 * The state of the perf_event cannot be locked by the PMU layer, hence
 * we check the individual counter status (which can be locked by
 * cci_pmu->hw_events->pmu_lock).
 *
 * @mask should be initialised to empty by the caller.
 */
static void __maybe_unused
pmu_save_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
{
	int i;

	for (i = 0; i < cci_pmu->num_cntrs; i++) {
		if (pmu_counter_is_enabled(cci_pmu, i)) {
			set_bit(i, mask);
			pmu_disable_counter(cci_pmu, i);
		}
	}
}

/*
 * Restore the status of the counters. Reversal of the pmu_save_counters().
 * For each counter set in the mask, enable the counter back.
 */
static void __maybe_unused
pmu_restore_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
{
	int i;

	for_each_set_bit(i, mask, cci_pmu->num_cntrs)
		pmu_enable_counter(cci_pmu, i);
}
/*
 * Returns the number of programmable counters actually implemented
 * by the CCI
 */
static u32 pmu_get_max_counters(void)
{
	return (readl_relaxed(cci_ctrl_base + CCI_PMCR) &
		CCI_PMCR_NCNT_MASK) >> CCI_PMCR_NCNT_SHIFT;
}

static int pmu_get_event_idx(struct cci_pmu_hw_events *hw, struct perf_event *event)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	unsigned long cci_event = event->hw.config_base;
	int idx;

	if (cci_pmu->model->get_event_idx)
		return cci_pmu->model->get_event_idx(cci_pmu, hw, cci_event);

	/* Generic code to find an unused idx from the mask */
	for (idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++)
		if (!test_and_set_bit(idx, hw->used_mask))
			return idx;

	/* No counters available */
	return -EAGAIN;
}

static int pmu_map_event(struct perf_event *event)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);

	if (event->attr.type < PERF_TYPE_MAX ||
			!cci_pmu->model->validate_hw_event)
		return -ENOENT;

	return cci_pmu->model->validate_hw_event(cci_pmu, event->attr.config);
}

static int pmu_request_irq(struct cci_pmu *cci_pmu, irq_handler_t handler)
{
	int i;
	struct platform_device *pmu_device = cci_pmu->plat_device;

	if (unlikely(!pmu_device))
		return -ENODEV;

	if (cci_pmu->nr_irqs < 1) {
		dev_err(&pmu_device->dev, "no irqs for CCI PMUs defined\n");
		return -ENODEV;
	}

	/*
	 * Register all available CCI PMU interrupts. In the interrupt handler
	 * we iterate over the counters checking for interrupt source (the
	 * overflowing counter) and clear it.
	 *
	 * This should allow handling of non-unique interrupts for the counters.
	 */
	for (i = 0; i < cci_pmu->nr_irqs; i++) {
		int err = request_irq(cci_pmu->irqs[i], handler, IRQF_SHARED,
				"arm-cci-pmu", cci_pmu);
		if (err) {
			dev_err(&pmu_device->dev, "unable to request IRQ%d for ARM CCI PMU counters\n",
				cci_pmu->irqs[i]);
			return err;
		}

		set_bit(i, &cci_pmu->active_irqs);
	}

	return 0;
}

static void pmu_free_irq(struct cci_pmu *cci_pmu)
{
	int i;

	for (i = 0; i < cci_pmu->nr_irqs; i++) {
		if (!test_and_clear_bit(i, &cci_pmu->active_irqs))
			continue;

		free_irq(cci_pmu->irqs[i], cci_pmu);
	}
}

static u32 pmu_read_counter(struct perf_event *event)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	struct hw_perf_event *hw_counter = &event->hw;
	int idx = hw_counter->idx;
	u32 value;

	if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
		return 0;
	}
	value = pmu_read_register(cci_pmu, idx, CCI_PMU_CNTR);

	return value;
}

static void pmu_write_counter(struct cci_pmu *cci_pmu, u32 value, int idx)
{
	pmu_write_register(cci_pmu, value, idx, CCI_PMU_CNTR);
}

static void __pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
{
	int i;
	struct cci_pmu_hw_events *cci_hw = &cci_pmu->hw_events;

	for_each_set_bit(i, mask, cci_pmu->num_cntrs) {
		struct perf_event *event = cci_hw->events[i];

		if (WARN_ON(!event))
			continue;
		pmu_write_counter(cci_pmu, local64_read(&event->hw.prev_count), i);
	}
}

static void pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
{
	if (cci_pmu->model->write_counters)
		cci_pmu->model->write_counters(cci_pmu, mask);
	else
		__pmu_write_counters(cci_pmu, mask);
}

#ifdef CONFIG_ARM_CCI500_PMU

/*
 * CCI-500 has advanced power saving policies, which could gate the
 * clocks to the PMU counters, which makes the writes to them ineffective.
 * The only way to write to those counters is when the global counters
 * are enabled and the particular counter is enabled.
 *
 * So we do the following :
 *
 * 1) Disable all the PMU counters, saving their current state
 * 2) Enable the global PMU profiling, now that all counters are
 *    disabled.
 *
 * For each counter to be programmed, repeat steps 3-7:
 *
 * 3) Write an invalid event code to the event control register for the
 *    counter, so that the counters are not modified.
 * 4) Enable the counter control for the counter.
 * 5) Set the counter value
 * 6) Disable the counter
 * 7) Restore the event in the target counter
 *
 * 8) Disable the global PMU.
 * 9) Restore the status of the rest of the counters.
 *
 * We choose an event which for CCI-500 is guaranteed not to count.
 * We use the highest possible event code (0x1f) for the master interface 0.
 */
#define CCI500_INVALID_EVENT	((CCI500_PORT_M0 << CCI500_PMU_EVENT_SOURCE_SHIFT) | \
				 (CCI500_PMU_EVENT_CODE_MASK << CCI500_PMU_EVENT_CODE_SHIFT))
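
/*
 * Illustrative note (not from the original source): this expands to
 * (0x8 << 5) | 0x1f = 0x11f, i.e. source M0 with event code 0x1f, well
 * above CCI500_MASTER_PORT_MAX_EV (0x06), so it can never count.
 */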
static void cci500_pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
{
	int i;
	DECLARE_BITMAP(saved_mask, cci_pmu->num_cntrs);

	bitmap_zero(saved_mask, cci_pmu->num_cntrs);
	pmu_save_counters(cci_pmu, saved_mask);

	/*
	 * Now that all the counters are disabled, we can safely turn the PMU on,
	 * without syncing the status of the counters
	 */
	__cci_pmu_enable_nosync(cci_pmu);

	for_each_set_bit(i, mask, cci_pmu->num_cntrs) {
		struct perf_event *event = cci_pmu->hw_events.events[i];

		if (WARN_ON(!event))
			continue;

		pmu_set_event(cci_pmu, i, CCI500_INVALID_EVENT);
		pmu_enable_counter(cci_pmu, i);
		pmu_write_counter(cci_pmu, local64_read(&event->hw.prev_count), i);
		pmu_disable_counter(cci_pmu, i);
		pmu_set_event(cci_pmu, i, event->hw.config_base);
	}

	__cci_pmu_disable();

	pmu_restore_counters(cci_pmu, saved_mask);
}

#endif	/* CONFIG_ARM_CCI500_PMU */

static u64 pmu_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;

	do {
		prev_raw_count = local64_read(&hwc->prev_count);
		new_raw_count = pmu_read_counter(event);
	} while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
		 new_raw_count) != prev_raw_count);

	delta = (new_raw_count - prev_raw_count) & CCI_PMU_CNTR_MASK;

	local64_add(delta, &event->count);

	return new_raw_count;
}
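
/*
 * Illustrative note (not from the original source): the delta above is
 * computed modulo 2^32 via CCI_PMU_CNTR_MASK, so a counter that wrapped
 * (e.g. prev_count = 0xfffffff0, new value = 0x00000010) still
 * contributes the correct delta of 0x20.
 */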

static void pmu_read(struct perf_event *event)
{
	pmu_event_update(event);
}

static void pmu_event_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	/*
	 * The CCI PMU counters have a period of 2^32. To account for the
	 * possibility of extreme interrupt latency we program for a period of
	 * half that. Hopefully we can handle the interrupt before another 2^31
	 * events occur and the counter overtakes its previous value.
	 */
	u64 val = 1ULL << 31;
	local64_set(&hwc->prev_count, val);

	/*
	 * CCI PMU uses PERF_HES_ARCH to keep track of the counters, whose
	 * values need to be sync-ed with the s/w state before the PMU is
	 * enabled.
	 * Mark this counter for sync.
	 */
	hwc->state |= PERF_HES_ARCH;
}
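
/*
 * Illustrative note (not from the original source): pmu_event_set_period()
 * only records the 2^31 start value in prev_count and flags the counter
 * PERF_HES_ARCH; the value reaches the hardware later, in
 * cci_pmu_sync_counters(), just before the PMU is re-enabled. This matters
 * on parts like CCI-500, where counter writes only take effect while the
 * PMU clocks are running.
 */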

static irqreturn_t pmu_handle_irq(int irq_num, void *dev)
{
	unsigned long flags;
	struct cci_pmu *cci_pmu = dev;
	struct cci_pmu_hw_events *events = &cci_pmu->hw_events;
	int idx, handled = IRQ_NONE;

	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/* Disable the PMU while we walk through the counters */
	__cci_pmu_disable();
	/*
	 * Iterate over counters and update the corresponding perf events.
	 * This should work regardless of whether we have per-counter overflow
	 * interrupt or a combined overflow interrupt.
	 */
	for (idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) {
		struct perf_event *event = events->events[idx];
		struct hw_perf_event *hw_counter;

		if (!event)
			continue;

		hw_counter = &event->hw;

		/* Did this counter overflow? */
		if (!(pmu_read_register(cci_pmu, idx, CCI_PMU_OVRFLW) &
		      CCI_PMU_OVRFLW_FLAG))
			continue;

		pmu_write_register(cci_pmu, CCI_PMU_OVRFLW_FLAG, idx,
				   CCI_PMU_OVRFLW);

		pmu_event_update(event);
		pmu_event_set_period(event);
		handled = IRQ_HANDLED;
	}

	/* Enable the PMU and sync possibly overflowed counters */
	__cci_pmu_enable_sync(cci_pmu);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);

	return IRQ_RETVAL(handled);
}

static int cci_pmu_get_hw(struct cci_pmu *cci_pmu)
{
	int ret = pmu_request_irq(cci_pmu, pmu_handle_irq);
	if (ret) {
		pmu_free_irq(cci_pmu);
		return ret;
	}
	return 0;
}

static void cci_pmu_put_hw(struct cci_pmu *cci_pmu)
{
	pmu_free_irq(cci_pmu);
}

static void hw_perf_event_destroy(struct perf_event *event)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	atomic_t *active_events = &cci_pmu->active_events;
	struct mutex *reserve_mutex = &cci_pmu->reserve_mutex;

	if (atomic_dec_and_mutex_lock(active_events, reserve_mutex)) {
		cci_pmu_put_hw(cci_pmu);
		mutex_unlock(reserve_mutex);
	}
}

static void cci_pmu_enable(struct pmu *pmu)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
	int enabled = bitmap_weight(hw_events->used_mask, cci_pmu->num_cntrs);
	unsigned long flags;

	if (!enabled)
		return;

	raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);
	__cci_pmu_enable_sync(cci_pmu);
	raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
}

static void cci_pmu_disable(struct pmu *pmu)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
	unsigned long flags;

	raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);
	__cci_pmu_disable();
	raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
}

/*
 * Check if the idx represents a non-programmable counter.
 * All the fixed event counters are mapped before the programmable
 * counters.
 */
static bool pmu_fixed_hw_idx(struct cci_pmu *cci_pmu, int idx)
{
	return (idx >= 0) && (idx < cci_pmu->model->fixed_hw_cntrs);
}

static void cci_pmu_start(struct perf_event *event, int pmu_flags)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	unsigned long flags;

	/*
	 * To handle interrupt latency, we always reprogram the period
	 * regardless of PERF_EF_RELOAD.
	 */
	if (pmu_flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;

	if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
		return;
	}

	raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);

	/* Configure the counter unless you are counting a fixed event */
	if (!pmu_fixed_hw_idx(cci_pmu, idx))
		pmu_set_event(cci_pmu, idx, hwc->config_base);

	pmu_event_set_period(event);
	pmu_enable_counter(cci_pmu, idx);

	raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
}

static void cci_pmu_stop(struct perf_event *event, int pmu_flags)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (hwc->state & PERF_HES_STOPPED)
		return;

	if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
		return;
	}

	/*
	 * We always reprogram the counter, so ignore PERF_EF_UPDATE. See
	 * cci_pmu_start()
	 */
	pmu_disable_counter(cci_pmu, idx);
	pmu_event_update(event);
	hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}

static int cci_pmu_add(struct perf_event *event, int flags)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	perf_pmu_disable(event->pmu);

	/* If we don't have a space for the counter then finish early. */
	idx = pmu_get_event_idx(hw_events, event);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	event->hw.idx = idx;
	hw_events->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		cci_pmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	perf_pmu_enable(event->pmu);
	return err;
}

static void cci_pmu_del(struct perf_event *event, int flags)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	cci_pmu_stop(event, PERF_EF_UPDATE);
	hw_events->events[idx] = NULL;
	clear_bit(idx, hw_events->used_mask);

	perf_event_update_userpage(event);
}

static int
validate_event(struct pmu *cci_pmu,
	       struct cci_pmu_hw_events *hw_events,
	       struct perf_event *event)
{
	if (is_software_event(event))
		return 1;

	/*
	 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
	 * core perf code won't check that the pmu->ctx == leader->ctx
	 * until after pmu->event_init(event).
	 */
	if (event->pmu != cci_pmu)
		return 0;

	if (event->state < PERF_EVENT_STATE_OFF)
		return 1;

	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
		return 1;

	return pmu_get_event_idx(hw_events, event) >= 0;
}

static int
validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	unsigned long mask[BITS_TO_LONGS(cci_pmu->num_cntrs)];
	struct cci_pmu_hw_events fake_pmu = {
		/*
		 * Initialise the fake PMU. We only need to populate the
		 * used_mask for the purposes of validation.
		 */
		.used_mask = mask,
	};
	memset(mask, 0, BITS_TO_LONGS(cci_pmu->num_cntrs) * sizeof(unsigned long));

	if (!validate_event(event->pmu, &fake_pmu, leader))
		return -EINVAL;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(event->pmu, &fake_pmu, sibling))
			return -EINVAL;
	}

	if (!validate_event(event->pmu, &fake_pmu, event))
		return -EINVAL;

	return 0;
}

static int
__hw_perf_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int mapping;

	mapping = pmu_map_event(event);

	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet.
	 */
	hwc->idx		= -1;
	hwc->config_base	= 0;
	hwc->config		= 0;
	hwc->event_base		= 0;

	/*
	 * Store the event encoding into the config_base field.
	 */
	hwc->config_base	|= (unsigned long)mapping;

	/*
	 * Limit the sample_period to half of the counter width. That way, the
	 * new counter value is far less likely to overtake the previous one
	 * unless you have some serious IRQ latency issues.
	 */
	hwc->sample_period  = CCI_PMU_CNTR_MASK >> 1;
	hwc->last_period    = hwc->sample_period;
	local64_set(&hwc->period_left, hwc->sample_period);

	if (event->group_leader != event) {
		if (validate_group(event) != 0)
			return -EINVAL;
	}

	return 0;
}

static int cci_pmu_event_init(struct perf_event *event)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	atomic_t *active_events = &cci_pmu->active_events;
	int err = 0;
	int cpu;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* Shared by all CPUs, no meaningful state to sample */
	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EOPNOTSUPP;

	/* We have no filtering of any kind */
	if (event->attr.exclude_user	||
	    event->attr.exclude_kernel	||
	    event->attr.exclude_hv	||
	    event->attr.exclude_idle	||
	    event->attr.exclude_host	||
	    event->attr.exclude_guest)
		return -EINVAL;

	/*
	 * Following the example set by other "uncore" PMUs, we accept any CPU
	 * and rewrite its affinity dynamically rather than having perf core
	 * handle cpu == -1 and pid == -1 for this case.
	 *
	 * The perf core will pin online CPUs for the duration of this call and
	 * the event being installed into its context, so the PMU's CPU can't
	 * change under our feet.
	 */
	cpu = cpumask_first(&cci_pmu->cpus);
	if (event->cpu < 0 || cpu < 0)
		return -EINVAL;
	event->cpu = cpu;

	event->destroy = hw_perf_event_destroy;
	if (!atomic_inc_not_zero(active_events)) {
		mutex_lock(&cci_pmu->reserve_mutex);
		if (atomic_read(active_events) == 0)
			err = cci_pmu_get_hw(cci_pmu);
		if (!err)
			atomic_inc(active_events);
		mutex_unlock(&cci_pmu->reserve_mutex);
	}
	if (err)
		return err;

	err = __hw_perf_event_init(event);
	if (err)
		hw_perf_event_destroy(event);

	return err;
}

static ssize_t pmu_cpumask_attr_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct pmu *pmu = dev_get_drvdata(dev);
	struct cci_pmu *cci_pmu = to_cci_pmu(pmu);

	int n = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
			  cpumask_pr_args(&cci_pmu->cpus));
	buf[n++] = '\n';
	buf[n] = '\0';
	return n;
}

static struct device_attribute pmu_cpumask_attr =
	__ATTR(cpumask, S_IRUGO, pmu_cpumask_attr_show, NULL);

static struct attribute *pmu_attrs[] = {
	&pmu_cpumask_attr.attr,
	NULL,
};

static struct attribute_group pmu_attr_group = {
	.attrs = pmu_attrs,
};
e14cfad3 SP |
1396 | static struct attribute_group pmu_format_attr_group = { |
1397 | .name = "format", | |
1398 | .attrs = NULL, /* Filled in cci_pmu_init_attrs */ | |
1399 | }; | |
1400 | ||
1401 | static struct attribute_group pmu_event_attr_group = { | |
1402 | .name = "events", | |
1403 | .attrs = NULL, /* Filled in cci_pmu_init_attrs */ | |
1404 | }; | |
1405 | ||
c6f85cb4 MR |
1406 | static const struct attribute_group *pmu_attr_groups[] = { |
1407 | &pmu_attr_group, | |
e14cfad3 SP |
1408 | &pmu_format_attr_group, |
1409 | &pmu_event_attr_group, | |
c6f85cb4 MR |
1410 | NULL |
1411 | }; | |
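/*
 * Editor's sketch (illustration only): reading the cpumask attribute that
 * the groups above expose. The device name "CCI_400" is an assumption; it
 * follows the model names registered by this driver.
 */
#include <stdio.h>

int main(void)
{
	char mask[64];
	FILE *f = fopen("/sys/bus/event_source/devices/CCI_400/cpumask", "r");

	if (!f)
		return 1;
	if (fgets(mask, sizeof(mask), f))
		/* e.g. "0": the single CPU events must be opened on */
		printf("CCI PMU cpumask: %s", mask);
	fclose(f);
	return 0;
}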
1412 | ||
1413 | static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev) | |
1414 | { | |
5e442eba MR |
1415 | const struct cci_pmu_model *model = cci_pmu->model; |
1416 | char *name = model->name; | |
ab5b316d | 1417 | u32 num_cntrs; |
e14cfad3 | 1418 | |
5e442eba MR |
1419 | pmu_event_attr_group.attrs = model->event_attrs; |
1420 | pmu_format_attr_group.attrs = model->format_attrs; | |
a1a076d7 | 1421 | |
c6f85cb4 | 1422 | cci_pmu->pmu = (struct pmu) { |
fc17c839 | 1423 | .name = cci_pmu->model->name, |
c6f85cb4 MR |
1424 | .task_ctx_nr = perf_invalid_context, |
1425 | .pmu_enable = cci_pmu_enable, | |
1426 | .pmu_disable = cci_pmu_disable, | |
1427 | .event_init = cci_pmu_event_init, | |
1428 | .add = cci_pmu_add, | |
1429 | .del = cci_pmu_del, | |
1430 | .start = cci_pmu_start, | |
1431 | .stop = cci_pmu_stop, | |
1432 | .read = pmu_read, | |
1433 | .attr_groups = pmu_attr_groups, | |
b91c8f28 PA |
1434 | }; |
1435 | ||
1436 | cci_pmu->plat_device = pdev; | |
ab5b316d SP |
1437 | num_cntrs = pmu_get_max_counters(); |
1438 | if (num_cntrs > cci_pmu->model->num_hw_cntrs) { | |
1439 | dev_warn(&pdev->dev, | |
1440 | "PMU implements more counters(%d) than supported by" | |
1441 | " the model(%d), truncated.", | |
1442 | num_cntrs, cci_pmu->model->num_hw_cntrs); | |
1443 | num_cntrs = cci_pmu->model->num_hw_cntrs; | |
1444 | } | |
1445 | cci_pmu->num_cntrs = num_cntrs + cci_pmu->model->fixed_hw_cntrs; | |
b91c8f28 | 1446 | |
c6f85cb4 | 1447 | return perf_pmu_register(&cci_pmu->pmu, name, -1); |
b91c8f28 PA |
1448 | } |
1449 | ||
c6f85cb4 MR |
1450 | static int cci_pmu_cpu_notifier(struct notifier_block *self, |
1451 | unsigned long action, void *hcpu) | |
1452 | { | |
a1a076d7 SP |
1453 | struct cci_pmu *cci_pmu = container_of(self, |
1454 | struct cci_pmu, cpu_nb); | |
c6f85cb4 MR |
1455 | unsigned int cpu = (long)hcpu; |
1456 | unsigned int target; | |
1457 | ||
1458 | switch (action & ~CPU_TASKS_FROZEN) { | |
1459 | case CPU_DOWN_PREPARE: | |
a1a076d7 | 1460 | if (!cpumask_test_and_clear_cpu(cpu, &cci_pmu->cpus)) |
c6f85cb4 MR |
1461 | break; |
1462 | target = cpumask_any_but(cpu_online_mask, cpu); | |
0f17380c | 1463 | if (target >= nr_cpu_ids) /* UP, last CPU */ | |
c6f85cb4 MR |
1464 | break; |
1465 | /* | |
1466 | * TODO: migrate context once core races on event->ctx have | |
1467 | * been fixed. | |
1468 | */ | |
a1a076d7 | 1469 | cpumask_set_cpu(target, &cci_pmu->cpus); |
c6f85cb4 MR |
1470 | default: |
1471 | break; | |
1472 | } | |
1473 | ||
1474 | return NOTIFY_OK; | |
1475 | } | |
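/*
 * Editor's sketch (illustration only): a userspace analogue of the
 * cpumask_any_but() step above -- pick any available CPU other than the
 * one going away, or give up if none is left. The process affinity mask
 * stands in for the online mask here.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

static int any_cpu_but(const cpu_set_t *online, int dying, int ncpus)
{
	int cpu;

	for (cpu = 0; cpu < ncpus; cpu++)
		if (cpu != dying && CPU_ISSET(cpu, online))
			return cpu;
	return -1;		/* UP, last CPU: nowhere to migrate */
}

int main(void)
{
	cpu_set_t online;
	int ncpus = (int)sysconf(_SC_NPROCESSORS_CONF);

	if (sched_getaffinity(0, sizeof(online), &online))
		return 1;
	printf("migration target if CPU0 dies: %d\n",
	       any_cpu_but(&online, 0, ncpus));
	return 0;
}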
1476 | ||
fc17c839 | 1477 | static struct cci_pmu_model cci_pmu_models[] = { |
f4d58938 SP |
1478 | #ifdef CONFIG_ARM_CCI400_PMU |
1479 | [CCI400_R0] = { | |
fc17c839 | 1480 | .name = "CCI_400", |
ab5b316d SP |
1481 | .fixed_hw_cntrs = 1, /* Cycle counter */ |
1482 | .num_hw_cntrs = 4, | |
1483 | .cntr_size = SZ_4K, | |
e14cfad3 | 1484 | .format_attrs = cci400_pmu_format_attrs, |
e14cfad3 | 1485 | .event_attrs = cci400_r0_pmu_event_attrs, |
fc17c839 SP |
1486 | .event_ranges = { |
1487 | [CCI_IF_SLAVE] = { | |
f4d58938 SP |
1488 | CCI400_R0_SLAVE_PORT_MIN_EV, |
1489 | CCI400_R0_SLAVE_PORT_MAX_EV, | |
fc17c839 SP |
1490 | }, |
1491 | [CCI_IF_MASTER] = { | |
f4d58938 SP |
1492 | CCI400_R0_MASTER_PORT_MIN_EV, |
1493 | CCI400_R0_MASTER_PORT_MAX_EV, | |
fc17c839 SP |
1494 | }, |
1495 | }, | |
31216290 SP |
1496 | .validate_hw_event = cci400_validate_hw_event, |
1497 | .get_event_idx = cci400_get_event_idx, | |
fc17c839 | 1498 | }, |
f4d58938 | 1499 | [CCI400_R1] = { |
fc17c839 | 1500 | .name = "CCI_400_r1", |
ab5b316d SP |
1501 | .fixed_hw_cntrs = 1, /* Cycle counter */ |
1502 | .num_hw_cntrs = 4, | |
1503 | .cntr_size = SZ_4K, | |
e14cfad3 | 1504 | .format_attrs = cci400_pmu_format_attrs, |
e14cfad3 | 1505 | .event_attrs = cci400_r1_pmu_event_attrs, |
fc17c839 SP |
1506 | .event_ranges = { |
1507 | [CCI_IF_SLAVE] = { | |
f4d58938 SP |
1508 | CCI400_R1_SLAVE_PORT_MIN_EV, |
1509 | CCI400_R1_SLAVE_PORT_MAX_EV, | |
fc17c839 SP |
1510 | }, |
1511 | [CCI_IF_MASTER] = { | |
f4d58938 SP |
1512 | CCI400_R1_MASTER_PORT_MIN_EV, |
1513 | CCI400_R1_MASTER_PORT_MAX_EV, | |
fc17c839 SP |
1514 | }, |
1515 | }, | |
31216290 SP |
1516 | .validate_hw_event = cci400_validate_hw_event, |
1517 | .get_event_idx = cci400_get_event_idx, | |
fc17c839 | 1518 | }, |
f4d58938 | 1519 | #endif |
a95791ef SP |
1520 | #ifdef CONFIG_ARM_CCI500_PMU |
1521 | [CCI500_R0] = { | |
1522 | .name = "CCI_500", | |
1523 | .fixed_hw_cntrs = 0, | |
1524 | .num_hw_cntrs = 8, | |
1525 | .cntr_size = SZ_64K, | |
e14cfad3 | 1526 | .format_attrs = cci500_pmu_format_attrs, |
e14cfad3 | 1527 | .event_attrs = cci500_pmu_event_attrs, |
a95791ef SP |
1528 | .event_ranges = { |
1529 | [CCI_IF_SLAVE] = { | |
1530 | CCI500_SLAVE_PORT_MIN_EV, | |
1531 | CCI500_SLAVE_PORT_MAX_EV, | |
1532 | }, | |
1533 | [CCI_IF_MASTER] = { | |
1534 | CCI500_MASTER_PORT_MIN_EV, | |
1535 | CCI500_MASTER_PORT_MAX_EV, | |
1536 | }, | |
1537 | [CCI_IF_GLOBAL] = { | |
1538 | CCI500_GLOBAL_PORT_MIN_EV, | |
1539 | CCI500_GLOBAL_PORT_MAX_EV, | |
1540 | }, | |
1541 | }, | |
1542 | .validate_hw_event = cci500_validate_hw_event, | |
a445fcc9 | 1543 | .write_counters = cci500_pmu_write_counters, |
a95791ef SP |
1544 | }, |
1545 | #endif | |
fc17c839 SP |
1546 | }; |
1547 | ||
b91c8f28 | 1548 | static const struct of_device_id arm_cci_pmu_matches[] = { |
f4d58938 | 1549 | #ifdef CONFIG_ARM_CCI400_PMU |
b91c8f28 PA |
1550 | { |
1551 | .compatible = "arm,cci-400-pmu", | |
772742a6 SP |
1552 | .data = NULL, |
1553 | }, | |
1554 | { | |
1555 | .compatible = "arm,cci-400-pmu,r0", | |
f4d58938 | 1556 | .data = &cci_pmu_models[CCI400_R0], |
772742a6 SP |
1557 | }, |
1558 | { | |
1559 | .compatible = "arm,cci-400-pmu,r1", | |
f4d58938 | 1560 | .data = &cci_pmu_models[CCI400_R1], |
b91c8f28 | 1561 | }, |
a95791ef SP |
1562 | #endif |
1563 | #ifdef CONFIG_ARM_CCI500_PMU | |
1564 | { | |
1565 | .compatible = "arm,cci-500-pmu,r0", | |
1566 | .data = &cci_pmu_models[CCI500_R0], | |
1567 | }, | |
f4d58938 | 1568 | #endif |
b91c8f28 PA |
1569 | {}, |
1570 | }; | |
1571 | ||
fc17c839 SP |
1572 | static inline const struct cci_pmu_model *get_cci_model(struct platform_device *pdev) |
1573 | { | |
1574 | const struct of_device_id *match = of_match_node(arm_cci_pmu_matches, | |
1575 | pdev->dev.of_node); | |
1576 | if (!match) | |
1577 | return NULL; | |
772742a6 SP |
1578 | if (match->data) |
1579 | return match->data; | |
fc17c839 | 1580 | |
772742a6 SP |
1581 | dev_warn(&pdev->dev, "DEPRECATED compatible property," |
1582 | "requires secure access to CCI registers"); | |
fc17c839 SP |
1583 | return probe_cci_model(pdev); |
1584 | } | |
1585 | ||
f6b9e83c SP |
1586 | static bool is_duplicate_irq(int irq, int *irqs, int nr_irqs) |
1587 | { | |
1588 | int i; | |
1589 | ||
1590 | for (i = 0; i < nr_irqs; i++) | |
1591 | if (irq == irqs[i]) | |
1592 | return true; | |
1593 | ||
1594 | return false; | |
1595 | } | |
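/*
 * Editor's sketch (illustration only): the same de-duplication the probe
 * routine below performs when several counters share one combined
 * interrupt line, demonstrated on a made-up IRQ list.
 */
#include <stdbool.h>
#include <stdio.h>

static bool is_dup(int irq, const int *irqs, int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		if (irq == irqs[i])
			return true;
	return false;
}

int main(void)
{
	/* hypothetical: five counters wired to two physical lines */
	const int raw[] = { 40, 40, 41, 40, 41 };
	int uniq[5], nr = 0, i;

	for (i = 0; i < 5; i++)
		if (!is_dup(raw[i], uniq, nr))
			uniq[nr++] = raw[i];

	printf("%d unique IRQ line(s)\n", nr);	/* prints 2 */
	return 0;
}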
1596 | ||
ab5b316d | 1597 | static struct cci_pmu *cci_pmu_alloc(struct platform_device *pdev) |
b91c8f28 | 1598 | { |
a1a076d7 | 1599 | struct cci_pmu *cci_pmu; |
fc17c839 SP |
1600 | const struct cci_pmu_model *model; |
1601 | ||
ab5b316d SP |
1602 | /* |
1603 | * All allocations are devm_*, hence we don't have to free | |
1604 | * them explicitly on an error; they are released automatically | |
1605 | * on driver detach. | |
1606 | */ | |
fc17c839 SP |
1607 | model = get_cci_model(pdev); |
1608 | if (!model) { | |
1609 | dev_warn(&pdev->dev, "CCI PMU version not supported\n"); | |
ab5b316d | 1610 | return ERR_PTR(-ENODEV); |
fc17c839 | 1611 | } |
b91c8f28 | 1612 | |
a1a076d7 SP |
1613 | cci_pmu = devm_kzalloc(&pdev->dev, sizeof(*cci_pmu), GFP_KERNEL); |
1614 | if (!cci_pmu) | |
ab5b316d | 1615 | return ERR_PTR(-ENOMEM); |
b91c8f28 | 1616 | |
a1a076d7 | 1617 | cci_pmu->model = model; |
ab5b316d SP |
1618 | cci_pmu->irqs = devm_kcalloc(&pdev->dev, CCI_PMU_MAX_HW_CNTRS(model), |
1619 | sizeof(*cci_pmu->irqs), GFP_KERNEL); | |
1620 | if (!cci_pmu->irqs) | |
1621 | return ERR_PTR(-ENOMEM); | |
1622 | cci_pmu->hw_events.events = devm_kcalloc(&pdev->dev, | |
1623 | CCI_PMU_MAX_HW_CNTRS(model), | |
1624 | sizeof(*cci_pmu->hw_events.events), | |
1625 | GFP_KERNEL); | |
1626 | if (!cci_pmu->hw_events.events) | |
1627 | return ERR_PTR(-ENOMEM); | |
1628 | cci_pmu->hw_events.used_mask = devm_kcalloc(&pdev->dev, | |
1629 | BITS_TO_LONGS(CCI_PMU_MAX_HW_CNTRS(model)), | |
1630 | sizeof(*cci_pmu->hw_events.used_mask), | |
1631 | GFP_KERNEL); | |
1632 | if (!cci_pmu->hw_events.used_mask) | |
1633 | return ERR_PTR(-ENOMEM); | |
1634 | ||
1635 | return cci_pmu; | |
1636 | } | |
1637 | ||
1638 | ||
1639 | static int cci_pmu_probe(struct platform_device *pdev) | |
1640 | { | |
1641 | struct resource *res; | |
1642 | struct cci_pmu *cci_pmu; | |
1643 | int i, ret, irq; | |
1644 | ||
1645 | cci_pmu = cci_pmu_alloc(pdev); | |
1646 | if (IS_ERR(cci_pmu)) | |
1647 | return PTR_ERR(cci_pmu); | |
1648 | ||
b91c8f28 | 1649 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
a1a076d7 SP |
1650 | cci_pmu->base = devm_ioremap_resource(&pdev->dev, res); |
1651 | if (IS_ERR(cci_pmu->base)) | |
fee4f2c6 | 1652 | return PTR_ERR(cci_pmu->base); | |
b91c8f28 PA |
1653 | |
1654 | /* | |
ab5b316d | 1655 | * The CCI PMU has one overflow interrupt per counter, but some may be | |
b91c8f28 PA |
1656 | * tied together on a common interrupt line. | |
1657 | */ | |
a1a076d7 | 1658 | cci_pmu->nr_irqs = 0; |
ab5b316d | 1659 | for (i = 0; i < CCI_PMU_MAX_HW_CNTRS(cci_pmu->model); i++) { |
b91c8f28 PA |
1660 | irq = platform_get_irq(pdev, i); |
1661 | if (irq < 0) | |
1662 | break; | |
1663 | ||
a1a076d7 | 1664 | if (is_duplicate_irq(irq, cci_pmu->irqs, cci_pmu->nr_irqs)) |
b91c8f28 PA |
1665 | continue; |
1666 | ||
a1a076d7 | 1667 | cci_pmu->irqs[cci_pmu->nr_irqs++] = irq; |
b91c8f28 PA |
1668 | } |
1669 | ||
1670 | /* | |
1671 | * Ensure that the device tree has as many interrupts as the number | |
1672 | * of counters. | |
1673 | */ | |
ab5b316d | 1674 | if (i < CCI_PMU_MAX_HW_CNTRS(cci_pmu->model)) { |
b91c8f28 | 1675 | dev_warn(&pdev->dev, "In-correct number of interrupts: %d, should be %d\n", |
ab5b316d | 1676 | i, CCI_PMU_MAX_HW_CNTRS(cci_pmu->model)); |
fee4f2c6 | 1677 | return -EINVAL; |
b91c8f28 PA |
1678 | } |
1679 | ||
a1a076d7 SP |
1680 | raw_spin_lock_init(&cci_pmu->hw_events.pmu_lock); |
1681 | mutex_init(&cci_pmu->reserve_mutex); | |
1682 | atomic_set(&cci_pmu->active_events, 0); | |
1683 | cpumask_set_cpu(smp_processor_id(), &cci_pmu->cpus); | |
c6f85cb4 | 1684 | |
a1a076d7 SP |
1685 | cci_pmu->cpu_nb = (struct notifier_block) { |
1686 | .notifier_call = cci_pmu_cpu_notifier, | |
1687 | /* | |
1688 | * To migrate uncore events, our notifier should be executed | |
1689 | * before perf core's notifier. | |
1690 | */ | |
1691 | .priority = CPU_PRI_PERF + 1, | |
1692 | }; | |
1693 | ||
1694 | ret = register_cpu_notifier(&cci_pmu->cpu_nb); | |
c6f85cb4 MR |
1695 | if (ret) |
1696 | return ret; | |
b91c8f28 | 1697 | |
a1a076d7 SP |
1698 | ret = cci_pmu_init(cci_pmu, pdev); |
1699 | if (ret) { | |
1700 | unregister_cpu_notifier(&cci_pmu->cpu_nb); | |
fee4f2c6 | 1701 | return ret; |
a1a076d7 | 1702 | } |
b91c8f28 | 1703 | |
a1a076d7 | 1704 | pr_info("ARM %s PMU driver probed", cci_pmu->model->name); |
b91c8f28 | 1705 | return 0; |
b91c8f28 PA |
1706 | } |
1707 | ||
1708 | static int cci_platform_probe(struct platform_device *pdev) | |
1709 | { | |
1710 | if (!cci_probed()) | |
1711 | return -ENODEV; | |
1712 | ||
1713 | return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev); | |
1714 | } | |
1715 | ||
f6b9e83c SP |
1716 | static struct platform_driver cci_pmu_driver = { |
1717 | .driver = { | |
1718 | .name = DRIVER_NAME_PMU, | |
1719 | .of_match_table = arm_cci_pmu_matches, | |
1720 | }, | |
1721 | .probe = cci_pmu_probe, | |
1722 | }; | |
1723 | ||
1724 | static struct platform_driver cci_platform_driver = { | |
1725 | .driver = { | |
1726 | .name = DRIVER_NAME, | |
1727 | .of_match_table = arm_cci_matches, | |
1728 | }, | |
1729 | .probe = cci_platform_probe, | |
1730 | }; | |
1731 | ||
1732 | static int __init cci_platform_init(void) | |
1733 | { | |
1734 | int ret; | |
1735 | ||
1736 | ret = platform_driver_register(&cci_pmu_driver); | |
1737 | if (ret) | |
1738 | return ret; | |
1739 | ||
1740 | return platform_driver_register(&cci_platform_driver); | |
1741 | } | |
1742 | ||
f4d58938 | 1743 | #else /* !CONFIG_ARM_CCI_PMU */ |
f6b9e83c SP |
1744 | |
1745 | static int __init cci_platform_init(void) | |
1746 | { | |
1747 | return 0; | |
1748 | } | |
1749 | ||
f4d58938 | 1750 | #endif /* CONFIG_ARM_CCI_PMU */ |
ee8e5d5f SP |
1751 | |
1752 | #ifdef CONFIG_ARM_CCI400_PORT_CTRL | |
b91c8f28 | 1753 | |
f6b9e83c SP |
1754 | #define CCI_PORT_CTRL 0x0 |
1755 | #define CCI_CTRL_STATUS 0xc | |
1756 | ||
1757 | #define CCI_ENABLE_SNOOP_REQ 0x1 | |
1758 | #define CCI_ENABLE_DVM_REQ 0x2 | |
1759 | #define CCI_ENABLE_REQ (CCI_ENABLE_SNOOP_REQ | CCI_ENABLE_DVM_REQ) | |
1760 | ||
1761 | enum cci_ace_port_type { | |
1762 | ACE_INVALID_PORT = 0x0, | |
1763 | ACE_PORT, | |
1764 | ACE_LITE_PORT, | |
1765 | }; | |
1766 | ||
1767 | struct cci_ace_port { | |
1768 | void __iomem *base; | |
1769 | unsigned long phys; | |
1770 | enum cci_ace_port_type type; | |
1771 | struct device_node *dn; | |
1772 | }; | |
1773 | ||
1774 | static struct cci_ace_port *ports; | |
1775 | static unsigned int nb_cci_ports; | |
1776 | ||
ed69bdd8 LP |
1777 | struct cpu_port { |
1778 | u64 mpidr; | |
1779 | u32 port; | |
1780 | }; | |
62158f81 | 1781 | |
ed69bdd8 LP |
1782 | /* |
1783 | * Use the port MSB as a valid flag; the shift could be made dynamic | |
1784 | * by computing the number of bits required for port indexes. | |
1785 | * Code disabling CCI cpu ports runs with the D-cache invalidated | |
1786 | * and the SCTLR C bit clear, so data accesses must be kept to a | |
1787 | * minimum to improve performance; for now the shift is left static | |
1788 | * to avoid one more data access while disabling the CCI port. | |
1789 | */ | |
1790 | #define PORT_VALID_SHIFT 31 | |
1791 | #define PORT_VALID (0x1 << PORT_VALID_SHIFT) | |
1792 | ||
1793 | static inline void init_cpu_port(struct cpu_port *port, u32 index, u64 mpidr) | |
1794 | { | |
1795 | port->port = PORT_VALID | index; | |
1796 | port->mpidr = mpidr; | |
1797 | } | |
1798 | ||
1799 | static inline bool cpu_port_is_valid(struct cpu_port *port) | |
1800 | { | |
1801 | return !!(port->port & PORT_VALID); | |
1802 | } | |
1803 | ||
1804 | static inline bool cpu_port_match(struct cpu_port *port, u64 mpidr) | |
1805 | { | |
1806 | return port->mpidr == (mpidr & MPIDR_HWID_BITMASK); | |
1807 | } | |
1808 | ||
1809 | static struct cpu_port cpu_port[NR_CPUS]; | |
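/*
 * Editor's sketch (illustration only): how the PORT_VALID flag above
 * packs a port index and a validity bit into one word, so the power-down
 * path needs a single load to get both.
 */
#include <stdint.h>
#include <stdio.h>

#define PORT_VALID_SHIFT	31
#define PORT_VALID		(0x1U << PORT_VALID_SHIFT)

int main(void)
{
	uint32_t port = PORT_VALID | 2;	/* as set by init_cpu_port(..., 2, ...) */

	printf("valid: %d, index: %u\n",
	       !!(port & PORT_VALID), port & ~PORT_VALID);
	return 0;
}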
1810 | ||
1811 | /** | |
1812 | * __cci_ace_get_port - Function to retrieve the index of the port | |
1813 | * connected to a cpu or device. | |
1814 | * | |
1815 | * @dn: device node of the device to look up | |
1816 | * @type: port type | |
1817 | * | |
1818 | * Return value: | |
1819 | * - CCI port index on success | |
1820 | * - -ENODEV on failure | |
1821 | */ | |
1822 | static int __cci_ace_get_port(struct device_node *dn, int type) | |
1823 | { | |
1824 | int i; | |
1825 | bool ace_match; | |
1826 | struct device_node *cci_portn; | |
1827 | ||
1828 | cci_portn = of_parse_phandle(dn, "cci-control-port", 0); | |
1829 | for (i = 0; i < nb_cci_ports; i++) { | |
1830 | ace_match = ports[i].type == type; | |
1831 | if (ace_match && cci_portn == ports[i].dn) | |
1832 | return i; | |
1833 | } | |
1834 | return -ENODEV; | |
1835 | } | |
1836 | ||
1837 | int cci_ace_get_port(struct device_node *dn) | |
1838 | { | |
1839 | return __cci_ace_get_port(dn, ACE_LITE_PORT); | |
1840 | } | |
1841 | EXPORT_SYMBOL_GPL(cci_ace_get_port); | |
1842 | ||
b91c8f28 | 1843 | static void cci_ace_init_ports(void) |
ed69bdd8 | 1844 | { |
78b4d6e0 SK |
1845 | int port, cpu; |
1846 | struct device_node *cpun; | |
ed69bdd8 LP |
1847 | |
1848 | /* | |
1849 | * Port index look-up speeds up the function disabling ports by CPU, | |
1850 | * since the logical to port index mapping is done once and does | |
1851 | * not change after system boot. | |
1852 | * The stashed index array is initialized for all possible CPUs | |
1853 | * at probe time. | |
1854 | */ | |
78b4d6e0 SK |
1855 | for_each_possible_cpu(cpu) { |
1856 | /* too early to use cpu->of_node */ | |
1857 | cpun = of_get_cpu_node(cpu, NULL); | |
ed69bdd8 | 1858 | |
78b4d6e0 | 1859 | if (WARN(!cpun, "Missing cpu device node\n")) |
ed69bdd8 | 1860 | continue; |
78b4d6e0 | 1861 | |
ed69bdd8 LP |
1862 | port = __cci_ace_get_port(cpun, ACE_PORT); |
1863 | if (port < 0) | |
1864 | continue; | |
1865 | ||
1866 | init_cpu_port(&cpu_port[cpu], port, cpu_logical_map(cpu)); | |
1867 | } | |
1868 | ||
1869 | for_each_possible_cpu(cpu) { | |
1870 | WARN(!cpu_port_is_valid(&cpu_port[cpu]), | |
1871 | "CPU %u does not have an associated CCI port\n", | |
1872 | cpu); | |
1873 | } | |
1874 | } | |
1875 | /* | |
1876 | * Functions to enable/disable a CCI interconnect slave port | |
1877 | * | |
1878 | * They are called by low-level power management code to disable slave | |
1879 | * interface snoops and DVM broadcast. | |
1880 | * They may execute with cache data allocation disabled and after the | |
1881 | * caches have been cleaned and invalidated, so they provide no explicit | |
1882 | * locking: normal cacheable kernel locks based on ldrex/strex may not | |
1883 | * work while the D-cache is disabled. | |
1884 | * Locking has to be provided by BSP implementations to ensure proper | |
1885 | * operation. | |
1886 | */ | |
1887 | ||
1888 | /** | |
1889 | * cci_port_control() - function to control a CCI port | |
1890 | * | |
1891 | * @port: index of the port to setup | |
1892 | * @enable: if true enables the port, if false disables it | |
1893 | */ | |
1894 | static void notrace cci_port_control(unsigned int port, bool enable) | |
1895 | { | |
1896 | void __iomem *base = ports[port].base; | |
1897 | ||
1898 | writel_relaxed(enable ? CCI_ENABLE_REQ : 0, base + CCI_PORT_CTRL); | |
1899 | /* | |
1900 | * This function is called from power down procedures | |
1901 | * and must not execute any instruction that might | |
1902 | * cause the processor to be put in a quiescent state | |
1903 | * (eg wfi). Hence, cpu_relax() can not be added to this | |
1904 | * read loop to optimize power, since it might hide possibly | |
1905 | * disruptive operations. | |
1906 | */ | |
1907 | while (readl_relaxed(cci_ctrl_base + CCI_CTRL_STATUS) & 0x1) | |
1908 | ; | |
1909 | } | |
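/*
 * Editor's sketch (illustration only): a userspace analogue of the raw
 * status-register poll above -- spin until a "busy" flag clears, with no
 * sleep or pause hints in the loop body, as the power-down path requires.
 * Build with: cc demo.c -lpthread
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int busy = 1;

static void *device(void *arg)
{
	(void)arg;
	usleep(1000);			/* "hardware" completes the request */
	atomic_store(&busy, 0);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, device, NULL);
	while (atomic_load(&busy))
		;			/* bare spin, like the readl loop */
	pthread_join(t, NULL);
	puts("port change complete");
	return 0;
}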
1910 | ||
1911 | /** | |
1912 | * cci_disable_port_by_cpu() - function to disable a CCI port by CPU | |
1913 | * reference | |
1914 | * | |
1915 | * @mpidr: mpidr of the CPU whose CCI port should be disabled | |
1916 | * | |
1917 | * Disabling a CCI port for a CPU implies disabling the CCI port | |
1918 | * controlling that CPU cluster. Code disabling CPU CCI ports | |
1919 | * must make sure that the CPU running the code is the last active CPU | |
1920 | * in the cluster ie all other CPUs are quiescent in a low power state. | |
1921 | * | |
1922 | * Return: | |
1923 | * 0 on success | |
1924 | * -ENODEV on port look-up failure | |
1925 | */ | |
1926 | int notrace cci_disable_port_by_cpu(u64 mpidr) | |
1927 | { | |
1928 | int cpu; | |
1929 | bool is_valid; | |
1930 | for (cpu = 0; cpu < nr_cpu_ids; cpu++) { | |
1931 | is_valid = cpu_port_is_valid(&cpu_port[cpu]); | |
1932 | if (is_valid && cpu_port_match(&cpu_port[cpu], mpidr)) { | |
1933 | cci_port_control(cpu_port[cpu].port, false); | |
1934 | return 0; | |
1935 | } | |
1936 | } | |
1937 | return -ENODEV; | |
1938 | } | |
1939 | EXPORT_SYMBOL_GPL(cci_disable_port_by_cpu); | |
1940 | ||
62158f81 NP |
1941 | /** |
1942 | * cci_enable_port_for_self() - enable a CCI port for calling CPU | |
1943 | * | |
1944 | * Enabling a CCI port for the calling CPU implies enabling the CCI | |
1945 | * port controlling that CPU's cluster. Caller must make sure that the | |
1946 | * CPU running the code is the first active CPU in the cluster and all | |
1947 | * other CPUs are quiescent in a low power state or waiting for this CPU | |
1948 | * to complete the CCI initialization. | |
1949 | * | |
1950 | * Because this is called when the MMU is still off and with no stack, | |
1951 | * the code must be position independent and ideally rely on callee | |
1952 | * clobbered registers only. To achieve this we must code this function | |
1953 | * entirely in assembler. | |
1954 | * | |
1955 | * On success this returns with the proper CCI port enabled. In case of | |
1956 | * any failure this never returns as the inability to enable the CCI is | |
1957 | * fatal and there is no possible recovery at this stage. | |
1958 | */ | |
1959 | asmlinkage void __naked cci_enable_port_for_self(void) | |
1960 | { | |
1961 | asm volatile ("\n" | |
f4902492 | 1962 | " .arch armv7-a\n" |
62158f81 NP |
1963 | " mrc p15, 0, r0, c0, c0, 5 @ get MPIDR value \n" |
1964 | " and r0, r0, #"__stringify(MPIDR_HWID_BITMASK)" \n" | |
1965 | " adr r1, 5f \n" | |
1966 | " ldr r2, [r1] \n" | |
1967 | " add r1, r1, r2 @ &cpu_port \n" | |
1968 | " add ip, r1, %[sizeof_cpu_port] \n" | |
1969 | ||
1970 | /* Loop over the cpu_port array looking for a matching MPIDR */ | |
1971 | "1: ldr r2, [r1, %[offsetof_cpu_port_mpidr_lsb]] \n" | |
1972 | " cmp r2, r0 @ compare MPIDR \n" | |
1973 | " bne 2f \n" | |
1974 | ||
1975 | /* Found a match, now test port validity */ | |
1976 | " ldr r3, [r1, %[offsetof_cpu_port_port]] \n" | |
1977 | " tst r3, #"__stringify(PORT_VALID)" \n" | |
1978 | " bne 3f \n" | |
1979 | ||
1980 | /* no match, loop with the next cpu_port entry */ | |
1981 | "2: add r1, r1, %[sizeof_struct_cpu_port] \n" | |
1982 | " cmp r1, ip @ done? \n" | |
1983 | " blo 1b \n" | |
1984 | ||
1985 | /* CCI port not found -- cheaply try to stall this CPU */ | |
1986 | "cci_port_not_found: \n" | |
1987 | " wfi \n" | |
1988 | " wfe \n" | |
1989 | " b cci_port_not_found \n" | |
1990 | ||
1991 | /* Use matched port index to look up the corresponding ports entry */ | |
1992 | "3: bic r3, r3, #"__stringify(PORT_VALID)" \n" | |
1993 | " adr r0, 6f \n" | |
1994 | " ldmia r0, {r1, r2} \n" | |
1995 | " sub r1, r1, r0 @ virt - phys \n" | |
1996 | " ldr r0, [r0, r2] @ *(&ports) \n" | |
1997 | " mov r2, %[sizeof_struct_ace_port] \n" | |
1998 | " mla r0, r2, r3, r0 @ &ports[index] \n" | |
1999 | " sub r0, r0, r1 @ virt_to_phys() \n" | |
2000 | ||
2001 | /* Enable the CCI port */ | |
2002 | " ldr r0, [r0, %[offsetof_port_phys]] \n" | |
fdb07aee | 2003 | " mov r3, %[cci_enable_req]\n" |
62158f81 NP |
2004 | " str r3, [r0, #"__stringify(CCI_PORT_CTRL)"] \n" |
2005 | ||
2006 | /* poll the status reg for completion */ | |
2007 | " adr r1, 7f \n" | |
2008 | " ldr r0, [r1] \n" | |
2009 | " ldr r0, [r0, r1] @ cci_ctrl_base \n" | |
2010 | "4: ldr r1, [r0, #"__stringify(CCI_CTRL_STATUS)"] \n" | |
fdb07aee | 2011 | " tst r1, %[cci_control_status_bits] \n" |
62158f81 NP |
2012 | " bne 4b \n" |
2013 | ||
2014 | " mov r0, #0 \n" | |
2015 | " bx lr \n" | |
2016 | ||
2017 | " .align 2 \n" | |
2018 | "5: .word cpu_port - . \n" | |
2019 | "6: .word . \n" | |
2020 | " .word ports - 6b \n" | |
2021 | "7: .word cci_ctrl_phys - . \n" | |
2022 | : : | |
2023 | [sizeof_cpu_port] "i" (sizeof(cpu_port)), | |
fdb07aee VK |
2024 | [cci_enable_req] "i" cpu_to_le32(CCI_ENABLE_REQ), |
2025 | [cci_control_status_bits] "i" cpu_to_le32(1), | |
62158f81 NP |
2026 | #ifndef __ARMEB__ |
2027 | [offsetof_cpu_port_mpidr_lsb] "i" (offsetof(struct cpu_port, mpidr)), | |
2028 | #else | |
2029 | [offsetof_cpu_port_mpidr_lsb] "i" (offsetof(struct cpu_port, mpidr)+4), | |
2030 | #endif | |
2031 | [offsetof_cpu_port_port] "i" (offsetof(struct cpu_port, port)), | |
2032 | [sizeof_struct_cpu_port] "i" (sizeof(struct cpu_port)), | |
2033 | [sizeof_struct_ace_port] "i" (sizeof(struct cci_ace_port)), | |
2034 | [offsetof_port_phys] "i" (offsetof(struct cci_ace_port, phys)) ); | |
2035 | ||
2036 | unreachable(); | |
2037 | } | |
2038 | ||
ed69bdd8 LP |
2039 | /** |
2040 | * __cci_control_port_by_device() - function to control a CCI port by device | |
2041 | * reference | |
2042 | * | |
2043 | * @dn: device node pointer of the device whose CCI port should be | |
2044 | * controlled | |
2045 | * @enable: if true enables the port, if false disables it | |
2046 | * | |
2047 | * Return: | |
2048 | * 0 on success | |
2049 | * -ENODEV on port look-up failure | |
2050 | */ | |
2051 | int notrace __cci_control_port_by_device(struct device_node *dn, bool enable) | |
2052 | { | |
2053 | int port; | |
2054 | ||
2055 | if (!dn) | |
2056 | return -ENODEV; | |
2057 | ||
2058 | port = __cci_ace_get_port(dn, ACE_LITE_PORT); | |
2059 | if (WARN_ONCE(port < 0, "node %s ACE lite port look-up failure\n", | |
2060 | dn->full_name)) | |
2061 | return -ENODEV; | |
2062 | cci_port_control(port, enable); | |
2063 | return 0; | |
2064 | } | |
2065 | EXPORT_SYMBOL_GPL(__cci_control_port_by_device); | |
2066 | ||
2067 | /** | |
2068 | * __cci_control_port_by_index() - function to control a CCI port by port index | |
2069 | * | |
2070 | * @port: port index previously retrieved with cci_ace_get_port() | |
2071 | * @enable: if true enables the port, if false disables it | |
2072 | * | |
2073 | * Return: | |
2074 | * 0 on success | |
2075 | * -ENODEV on port index out of range | |
2076 | * -EPERM if operation carried out on an ACE PORT | |
2077 | */ | |
2078 | int notrace __cci_control_port_by_index(u32 port, bool enable) | |
2079 | { | |
2080 | if (port >= nb_cci_ports || ports[port].type == ACE_INVALID_PORT) | |
2081 | return -ENODEV; | |
2082 | /* | |
2083 | * CCI control for ports connected to CPUs is extremely fragile | |
2084 | * and must be made to go through a specific and controlled | |
2085 | * interface (ie cci_disable_port_by_cpu()); control by general-purpose | |
2086 | * indexing is therefore disabled for ACE ports. | |
2087 | */ | |
2088 | if (ports[port].type == ACE_PORT) | |
2089 | return -EPERM; | |
2090 | ||
2091 | cci_port_control(port, enable); | |
2092 | return 0; | |
2093 | } | |
2094 | EXPORT_SYMBOL_GPL(__cci_control_port_by_index); | |
2095 | ||
ed69bdd8 LP |
2096 | static const struct of_device_id arm_cci_ctrl_if_matches[] = { |
2097 | {.compatible = "arm,cci-400-ctrl-if", }, | |
2098 | {}, | |
2099 | }; | |
2100 | ||
f6b9e83c | 2101 | static int cci_probe_ports(struct device_node *np) |
ed69bdd8 LP |
2102 | { |
2103 | struct cci_nb_ports const *cci_config; | |
2104 | int ret, i, nb_ace = 0, nb_ace_lite = 0; | |
f6b9e83c | 2105 | struct device_node *cp; |
62158f81 | 2106 | struct resource res; |
ed69bdd8 LP |
2107 | const char *match_str; |
2108 | bool is_ace; | |
2109 | ||
896ddd60 | 2110 | |
ed69bdd8 LP |
2111 | cci_config = of_match_node(arm_cci_matches, np)->data; |
2112 | if (!cci_config) | |
2113 | return -ENODEV; | |
2114 | ||
2115 | nb_cci_ports = cci_config->nb_ace + cci_config->nb_ace_lite; | |
2116 | ||
7c762036 | 2117 | ports = kcalloc(nb_cci_ports, sizeof(*ports), GFP_KERNEL); |
ed69bdd8 LP |
2118 | if (!ports) |
2119 | return -ENOMEM; | |
2120 | ||
ed69bdd8 LP |
2121 | for_each_child_of_node(np, cp) { |
2122 | if (!of_match_node(arm_cci_ctrl_if_matches, cp)) | |
2123 | continue; | |
2124 | ||
2125 | i = nb_ace + nb_ace_lite; | |
2126 | ||
2127 | if (i >= nb_cci_ports) | |
2128 | break; | |
2129 | ||
2130 | if (of_property_read_string(cp, "interface-type", | |
2131 | &match_str)) { | |
2132 | WARN(1, "node %s missing interface-type property\n", | |
2133 | cp->full_name); | |
2134 | continue; | |
2135 | } | |
2136 | is_ace = strcmp(match_str, "ace") == 0; | |
2137 | if (!is_ace && strcmp(match_str, "ace-lite")) { | |
2138 | WARN(1, "node %s containing invalid interface-type property, skipping it\n", | |
2139 | cp->full_name); | |
2140 | continue; | |
2141 | } | |
2142 | ||
62158f81 NP |
2143 | ret = of_address_to_resource(cp, 0, &res); |
2144 | if (!ret) { | |
2145 | ports[i].base = ioremap(res.start, resource_size(&res)); | |
2146 | ports[i].phys = res.start; | |
2147 | } | |
2148 | if (ret || !ports[i].base) { | |
ed69bdd8 LP |
2149 | WARN(1, "unable to ioremap CCI port %d\n", i); |
2150 | continue; | |
2151 | } | |
2152 | ||
2153 | if (is_ace) { | |
2154 | if (WARN_ON(nb_ace >= cci_config->nb_ace)) | |
2155 | continue; | |
2156 | ports[i].type = ACE_PORT; | |
2157 | ++nb_ace; | |
2158 | } else { | |
2159 | if (WARN_ON(nb_ace_lite >= cci_config->nb_ace_lite)) | |
2160 | continue; | |
2161 | ports[i].type = ACE_LITE_PORT; | |
2162 | ++nb_ace_lite; | |
2163 | } | |
2164 | ports[i].dn = cp; | |
2165 | } | |
2166 | ||
2167 | /* initialize a stashed array of ACE ports to speed-up look-up */ | |
2168 | cci_ace_init_ports(); | |
2169 | ||
2170 | /* | |
2171 | * Multi-cluster systems may need this data when non-coherent, during | |
2172 | * cluster power-up/power-down. Make sure it reaches main memory. | |
2173 | */ | |
2174 | sync_cache_w(&cci_ctrl_base); | |
62158f81 | 2175 | sync_cache_w(&cci_ctrl_phys); |
ed69bdd8 LP |
2176 | sync_cache_w(&ports); |
2177 | sync_cache_w(&cpu_port); | |
2178 | __sync_cache_range_w(ports, sizeof(*ports) * nb_cci_ports); | |
2179 | pr_info("ARM CCI driver probed\n"); | |
f6b9e83c | 2180 | |
ed69bdd8 | 2181 | return 0; |
f6b9e83c | 2182 | } |
ee8e5d5f SP |
2183 | #else /* !CONFIG_ARM_CCI400_PORT_CTRL */ |
2184 | static inline int cci_probe_ports(struct device_node *np) | |
2185 | { | |
2186 | return 0; | |
2187 | } | |
2188 | #endif /* CONFIG_ARM_CCI400_PORT_CTRL */ | |
ed69bdd8 | 2189 | |
f6b9e83c SP |
2190 | static int cci_probe(void) |
2191 | { | |
2192 | int ret; | |
2193 | struct device_node *np; | |
2194 | struct resource res; | |
ed69bdd8 | 2195 | |
f6b9e83c SP |
2196 | np = of_find_matching_node(NULL, arm_cci_matches); |
2197 | if (!np || !of_device_is_available(np)) | |
2198 | return -ENODEV; | |
2199 | ||
2200 | ret = of_address_to_resource(np, 0, &res); | |
2201 | if (!ret) { | |
2202 | cci_ctrl_base = ioremap(res.start, resource_size(&res)); | |
2203 | cci_ctrl_phys = res.start; | |
2204 | } | |
2205 | if (ret || !cci_ctrl_base) { | |
2206 | WARN(1, "unable to ioremap CCI ctrl\n"); | |
2207 | return -ENXIO; | |
2208 | } | |
2209 | ||
2210 | return cci_probe_ports(np); | |
ed69bdd8 LP |
2211 | } |
2212 | ||
2213 | static int cci_init_status = -EAGAIN; | |
2214 | static DEFINE_MUTEX(cci_probing); | |
2215 | ||
b91c8f28 | 2216 | static int cci_init(void) |
ed69bdd8 LP |
2217 | { |
2218 | if (cci_init_status != -EAGAIN) | |
2219 | return cci_init_status; | |
2220 | ||
2221 | mutex_lock(&cci_probing); | |
2222 | if (cci_init_status == -EAGAIN) | |
2223 | cci_init_status = cci_probe(); | |
2224 | mutex_unlock(&cci_probing); | |
2225 | return cci_init_status; | |
2226 | } | |
2227 | ||
2228 | /* | |
2229 | * To sort out early init call ordering, a helper function is provided | |
2230 | * to check whether the CCI driver has been initialized. If it has not, | |
2231 | * the helper calls the init function, which probes the driver and | |
2232 | * updates the cached return value. | |
2233 | */ | |
b91c8f28 | 2234 | bool cci_probed(void) |
ed69bdd8 LP |
2235 | { |
2236 | return cci_init() == 0; | |
2237 | } | |
2238 | EXPORT_SYMBOL_GPL(cci_probed); | |
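/*
 * Editor's sketch (illustration only): the memoized-probe pattern used by
 * cci_init()/cci_probed() above, reproduced with pthreads -- probe once
 * under a lock, cache the status, answer every later caller from the
 * cache. Build with: cc demo.c -lpthread
 */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static int init_status = -EAGAIN;
static pthread_mutex_t probing = PTHREAD_MUTEX_INITIALIZER;

static int probe(void)
{
	return 0;			/* stand-in for the real probe */
}

static int init(void)
{
	if (init_status != -EAGAIN)
		return init_status;

	pthread_mutex_lock(&probing);
	if (init_status == -EAGAIN)	/* re-check under the lock */
		init_status = probe();
	pthread_mutex_unlock(&probing);
	return init_status;
}

static bool probed(void)
{
	return init() == 0;
}

int main(void)
{
	printf("probed: %d, probed again: %d\n", probed(), probed());
	return 0;
}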
2239 | ||
2240 | early_initcall(cci_init); | |
b91c8f28 | 2241 | core_initcall(cci_platform_init); |
ed69bdd8 LP |
2242 | MODULE_LICENSE("GPL"); |
2243 | MODULE_DESCRIPTION("ARM CCI support"); |