/*
 * Linux performance counter support for ARC700 series
 *
 * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
 *
 * This code is inspired by the perf support of various other architectures.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <asm/arcregs.h>
struct arc_pmu {
	struct pmu	pmu;
	int		counter_size;	/* in bits */
	int		n_counters;
	unsigned long	used_mask[BITS_TO_LONGS(ARC_PMU_MAX_HWEVENTS)];
	int		ev_hw_idx[PERF_COUNT_ARC_HW_MAX];
};

static struct arc_pmu *arc_pmu;
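/*
 * used_mask tracks which hardware counters are claimed by active events;
 * ev_hw_idx maps each generic perf hardware event id to the ARC countable
 * condition index that implements it, or -1 if this core has no matching
 * condition (it is filled in at probe time by scanning the condition names).
 */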
/* read counter #idx; note that counter# != event# on ARC! */
static uint64_t arc_pmu_read_counter(int idx)
{
	uint32_t tmp;
	uint64_t result;

	/*
	 * ARC supports making 'snapshots' of the counters, so we don't
	 * need to care about counters wrapping to 0 underneath our feet
	 */
	write_aux_reg(ARC_REG_PCT_INDEX, idx);
	tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
	write_aux_reg(ARC_REG_PCT_CONTROL, tmp | ARC_REG_PCT_CONTROL_SN);
	result = (uint64_t) (read_aux_reg(ARC_REG_PCT_SNAPH)) << 32;
	result |= read_aux_reg(ARC_REG_PCT_SNAPL);

	return result;
}
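/*
 * Note: the SN bit latches both halves of the selected counter into the
 * SNAPH/SNAPL snapshot registers in one go, which is why the two 32-bit
 * reads above need no retry loop to obtain a consistent 64-bit value.
 */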
static void arc_perf_event_update(struct perf_event *event,
				  struct hw_perf_event *hwc, int idx)
{
	uint64_t prev_raw_count, new_raw_count;
	int64_t delta;

	do {
		prev_raw_count = local64_read(&hwc->prev_count);
		new_raw_count = arc_pmu_read_counter(idx);
	} while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
				 new_raw_count) != prev_raw_count);

	delta = (new_raw_count - prev_raw_count) &
		((1ULL << arc_pmu->counter_size) - 1ULL);

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);
}
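/*
 * The local64_cmpxchg() loop retries with fresh values whenever prev_count
 * changed underneath us, so concurrent updates of the same event cannot
 * double-count an interval. Masking the difference down to counter_size
 * bits keeps the delta correct even if the hardware counter wrapped
 * between two updates.
 */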
static void arc_pmu_read(struct perf_event *event)
{
	arc_perf_event_update(event, &event->hw, event->hw.idx);
}
static int arc_pmu_cache_event(u64 config)
{
	unsigned int cache_type, cache_op, cache_result;
	int ret;

	cache_type	= (config >>  0) & 0xff;
	cache_op	= (config >>  8) & 0xff;
	cache_result	= (config >> 16) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ret = arc_pmu_cache_map[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	pr_debug("init cache event: type/op/result %d/%d/%d with h/w %d \'%s\'\n",
		 cache_type, cache_op, cache_result, ret,
		 arc_pmu_ev_hw_map[ret]);

	return ret;
}
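/*
 * The config layout decoded above is the generic perf ABI encoding for
 * cache events. For example, an L1D read-miss request arrives as
 *
 *	config = (PERF_COUNT_HW_CACHE_L1D         <<  0) |
 *		 (PERF_COUNT_HW_CACHE_OP_READ     <<  8) |
 *		 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 *
 * and is looked up in arc_pmu_cache_map to find the h/w event id.
 */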
/* initializes hw_perf_event structure if event is supported */
static int arc_pmu_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int ret;

	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		if (event->attr.config >= PERF_COUNT_HW_MAX)
			return -ENOENT;
		if (arc_pmu->ev_hw_idx[event->attr.config] < 0)
			return -ENOENT;
		hwc->config = arc_pmu->ev_hw_idx[event->attr.config];
		pr_debug("init event %d with h/w %d \'%s\'\n",
			 (int) event->attr.config, (int) hwc->config,
			 arc_pmu_ev_hw_map[event->attr.config]);
		return 0;
	case PERF_TYPE_HW_CACHE:
		ret = arc_pmu_cache_event(event->attr.config);
		if (ret < 0)
			return ret;
		hwc->config = arc_pmu->ev_hw_idx[ret];
		return 0;
	default:
		return -ENOENT;
	}
}
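/*
 * Note: event_init() returns -ENOENT (not -EINVAL) for events this PMU
 * cannot handle, which tells the perf core to keep looking and offer the
 * event to other registered PMUs instead of failing the syscall outright.
 */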
/* starts all counters */
static void arc_pmu_enable(struct pmu *pmu)
{
	uint32_t tmp;

	tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
	write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x1);
}
/* stops all counters */
static void arc_pmu_disable(struct pmu *pmu)
{
	uint32_t tmp;

	tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
	write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x0);
}
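/*
 * Both helpers preserve the upper 16 bits of PCT_CONTROL and rewrite only
 * the low half (1 to start all counters, 0 to stop them), so the upper
 * half is left untouched across a global disable/enable cycle.
 */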
/*
 * Assigns hardware counter to hardware condition.
 * Note that there is no separate start/stop mechanism;
 * stopping is achieved by assigning the 'never' condition
 */
static void arc_pmu_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (WARN_ON_ONCE(idx == -1))
		return;

	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));

	event->hw.state = 0;

	/* enable ARC pmu here */
	write_aux_reg(ARC_REG_PCT_INDEX, idx);
	write_aux_reg(ARC_REG_PCT_CONFIG, hwc->config);
}
static void arc_pmu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (!(event->hw.state & PERF_HES_STOPPED)) {
		/* stop ARC pmu here */
		write_aux_reg(ARC_REG_PCT_INDEX, idx);

		/* condition code #0 is always "never" */
		write_aux_reg(ARC_REG_PCT_CONFIG, 0);

		event->hw.state |= PERF_HES_STOPPED;
	}

	if ((flags & PERF_EF_UPDATE) &&
	    !(event->hw.state & PERF_HES_UPTODATE)) {
		arc_perf_event_update(event, &event->hw, idx);
		event->hw.state |= PERF_HES_UPTODATE;
	}
}
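/*
 * The PERF_HES_* bits implement the handshake the perf core expects:
 * ->stop(PERF_EF_UPDATE) folds the final hardware count into the event
 * before ->del() releases the counter, and PERF_HES_UPTODATE prevents
 * the same interval from being accounted twice.
 */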
static void arc_pmu_del(struct perf_event *event, int flags)
{
	arc_pmu_stop(event, PERF_EF_UPDATE);
	__clear_bit(event->hw.idx, arc_pmu->used_mask);

	perf_event_update_userpage(event);
}
/* allocate hardware counter and optionally start counting */
static int arc_pmu_add(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (__test_and_set_bit(idx, arc_pmu->used_mask)) {
		idx = find_first_zero_bit(arc_pmu->used_mask,
					  arc_pmu->n_counters);
		if (idx == arc_pmu->n_counters)
			return -EAGAIN;

		__set_bit(idx, arc_pmu->used_mask);
		hwc->idx = idx;
	}

	write_aux_reg(ARC_REG_PCT_INDEX, idx);
	write_aux_reg(ARC_REG_PCT_CONFIG, 0);
	write_aux_reg(ARC_REG_PCT_COUNTL, 0);
	write_aux_reg(ARC_REG_PCT_COUNTH, 0);
	local64_set(&hwc->prev_count, 0);

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (flags & PERF_EF_START)
		arc_pmu_start(event, PERF_EF_RELOAD);

	perf_event_update_userpage(event);

	return 0;
}
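/*
 * Counter allocation: the event first tries to reclaim the counter it
 * used last time (hwc->idx); only if that one is now taken does it scan
 * used_mask for a free one. The counter is cleared and left unassigned
 * (condition "never"), so nothing counts until arc_pmu_start() installs
 * the real condition code.
 */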
static int arc_pmu_device_probe(struct platform_device *pdev)
{
	struct arc_reg_pct_build pct_bcr;
	struct arc_reg_cc_build cc_bcr;
	int i, j, ret;

	union cc_name {
		struct {
			uint32_t word0, word1;
			char sentinel;
		} indiv;
		char str[9];
	} cc_name;

	READ_BCR(ARC_REG_PCT_BUILD, pct_bcr);
	if (!pct_bcr.v) {
		pr_err("This core does not have performance counters!\n");
		return -ENODEV;
	}
	BUG_ON(pct_bcr.c > ARC_PMU_MAX_HWEVENTS);

	READ_BCR(ARC_REG_CC_BUILD, cc_bcr);
	if (!cc_bcr.v) {
		pr_err("Performance counters exist, but no countable conditions?\n");
		return -ENODEV;
	}
	arc_pmu = devm_kzalloc(&pdev->dev, sizeof(struct arc_pmu), GFP_KERNEL);
	if (!arc_pmu)
		return -ENOMEM;

	arc_pmu->n_counters = pct_bcr.c;
	arc_pmu->counter_size = 32 + (pct_bcr.s << 4);

	pr_info("ARC perf\t: %d counters (%d bits), %d countable conditions\n",
		arc_pmu->n_counters, arc_pmu->counter_size, cc_bcr.c);
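	/*
	 * counter_size is 32 + 16 * pct_bcr.s: the BUILD register reports
	 * the counter width in 16-bit steps above the 32-bit base
	 * (s == 0 -> 32 bits, s == 1 -> 48 bits, s == 2 -> 64 bits).
	 */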
	cc_name.str[8] = 0;
	for (i = 0; i < PERF_COUNT_ARC_HW_MAX; i++)
		arc_pmu->ev_hw_idx[i] = -1;

	/* loop thru all available h/w condition indexes */
	for (j = 0; j < cc_bcr.c; j++) {
		write_aux_reg(ARC_REG_CC_INDEX, j);
		cc_name.indiv.word0 = read_aux_reg(ARC_REG_CC_NAME0);
		cc_name.indiv.word1 = read_aux_reg(ARC_REG_CC_NAME1);

		/* See if it has been mapped to a perf event_id */
		for (i = 0; i < ARRAY_SIZE(arc_pmu_ev_hw_map); i++) {
			if (arc_pmu_ev_hw_map[i] &&
			    !strcmp(arc_pmu_ev_hw_map[i], cc_name.str) &&
			    strlen(arc_pmu_ev_hw_map[i])) {
				pr_debug("mapping perf event %2d to h/w event \'%8s\' (idx %d)\n",
					 i, cc_name.str, j);
				arc_pmu->ev_hw_idx[i] = j;
			}
		}
	}
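	/*
	 * At this point ev_hw_idx[] is the complete translation table:
	 * conditions are matched purely by the 8-character names read back
	 * from CC_NAME0/CC_NAME1 (overlaid onto cc_name.str via the union),
	 * so a core lacking some condition simply leaves that entry at -1
	 * and arc_pmu_event_init() rejects the corresponding generic event.
	 */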
	arc_pmu->pmu = (struct pmu) {
		.pmu_enable	= arc_pmu_enable,
		.pmu_disable	= arc_pmu_disable,
		.event_init	= arc_pmu_event_init,
		.add		= arc_pmu_add,
		.del		= arc_pmu_del,
		.start		= arc_pmu_start,
		.stop		= arc_pmu_stop,
		.read		= arc_pmu_read,
	};

	/* ARC 700 PMU does not support sampling events */
	arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;

	ret = perf_pmu_register(&arc_pmu->pmu, pdev->name, PERF_TYPE_RAW);

	return ret;
}
#ifdef CONFIG_OF
static const struct of_device_id arc_pmu_match[] = {
	{ .compatible = "snps,arc700-pmu" },
	{},
};
MODULE_DEVICE_TABLE(of, arc_pmu_match);
#endif
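/*
 * A minimal, illustrative device-tree node that binds this driver (the
 * node name is arbitrary; only the compatible string matters):
 *
 *	pmu {
 *		compatible = "snps,arc700-pmu";
 *	};
 */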
static struct platform_driver arc_pmu_driver = {
	.driver = {
		.name		= "arc700-pmu",
		.of_match_table = of_match_ptr(arc_pmu_match),
	},
	.probe		= arc_pmu_device_probe,
};
module_platform_driver(arc_pmu_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mischa Jonker <mjonker@synopsys.com>");
MODULE_DESCRIPTION("ARC PMU driver");