/*
 * CCI cache coherent interconnect driver
 *
 * Copyright (C) 2013 ARM Ltd.
 * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/arm-cci.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <asm/cacheflush.h>
#include <asm/smp_plat.h>
static void __iomem *cci_ctrl_base;
static unsigned long cci_ctrl_phys;
#ifdef CONFIG_ARM_CCI400_PORT_CTRL
struct cci_nb_ports {
	unsigned int nb_ace;
	unsigned int nb_ace_lite;
};

static const struct cci_nb_ports cci400_ports = {
	.nb_ace = 2,
	.nb_ace_lite = 3
};

#define CCI400_PORTS_DATA	(&cci400_ports)
#else
#define CCI400_PORTS_DATA	(NULL)
#endif
static const struct of_device_id arm_cci_matches[] = {
#ifdef CONFIG_ARM_CCI400_COMMON
	{ .compatible = "arm,cci-400", .data = CCI400_PORTS_DATA },
#endif
	{},
};
#ifdef CONFIG_ARM_CCI400_PMU

#define DRIVER_NAME		"CCI-400"
#define DRIVER_NAME_PMU		DRIVER_NAME " PMU"

#define CCI_PMCR		0x0100
#define CCI_PID2		0x0fe8

#define CCI_PMCR_CEN		0x00000001
#define CCI_PMCR_NCNT_MASK	0x0000f800
#define CCI_PMCR_NCNT_SHIFT	11

#define CCI_PID2_REV_MASK	0xf0
#define CCI_PID2_REV_SHIFT	4

#define CCI_PMU_EVT_SEL		0x000
#define CCI_PMU_CNTR		0x004
#define CCI_PMU_CNTR_CTRL	0x008
#define CCI_PMU_OVRFLW		0x00c

#define CCI_PMU_OVRFLW_FLAG	1

#define CCI_PMU_CNTR_BASE(idx)	((idx) * SZ_4K)
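/*
 * Editorial note, not in the original source: CCI_PMU_CNTR_BASE() gives each
 * counter its own 4KiB register window, so for example counter 2's control
 * register lives at cci_pmu->base + 0x2000 + CCI_PMU_CNTR_CTRL (0x2008).
 */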
#define CCI_PMU_CNTR_MASK	((1ULL << 32) - 1)

#define CCI_PMU_EVENT_MASK		0xffUL
#define CCI_PMU_EVENT_SOURCE(event)	((event >> 5) & 0x7)
#define CCI_PMU_EVENT_CODE(event)	(event & 0x1f)

#define CCI_PMU_MAX_HW_EVENTS 5   /* CCI PMU has 4 counters + 1 cycle counter */
/* Types of interfaces that can generate events */
enum {
	CCI_IF_SLAVE,
	CCI_IF_MASTER,
	CCI_IF_MAX,
};

struct event_range {
	u32 min;
	u32 max;
};

struct cci_pmu_hw_events {
	struct perf_event *events[CCI_PMU_MAX_HW_EVENTS];
	unsigned long used_mask[BITS_TO_LONGS(CCI_PMU_MAX_HW_EVENTS)];
	raw_spinlock_t pmu_lock;
};

struct cci_pmu_model {
	char *name;
	struct event_range event_ranges[CCI_IF_MAX];
};

static struct cci_pmu_model cci_pmu_models[];

struct cci_pmu {
	void __iomem *base;
	struct pmu pmu;
	int nr_irqs;
	int irqs[CCI_PMU_MAX_HW_EVENTS];
	unsigned long active_irqs;
	const struct cci_pmu_model *model;
	struct cci_pmu_hw_events hw_events;
	struct platform_device *plat_device;
	int num_events;
	atomic_t active_events;
	struct mutex reserve_mutex;
	struct notifier_block cpu_nb;
	cpumask_t cpus;
};
#define to_cci_pmu(c)	(container_of(c, struct cci_pmu, pmu))

/* Port ids */
#define CCI_PORT_S0	0
#define CCI_PORT_S1	1
#define CCI_PORT_S2	2
#define CCI_PORT_S3	3
#define CCI_PORT_S4	4
#define CCI_PORT_M0	5
#define CCI_PORT_M1	6
#define CCI_PORT_M2	7

#define CCI_REV_R0		0
#define CCI_REV_R1		1
#define CCI_REV_R1_PX		5
/*
 * Instead of an event id to monitor CCI cycles, a dedicated counter is
 * provided. Use 0xff to represent CCI cycles and hope that no future revisions
 * make use of this event in hardware.
 */
enum cci400_perf_events {
	CCI_PMU_CYCLES = 0xff
};

#define CCI_PMU_CYCLE_CNTR_IDX		0
#define CCI_PMU_CNTR0_IDX		1
#define CCI_PMU_CNTR_LAST(cci_pmu)	(CCI_PMU_CYCLE_CNTR_IDX + cci_pmu->num_events - 1)
/*
 * CCI PMU event id is an 8-bit value made of two parts - bits 7:5 for one of 8
 * ports and bits 4:0 are event codes. There are different event codes
 * associated with each port type.
 *
 * Additionally, the range of events associated with the port types changed
 * between Rev0 and Rev1.
 *
 * The constants below define the range of valid codes for each port type for
 * the different revisions and are used to validate the event to be monitored.
 */

#define CCI_REV_R0_SLAVE_PORT_MIN_EV	0x00
#define CCI_REV_R0_SLAVE_PORT_MAX_EV	0x13
#define CCI_REV_R0_MASTER_PORT_MIN_EV	0x14
#define CCI_REV_R0_MASTER_PORT_MAX_EV	0x1a

#define CCI_REV_R1_SLAVE_PORT_MIN_EV	0x00
#define CCI_REV_R1_SLAVE_PORT_MAX_EV	0x14
#define CCI_REV_R1_MASTER_PORT_MIN_EV	0x00
#define CCI_REV_R1_MASTER_PORT_MAX_EV	0x11
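/*
 * Worked example (editorial note, not in the original source): the event id
 * 0x63 decodes with the macros above as CCI_PMU_EVENT_SOURCE(0x63) == 0x3,
 * i.e. slave interface S3, and CCI_PMU_EVENT_CODE(0x63) == 0x03. On an r0
 * part it passes validation because 0x03 lies inside
 * [CCI_REV_R0_SLAVE_PORT_MIN_EV, CCI_REV_R0_SLAVE_PORT_MAX_EV].
 */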
static int pmu_validate_hw_event(struct cci_pmu *cci_pmu, unsigned long hw_event)
{
	u8 ev_source = CCI_PMU_EVENT_SOURCE(hw_event);
	u8 ev_code = CCI_PMU_EVENT_CODE(hw_event);
	int if_type;

	if (hw_event & ~CCI_PMU_EVENT_MASK)
		return -ENOENT;

	switch (ev_source) {
	case CCI_PORT_S0:
	case CCI_PORT_S1:
	case CCI_PORT_S2:
	case CCI_PORT_S3:
	case CCI_PORT_S4:
		/* Slave Interface */
		if_type = CCI_IF_SLAVE;
		break;
	case CCI_PORT_M0:
	case CCI_PORT_M1:
	case CCI_PORT_M2:
		/* Master Interface */
		if_type = CCI_IF_MASTER;
		break;
	default:
		return -ENOENT;
	}

	if (ev_code >= cci_pmu->model->event_ranges[if_type].min &&
	    ev_code <= cci_pmu->model->event_ranges[if_type].max)
		return hw_event;

	return -ENOENT;
}
static int probe_cci_revision(void)
{
	int rev;

	rev = readl_relaxed(cci_ctrl_base + CCI_PID2) & CCI_PID2_REV_MASK;
	rev >>= CCI_PID2_REV_SHIFT;

	if (rev < CCI_REV_R1_PX)
		return CCI_REV_R0;
	else
		return CCI_REV_R1;
}

static const struct cci_pmu_model *probe_cci_model(struct platform_device *pdev)
{
	if (platform_has_secure_cci_access())
		return &cci_pmu_models[probe_cci_revision()];
	return NULL;
}
static int pmu_is_valid_counter(struct cci_pmu *cci_pmu, int idx)
{
	return CCI_PMU_CYCLE_CNTR_IDX <= idx &&
		idx <= CCI_PMU_CNTR_LAST(cci_pmu);
}

static u32 pmu_read_register(struct cci_pmu *cci_pmu, int idx, unsigned int offset)
{
	return readl_relaxed(cci_pmu->base + CCI_PMU_CNTR_BASE(idx) + offset);
}

static void pmu_write_register(struct cci_pmu *cci_pmu, u32 value,
			       int idx, unsigned int offset)
{
	return writel_relaxed(value, cci_pmu->base +
			      CCI_PMU_CNTR_BASE(idx) + offset);
}

static void pmu_disable_counter(struct cci_pmu *cci_pmu, int idx)
{
	pmu_write_register(cci_pmu, 0, idx, CCI_PMU_CNTR_CTRL);
}

static void pmu_enable_counter(struct cci_pmu *cci_pmu, int idx)
{
	pmu_write_register(cci_pmu, 1, idx, CCI_PMU_CNTR_CTRL);
}

static void pmu_set_event(struct cci_pmu *cci_pmu, int idx, unsigned long event)
{
	pmu_write_register(cci_pmu, event, idx, CCI_PMU_EVT_SEL);
}
static u32 pmu_get_max_counters(void)
{
	u32 n_cnts = (readl_relaxed(cci_ctrl_base + CCI_PMCR) &
		      CCI_PMCR_NCNT_MASK) >> CCI_PMCR_NCNT_SHIFT;

	/* add 1 for cycle counter */
	return n_cnts + 1;
}
static int pmu_get_event_idx(struct cci_pmu_hw_events *hw, struct perf_event *event)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	struct hw_perf_event *hw_event = &event->hw;
	unsigned long cci_event = hw_event->config_base;
	int idx;

	if (cci_event == CCI_PMU_CYCLES) {
		if (test_and_set_bit(CCI_PMU_CYCLE_CNTR_IDX, hw->used_mask))
			return -EAGAIN;

		return CCI_PMU_CYCLE_CNTR_IDX;
	}

	for (idx = CCI_PMU_CNTR0_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); ++idx)
		if (!test_and_set_bit(idx, hw->used_mask))
			return idx;

	/* No counters available */
	return -EAGAIN;
}
static int pmu_map_event(struct perf_event *event)
{
	int mapping;
	unsigned long config = event->attr.config;

	if (event->attr.type < PERF_TYPE_MAX)
		return -ENOENT;

	if (config == CCI_PMU_CYCLES)
		mapping = config;
	else
		mapping = pmu_validate_hw_event(to_cci_pmu(event->pmu),
						config);

	return mapping;
}
static int pmu_request_irq(struct cci_pmu *cci_pmu, irq_handler_t handler)
{
	int i;
	struct platform_device *pmu_device = cci_pmu->plat_device;

	if (unlikely(!pmu_device))
		return -ENODEV;

	if (cci_pmu->nr_irqs < 1) {
		dev_err(&pmu_device->dev, "no irqs for CCI PMUs defined\n");
		return -ENODEV;
	}

	/*
	 * Register all available CCI PMU interrupts. In the interrupt handler
	 * we iterate over the counters checking for interrupt source (the
	 * overflowing counter) and clear it.
	 *
	 * This should allow handling of non-unique interrupt for the counters.
	 */
	for (i = 0; i < cci_pmu->nr_irqs; i++) {
		int err = request_irq(cci_pmu->irqs[i], handler, IRQF_SHARED,
				      "arm-cci-pmu", cci_pmu);
		if (err) {
			dev_err(&pmu_device->dev, "unable to request IRQ%d for ARM CCI PMU counters\n",
				cci_pmu->irqs[i]);
			return err;
		}

		set_bit(i, &cci_pmu->active_irqs);
	}

	return 0;
}
static void pmu_free_irq(struct cci_pmu *cci_pmu)
{
	int i;

	for (i = 0; i < cci_pmu->nr_irqs; i++) {
		if (!test_and_clear_bit(i, &cci_pmu->active_irqs))
			continue;

		free_irq(cci_pmu->irqs[i], cci_pmu);
	}
}
static u32 pmu_read_counter(struct perf_event *event)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	struct hw_perf_event *hw_counter = &event->hw;
	int idx = hw_counter->idx;
	u32 value;

	if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
		return 0;
	}
	value = pmu_read_register(cci_pmu, idx, CCI_PMU_CNTR);

	return value;
}
static void pmu_write_counter(struct perf_event *event, u32 value)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	struct hw_perf_event *hw_counter = &event->hw;
	int idx = hw_counter->idx;

	if (unlikely(!pmu_is_valid_counter(cci_pmu, idx)))
		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
	else
		pmu_write_register(cci_pmu, value, idx, CCI_PMU_CNTR);
}
static u64 pmu_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;

	do {
		prev_raw_count = local64_read(&hwc->prev_count);
		new_raw_count = pmu_read_counter(event);
	} while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
		 new_raw_count) != prev_raw_count);

	delta = (new_raw_count - prev_raw_count) & CCI_PMU_CNTR_MASK;

	local64_add(delta, &event->count);

	return new_raw_count;
}
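/*
 * Worked example (editorial note, not in the original source): masking the
 * delta with CCI_PMU_CNTR_MASK keeps counter wrap-around correct. With
 * prev_raw_count = 0xfffffff0 and new_raw_count = 0x00000010, the expression
 * (new_raw_count - prev_raw_count) & CCI_PMU_CNTR_MASK yields 0x20, i.e.
 * 32 events elapsed across the wrap.
 */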
static void pmu_read(struct perf_event *event)
{
	pmu_event_update(event);
}

static void pmu_event_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	/*
	 * The CCI PMU counters have a period of 2^32. To account for the
	 * possibility of extreme interrupt latency we program for a period of
	 * half that. Hopefully we can handle the interrupt before another 2^31
	 * events occur and the counter overtakes its previous value.
	 */
	u64 val = 1ULL << 31;
	local64_set(&hwc->prev_count, val);
	pmu_write_counter(event, val);
}
static irqreturn_t pmu_handle_irq(int irq_num, void *dev)
{
	unsigned long flags;
	struct cci_pmu *cci_pmu = dev;
	struct cci_pmu_hw_events *events = &cci_pmu->hw_events;
	int idx, handled = IRQ_NONE;

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/*
	 * Iterate over counters and update the corresponding perf events.
	 * This should work regardless of whether we have per-counter overflow
	 * interrupt or a combined overflow interrupt.
	 */
	for (idx = CCI_PMU_CYCLE_CNTR_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) {
		struct perf_event *event = events->events[idx];
		struct hw_perf_event *hw_counter;

		if (!event)
			continue;

		hw_counter = &event->hw;

		/* Did this counter overflow? */
		if (!(pmu_read_register(cci_pmu, idx, CCI_PMU_OVRFLW) &
		      CCI_PMU_OVRFLW_FLAG))
			continue;

		pmu_write_register(cci_pmu, CCI_PMU_OVRFLW_FLAG, idx,
				   CCI_PMU_OVRFLW);

		pmu_event_update(event);
		pmu_event_set_period(event);
		handled = IRQ_HANDLED;
	}
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);

	return IRQ_RETVAL(handled);
}
static int cci_pmu_get_hw(struct cci_pmu *cci_pmu)
{
	int ret = pmu_request_irq(cci_pmu, pmu_handle_irq);
	if (ret) {
		pmu_free_irq(cci_pmu);
		return ret;
	}
	return 0;
}

static void cci_pmu_put_hw(struct cci_pmu *cci_pmu)
{
	pmu_free_irq(cci_pmu);
}
static void hw_perf_event_destroy(struct perf_event *event)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	atomic_t *active_events = &cci_pmu->active_events;
	struct mutex *reserve_mutex = &cci_pmu->reserve_mutex;

	if (atomic_dec_and_mutex_lock(active_events, reserve_mutex)) {
		cci_pmu_put_hw(cci_pmu);
		mutex_unlock(reserve_mutex);
	}
}
static void cci_pmu_enable(struct pmu *pmu)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
	int enabled = bitmap_weight(hw_events->used_mask, cci_pmu->num_events);
	unsigned long flags;
	u32 val;

	if (!enabled)
		return;

	raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);

	/* Enable all the PMU counters. */
	val = readl_relaxed(cci_ctrl_base + CCI_PMCR) | CCI_PMCR_CEN;
	writel(val, cci_ctrl_base + CCI_PMCR);
	raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
}
static void cci_pmu_disable(struct pmu *pmu)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);

	/* Disable all the PMU counters. */
	val = readl_relaxed(cci_ctrl_base + CCI_PMCR) & ~CCI_PMCR_CEN;
	writel(val, cci_ctrl_base + CCI_PMCR);
	raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
}
static void cci_pmu_start(struct perf_event *event, int pmu_flags)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	unsigned long flags;

	/*
	 * To handle interrupt latency, we always reprogram the period
	 * regardless of PERF_EF_RELOAD.
	 */
	if (pmu_flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;

	if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
		return;
	}

	raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);

	/* Configure the event to count, unless you are counting cycles */
	if (idx != CCI_PMU_CYCLE_CNTR_IDX)
		pmu_set_event(cci_pmu, idx, hwc->config_base);

	pmu_event_set_period(event);
	pmu_enable_counter(cci_pmu, idx);

	raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
}
static void cci_pmu_stop(struct perf_event *event, int pmu_flags)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (hwc->state & PERF_HES_STOPPED)
		return;

	if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
		return;
	}

	/*
	 * We always reprogram the counter, so ignore PERF_EF_UPDATE. See
	 * cci_pmu_start()
	 */
	pmu_disable_counter(cci_pmu, idx);
	pmu_event_update(event);
	hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}
static int cci_pmu_add(struct perf_event *event, int flags)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	perf_pmu_disable(event->pmu);

	/* If we don't have a space for the counter then finish early. */
	idx = pmu_get_event_idx(hw_events, event);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	event->hw.idx = idx;
	hw_events->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		cci_pmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	perf_pmu_enable(event->pmu);
	return err;
}
static void cci_pmu_del(struct perf_event *event, int flags)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	cci_pmu_stop(event, PERF_EF_UPDATE);
	hw_events->events[idx] = NULL;
	clear_bit(idx, hw_events->used_mask);

	perf_event_update_userpage(event);
}
static int
validate_event(struct pmu *cci_pmu,
	       struct cci_pmu_hw_events *hw_events,
	       struct perf_event *event)
{
	if (is_software_event(event))
		return 1;

	/*
	 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
	 * core perf code won't check that the pmu->ctx == leader->ctx
	 * until after pmu->event_init(event).
	 */
	if (event->pmu != cci_pmu)
		return 0;

	if (event->state < PERF_EVENT_STATE_OFF)
		return 1;

	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
		return 1;

	return pmu_get_event_idx(hw_events, event) >= 0;
}
static int
validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct cci_pmu_hw_events fake_pmu = {
		/*
		 * Initialise the fake PMU. We only need to populate the
		 * used_mask for the purposes of validation.
		 */
		.used_mask = { 0 },
	};

	if (!validate_event(event->pmu, &fake_pmu, leader))
		return -EINVAL;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(event->pmu, &fake_pmu, sibling))
			return -EINVAL;
	}

	if (!validate_event(event->pmu, &fake_pmu, event))
		return -EINVAL;

	return 0;
}
static int
__hw_perf_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int mapping;

	mapping = pmu_map_event(event);

	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet.
	 */
	hwc->idx		= -1;
	hwc->config_base	= 0;
	hwc->config		= 0;
	hwc->event_base		= 0;

	/*
	 * Store the event encoding into the config_base field.
	 */
	hwc->config_base	|= (unsigned long)mapping;

	/*
	 * Limit the sample_period to half of the counter width. That way, the
	 * new counter value is far less likely to overtake the previous one
	 * unless you have some serious IRQ latency issues.
	 */
	hwc->sample_period  = CCI_PMU_CNTR_MASK >> 1;
	hwc->last_period    = hwc->sample_period;
	local64_set(&hwc->period_left, hwc->sample_period);

	if (event->group_leader != event) {
		if (validate_group(event) != 0)
			return -EINVAL;
	}

	return 0;
}
static int cci_pmu_event_init(struct perf_event *event)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	atomic_t *active_events = &cci_pmu->active_events;
	int err = 0;
	int cpu;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* Shared by all CPUs, no meaningful state to sample */
	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EOPNOTSUPP;

	/* We have no filtering of any kind */
	if (event->attr.exclude_user	||
	    event->attr.exclude_kernel	||
	    event->attr.exclude_hv	||
	    event->attr.exclude_idle	||
	    event->attr.exclude_host	||
	    event->attr.exclude_guest)
		return -EINVAL;

	/*
	 * Following the example set by other "uncore" PMUs, we accept any CPU
	 * and rewrite its affinity dynamically rather than having perf core
	 * handle cpu == -1 and pid == -1 for this case.
	 *
	 * The perf core will pin online CPUs for the duration of this call and
	 * the event being installed into its context, so the PMU's CPU can't
	 * change under our feet.
	 */
	cpu = cpumask_first(&cci_pmu->cpus);
	if (event->cpu < 0 || cpu < 0)
		return -EINVAL;
	event->cpu = cpu;

	event->destroy = hw_perf_event_destroy;
	if (!atomic_inc_not_zero(active_events)) {
		mutex_lock(&cci_pmu->reserve_mutex);
		if (atomic_read(active_events) == 0)
			err = cci_pmu_get_hw(cci_pmu);
		if (!err)
			atomic_inc(active_events);
		mutex_unlock(&cci_pmu->reserve_mutex);
	}

	if (err)
		return err;

	err = __hw_perf_event_init(event);
	if (err)
		hw_perf_event_destroy(event);

	return err;
}
static ssize_t pmu_cpumask_attr_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct dev_ext_attribute *eattr = container_of(attr,
					struct dev_ext_attribute, attr);
	struct cci_pmu *cci_pmu = eattr->var;

	int n = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
			  cpumask_pr_args(&cci_pmu->cpus));
	buf[n++] = '\n';
	buf[n] = '\0';
	return n;
}
static struct dev_ext_attribute pmu_cpumask_attr = {
	__ATTR(cpumask, S_IRUGO, pmu_cpumask_attr_show, NULL),
	NULL,		/* Populated in cci_pmu_init */
};

static struct attribute *pmu_attrs[] = {
	&pmu_cpumask_attr.attr.attr,
	NULL,
};

static struct attribute_group pmu_attr_group = {
	.attrs = pmu_attrs,
};

static const struct attribute_group *pmu_attr_groups[] = {
	&pmu_attr_group,
	NULL
};
static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev)
{
	char *name = cci_pmu->model->name;

	pmu_cpumask_attr.var = cci_pmu;
	cci_pmu->pmu = (struct pmu) {
		.name		= cci_pmu->model->name,
		.task_ctx_nr	= perf_invalid_context,
		.pmu_enable	= cci_pmu_enable,
		.pmu_disable	= cci_pmu_disable,
		.event_init	= cci_pmu_event_init,
		.add		= cci_pmu_add,
		.del		= cci_pmu_del,
		.start		= cci_pmu_start,
		.stop		= cci_pmu_stop,
		.read		= pmu_read,
		.attr_groups	= pmu_attr_groups,
	};

	cci_pmu->plat_device = pdev;
	cci_pmu->num_events = pmu_get_max_counters();

	return perf_pmu_register(&cci_pmu->pmu, name, -1);
}
static int cci_pmu_cpu_notifier(struct notifier_block *self,
				unsigned long action, void *hcpu)
{
	struct cci_pmu *cci_pmu = container_of(self,
					struct cci_pmu, cpu_nb);
	unsigned int cpu = (long)hcpu;
	unsigned int target;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DOWN_PREPARE:
		if (!cpumask_test_and_clear_cpu(cpu, &cci_pmu->cpus))
			break;
		target = cpumask_any_but(cpu_online_mask, cpu);
		if (target >= nr_cpu_ids) /* UP, last CPU */
			break;
		/*
		 * TODO: migrate context once core races on event->ctx have
		 * been fixed.
		 */
		cpumask_set_cpu(target, &cci_pmu->cpus);
	default:
		break;
	}

	return NOTIFY_OK;
}
static struct cci_pmu_model cci_pmu_models[] = {
	[CCI_REV_R0] = {
		.name = "CCI_400",
		.event_ranges = {
			[CCI_IF_SLAVE] = {
				CCI_REV_R0_SLAVE_PORT_MIN_EV,
				CCI_REV_R0_SLAVE_PORT_MAX_EV,
			},
			[CCI_IF_MASTER] = {
				CCI_REV_R0_MASTER_PORT_MIN_EV,
				CCI_REV_R0_MASTER_PORT_MAX_EV,
			},
		},
	},
	[CCI_REV_R1] = {
		.name = "CCI_400_r1",
		.event_ranges = {
			[CCI_IF_SLAVE] = {
				CCI_REV_R1_SLAVE_PORT_MIN_EV,
				CCI_REV_R1_SLAVE_PORT_MAX_EV,
			},
			[CCI_IF_MASTER] = {
				CCI_REV_R1_MASTER_PORT_MIN_EV,
				CCI_REV_R1_MASTER_PORT_MAX_EV,
			},
		},
	},
};
static const struct of_device_id arm_cci_pmu_matches[] = {
	{
		.compatible = "arm,cci-400-pmu",
	},
	{
		.compatible = "arm,cci-400-pmu,r0",
		.data	= &cci_pmu_models[CCI_REV_R0],
	},
	{
		.compatible = "arm,cci-400-pmu,r1",
		.data	= &cci_pmu_models[CCI_REV_R1],
	},
	{},
};
static inline const struct cci_pmu_model *get_cci_model(struct platform_device *pdev)
{
	const struct of_device_id *match = of_match_node(arm_cci_pmu_matches,
							 pdev->dev.of_node);
	if (!match)
		return NULL;
	if (match->data)
		return match->data;

	dev_warn(&pdev->dev, "DEPRECATED compatible property, "
		 "requires secure access to CCI registers");
	return probe_cci_model(pdev);
}
static bool is_duplicate_irq(int irq, int *irqs, int nr_irqs)
{
	int i;

	for (i = 0; i < nr_irqs; i++)
		if (irq == irqs[i])
			return true;

	return false;
}
static int cci_pmu_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct cci_pmu *cci_pmu;
	int i, ret, irq;
	const struct cci_pmu_model *model;

	model = get_cci_model(pdev);
	if (!model) {
		dev_warn(&pdev->dev, "CCI PMU version not supported\n");
		return -ENODEV;
	}

	cci_pmu = devm_kzalloc(&pdev->dev, sizeof(*cci_pmu), GFP_KERNEL);
	if (!cci_pmu)
		return -ENOMEM;

	cci_pmu->model = model;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	cci_pmu->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(cci_pmu->base))
		return -ENOMEM;

	/*
	 * CCI PMU has 5 overflow signals - one per counter; but some may be tied
	 * together to a common interrupt.
	 */
	cci_pmu->nr_irqs = 0;
	for (i = 0; i < CCI_PMU_MAX_HW_EVENTS; i++) {
		irq = platform_get_irq(pdev, i);
		if (irq < 0)
			break;

		if (is_duplicate_irq(irq, cci_pmu->irqs, cci_pmu->nr_irqs))
			continue;

		cci_pmu->irqs[cci_pmu->nr_irqs++] = irq;
	}

	/*
	 * Ensure that the device tree has as many interrupts as the number
	 * of counters.
	 */
	if (i < CCI_PMU_MAX_HW_EVENTS) {
		dev_warn(&pdev->dev, "Incorrect number of interrupts: %d, should be %d\n",
			 i, CCI_PMU_MAX_HW_EVENTS);
		return -EINVAL;
	}

	raw_spin_lock_init(&cci_pmu->hw_events.pmu_lock);
	mutex_init(&cci_pmu->reserve_mutex);
	atomic_set(&cci_pmu->active_events, 0);
	cpumask_set_cpu(smp_processor_id(), &cci_pmu->cpus);

	cci_pmu->cpu_nb = (struct notifier_block) {
		.notifier_call	= cci_pmu_cpu_notifier,
		/*
		 * to migrate uncore events, our notifier should be executed
		 * before perf core's notifier.
		 */
		.priority	= CPU_PRI_PERF + 1,
	};

	ret = register_cpu_notifier(&cci_pmu->cpu_nb);
	if (ret)
		return ret;

	ret = cci_pmu_init(cci_pmu, pdev);
	if (ret) {
		unregister_cpu_notifier(&cci_pmu->cpu_nb);
		return ret;
	}

	pr_info("ARM %s PMU driver probed\n", cci_pmu->model->name);
	return 0;
}
static int cci_platform_probe(struct platform_device *pdev)
{
	if (!cci_probed())
		return -ENODEV;

	return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
}
static struct platform_driver cci_pmu_driver = {
	.driver = {
		   .name = DRIVER_NAME_PMU,
		   .of_match_table = arm_cci_pmu_matches,
		  },
	.probe = cci_pmu_probe,
};

static struct platform_driver cci_platform_driver = {
	.driver = {
		   .name = DRIVER_NAME,
		   .of_match_table = arm_cci_matches,
		  },
	.probe = cci_platform_probe,
};
static int __init cci_platform_init(void)
{
	int ret;

	ret = platform_driver_register(&cci_pmu_driver);
	if (ret)
		return ret;

	return platform_driver_register(&cci_platform_driver);
}

#else /* !CONFIG_ARM_CCI400_PMU */

static int __init cci_platform_init(void)
{
	return 0;
}

#endif /* CONFIG_ARM_CCI400_PMU */
#ifdef CONFIG_ARM_CCI400_PORT_CTRL

#define CCI_PORT_CTRL		0x0
#define CCI_CTRL_STATUS		0xc

#define CCI_ENABLE_SNOOP_REQ	0x1
#define CCI_ENABLE_DVM_REQ	0x2
#define CCI_ENABLE_REQ		(CCI_ENABLE_SNOOP_REQ | CCI_ENABLE_DVM_REQ)
enum cci_ace_port_type {
	ACE_INVALID_PORT = 0x0,
	ACE_PORT,
	ACE_LITE_PORT,
};

struct cci_ace_port {
	void __iomem *base;
	unsigned long phys;
	enum cci_ace_port_type type;
	struct device_node *dn;
};

static struct cci_ace_port *ports;
static unsigned int nb_cci_ports;

struct cpu_port {
	u64 mpidr;
	u32 port;
};
/*
 * Use the port MSB as valid flag, shift can be made dynamic
 * by computing number of bits required for port indexes.
 * Code disabling CCI cpu ports runs with D-cache invalidated
 * and SCTLR bit clear so data accesses must be kept to a minimum
 * to improve performance; for now shift is left static to
 * avoid one more data access while disabling the CCI port.
 */
#define PORT_VALID_SHIFT	31
#define PORT_VALID		(0x1 << PORT_VALID_SHIFT)
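/*
 * Illustration (editorial note, not in the original source): a cpu_port
 * initialised for CCI port index 3 stores port = PORT_VALID | 3, i.e.
 * 0x80000003; clearing bit 31 (as the "bic" in cci_enable_port_for_self()
 * does) recovers the raw port index again.
 */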
static inline void init_cpu_port(struct cpu_port *port, u32 index, u64 mpidr)
{
	port->port = PORT_VALID | index;
	port->mpidr = mpidr;
}

static inline bool cpu_port_is_valid(struct cpu_port *port)
{
	return !!(port->port & PORT_VALID);
}

static inline bool cpu_port_match(struct cpu_port *port, u64 mpidr)
{
	return port->mpidr == (mpidr & MPIDR_HWID_BITMASK);
}

static struct cpu_port cpu_port[NR_CPUS];
/**
 * __cci_ace_get_port - Function to retrieve the port index connected to
 *			a cpu or device.
 * @dn: device node of the device to look-up
 *
 * Return value:
 *	- CCI port index if success
 *	- -ENODEV if failure
 */
static int __cci_ace_get_port(struct device_node *dn, int type)
{
	int i;
	bool ace_match;
	struct device_node *cci_portn;

	cci_portn = of_parse_phandle(dn, "cci-control-port", 0);
	for (i = 0; i < nb_cci_ports; i++) {
		ace_match = ports[i].type == type;
		if (ace_match && cci_portn == ports[i].dn)
			return i;
	}
	return -ENODEV;
}
int cci_ace_get_port(struct device_node *dn)
{
	return __cci_ace_get_port(dn, ACE_LITE_PORT);
}
EXPORT_SYMBOL_GPL(cci_ace_get_port);
static void cci_ace_init_ports(void)
{
	int port, cpu;
	struct device_node *cpun;

	/*
	 * Port index look-up speeds up the function disabling ports by CPU,
	 * since the logical to port index mapping is done once and does
	 * not change after system boot.
	 * The stashed index array is initialized for all possible CPUs
	 * at probe time.
	 */
	for_each_possible_cpu(cpu) {
		/* too early to use cpu->of_node */
		cpun = of_get_cpu_node(cpu, NULL);

		if (WARN(!cpun, "Missing cpu device node\n"))
			continue;

		port = __cci_ace_get_port(cpun, ACE_PORT);
		if (port < 0)
			continue;

		init_cpu_port(&cpu_port[cpu], port, cpu_logical_map(cpu));
	}

	for_each_possible_cpu(cpu) {
		WARN(!cpu_port_is_valid(&cpu_port[cpu]),
			"CPU %u does not have an associated CCI port\n",
			cpu);
	}
}
/*
 * Functions to enable/disable a CCI interconnect slave port
 *
 * They are called by low-level power management code to disable slave
 * interfaces snoops and DVM broadcast.
 * Since they may execute with cache data allocation disabled and
 * after the caches have been cleaned and invalidated the functions provide
 * no explicit locking since they may run with D-cache disabled, so normal
 * cacheable kernel locks based on ldrex/strex may not work.
 * Locking has to be provided by BSP implementations to ensure proper
 * operations.
 */
/**
 * cci_port_control() - function to control a CCI port
 *
 * @port: index of the port to setup
 * @enable: if true enables the port, if false disables it
 */
static void notrace cci_port_control(unsigned int port, bool enable)
{
	void __iomem *base = ports[port].base;

	writel_relaxed(enable ? CCI_ENABLE_REQ : 0, base + CCI_PORT_CTRL);
	/*
	 * This function is called from power down procedures
	 * and must not execute any instruction that might
	 * cause the processor to be put in a quiescent state
	 * (eg wfi). Hence, cpu_relax() can not be added to this
	 * read loop to optimize power, since it might hide possibly
	 * disruptive operations.
	 */
	while (readl_relaxed(cci_ctrl_base + CCI_CTRL_STATUS) & 0x1)
		;
}
/**
 * cci_disable_port_by_cpu() - function to disable a CCI port by CPU
 *			       reference
 *
 * @mpidr: mpidr of the CPU whose CCI port should be disabled
 *
 * Disabling a CCI port for a CPU implies disabling the CCI port
 * controlling that CPU cluster. Code disabling CPU CCI ports
 * must make sure that the CPU running the code is the last active CPU
 * in the cluster ie all other CPUs are quiescent in a low power state.
 *
 * Return:
 *	0 on success
 *	-ENODEV on port look-up failure
 */
int notrace cci_disable_port_by_cpu(u64 mpidr)
{
	int cpu;
	bool is_valid;

	for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
		is_valid = cpu_port_is_valid(&cpu_port[cpu]);
		if (is_valid && cpu_port_match(&cpu_port[cpu], mpidr)) {
			cci_port_control(cpu_port[cpu].port, false);
			return 0;
		}
	}
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(cci_disable_port_by_cpu);
/**
 * cci_enable_port_for_self() - enable a CCI port for calling CPU
 *
 * Enabling a CCI port for the calling CPU implies enabling the CCI
 * port controlling that CPU's cluster. Caller must make sure that the
 * CPU running the code is the first active CPU in the cluster and all
 * other CPUs are quiescent in a low power state or waiting for this CPU
 * to complete the CCI initialization.
 *
 * Because this is called when the MMU is still off and with no stack,
 * the code must be position independent and ideally rely on callee
 * clobbered registers only. To achieve this we must code this function
 * entirely in assembler.
 *
 * On success this returns with the proper CCI port enabled. In case of
 * any failure this never returns as the inability to enable the CCI is
 * fatal and there is no possible recovery at this stage.
 */
asmlinkage void __naked cci_enable_port_for_self(void)
{
	asm volatile ("\n"
"	mrc	p15, 0, r0, c0, c0, 5	@ get MPIDR value \n"
"	and	r0, r0, #"__stringify(MPIDR_HWID_BITMASK)" \n"
"	adr	r1, 5f \n"
"	ldr	r2, [r1] \n"
"	add	r1, r1, r2		@ &cpu_port \n"
"	add	ip, r1, %[sizeof_cpu_port] \n"

	/* Loop over the cpu_port array looking for a matching MPIDR */
"1:	ldr	r2, [r1, %[offsetof_cpu_port_mpidr_lsb]] \n"
"	cmp	r2, r0			@ compare MPIDR \n"
"	bne	2f \n"

	/* Found a match, now test port validity */
"	ldr	r3, [r1, %[offsetof_cpu_port_port]] \n"
"	tst	r3, #"__stringify(PORT_VALID)" \n"
"	bne	3f \n"

	/* no match, loop with the next cpu_port entry */
"2:	add	r1, r1, %[sizeof_struct_cpu_port] \n"
"	cmp	r1, ip			@ done? \n"
"	blo	1b \n"

	/* CCI port not found -- cheaply try to stall this CPU */
"cci_port_not_found: \n"
"	wfi \n"
"	wfe \n"
"	b	cci_port_not_found \n"

	/* Use matched port index to look up the corresponding ports entry */
"3:	bic	r3, r3, #"__stringify(PORT_VALID)" \n"
"	adr	r0, 6f \n"
"	ldmia	r0, {r1, r2} \n"
"	sub	r1, r1, r0		@ virt - phys \n"
"	ldr	r0, [r0, r2]		@ *(&ports) \n"
"	mov	r2, %[sizeof_struct_ace_port] \n"
"	mla	r0, r2, r3, r0		@ &ports[index] \n"
"	sub	r0, r0, r1		@ virt_to_phys() \n"

	/* Enable the CCI port */
"	ldr	r0, [r0, %[offsetof_port_phys]] \n"
"	mov	r3, %[cci_enable_req]\n"
"	str	r3, [r0, #"__stringify(CCI_PORT_CTRL)"] \n"

	/* poll the status reg for completion */
"	adr	r1, 7f \n"
"	ldr	r0, [r1] \n"
"	ldr	r0, [r0, r1]		@ cci_ctrl_base \n"
"4:	ldr	r1, [r0, #"__stringify(CCI_CTRL_STATUS)"] \n"
"	tst	r1, %[cci_control_status_bits] \n"
"	bne	4b \n"

"	mov	r0, #0 \n"
"	bx	lr \n"

"	.align	2 \n"
"5:	.word	cpu_port - . \n"
"6:	.word	. \n"
"	.word	ports - 6b \n"
"7:	.word	cci_ctrl_phys - . \n"
	: :
	[sizeof_cpu_port] "i" (sizeof(cpu_port)),
	[cci_enable_req] "i" cpu_to_le32(CCI_ENABLE_REQ),
	[cci_control_status_bits] "i" cpu_to_le32(1),
#ifndef __ARMEB__
	[offsetof_cpu_port_mpidr_lsb] "i" (offsetof(struct cpu_port, mpidr)),
#else
	[offsetof_cpu_port_mpidr_lsb] "i" (offsetof(struct cpu_port, mpidr)+4),
#endif
	[offsetof_cpu_port_port] "i" (offsetof(struct cpu_port, port)),
	[sizeof_struct_cpu_port] "i" (sizeof(struct cpu_port)),
	[sizeof_struct_ace_port] "i" (sizeof(struct cci_ace_port)),
	[offsetof_port_phys] "i" (offsetof(struct cci_ace_port, phys)) );
}
/**
 * __cci_control_port_by_device() - function to control a CCI port by device
 *				    reference
 *
 * @dn: device node pointer of the device whose CCI port should be
 *	controlled
 * @enable: if true enables the port, if false disables it
 *
 * Return:
 *	0 on success
 *	-ENODEV on port look-up failure
 */
int notrace __cci_control_port_by_device(struct device_node *dn, bool enable)
{
	int port;

	if (!dn)
		return -ENODEV;

	port = __cci_ace_get_port(dn, ACE_LITE_PORT);
	if (WARN_ONCE(port < 0, "node %s ACE lite port look-up failure\n",
				dn->full_name))
		return -ENODEV;
	cci_port_control(port, enable);
	return 0;
}
EXPORT_SYMBOL_GPL(__cci_control_port_by_device);
/**
 * __cci_control_port_by_index() - function to control a CCI port by port index
 *
 * @port: port index previously retrieved with cci_ace_get_port()
 * @enable: if true enables the port, if false disables it
 *
 * Return:
 *	0 on success
 *	-ENODEV on port index out of range
 *	-EPERM if operation carried out on an ACE PORT
 */
int notrace __cci_control_port_by_index(u32 port, bool enable)
{
	if (port >= nb_cci_ports || ports[port].type == ACE_INVALID_PORT)
		return -ENODEV;
	/*
	 * CCI control for ports connected to CPUS is extremely fragile
	 * and must be made to go through a specific and controlled
	 * interface (ie cci_disable_port_by_cpu()); control by general purpose
	 * indexing is therefore disabled for ACE ports.
	 */
	if (ports[port].type == ACE_PORT)
		return -EPERM;

	cci_port_control(port, enable);
	return 0;
}
EXPORT_SYMBOL_GPL(__cci_control_port_by_index);
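/*
 * Usage sketch (editorial note, not part of the original file): a device
 * whose DT node carries a "cci-control-port" phandle could gate its
 * ACE-lite interface around power transitions, e.g.
 *
 *	err = __cci_control_port_by_device(dev->of_node, false);
 *	...power the master down and back up...
 *	err = __cci_control_port_by_device(dev->of_node, true);
 *
 * cci_ace_get_port(dev->of_node) returns the same port index for later use
 * with __cci_control_port_by_index().
 */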
static const struct of_device_id arm_cci_ctrl_if_matches[] = {
	{ .compatible = "arm,cci-400-ctrl-if", },
	{},
};
static int cci_probe_ports(struct device_node *np)
{
	struct cci_nb_ports const *cci_config;
	int ret, i, nb_ace = 0, nb_ace_lite = 0;
	struct device_node *cp;
	struct resource res;
	const char *match_str;
	bool is_ace;

	cci_config = of_match_node(arm_cci_matches, np)->data;
	if (!cci_config)
		return -ENODEV;

	nb_cci_ports = cci_config->nb_ace + cci_config->nb_ace_lite;

	ports = kcalloc(nb_cci_ports, sizeof(*ports), GFP_KERNEL);
	if (!ports)
		return -ENOMEM;

	for_each_child_of_node(np, cp) {
		if (!of_match_node(arm_cci_ctrl_if_matches, cp))
			continue;

		i = nb_ace + nb_ace_lite;

		if (i >= nb_cci_ports)
			break;

		if (of_property_read_string(cp, "interface-type",
					    &match_str)) {
			WARN(1, "node %s missing interface-type property\n",
			     cp->full_name);
			continue;
		}
		is_ace = strcmp(match_str, "ace") == 0;
		if (!is_ace && strcmp(match_str, "ace-lite")) {
			WARN(1, "node %s containing invalid interface-type property, skipping it\n",
			     cp->full_name);
			continue;
		}

		ret = of_address_to_resource(cp, 0, &res);
		if (!ret) {
			ports[i].base = ioremap(res.start, resource_size(&res));
			ports[i].phys = res.start;
		}
		if (ret || !ports[i].base) {
			WARN(1, "unable to ioremap CCI port %d\n", i);
			continue;
		}

		if (is_ace) {
			if (WARN_ON(nb_ace >= cci_config->nb_ace))
				continue;
			ports[i].type = ACE_PORT;
			++nb_ace;
		} else {
			if (WARN_ON(nb_ace_lite >= cci_config->nb_ace_lite))
				continue;
			ports[i].type = ACE_LITE_PORT;
			++nb_ace_lite;
		}
		ports[i].dn = cp;
	}

	/* initialize a stashed array of ACE ports to speed-up look-up */
	cci_ace_init_ports();

	/*
	 * Multi-cluster systems may need this data when non-coherent, during
	 * cluster power-up/power-down. Make sure it reaches main memory.
	 */
	sync_cache_w(&cci_ctrl_base);
	sync_cache_w(&cci_ctrl_phys);
	sync_cache_w(&ports);
	sync_cache_w(&cpu_port);
	__sync_cache_range_w(ports, sizeof(*ports) * nb_cci_ports);
	pr_info("ARM CCI driver probed\n");

	return 0;
}
#else /* !CONFIG_ARM_CCI400_PORT_CTRL */
static inline int cci_probe_ports(struct device_node *np)
{
	return 0;
}
#endif /* CONFIG_ARM_CCI400_PORT_CTRL */
static int cci_probe(void)
{
	int ret;
	struct device_node *np;
	struct resource res;

	np = of_find_matching_node(NULL, arm_cci_matches);
	if (!np || !of_device_is_available(np))
		return -ENODEV;

	ret = of_address_to_resource(np, 0, &res);
	if (!ret) {
		cci_ctrl_base = ioremap(res.start, resource_size(&res));
		cci_ctrl_phys = res.start;
	}
	if (ret || !cci_ctrl_base) {
		WARN(1, "unable to ioremap CCI ctrl\n");
		return -ENXIO;
	}

	return cci_probe_ports(np);
}
static int cci_init_status = -EAGAIN;
static DEFINE_MUTEX(cci_probing);
static int cci_init(void)
{
	if (cci_init_status != -EAGAIN)
		return cci_init_status;

	mutex_lock(&cci_probing);
	if (cci_init_status == -EAGAIN)
		cci_init_status = cci_probe();
	mutex_unlock(&cci_probing);
	return cci_init_status;
}
/*
 * To sort out early init calls ordering a helper function is provided to
 * check if the CCI driver has been initialized. The function checks if the
 * driver has been initialized, and if not it calls the init function that
 * probes the driver and updates the return value.
 */
bool cci_probed(void)
{
	return cci_init() == 0;
}
EXPORT_SYMBOL_GPL(cci_probed);
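/*
 * Usage sketch (editorial note, not part of the original file): platform
 * low-level power management code would typically check cci_probed() once
 * and then drive the exported port helpers, roughly:
 *
 *	if (!cci_probed())
 *		return -ENODEV;
 *	...
 *	// last active CPU in the cluster, caches already cleaned:
 *	cci_disable_port_by_cpu(read_cpuid_mpidr());
 *
 * with cci_enable_port_for_self() run from the early resume path before the
 * MMU is turned back on.
 */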
early_initcall(cci_init);
core_initcall(cci_platform_init);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ARM CCI support");