/*
 * CCI cache coherent interconnect driver
 *
 * Copyright (C) 2013 ARM Ltd.
 * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/arm-cci.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <asm/cacheflush.h>
#include <asm/smp_plat.h>
#define DRIVER_NAME		"CCI-400"
#define DRIVER_NAME_PMU		DRIVER_NAME " PMU"

#define CCI_PORT_CTRL		0x0
#define CCI_CTRL_STATUS		0xc

#define CCI_ENABLE_SNOOP_REQ	0x1
#define CCI_ENABLE_DVM_REQ	0x2
#define CCI_ENABLE_REQ		(CCI_ENABLE_SNOOP_REQ | CCI_ENABLE_DVM_REQ)
struct cci_nb_ports {
	unsigned int nb_ace;
	unsigned int nb_ace_lite;
};

enum cci_ace_port_type {
	ACE_INVALID_PORT = 0x0,
	ACE_PORT,
	ACE_LITE_PORT,
};

struct cci_ace_port {
	void __iomem *base;
	unsigned long phys;
	enum cci_ace_port_type type;
	struct device_node *dn;
};
static struct cci_ace_port *ports;
static unsigned int nb_cci_ports;

static void __iomem *cci_ctrl_base;
static unsigned long cci_ctrl_phys;
#ifdef CONFIG_HW_PERF_EVENTS

#define CCI_PMCR		0x0100
#define CCI_PID2		0x0fe8

#define CCI_PMCR_CEN		0x00000001
#define CCI_PMCR_NCNT_MASK	0x0000f800
#define CCI_PMCR_NCNT_SHIFT	11

#define CCI_PID2_REV_MASK	0xf0
#define CCI_PID2_REV_SHIFT	4

/* Revision indexes into port_event_range[] and pmu_names[] below */
#define CCI_REV_R0		0
#define CCI_REV_R1		1
#define CCI_REV_R1_PX		5
#define CCI_PMU_EVT_SEL		0x000
#define CCI_PMU_CNTR		0x004
#define CCI_PMU_CNTR_CTRL	0x008
#define CCI_PMU_OVRFLW		0x00c

#define CCI_PMU_OVRFLW_FLAG	1

#define CCI_PMU_CNTR_BASE(idx)	((idx) * SZ_4K)

#define CCI_PMU_CNTR_MASK	((1ULL << 32) - 1)

/*
 * Instead of an event id to monitor CCI cycles, a dedicated counter is
 * provided. Use 0xff to represent CCI cycles and hope that no future revisions
 * make use of this event in hardware.
 */
enum cci400_perf_events {
	CCI_PMU_CYCLES = 0xff
};

#define CCI_PMU_EVENT_MASK		0xff
#define CCI_PMU_EVENT_SOURCE(event)	((event >> 5) & 0x7)
#define CCI_PMU_EVENT_CODE(event)	(event & 0x1f)

#define CCI_PMU_MAX_HW_EVENTS 5   /* CCI PMU has 4 counters + 1 cycle counter */

#define CCI_PMU_CYCLE_CNTR_IDX		0
#define CCI_PMU_CNTR0_IDX		1
#define CCI_PMU_CNTR_LAST(cci_pmu)	(CCI_PMU_CYCLE_CNTR_IDX + cci_pmu->num_events - 1)
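
/*
 * Summary note (added for clarity, derived from the definitions above):
 * counter index 0 is the dedicated cycle counter and indices 1..4 are the
 * programmable event counters; each counter's registers occupy their own
 * 4K region starting at CCI_PMU_CNTR_BASE(idx) from the PMU base.
 */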
/*
 * CCI PMU event id is an 8-bit value made of two parts - bits 7:5 for one of 8
 * ports and bits 4:0 are event codes. There are different event codes
 * associated with each port type.
 *
 * Additionally, the range of events associated with the port types changed
 * between Rev0 and Rev1.
 *
 * The constants below define the range of valid codes for each port type for
 * the different revisions and are used to validate the event to be monitored.
 */

#define CCI_REV_R0_SLAVE_PORT_MIN_EV	0x00
#define CCI_REV_R0_SLAVE_PORT_MAX_EV	0x13
#define CCI_REV_R0_MASTER_PORT_MIN_EV	0x14
#define CCI_REV_R0_MASTER_PORT_MAX_EV	0x1a

#define CCI_REV_R1_SLAVE_PORT_MIN_EV	0x00
#define CCI_REV_R1_SLAVE_PORT_MAX_EV	0x14
#define CCI_REV_R1_MASTER_PORT_MIN_EV	0x00
#define CCI_REV_R1_MASTER_PORT_MAX_EV	0x11
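
/*
 * Illustrative decoding example (added for clarity, not an event list from
 * the TRM): an event id of 0x6a gives CCI_PMU_EVENT_SOURCE(0x6a) == 3 and
 * CCI_PMU_EVENT_CODE(0x6a) == 0x0a, i.e. event code 0x0a on interface 3;
 * whether that code is valid then depends on the port type and on the
 * Rev0/Rev1 ranges above.
 */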
struct pmu_port_event_ranges {
	u8 slave_min;
	u8 slave_max;
	u8 master_min;
	u8 master_max;
};

static struct pmu_port_event_ranges port_event_range[] = {
	[CCI_REV_R0] = {
		.slave_min = CCI_REV_R0_SLAVE_PORT_MIN_EV,
		.slave_max = CCI_REV_R0_SLAVE_PORT_MAX_EV,
		.master_min = CCI_REV_R0_MASTER_PORT_MIN_EV,
		.master_max = CCI_REV_R0_MASTER_PORT_MAX_EV,
	},
	[CCI_REV_R1] = {
		.slave_min = CCI_REV_R1_SLAVE_PORT_MIN_EV,
		.slave_max = CCI_REV_R1_SLAVE_PORT_MAX_EV,
		.master_min = CCI_REV_R1_MASTER_PORT_MIN_EV,
		.master_max = CCI_REV_R1_MASTER_PORT_MAX_EV,
	},
};
/*
 * Export different PMU names for the different revisions so that userspace
 * knows which event ids to use; they differ between revisions.
 */
static char *const pmu_names[] = {
	[CCI_REV_R0] = "CCI_400",
	[CCI_REV_R1] = "CCI_400_r1",
};
struct cci_pmu_hw_events {
	struct perf_event *events[CCI_PMU_MAX_HW_EVENTS];
	unsigned long used_mask[BITS_TO_LONGS(CCI_PMU_MAX_HW_EVENTS)];
	raw_spinlock_t pmu_lock;
};

struct cci_pmu {
	void __iomem *base;
	struct pmu pmu;
	int nr_irqs;
	int irqs[CCI_PMU_MAX_HW_EVENTS];
	unsigned long active_irqs;
	struct pmu_port_event_ranges *port_ranges;
	struct cci_pmu_hw_events hw_events;
	struct platform_device *plat_device;
	int num_events;
	atomic_t active_events;
	struct mutex reserve_mutex;
	cpumask_t cpus;
};
static struct cci_pmu *pmu;

#define to_cci_pmu(c)	(container_of(c, struct cci_pmu, pmu))
static bool is_duplicate_irq(int irq, int *irqs, int nr_irqs)
{
	int i;

	for (i = 0; i < nr_irqs; i++)
		if (irq == irqs[i])
			return true;

	return false;
}

static int probe_cci_revision(void)
{
	int rev;

	rev = readl_relaxed(cci_ctrl_base + CCI_PID2) & CCI_PID2_REV_MASK;
	rev >>= CCI_PID2_REV_SHIFT;

	if (rev < CCI_REV_R1_PX)
		return CCI_REV_R0;
	else
		return CCI_REV_R1;
}
static struct pmu_port_event_ranges *port_range_by_rev(void)
{
	int rev = probe_cci_revision();

	return &port_event_range[rev];
}

static int pmu_is_valid_slave_event(u8 ev_code)
{
	return pmu->port_ranges->slave_min <= ev_code &&
		ev_code <= pmu->port_ranges->slave_max;
}

static int pmu_is_valid_master_event(u8 ev_code)
{
	return pmu->port_ranges->master_min <= ev_code &&
		ev_code <= pmu->port_ranges->master_max;
}
static int pmu_validate_hw_event(u8 hw_event)
{
	u8 ev_source = CCI_PMU_EVENT_SOURCE(hw_event);
	u8 ev_code = CCI_PMU_EVENT_CODE(hw_event);

	/* Sources 0-4 are the CCI-400 slave interfaces, 5-7 the masters. */
	switch (ev_source) {
	case 0 ... 4:
		/* Slave Interface */
		if (pmu_is_valid_slave_event(ev_code))
			return hw_event;
		break;
	case 5 ... 7:
		/* Master Interface */
		if (pmu_is_valid_master_event(ev_code))
			return hw_event;
		break;
	}

	return -ENOENT;
}
static int pmu_is_valid_counter(struct cci_pmu *cci_pmu, int idx)
{
	return CCI_PMU_CYCLE_CNTR_IDX <= idx &&
		idx <= CCI_PMU_CNTR_LAST(cci_pmu);
}
static u32 pmu_read_register(int idx, unsigned int offset)
{
	return readl_relaxed(pmu->base + CCI_PMU_CNTR_BASE(idx) + offset);
}

static void pmu_write_register(u32 value, int idx, unsigned int offset)
{
	writel_relaxed(value, pmu->base + CCI_PMU_CNTR_BASE(idx) + offset);
}
static void pmu_disable_counter(int idx)
{
	pmu_write_register(0, idx, CCI_PMU_CNTR_CTRL);
}

static void pmu_enable_counter(int idx)
{
	pmu_write_register(1, idx, CCI_PMU_CNTR_CTRL);
}

static void pmu_set_event(int idx, unsigned long event)
{
	event &= CCI_PMU_EVENT_MASK;
	pmu_write_register(event, idx, CCI_PMU_EVT_SEL);
}

static u32 pmu_get_max_counters(void)
{
	u32 n_cnts = (readl_relaxed(cci_ctrl_base + CCI_PMCR) &
		      CCI_PMCR_NCNT_MASK) >> CCI_PMCR_NCNT_SHIFT;

	/* add 1 for cycle counter */
	return n_cnts + 1;
}
static int pmu_get_event_idx(struct cci_pmu_hw_events *hw, struct perf_event *event)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	struct hw_perf_event *hw_event = &event->hw;
	unsigned long cci_event = hw_event->config_base & CCI_PMU_EVENT_MASK;
	int idx;

	if (cci_event == CCI_PMU_CYCLES) {
		if (test_and_set_bit(CCI_PMU_CYCLE_CNTR_IDX, hw->used_mask))
			return -EAGAIN;

		return CCI_PMU_CYCLE_CNTR_IDX;
	}

	for (idx = CCI_PMU_CNTR0_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); ++idx)
		if (!test_and_set_bit(idx, hw->used_mask))
			return idx;

	/* No counters available */
	return -EAGAIN;
}

static int pmu_map_event(struct perf_event *event)
{
	int mapping;
	u8 config = event->attr.config & CCI_PMU_EVENT_MASK;

	if (event->attr.type < PERF_TYPE_MAX)
		return -ENOENT;

	if (config == CCI_PMU_CYCLES)
		mapping = config;
	else
		mapping = pmu_validate_hw_event(config);

	return mapping;
}
static int pmu_request_irq(struct cci_pmu *cci_pmu, irq_handler_t handler)
{
	int i;
	struct platform_device *pmu_device = cci_pmu->plat_device;

	if (unlikely(!pmu_device))
		return -ENODEV;

	if (pmu->nr_irqs < 1) {
		dev_err(&pmu_device->dev, "no irqs for CCI PMUs defined\n");
		return -ENODEV;
	}

	/*
	 * Register all available CCI PMU interrupts. In the interrupt handler
	 * we iterate over the counters checking for interrupt source (the
	 * overflowing counter) and clear it.
	 *
	 * This should allow handling of non-unique interrupt for the counters.
	 */
	for (i = 0; i < pmu->nr_irqs; i++) {
		int err = request_irq(pmu->irqs[i], handler, IRQF_SHARED,
				"arm-cci-pmu", cci_pmu);
		if (err) {
			dev_err(&pmu_device->dev, "unable to request IRQ%d for ARM CCI PMU counters\n",
				pmu->irqs[i]);
			return err;
		}

		set_bit(i, &pmu->active_irqs);
	}

	return 0;
}

static void pmu_free_irq(struct cci_pmu *cci_pmu)
{
	int i;

	for (i = 0; i < pmu->nr_irqs; i++) {
		if (!test_and_clear_bit(i, &pmu->active_irqs))
			continue;

		free_irq(pmu->irqs[i], cci_pmu);
	}
}
static u32 pmu_read_counter(struct perf_event *event)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	struct hw_perf_event *hw_counter = &event->hw;
	int idx = hw_counter->idx;
	u32 value;

	if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
		return 0;
	}
	value = pmu_read_register(idx, CCI_PMU_CNTR);

	return value;
}

static void pmu_write_counter(struct perf_event *event, u32 value)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	struct hw_perf_event *hw_counter = &event->hw;
	int idx = hw_counter->idx;

	if (unlikely(!pmu_is_valid_counter(cci_pmu, idx)))
		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
	else
		pmu_write_register(value, idx, CCI_PMU_CNTR);
}
static u64 pmu_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;

	do {
		prev_raw_count = local64_read(&hwc->prev_count);
		new_raw_count = pmu_read_counter(event);
	} while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
		 new_raw_count) != prev_raw_count);

	delta = (new_raw_count - prev_raw_count) & CCI_PMU_CNTR_MASK;

	local64_add(delta, &event->count);

	return new_raw_count;
}

static void pmu_read(struct perf_event *event)
{
	pmu_event_update(event);
}
void pmu_event_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	/*
	 * The CCI PMU counters have a period of 2^32. To account for the
	 * possibility of extreme interrupt latency we program for a period of
	 * half that. Hopefully we can handle the interrupt before another 2^31
	 * events occur and the counter overtakes its previous value.
	 */
	u64 val = 1ULL << 31;
	local64_set(&hwc->prev_count, val);
	pmu_write_counter(event, val);
}
static irqreturn_t pmu_handle_irq(int irq_num, void *dev)
{
	unsigned long flags;
	struct cci_pmu *cci_pmu = dev;
	struct cci_pmu_hw_events *events = &pmu->hw_events;
	int idx, handled = IRQ_NONE;

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/*
	 * Iterate over counters and update the corresponding perf events.
	 * This should work regardless of whether we have per-counter overflow
	 * interrupt or a combined overflow interrupt.
	 */
	for (idx = CCI_PMU_CYCLE_CNTR_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) {
		struct perf_event *event = events->events[idx];
		struct hw_perf_event *hw_counter;

		if (!event)
			continue;

		hw_counter = &event->hw;

		/* Did this counter overflow? */
		if (!(pmu_read_register(idx, CCI_PMU_OVRFLW) &
		      CCI_PMU_OVRFLW_FLAG))
			continue;

		pmu_write_register(CCI_PMU_OVRFLW_FLAG, idx, CCI_PMU_OVRFLW);

		pmu_event_update(event);
		pmu_event_set_period(event);
		handled = IRQ_HANDLED;
	}
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);

	return IRQ_RETVAL(handled);
}

static int cci_pmu_get_hw(struct cci_pmu *cci_pmu)
{
	int ret = pmu_request_irq(cci_pmu, pmu_handle_irq);
	if (ret) {
		pmu_free_irq(cci_pmu);
		return ret;
	}
	return 0;
}

static void cci_pmu_put_hw(struct cci_pmu *cci_pmu)
{
	pmu_free_irq(cci_pmu);
}
static void hw_perf_event_destroy(struct perf_event *event)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	atomic_t *active_events = &cci_pmu->active_events;
	struct mutex *reserve_mutex = &cci_pmu->reserve_mutex;

	if (atomic_dec_and_mutex_lock(active_events, reserve_mutex)) {
		cci_pmu_put_hw(cci_pmu);
		mutex_unlock(reserve_mutex);
	}
}
static void cci_pmu_enable(struct pmu *pmu)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
	int enabled = bitmap_weight(hw_events->used_mask, cci_pmu->num_events);
	unsigned long flags;
	u32 val;

	if (!enabled)
		return;

	raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);

	/* Enable all the PMU counters. */
	val = readl_relaxed(cci_ctrl_base + CCI_PMCR) | CCI_PMCR_CEN;
	writel(val, cci_ctrl_base + CCI_PMCR);
	raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
}

static void cci_pmu_disable(struct pmu *pmu)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);

	/* Disable all the PMU counters. */
	val = readl_relaxed(cci_ctrl_base + CCI_PMCR) & ~CCI_PMCR_CEN;
	writel(val, cci_ctrl_base + CCI_PMCR);
	raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
}
static void cci_pmu_start(struct perf_event *event, int pmu_flags)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	unsigned long flags;

	/*
	 * To handle interrupt latency, we always reprogram the period
	 * regardless of PERF_EF_RELOAD.
	 */
	if (pmu_flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;

	if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
		return;
	}

	raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);

	/* Configure the event to count, unless you are counting cycles */
	if (idx != CCI_PMU_CYCLE_CNTR_IDX)
		pmu_set_event(idx, hwc->config_base);

	pmu_event_set_period(event);
	pmu_enable_counter(idx);

	raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
}

static void cci_pmu_stop(struct perf_event *event, int pmu_flags)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (hwc->state & PERF_HES_STOPPED)
		return;

	if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
		return;
	}

	/*
	 * We always reprogram the counter, so ignore PERF_EF_UPDATE. See
	 * cci_pmu_start()
	 */
	pmu_disable_counter(idx);
	pmu_event_update(event);
	hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}
static int cci_pmu_add(struct perf_event *event, int flags)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	perf_pmu_disable(event->pmu);

	/* If we don't have a space for the counter then finish early. */
	idx = pmu_get_event_idx(hw_events, event);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	event->hw.idx = idx;
	hw_events->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		cci_pmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	perf_pmu_enable(event->pmu);
	return err;
}

static void cci_pmu_del(struct perf_event *event, int flags)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	cci_pmu_stop(event, PERF_EF_UPDATE);
	hw_events->events[idx] = NULL;
	clear_bit(idx, hw_events->used_mask);

	perf_event_update_userpage(event);
}
static int
validate_event(struct cci_pmu_hw_events *hw_events,
	       struct perf_event *event)
{
	if (is_software_event(event))
		return 1;

	if (event->state < PERF_EVENT_STATE_OFF)
		return 1;

	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
		return 1;

	return pmu_get_event_idx(hw_events, event) >= 0;
}

static int
validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct cci_pmu_hw_events fake_pmu = {
		/*
		 * Initialise the fake PMU. We only need to populate the
		 * used_mask for the purposes of validation.
		 */
		.used_mask = CPU_BITS_NONE,
	};

	if (!validate_event(&fake_pmu, leader))
		return -EINVAL;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(&fake_pmu, sibling))
			return -EINVAL;
	}

	if (!validate_event(&fake_pmu, event))
		return -EINVAL;

	return 0;
}
static int
__hw_perf_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int mapping;

	mapping = pmu_map_event(event);

	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet.
	 */
	hwc->idx		= -1;
	hwc->config_base	= 0;

	/*
	 * Store the event encoding into the config_base field.
	 */
	hwc->config_base	|= (unsigned long)mapping;

	/*
	 * Limit the sample_period to half of the counter width. That way, the
	 * new counter value is far less likely to overtake the previous one
	 * unless you have some serious IRQ latency issues.
	 */
	hwc->sample_period	= CCI_PMU_CNTR_MASK >> 1;
	hwc->last_period	= hwc->sample_period;
	local64_set(&hwc->period_left, hwc->sample_period);

	if (event->group_leader != event) {
		if (validate_group(event) != 0)
			return -EINVAL;
	}

	return 0;
}
static int cci_pmu_event_init(struct perf_event *event)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	atomic_t *active_events = &cci_pmu->active_events;
	int err = 0;
	int cpu;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* Shared by all CPUs, no meaningful state to sample */
	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EOPNOTSUPP;

	/* We have no filtering of any kind */
	if (event->attr.exclude_user	||
	    event->attr.exclude_kernel	||
	    event->attr.exclude_hv	||
	    event->attr.exclude_idle	||
	    event->attr.exclude_host	||
	    event->attr.exclude_guest)
		return -EINVAL;

	/*
	 * Following the example set by other "uncore" PMUs, we accept any CPU
	 * and rewrite its affinity dynamically rather than having perf core
	 * handle cpu == -1 and pid == -1 for this case.
	 *
	 * The perf core will pin online CPUs for the duration of this call and
	 * the event being installed into its context, so the PMU's CPU can't
	 * change under our feet.
	 */
	cpu = cpumask_first(&cci_pmu->cpus);
	if (event->cpu < 0 || cpu < 0)
		return -EINVAL;
	event->cpu = cpu;

	event->destroy = hw_perf_event_destroy;
	if (!atomic_inc_not_zero(active_events)) {
		mutex_lock(&cci_pmu->reserve_mutex);
		if (atomic_read(active_events) == 0)
			err = cci_pmu_get_hw(cci_pmu);
		if (!err)
			atomic_inc(active_events);
		mutex_unlock(&cci_pmu->reserve_mutex);
	}
	if (err)
		return err;

	err = __hw_perf_event_init(event);
	if (err)
		hw_perf_event_destroy(event);

	return err;
}
static ssize_t pmu_attr_cpumask_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	int n = cpulist_scnprintf(buf, PAGE_SIZE - 2, &pmu->cpus);

	buf[n++] = '\n';
	buf[n] = '\0';
	return n;
}

static DEVICE_ATTR(cpumask, S_IRUGO, pmu_attr_cpumask_show, NULL);

static struct attribute *pmu_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group pmu_attr_group = {
	.attrs = pmu_attrs,
};

static const struct attribute_group *pmu_attr_groups[] = {
	&pmu_attr_group,
	NULL
};
static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev)
{
	char *name = pmu_names[probe_cci_revision()];
	cci_pmu->pmu = (struct pmu) {
		.name		= name,
		.task_ctx_nr	= perf_invalid_context,
		.pmu_enable	= cci_pmu_enable,
		.pmu_disable	= cci_pmu_disable,
		.event_init	= cci_pmu_event_init,
		.add		= cci_pmu_add,
		.del		= cci_pmu_del,
		.start		= cci_pmu_start,
		.stop		= cci_pmu_stop,
		.read		= pmu_read,
		.attr_groups	= pmu_attr_groups,
	};

	cci_pmu->plat_device = pdev;
	cci_pmu->num_events = pmu_get_max_counters();

	return perf_pmu_register(&cci_pmu->pmu, name, -1);
}
static int cci_pmu_cpu_notifier(struct notifier_block *self,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;
	unsigned int target;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DOWN_PREPARE:
		if (!cpumask_test_and_clear_cpu(cpu, &pmu->cpus))
			break;
		target = cpumask_any_but(cpu_online_mask, cpu);
		if (target >= nr_cpu_ids)	/* UP, last CPU */
			break;
		/*
		 * TODO: migrate context once core races on event->ctx have
		 * been fixed.
		 */
		cpumask_set_cpu(target, &pmu->cpus);
	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block cci_pmu_cpu_nb = {
	.notifier_call	= cci_pmu_cpu_notifier,
	/*
	 * to migrate uncore events, our notifier should be executed
	 * before perf core's notifier.
	 */
	.priority	= CPU_PRI_PERF + 1,
};

static const struct of_device_id arm_cci_pmu_matches[] = {
	{
		.compatible = "arm,cci-400-pmu",
	},
	{},
};
static int cci_pmu_probe(struct platform_device *pdev)
{
	struct resource *res;
	int i, ret, irq;

	pmu = devm_kzalloc(&pdev->dev, sizeof(*pmu), GFP_KERNEL);
	if (!pmu)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pmu->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(pmu->base))
		return -ENOMEM;

	/*
	 * CCI PMU has 5 overflow signals - one per counter; but some may be tied
	 * together to a common interrupt.
	 */
	for (i = 0; i < CCI_PMU_MAX_HW_EVENTS; i++) {
		irq = platform_get_irq(pdev, i);
		if (irq < 0)
			break;

		if (is_duplicate_irq(irq, pmu->irqs, pmu->nr_irqs))
			continue;

		pmu->irqs[pmu->nr_irqs++] = irq;
	}

	/*
	 * Ensure that the device tree has as many interrupts as the number
	 * of counters.
	 */
	if (i < CCI_PMU_MAX_HW_EVENTS) {
		dev_warn(&pdev->dev, "Incorrect number of interrupts: %d, should be %d\n",
			i, CCI_PMU_MAX_HW_EVENTS);
		return -EINVAL;
	}

	pmu->port_ranges = port_range_by_rev();
	if (!pmu->port_ranges) {
		dev_warn(&pdev->dev, "CCI PMU version not supported\n");
		return -EINVAL;
	}

	raw_spin_lock_init(&pmu->hw_events.pmu_lock);
	mutex_init(&pmu->reserve_mutex);
	atomic_set(&pmu->active_events, 0);
	cpumask_set_cpu(smp_processor_id(), &pmu->cpus);

	ret = register_cpu_notifier(&cci_pmu_cpu_nb);
	if (ret)
		return ret;

	ret = cci_pmu_init(pmu, pdev);
	if (ret)
		return ret;

	return 0;
}
static int cci_platform_probe(struct platform_device *pdev)
{
	if (!cci_probed())
		return -ENODEV;

	return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
}

#endif /* CONFIG_HW_PERF_EVENTS */
struct cpu_port {
	u64 mpidr;
	u32 port;
};

/*
 * Use the port MSB as valid flag, shift can be made dynamic
 * by computing number of bits required for port indexes.
 * Code disabling CCI cpu ports runs with D-cache invalidated
 * and SCTLR bit clear so data accesses must be kept to a minimum
 * to improve performance; for now shift is left static to
 * avoid one more data access while disabling the CCI port.
 */
#define PORT_VALID_SHIFT	31
#define PORT_VALID		(0x1 << PORT_VALID_SHIFT)
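
/*
 * Example encoding (added for illustration): a CPU wired to CCI port index 2
 * ends up with cpu_port.port == (PORT_VALID | 2) == 0x80000002 once
 * init_cpu_port() below has run for it.
 */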
static inline void init_cpu_port(struct cpu_port *port, u32 index, u64 mpidr)
{
	port->port = PORT_VALID | index;
	port->mpidr = mpidr;
}

static inline bool cpu_port_is_valid(struct cpu_port *port)
{
	return !!(port->port & PORT_VALID);
}

static inline bool cpu_port_match(struct cpu_port *port, u64 mpidr)
{
	return port->mpidr == (mpidr & MPIDR_HWID_BITMASK);
}

static struct cpu_port cpu_port[NR_CPUS];
/**
 * __cci_ace_get_port - Function to retrieve the port index connected to
 *			a cpu or device.
 *
 * @dn: device node of the device to look-up
 * @type: port type
 *
 * Return value:
 *	- CCI port index if success
 *	- -ENODEV if failure
 */
static int __cci_ace_get_port(struct device_node *dn, int type)
{
	int i;
	bool ace_match;
	struct device_node *cci_portn;

	cci_portn = of_parse_phandle(dn, "cci-control-port", 0);
	for (i = 0; i < nb_cci_ports; i++) {
		ace_match = ports[i].type == type;
		if (ace_match && cci_portn == ports[i].dn)
			return i;
	}
	return -ENODEV;
}

int cci_ace_get_port(struct device_node *dn)
{
	return __cci_ace_get_port(dn, ACE_LITE_PORT);
}
EXPORT_SYMBOL_GPL(cci_ace_get_port);
static void cci_ace_init_ports(void)
{
	int port, cpu;
	struct device_node *cpun;

	/*
	 * Port index look-up speeds up the function disabling ports by CPU,
	 * since the logical to port index mapping is done once and does
	 * not change after system boot.
	 * The stashed index array is initialized for all possible CPUs
	 * at probe time.
	 */
	for_each_possible_cpu(cpu) {
		/* too early to use cpu->of_node */
		cpun = of_get_cpu_node(cpu, NULL);

		if (WARN(!cpun, "Missing cpu device node\n"))
			continue;

		port = __cci_ace_get_port(cpun, ACE_PORT);
		if (port < 0)
			continue;

		init_cpu_port(&cpu_port[cpu], port, cpu_logical_map(cpu));
	}

	for_each_possible_cpu(cpu) {
		WARN(!cpu_port_is_valid(&cpu_port[cpu]),
			"CPU %u does not have an associated CCI port\n",
			cpu);
	}
}
/*
 * Functions to enable/disable a CCI interconnect slave port
 *
 * They are called by low-level power management code to disable slave
 * interfaces snoops and DVM broadcast.
 * Since they may execute with cache data allocation disabled and after
 * the caches have been cleaned and invalidated, the functions provide no
 * explicit locking: normal cacheable kernel locks based on ldrex/strex
 * may not work while the D-cache is off.
 * Locking has to be provided by BSP implementations to ensure proper
 * operations.
 */
/**
 * cci_port_control() - function to control a CCI port
 *
 * @port: index of the port to setup
 * @enable: if true enables the port, if false disables it
 */
static void notrace cci_port_control(unsigned int port, bool enable)
{
	void __iomem *base = ports[port].base;

	writel_relaxed(enable ? CCI_ENABLE_REQ : 0, base + CCI_PORT_CTRL);
	/*
	 * This function is called from power down procedures
	 * and must not execute any instruction that might
	 * cause the processor to be put in a quiescent state
	 * (eg wfi). Hence, cpu_relax() can not be added to this
	 * read loop to optimize power, since it might hide possibly
	 * disruptive operations.
	 */
	while (readl_relaxed(cci_ctrl_base + CCI_CTRL_STATUS) & 0x1)
		;
}
/**
 * cci_disable_port_by_cpu() - function to disable a CCI port by CPU
 *			       reference
 *
 * @mpidr: mpidr of the CPU whose CCI port should be disabled
 *
 * Disabling a CCI port for a CPU implies disabling the CCI port
 * controlling that CPU cluster. Code disabling CPU CCI ports
 * must make sure that the CPU running the code is the last active CPU
 * in the cluster ie all other CPUs are quiescent in a low power state.
 *
 * Return:
 *	0 on success
 *	-ENODEV on port look-up failure
 */
int notrace cci_disable_port_by_cpu(u64 mpidr)
{
	int cpu;
	bool is_valid;

	for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
		is_valid = cpu_port_is_valid(&cpu_port[cpu]);
		if (is_valid && cpu_port_match(&cpu_port[cpu], mpidr)) {
			cci_port_control(cpu_port[cpu].port, false);
			return 0;
		}
	}
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(cci_disable_port_by_cpu);
/**
 * cci_enable_port_for_self() - enable a CCI port for calling CPU
 *
 * Enabling a CCI port for the calling CPU implies enabling the CCI
 * port controlling that CPU's cluster. Caller must make sure that the
 * CPU running the code is the first active CPU in the cluster and all
 * other CPUs are quiescent in a low power state or waiting for this CPU
 * to complete the CCI initialization.
 *
 * Because this is called when the MMU is still off and with no stack,
 * the code must be position independent and ideally rely on callee
 * clobbered registers only. To achieve this we must code this function
 * entirely in assembler.
 *
 * On success this returns with the proper CCI port enabled. In case of
 * any failure this never returns as the inability to enable the CCI is
 * fatal and there is no possible recovery at this stage.
 */
asmlinkage void __naked cci_enable_port_for_self(void)
{
	asm volatile ("\n"

"	mrc	p15, 0, r0, c0, c0, 5	@ get MPIDR value \n"
"	and	r0, r0, #"__stringify(MPIDR_HWID_BITMASK)" \n"
"	adr	r1, 5f \n"
"	ldr	r2, [r1] \n"
"	add	r1, r1, r2		@ &cpu_port \n"
"	add	ip, r1, %[sizeof_cpu_port] \n"

	/* Loop over the cpu_port array looking for a matching MPIDR */
"1:	ldr	r2, [r1, %[offsetof_cpu_port_mpidr_lsb]] \n"
"	cmp	r2, r0			@ compare MPIDR \n"
"	bne	2f \n"

	/* Found a match, now test port validity */
"	ldr	r3, [r1, %[offsetof_cpu_port_port]] \n"
"	tst	r3, #"__stringify(PORT_VALID)" \n"
"	bne	3f \n"

	/* no match, loop with the next cpu_port entry */
"2:	add	r1, r1, %[sizeof_struct_cpu_port] \n"
"	cmp	r1, ip			@ done? \n"
"	blo	1b \n"

	/* CCI port not found -- cheaply try to stall this CPU */
"cci_port_not_found: \n"
"	wfi \n"
"	wfe \n"
"	b	cci_port_not_found \n"

	/* Use matched port index to look up the corresponding ports entry */
"3:	bic	r3, r3, #"__stringify(PORT_VALID)" \n"
"	adr	r0, 6f \n"
"	ldmia	r0, {r1, r2} \n"
"	sub	r1, r1, r0		@ virt - phys \n"
"	ldr	r0, [r0, r2]		@ *(&ports) \n"
"	mov	r2, %[sizeof_struct_ace_port] \n"
"	mla	r0, r2, r3, r0		@ &ports[index] \n"
"	sub	r0, r0, r1		@ virt_to_phys() \n"

	/* Enable the CCI port */
"	ldr	r0, [r0, %[offsetof_port_phys]] \n"
"	mov	r3, %[cci_enable_req]\n"
"	str	r3, [r0, #"__stringify(CCI_PORT_CTRL)"] \n"

	/* poll the status reg for completion */
"	adr	r1, 7f \n"
"	ldr	r0, [r1] \n"
"	ldr	r0, [r0, r1]		@ cci_ctrl_base \n"
"4:	ldr	r1, [r0, #"__stringify(CCI_CTRL_STATUS)"] \n"
"	tst	r1, %[cci_control_status_bits] \n"
"	bne	4b \n"

"	mov	r0, #0 \n"
"	bx	lr \n"

"	.align	2 \n"
"5:	.word	cpu_port - . \n"
"6:	.word	. \n"
"	.word	ports - 6b \n"
"7:	.word	cci_ctrl_phys - . \n"
	: :
	[sizeof_cpu_port] "i" (sizeof(cpu_port)),
	[cci_enable_req] "i" cpu_to_le32(CCI_ENABLE_REQ),
	[cci_control_status_bits] "i" cpu_to_le32(1),
#ifndef __ARMEB__
	[offsetof_cpu_port_mpidr_lsb] "i" (offsetof(struct cpu_port, mpidr)),
#else
	[offsetof_cpu_port_mpidr_lsb] "i" (offsetof(struct cpu_port, mpidr)+4),
#endif
	[offsetof_cpu_port_port] "i" (offsetof(struct cpu_port, port)),
	[sizeof_struct_cpu_port] "i" (sizeof(struct cpu_port)),
	[sizeof_struct_ace_port] "i" (sizeof(struct cci_ace_port)),
	[offsetof_port_phys] "i" (offsetof(struct cci_ace_port, phys)) );

	unreachable();
}
/**
 * __cci_control_port_by_device() - function to control a CCI port by device
 *				    reference
 *
 * @dn: device node pointer of the device whose CCI port should be
 *	controlled
 * @enable: if true enables the port, if false disables it
 *
 * Return:
 *	0 on success
 *	-ENODEV on port look-up failure
 */
int notrace __cci_control_port_by_device(struct device_node *dn, bool enable)
{
	int port;

	if (!dn)
		return -ENODEV;

	port = __cci_ace_get_port(dn, ACE_LITE_PORT);
	if (WARN_ONCE(port < 0, "node %s ACE lite port look-up failure\n",
				dn->full_name))
		return -ENODEV;

	cci_port_control(port, enable);
	return 0;
}
EXPORT_SYMBOL_GPL(__cci_control_port_by_device);
/**
 * __cci_control_port_by_index() - function to control a CCI port by port index
 *
 * @port: port index previously retrieved with cci_ace_get_port()
 * @enable: if true enables the port, if false disables it
 *
 * Return:
 *	0 on success
 *	-ENODEV on port index out of range
 *	-EPERM if operation carried out on an ACE PORT
 */
int notrace __cci_control_port_by_index(u32 port, bool enable)
{
	if (port >= nb_cci_ports || ports[port].type == ACE_INVALID_PORT)
		return -ENODEV;
	/*
	 * CCI control for ports connected to CPUS is extremely fragile
	 * and must be made to go through a specific and controlled
	 * interface (ie cci_disable_port_by_cpu()); control by general
	 * purpose indexing is therefore disabled for ACE ports.
	 */
	if (ports[port].type == ACE_PORT)
		return -EPERM;

	cci_port_control(port, enable);
	return 0;
}
EXPORT_SYMBOL_GPL(__cci_control_port_by_index);
static const struct cci_nb_ports cci400_ports = {
	.nb_ace = 2,
	.nb_ace_lite = 3
};

static const struct of_device_id arm_cci_matches[] = {
	{.compatible = "arm,cci-400", .data = &cci400_ports },
	{},
};

static const struct of_device_id arm_cci_ctrl_if_matches[] = {
	{.compatible = "arm,cci-400-ctrl-if", },
	{},
};
static int cci_probe(void)
{
	struct cci_nb_ports const *cci_config;
	int ret, i, nb_ace = 0, nb_ace_lite = 0;
	struct device_node *np, *cp;
	struct resource res;
	const char *match_str;
	bool is_ace;

	np = of_find_matching_node(NULL, arm_cci_matches);
	if (!np)
		return -ENODEV;

	cci_config = of_match_node(arm_cci_matches, np)->data;
	if (!cci_config)
		return -ENODEV;

	nb_cci_ports = cci_config->nb_ace + cci_config->nb_ace_lite;

	ports = kcalloc(nb_cci_ports, sizeof(*ports), GFP_KERNEL);
	if (!ports)
		return -ENOMEM;

	ret = of_address_to_resource(np, 0, &res);
	if (!ret) {
		cci_ctrl_base = ioremap(res.start, resource_size(&res));
		cci_ctrl_phys = res.start;
	}
	if (ret || !cci_ctrl_base) {
		WARN(1, "unable to ioremap CCI ctrl\n");
		ret = -ENXIO;
		goto memalloc_err;
	}

	for_each_child_of_node(np, cp) {
		if (!of_match_node(arm_cci_ctrl_if_matches, cp))
			continue;

		i = nb_ace + nb_ace_lite;

		if (i >= nb_cci_ports)
			break;

		if (of_property_read_string(cp, "interface-type",
					&match_str)) {
			WARN(1, "node %s missing interface-type property\n",
				  cp->full_name);
			continue;
		}
		is_ace = strcmp(match_str, "ace") == 0;
		if (!is_ace && strcmp(match_str, "ace-lite")) {
			WARN(1, "node %s containing invalid interface-type property, skipping it\n",
					cp->full_name);
			continue;
		}

		ret = of_address_to_resource(cp, 0, &res);
		if (!ret) {
			ports[i].base = ioremap(res.start, resource_size(&res));
			ports[i].phys = res.start;
		}
		if (ret || !ports[i].base) {
			WARN(1, "unable to ioremap CCI port %d\n", i);
			continue;
		}

		if (is_ace) {
			if (WARN_ON(nb_ace >= cci_config->nb_ace))
				continue;
			ports[i].type = ACE_PORT;
			++nb_ace;
		} else {
			if (WARN_ON(nb_ace_lite >= cci_config->nb_ace_lite))
				continue;
			ports[i].type = ACE_LITE_PORT;
			++nb_ace_lite;
		}
		ports[i].dn = cp;
	}

	/* initialize a stashed array of ACE ports to speed-up look-up */
	cci_ace_init_ports();

	/*
	 * Multi-cluster systems may need this data when non-coherent, during
	 * cluster power-up/power-down. Make sure it reaches main memory.
	 */
	sync_cache_w(&cci_ctrl_base);
	sync_cache_w(&cci_ctrl_phys);
	sync_cache_w(&ports);
	sync_cache_w(&cpu_port);
	__sync_cache_range_w(ports, sizeof(*ports) * nb_cci_ports);
	pr_info("ARM CCI driver probed\n");
	return 0;

memalloc_err:

	kfree(ports);
	return ret;
}
static int cci_init_status = -EAGAIN;
static DEFINE_MUTEX(cci_probing);

static int cci_init(void)
{
	if (cci_init_status != -EAGAIN)
		return cci_init_status;

	mutex_lock(&cci_probing);
	if (cci_init_status == -EAGAIN)
		cci_init_status = cci_probe();
	mutex_unlock(&cci_probing);
	return cci_init_status;
}
#ifdef CONFIG_HW_PERF_EVENTS
static struct platform_driver cci_pmu_driver = {
	.driver = {
		   .name = DRIVER_NAME_PMU,
		   .of_match_table = arm_cci_pmu_matches,
		  },
	.probe = cci_pmu_probe,
};

static struct platform_driver cci_platform_driver = {
	.driver = {
		   .name = DRIVER_NAME,
		   .of_match_table = arm_cci_matches,
		  },
	.probe = cci_platform_probe,
};

static int __init cci_platform_init(void)
{
	int ret;

	ret = platform_driver_register(&cci_pmu_driver);
	if (ret)
		return ret;

	return platform_driver_register(&cci_platform_driver);
}

#else /* !CONFIG_HW_PERF_EVENTS */

static int __init cci_platform_init(void)
{
	return 0;
}

#endif /* CONFIG_HW_PERF_EVENTS */
/*
 * To sort out early init calls ordering, a helper function is provided to
 * check whether the CCI driver has been initialized; if it has not, the
 * helper calls the init function that probes the driver and updates the
 * return value.
 */
bool cci_probed(void)
{
	return cci_init() == 0;
}
EXPORT_SYMBOL_GPL(cci_probed);

early_initcall(cci_init);
core_initcall(cci_platform_init);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ARM CCI support");