/*
 * CCI cache coherent interconnect driver
 *
 * Copyright (C) 2013 ARM Ltd.
 * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/arm-cci.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <asm/cacheflush.h>
#include <asm/smp_plat.h>

static void __iomem *cci_ctrl_base;
static unsigned long cci_ctrl_phys;

#ifdef CONFIG_ARM_CCI400_PORT_CTRL
struct cci_nb_ports {
	unsigned int nb_ace;
	unsigned int nb_ace_lite;
};

static const struct cci_nb_ports cci400_ports = {
	.nb_ace = 2,
	.nb_ace_lite = 3
};

#define CCI400_PORTS_DATA	(&cci400_ports)
#else
#define CCI400_PORTS_DATA	(NULL)
#endif

static const struct of_device_id arm_cci_matches[] = {
#ifdef CONFIG_ARM_CCI400_COMMON
	{.compatible = "arm,cci-400", .data = CCI400_PORTS_DATA },
#endif
	{},
};

#ifdef CONFIG_ARM_CCI400_PMU

#define DRIVER_NAME		"CCI-400"
#define DRIVER_NAME_PMU		DRIVER_NAME " PMU"

#define CCI_PMCR		0x0100
#define CCI_PID2		0x0fe8

#define CCI_PMCR_CEN		0x00000001
#define CCI_PMCR_NCNT_MASK	0x0000f800
#define CCI_PMCR_NCNT_SHIFT	11

#define CCI_PID2_REV_MASK	0xf0
#define CCI_PID2_REV_SHIFT	4

#define CCI_PMU_EVT_SEL		0x000
#define CCI_PMU_CNTR		0x004
#define CCI_PMU_CNTR_CTRL	0x008
#define CCI_PMU_OVRFLW		0x00c

#define CCI_PMU_OVRFLW_FLAG	1

#define CCI_PMU_CNTR_BASE(idx)	((idx) * SZ_4K)

#define CCI_PMU_CNTR_MASK	((1ULL << 32) - 1)

#define CCI_PMU_EVENT_MASK		0xffUL
#define CCI_PMU_EVENT_SOURCE(event)	(((event) >> 5) & 0x7)
#define CCI_PMU_EVENT_CODE(event)	((event) & 0x1f)

#define CCI_PMU_MAX_HW_EVENTS 5   /* CCI PMU has 4 counters + 1 cycle counter */

/* Types of interfaces that can generate events */
enum {
	CCI_IF_SLAVE,
	CCI_IF_MASTER,
	CCI_IF_MAX,
};

struct event_range {
	u32 min;
	u32 max;
};

struct cci_pmu_hw_events {
	struct perf_event *events[CCI_PMU_MAX_HW_EVENTS];
	unsigned long used_mask[BITS_TO_LONGS(CCI_PMU_MAX_HW_EVENTS)];
	raw_spinlock_t pmu_lock;
};

struct cci_pmu_model {
	char *name;
	struct event_range event_ranges[CCI_IF_MAX];
};

static struct cci_pmu_model cci_pmu_models[];

struct cci_pmu {
	void __iomem *base;
	struct pmu pmu;
	int nr_irqs;
	int irqs[CCI_PMU_MAX_HW_EVENTS];
	unsigned long active_irqs;
	const struct cci_pmu_model *model;
	struct cci_pmu_hw_events hw_events;
	struct platform_device *plat_device;
	int num_events;
	atomic_t active_events;
	struct mutex reserve_mutex;
	cpumask_t cpus;
};
static struct cci_pmu *pmu;

#define to_cci_pmu(c)	(container_of(c, struct cci_pmu, pmu))

/* Port ids */
#define CCI_PORT_S0	0
#define CCI_PORT_S1	1
#define CCI_PORT_S2	2
#define CCI_PORT_S3	3
#define CCI_PORT_S4	4
#define CCI_PORT_M0	5
#define CCI_PORT_M1	6
#define CCI_PORT_M2	7

#define CCI_REV_R0	0
#define CCI_REV_R1	1
#define CCI_REV_R1_PX	5

/*
 * Instead of an event id to monitor CCI cycles, a dedicated counter is
 * provided. Use 0xff to represent CCI cycles and hope that no future revisions
 * make use of this event in hardware.
 */
enum cci400_perf_events {
	CCI_PMU_CYCLES = 0xff
};

#define CCI_PMU_CYCLE_CNTR_IDX		0
#define CCI_PMU_CNTR0_IDX		1
#define CCI_PMU_CNTR_LAST(cci_pmu)	(CCI_PMU_CYCLE_CNTR_IDX + cci_pmu->num_events - 1)

/*
 * CCI PMU event id is an 8-bit value made of two parts - bits 7:5 for one of 8
 * ports and bits 4:0 are event codes. There are different event codes
 * associated with each port type.
 *
 * Additionally, the range of events associated with the port types changed
 * between Rev0 and Rev1.
 *
 * The constants below define the range of valid codes for each port type for
 * the different revisions and are used to validate the event to be monitored.
 */

#define CCI_REV_R0_SLAVE_PORT_MIN_EV	0x00
#define CCI_REV_R0_SLAVE_PORT_MAX_EV	0x13
#define CCI_REV_R0_MASTER_PORT_MIN_EV	0x14
#define CCI_REV_R0_MASTER_PORT_MAX_EV	0x1a

#define CCI_REV_R1_SLAVE_PORT_MIN_EV	0x00
#define CCI_REV_R1_SLAVE_PORT_MAX_EV	0x14
#define CCI_REV_R1_MASTER_PORT_MIN_EV	0x00
#define CCI_REV_R1_MASTER_PORT_MAX_EV	0x11
static int pmu_validate_hw_event(unsigned long hw_event)
{
	u8 ev_source = CCI_PMU_EVENT_SOURCE(hw_event);
	u8 ev_code = CCI_PMU_EVENT_CODE(hw_event);
	int if_type;

	if (hw_event & ~CCI_PMU_EVENT_MASK)
		return -ENOENT;

	switch (ev_source) {
	case CCI_PORT_S0:
	case CCI_PORT_S1:
	case CCI_PORT_S2:
	case CCI_PORT_S3:
	case CCI_PORT_S4:
		/* Slave Interface */
		if_type = CCI_IF_SLAVE;
		break;
	case CCI_PORT_M0:
	case CCI_PORT_M1:
	case CCI_PORT_M2:
		/* Master Interface */
		if_type = CCI_IF_MASTER;
		break;
	default:
		return -ENOENT;
	}

	if (ev_code >= pmu->model->event_ranges[if_type].min &&
	    ev_code <= pmu->model->event_ranges[if_type].max)
		return hw_event;

	return -ENOENT;
}
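
/*
 * Read the revision field of the CCI Peripheral ID2 register; revision
 * codes below CCI_REV_R1_PX identify an r0 part, anything else is r1.
 */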
static int probe_cci_revision(void)
{
	int rev;
	rev = readl_relaxed(cci_ctrl_base + CCI_PID2) & CCI_PID2_REV_MASK;
	rev >>= CCI_PID2_REV_SHIFT;

	if (rev < CCI_REV_R1_PX)
		return CCI_REV_R0;
	else
		return CCI_REV_R1;
}

static const struct cci_pmu_model *probe_cci_model(struct platform_device *pdev)
{
	if (platform_has_secure_cci_access())
		return &cci_pmu_models[probe_cci_revision()];
	return NULL;
}

static int pmu_is_valid_counter(struct cci_pmu *cci_pmu, int idx)
{
	return CCI_PMU_CYCLE_CNTR_IDX <= idx &&
		idx <= CCI_PMU_CNTR_LAST(cci_pmu);
}

static u32 pmu_read_register(int idx, unsigned int offset)
{
	return readl_relaxed(pmu->base + CCI_PMU_CNTR_BASE(idx) + offset);
}

static void pmu_write_register(u32 value, int idx, unsigned int offset)
{
	writel_relaxed(value, pmu->base + CCI_PMU_CNTR_BASE(idx) + offset);
}

static void pmu_disable_counter(int idx)
{
	pmu_write_register(0, idx, CCI_PMU_CNTR_CTRL);
}

static void pmu_enable_counter(int idx)
{
	pmu_write_register(1, idx, CCI_PMU_CNTR_CTRL);
}

static void pmu_set_event(int idx, unsigned long event)
{
	pmu_write_register(event, idx, CCI_PMU_EVT_SEL);
}

static u32 pmu_get_max_counters(void)
{
	u32 n_cnts = (readl_relaxed(cci_ctrl_base + CCI_PMCR) &
		      CCI_PMCR_NCNT_MASK) >> CCI_PMCR_NCNT_SHIFT;

	/* add 1 for cycle counter */
	return n_cnts + 1;
}
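
/*
 * Allocate a counter for @event: CCI_PMU_CYCLES may only use the dedicated
 * cycle counter slot, every other event takes the first free programmable
 * counter.
 */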
static int pmu_get_event_idx(struct cci_pmu_hw_events *hw, struct perf_event *event)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	struct hw_perf_event *hw_event = &event->hw;
	unsigned long cci_event = hw_event->config_base;
	int idx;

	if (cci_event == CCI_PMU_CYCLES) {
		if (test_and_set_bit(CCI_PMU_CYCLE_CNTR_IDX, hw->used_mask))
			return -EAGAIN;

		return CCI_PMU_CYCLE_CNTR_IDX;
	}

	for (idx = CCI_PMU_CNTR0_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); ++idx)
		if (!test_and_set_bit(idx, hw->used_mask))
			return idx;

	/* No counters available */
	return -EAGAIN;
}
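
/*
 * Events in the fixed perf_type_id range are rejected here: dynamic PMUs
 * such as this one are assigned a type id of at least PERF_TYPE_MAX by
 * perf_pmu_register().
 */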
static int pmu_map_event(struct perf_event *event)
{
	int mapping;
	unsigned long config = event->attr.config;

	if (event->attr.type < PERF_TYPE_MAX)
		return -ENOENT;

	if (config == CCI_PMU_CYCLES)
		mapping = config;
	else
		mapping = pmu_validate_hw_event(config);

	return mapping;
}

static int pmu_request_irq(struct cci_pmu *cci_pmu, irq_handler_t handler)
{
	int i;
	struct platform_device *pmu_device = cci_pmu->plat_device;

	if (unlikely(!pmu_device))
		return -ENODEV;

	if (pmu->nr_irqs < 1) {
		dev_err(&pmu_device->dev, "no irqs for CCI PMUs defined\n");
		return -ENODEV;
	}

	/*
	 * Register all available CCI PMU interrupts. In the interrupt handler
	 * we iterate over the counters checking for interrupt source (the
	 * overflowing counter) and clear it.
	 *
	 * This should allow handling of non-unique interrupt for the counters.
	 */
	for (i = 0; i < pmu->nr_irqs; i++) {
		int err = request_irq(pmu->irqs[i], handler, IRQF_SHARED,
				"arm-cci-pmu", cci_pmu);
		if (err) {
			dev_err(&pmu_device->dev, "unable to request IRQ%d for ARM CCI PMU counters\n",
				pmu->irqs[i]);
			return err;
		}

		set_bit(i, &pmu->active_irqs);
	}

	return 0;
}

static void pmu_free_irq(struct cci_pmu *cci_pmu)
{
	int i;

	for (i = 0; i < pmu->nr_irqs; i++) {
		if (!test_and_clear_bit(i, &pmu->active_irqs))
			continue;

		free_irq(pmu->irqs[i], cci_pmu);
	}
}

static u32 pmu_read_counter(struct perf_event *event)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	struct hw_perf_event *hw_counter = &event->hw;
	int idx = hw_counter->idx;
	u32 value;

	if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
		return 0;
	}
	value = pmu_read_register(idx, CCI_PMU_CNTR);

	return value;
}

static void pmu_write_counter(struct perf_event *event, u32 value)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	struct hw_perf_event *hw_counter = &event->hw;
	int idx = hw_counter->idx;

	if (unlikely(!pmu_is_valid_counter(cci_pmu, idx)))
		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
	else
		pmu_write_register(value, idx, CCI_PMU_CNTR);
}
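
/*
 * Fold the delta since the last read of the hardware counter into
 * event->count. The cmpxchg loop keeps prev_count consistent even when an
 * overflow interrupt updates it concurrently with a read.
 */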
static u64 pmu_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;

	do {
		prev_raw_count = local64_read(&hwc->prev_count);
		new_raw_count = pmu_read_counter(event);
	} while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
		 new_raw_count) != prev_raw_count);

	delta = (new_raw_count - prev_raw_count) & CCI_PMU_CNTR_MASK;

	local64_add(delta, &event->count);

	return new_raw_count;
}

static void pmu_read(struct perf_event *event)
{
	pmu_event_update(event);
}

static void pmu_event_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	/*
	 * The CCI PMU counters have a period of 2^32. To account for the
	 * possibility of extreme interrupt latency we program for a period of
	 * half that. Hopefully we can handle the interrupt before another 2^31
	 * events occur and the counter overtakes its previous value.
	 */
	u64 val = 1ULL << 31;
	local64_set(&hwc->prev_count, val);
	pmu_write_counter(event, val);
}

static irqreturn_t pmu_handle_irq(int irq_num, void *dev)
{
	unsigned long flags;
	struct cci_pmu *cci_pmu = dev;
	struct cci_pmu_hw_events *events = &pmu->hw_events;
	int idx, handled = IRQ_NONE;

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/*
	 * Iterate over counters and update the corresponding perf events.
	 * This should work regardless of whether we have per-counter overflow
	 * interrupt or a combined overflow interrupt.
	 */
	for (idx = CCI_PMU_CYCLE_CNTR_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) {
		struct perf_event *event = events->events[idx];
		struct hw_perf_event *hw_counter;

		if (!event)
			continue;

		hw_counter = &event->hw;

		/* Did this counter overflow? */
		if (!(pmu_read_register(idx, CCI_PMU_OVRFLW) &
		      CCI_PMU_OVRFLW_FLAG))
			continue;

		pmu_write_register(CCI_PMU_OVRFLW_FLAG, idx, CCI_PMU_OVRFLW);

		pmu_event_update(event);
		pmu_event_set_period(event);
		handled = IRQ_HANDLED;
	}
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);

	return IRQ_RETVAL(handled);
}

static int cci_pmu_get_hw(struct cci_pmu *cci_pmu)
{
	int ret = pmu_request_irq(cci_pmu, pmu_handle_irq);
	if (ret) {
		pmu_free_irq(cci_pmu);
		return ret;
	}
	return 0;
}

static void cci_pmu_put_hw(struct cci_pmu *cci_pmu)
{
	pmu_free_irq(cci_pmu);
}

static void hw_perf_event_destroy(struct perf_event *event)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	atomic_t *active_events = &cci_pmu->active_events;
	struct mutex *reserve_mutex = &cci_pmu->reserve_mutex;

	if (atomic_dec_and_mutex_lock(active_events, reserve_mutex)) {
		cci_pmu_put_hw(cci_pmu);
		mutex_unlock(reserve_mutex);
	}
}
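
/*
 * perf_pmu_disable()/perf_pmu_enable() land in the two callbacks below.
 * The CCI-400 has a single global counter-enable bit (CCI_PMCR_CEN), so
 * all counters are started and stopped together.
 */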
static void cci_pmu_enable(struct pmu *pmu)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
	int enabled = bitmap_weight(hw_events->used_mask, cci_pmu->num_events);
	unsigned long flags;
	u32 val;

	if (!enabled)
		return;

	raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);

	/* Enable all the PMU counters. */
	val = readl_relaxed(cci_ctrl_base + CCI_PMCR) | CCI_PMCR_CEN;
	writel(val, cci_ctrl_base + CCI_PMCR);
	raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
}

static void cci_pmu_disable(struct pmu *pmu)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);

	/* Disable all the PMU counters. */
	val = readl_relaxed(cci_ctrl_base + CCI_PMCR) & ~CCI_PMCR_CEN;
	writel(val, cci_ctrl_base + CCI_PMCR);
	raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
}

static void cci_pmu_start(struct perf_event *event, int pmu_flags)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	unsigned long flags;

	/*
	 * To handle interrupt latency, we always reprogram the period
	 * regardless of PERF_EF_RELOAD.
	 */
	if (pmu_flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;

	if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
		return;
	}

	raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);

	/* Configure the event to count, unless you are counting cycles */
	if (idx != CCI_PMU_CYCLE_CNTR_IDX)
		pmu_set_event(idx, hwc->config_base);

	pmu_event_set_period(event);
	pmu_enable_counter(idx);

	raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
}

static void cci_pmu_stop(struct perf_event *event, int pmu_flags)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (hwc->state & PERF_HES_STOPPED)
		return;

	if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
		return;
	}

	/*
	 * We always reprogram the counter, so ignore PERF_EF_UPDATE. See
	 * cci_pmu_start()
	 */
	pmu_disable_counter(idx);
	pmu_event_update(event);
	hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}

static int cci_pmu_add(struct perf_event *event, int flags)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	perf_pmu_disable(event->pmu);

	/* If we don't have a space for the counter then finish early. */
	idx = pmu_get_event_idx(hw_events, event);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	event->hw.idx = idx;
	hw_events->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		cci_pmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	perf_pmu_enable(event->pmu);
	return err;
}

static void cci_pmu_del(struct perf_event *event, int flags)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	cci_pmu_stop(event, PERF_EF_UPDATE);
	hw_events->events[idx] = NULL;
	clear_bit(idx, hw_events->used_mask);

	perf_event_update_userpage(event);
}

static int
validate_event(struct pmu *cci_pmu,
	       struct cci_pmu_hw_events *hw_events,
	       struct perf_event *event)
{
	if (is_software_event(event))
		return 1;

	/*
	 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
	 * core perf code won't check that the pmu->ctx == leader->ctx
	 * until after pmu->event_init(event).
	 */
	if (event->pmu != cci_pmu)
		return 0;

	if (event->state < PERF_EVENT_STATE_OFF)
		return 1;

	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
		return 1;

	return pmu_get_event_idx(hw_events, event) >= 0;
}

static int
validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct cci_pmu_hw_events fake_pmu = {
		/*
		 * Initialise the fake PMU. We only need to populate the
		 * used_mask for the purposes of validation.
		 */
		.used_mask = { 0 },
	};

	if (!validate_event(event->pmu, &fake_pmu, leader))
		return -EINVAL;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(event->pmu, &fake_pmu, sibling))
			return -EINVAL;
	}

	if (!validate_event(event->pmu, &fake_pmu, event))
		return -EINVAL;

	return 0;
}

static int
__hw_perf_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int mapping;

	mapping = pmu_map_event(event);

	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet.
	 */
	hwc->idx = -1;
	hwc->config_base = 0;
	hwc->config = 0;
	hwc->event_base = 0;

	/*
	 * Store the event encoding into the config_base field.
	 */
	hwc->config_base |= (unsigned long)mapping;

	/*
	 * Limit the sample_period to half of the counter width. That way, the
	 * new counter value is far less likely to overtake the previous one
	 * unless you have some serious IRQ latency issues.
	 */
	hwc->sample_period = CCI_PMU_CNTR_MASK >> 1;
	hwc->last_period = hwc->sample_period;
	local64_set(&hwc->period_left, hwc->sample_period);

	if (event->group_leader != event) {
		if (validate_group(event) != 0)
			return -EINVAL;
	}

	return 0;
}

static int cci_pmu_event_init(struct perf_event *event)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	atomic_t *active_events = &cci_pmu->active_events;
	int err = 0;
	int cpu;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* Shared by all CPUs, no meaningful state to sample */
	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EOPNOTSUPP;

	/* We have no filtering of any kind */
	if (event->attr.exclude_user	||
	    event->attr.exclude_kernel	||
	    event->attr.exclude_hv	||
	    event->attr.exclude_idle	||
	    event->attr.exclude_host	||
	    event->attr.exclude_guest)
		return -EINVAL;

	/*
	 * Following the example set by other "uncore" PMUs, we accept any CPU
	 * and rewrite its affinity dynamically rather than having perf core
	 * handle cpu == -1 and pid == -1 for this case.
	 *
	 * The perf core will pin online CPUs for the duration of this call and
	 * the event being installed into its context, so the PMU's CPU can't
	 * change under our feet.
	 */
	cpu = cpumask_first(&cci_pmu->cpus);
	if (event->cpu < 0 || cpu < 0)
		return -EINVAL;
	event->cpu = cpu;

	event->destroy = hw_perf_event_destroy;
	if (!atomic_inc_not_zero(active_events)) {
		mutex_lock(&cci_pmu->reserve_mutex);
		if (atomic_read(active_events) == 0)
			err = cci_pmu_get_hw(cci_pmu);
		if (!err)
			atomic_inc(active_events);
		mutex_unlock(&cci_pmu->reserve_mutex);
	}
	if (err)
		return err;

	err = __hw_perf_event_init(event);
	if (err)
		hw_perf_event_destroy(event);

	return err;
}

static ssize_t pmu_attr_cpumask_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	int n = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
			  cpumask_pr_args(&pmu->cpus));
	buf[n++] = '\n';
	buf[n] = '\0';
	return n;
}

static DEVICE_ATTR(cpumask, S_IRUGO, pmu_attr_cpumask_show, NULL);

static struct attribute *pmu_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group pmu_attr_group = {
	.attrs = pmu_attrs,
};

static const struct attribute_group *pmu_attr_groups[] = {
	&pmu_attr_group,
	NULL
};

static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev)
{
	char *name = cci_pmu->model->name;
	cci_pmu->pmu = (struct pmu) {
		.name		= cci_pmu->model->name,
		.task_ctx_nr	= perf_invalid_context,
		.pmu_enable	= cci_pmu_enable,
		.pmu_disable	= cci_pmu_disable,
		.event_init	= cci_pmu_event_init,
		.add		= cci_pmu_add,
		.del		= cci_pmu_del,
		.start		= cci_pmu_start,
		.stop		= cci_pmu_stop,
		.read		= pmu_read,
		.attr_groups	= pmu_attr_groups,
	};

	cci_pmu->plat_device = pdev;
	cci_pmu->num_events = pmu_get_max_counters();

	return perf_pmu_register(&cci_pmu->pmu, name, -1);
}
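
/*
 * Events are bound to a single "owner" CPU (see cci_pmu_event_init). If
 * that CPU is hot-unplugged, hand ownership to any other online CPU.
 */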
static int cci_pmu_cpu_notifier(struct notifier_block *self,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;
	unsigned int target;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DOWN_PREPARE:
		if (!cpumask_test_and_clear_cpu(cpu, &pmu->cpus))
			break;
		target = cpumask_any_but(cpu_online_mask, cpu);
		if (target >= nr_cpu_ids) /* UP, last CPU */
			break;
		/*
		 * TODO: migrate context once core races on event->ctx have
		 * been fixed.
		 */
		cpumask_set_cpu(target, &pmu->cpus);
	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block cci_pmu_cpu_nb = {
	.notifier_call	= cci_pmu_cpu_notifier,
	/*
	 * to migrate uncore events, our notifier should be executed
	 * before perf core's notifier.
	 */
	.priority	= CPU_PRI_PERF + 1,
};

static struct cci_pmu_model cci_pmu_models[] = {
	[CCI_REV_R0] = {
		.name = "CCI_400",
		.event_ranges = {
			[CCI_IF_SLAVE] = {
				CCI_REV_R0_SLAVE_PORT_MIN_EV,
				CCI_REV_R0_SLAVE_PORT_MAX_EV,
			},
			[CCI_IF_MASTER] = {
				CCI_REV_R0_MASTER_PORT_MIN_EV,
				CCI_REV_R0_MASTER_PORT_MAX_EV,
			},
		},
	},
	[CCI_REV_R1] = {
		.name = "CCI_400_r1",
		.event_ranges = {
			[CCI_IF_SLAVE] = {
				CCI_REV_R1_SLAVE_PORT_MIN_EV,
				CCI_REV_R1_SLAVE_PORT_MAX_EV,
			},
			[CCI_IF_MASTER] = {
				CCI_REV_R1_MASTER_PORT_MIN_EV,
				CCI_REV_R1_MASTER_PORT_MAX_EV,
			},
		},
	},
};
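
/*
 * With one of the models above registered, counting a raw CCI event from
 * userspace would look something like (illustrative invocation, assuming
 * an r0 part named "CCI_400" and the 0x63 encoding from the example
 * further up):
 *
 *	perf stat -a -e CCI_400/config=0x63/ sleep 1
 */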

static const struct of_device_id arm_cci_pmu_matches[] = {
	{
		.compatible = "arm,cci-400-pmu",
		.data	= NULL,
	},
	{
		.compatible = "arm,cci-400-pmu,r0",
		.data	= &cci_pmu_models[CCI_REV_R0],
	},
	{
		.compatible = "arm,cci-400-pmu,r1",
		.data	= &cci_pmu_models[CCI_REV_R1],
	},
	{},
};

static inline const struct cci_pmu_model *get_cci_model(struct platform_device *pdev)
{
	const struct of_device_id *match = of_match_node(arm_cci_pmu_matches,
							pdev->dev.of_node);
	if (!match)
		return NULL;
	if (match->data)
		return match->data;

	dev_warn(&pdev->dev, "DEPRECATED compatible property, "
		 "requires secure access to CCI registers");
	return probe_cci_model(pdev);
}

static bool is_duplicate_irq(int irq, int *irqs, int nr_irqs)
{
	int i;

	for (i = 0; i < nr_irqs; i++)
		if (irq == irqs[i])
			return true;

	return false;
}

static int cci_pmu_probe(struct platform_device *pdev)
{
	struct resource *res;
	int i, ret, irq;
	const struct cci_pmu_model *model;

	model = get_cci_model(pdev);
	if (!model) {
		dev_warn(&pdev->dev, "CCI PMU version not supported\n");
		return -ENODEV;
	}

	pmu = devm_kzalloc(&pdev->dev, sizeof(*pmu), GFP_KERNEL);
	if (!pmu)
		return -ENOMEM;

	pmu->model = model;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pmu->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(pmu->base))
		return -ENOMEM;

	/*
	 * CCI PMU has 5 overflow signals - one per counter; but some may be tied
	 * together to a common interrupt.
	 */
	pmu->nr_irqs = 0;
	for (i = 0; i < CCI_PMU_MAX_HW_EVENTS; i++) {
		irq = platform_get_irq(pdev, i);
		if (irq < 0)
			break;

		if (is_duplicate_irq(irq, pmu->irqs, pmu->nr_irqs))
			continue;

		pmu->irqs[pmu->nr_irqs++] = irq;
	}

	/*
	 * Ensure that the device tree has as many interrupts as the number
	 * of counters.
	 */
	if (i < CCI_PMU_MAX_HW_EVENTS) {
		dev_warn(&pdev->dev, "Incorrect number of interrupts: %d, should be %d\n",
			i, CCI_PMU_MAX_HW_EVENTS);
		return -EINVAL;
	}

	raw_spin_lock_init(&pmu->hw_events.pmu_lock);
	mutex_init(&pmu->reserve_mutex);
	atomic_set(&pmu->active_events, 0);
	cpumask_set_cpu(smp_processor_id(), &pmu->cpus);

	ret = register_cpu_notifier(&cci_pmu_cpu_nb);
	if (ret)
		return ret;

	ret = cci_pmu_init(pmu, pdev);
	if (ret)
		return ret;

	pr_info("ARM %s PMU driver probed\n", pmu->model->name);
	return 0;
}

static int cci_platform_probe(struct platform_device *pdev)
{
	if (!cci_probed())
		return -ENODEV;

	return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
}

static struct platform_driver cci_pmu_driver = {
	.driver = {
		.name = DRIVER_NAME_PMU,
		.of_match_table = arm_cci_pmu_matches,
	},
	.probe = cci_pmu_probe,
};

static struct platform_driver cci_platform_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = arm_cci_matches,
	},
	.probe = cci_platform_probe,
};

static int __init cci_platform_init(void)
{
	int ret;

	ret = platform_driver_register(&cci_pmu_driver);
	if (ret)
		return ret;

	return platform_driver_register(&cci_platform_driver);
}

#else /* !CONFIG_ARM_CCI400_PMU */

static int __init cci_platform_init(void)
{
	return 0;
}

#endif /* CONFIG_ARM_CCI400_PMU */

#ifdef CONFIG_ARM_CCI400_PORT_CTRL

#define CCI_PORT_CTRL		0x0
#define CCI_CTRL_STATUS		0xc

#define CCI_ENABLE_SNOOP_REQ	0x1
#define CCI_ENABLE_DVM_REQ	0x2
#define CCI_ENABLE_REQ		(CCI_ENABLE_SNOOP_REQ | CCI_ENABLE_DVM_REQ)

enum cci_ace_port_type {
	ACE_INVALID_PORT = 0x0,
	ACE_PORT,
	ACE_LITE_PORT,
};

struct cci_ace_port {
	void __iomem *base;
	unsigned long phys;
	enum cci_ace_port_type type;
	struct device_node *dn;
};

static struct cci_ace_port *ports;
static unsigned int nb_cci_ports;

struct cpu_port {
	u64 mpidr;
	u32 port;
};

/*
 * Use the port MSB as valid flag, shift can be made dynamic
 * by computing number of bits required for port indexes.
 * Code disabling CCI cpu ports runs with D-cache invalidated
 * and SCTLR bit clear so data accesses must be kept to a minimum
 * to improve performance; for now shift is left static to
 * avoid one more data access while disabling the CCI port.
 */
#define PORT_VALID_SHIFT	31
#define PORT_VALID		(0x1 << PORT_VALID_SHIFT)

static inline void init_cpu_port(struct cpu_port *port, u32 index, u64 mpidr)
{
	port->port = PORT_VALID | index;
	port->mpidr = mpidr;
}

static inline bool cpu_port_is_valid(struct cpu_port *port)
{
	return !!(port->port & PORT_VALID);
}

static inline bool cpu_port_match(struct cpu_port *port, u64 mpidr)
{
	return port->mpidr == (mpidr & MPIDR_HWID_BITMASK);
}

static struct cpu_port cpu_port[NR_CPUS];

/**
 * __cci_ace_get_port - Function to retrieve the port index connected to
 *			a cpu or device.
 *
 * @dn: device node of the device to look-up
 * @type: port type
 *
 * Return value:
 *	- CCI port index if success
 *	- -ENODEV if failure
 */
static int __cci_ace_get_port(struct device_node *dn, int type)
{
	int i;
	bool ace_match;
	struct device_node *cci_portn;

	cci_portn = of_parse_phandle(dn, "cci-control-port", 0);
	for (i = 0; i < nb_cci_ports; i++) {
		ace_match = ports[i].type == type;
		if (ace_match && cci_portn == ports[i].dn)
			return i;
	}
	return -ENODEV;
}

int cci_ace_get_port(struct device_node *dn)
{
	return __cci_ace_get_port(dn, ACE_LITE_PORT);
}
EXPORT_SYMBOL_GPL(cci_ace_get_port);

static void cci_ace_init_ports(void)
{
	int port, cpu;
	struct device_node *cpun;

	/*
	 * Port index look-up speeds up the function disabling ports by CPU,
	 * since the logical to port index mapping is done once and does
	 * not change after system boot.
	 * The stashed index array is initialized for all possible CPUs
	 * at probe time.
	 */
	for_each_possible_cpu(cpu) {
		/* too early to use cpu->of_node */
		cpun = of_get_cpu_node(cpu, NULL);

		if (WARN(!cpun, "Missing cpu device node\n"))
			continue;

		port = __cci_ace_get_port(cpun, ACE_PORT);
		if (port < 0)
			continue;

		init_cpu_port(&cpu_port[cpu], port, cpu_logical_map(cpu));
	}

	for_each_possible_cpu(cpu) {
		WARN(!cpu_port_is_valid(&cpu_port[cpu]),
			"CPU %u does not have an associated CCI port\n",
			cpu);
	}
}

/*
 * Functions to enable/disable a CCI interconnect slave port
 *
 * They are called by low-level power management code to disable slave
 * interfaces snoops and DVM broadcast.
 * Since they may execute with cache data allocation disabled and after
 * the caches have been cleaned and invalidated, the functions provide no
 * explicit locking: normal cacheable kernel locks based on ldrex/strex
 * may not work. Locking has to be provided by BSP implementations to
 * ensure proper operations.
 */

/**
 * cci_port_control() - function to control a CCI port
 *
 * @port: index of the port to setup
 * @enable: if true enables the port, if false disables it
 */
static void notrace cci_port_control(unsigned int port, bool enable)
{
	void __iomem *base = ports[port].base;

	writel_relaxed(enable ? CCI_ENABLE_REQ : 0, base + CCI_PORT_CTRL);
	/*
	 * This function is called from power down procedures
	 * and must not execute any instruction that might
	 * cause the processor to be put in a quiescent state
	 * (eg wfi). Hence, cpu_relax() can not be added to this
	 * read loop to optimize power, since it might hide possibly
	 * disruptive operations.
	 */
	while (readl_relaxed(cci_ctrl_base + CCI_CTRL_STATUS) & 0x1)
		;
}

/**
 * cci_disable_port_by_cpu() - function to disable a CCI port by CPU
 *			       reference
 *
 * @mpidr: mpidr of the CPU whose CCI port should be disabled
 *
 * Disabling a CCI port for a CPU implies disabling the CCI port
 * controlling that CPU cluster. Code disabling CPU CCI ports
 * must make sure that the CPU running the code is the last active CPU
 * in the cluster ie all other CPUs are quiescent in a low power state.
 *
 * Return:
 *	0 on success
 *	-ENODEV on port look-up failure
 */
int notrace cci_disable_port_by_cpu(u64 mpidr)
{
	int cpu;
	bool is_valid;

	for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
		is_valid = cpu_port_is_valid(&cpu_port[cpu]);
		if (is_valid && cpu_port_match(&cpu_port[cpu], mpidr)) {
			cci_port_control(cpu_port[cpu].port, false);
			return 0;
		}
	}
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(cci_disable_port_by_cpu);

/**
 * cci_enable_port_for_self() - enable a CCI port for calling CPU
 *
 * Enabling a CCI port for the calling CPU implies enabling the CCI
 * port controlling that CPU's cluster. Caller must make sure that the
 * CPU running the code is the first active CPU in the cluster and all
 * other CPUs are quiescent in a low power state or waiting for this CPU
 * to complete the CCI initialization.
 *
 * Because this is called when the MMU is still off and with no stack,
 * the code must be position independent and ideally rely on callee
 * clobbered registers only. To achieve this we must code this function
 * entirely in assembler.
 *
 * On success this returns with the proper CCI port enabled. In case of
 * any failure this never returns as the inability to enable the CCI is
 * fatal and there is no possible recovery at this stage.
 */
asmlinkage void __naked cci_enable_port_for_self(void)
{
	asm volatile ("\n"
"	.arch armv7-a\n"
"	mrc	p15, 0, r0, c0, c0, 5	@ get MPIDR value \n"
"	and	r0, r0, #"__stringify(MPIDR_HWID_BITMASK)" \n"
"	adr	r1, 5f \n"
"	ldr	r2, [r1] \n"
"	add	r1, r1, r2		@ &cpu_port \n"
"	add	ip, r1, %[sizeof_cpu_port] \n"

	/* Loop over the cpu_port array looking for a matching MPIDR */
"1:	ldr	r2, [r1, %[offsetof_cpu_port_mpidr_lsb]] \n"
"	cmp	r2, r0			@ compare MPIDR \n"
"	bne	2f \n"

	/* Found a match, now test port validity */
"	ldr	r3, [r1, %[offsetof_cpu_port_port]] \n"
"	tst	r3, #"__stringify(PORT_VALID)" \n"
"	bne	3f \n"

	/* no match, loop with the next cpu_port entry */
"2:	add	r1, r1, %[sizeof_struct_cpu_port] \n"
"	cmp	r1, ip			@ done? \n"
"	blo	1b \n"

	/* CCI port not found -- cheaply try to stall this CPU */
"cci_port_not_found: \n"
"	wfi \n"
"	wfe \n"
"	b	cci_port_not_found \n"

	/* Use matched port index to look up the corresponding ports entry */
"3:	bic	r3, r3, #"__stringify(PORT_VALID)" \n"
"	adr	r0, 6f \n"
"	ldmia	r0, {r1, r2} \n"
"	sub	r1, r1, r0		@ virt - phys \n"
"	ldr	r0, [r0, r2]		@ *(&ports) \n"
"	mov	r2, %[sizeof_struct_ace_port] \n"
"	mla	r0, r2, r3, r0		@ &ports[index] \n"
"	sub	r0, r0, r1		@ virt_to_phys() \n"

	/* Enable the CCI port */
"	ldr	r0, [r0, %[offsetof_port_phys]] \n"
"	mov	r3, %[cci_enable_req]\n"
"	str	r3, [r0, #"__stringify(CCI_PORT_CTRL)"] \n"

	/* poll the status reg for completion */
"	adr	r1, 7f \n"
"	ldr	r0, [r1] \n"
"	ldr	r0, [r0, r1]		@ cci_ctrl_base \n"
"4:	ldr	r1, [r0, #"__stringify(CCI_CTRL_STATUS)"] \n"
"	tst	r1, %[cci_control_status_bits] \n"
"	bne	4b \n"

"	mov	r0, #0 \n"
"	bx	lr \n"

"	.align	2 \n"
"5:	.word	cpu_port - . \n"
"6:	.word	. \n"
"	.word	ports - 6b \n"
"7:	.word	cci_ctrl_phys - . \n"
	: :
	[sizeof_cpu_port] "i" (sizeof(cpu_port)),
	[cci_enable_req] "i" cpu_to_le32(CCI_ENABLE_REQ),
	[cci_control_status_bits] "i" cpu_to_le32(1),
#ifndef __ARMEB__
	[offsetof_cpu_port_mpidr_lsb] "i" (offsetof(struct cpu_port, mpidr)),
#else
	[offsetof_cpu_port_mpidr_lsb] "i" (offsetof(struct cpu_port, mpidr)+4),
#endif
	[offsetof_cpu_port_port] "i" (offsetof(struct cpu_port, port)),
	[sizeof_struct_cpu_port] "i" (sizeof(struct cpu_port)),
	[sizeof_struct_ace_port] "i" (sizeof(struct cci_ace_port)),
	[offsetof_port_phys] "i" (offsetof(struct cci_ace_port, phys)) );

	unreachable();
}

/**
 * __cci_control_port_by_device() - function to control a CCI port by device
 *				    reference
 *
 * @dn: device node pointer of the device whose CCI port should be
 *	controlled
 * @enable: if true enables the port, if false disables it
 *
 * Return:
 *	0 on success
 *	-ENODEV on port look-up failure
 */
int notrace __cci_control_port_by_device(struct device_node *dn, bool enable)
{
	int port;

	if (!dn)
		return -ENODEV;

	port = __cci_ace_get_port(dn, ACE_LITE_PORT);
	if (WARN_ONCE(port < 0, "node %s ACE lite port look-up failure\n",
				dn->full_name))
		return -ENODEV;
	cci_port_control(port, enable);
	return 0;
}
EXPORT_SYMBOL_GPL(__cci_control_port_by_device);

/**
 * __cci_control_port_by_index() - function to control a CCI port by port index
 *
 * @port: port index previously retrieved with cci_ace_get_port()
 * @enable: if true enables the port, if false disables it
 *
 * Return:
 *	0 on success
 *	-ENODEV on port index out of range
 *	-EPERM if operation carried out on an ACE PORT
 */
int notrace __cci_control_port_by_index(u32 port, bool enable)
{
	if (port >= nb_cci_ports || ports[port].type == ACE_INVALID_PORT)
		return -ENODEV;
	/*
	 * CCI control for ports connected to CPUS is extremely fragile
	 * and must be made to go through a specific and controlled
	 * interface (ie cci_disable_port_by_cpu()); control by general
	 * purpose indexing is therefore disabled for ACE ports.
	 */
	if (ports[port].type == ACE_PORT)
		return -EPERM;

	cci_port_control(port, enable);
	return 0;
}
EXPORT_SYMBOL_GPL(__cci_control_port_by_index);

static const struct of_device_id arm_cci_ctrl_if_matches[] = {
	{.compatible = "arm,cci-400-ctrl-if", },
	{},
};
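
/*
 * A minimal sketch of the DT layout this code parses (node names, labels
 * and addresses are illustrative only):
 *
 *	cci@2c090000 {
 *		compatible = "arm,cci-400";
 *		...
 *		cci_control0: slave-if@4000 {
 *			compatible = "arm,cci-400-ctrl-if";
 *			interface-type = "ace";
 *			reg = <0x4000 0x1000>;
 *		};
 *	};
 *
 * with each cluster's CPU nodes pointing at a control interface via a
 * "cci-control-port" phandle.
 */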
static int cci_probe_ports(struct device_node *np)
{
	struct cci_nb_ports const *cci_config;
	int ret, i, nb_ace = 0, nb_ace_lite = 0;
	struct device_node *cp;
	struct resource res;
	const char *match_str;
	bool is_ace;

	cci_config = of_match_node(arm_cci_matches, np)->data;
	if (!cci_config)
		return -ENODEV;

	nb_cci_ports = cci_config->nb_ace + cci_config->nb_ace_lite;

	ports = kcalloc(nb_cci_ports, sizeof(*ports), GFP_KERNEL);
	if (!ports)
		return -ENOMEM;

	for_each_child_of_node(np, cp) {
		if (!of_match_node(arm_cci_ctrl_if_matches, cp))
			continue;

		i = nb_ace + nb_ace_lite;

		if (i >= nb_cci_ports)
			break;

		if (of_property_read_string(cp, "interface-type",
					&match_str)) {
			WARN(1, "node %s missing interface-type property\n",
				  cp->full_name);
			continue;
		}
		is_ace = strcmp(match_str, "ace") == 0;
		if (!is_ace && strcmp(match_str, "ace-lite")) {
			WARN(1, "node %s containing invalid interface-type property, skipping it\n",
					cp->full_name);
			continue;
		}

		ret = of_address_to_resource(cp, 0, &res);
		if (!ret) {
			ports[i].base = ioremap(res.start, resource_size(&res));
			ports[i].phys = res.start;
		}
		if (ret || !ports[i].base) {
			WARN(1, "unable to ioremap CCI port %d\n", i);
			continue;
		}

		if (is_ace) {
			if (WARN_ON(nb_ace >= cci_config->nb_ace))
				continue;
			ports[i].type = ACE_PORT;
			++nb_ace;
		} else {
			if (WARN_ON(nb_ace_lite >= cci_config->nb_ace_lite))
				continue;
			ports[i].type = ACE_LITE_PORT;
			++nb_ace_lite;
		}
		ports[i].dn = cp;
	}

	/* initialize a stashed array of ACE ports to speed-up look-up */
	cci_ace_init_ports();

	/*
	 * Multi-cluster systems may need this data when non-coherent, during
	 * cluster power-up/power-down. Make sure it reaches main memory.
	 */
	sync_cache_w(&cci_ctrl_base);
	sync_cache_w(&cci_ctrl_phys);
	sync_cache_w(&ports);
	sync_cache_w(&cpu_port);
	__sync_cache_range_w(ports, sizeof(*ports) * nb_cci_ports);
	pr_info("ARM CCI driver probed\n");

	return 0;
}
#else /* !CONFIG_ARM_CCI400_PORT_CTRL */
static inline int cci_probe_ports(struct device_node *np)
{
	return 0;
}
#endif /* CONFIG_ARM_CCI400_PORT_CTRL */
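
/*
 * Map the CCI control registers from the first "reg" entry of the matched
 * node, stashing both the virtual and the physical address: the latter is
 * required by cci_enable_port_for_self(), which runs with the MMU off.
 */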
static int cci_probe(void)
{
	int ret;
	struct device_node *np;
	struct resource res;

	np = of_find_matching_node(NULL, arm_cci_matches);
	if (!np || !of_device_is_available(np))
		return -ENODEV;

	ret = of_address_to_resource(np, 0, &res);
	if (!ret) {
		cci_ctrl_base = ioremap(res.start, resource_size(&res));
		cci_ctrl_phys = res.start;
	}
	if (ret || !cci_ctrl_base) {
		WARN(1, "unable to ioremap CCI ctrl\n");
		return -ENXIO;
	}

	return cci_probe_ports(np);
}

static int cci_init_status = -EAGAIN;
static DEFINE_MUTEX(cci_probing);

static int cci_init(void)
{
	if (cci_init_status != -EAGAIN)
		return cci_init_status;

	mutex_lock(&cci_probing);
	if (cci_init_status == -EAGAIN)
		cci_init_status = cci_probe();
	mutex_unlock(&cci_probing);
	return cci_init_status;
}

/*
 * To sort out early init calls ordering, a helper function is provided to
 * check if the CCI driver has been initialized. If it has not, the helper
 * calls the init function that probes the driver and updates the cached
 * return value.
 */
bool cci_probed(void)
{
	return cci_init() == 0;
}
EXPORT_SYMBOL_GPL(cci_probed);

early_initcall(cci_init);
core_initcall(cci_platform_init);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ARM CCI support");