/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/sysfs.h>
#include <linux/stat.h>
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/coresight.h>
#include <linux/coresight-pmu.h>
#include <linux/pm_wakeup.h>
#include <linux/amba/bus.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/pm_runtime.h>
#include <asm/sections.h>
#include <asm/local.h>

#include "coresight-etm4x.h"
#include "coresight-etm-perf.h"

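/* Set to a non-zero value to start tracing from boot (see etm4_probe()). */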
static int boot_enable;
module_param_named(boot_enable, boot_enable, int, S_IRUGO);

/* The number of ETMv4 tracers currently registered */
static int etm4_count;
static struct etmv4_drvdata *etmdrvdata[NR_CPUS];
static void etm4_set_default(struct etmv4_config *config);

static void etm4_os_unlock(struct etmv4_drvdata *drvdata)
{
        /* Writing any value to ETMOSLAR unlocks the trace registers */
        writel_relaxed(0x0, drvdata->base + TRCOSLAR);
        drvdata->os_unlock = true;
        isb();
}

static bool etm4_arch_supported(u8 arch)
{
        switch (arch) {
        case ETM_ARCH_V4:
                break;
        default:
                return false;
        }
        return true;
}

static int etm4_cpu_id(struct coresight_device *csdev)
{
        struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        return drvdata->cpu;
}

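/*
 * Return the trace ID for this tracer. When the unit is not enabled the
 * cached value programmed at probe time is returned; otherwise the ID is
 * read back from TRCTRACEIDR under the drvdata spinlock.
 */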
static int etm4_trace_id(struct coresight_device *csdev)
{
        struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
        unsigned long flags;
        int trace_id = -1;

        if (!local_read(&drvdata->mode))
                return drvdata->trcid;

        spin_lock_irqsave(&drvdata->spinlock, flags);

        CS_UNLOCK(drvdata->base);
        trace_id = readl_relaxed(drvdata->base + TRCTRACEIDR);
        trace_id &= ETM_TRACEID_MASK;
        CS_LOCK(drvdata->base);

        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        return trace_id;
}

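/*
 * Program and start the trace unit. Runs on the CPU this ETM is affined to
 * (via smp_call_function_single() or from the perf context) so that the CPU
 * is powered when the registers are written: the unit is first disabled,
 * TRCSTATR.IDLE is polled, the saved etmv4_config is written out and the
 * unit is re-enabled.
 */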
static void etm4_enable_hw(void *info)
{
        int i;
        struct etmv4_drvdata *drvdata = info;
        struct etmv4_config *config = &drvdata->config;

        CS_UNLOCK(drvdata->base);

        etm4_os_unlock(drvdata);

        /* Disable the trace unit before programming trace registers */
        writel_relaxed(0, drvdata->base + TRCPRGCTLR);

        /* wait for TRCSTATR.IDLE to go up */
        if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 1))
                dev_err(drvdata->dev,
                        "timeout observed when probing at offset %#x\n",
                        TRCSTATR);

        writel_relaxed(config->pe_sel, drvdata->base + TRCPROCSELR);
        writel_relaxed(config->cfg, drvdata->base + TRCCONFIGR);
        /* nothing specific implemented */
        writel_relaxed(0x0, drvdata->base + TRCAUXCTLR);
        writel_relaxed(config->eventctrl0, drvdata->base + TRCEVENTCTL0R);
        writel_relaxed(config->eventctrl1, drvdata->base + TRCEVENTCTL1R);
        writel_relaxed(config->stall_ctrl, drvdata->base + TRCSTALLCTLR);
        writel_relaxed(config->ts_ctrl, drvdata->base + TRCTSCTLR);
        writel_relaxed(config->syncfreq, drvdata->base + TRCSYNCPR);
        writel_relaxed(config->ccctlr, drvdata->base + TRCCCCTLR);
        writel_relaxed(config->bb_ctrl, drvdata->base + TRCBBCTLR);
        writel_relaxed(drvdata->trcid, drvdata->base + TRCTRACEIDR);
        writel_relaxed(config->vinst_ctrl, drvdata->base + TRCVICTLR);
        writel_relaxed(config->viiectlr, drvdata->base + TRCVIIECTLR);
        writel_relaxed(config->vissctlr, drvdata->base + TRCVISSCTLR);
        writel_relaxed(config->vipcssctlr, drvdata->base + TRCVIPCSSCTLR);
        for (i = 0; i < drvdata->nrseqstate - 1; i++)
                writel_relaxed(config->seq_ctrl[i],
                               drvdata->base + TRCSEQEVRn(i));
        writel_relaxed(config->seq_rst, drvdata->base + TRCSEQRSTEVR);
        writel_relaxed(config->seq_state, drvdata->base + TRCSEQSTR);
        writel_relaxed(config->ext_inp, drvdata->base + TRCEXTINSELR);
        for (i = 0; i < drvdata->nr_cntr; i++) {
                writel_relaxed(config->cntrldvr[i],
                               drvdata->base + TRCCNTRLDVRn(i));
                writel_relaxed(config->cntr_ctrl[i],
                               drvdata->base + TRCCNTCTLRn(i));
                writel_relaxed(config->cntr_val[i],
                               drvdata->base + TRCCNTVRn(i));
        }

        /* Resource selector pair 0 is always implemented and reserved */
        for (i = 0; i < drvdata->nr_resource * 2; i++)
                writel_relaxed(config->res_ctrl[i],
                               drvdata->base + TRCRSCTLRn(i));

        for (i = 0; i < drvdata->nr_ss_cmp; i++) {
                writel_relaxed(config->ss_ctrl[i],
                               drvdata->base + TRCSSCCRn(i));
                writel_relaxed(config->ss_status[i],
                               drvdata->base + TRCSSCSRn(i));
                writel_relaxed(config->ss_pe_cmp[i],
                               drvdata->base + TRCSSPCICRn(i));
        }
        for (i = 0; i < drvdata->nr_addr_cmp; i++) {
                writeq_relaxed(config->addr_val[i],
                               drvdata->base + TRCACVRn(i));
                writeq_relaxed(config->addr_acc[i],
                               drvdata->base + TRCACATRn(i));
        }
        for (i = 0; i < drvdata->numcidc; i++)
                writeq_relaxed(config->ctxid_pid[i],
                               drvdata->base + TRCCIDCVRn(i));
        writel_relaxed(config->ctxid_mask0, drvdata->base + TRCCIDCCTLR0);
        writel_relaxed(config->ctxid_mask1, drvdata->base + TRCCIDCCTLR1);

        for (i = 0; i < drvdata->numvmidc; i++)
                writeq_relaxed(config->vmid_val[i],
                               drvdata->base + TRCVMIDCVRn(i));
        writel_relaxed(config->vmid_mask0, drvdata->base + TRCVMIDCCTLR0);
        writel_relaxed(config->vmid_mask1, drvdata->base + TRCVMIDCCTLR1);

        /* Enable the trace unit */
        writel_relaxed(1, drvdata->base + TRCPRGCTLR);

        /* wait for TRCSTATR.IDLE to go back down to '0' */
        if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 0))
                dev_err(drvdata->dev,
                        "timeout observed when probing at offset %#x\n",
                        TRCSTATR);

        CS_LOCK(drvdata->base);

        dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu);
}

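/*
 * Translate the perf event attributes into an etmv4_config: start from the
 * driver defaults, narrow the traced address range if exclude_kernel or
 * exclude_user is set, then apply the generic ETM_OPT_* options (cycle
 * accurate tracing, timestamps).
 */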
static int etm4_parse_event_config(struct etmv4_drvdata *drvdata,
                                   struct perf_event_attr *attr)
{
        struct etmv4_config *config = &drvdata->config;

        if (!attr)
                return -EINVAL;

        /* Clear configuration from previous run */
        memset(config, 0, sizeof(struct etmv4_config));

        if (attr->exclude_kernel)
                config->mode = ETM_MODE_EXCL_KERN;

        if (attr->exclude_user)
                config->mode = ETM_MODE_EXCL_USER;

        /* Always start from the default config */
        etm4_set_default(config);

        /*
         * By default the tracers are configured to trace the whole address
         * range. Narrow the field only if requested by user space.
         */
        if (config->mode)
                etm4_config_trace_mode(config);

        /* Go from generic option to ETMv4 specifics */
        if (attr->config & BIT(ETM_OPT_CYCACC))
                config->cfg |= ETMv4_MODE_CYCACC;
        if (attr->config & BIT(ETM_OPT_TS))
                config->cfg |= ETMv4_MODE_TIMESTAMP;

        return 0;
}

static int etm4_enable_perf(struct coresight_device *csdev,
                            struct perf_event_attr *attr)
{
        struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id()))
                return -EINVAL;

        /* Configure the tracer based on the session's specifics */
        etm4_parse_event_config(drvdata, attr);
        /* And enable it */
        etm4_enable_hw(drvdata);

        return 0;
}

static int etm4_enable_sysfs(struct coresight_device *csdev)
{
        struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
        int ret;

        spin_lock(&drvdata->spinlock);

        /*
         * Executing etm4_enable_hw on the cpu whose ETM is being enabled
         * ensures that register writes occur when cpu is powered.
         */
        ret = smp_call_function_single(drvdata->cpu,
                                       etm4_enable_hw, drvdata, 1);
        if (ret)
                goto err;

        drvdata->sticky_enable = true;
        spin_unlock(&drvdata->spinlock);

        dev_info(drvdata->dev, "ETM tracing enabled\n");
        return 0;

err:
        spin_unlock(&drvdata->spinlock);
        return ret;
}

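/*
 * Claim the tracer for either sysfs or perf usage. The atomic cmpxchg on
 * drvdata->mode guarantees a single owner: a second caller sees a
 * non-disabled mode and gets -EBUSY.
 */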
static int etm4_enable(struct coresight_device *csdev,
                       struct perf_event_attr *attr, u32 mode)
{
        int ret;
        u32 val;
        struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        val = local_cmpxchg(&drvdata->mode, CS_MODE_DISABLED, mode);

        /* Someone is already using the tracer */
        if (val)
                return -EBUSY;

        switch (mode) {
        case CS_MODE_SYSFS:
                ret = etm4_enable_sysfs(csdev);
                break;
        case CS_MODE_PERF:
                ret = etm4_enable_perf(csdev, attr);
                break;
        default:
                ret = -EINVAL;
        }

        /* The tracer didn't start */
        if (ret)
                local_set(&drvdata->mode, CS_MODE_DISABLED);

        return ret;
}

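/*
 * Stop the trace unit by clearing TRCPRGCTLR.EN. The mb()/isb() pair makes
 * sure outstanding accesses and instructions complete before the unit is
 * turned off. Like etm4_enable_hw(), this runs on the owning CPU.
 */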
static void etm4_disable_hw(void *info)
{
        u32 control;
        struct etmv4_drvdata *drvdata = info;

        CS_UNLOCK(drvdata->base);

        control = readl_relaxed(drvdata->base + TRCPRGCTLR);

        /* EN, bit[0] Trace unit enable bit */
        control &= ~0x1;

        /* make sure everything completes before disabling */
        mb();
        isb();
        writel_relaxed(control, drvdata->base + TRCPRGCTLR);

        CS_LOCK(drvdata->base);

        dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu);
}

static int etm4_disable_perf(struct coresight_device *csdev)
{
        struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id()))
                return -EINVAL;

        etm4_disable_hw(drvdata);
        return 0;
}

static void etm4_disable_sysfs(struct coresight_device *csdev)
{
        struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        /*
         * Taking hotplug lock here protects from clocks getting disabled
         * with tracing being left on (crash scenario) if user disable occurs
         * after cpu online mask indicates the cpu is offline but before the
         * DYING hotplug callback is serviced by the ETM driver.
         */
        get_online_cpus();
        spin_lock(&drvdata->spinlock);

        /*
         * Executing etm4_disable_hw on the cpu whose ETM is being disabled
         * ensures that register writes occur when cpu is powered.
         */
        smp_call_function_single(drvdata->cpu, etm4_disable_hw, drvdata, 1);

        spin_unlock(&drvdata->spinlock);
        put_online_cpus();

        dev_info(drvdata->dev, "ETM tracing disabled\n");
}

static void etm4_disable(struct coresight_device *csdev)
{
        u32 mode;
        struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        /*
         * For as long as the tracer isn't disabled another entity can't
         * change its status. As such we can read the status here without
         * fearing it will change under us.
         */
        mode = local_read(&drvdata->mode);

        switch (mode) {
        case CS_MODE_DISABLED:
                break;
        case CS_MODE_SYSFS:
                etm4_disable_sysfs(csdev);
                break;
        case CS_MODE_PERF:
                etm4_disable_perf(csdev);
                break;
        }

        if (mode)
                local_set(&drvdata->mode, CS_MODE_DISABLED);
}

static const struct coresight_ops_source etm4_source_ops = {
        .cpu_id         = etm4_cpu_id,
        .trace_id       = etm4_trace_id,
        .enable         = etm4_enable,
        .disable        = etm4_disable,
};

static const struct coresight_ops etm4_cs_ops = {
        .source_ops     = &etm4_source_ops,
};

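/*
 * Discover the capabilities of this trace unit by reading the TRCIDR0-5
 * ID registers. Called once at probe time, on the CPU the ETM is affined
 * to, via smp_call_function_single().
 */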
static void etm4_init_arch_data(void *info)
{
        u32 etmidr0;
        u32 etmidr1;
        u32 etmidr2;
        u32 etmidr3;
        u32 etmidr4;
        u32 etmidr5;
        struct etmv4_drvdata *drvdata = info;

        /* Make sure all registers are accessible */
        etm4_os_unlock(drvdata);

        CS_UNLOCK(drvdata->base);

        /* find all capabilities of the tracing unit */
        etmidr0 = readl_relaxed(drvdata->base + TRCIDR0);

        /* INSTP0, bits[2:1] P0 tracing support field */
        if (BMVAL(etmidr0, 1, 1) && BMVAL(etmidr0, 2, 2))
                drvdata->instrp0 = true;
        else
                drvdata->instrp0 = false;

        /* TRCBB, bit[5] Branch broadcast tracing support bit */
        if (BMVAL(etmidr0, 5, 5))
                drvdata->trcbb = true;
        else
                drvdata->trcbb = false;

        /* TRCCOND, bit[6] Conditional instruction tracing support bit */
        if (BMVAL(etmidr0, 6, 6))
                drvdata->trccond = true;
        else
                drvdata->trccond = false;

        /* TRCCCI, bit[7] Cycle counting instruction bit */
        if (BMVAL(etmidr0, 7, 7))
                drvdata->trccci = true;
        else
                drvdata->trccci = false;

        /* RETSTACK, bit[9] Return stack bit */
        if (BMVAL(etmidr0, 9, 9))
                drvdata->retstack = true;
        else
                drvdata->retstack = false;

        /* NUMEVENT, bits[11:10] Number of events field */
        drvdata->nr_event = BMVAL(etmidr0, 10, 11);
        /* QSUPP, bits[16:15] Q element support field */
        drvdata->q_support = BMVAL(etmidr0, 15, 16);
        /* TSSIZE, bits[28:24] Global timestamp size field */
        drvdata->ts_size = BMVAL(etmidr0, 24, 28);

        /* base architecture of trace unit */
        etmidr1 = readl_relaxed(drvdata->base + TRCIDR1);
        /*
         * TRCARCHMIN, bits[7:4] architecture minor version number
         * TRCARCHMAJ, bits[11:8] architecture major version number
         */
        drvdata->arch = BMVAL(etmidr1, 4, 11);

        /* maximum size of resources */
        etmidr2 = readl_relaxed(drvdata->base + TRCIDR2);
        /* CIDSIZE, bits[9:5] Indicates the Context ID size */
        drvdata->ctxid_size = BMVAL(etmidr2, 5, 9);
        /* VMIDSIZE, bits[14:10] Indicates the VMID size */
        drvdata->vmid_size = BMVAL(etmidr2, 10, 14);
        /* CCSIZE, bits[28:25] size of the cycle counter in bits minus 12 */
        drvdata->ccsize = BMVAL(etmidr2, 25, 28);

        etmidr3 = readl_relaxed(drvdata->base + TRCIDR3);
        /* CCITMIN, bits[11:0] minimum threshold value that can be programmed */
        drvdata->ccitmin = BMVAL(etmidr3, 0, 11);
        /* EXLEVEL_S, bits[19:16] Secure state instruction tracing */
        drvdata->s_ex_level = BMVAL(etmidr3, 16, 19);
        /* EXLEVEL_NS, bits[23:20] Non-secure state instruction tracing */
        drvdata->ns_ex_level = BMVAL(etmidr3, 20, 23);

        /*
         * TRCERR, bit[24] whether a trace unit can trace a
         * system error exception.
         */
        if (BMVAL(etmidr3, 24, 24))
                drvdata->trc_error = true;
        else
                drvdata->trc_error = false;

        /* SYNCPR, bit[25] implementation has a fixed synchronization period? */
        if (BMVAL(etmidr3, 25, 25))
                drvdata->syncpr = true;
        else
                drvdata->syncpr = false;

        /* STALLCTL, bit[26] is stall control implemented? */
        if (BMVAL(etmidr3, 26, 26))
                drvdata->stallctl = true;
        else
                drvdata->stallctl = false;

        /* SYSSTALL, bit[27] implementation can support stall control? */
        if (BMVAL(etmidr3, 27, 27))
                drvdata->sysstall = true;
        else
                drvdata->sysstall = false;

        /* NUMPROC, bits[30:28] the number of PEs available for tracing */
        drvdata->nr_pe = BMVAL(etmidr3, 28, 30);

        /* NOOVERFLOW, bit[31] is trace overflow prevention supported */
        if (BMVAL(etmidr3, 31, 31))
                drvdata->nooverflow = true;
        else
                drvdata->nooverflow = false;

        /* number of resources trace unit supports */
        etmidr4 = readl_relaxed(drvdata->base + TRCIDR4);
        /* NUMACPAIRS, bits[3:0] number of addr comparator pairs for tracing */
        drvdata->nr_addr_cmp = BMVAL(etmidr4, 0, 3);
        /* NUMPC, bits[15:12] number of PE comparator inputs for tracing */
        drvdata->nr_pe_cmp = BMVAL(etmidr4, 12, 15);
        /*
         * NUMRSPAIR, bits[19:16]
         * The number of resource pairs conveyed by the HW starts at 0, i.e. a
         * value of 0x0 indicates 1 resource pair, 0x1 indicates two and so on.
         * As such add 1 to the value of NUMRSPAIR for a better representation.
         */
        drvdata->nr_resource = BMVAL(etmidr4, 16, 19) + 1;
        /*
         * NUMSSCC, bits[23:20] the number of single-shot
         * comparator control for tracing
         */
        drvdata->nr_ss_cmp = BMVAL(etmidr4, 20, 23);
        /* NUMCIDC, bits[27:24] number of Context ID comparators for tracing */
        drvdata->numcidc = BMVAL(etmidr4, 24, 27);
        /* NUMVMIDC, bits[31:28] number of VMID comparators for tracing */
        drvdata->numvmidc = BMVAL(etmidr4, 28, 31);

        etmidr5 = readl_relaxed(drvdata->base + TRCIDR5);
        /* NUMEXTIN, bits[8:0] number of external inputs implemented */
        drvdata->nr_ext_inp = BMVAL(etmidr5, 0, 8);
        /* TRACEIDSIZE, bits[21:16] indicates the trace ID width */
        drvdata->trcid_size = BMVAL(etmidr5, 16, 21);
        /* ATBTRIG, bit[22] implementation can support ATB triggers? */
        if (BMVAL(etmidr5, 22, 22))
                drvdata->atbtrig = true;
        else
                drvdata->atbtrig = false;
        /*
         * LPOVERRIDE, bit[23] implementation supports
         * low-power state override
         */
        if (BMVAL(etmidr5, 23, 23))
                drvdata->lpoverride = true;
        else
                drvdata->lpoverride = false;
        /* NUMSEQSTATE, bits[27:25] number of sequencer states implemented */
        drvdata->nrseqstate = BMVAL(etmidr5, 25, 27);
        /* NUMCNTR, bits[30:28] number of counters available for tracing */
        drvdata->nr_cntr = BMVAL(etmidr5, 28, 30);
        CS_LOCK(drvdata->base);
}

static void etm4_set_default(struct etmv4_config *config)
{
        if (WARN_ON_ONCE(!config))
                return;

        /*
         * Make default initialisation trace everything
         *
         * Select the "always true" resource selector on the
         * "Enabling Event" line and configure address range comparator
         * '0' to trace all the possible address range. From there
         * configure the "include/exclude" engine to include address
         * range comparator '0'.
         */

        /* disable all events tracing */
        config->eventctrl0 = 0x0;
        config->eventctrl1 = 0x0;

        /* disable stalling */
        config->stall_ctrl = 0x0;

        /* enable trace synchronization every 4096 bytes, if available */
        config->syncfreq = 0xC;

        /* disable timestamp event */
        config->ts_ctrl = 0x0;

        /* TRCVICTLR::EVENT = 0x01, select the always on logic */
        config->vinst_ctrl |= BIT(0);

        /*
         * TRCVICTLR::SSSTATUS == 1, the start-stop logic is
         * in the started state
         */
        config->vinst_ctrl |= BIT(9);

        /*
         * Configure address range comparator '0' to encompass all
         * possible addresses.
         */

        /* First half of default address comparator: start at address 0 */
        config->addr_val[ETM_DEFAULT_ADDR_COMP] = 0x0;
        /* trace instruction addresses */
        config->addr_acc[ETM_DEFAULT_ADDR_COMP] &= ~(BIT(0) | BIT(1));
        /* EXLEVEL_NS, bits[15:12], only trace application and kernel space */
        config->addr_acc[ETM_DEFAULT_ADDR_COMP] |= ETM_EXLEVEL_NS_HYP;
        /* EXLEVEL_S, bits[11:8], don't trace anything in secure state */
        config->addr_acc[ETM_DEFAULT_ADDR_COMP] |= (ETM_EXLEVEL_S_APP |
                                                    ETM_EXLEVEL_S_OS |
                                                    ETM_EXLEVEL_S_HYP);
        config->addr_type[ETM_DEFAULT_ADDR_COMP] = ETM_ADDR_TYPE_RANGE;

        /*
         * Second half of default address comparator: go all
         * the way to the top.
         */
        config->addr_val[ETM_DEFAULT_ADDR_COMP + 1] = ~0x0;
        /* trace instruction addresses */
        config->addr_acc[ETM_DEFAULT_ADDR_COMP + 1] &= ~(BIT(0) | BIT(1));
        /* Address comparator type must be equal for both halves */
        config->addr_acc[ETM_DEFAULT_ADDR_COMP + 1] =
                config->addr_acc[ETM_DEFAULT_ADDR_COMP];
        config->addr_type[ETM_DEFAULT_ADDR_COMP + 1] = ETM_ADDR_TYPE_RANGE;

        /*
         * Configure the ViewInst function to filter on address range
         * comparator '0'.
         */
        config->viiectlr = BIT(0);

        /* no start-stop filtering for ViewInst */
        config->vissctlr = 0x0;
}

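/*
 * Restrict the default "trace everything" address comparator pair to skip
 * kernel or user space, as requested, by setting the relevant EXLEVEL_NS
 * bits in both halves of the comparator.
 */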
void etm4_config_trace_mode(struct etmv4_config *config)
{
        u32 addr_acc, mode;

        mode = config->mode;
        mode &= (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER);

        /* excluding kernel AND user space doesn't make sense */
        WARN_ON_ONCE(mode == (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER));

        /* nothing to do if neither flag is set */
        if (!(mode & ETM_MODE_EXCL_KERN) && !(mode & ETM_MODE_EXCL_USER))
                return;

        addr_acc = config->addr_acc[ETM_DEFAULT_ADDR_COMP];
        /* clear default config */
        addr_acc &= ~(ETM_EXLEVEL_NS_APP | ETM_EXLEVEL_NS_OS);

        /*
         * EXLEVEL_NS, bits[15:12]
         * The Exception levels are:
         *   Bit[12] Exception level 0 - Application
         *   Bit[13] Exception level 1 - OS
         *   Bit[14] Exception level 2 - Hypervisor
         *   Bit[15] Never implemented
         */
        if (mode & ETM_MODE_EXCL_KERN)
                addr_acc |= ETM_EXLEVEL_NS_OS;
        else
                addr_acc |= ETM_EXLEVEL_NS_APP;

        config->addr_acc[ETM_DEFAULT_ADDR_COMP] = addr_acc;
        config->addr_acc[ETM_DEFAULT_ADDR_COMP + 1] = addr_acc;
}

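/*
 * CPU hotplug callback: re-unlock and reprogram a tracer that was in use
 * when its CPU comes back (CPU_STARTING), honour boot_enable for CPUs that
 * come online later (CPU_ONLINE), and stop tracing before a CPU is taken
 * down (CPU_DYING).
 */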
static int etm4_cpu_callback(struct notifier_block *nfb, unsigned long action,
                             void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;

        if (!etmdrvdata[cpu])
                goto out;

        switch (action & (~CPU_TASKS_FROZEN)) {
        case CPU_STARTING:
                spin_lock(&etmdrvdata[cpu]->spinlock);
                if (!etmdrvdata[cpu]->os_unlock) {
                        etm4_os_unlock(etmdrvdata[cpu]);
                        etmdrvdata[cpu]->os_unlock = true;
                }

                if (local_read(&etmdrvdata[cpu]->mode))
                        etm4_enable_hw(etmdrvdata[cpu]);
                spin_unlock(&etmdrvdata[cpu]->spinlock);
                break;

        case CPU_ONLINE:
                if (etmdrvdata[cpu]->boot_enable &&
                    !etmdrvdata[cpu]->sticky_enable)
                        coresight_enable(etmdrvdata[cpu]->csdev);
                break;

        case CPU_DYING:
                spin_lock(&etmdrvdata[cpu]->spinlock);
                if (local_read(&etmdrvdata[cpu]->mode))
                        etm4_disable_hw(etmdrvdata[cpu]);
                spin_unlock(&etmdrvdata[cpu]->spinlock);
                break;
        }
out:
        return NOTIFY_OK;
}

static struct notifier_block etm4_cpu_notifier = {
        .notifier_call = etm4_cpu_callback,
};

static void etm4_init_trace_id(struct etmv4_drvdata *drvdata)
{
        drvdata->trcid = coresight_get_trace_id(drvdata->cpu);
}

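/*
 * AMBA probe: map the ETM registers, read the per-CPU architecture data on
 * the CPU this tracer is affined to, register the CPU hotplug notifier for
 * the first instance, then register with the coresight core and perf.
 */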
static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
{
        int ret;
        void __iomem *base;
        struct device *dev = &adev->dev;
        struct coresight_platform_data *pdata = NULL;
        struct etmv4_drvdata *drvdata;
        struct resource *res = &adev->res;
        struct coresight_desc *desc;
        struct device_node *np = adev->dev.of_node;

        desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
        if (!desc)
                return -ENOMEM;

        drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
        if (!drvdata)
                return -ENOMEM;

        if (np) {
                pdata = of_get_coresight_platform_data(dev, np);
                if (IS_ERR(pdata))
                        return PTR_ERR(pdata);
                adev->dev.platform_data = pdata;
        }

        drvdata->dev = &adev->dev;
        dev_set_drvdata(dev, drvdata);

        /* Validity for the resource is already checked by the AMBA core */
        base = devm_ioremap_resource(dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);

        drvdata->base = base;

        spin_lock_init(&drvdata->spinlock);

        drvdata->cpu = pdata ? pdata->cpu : 0;

        get_online_cpus();
        etmdrvdata[drvdata->cpu] = drvdata;

        if (smp_call_function_single(drvdata->cpu,
                                     etm4_init_arch_data, drvdata, 1))
                dev_err(dev, "ETM arch init failed\n");

        if (!etm4_count++)
                register_hotcpu_notifier(&etm4_cpu_notifier);

        put_online_cpus();

        if (!etm4_arch_supported(drvdata->arch)) {
                ret = -EINVAL;
                goto err_arch_supported;
        }

        etm4_init_trace_id(drvdata);
        etm4_set_default(&drvdata->config);

        desc->type = CORESIGHT_DEV_TYPE_SOURCE;
        desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
        desc->ops = &etm4_cs_ops;
        desc->pdata = pdata;
        desc->dev = dev;
        desc->groups = coresight_etmv4_groups;
        drvdata->csdev = coresight_register(desc);
        if (IS_ERR(drvdata->csdev)) {
                ret = PTR_ERR(drvdata->csdev);
                goto err_arch_supported;
        }

        ret = etm_perf_symlink(drvdata->csdev, true);
        if (ret) {
                coresight_unregister(drvdata->csdev);
                goto err_arch_supported;
        }

        pm_runtime_put(&adev->dev);
        dev_info(dev, "%s initialized\n", (char *)id->data);

        if (boot_enable) {
                coresight_enable(drvdata->csdev);
                drvdata->boot_enable = true;
        }

        return 0;

err_arch_supported:
        if (--etm4_count == 0)
                unregister_hotcpu_notifier(&etm4_cpu_notifier);
        return ret;
}

static struct amba_id etm4_ids[] = {
        {       /* ETM 4.0 - Qualcomm */
                .id     = 0x0003b95d,
                .mask   = 0x0003ffff,
                .data   = "ETM 4.0",
        },
        {       /* ETM 4.0 - Juno board */
                .id     = 0x000bb95e,
                .mask   = 0x000fffff,
                .data   = "ETM 4.0",
        },
        {       /* ETM 4.0 - A72, Maia, HiSilicon */
                .id     = 0x000bb95a,
                .mask   = 0x000fffff,
                .data   = "ETM 4.0",
        },
        { 0, 0},
};

static struct amba_driver etm4x_driver = {
        .drv = {
                .name   = "coresight-etm4x",
                .suppress_bind_attrs = true,
        },
        .probe          = etm4_probe,
        .id_table       = etm4_ids,
};
builtin_amba_driver(etm4x_driver);