1 /* Copyright (c) 2014, The Linux Foundation. All rights reserved.
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
13 #include <linux/kernel.h>
14 #include <linux/moduleparam.h>
15 #include <linux/init.h>
16 #include <linux/types.h>
17 #include <linux/device.h>
18 #include <linux/module.h>
20 #include <linux/err.h>
22 #include <linux/slab.h>
23 #include <linux/delay.h>
24 #include <linux/smp.h>
25 #include <linux/sysfs.h>
26 #include <linux/stat.h>
27 #include <linux/clk.h>
28 #include <linux/cpu.h>
29 #include <linux/coresight.h>
30 #include <linux/pm_wakeup.h>
31 #include <linux/amba/bus.h>
32 #include <linux/seq_file.h>
33 #include <linux/uaccess.h>
34 #include <linux/pm_runtime.h>
35 #include <asm/sections.h>
37 #include "coresight-etm4x.h"
39 static int boot_enable
;
40 module_param_named(boot_enable
, boot_enable
, int, S_IRUGO
);
42 /* The number of ETMv4 currently registered */
43 static int etm4_count
;
44 static struct etmv4_drvdata
*etmdrvdata
[NR_CPUS
];
46 static void etm4_os_unlock(void *info
)
48 struct etmv4_drvdata
*drvdata
= (struct etmv4_drvdata
*)info
;
50 /* Writing any value to ETMOSLAR unlocks the trace registers */
51 writel_relaxed(0x0, drvdata
->base
+ TRCOSLAR
);
55 static bool etm4_arch_supported(u8 arch
)
66 static int etm4_trace_id(struct coresight_device
*csdev
)
68 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(csdev
->dev
.parent
);
73 return drvdata
->trcid
;
75 pm_runtime_get_sync(drvdata
->dev
);
76 spin_lock_irqsave(&drvdata
->spinlock
, flags
);
78 CS_UNLOCK(drvdata
->base
);
79 trace_id
= readl_relaxed(drvdata
->base
+ TRCTRACEIDR
);
80 trace_id
&= ETM_TRACEID_MASK
;
81 CS_LOCK(drvdata
->base
);
83 spin_unlock_irqrestore(&drvdata
->spinlock
, flags
);
84 pm_runtime_put(drvdata
->dev
);
89 static void etm4_enable_hw(void *info
)
92 struct etmv4_drvdata
*drvdata
= info
;
94 CS_UNLOCK(drvdata
->base
);
96 etm4_os_unlock(drvdata
);
98 /* Disable the trace unit before programming trace registers */
99 writel_relaxed(0, drvdata
->base
+ TRCPRGCTLR
);
101 /* wait for TRCSTATR.IDLE to go up */
102 if (coresight_timeout(drvdata
->base
, TRCSTATR
, TRCSTATR_IDLE_BIT
, 1))
103 dev_err(drvdata
->dev
,
104 "timeout observed when probing at offset %#x\n",
107 writel_relaxed(drvdata
->pe_sel
, drvdata
->base
+ TRCPROCSELR
);
108 writel_relaxed(drvdata
->cfg
, drvdata
->base
+ TRCCONFIGR
);
109 /* nothing specific implemented */
110 writel_relaxed(0x0, drvdata
->base
+ TRCAUXCTLR
);
111 writel_relaxed(drvdata
->eventctrl0
, drvdata
->base
+ TRCEVENTCTL0R
);
112 writel_relaxed(drvdata
->eventctrl1
, drvdata
->base
+ TRCEVENTCTL1R
);
113 writel_relaxed(drvdata
->stall_ctrl
, drvdata
->base
+ TRCSTALLCTLR
);
114 writel_relaxed(drvdata
->ts_ctrl
, drvdata
->base
+ TRCTSCTLR
);
115 writel_relaxed(drvdata
->syncfreq
, drvdata
->base
+ TRCSYNCPR
);
116 writel_relaxed(drvdata
->ccctlr
, drvdata
->base
+ TRCCCCTLR
);
117 writel_relaxed(drvdata
->bb_ctrl
, drvdata
->base
+ TRCBBCTLR
);
118 writel_relaxed(drvdata
->trcid
, drvdata
->base
+ TRCTRACEIDR
);
119 writel_relaxed(drvdata
->vinst_ctrl
, drvdata
->base
+ TRCVICTLR
);
120 writel_relaxed(drvdata
->viiectlr
, drvdata
->base
+ TRCVIIECTLR
);
121 writel_relaxed(drvdata
->vissctlr
,
122 drvdata
->base
+ TRCVISSCTLR
);
123 writel_relaxed(drvdata
->vipcssctlr
,
124 drvdata
->base
+ TRCVIPCSSCTLR
);
125 for (i
= 0; i
< drvdata
->nrseqstate
- 1; i
++)
126 writel_relaxed(drvdata
->seq_ctrl
[i
],
127 drvdata
->base
+ TRCSEQEVRn(i
));
128 writel_relaxed(drvdata
->seq_rst
, drvdata
->base
+ TRCSEQRSTEVR
);
129 writel_relaxed(drvdata
->seq_state
, drvdata
->base
+ TRCSEQSTR
);
130 writel_relaxed(drvdata
->ext_inp
, drvdata
->base
+ TRCEXTINSELR
);
131 for (i
= 0; i
< drvdata
->nr_cntr
; i
++) {
132 writel_relaxed(drvdata
->cntrldvr
[i
],
133 drvdata
->base
+ TRCCNTRLDVRn(i
));
134 writel_relaxed(drvdata
->cntr_ctrl
[i
],
135 drvdata
->base
+ TRCCNTCTLRn(i
));
136 writel_relaxed(drvdata
->cntr_val
[i
],
137 drvdata
->base
+ TRCCNTVRn(i
));
139 for (i
= 0; i
< drvdata
->nr_resource
; i
++)
140 writel_relaxed(drvdata
->res_ctrl
[i
],
141 drvdata
->base
+ TRCRSCTLRn(i
));
143 for (i
= 0; i
< drvdata
->nr_ss_cmp
; i
++) {
144 writel_relaxed(drvdata
->ss_ctrl
[i
],
145 drvdata
->base
+ TRCSSCCRn(i
));
146 writel_relaxed(drvdata
->ss_status
[i
],
147 drvdata
->base
+ TRCSSCSRn(i
));
148 writel_relaxed(drvdata
->ss_pe_cmp
[i
],
149 drvdata
->base
+ TRCSSPCICRn(i
));
151 for (i
= 0; i
< drvdata
->nr_addr_cmp
; i
++) {
152 writeq_relaxed(drvdata
->addr_val
[i
],
153 drvdata
->base
+ TRCACVRn(i
));
154 writeq_relaxed(drvdata
->addr_acc
[i
],
155 drvdata
->base
+ TRCACATRn(i
));
157 for (i
= 0; i
< drvdata
->numcidc
; i
++)
158 writeq_relaxed(drvdata
->ctxid_pid
[i
],
159 drvdata
->base
+ TRCCIDCVRn(i
));
160 writel_relaxed(drvdata
->ctxid_mask0
, drvdata
->base
+ TRCCIDCCTLR0
);
161 writel_relaxed(drvdata
->ctxid_mask1
, drvdata
->base
+ TRCCIDCCTLR1
);
163 for (i
= 0; i
< drvdata
->numvmidc
; i
++)
164 writeq_relaxed(drvdata
->vmid_val
[i
],
165 drvdata
->base
+ TRCVMIDCVRn(i
));
166 writel_relaxed(drvdata
->vmid_mask0
, drvdata
->base
+ TRCVMIDCCTLR0
);
167 writel_relaxed(drvdata
->vmid_mask1
, drvdata
->base
+ TRCVMIDCCTLR1
);
169 /* Enable the trace unit */
170 writel_relaxed(1, drvdata
->base
+ TRCPRGCTLR
);
172 /* wait for TRCSTATR.IDLE to go back down to '0' */
173 if (coresight_timeout(drvdata
->base
, TRCSTATR
, TRCSTATR_IDLE_BIT
, 0))
174 dev_err(drvdata
->dev
,
175 "timeout observed when probing at offset %#x\n",
178 CS_LOCK(drvdata
->base
);
180 dev_dbg(drvdata
->dev
, "cpu: %d enable smp call done\n", drvdata
->cpu
);
183 static int etm4_enable(struct coresight_device
*csdev
)
185 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(csdev
->dev
.parent
);
188 pm_runtime_get_sync(drvdata
->dev
);
189 spin_lock(&drvdata
->spinlock
);
192 * Executing etm4_enable_hw on the cpu whose ETM is being enabled
193 * ensures that register writes occur when cpu is powered.
195 ret
= smp_call_function_single(drvdata
->cpu
,
196 etm4_enable_hw
, drvdata
, 1);
199 drvdata
->enable
= true;
200 drvdata
->sticky_enable
= true;
202 spin_unlock(&drvdata
->spinlock
);
204 dev_info(drvdata
->dev
, "ETM tracing enabled\n");
207 spin_unlock(&drvdata
->spinlock
);
208 pm_runtime_put(drvdata
->dev
);
212 static void etm4_disable_hw(void *info
)
215 struct etmv4_drvdata
*drvdata
= info
;
217 CS_UNLOCK(drvdata
->base
);
219 control
= readl_relaxed(drvdata
->base
+ TRCPRGCTLR
);
221 /* EN, bit[0] Trace unit enable bit */
224 /* make sure everything completes before disabling */
227 writel_relaxed(control
, drvdata
->base
+ TRCPRGCTLR
);
229 CS_LOCK(drvdata
->base
);
231 dev_dbg(drvdata
->dev
, "cpu: %d disable smp call done\n", drvdata
->cpu
);
234 static void etm4_disable(struct coresight_device
*csdev
)
236 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(csdev
->dev
.parent
);
239 * Taking hotplug lock here protects from clocks getting disabled
240 * with tracing being left on (crash scenario) if user disable occurs
241 * after cpu online mask indicates the cpu is offline but before the
242 * DYING hotplug callback is serviced by the ETM driver.
245 spin_lock(&drvdata
->spinlock
);
248 * Executing etm4_disable_hw on the cpu whose ETM is being disabled
249 * ensures that register writes occur when cpu is powered.
251 smp_call_function_single(drvdata
->cpu
, etm4_disable_hw
, drvdata
, 1);
252 drvdata
->enable
= false;
254 spin_unlock(&drvdata
->spinlock
);
257 pm_runtime_put(drvdata
->dev
);
259 dev_info(drvdata
->dev
, "ETM tracing disabled\n");
262 static const struct coresight_ops_source etm4_source_ops
= {
263 .trace_id
= etm4_trace_id
,
264 .enable
= etm4_enable
,
265 .disable
= etm4_disable
,
268 static const struct coresight_ops etm4_cs_ops
= {
269 .source_ops
= &etm4_source_ops
,
272 static int etm4_set_mode_exclude(struct etmv4_drvdata
*drvdata
, bool exclude
)
274 u8 idx
= drvdata
->addr_idx
;
277 * TRCACATRn.TYPE bit[1:0]: type of comparison
278 * the trace unit performs
280 if (BMVAL(drvdata
->addr_acc
[idx
], 0, 1) == ETM_INSTR_ADDR
) {
285 * We are performing instruction address comparison. Set the
286 * relevant bit of ViewInst Include/Exclude Control register
287 * for corresponding address comparator pair.
289 if (drvdata
->addr_type
[idx
] != ETM_ADDR_TYPE_RANGE
||
290 drvdata
->addr_type
[idx
+ 1] != ETM_ADDR_TYPE_RANGE
)
293 if (exclude
== true) {
295 * Set exclude bit and unset the include bit
296 * corresponding to comparator pair
298 drvdata
->viiectlr
|= BIT(idx
/ 2 + 16);
299 drvdata
->viiectlr
&= ~BIT(idx
/ 2);
302 * Set include bit and unset exclude bit
303 * corresponding to comparator pair
305 drvdata
->viiectlr
|= BIT(idx
/ 2);
306 drvdata
->viiectlr
&= ~BIT(idx
/ 2 + 16);
312 static ssize_t
nr_pe_cmp_show(struct device
*dev
,
313 struct device_attribute
*attr
,
317 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
319 val
= drvdata
->nr_pe_cmp
;
320 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
322 static DEVICE_ATTR_RO(nr_pe_cmp
);
324 static ssize_t
nr_addr_cmp_show(struct device
*dev
,
325 struct device_attribute
*attr
,
329 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
331 val
= drvdata
->nr_addr_cmp
;
332 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
334 static DEVICE_ATTR_RO(nr_addr_cmp
);
336 static ssize_t
nr_cntr_show(struct device
*dev
,
337 struct device_attribute
*attr
,
341 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
343 val
= drvdata
->nr_cntr
;
344 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
346 static DEVICE_ATTR_RO(nr_cntr
);
348 static ssize_t
nr_ext_inp_show(struct device
*dev
,
349 struct device_attribute
*attr
,
353 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
355 val
= drvdata
->nr_ext_inp
;
356 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
358 static DEVICE_ATTR_RO(nr_ext_inp
);
360 static ssize_t
numcidc_show(struct device
*dev
,
361 struct device_attribute
*attr
,
365 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
367 val
= drvdata
->numcidc
;
368 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
370 static DEVICE_ATTR_RO(numcidc
);
372 static ssize_t
numvmidc_show(struct device
*dev
,
373 struct device_attribute
*attr
,
377 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
379 val
= drvdata
->numvmidc
;
380 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
382 static DEVICE_ATTR_RO(numvmidc
);
384 static ssize_t
nrseqstate_show(struct device
*dev
,
385 struct device_attribute
*attr
,
389 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
391 val
= drvdata
->nrseqstate
;
392 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
394 static DEVICE_ATTR_RO(nrseqstate
);
396 static ssize_t
nr_resource_show(struct device
*dev
,
397 struct device_attribute
*attr
,
401 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
403 val
= drvdata
->nr_resource
;
404 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
406 static DEVICE_ATTR_RO(nr_resource
);
408 static ssize_t
nr_ss_cmp_show(struct device
*dev
,
409 struct device_attribute
*attr
,
413 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
415 val
= drvdata
->nr_ss_cmp
;
416 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
418 static DEVICE_ATTR_RO(nr_ss_cmp
);
420 static ssize_t
reset_store(struct device
*dev
,
421 struct device_attribute
*attr
,
422 const char *buf
, size_t size
)
426 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
428 if (kstrtoul(buf
, 16, &val
))
431 spin_lock(&drvdata
->spinlock
);
435 /* Disable data tracing: do not trace load and store data transfers */
436 drvdata
->mode
&= ~(ETM_MODE_LOAD
| ETM_MODE_STORE
);
437 drvdata
->cfg
&= ~(BIT(1) | BIT(2));
439 /* Disable data value and data address tracing */
440 drvdata
->mode
&= ~(ETM_MODE_DATA_TRACE_ADDR
|
441 ETM_MODE_DATA_TRACE_VAL
);
442 drvdata
->cfg
&= ~(BIT(16) | BIT(17));
444 /* Disable all events tracing */
445 drvdata
->eventctrl0
= 0x0;
446 drvdata
->eventctrl1
= 0x0;
448 /* Disable timestamp event */
449 drvdata
->ts_ctrl
= 0x0;
451 /* Disable stalling */
452 drvdata
->stall_ctrl
= 0x0;
454 /* Reset trace synchronization period to 2^8 = 256 bytes*/
455 if (drvdata
->syncpr
== false)
456 drvdata
->syncfreq
= 0x8;
459 * Enable ViewInst to trace everything with start-stop logic in
460 * started state. ARM recommends start-stop logic is set before
463 drvdata
->vinst_ctrl
|= BIT(0);
464 if (drvdata
->nr_addr_cmp
== true) {
465 drvdata
->mode
|= ETM_MODE_VIEWINST_STARTSTOP
;
466 /* SSSTATUS, bit[9] */
467 drvdata
->vinst_ctrl
|= BIT(9);
470 /* No address range filtering for ViewInst */
471 drvdata
->viiectlr
= 0x0;
473 /* No start-stop filtering for ViewInst */
474 drvdata
->vissctlr
= 0x0;
476 /* Disable seq events */
477 for (i
= 0; i
< drvdata
->nrseqstate
-1; i
++)
478 drvdata
->seq_ctrl
[i
] = 0x0;
479 drvdata
->seq_rst
= 0x0;
480 drvdata
->seq_state
= 0x0;
482 /* Disable external input events */
483 drvdata
->ext_inp
= 0x0;
485 drvdata
->cntr_idx
= 0x0;
486 for (i
= 0; i
< drvdata
->nr_cntr
; i
++) {
487 drvdata
->cntrldvr
[i
] = 0x0;
488 drvdata
->cntr_ctrl
[i
] = 0x0;
489 drvdata
->cntr_val
[i
] = 0x0;
492 drvdata
->res_idx
= 0x0;
493 for (i
= 0; i
< drvdata
->nr_resource
; i
++)
494 drvdata
->res_ctrl
[i
] = 0x0;
496 for (i
= 0; i
< drvdata
->nr_ss_cmp
; i
++) {
497 drvdata
->ss_ctrl
[i
] = 0x0;
498 drvdata
->ss_pe_cmp
[i
] = 0x0;
501 drvdata
->addr_idx
= 0x0;
502 for (i
= 0; i
< drvdata
->nr_addr_cmp
* 2; i
++) {
503 drvdata
->addr_val
[i
] = 0x0;
504 drvdata
->addr_acc
[i
] = 0x0;
505 drvdata
->addr_type
[i
] = ETM_ADDR_TYPE_NONE
;
508 drvdata
->ctxid_idx
= 0x0;
509 for (i
= 0; i
< drvdata
->numcidc
; i
++) {
510 drvdata
->ctxid_pid
[i
] = 0x0;
511 drvdata
->ctxid_vpid
[i
] = 0x0;
514 drvdata
->ctxid_mask0
= 0x0;
515 drvdata
->ctxid_mask1
= 0x0;
517 drvdata
->vmid_idx
= 0x0;
518 for (i
= 0; i
< drvdata
->numvmidc
; i
++)
519 drvdata
->vmid_val
[i
] = 0x0;
520 drvdata
->vmid_mask0
= 0x0;
521 drvdata
->vmid_mask1
= 0x0;
523 drvdata
->trcid
= drvdata
->cpu
+ 1;
524 spin_unlock(&drvdata
->spinlock
);
527 static DEVICE_ATTR_WO(reset
);
529 static ssize_t
mode_show(struct device
*dev
,
530 struct device_attribute
*attr
,
534 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
537 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
540 static ssize_t
mode_store(struct device
*dev
,
541 struct device_attribute
*attr
,
542 const char *buf
, size_t size
)
544 unsigned long val
, mode
;
545 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
547 if (kstrtoul(buf
, 16, &val
))
550 spin_lock(&drvdata
->spinlock
);
551 drvdata
->mode
= val
& ETMv4_MODE_ALL
;
553 if (drvdata
->mode
& ETM_MODE_EXCLUDE
)
554 etm4_set_mode_exclude(drvdata
, true);
556 etm4_set_mode_exclude(drvdata
, false);
558 if (drvdata
->instrp0
== true) {
559 /* start by clearing instruction P0 field */
560 drvdata
->cfg
&= ~(BIT(1) | BIT(2));
561 if (drvdata
->mode
& ETM_MODE_LOAD
)
562 /* 0b01 Trace load instructions as P0 instructions */
563 drvdata
->cfg
|= BIT(1);
564 if (drvdata
->mode
& ETM_MODE_STORE
)
565 /* 0b10 Trace store instructions as P0 instructions */
566 drvdata
->cfg
|= BIT(2);
567 if (drvdata
->mode
& ETM_MODE_LOAD_STORE
)
569 * 0b11 Trace load and store instructions
572 drvdata
->cfg
|= BIT(1) | BIT(2);
575 /* bit[3], Branch broadcast mode */
576 if ((drvdata
->mode
& ETM_MODE_BB
) && (drvdata
->trcbb
== true))
577 drvdata
->cfg
|= BIT(3);
579 drvdata
->cfg
&= ~BIT(3);
581 /* bit[4], Cycle counting instruction trace bit */
582 if ((drvdata
->mode
& ETMv4_MODE_CYCACC
) &&
583 (drvdata
->trccci
== true))
584 drvdata
->cfg
|= BIT(4);
586 drvdata
->cfg
&= ~BIT(4);
588 /* bit[6], Context ID tracing bit */
589 if ((drvdata
->mode
& ETMv4_MODE_CTXID
) && (drvdata
->ctxid_size
))
590 drvdata
->cfg
|= BIT(6);
592 drvdata
->cfg
&= ~BIT(6);
594 if ((drvdata
->mode
& ETM_MODE_VMID
) && (drvdata
->vmid_size
))
595 drvdata
->cfg
|= BIT(7);
597 drvdata
->cfg
&= ~BIT(7);
599 /* bits[10:8], Conditional instruction tracing bit */
600 mode
= ETM_MODE_COND(drvdata
->mode
);
601 if (drvdata
->trccond
== true) {
602 drvdata
->cfg
&= ~(BIT(8) | BIT(9) | BIT(10));
603 drvdata
->cfg
|= mode
<< 8;
606 /* bit[11], Global timestamp tracing bit */
607 if ((drvdata
->mode
& ETMv4_MODE_TIMESTAMP
) && (drvdata
->ts_size
))
608 drvdata
->cfg
|= BIT(11);
610 drvdata
->cfg
&= ~BIT(11);
612 /* bit[12], Return stack enable bit */
613 if ((drvdata
->mode
& ETM_MODE_RETURNSTACK
) &&
614 (drvdata
->retstack
== true))
615 drvdata
->cfg
|= BIT(12);
617 drvdata
->cfg
&= ~BIT(12);
619 /* bits[14:13], Q element enable field */
620 mode
= ETM_MODE_QELEM(drvdata
->mode
);
621 /* start by clearing QE bits */
622 drvdata
->cfg
&= ~(BIT(13) | BIT(14));
623 /* if supported, Q elements with instruction counts are enabled */
624 if ((mode
& BIT(0)) && (drvdata
->q_support
& BIT(0)))
625 drvdata
->cfg
|= BIT(13);
627 * if supported, Q elements with and without instruction
630 if ((mode
& BIT(1)) && (drvdata
->q_support
& BIT(1)))
631 drvdata
->cfg
|= BIT(14);
633 /* bit[11], AMBA Trace Bus (ATB) trigger enable bit */
634 if ((drvdata
->mode
& ETM_MODE_ATB_TRIGGER
) &&
635 (drvdata
->atbtrig
== true))
636 drvdata
->eventctrl1
|= BIT(11);
638 drvdata
->eventctrl1
&= ~BIT(11);
640 /* bit[12], Low-power state behavior override bit */
641 if ((drvdata
->mode
& ETM_MODE_LPOVERRIDE
) &&
642 (drvdata
->lpoverride
== true))
643 drvdata
->eventctrl1
|= BIT(12);
645 drvdata
->eventctrl1
&= ~BIT(12);
647 /* bit[8], Instruction stall bit */
648 if (drvdata
->mode
& ETM_MODE_ISTALL_EN
)
649 drvdata
->stall_ctrl
|= BIT(8);
651 drvdata
->stall_ctrl
&= ~BIT(8);
653 /* bit[10], Prioritize instruction trace bit */
654 if (drvdata
->mode
& ETM_MODE_INSTPRIO
)
655 drvdata
->stall_ctrl
|= BIT(10);
657 drvdata
->stall_ctrl
&= ~BIT(10);
659 /* bit[13], Trace overflow prevention bit */
660 if ((drvdata
->mode
& ETM_MODE_NOOVERFLOW
) &&
661 (drvdata
->nooverflow
== true))
662 drvdata
->stall_ctrl
|= BIT(13);
664 drvdata
->stall_ctrl
&= ~BIT(13);
666 /* bit[9] Start/stop logic control bit */
667 if (drvdata
->mode
& ETM_MODE_VIEWINST_STARTSTOP
)
668 drvdata
->vinst_ctrl
|= BIT(9);
670 drvdata
->vinst_ctrl
&= ~BIT(9);
672 /* bit[10], Whether a trace unit must trace a Reset exception */
673 if (drvdata
->mode
& ETM_MODE_TRACE_RESET
)
674 drvdata
->vinst_ctrl
|= BIT(10);
676 drvdata
->vinst_ctrl
&= ~BIT(10);
678 /* bit[11], Whether a trace unit must trace a system error exception */
679 if ((drvdata
->mode
& ETM_MODE_TRACE_ERR
) &&
680 (drvdata
->trc_error
== true))
681 drvdata
->vinst_ctrl
|= BIT(11);
683 drvdata
->vinst_ctrl
&= ~BIT(11);
685 spin_unlock(&drvdata
->spinlock
);
688 static DEVICE_ATTR_RW(mode
);
690 static ssize_t
pe_show(struct device
*dev
,
691 struct device_attribute
*attr
,
695 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
697 val
= drvdata
->pe_sel
;
698 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
701 static ssize_t
pe_store(struct device
*dev
,
702 struct device_attribute
*attr
,
703 const char *buf
, size_t size
)
706 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
708 if (kstrtoul(buf
, 16, &val
))
711 spin_lock(&drvdata
->spinlock
);
712 if (val
> drvdata
->nr_pe
) {
713 spin_unlock(&drvdata
->spinlock
);
717 drvdata
->pe_sel
= val
;
718 spin_unlock(&drvdata
->spinlock
);
721 static DEVICE_ATTR_RW(pe
);
723 static ssize_t
event_show(struct device
*dev
,
724 struct device_attribute
*attr
,
728 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
730 val
= drvdata
->eventctrl0
;
731 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
734 static ssize_t
event_store(struct device
*dev
,
735 struct device_attribute
*attr
,
736 const char *buf
, size_t size
)
739 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
741 if (kstrtoul(buf
, 16, &val
))
744 spin_lock(&drvdata
->spinlock
);
745 switch (drvdata
->nr_event
) {
747 /* EVENT0, bits[7:0] */
748 drvdata
->eventctrl0
= val
& 0xFF;
751 /* EVENT1, bits[15:8] */
752 drvdata
->eventctrl0
= val
& 0xFFFF;
755 /* EVENT2, bits[23:16] */
756 drvdata
->eventctrl0
= val
& 0xFFFFFF;
759 /* EVENT3, bits[31:24] */
760 drvdata
->eventctrl0
= val
;
765 spin_unlock(&drvdata
->spinlock
);
768 static DEVICE_ATTR_RW(event
);
770 static ssize_t
event_instren_show(struct device
*dev
,
771 struct device_attribute
*attr
,
775 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
777 val
= BMVAL(drvdata
->eventctrl1
, 0, 3);
778 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
781 static ssize_t
event_instren_store(struct device
*dev
,
782 struct device_attribute
*attr
,
783 const char *buf
, size_t size
)
786 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
788 if (kstrtoul(buf
, 16, &val
))
791 spin_lock(&drvdata
->spinlock
);
792 /* start by clearing all instruction event enable bits */
793 drvdata
->eventctrl1
&= ~(BIT(0) | BIT(1) | BIT(2) | BIT(3));
794 switch (drvdata
->nr_event
) {
796 /* generate Event element for event 1 */
797 drvdata
->eventctrl1
|= val
& BIT(1);
800 /* generate Event element for event 1 and 2 */
801 drvdata
->eventctrl1
|= val
& (BIT(0) | BIT(1));
804 /* generate Event element for event 1, 2 and 3 */
805 drvdata
->eventctrl1
|= val
& (BIT(0) | BIT(1) | BIT(2));
808 /* generate Event element for all 4 events */
809 drvdata
->eventctrl1
|= val
& 0xF;
814 spin_unlock(&drvdata
->spinlock
);
817 static DEVICE_ATTR_RW(event_instren
);
819 static ssize_t
event_ts_show(struct device
*dev
,
820 struct device_attribute
*attr
,
824 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
826 val
= drvdata
->ts_ctrl
;
827 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
830 static ssize_t
event_ts_store(struct device
*dev
,
831 struct device_attribute
*attr
,
832 const char *buf
, size_t size
)
835 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
837 if (kstrtoul(buf
, 16, &val
))
839 if (!drvdata
->ts_size
)
842 drvdata
->ts_ctrl
= val
& ETMv4_EVENT_MASK
;
845 static DEVICE_ATTR_RW(event_ts
);
847 static ssize_t
syncfreq_show(struct device
*dev
,
848 struct device_attribute
*attr
,
852 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
854 val
= drvdata
->syncfreq
;
855 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
858 static ssize_t
syncfreq_store(struct device
*dev
,
859 struct device_attribute
*attr
,
860 const char *buf
, size_t size
)
863 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
865 if (kstrtoul(buf
, 16, &val
))
867 if (drvdata
->syncpr
== true)
870 drvdata
->syncfreq
= val
& ETMv4_SYNC_MASK
;
873 static DEVICE_ATTR_RW(syncfreq
);
875 static ssize_t
cyc_threshold_show(struct device
*dev
,
876 struct device_attribute
*attr
,
880 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
882 val
= drvdata
->ccctlr
;
883 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
886 static ssize_t
cyc_threshold_store(struct device
*dev
,
887 struct device_attribute
*attr
,
888 const char *buf
, size_t size
)
891 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
893 if (kstrtoul(buf
, 16, &val
))
895 if (val
< drvdata
->ccitmin
)
898 drvdata
->ccctlr
= val
& ETM_CYC_THRESHOLD_MASK
;
901 static DEVICE_ATTR_RW(cyc_threshold
);
903 static ssize_t
bb_ctrl_show(struct device
*dev
,
904 struct device_attribute
*attr
,
908 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
910 val
= drvdata
->bb_ctrl
;
911 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
914 static ssize_t
bb_ctrl_store(struct device
*dev
,
915 struct device_attribute
*attr
,
916 const char *buf
, size_t size
)
919 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
921 if (kstrtoul(buf
, 16, &val
))
923 if (drvdata
->trcbb
== false)
925 if (!drvdata
->nr_addr_cmp
)
928 * Bit[7:0] selects which address range comparator is used for
929 * branch broadcast control.
931 if (BMVAL(val
, 0, 7) > drvdata
->nr_addr_cmp
)
934 drvdata
->bb_ctrl
= val
;
937 static DEVICE_ATTR_RW(bb_ctrl
);
939 static ssize_t
event_vinst_show(struct device
*dev
,
940 struct device_attribute
*attr
,
944 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
946 val
= drvdata
->vinst_ctrl
& ETMv4_EVENT_MASK
;
947 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
950 static ssize_t
event_vinst_store(struct device
*dev
,
951 struct device_attribute
*attr
,
952 const char *buf
, size_t size
)
955 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
957 if (kstrtoul(buf
, 16, &val
))
960 spin_lock(&drvdata
->spinlock
);
961 val
&= ETMv4_EVENT_MASK
;
962 drvdata
->vinst_ctrl
&= ~ETMv4_EVENT_MASK
;
963 drvdata
->vinst_ctrl
|= val
;
964 spin_unlock(&drvdata
->spinlock
);
967 static DEVICE_ATTR_RW(event_vinst
);
969 static ssize_t
s_exlevel_vinst_show(struct device
*dev
,
970 struct device_attribute
*attr
,
974 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
976 val
= BMVAL(drvdata
->vinst_ctrl
, 16, 19);
977 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
980 static ssize_t
s_exlevel_vinst_store(struct device
*dev
,
981 struct device_attribute
*attr
,
982 const char *buf
, size_t size
)
985 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
987 if (kstrtoul(buf
, 16, &val
))
990 spin_lock(&drvdata
->spinlock
);
991 /* clear all EXLEVEL_S bits (bit[18] is never implemented) */
992 drvdata
->vinst_ctrl
&= ~(BIT(16) | BIT(17) | BIT(19));
993 /* enable instruction tracing for corresponding exception level */
994 val
&= drvdata
->s_ex_level
;
995 drvdata
->vinst_ctrl
|= (val
<< 16);
996 spin_unlock(&drvdata
->spinlock
);
999 static DEVICE_ATTR_RW(s_exlevel_vinst
);
1001 static ssize_t
ns_exlevel_vinst_show(struct device
*dev
,
1002 struct device_attribute
*attr
,
1006 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1008 /* EXLEVEL_NS, bits[23:20] */
1009 val
= BMVAL(drvdata
->vinst_ctrl
, 20, 23);
1010 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1013 static ssize_t
ns_exlevel_vinst_store(struct device
*dev
,
1014 struct device_attribute
*attr
,
1015 const char *buf
, size_t size
)
1018 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1020 if (kstrtoul(buf
, 16, &val
))
1023 spin_lock(&drvdata
->spinlock
);
1024 /* clear EXLEVEL_NS bits (bit[23] is never implemented */
1025 drvdata
->vinst_ctrl
&= ~(BIT(20) | BIT(21) | BIT(22));
1026 /* enable instruction tracing for corresponding exception level */
1027 val
&= drvdata
->ns_ex_level
;
1028 drvdata
->vinst_ctrl
|= (val
<< 20);
1029 spin_unlock(&drvdata
->spinlock
);
1032 static DEVICE_ATTR_RW(ns_exlevel_vinst
);
1034 static ssize_t
addr_idx_show(struct device
*dev
,
1035 struct device_attribute
*attr
,
1039 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1041 val
= drvdata
->addr_idx
;
1042 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1045 static ssize_t
addr_idx_store(struct device
*dev
,
1046 struct device_attribute
*attr
,
1047 const char *buf
, size_t size
)
1050 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1052 if (kstrtoul(buf
, 16, &val
))
1054 if (val
>= drvdata
->nr_addr_cmp
* 2)
1058 * Use spinlock to ensure index doesn't change while it gets
1059 * dereferenced multiple times within a spinlock block elsewhere.
1061 spin_lock(&drvdata
->spinlock
);
1062 drvdata
->addr_idx
= val
;
1063 spin_unlock(&drvdata
->spinlock
);
1066 static DEVICE_ATTR_RW(addr_idx
);
1068 static ssize_t
addr_instdatatype_show(struct device
*dev
,
1069 struct device_attribute
*attr
,
1074 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1076 spin_lock(&drvdata
->spinlock
);
1077 idx
= drvdata
->addr_idx
;
1078 val
= BMVAL(drvdata
->addr_acc
[idx
], 0, 1);
1079 len
= scnprintf(buf
, PAGE_SIZE
, "%s\n",
1080 val
== ETM_INSTR_ADDR
? "instr" :
1081 (val
== ETM_DATA_LOAD_ADDR
? "data_load" :
1082 (val
== ETM_DATA_STORE_ADDR
? "data_store" :
1083 "data_load_store")));
1084 spin_unlock(&drvdata
->spinlock
);
1088 static ssize_t
addr_instdatatype_store(struct device
*dev
,
1089 struct device_attribute
*attr
,
1090 const char *buf
, size_t size
)
1094 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1096 if (strlen(buf
) >= 20)
1098 if (sscanf(buf
, "%s", str
) != 1)
1101 spin_lock(&drvdata
->spinlock
);
1102 idx
= drvdata
->addr_idx
;
1103 if (!strcmp(str
, "instr"))
1104 /* TYPE, bits[1:0] */
1105 drvdata
->addr_acc
[idx
] &= ~(BIT(0) | BIT(1));
1107 spin_unlock(&drvdata
->spinlock
);
1110 static DEVICE_ATTR_RW(addr_instdatatype
);
1112 static ssize_t
addr_single_show(struct device
*dev
,
1113 struct device_attribute
*attr
,
1118 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1120 idx
= drvdata
->addr_idx
;
1121 spin_lock(&drvdata
->spinlock
);
1122 if (!(drvdata
->addr_type
[idx
] == ETM_ADDR_TYPE_NONE
||
1123 drvdata
->addr_type
[idx
] == ETM_ADDR_TYPE_SINGLE
)) {
1124 spin_unlock(&drvdata
->spinlock
);
1127 val
= (unsigned long)drvdata
->addr_val
[idx
];
1128 spin_unlock(&drvdata
->spinlock
);
1129 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1132 static ssize_t
addr_single_store(struct device
*dev
,
1133 struct device_attribute
*attr
,
1134 const char *buf
, size_t size
)
1138 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1140 if (kstrtoul(buf
, 16, &val
))
1143 spin_lock(&drvdata
->spinlock
);
1144 idx
= drvdata
->addr_idx
;
1145 if (!(drvdata
->addr_type
[idx
] == ETM_ADDR_TYPE_NONE
||
1146 drvdata
->addr_type
[idx
] == ETM_ADDR_TYPE_SINGLE
)) {
1147 spin_unlock(&drvdata
->spinlock
);
1151 drvdata
->addr_val
[idx
] = (u64
)val
;
1152 drvdata
->addr_type
[idx
] = ETM_ADDR_TYPE_SINGLE
;
1153 spin_unlock(&drvdata
->spinlock
);
1156 static DEVICE_ATTR_RW(addr_single
);
1158 static ssize_t
addr_range_show(struct device
*dev
,
1159 struct device_attribute
*attr
,
1163 unsigned long val1
, val2
;
1164 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1166 spin_lock(&drvdata
->spinlock
);
1167 idx
= drvdata
->addr_idx
;
1169 spin_unlock(&drvdata
->spinlock
);
1172 if (!((drvdata
->addr_type
[idx
] == ETM_ADDR_TYPE_NONE
&&
1173 drvdata
->addr_type
[idx
+ 1] == ETM_ADDR_TYPE_NONE
) ||
1174 (drvdata
->addr_type
[idx
] == ETM_ADDR_TYPE_RANGE
&&
1175 drvdata
->addr_type
[idx
+ 1] == ETM_ADDR_TYPE_RANGE
))) {
1176 spin_unlock(&drvdata
->spinlock
);
1180 val1
= (unsigned long)drvdata
->addr_val
[idx
];
1181 val2
= (unsigned long)drvdata
->addr_val
[idx
+ 1];
1182 spin_unlock(&drvdata
->spinlock
);
1183 return scnprintf(buf
, PAGE_SIZE
, "%#lx %#lx\n", val1
, val2
);
1186 static ssize_t
addr_range_store(struct device
*dev
,
1187 struct device_attribute
*attr
,
1188 const char *buf
, size_t size
)
1191 unsigned long val1
, val2
;
1192 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1194 if (sscanf(buf
, "%lx %lx", &val1
, &val2
) != 2)
1196 /* lower address comparator cannot have a higher address value */
1200 spin_lock(&drvdata
->spinlock
);
1201 idx
= drvdata
->addr_idx
;
1203 spin_unlock(&drvdata
->spinlock
);
1207 if (!((drvdata
->addr_type
[idx
] == ETM_ADDR_TYPE_NONE
&&
1208 drvdata
->addr_type
[idx
+ 1] == ETM_ADDR_TYPE_NONE
) ||
1209 (drvdata
->addr_type
[idx
] == ETM_ADDR_TYPE_RANGE
&&
1210 drvdata
->addr_type
[idx
+ 1] == ETM_ADDR_TYPE_RANGE
))) {
1211 spin_unlock(&drvdata
->spinlock
);
1215 drvdata
->addr_val
[idx
] = (u64
)val1
;
1216 drvdata
->addr_type
[idx
] = ETM_ADDR_TYPE_RANGE
;
1217 drvdata
->addr_val
[idx
+ 1] = (u64
)val2
;
1218 drvdata
->addr_type
[idx
+ 1] = ETM_ADDR_TYPE_RANGE
;
1220 * Program include or exclude control bits for vinst or vdata
1221 * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE
1223 if (drvdata
->mode
& ETM_MODE_EXCLUDE
)
1224 etm4_set_mode_exclude(drvdata
, true);
1226 etm4_set_mode_exclude(drvdata
, false);
1228 spin_unlock(&drvdata
->spinlock
);
1231 static DEVICE_ATTR_RW(addr_range
);
1233 static ssize_t
addr_start_show(struct device
*dev
,
1234 struct device_attribute
*attr
,
1239 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1241 spin_lock(&drvdata
->spinlock
);
1242 idx
= drvdata
->addr_idx
;
1244 if (!(drvdata
->addr_type
[idx
] == ETM_ADDR_TYPE_NONE
||
1245 drvdata
->addr_type
[idx
] == ETM_ADDR_TYPE_START
)) {
1246 spin_unlock(&drvdata
->spinlock
);
1250 val
= (unsigned long)drvdata
->addr_val
[idx
];
1251 spin_unlock(&drvdata
->spinlock
);
1252 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1255 static ssize_t
addr_start_store(struct device
*dev
,
1256 struct device_attribute
*attr
,
1257 const char *buf
, size_t size
)
1261 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1263 if (kstrtoul(buf
, 16, &val
))
1266 spin_lock(&drvdata
->spinlock
);
1267 idx
= drvdata
->addr_idx
;
1268 if (!drvdata
->nr_addr_cmp
) {
1269 spin_unlock(&drvdata
->spinlock
);
1272 if (!(drvdata
->addr_type
[idx
] == ETM_ADDR_TYPE_NONE
||
1273 drvdata
->addr_type
[idx
] == ETM_ADDR_TYPE_START
)) {
1274 spin_unlock(&drvdata
->spinlock
);
1278 drvdata
->addr_val
[idx
] = (u64
)val
;
1279 drvdata
->addr_type
[idx
] = ETM_ADDR_TYPE_START
;
1280 drvdata
->vissctlr
|= BIT(idx
);
1281 /* SSSTATUS, bit[9] - turn on start/stop logic */
1282 drvdata
->vinst_ctrl
|= BIT(9);
1283 spin_unlock(&drvdata
->spinlock
);
1286 static DEVICE_ATTR_RW(addr_start
);
1288 static ssize_t
addr_stop_show(struct device
*dev
,
1289 struct device_attribute
*attr
,
1294 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1296 spin_lock(&drvdata
->spinlock
);
1297 idx
= drvdata
->addr_idx
;
1299 if (!(drvdata
->addr_type
[idx
] == ETM_ADDR_TYPE_NONE
||
1300 drvdata
->addr_type
[idx
] == ETM_ADDR_TYPE_STOP
)) {
1301 spin_unlock(&drvdata
->spinlock
);
1305 val
= (unsigned long)drvdata
->addr_val
[idx
];
1306 spin_unlock(&drvdata
->spinlock
);
1307 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1310 static ssize_t
addr_stop_store(struct device
*dev
,
1311 struct device_attribute
*attr
,
1312 const char *buf
, size_t size
)
1316 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1318 if (kstrtoul(buf
, 16, &val
))
1321 spin_lock(&drvdata
->spinlock
);
1322 idx
= drvdata
->addr_idx
;
1323 if (!drvdata
->nr_addr_cmp
) {
1324 spin_unlock(&drvdata
->spinlock
);
1327 if (!(drvdata
->addr_type
[idx
] == ETM_ADDR_TYPE_NONE
||
1328 drvdata
->addr_type
[idx
] == ETM_ADDR_TYPE_STOP
)) {
1329 spin_unlock(&drvdata
->spinlock
);
1333 drvdata
->addr_val
[idx
] = (u64
)val
;
1334 drvdata
->addr_type
[idx
] = ETM_ADDR_TYPE_STOP
;
1335 drvdata
->vissctlr
|= BIT(idx
+ 16);
1336 /* SSSTATUS, bit[9] - turn on start/stop logic */
1337 drvdata
->vinst_ctrl
|= BIT(9);
1338 spin_unlock(&drvdata
->spinlock
);
1341 static DEVICE_ATTR_RW(addr_stop
);
1343 static ssize_t
addr_ctxtype_show(struct device
*dev
,
1344 struct device_attribute
*attr
,
1349 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1351 spin_lock(&drvdata
->spinlock
);
1352 idx
= drvdata
->addr_idx
;
1353 /* CONTEXTTYPE, bits[3:2] */
1354 val
= BMVAL(drvdata
->addr_acc
[idx
], 2, 3);
1355 len
= scnprintf(buf
, PAGE_SIZE
, "%s\n", val
== ETM_CTX_NONE
? "none" :
1356 (val
== ETM_CTX_CTXID
? "ctxid" :
1357 (val
== ETM_CTX_VMID
? "vmid" : "all")));
1358 spin_unlock(&drvdata
->spinlock
);
1362 static ssize_t
addr_ctxtype_store(struct device
*dev
,
1363 struct device_attribute
*attr
,
1364 const char *buf
, size_t size
)
1368 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1370 if (strlen(buf
) >= 10)
1372 if (sscanf(buf
, "%s", str
) != 1)
1375 spin_lock(&drvdata
->spinlock
);
1376 idx
= drvdata
->addr_idx
;
1377 if (!strcmp(str
, "none"))
1378 /* start by clearing context type bits */
1379 drvdata
->addr_acc
[idx
] &= ~(BIT(2) | BIT(3));
1380 else if (!strcmp(str
, "ctxid")) {
1381 /* 0b01 The trace unit performs a Context ID */
1382 if (drvdata
->numcidc
) {
1383 drvdata
->addr_acc
[idx
] |= BIT(2);
1384 drvdata
->addr_acc
[idx
] &= ~BIT(3);
1386 } else if (!strcmp(str
, "vmid")) {
1387 /* 0b10 The trace unit performs a VMID */
1388 if (drvdata
->numvmidc
) {
1389 drvdata
->addr_acc
[idx
] &= ~BIT(2);
1390 drvdata
->addr_acc
[idx
] |= BIT(3);
1392 } else if (!strcmp(str
, "all")) {
1394 * 0b11 The trace unit performs a Context ID
1395 * comparison and a VMID
1397 if (drvdata
->numcidc
)
1398 drvdata
->addr_acc
[idx
] |= BIT(2);
1399 if (drvdata
->numvmidc
)
1400 drvdata
->addr_acc
[idx
] |= BIT(3);
1402 spin_unlock(&drvdata
->spinlock
);
1405 static DEVICE_ATTR_RW(addr_ctxtype
);
1407 static ssize_t
addr_context_show(struct device
*dev
,
1408 struct device_attribute
*attr
,
1413 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1415 spin_lock(&drvdata
->spinlock
);
1416 idx
= drvdata
->addr_idx
;
1417 /* context ID comparator bits[6:4] */
1418 val
= BMVAL(drvdata
->addr_acc
[idx
], 4, 6);
1419 spin_unlock(&drvdata
->spinlock
);
1420 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1423 static ssize_t
addr_context_store(struct device
*dev
,
1424 struct device_attribute
*attr
,
1425 const char *buf
, size_t size
)
1429 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1431 if (kstrtoul(buf
, 16, &val
))
1433 if ((drvdata
->numcidc
<= 1) && (drvdata
->numvmidc
<= 1))
1435 if (val
>= (drvdata
->numcidc
>= drvdata
->numvmidc
?
1436 drvdata
->numcidc
: drvdata
->numvmidc
))
1439 spin_lock(&drvdata
->spinlock
);
1440 idx
= drvdata
->addr_idx
;
1441 /* clear context ID comparator bits[6:4] */
1442 drvdata
->addr_acc
[idx
] &= ~(BIT(4) | BIT(5) | BIT(6));
1443 drvdata
->addr_acc
[idx
] |= (val
<< 4);
1444 spin_unlock(&drvdata
->spinlock
);
1447 static DEVICE_ATTR_RW(addr_context
);
1449 static ssize_t
seq_idx_show(struct device
*dev
,
1450 struct device_attribute
*attr
,
1454 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1456 val
= drvdata
->seq_idx
;
1457 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1460 static ssize_t
seq_idx_store(struct device
*dev
,
1461 struct device_attribute
*attr
,
1462 const char *buf
, size_t size
)
1465 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1467 if (kstrtoul(buf
, 16, &val
))
1469 if (val
>= drvdata
->nrseqstate
- 1)
1473 * Use spinlock to ensure index doesn't change while it gets
1474 * dereferenced multiple times within a spinlock block elsewhere.
1476 spin_lock(&drvdata
->spinlock
);
1477 drvdata
->seq_idx
= val
;
1478 spin_unlock(&drvdata
->spinlock
);
1481 static DEVICE_ATTR_RW(seq_idx
);
1483 static ssize_t
seq_state_show(struct device
*dev
,
1484 struct device_attribute
*attr
,
1488 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1490 val
= drvdata
->seq_state
;
1491 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1494 static ssize_t
seq_state_store(struct device
*dev
,
1495 struct device_attribute
*attr
,
1496 const char *buf
, size_t size
)
1499 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1501 if (kstrtoul(buf
, 16, &val
))
1503 if (val
>= drvdata
->nrseqstate
)
1506 drvdata
->seq_state
= val
;
1509 static DEVICE_ATTR_RW(seq_state
);
1511 static ssize_t
seq_event_show(struct device
*dev
,
1512 struct device_attribute
*attr
,
1517 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1519 spin_lock(&drvdata
->spinlock
);
1520 idx
= drvdata
->seq_idx
;
1521 val
= drvdata
->seq_ctrl
[idx
];
1522 spin_unlock(&drvdata
->spinlock
);
1523 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1526 static ssize_t
seq_event_store(struct device
*dev
,
1527 struct device_attribute
*attr
,
1528 const char *buf
, size_t size
)
1532 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1534 if (kstrtoul(buf
, 16, &val
))
1537 spin_lock(&drvdata
->spinlock
);
1538 idx
= drvdata
->seq_idx
;
1539 /* RST, bits[7:0] */
1540 drvdata
->seq_ctrl
[idx
] = val
& 0xFF;
1541 spin_unlock(&drvdata
->spinlock
);
1544 static DEVICE_ATTR_RW(seq_event
);
1546 static ssize_t
seq_reset_event_show(struct device
*dev
,
1547 struct device_attribute
*attr
,
1551 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1553 val
= drvdata
->seq_rst
;
1554 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1557 static ssize_t
seq_reset_event_store(struct device
*dev
,
1558 struct device_attribute
*attr
,
1559 const char *buf
, size_t size
)
1562 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1564 if (kstrtoul(buf
, 16, &val
))
1566 if (!(drvdata
->nrseqstate
))
1569 drvdata
->seq_rst
= val
& ETMv4_EVENT_MASK
;
1572 static DEVICE_ATTR_RW(seq_reset_event
);
1574 static ssize_t
cntr_idx_show(struct device
*dev
,
1575 struct device_attribute
*attr
,
1579 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1581 val
= drvdata
->cntr_idx
;
1582 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1585 static ssize_t
cntr_idx_store(struct device
*dev
,
1586 struct device_attribute
*attr
,
1587 const char *buf
, size_t size
)
1590 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1592 if (kstrtoul(buf
, 16, &val
))
1594 if (val
>= drvdata
->nr_cntr
)
1598 * Use spinlock to ensure index doesn't change while it gets
1599 * dereferenced multiple times within a spinlock block elsewhere.
1601 spin_lock(&drvdata
->spinlock
);
1602 drvdata
->cntr_idx
= val
;
1603 spin_unlock(&drvdata
->spinlock
);
1606 static DEVICE_ATTR_RW(cntr_idx
);
1608 static ssize_t
cntrldvr_show(struct device
*dev
,
1609 struct device_attribute
*attr
,
1614 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1616 spin_lock(&drvdata
->spinlock
);
1617 idx
= drvdata
->cntr_idx
;
1618 val
= drvdata
->cntrldvr
[idx
];
1619 spin_unlock(&drvdata
->spinlock
);
1620 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1623 static ssize_t
cntrldvr_store(struct device
*dev
,
1624 struct device_attribute
*attr
,
1625 const char *buf
, size_t size
)
1629 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1631 if (kstrtoul(buf
, 16, &val
))
1633 if (val
> ETM_CNTR_MAX_VAL
)
1636 spin_lock(&drvdata
->spinlock
);
1637 idx
= drvdata
->cntr_idx
;
1638 drvdata
->cntrldvr
[idx
] = val
;
1639 spin_unlock(&drvdata
->spinlock
);
1642 static DEVICE_ATTR_RW(cntrldvr
);
1644 static ssize_t
cntr_val_show(struct device
*dev
,
1645 struct device_attribute
*attr
,
1650 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1652 spin_lock(&drvdata
->spinlock
);
1653 idx
= drvdata
->cntr_idx
;
1654 val
= drvdata
->cntr_val
[idx
];
1655 spin_unlock(&drvdata
->spinlock
);
1656 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1659 static ssize_t
cntr_val_store(struct device
*dev
,
1660 struct device_attribute
*attr
,
1661 const char *buf
, size_t size
)
1665 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1667 if (kstrtoul(buf
, 16, &val
))
1669 if (val
> ETM_CNTR_MAX_VAL
)
1672 spin_lock(&drvdata
->spinlock
);
1673 idx
= drvdata
->cntr_idx
;
1674 drvdata
->cntr_val
[idx
] = val
;
1675 spin_unlock(&drvdata
->spinlock
);
1678 static DEVICE_ATTR_RW(cntr_val
);
1680 static ssize_t
cntr_ctrl_show(struct device
*dev
,
1681 struct device_attribute
*attr
,
1686 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1688 spin_lock(&drvdata
->spinlock
);
1689 idx
= drvdata
->cntr_idx
;
1690 val
= drvdata
->cntr_ctrl
[idx
];
1691 spin_unlock(&drvdata
->spinlock
);
1692 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1695 static ssize_t
cntr_ctrl_store(struct device
*dev
,
1696 struct device_attribute
*attr
,
1697 const char *buf
, size_t size
)
1701 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1703 if (kstrtoul(buf
, 16, &val
))
1706 spin_lock(&drvdata
->spinlock
);
1707 idx
= drvdata
->cntr_idx
;
1708 drvdata
->cntr_ctrl
[idx
] = val
;
1709 spin_unlock(&drvdata
->spinlock
);
1712 static DEVICE_ATTR_RW(cntr_ctrl
);
1714 static ssize_t
res_idx_show(struct device
*dev
,
1715 struct device_attribute
*attr
,
1719 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1721 val
= drvdata
->res_idx
;
1722 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1725 static ssize_t
res_idx_store(struct device
*dev
,
1726 struct device_attribute
*attr
,
1727 const char *buf
, size_t size
)
1730 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1732 if (kstrtoul(buf
, 16, &val
))
1734 /* Resource selector pair 0 is always implemented and reserved */
1735 if ((val
== 0) || (val
>= drvdata
->nr_resource
))
1739 * Use spinlock to ensure index doesn't change while it gets
1740 * dereferenced multiple times within a spinlock block elsewhere.
1742 spin_lock(&drvdata
->spinlock
);
1743 drvdata
->res_idx
= val
;
1744 spin_unlock(&drvdata
->spinlock
);
1747 static DEVICE_ATTR_RW(res_idx
);
1749 static ssize_t
res_ctrl_show(struct device
*dev
,
1750 struct device_attribute
*attr
,
1755 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1757 spin_lock(&drvdata
->spinlock
);
1758 idx
= drvdata
->res_idx
;
1759 val
= drvdata
->res_ctrl
[idx
];
1760 spin_unlock(&drvdata
->spinlock
);
1761 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1764 static ssize_t
res_ctrl_store(struct device
*dev
,
1765 struct device_attribute
*attr
,
1766 const char *buf
, size_t size
)
1770 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1772 if (kstrtoul(buf
, 16, &val
))
1775 spin_lock(&drvdata
->spinlock
);
1776 idx
= drvdata
->res_idx
;
1777 /* For odd idx pair inversal bit is RES0 */
1779 /* PAIRINV, bit[21] */
1781 drvdata
->res_ctrl
[idx
] = val
;
1782 spin_unlock(&drvdata
->spinlock
);
1785 static DEVICE_ATTR_RW(res_ctrl
);
1787 static ssize_t
ctxid_idx_show(struct device
*dev
,
1788 struct device_attribute
*attr
,
1792 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1794 val
= drvdata
->ctxid_idx
;
1795 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1798 static ssize_t
ctxid_idx_store(struct device
*dev
,
1799 struct device_attribute
*attr
,
1800 const char *buf
, size_t size
)
1803 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1805 if (kstrtoul(buf
, 16, &val
))
1807 if (val
>= drvdata
->numcidc
)
1811 * Use spinlock to ensure index doesn't change while it gets
1812 * dereferenced multiple times within a spinlock block elsewhere.
1814 spin_lock(&drvdata
->spinlock
);
1815 drvdata
->ctxid_idx
= val
;
1816 spin_unlock(&drvdata
->spinlock
);
1819 static DEVICE_ATTR_RW(ctxid_idx
);
1821 static ssize_t
ctxid_pid_show(struct device
*dev
,
1822 struct device_attribute
*attr
,
1827 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1829 spin_lock(&drvdata
->spinlock
);
1830 idx
= drvdata
->ctxid_idx
;
1831 val
= (unsigned long)drvdata
->ctxid_vpid
[idx
];
1832 spin_unlock(&drvdata
->spinlock
);
1833 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1836 static ssize_t
ctxid_pid_store(struct device
*dev
,
1837 struct device_attribute
*attr
,
1838 const char *buf
, size_t size
)
1841 unsigned long vpid
, pid
;
1842 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1845 * only implemented when ctxid tracing is enabled, i.e. at least one
1846 * ctxid comparator is implemented and ctxid is greater than 0 bits
1849 if (!drvdata
->ctxid_size
|| !drvdata
->numcidc
)
1851 if (kstrtoul(buf
, 16, &vpid
))
1854 pid
= coresight_vpid_to_pid(vpid
);
1856 spin_lock(&drvdata
->spinlock
);
1857 idx
= drvdata
->ctxid_idx
;
1858 drvdata
->ctxid_pid
[idx
] = (u64
)pid
;
1859 drvdata
->ctxid_vpid
[idx
] = (u64
)vpid
;
1860 spin_unlock(&drvdata
->spinlock
);
1863 static DEVICE_ATTR_RW(ctxid_pid
);
1865 static ssize_t
ctxid_masks_show(struct device
*dev
,
1866 struct device_attribute
*attr
,
1869 unsigned long val1
, val2
;
1870 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1872 spin_lock(&drvdata
->spinlock
);
1873 val1
= drvdata
->ctxid_mask0
;
1874 val2
= drvdata
->ctxid_mask1
;
1875 spin_unlock(&drvdata
->spinlock
);
1876 return scnprintf(buf
, PAGE_SIZE
, "%#lx %#lx\n", val1
, val2
);
1879 static ssize_t
ctxid_masks_store(struct device
*dev
,
1880 struct device_attribute
*attr
,
1881 const char *buf
, size_t size
)
1884 unsigned long val1
, val2
, mask
;
1885 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1888 * only implemented when ctxid tracing is enabled, i.e. at least one
1889 * ctxid comparator is implemented and ctxid is greater than 0 bits
1892 if (!drvdata
->ctxid_size
|| !drvdata
->numcidc
)
1894 if (sscanf(buf
, "%lx %lx", &val1
, &val2
) != 2)
1897 spin_lock(&drvdata
->spinlock
);
1899 * each byte[0..3] controls mask value applied to ctxid
1902 switch (drvdata
->numcidc
) {
1904 /* COMP0, bits[7:0] */
1905 drvdata
->ctxid_mask0
= val1
& 0xFF;
1908 /* COMP1, bits[15:8] */
1909 drvdata
->ctxid_mask0
= val1
& 0xFFFF;
1912 /* COMP2, bits[23:16] */
1913 drvdata
->ctxid_mask0
= val1
& 0xFFFFFF;
1916 /* COMP3, bits[31:24] */
1917 drvdata
->ctxid_mask0
= val1
;
1920 /* COMP4, bits[7:0] */
1921 drvdata
->ctxid_mask0
= val1
;
1922 drvdata
->ctxid_mask1
= val2
& 0xFF;
1925 /* COMP5, bits[15:8] */
1926 drvdata
->ctxid_mask0
= val1
;
1927 drvdata
->ctxid_mask1
= val2
& 0xFFFF;
1930 /* COMP6, bits[23:16] */
1931 drvdata
->ctxid_mask0
= val1
;
1932 drvdata
->ctxid_mask1
= val2
& 0xFFFFFF;
1935 /* COMP7, bits[31:24] */
1936 drvdata
->ctxid_mask0
= val1
;
1937 drvdata
->ctxid_mask1
= val2
;
1943 * If software sets a mask bit to 1, it must program relevant byte
1944 * of ctxid comparator value 0x0, otherwise behavior is unpredictable.
1945 * For example, if bit[3] of ctxid_mask0 is 1, we must clear bits[31:24]
1946 * of ctxid comparator0 value (corresponding to byte 0) register.
1948 mask
= drvdata
->ctxid_mask0
;
1949 for (i
= 0; i
< drvdata
->numcidc
; i
++) {
1950 /* mask value of corresponding ctxid comparator */
1951 maskbyte
= mask
& ETMv4_EVENT_MASK
;
1953 * each bit corresponds to a byte of respective ctxid comparator
1956 for (j
= 0; j
< 8; j
++) {
1958 drvdata
->ctxid_pid
[i
] &= ~(0xFF << (j
* 8));
1961 /* Select the next ctxid comparator mask value */
1963 /* ctxid comparators[4-7] */
1964 mask
= drvdata
->ctxid_mask1
;
1969 spin_unlock(&drvdata
->spinlock
);
1972 static DEVICE_ATTR_RW(ctxid_masks
);
1974 static ssize_t
vmid_idx_show(struct device
*dev
,
1975 struct device_attribute
*attr
,
1979 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1981 val
= drvdata
->vmid_idx
;
1982 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1985 static ssize_t
vmid_idx_store(struct device
*dev
,
1986 struct device_attribute
*attr
,
1987 const char *buf
, size_t size
)
1990 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1992 if (kstrtoul(buf
, 16, &val
))
1994 if (val
>= drvdata
->numvmidc
)
1998 * Use spinlock to ensure index doesn't change while it gets
1999 * dereferenced multiple times within a spinlock block elsewhere.
2001 spin_lock(&drvdata
->spinlock
);
2002 drvdata
->vmid_idx
= val
;
2003 spin_unlock(&drvdata
->spinlock
);
2006 static DEVICE_ATTR_RW(vmid_idx
);
2008 static ssize_t
vmid_val_show(struct device
*dev
,
2009 struct device_attribute
*attr
,
2013 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
2015 val
= (unsigned long)drvdata
->vmid_val
[drvdata
->vmid_idx
];
2016 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
2019 static ssize_t
vmid_val_store(struct device
*dev
,
2020 struct device_attribute
*attr
,
2021 const char *buf
, size_t size
)
2024 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
2027 * only implemented when vmid tracing is enabled, i.e. at least one
2028 * vmid comparator is implemented and at least 8 bit vmid size
2030 if (!drvdata
->vmid_size
|| !drvdata
->numvmidc
)
2032 if (kstrtoul(buf
, 16, &val
))
2035 spin_lock(&drvdata
->spinlock
);
2036 drvdata
->vmid_val
[drvdata
->vmid_idx
] = (u64
)val
;
2037 spin_unlock(&drvdata
->spinlock
);
2040 static DEVICE_ATTR_RW(vmid_val
);
2042 static ssize_t
vmid_masks_show(struct device
*dev
,
2043 struct device_attribute
*attr
, char *buf
)
2045 unsigned long val1
, val2
;
2046 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
2048 spin_lock(&drvdata
->spinlock
);
2049 val1
= drvdata
->vmid_mask0
;
2050 val2
= drvdata
->vmid_mask1
;
2051 spin_unlock(&drvdata
->spinlock
);
2052 return scnprintf(buf
, PAGE_SIZE
, "%#lx %#lx\n", val1
, val2
);
2055 static ssize_t
vmid_masks_store(struct device
*dev
,
2056 struct device_attribute
*attr
,
2057 const char *buf
, size_t size
)
2060 unsigned long val1
, val2
, mask
;
2061 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
2063 * only implemented when vmid tracing is enabled, i.e. at least one
2064 * vmid comparator is implemented and at least 8 bit vmid size
2066 if (!drvdata
->vmid_size
|| !drvdata
->numvmidc
)
2068 if (sscanf(buf
, "%lx %lx", &val1
, &val2
) != 2)
2071 spin_lock(&drvdata
->spinlock
);
2074 * each byte[0..3] controls mask value applied to vmid
2077 switch (drvdata
->numvmidc
) {
2079 /* COMP0, bits[7:0] */
2080 drvdata
->vmid_mask0
= val1
& 0xFF;
2083 /* COMP1, bits[15:8] */
2084 drvdata
->vmid_mask0
= val1
& 0xFFFF;
2087 /* COMP2, bits[23:16] */
2088 drvdata
->vmid_mask0
= val1
& 0xFFFFFF;
2091 /* COMP3, bits[31:24] */
2092 drvdata
->vmid_mask0
= val1
;
2095 /* COMP4, bits[7:0] */
2096 drvdata
->vmid_mask0
= val1
;
2097 drvdata
->vmid_mask1
= val2
& 0xFF;
2100 /* COMP5, bits[15:8] */
2101 drvdata
->vmid_mask0
= val1
;
2102 drvdata
->vmid_mask1
= val2
& 0xFFFF;
2105 /* COMP6, bits[23:16] */
2106 drvdata
->vmid_mask0
= val1
;
2107 drvdata
->vmid_mask1
= val2
& 0xFFFFFF;
2110 /* COMP7, bits[31:24] */
2111 drvdata
->vmid_mask0
= val1
;
2112 drvdata
->vmid_mask1
= val2
;
2119 * If software sets a mask bit to 1, it must program relevant byte
2120 * of vmid comparator value 0x0, otherwise behavior is unpredictable.
2121 * For example, if bit[3] of vmid_mask0 is 1, we must clear bits[31:24]
2122 * of vmid comparator0 value (corresponding to byte 0) register.
2124 mask
= drvdata
->vmid_mask0
;
2125 for (i
= 0; i
< drvdata
->numvmidc
; i
++) {
2126 /* mask value of corresponding vmid comparator */
2127 maskbyte
= mask
& ETMv4_EVENT_MASK
;
2129 * each bit corresponds to a byte of respective vmid comparator
2132 for (j
= 0; j
< 8; j
++) {
2134 drvdata
->vmid_val
[i
] &= ~(0xFF << (j
* 8));
2137 /* Select the next vmid comparator mask value */
2139 /* vmid comparators[4-7] */
2140 mask
= drvdata
->vmid_mask1
;
2144 spin_unlock(&drvdata
->spinlock
);
2147 static DEVICE_ATTR_RW(vmid_masks
);
2149 static ssize_t
cpu_show(struct device
*dev
,
2150 struct device_attribute
*attr
, char *buf
)
2153 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
2156 return scnprintf(buf
, PAGE_SIZE
, "%d\n", val
);
2159 static DEVICE_ATTR_RO(cpu
);
2161 static struct attribute
*coresight_etmv4_attrs
[] = {
2162 &dev_attr_nr_pe_cmp
.attr
,
2163 &dev_attr_nr_addr_cmp
.attr
,
2164 &dev_attr_nr_cntr
.attr
,
2165 &dev_attr_nr_ext_inp
.attr
,
2166 &dev_attr_numcidc
.attr
,
2167 &dev_attr_numvmidc
.attr
,
2168 &dev_attr_nrseqstate
.attr
,
2169 &dev_attr_nr_resource
.attr
,
2170 &dev_attr_nr_ss_cmp
.attr
,
2171 &dev_attr_reset
.attr
,
2172 &dev_attr_mode
.attr
,
2174 &dev_attr_event
.attr
,
2175 &dev_attr_event_instren
.attr
,
2176 &dev_attr_event_ts
.attr
,
2177 &dev_attr_syncfreq
.attr
,
2178 &dev_attr_cyc_threshold
.attr
,
2179 &dev_attr_bb_ctrl
.attr
,
2180 &dev_attr_event_vinst
.attr
,
2181 &dev_attr_s_exlevel_vinst
.attr
,
2182 &dev_attr_ns_exlevel_vinst
.attr
,
2183 &dev_attr_addr_idx
.attr
,
2184 &dev_attr_addr_instdatatype
.attr
,
2185 &dev_attr_addr_single
.attr
,
2186 &dev_attr_addr_range
.attr
,
2187 &dev_attr_addr_start
.attr
,
2188 &dev_attr_addr_stop
.attr
,
2189 &dev_attr_addr_ctxtype
.attr
,
2190 &dev_attr_addr_context
.attr
,
2191 &dev_attr_seq_idx
.attr
,
2192 &dev_attr_seq_state
.attr
,
2193 &dev_attr_seq_event
.attr
,
2194 &dev_attr_seq_reset_event
.attr
,
2195 &dev_attr_cntr_idx
.attr
,
2196 &dev_attr_cntrldvr
.attr
,
2197 &dev_attr_cntr_val
.attr
,
2198 &dev_attr_cntr_ctrl
.attr
,
2199 &dev_attr_res_idx
.attr
,
2200 &dev_attr_res_ctrl
.attr
,
2201 &dev_attr_ctxid_idx
.attr
,
2202 &dev_attr_ctxid_pid
.attr
,
2203 &dev_attr_ctxid_masks
.attr
,
2204 &dev_attr_vmid_idx
.attr
,
2205 &dev_attr_vmid_val
.attr
,
2206 &dev_attr_vmid_masks
.attr
,
/*
 * Helper macro generating a read-only sysfs attribute that dumps the raw
 * value of the management/ID register at the given offset from the ETM base.
 */
#define coresight_simple_func(name, offset)				\
static ssize_t name##_show(struct device *_dev,				\
			   struct device_attribute *attr, char *buf)	\
{									\
	struct etmv4_drvdata *drvdata = dev_get_drvdata(_dev->parent);	\
	return scnprintf(buf, PAGE_SIZE, "0x%x\n",			\
			 readl_relaxed(drvdata->base + offset));	\
}									\
static DEVICE_ATTR_RO(name)
/* Read-only dumps of the ETMv4 management registers. */
coresight_simple_func(trcoslsr, TRCOSLSR);
coresight_simple_func(trcpdcr, TRCPDCR);
coresight_simple_func(trcpdsr, TRCPDSR);
coresight_simple_func(trclsr, TRCLSR);
coresight_simple_func(trcauthstatus, TRCAUTHSTATUS);
coresight_simple_func(trcdevid, TRCDEVID);
coresight_simple_func(trcdevtype, TRCDEVTYPE);
coresight_simple_func(trcpidr0, TRCPIDR0);
coresight_simple_func(trcpidr1, TRCPIDR1);
coresight_simple_func(trcpidr2, TRCPIDR2);
coresight_simple_func(trcpidr3, TRCPIDR3);
2233 static struct attribute
*coresight_etmv4_mgmt_attrs
[] = {
2234 &dev_attr_trcoslsr
.attr
,
2235 &dev_attr_trcpdcr
.attr
,
2236 &dev_attr_trcpdsr
.attr
,
2237 &dev_attr_trclsr
.attr
,
2238 &dev_attr_trcauthstatus
.attr
,
2239 &dev_attr_trcdevid
.attr
,
2240 &dev_attr_trcdevtype
.attr
,
2241 &dev_attr_trcpidr0
.attr
,
2242 &dev_attr_trcpidr1
.attr
,
2243 &dev_attr_trcpidr2
.attr
,
2244 &dev_attr_trcpidr3
.attr
,
/* Read-only dumps of the ETMv4 ID registers. */
coresight_simple_func(trcidr0, TRCIDR0);
coresight_simple_func(trcidr1, TRCIDR1);
coresight_simple_func(trcidr2, TRCIDR2);
coresight_simple_func(trcidr3, TRCIDR3);
coresight_simple_func(trcidr4, TRCIDR4);
coresight_simple_func(trcidr5, TRCIDR5);
/* trcidr[6,7] are reserved */
coresight_simple_func(trcidr8, TRCIDR8);
coresight_simple_func(trcidr9, TRCIDR9);
coresight_simple_func(trcidr10, TRCIDR10);
coresight_simple_func(trcidr11, TRCIDR11);
coresight_simple_func(trcidr12, TRCIDR12);
coresight_simple_func(trcidr13, TRCIDR13);
2262 static struct attribute
*coresight_etmv4_trcidr_attrs
[] = {
2263 &dev_attr_trcidr0
.attr
,
2264 &dev_attr_trcidr1
.attr
,
2265 &dev_attr_trcidr2
.attr
,
2266 &dev_attr_trcidr3
.attr
,
2267 &dev_attr_trcidr4
.attr
,
2268 &dev_attr_trcidr5
.attr
,
2269 /* trcidr[6,7] are reserved */
2270 &dev_attr_trcidr8
.attr
,
2271 &dev_attr_trcidr9
.attr
,
2272 &dev_attr_trcidr10
.attr
,
2273 &dev_attr_trcidr11
.attr
,
2274 &dev_attr_trcidr12
.attr
,
2275 &dev_attr_trcidr13
.attr
,
2279 static const struct attribute_group coresight_etmv4_group
= {
2280 .attrs
= coresight_etmv4_attrs
,
2283 static const struct attribute_group coresight_etmv4_mgmt_group
= {
2284 .attrs
= coresight_etmv4_mgmt_attrs
,
2288 static const struct attribute_group coresight_etmv4_trcidr_group
= {
2289 .attrs
= coresight_etmv4_trcidr_attrs
,
2293 static const struct attribute_group
*coresight_etmv4_groups
[] = {
2294 &coresight_etmv4_group
,
2295 &coresight_etmv4_mgmt_group
,
2296 &coresight_etmv4_trcidr_group
,
2300 static void etm4_init_arch_data(void *info
)
2308 struct etmv4_drvdata
*drvdata
= info
;
2310 CS_UNLOCK(drvdata
->base
);
2312 /* find all capabilities of the tracing unit */
2313 etmidr0
= readl_relaxed(drvdata
->base
+ TRCIDR0
);
2315 /* INSTP0, bits[2:1] P0 tracing support field */
2316 if (BMVAL(etmidr0
, 1, 1) && BMVAL(etmidr0
, 2, 2))
2317 drvdata
->instrp0
= true;
2319 drvdata
->instrp0
= false;
2321 /* TRCBB, bit[5] Branch broadcast tracing support bit */
2322 if (BMVAL(etmidr0
, 5, 5))
2323 drvdata
->trcbb
= true;
2325 drvdata
->trcbb
= false;
2327 /* TRCCOND, bit[6] Conditional instruction tracing support bit */
2328 if (BMVAL(etmidr0
, 6, 6))
2329 drvdata
->trccond
= true;
2331 drvdata
->trccond
= false;
2333 /* TRCCCI, bit[7] Cycle counting instruction bit */
2334 if (BMVAL(etmidr0
, 7, 7))
2335 drvdata
->trccci
= true;
2337 drvdata
->trccci
= false;
2339 /* RETSTACK, bit[9] Return stack bit */
2340 if (BMVAL(etmidr0
, 9, 9))
2341 drvdata
->retstack
= true;
2343 drvdata
->retstack
= false;
2345 /* NUMEVENT, bits[11:10] Number of events field */
2346 drvdata
->nr_event
= BMVAL(etmidr0
, 10, 11);
2347 /* QSUPP, bits[16:15] Q element support field */
2348 drvdata
->q_support
= BMVAL(etmidr0
, 15, 16);
2349 /* TSSIZE, bits[28:24] Global timestamp size field */
2350 drvdata
->ts_size
= BMVAL(etmidr0
, 24, 28);
2352 /* base architecture of trace unit */
2353 etmidr1
= readl_relaxed(drvdata
->base
+ TRCIDR1
);
2355 * TRCARCHMIN, bits[7:4] architecture the minor version number
2356 * TRCARCHMAJ, bits[11:8] architecture major versin number
2358 drvdata
->arch
= BMVAL(etmidr1
, 4, 11);
2360 /* maximum size of resources */
2361 etmidr2
= readl_relaxed(drvdata
->base
+ TRCIDR2
);
2362 /* CIDSIZE, bits[9:5] Indicates the Context ID size */
2363 drvdata
->ctxid_size
= BMVAL(etmidr2
, 5, 9);
2364 /* VMIDSIZE, bits[14:10] Indicates the VMID size */
2365 drvdata
->vmid_size
= BMVAL(etmidr2
, 10, 14);
2366 /* CCSIZE, bits[28:25] size of the cycle counter in bits minus 12 */
2367 drvdata
->ccsize
= BMVAL(etmidr2
, 25, 28);
2369 etmidr3
= readl_relaxed(drvdata
->base
+ TRCIDR3
);
2370 /* CCITMIN, bits[11:0] minimum threshold value that can be programmed */
2371 drvdata
->ccitmin
= BMVAL(etmidr3
, 0, 11);
2372 /* EXLEVEL_S, bits[19:16] Secure state instruction tracing */
2373 drvdata
->s_ex_level
= BMVAL(etmidr3
, 16, 19);
2374 /* EXLEVEL_NS, bits[23:20] Non-secure state instruction tracing */
2375 drvdata
->ns_ex_level
= BMVAL(etmidr3
, 20, 23);
2378 * TRCERR, bit[24] whether a trace unit can trace a
2379 * system error exception.
2381 if (BMVAL(etmidr3
, 24, 24))
2382 drvdata
->trc_error
= true;
2384 drvdata
->trc_error
= false;
2386 /* SYNCPR, bit[25] implementation has a fixed synchronization period? */
2387 if (BMVAL(etmidr3
, 25, 25))
2388 drvdata
->syncpr
= true;
2390 drvdata
->syncpr
= false;
2392 /* STALLCTL, bit[26] is stall control implemented? */
2393 if (BMVAL(etmidr3
, 26, 26))
2394 drvdata
->stallctl
= true;
2396 drvdata
->stallctl
= false;
2398 /* SYSSTALL, bit[27] implementation can support stall control? */
2399 if (BMVAL(etmidr3
, 27, 27))
2400 drvdata
->sysstall
= true;
2402 drvdata
->sysstall
= false;
2404 /* NUMPROC, bits[30:28] the number of PEs available for tracing */
2405 drvdata
->nr_pe
= BMVAL(etmidr3
, 28, 30);
2407 /* NOOVERFLOW, bit[31] is trace overflow prevention supported */
2408 if (BMVAL(etmidr3
, 31, 31))
2409 drvdata
->nooverflow
= true;
2411 drvdata
->nooverflow
= false;
2413 /* number of resources trace unit supports */
2414 etmidr4
= readl_relaxed(drvdata
->base
+ TRCIDR4
);
2415 /* NUMACPAIRS, bits[0:3] number of addr comparator pairs for tracing */
2416 drvdata
->nr_addr_cmp
= BMVAL(etmidr4
, 0, 3);
2417 /* NUMPC, bits[15:12] number of PE comparator inputs for tracing */
2418 drvdata
->nr_pe_cmp
= BMVAL(etmidr4
, 12, 15);
2419 /* NUMRSPAIR, bits[19:16] the number of resource pairs for tracing */
2420 drvdata
->nr_resource
= BMVAL(etmidr4
, 16, 19);
2422 * NUMSSCC, bits[23:20] the number of single-shot
2423 * comparator control for tracing
2425 drvdata
->nr_ss_cmp
= BMVAL(etmidr4
, 20, 23);
2426 /* NUMCIDC, bits[27:24] number of Context ID comparators for tracing */
2427 drvdata
->numcidc
= BMVAL(etmidr4
, 24, 27);
2428 /* NUMVMIDC, bits[31:28] number of VMID comparators for tracing */
2429 drvdata
->numvmidc
= BMVAL(etmidr4
, 28, 31);
2431 etmidr5
= readl_relaxed(drvdata
->base
+ TRCIDR5
);
2432 /* NUMEXTIN, bits[8:0] number of external inputs implemented */
2433 drvdata
->nr_ext_inp
= BMVAL(etmidr5
, 0, 8);
2434 /* TRACEIDSIZE, bits[21:16] indicates the trace ID width */
2435 drvdata
->trcid_size
= BMVAL(etmidr5
, 16, 21);
2436 /* ATBTRIG, bit[22] implementation can support ATB triggers? */
2437 if (BMVAL(etmidr5
, 22, 22))
2438 drvdata
->atbtrig
= true;
2440 drvdata
->atbtrig
= false;
2442 * LPOVERRIDE, bit[23] implementation supports
2443 * low-power state override
2445 if (BMVAL(etmidr5
, 23, 23))
2446 drvdata
->lpoverride
= true;
2448 drvdata
->lpoverride
= false;
2449 /* NUMSEQSTATE, bits[27:25] number of sequencer states implemented */
2450 drvdata
->nrseqstate
= BMVAL(etmidr5
, 25, 27);
2451 /* NUMCNTR, bits[30:28] number of counters available for tracing */
2452 drvdata
->nr_cntr
= BMVAL(etmidr5
, 28, 30);
2453 CS_LOCK(drvdata
->base
);
/*
 * etm4_init_default_data - establish a sane software default configuration
 * for an ETMv4 trace unit before it is first enabled.
 *
 * Only touches the cached configuration in @drvdata; nothing is written to
 * the hardware here.  Sizing fields (nr_cntr, nrseqstate, numcidc, ...) are
 * assumed to have been populated beforehand from the TRCIDRx registers.
 */
static void etm4_init_default_data(struct etmv4_drvdata *drvdata)
{
	int i;

	/* Trace the PE this ETM is bound to (TRCPROCSELR = 0) */
	drvdata->pe_sel = 0x0;
	/* Default mode: context ID, VMID, timestamping and return stack on */
	drvdata->cfg = (ETMv4_MODE_CTXID | ETM_MODE_VMID |
			ETMv4_MODE_TIMESTAMP | ETM_MODE_RETURNSTACK);

	/* disable all events tracing */
	drvdata->eventctrl0 = 0x0;
	drvdata->eventctrl1 = 0x0;

	/* disable stalling */
	drvdata->stall_ctrl = 0x0;

	/* disable timestamp event */
	drvdata->ts_ctrl = 0x0;

	/*
	 * Enable trace synchronization every 4096 bytes of trace, unless the
	 * implementation has a fixed sync period (syncpr set from TRCIDR3).
	 */
	if (drvdata->syncpr == false)
		drvdata->syncfreq = 0xC;

	/*
	 * enable viewInst to trace everything with start-stop logic in
	 * started state
	 */
	drvdata->vinst_ctrl |= BIT(0);
	/* set initial state of start-stop logic */
	if (drvdata->nr_addr_cmp)
		drvdata->vinst_ctrl |= BIT(9);

	/* no address range filtering for ViewInst */
	drvdata->viiectlr = 0x0;
	/* no start-stop filtering for ViewInst */
	drvdata->vissctlr = 0x0;

	/*
	 * Disable seq events.  There are nrseqstate states but only
	 * nrseqstate - 1 transitions, hence the loop bound.
	 */
	for (i = 0; i < drvdata->nrseqstate - 1; i++)
		drvdata->seq_ctrl[i] = 0x0;
	drvdata->seq_rst = 0x0;
	drvdata->seq_state = 0x0;

	/* disable external input events */
	drvdata->ext_inp = 0x0;

	/* clear all counters: reload value, control and current value */
	for (i = 0; i < drvdata->nr_cntr; i++) {
		drvdata->cntrldvr[i] = 0x0;
		drvdata->cntr_ctrl[i] = 0x0;
		drvdata->cntr_val[i] = 0x0;
	}

	/*
	 * Resource selector pair 0 is always implemented and reserved,
	 * hence the loop starts at index 2 (nr_resource counts pairs).
	 */
	for (i = 2; i < drvdata->nr_resource * 2; i++)
		drvdata->res_ctrl[i] = 0x0;

	/* clear all single-shot comparator controls */
	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
		drvdata->ss_ctrl[i] = 0x0;
		drvdata->ss_pe_cmp[i] = 0x0;
	}

	/*
	 * Pre-load address comparator pair 0 with the kernel text range so
	 * a default session traces the kernel.
	 */
	if (drvdata->nr_addr_cmp >= 1) {
		drvdata->addr_val[0] = (unsigned long)_stext;
		drvdata->addr_val[1] = (unsigned long)_etext;
		drvdata->addr_type[0] = ETM_ADDR_TYPE_RANGE;
		drvdata->addr_type[1] = ETM_ADDR_TYPE_RANGE;
	}

	/* clear context ID comparator values and masks */
	for (i = 0; i < drvdata->numcidc; i++) {
		drvdata->ctxid_pid[i] = 0x0;
		drvdata->ctxid_vpid[i] = 0x0;
	}

	drvdata->ctxid_mask0 = 0x0;
	drvdata->ctxid_mask1 = 0x0;

	/* clear VMID comparator values and masks */
	for (i = 0; i < drvdata->numvmidc; i++)
		drvdata->vmid_val[i] = 0x0;
	drvdata->vmid_mask0 = 0x0;
	drvdata->vmid_mask1 = 0x0;

	/*
	 * A trace ID value of 0 is invalid, so let's start at some
	 * random value that fits in 7 bits.  ETMv3.x has 0x10 so let's
	 * start at 0x20 to avoid clashing with it.
	 */
	drvdata->trcid = 0x20 + drvdata->cpu;
}
/*
 * etm4_cpu_callback - CPU hotplug notifier for the per-CPU ETMs.
 *
 * Keeps the hardware state of the ETM bound to @hcpu coherent across
 * hotplug transitions: re-unlocks the OS lock and restores tracing when a
 * CPU comes up, and quiesces the trace unit before the CPU goes down.
 * CPU_TASKS_FROZEN is masked out so suspend/resume transitions are handled
 * the same way as plain hotplug.
 */
static int etm4_cpu_callback(struct notifier_block *nfb, unsigned long action,
			    void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	/* nothing to do for a CPU that has no ETM registered against it */
	if (!etmdrvdata[cpu])
		goto out;

	switch (action & (~CPU_TASKS_FROZEN)) {
	case CPU_STARTING:
		/* runs on the incoming CPU, so cross-CPU calls aren't needed */
		spin_lock(&etmdrvdata[cpu]->spinlock);
		if (!etmdrvdata[cpu]->os_unlock) {
			/* OS lock must be cleared before touching trace regs */
			etm4_os_unlock(etmdrvdata[cpu]);
			etmdrvdata[cpu]->os_unlock = true;
		}

		/* re-program the hardware if tracing was logically enabled */
		if (etmdrvdata[cpu]->enable)
			etm4_enable_hw(etmdrvdata[cpu]);
		spin_unlock(&etmdrvdata[cpu]->spinlock);
		break;

	case CPU_ONLINE:
		/* honour boot_enable for a CPU that came up after probe */
		if (etmdrvdata[cpu]->boot_enable &&
		    !etmdrvdata[cpu]->sticky_enable)
			coresight_enable(etmdrvdata[cpu]->csdev);
		break;

	case CPU_DYING:
		/* quiesce the trace unit before the CPU disappears */
		spin_lock(&etmdrvdata[cpu]->spinlock);
		if (etmdrvdata[cpu]->enable)
			etm4_disable_hw(etmdrvdata[cpu]);
		spin_unlock(&etmdrvdata[cpu]->spinlock);
		break;
	}
out:
	return NOTIFY_OK;
}
/* Hotplug notifier shared by all ETM instances; registered once on
 * first probe and unregistered when the last ETM goes away. */
static struct notifier_block etm4_cpu_notifier = {
	.notifier_call = etm4_cpu_callback,
};
/*
 * etm4_probe - discover and register one ETMv4 trace unit.
 *
 * Maps the device, initialises its architecture data by running
 * etm4_init_arch_data() on the CPU the ETM is affine to, installs the
 * shared hotplug notifier on first use, and registers the device with the
 * coresight framework.  Returns 0 on success or a negative errno.
 */
static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
{
	int ret;
	void __iomem *base;
	struct device *dev = &adev->dev;
	struct coresight_platform_data *pdata = NULL;
	struct etmv4_drvdata *drvdata;
	struct resource *res = &adev->res;
	struct coresight_desc *desc;
	struct device_node *np = adev->dev.of_node;

	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	/* DT systems describe connections/affinity via platform data */
	if (np) {
		pdata = of_get_coresight_platform_data(dev, np);
		if (IS_ERR(pdata))
			return PTR_ERR(pdata);
		adev->dev.platform_data = pdata;
	}

	drvdata->dev = &adev->dev;
	dev_set_drvdata(dev, drvdata);

	/* Validity for the resource is already checked by the AMBA core */
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	drvdata->base = base;

	spin_lock_init(&drvdata->spinlock);

	/* without platform data, default to CPU0's ETM */
	drvdata->cpu = pdata ? pdata->cpu : 0;

	/* keep the target CPU online while we poke its ETM */
	get_online_cpus();
	etmdrvdata[drvdata->cpu] = drvdata;

	/* the OS lock must be cleared on the owning CPU itself */
	if (!smp_call_function_single(drvdata->cpu, etm4_os_unlock, drvdata, 1))
		drvdata->os_unlock = true;

	/* read the TRCIDRx ID registers from the owning CPU */
	if (smp_call_function_single(drvdata->cpu,
				     etm4_init_arch_data, drvdata, 1))
		dev_err(dev, "ETM arch init failed\n");

	/* first ETM registers the shared hotplug notifier */
	if (!etm4_count++)
		register_hotcpu_notifier(&etm4_cpu_notifier);

	put_online_cpus();

	if (etm4_arch_supported(drvdata->arch) == false) {
		ret = -EINVAL;
		goto err_arch_supported;
	}
	etm4_init_default_data(drvdata);

	/* balance the AMBA core's pm_runtime_get */
	pm_runtime_put(&adev->dev);

	desc->type = CORESIGHT_DEV_TYPE_SOURCE;
	desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
	desc->ops = &etm4_cs_ops;
	desc->pdata = pdata;
	desc->dev = dev;
	desc->groups = coresight_etmv4_groups;
	drvdata->csdev = coresight_register(desc);
	if (IS_ERR(drvdata->csdev)) {
		ret = PTR_ERR(drvdata->csdev);
		goto err_coresight_register;
	}

	dev_info(dev, "%s initialized\n", (char *)id->data);

	/* optionally start tracing right away (boot_enable module param) */
	if (boot_enable) {
		coresight_enable(drvdata->csdev);
		drvdata->boot_enable = true;
	}

	return 0;

err_arch_supported:
	pm_runtime_put(&adev->dev);
err_coresight_register:
	/* drop our reference; last ETM removes the hotplug notifier */
	if (--etm4_count == 0)
		unregister_hotcpu_notifier(&etm4_cpu_notifier);

	return ret;
}
2677 static int etm4_remove(struct amba_device
*adev
)
2679 struct etmv4_drvdata
*drvdata
= amba_get_drvdata(adev
);
2681 coresight_unregister(drvdata
->csdev
);
2682 if (--etm4_count
== 0)
2683 unregister_hotcpu_notifier(&etm4_cpu_notifier
);
/*
 * AMBA peripheral IDs this driver binds to.  The extraction lost the
 * numeric id/mask values; the ones below are restored from the upstream
 * table — NOTE(review): verify against the original file.
 */
static struct amba_id etm4_ids[] = {
	{       /* ETM 4.0 - Qualcomm */
		.id	= 0x0003b95d,
		.mask	= 0x0003ffff,
		.data	= "ETM 4.0",
	},
	{       /* ETM 4.0 - Juno board */
		.id	= 0x000bb95e,
		.mask	= 0x000fffff,
		.data	= "ETM 4.0",
	},
	{ 0, 0 },
};
/* AMBA driver glue: bind etm4_probe/etm4_remove to the IDs above. */
static struct amba_driver etm4x_driver = {
	.drv = {
		.name	= "coresight-etm4x",
	},
	.probe		= etm4_probe,
	.remove		= etm4_remove,
	.id_table	= etm4_ids,
};

/* expands to module init/exit that (un)register the AMBA driver */
module_amba_driver(etm4x_driver);