// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2016 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <linux/atomic.h>
#include <linux/circ_buf.h>
#include <linux/coresight.h>
#include <linux/perf_event.h>
#include <linux/slab.h>

#include "coresight-priv.h"
#include "coresight-tmc.h"
#include "coresight-etm-perf.h"

static int tmc_set_etf_buffer(struct coresight_device *csdev,
			      struct perf_output_handle *handle);

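/*
 * Program the TMC as a circular-buffer (ETB) sink: select circular buffer
 * mode, enable the formatter and flush-on-trigger controls, set the trigger
 * counter and start capture.
 */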
static void __tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	/* Wait for TMCSReady bit to be set */
	tmc_wait_for_tmcready(drvdata);

	writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
		       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
		       TMC_FFCR_TRIGON_TRIGIN,
		       drvdata->base + TMC_FFCR);

	writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

static int tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
	int rc = coresight_claim_device(drvdata->base);

	if (rc)
		return rc;

	__tmc_etb_enable_hw(drvdata);
	return 0;
}

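/*
 * Drain the trace RAM into drvdata->buf: read 32-bit words from TMC_RRD
 * until the empty marker (0xFFFFFFFF) is returned, updating drvdata->len
 * as we go. If the buffer wrapped around, insert a barrier packet at the
 * beginning of the buffer.
 */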
static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
{
	char *bufp;
	u32 read_data, lost;

	/* Check if the buffer wrapped around. */
	lost = readl_relaxed(drvdata->base + TMC_STS) & TMC_STS_FULL;
	bufp = drvdata->buf;
	drvdata->len = 0;
	while (1) {
		read_data = readl_relaxed(drvdata->base + TMC_RRD);
		if (read_data == 0xFFFFFFFF)
			break;
		memcpy(bufp, &read_data, 4);
		bufp += 4;
		drvdata->len += 4;
	}

	if (lost)
		coresight_insert_barrier_packet(drvdata->buf);
	return;
}

static void __tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	/*
	 * When operating in sysFS mode the content of the buffer needs to be
	 * read before the TMC is disabled.
	 */
	if (drvdata->mode == CS_MODE_SYSFS)
		tmc_etb_dump_hw(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
	__tmc_etb_disable_hw(drvdata);
	coresight_disclaim_device(drvdata->base);
}

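/*
 * Program the TMC as a hardware FIFO (ETF link): with the buffer watermark
 * cleared, formatted trace data flows through to the output port instead of
 * being stored in the trace RAM.
 */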
static void __tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	/* Wait for TMCSReady bit to be set */
	tmc_wait_for_tmcready(drvdata);

	writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE);
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI,
		       drvdata->base + TMC_FFCR);
	writel_relaxed(0x0, drvdata->base + TMC_BUFWM);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

static int tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
	int rc = coresight_claim_device(drvdata->base);

	if (rc)
		return rc;

	__tmc_etf_enable_hw(drvdata);
	return 0;
}

static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	tmc_disable_hw(drvdata);
	coresight_disclaim_device_unlocked(drvdata->base);
	CS_LOCK(drvdata->base);
}

/*
 * Return the available trace data in the buffer from @pos, with
 * a maximum limit of @len, updating the @bufpp on where to
 * find it.
 */
ssize_t tmc_etb_get_sysfs_trace(struct tmc_drvdata *drvdata,
				loff_t pos, size_t len, char **bufpp)
{
	ssize_t actual = len;

	/* Adjust the len to available size @pos */
	if (pos + actual > drvdata->len)
		actual = drvdata->len - pos;
	if (actual > 0)
		*bufpp = drvdata->buf + pos;
	return actual;
}

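/*
 * Enable the ETB/ETF as a sink for sysFS-driven sessions. A contiguous
 * buffer of drvdata->size bytes is allocated on first use; subsequent
 * enables while already in sysFS mode only take an extra reference on
 * the device.
 */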
static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
{
	int ret = 0;
	bool used = false;
	char *buf = NULL;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	/*
	 * If we don't have a buffer, release the lock and allocate memory.
	 * Otherwise keep the lock and move along.
	 */
	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (!drvdata->buf) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);

		/* Allocating the memory here while outside of the spinlock */
		buf = kzalloc(drvdata->size, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		/* Let's try again */
		spin_lock_irqsave(&drvdata->spinlock, flags);
	}

	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/*
	 * In sysFS mode we can have multiple writers per sink. Since this
	 * sink is already enabled no memory is needed and the HW need not be
	 * touched.
	 */
	if (drvdata->mode == CS_MODE_SYSFS) {
		atomic_inc(csdev->refcnt);
		goto out;
	}

	/*
	 * If drvdata::buf isn't NULL, memory was allocated for a previous
	 * trace run but wasn't read. If so simply zero-out the memory.
	 * Otherwise use the memory allocated above.
	 *
	 * The memory is freed when users read the buffer using the
	 * /dev/xyz.{etf|etb} interface. See tmc_read_unprepare_etb() for
	 * details.
	 */
	if (drvdata->buf) {
		memset(drvdata->buf, 0, drvdata->size);
	} else {
		used = true;
		drvdata->buf = buf;
	}

	ret = tmc_etb_enable_hw(drvdata);
	if (!ret) {
		drvdata->mode = CS_MODE_SYSFS;
		atomic_inc(csdev->refcnt);
	} else {
		/* Free up the buffer if we failed to enable */
		used = false;
	}
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Free memory outside the spinlock if need be */
	if (!used)
		kfree(buf);

	return ret;
}

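/*
 * Enable the ETB/ETF as a sink for a perf session. The sink is not shared:
 * it must not be in use from sysFS, a read must not be in progress and,
 * once associated with a process (drvdata->pid), only events owned by that
 * process may use it until it is disabled.
 */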
static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, void *data)
{
	int ret = 0;
	pid_t pid;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct perf_output_handle *handle = data;
	struct cs_buffers *buf = etm_perf_sink_config(handle);

	spin_lock_irqsave(&drvdata->spinlock, flags);
	do {
		ret = -EINVAL;
		if (drvdata->reading)
			break;
		/*
		 * No need to continue if the ETB/ETF is already operated
		 * from sysFS.
		 */
		if (drvdata->mode == CS_MODE_SYSFS) {
			ret = -EBUSY;
			break;
		}

		/* Get a handle on the pid of the process to monitor */
		pid = buf->pid;

		if (drvdata->pid != -1 && drvdata->pid != pid) {
			ret = -EBUSY;
			break;
		}

		ret = tmc_set_etf_buffer(csdev, handle);
		if (ret)
			break;

		/*
		 * No HW configuration is needed if the sink is already in
		 * use for this session.
		 */
		if (drvdata->pid == pid) {
			atomic_inc(csdev->refcnt);
			break;
		}

		ret = tmc_etb_enable_hw(drvdata);
		if (!ret) {
			/* Associate with monitored process. */
			drvdata->pid = pid;
			drvdata->mode = CS_MODE_PERF;
			atomic_inc(csdev->refcnt);
		}
	} while (0);
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return ret;
}

static int tmc_enable_etf_sink(struct coresight_device *csdev,
			       u32 mode, void *data)
{
	int ret;

	switch (mode) {
	case CS_MODE_SYSFS:
		ret = tmc_enable_etf_sink_sysfs(csdev);
		break;
	case CS_MODE_PERF:
		ret = tmc_enable_etf_sink_perf(csdev, data);
		break;
	/* We shouldn't be here */
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		return ret;

	dev_dbg(&csdev->dev, "TMC-ETB/ETF enabled\n");
	return 0;
}

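/*
 * Disable the ETB/ETF sink. The hardware is only stopped when the last
 * reference is dropped, at which point the sink is dissociated from the
 * monitored process and returned to the disabled state.
 */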
static int tmc_disable_etf_sink(struct coresight_device *csdev)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock_irqsave(&drvdata->spinlock, flags);

	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EBUSY;
	}

	if (atomic_dec_return(csdev->refcnt)) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EBUSY;
	}

	/* Complain if we (somehow) got out of sync */
	WARN_ON_ONCE(drvdata->mode == CS_MODE_DISABLED);
	tmc_etb_disable_hw(drvdata);
	/* Dissociate from monitored process. */
	drvdata->pid = -1;
	drvdata->mode = CS_MODE_DISABLED;

	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_dbg(&csdev->dev, "TMC-ETB/ETF disabled\n");
	return 0;
}

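/*
 * Enable the ETF as a link between an input and an output port. The
 * hardware FIFO is programmed on the first enable only; later calls just
 * take an additional reference.
 */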
static int tmc_enable_etf_link(struct coresight_device *csdev,
			       int inport, int outport)
{
	int ret = 0;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	bool first_enable = false;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EBUSY;
	}

	if (atomic_read(&csdev->refcnt[0]) == 0) {
		ret = tmc_etf_enable_hw(drvdata);
		if (!ret) {
			drvdata->mode = CS_MODE_SYSFS;
			first_enable = true;
		}
	}
	if (!ret)
		atomic_inc(&csdev->refcnt[0]);
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	if (first_enable)
		dev_dbg(&csdev->dev, "TMC-ETF enabled\n");
	return ret;
}

static void tmc_disable_etf_link(struct coresight_device *csdev,
				 int inport, int outport)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	bool last_disable = false;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return;
	}

	if (atomic_dec_return(&csdev->refcnt[0]) == 0) {
		tmc_etf_disable_hw(drvdata);
		drvdata->mode = CS_MODE_DISABLED;
		last_disable = true;
	}
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	if (last_disable)
		dev_dbg(&csdev->dev, "TMC-ETF disabled\n");
}

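/*
 * Allocate the cs_buffers descriptor used to shuttle trace data into the
 * perf AUX ring buffer, preferably on the NUMA node of the CPU the event
 * runs on.
 */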
static void *tmc_alloc_etf_buffer(struct coresight_device *csdev,
				  struct perf_event *event, void **pages,
				  int nr_pages, bool overwrite)
{
	int node;
	struct cs_buffers *buf;

	node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);

	/* Allocate memory structure for interaction with Perf */
	buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
	if (!buf)
		return NULL;

	buf->pid = task_pid_nr(event->owner);
	buf->snapshot = overwrite;
	buf->nr_pages = nr_pages;
	buf->data_pages = pages;

	return buf;
}

static void tmc_free_etf_buffer(void *config)
{
	struct cs_buffers *buf = config;

	kfree(buf);
}

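/*
 * Derive the current write position (page index and offset within that
 * page) from the perf handle's head so that tmc_update_etf_buffer() knows
 * where to start copying trace data.
 */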
static int tmc_set_etf_buffer(struct coresight_device *csdev,
			      struct perf_output_handle *handle)
{
	int ret = 0;
	unsigned long head;
	struct cs_buffers *buf = etm_perf_sink_config(handle);

	if (!buf)
		return -EINVAL;

	/* wrap head around to the amount of space we have */
	head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);

	/* find the page to write to */
	buf->cur = head / PAGE_SIZE;

	/* and offset within that page */
	buf->offset = head % PAGE_SIZE;

	local_set(&buf->data_size, 0);

	return ret;
}

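/*
 * Stop the TMC and copy the content of its trace RAM into the perf AUX
 * pages. Returns the number of bytes made available to perf; if the RAM
 * holds more data than the handle can take, the read pointer is advanced
 * so that only the most recent trace is kept.
 */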
static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev,
					   struct perf_output_handle *handle,
					   void *sink_config)
{
	bool lost = false;
	int i, cur;
	const u32 *barrier;
	u32 *buf_ptr;
	u64 read_ptr, write_ptr;
	u32 status;
	unsigned long offset, to_read = 0, flags;
	struct cs_buffers *buf = sink_config;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	if (!buf)
		return 0;

	/* This shouldn't happen */
	if (WARN_ON_ONCE(drvdata->mode != CS_MODE_PERF))
		return 0;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	/* Don't do anything if another tracer is using this sink */
	if (atomic_read(csdev->refcnt) != 1)
		goto out;

	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);

	read_ptr = tmc_read_rrp(drvdata);
	write_ptr = tmc_read_rwp(drvdata);

	/*
	 * Get a hold of the status register and see if a wrap around
	 * has occurred. If so adjust things accordingly.
	 */
	status = readl_relaxed(drvdata->base + TMC_STS);
	if (status & TMC_STS_FULL) {
		lost = true;
		to_read = drvdata->size;
	} else {
		to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->size);
	}

	/*
	 * The TMC RAM buffer may be bigger than the space available in the
	 * perf ring buffer (handle->size). If so advance the RRP so that we
	 * get the latest trace data. In snapshot mode none of that matters
	 * since we are expected to clobber stale data in favour of the latest
	 * traces.
	 */
	if (!buf->snapshot && to_read > handle->size) {
		u32 mask = tmc_get_memwidth_mask(drvdata);

		/*
		 * Make sure the new size is aligned in accordance with the
		 * requirement explained in function tmc_get_memwidth_mask().
		 */
		to_read = handle->size & mask;
		/* Move the RAM read pointer up */
		read_ptr = (write_ptr + drvdata->size) - to_read;
		/* Make sure we are still within our limits */
		if (read_ptr > (drvdata->size - 1))
			read_ptr -= drvdata->size;
		/* Tell the HW */
		tmc_write_rrp(drvdata, read_ptr);
		lost = true;
	}

	/*
	 * Don't set the TRUNCATED flag in snapshot mode because 1) the
	 * captured buffer is expected to be truncated and 2) a full buffer
	 * prevents the event from being re-enabled by the perf core,
	 * resulting in stale data being sent to user space.
	 */
	if (!buf->snapshot && lost)
		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);

	cur = buf->cur;
	offset = buf->offset;
	barrier = coresight_barrier_pkt;

	/* for every byte to read */
	for (i = 0; i < to_read; i += 4) {
		buf_ptr = buf->data_pages[cur] + offset;
		*buf_ptr = readl_relaxed(drvdata->base + TMC_RRD);

		if (lost && *barrier) {
			*buf_ptr = *barrier;
			barrier++;
		}

		offset += 4;
		if (offset >= PAGE_SIZE) {
			offset = 0;
			cur++;
			/* wrap around at the end of the buffer */
			cur &= buf->nr_pages - 1;
		}
	}

	/*
	 * In snapshot mode we simply increment the head by the number of bytes
	 * that were written. User space function cs_etm_find_snapshot() will
	 * figure out how many bytes to get from the AUX buffer based on the
	 * position of the head.
	 */
	if (buf->snapshot)
		handle->head += to_read;

	CS_LOCK(drvdata->base);
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return to_read;
}

static const struct coresight_ops_sink tmc_etf_sink_ops = {
	.enable		= tmc_enable_etf_sink,
	.disable	= tmc_disable_etf_sink,
	.alloc_buffer	= tmc_alloc_etf_buffer,
	.free_buffer	= tmc_free_etf_buffer,
	.update_buffer	= tmc_update_etf_buffer,
};

static const struct coresight_ops_link tmc_etf_link_ops = {
	.enable		= tmc_enable_etf_link,
	.disable	= tmc_disable_etf_link,
};

const struct coresight_ops tmc_etb_cs_ops = {
	.sink_ops	= &tmc_etf_sink_ops,
};

const struct coresight_ops tmc_etf_cs_ops = {
	.sink_ops	= &tmc_etf_sink_ops,
	.link_ops	= &tmc_etf_link_ops,
};

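/*
 * Prepare the ETB/ETF for reading through /dev: only valid when the device
 * is idle or driven from sysFS in circular buffer mode, in which case
 * capture is stopped and the internal buffer is filled from the trace RAM.
 */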
int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
{
	enum tmc_mode mode;
	int ret = 0;
	unsigned long flags;

	/* config types are set at boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
			 drvdata->config_type != TMC_CONFIG_TYPE_ETF))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/* Don't interfere if operated from Perf */
	if (drvdata->mode == CS_MODE_PERF) {
		ret = -EINVAL;
		goto out;
	}

	/* If drvdata::buf is NULL the trace data has been read already */
	if (drvdata->buf == NULL) {
		ret = -EINVAL;
		goto out;
	}

	/* Disable the TMC if need be */
	if (drvdata->mode == CS_MODE_SYSFS) {
		/* There is no point in reading a TMC in HW FIFO mode */
		mode = readl_relaxed(drvdata->base + TMC_MODE);
		if (mode != TMC_MODE_CIRCULAR_BUFFER) {
			ret = -EINVAL;
			goto out;
		}
		__tmc_etb_disable_hw(drvdata);
	}

	drvdata->reading = true;
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return ret;
}

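/*
 * Counterpart to tmc_read_prepare_etb(): if a sysFS session is still
 * active, clear the buffer and restart capture; otherwise release the
 * trace buffer now that it has been read.
 */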
int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
{
	char *buf = NULL;
	enum tmc_mode mode;
	unsigned long flags;

	/* config types are set at boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
			 drvdata->config_type != TMC_CONFIG_TYPE_ETF))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	/* Re-enable the TMC if need be */
	if (drvdata->mode == CS_MODE_SYSFS) {
		/* There is no point in reading a TMC in HW FIFO mode */
		mode = readl_relaxed(drvdata->base + TMC_MODE);
		if (mode != TMC_MODE_CIRCULAR_BUFFER) {
			spin_unlock_irqrestore(&drvdata->spinlock, flags);
			return -EINVAL;
		}
		/*
		 * The trace run will continue with the same allocated trace
		 * buffer. As such zero-out the buffer so that we don't end
		 * up with stale data.
		 *
		 * Since the tracer is still enabled drvdata::buf
		 * can't be NULL.
		 */
		memset(drvdata->buf, 0, drvdata->size);
		__tmc_etb_enable_hw(drvdata);
	} else {
		/*
		 * The ETB/ETF is not tracing and the buffer was just read.
		 * As such prepare to free the trace buffer.
		 */
		buf = drvdata->buf;
		drvdata->buf = NULL;
	}

	drvdata->reading = false;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/*
	 * Free allocated memory outside of the spinlock. There is no need
	 * to assert the validity of 'buf' since calling kfree(NULL) is safe.
	 */
	kfree(buf);

	return 0;
}