// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2016 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <linux/atomic.h>
#include <linux/circ_buf.h>
#include <linux/coresight.h>
#include <linux/perf_event.h>
#include <linux/slab.h>
#include "coresight-priv.h"
#include "coresight-tmc.h"
#include "coresight-etm-perf.h"

static int tmc_set_etf_buffer(struct coresight_device *csdev,
			      struct perf_output_handle *handle);

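/*
 * Program the TMC for use as a circular buffer (ETB mode): configure the
 * formatter/flusher and the trigger counter, then turn the capture engine
 * on. Callers are expected to have claimed the device beforehand (see
 * tmc_etb_enable_hw()).
 */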
static void __tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	/* Wait for TMCReady bit to be set */
	tmc_wait_for_tmcready(drvdata);

	writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
		       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
		       TMC_FFCR_TRIGON_TRIGIN,
		       drvdata->base + TMC_FFCR);

	writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

static int tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
	int rc = coresight_claim_device(drvdata->base);

	if (rc)
		return rc;

	__tmc_etb_enable_hw(drvdata);
	return 0;
}

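/*
 * Drain the trace RAM into drvdata->buf through the RAM Read Data
 * register. The loop below stops when TMC_RRD returns 0xFFFFFFFF, the
 * value the TMC produces once no more data is available. If the buffer
 * wrapped around, prefix the data with a barrier packet so decoders can
 * re-synchronise on the discontinuity.
 */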
static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
{
	char *bufp;
	u32 read_data, lost;

	/* Check if the buffer wrapped around. */
	lost = readl_relaxed(drvdata->base + TMC_STS) & TMC_STS_FULL;
	bufp = drvdata->buf;
	drvdata->len = 0;
	while (1) {
		read_data = readl_relaxed(drvdata->base + TMC_RRD);
		if (read_data == 0xFFFFFFFF)
			break;
		memcpy(bufp, &read_data, 4);
		bufp += 4;
		drvdata->len += 4;
	}

	if (lost)
		coresight_insert_barrier_packet(drvdata->buf);
	return;
}

static void __tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	/*
	 * When operating in sysFS mode the content of the buffer needs to be
	 * read before the TMC is disabled.
	 */
	if (drvdata->mode == CS_MODE_SYSFS)
		tmc_etb_dump_hw(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
	__tmc_etb_disable_hw(drvdata);
	coresight_disclaim_device(drvdata->base);
}

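/*
 * Program the TMC for use as a link (ETF in HW FIFO mode): trace data
 * flows through to the output port instead of terminating in the RAM
 * buffer. The buffer watermark (TMC_BUFWM) is zeroed before the capture
 * engine is enabled.
 */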
static void __tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	/* Wait for TMCReady bit to be set */
	tmc_wait_for_tmcready(drvdata);

	writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE);
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI,
		       drvdata->base + TMC_FFCR);
	writel_relaxed(0x0, drvdata->base + TMC_BUFWM);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

static int tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
	int rc = coresight_claim_device(drvdata->base);

	if (rc)
		return rc;

	__tmc_etf_enable_hw(drvdata);
	return 0;
}

static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	tmc_disable_hw(drvdata);
	coresight_disclaim_device_unlocked(drvdata->base);
	CS_LOCK(drvdata->base);
}

/*
 * Return the available trace data in the buffer from @pos, with a
 * maximum limit of @len, updating @bufpp to point at where to find it.
 */
ssize_t tmc_etb_get_sysfs_trace(struct tmc_drvdata *drvdata,
				loff_t pos, size_t len, char **bufpp)
{
	ssize_t actual = len;

	/* Adjust the len to the trace data available from @pos */
	if (pos + actual > drvdata->len)
		actual = drvdata->len - pos;
	if (actual > 0)
		*bufpp = drvdata->buf + pos;
	return actual;
}

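/*
 * Enable the ETB/ETF as a sink on behalf of the sysFS interface. The
 * backing buffer is allocated outside of the spinlock, after which the
 * "reading" and "mode" states are re-checked under the lock before the
 * hardware is touched. Multiple sysFS users may share an enabled sink;
 * only the first one programs the hardware.
 */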
static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
{
	int ret = 0;
	bool used = false;
	char *buf = NULL;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	/*
	 * If we don't have a buffer, release the lock and allocate memory.
	 * Otherwise keep the lock and move along.
	 */
	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (!drvdata->buf) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);

		/* Allocating the memory here while outside of the spinlock */
		buf = kzalloc(drvdata->size, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		/* Let's try again */
		spin_lock_irqsave(&drvdata->spinlock, flags);
	}

	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/*
	 * In sysFS mode we can have multiple writers per sink. Since this
	 * sink is already enabled no memory is needed and the HW need not be
	 * touched.
	 */
	if (drvdata->mode == CS_MODE_SYSFS) {
		atomic_inc(csdev->refcnt);
		goto out;
	}

	/*
	 * If drvdata::buf isn't NULL, memory was allocated for a previous
	 * trace run but wasn't read. If so simply zero-out the memory.
	 * Otherwise use the memory allocated above.
	 *
	 * The memory is freed when users read the buffer using the
	 * /dev/xyz.{etf|etb} interface. See tmc_read_unprepare_etf() for
	 * details.
	 */
	if (drvdata->buf) {
		memset(drvdata->buf, 0, drvdata->size);
	} else {
		used = true;
		drvdata->buf = buf;
	}

	ret = tmc_etb_enable_hw(drvdata);
	if (!ret) {
		drvdata->mode = CS_MODE_SYSFS;
		atomic_inc(csdev->refcnt);
	} else {
		/* Free up the buffer if we failed to enable */
		used = false;
	}
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Free memory outside the spinlock if need be */
	if (!used)
		kfree(buf);

	return ret;
}

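/*
 * Enable the ETB/ETF as a sink on behalf of the perf interface. A sink
 * can only service one perf session at a time, so the owner pid of the
 * event is recorded on first use and subsequent users must either match
 * it or get -EBUSY. The do/while(0) construct only exists to allow an
 * early exit from the locked region via break.
 */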
static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, void *data)
{
	int ret = 0;
	pid_t pid;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct perf_output_handle *handle = data;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	do {
		ret = -EINVAL;
		if (drvdata->reading)
			break;
		/*
		 * No need to continue if the ETB/ETF is already operated
		 * from sysFS.
		 */
		if (drvdata->mode == CS_MODE_SYSFS) {
			ret = -EBUSY;
			break;
		}

		/* Get a handle on the pid of the process to monitor */
		pid = task_pid_nr(handle->event->owner);

		if (drvdata->pid != -1 && drvdata->pid != pid) {
			ret = -EBUSY;
			break;
		}

		ret = tmc_set_etf_buffer(csdev, handle);
		if (ret)
			break;

		/*
		 * No HW configuration is needed if the sink is already in
		 * use for this session.
		 */
		if (drvdata->pid == pid) {
			atomic_inc(csdev->refcnt);
			break;
		}

		ret = tmc_etb_enable_hw(drvdata);
		if (!ret) {
			/* Associate with monitored process. */
			drvdata->pid = pid;
			drvdata->mode = CS_MODE_PERF;
			atomic_inc(csdev->refcnt);
		}
	} while (0);
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return ret;
}

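/* Dispatch to the sysFS or perf enable path based on the requested mode */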
static int tmc_enable_etf_sink(struct coresight_device *csdev,
			       u32 mode, void *data)
{
	int ret;

	switch (mode) {
	case CS_MODE_SYSFS:
		ret = tmc_enable_etf_sink_sysfs(csdev);
		break;
	case CS_MODE_PERF:
		ret = tmc_enable_etf_sink_perf(csdev, data);
		break;
	/* We shouldn't be here */
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		return ret;

	dev_dbg(&csdev->dev, "TMC-ETB/ETF enabled\n");
	return 0;
}

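/*
 * Drop one reference on the sink and tear down the hardware when the
 * last user goes away. Returns -EBUSY while a read is in progress or
 * while other users still hold a reference.
 */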
static int tmc_disable_etf_sink(struct coresight_device *csdev)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock_irqsave(&drvdata->spinlock, flags);

	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EBUSY;
	}

	if (atomic_dec_return(csdev->refcnt)) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EBUSY;
	}

	/* Complain if we (somehow) got out of sync */
	WARN_ON_ONCE(drvdata->mode == CS_MODE_DISABLED);
	tmc_etb_disable_hw(drvdata);
	/* Dissociate from monitored process. */
	drvdata->pid = -1;
	drvdata->mode = CS_MODE_DISABLED;

	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_dbg(&csdev->dev, "TMC-ETB/ETF disabled\n");
	return 0;
}

static int tmc_enable_etf_link(struct coresight_device *csdev,
			       int inport, int outport)
{
	int ret;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EBUSY;
	}

	ret = tmc_etf_enable_hw(drvdata);
	if (!ret)
		drvdata->mode = CS_MODE_SYSFS;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	if (!ret)
		dev_dbg(&csdev->dev, "TMC-ETF enabled\n");
	return ret;
}

static void tmc_disable_etf_link(struct coresight_device *csdev,
				 int inport, int outport)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return;
	}

	tmc_etf_disable_hw(drvdata);
	drvdata->mode = CS_MODE_DISABLED;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_dbg(&csdev->dev, "TMC-ETF disabled\n");
}

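/*
 * Allocate the cs_buffers structure that ties the perf AUX ring buffer
 * pages to this sink. Allocation is done on the node of the CPU the
 * event runs on to keep accesses local.
 */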
static void *tmc_alloc_etf_buffer(struct coresight_device *csdev,
				  struct perf_event *event, void **pages,
				  int nr_pages, bool overwrite)
{
	int node;
	struct cs_buffers *buf;

	node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);

	/* Allocate memory structure for interaction with Perf */
	buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
	if (!buf)
		return NULL;

	buf->snapshot = overwrite;
	buf->nr_pages = nr_pages;
	buf->data_pages = pages;

	return buf;
}

static void tmc_free_etf_buffer(void *config)
{
	struct cs_buffers *buf = config;

	kfree(buf);
}

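/*
 * Convert the linear head position of the perf handle into a page index
 * and offset within cs_buffers, so that tmc_update_etf_buffer() knows
 * where to start copying. The wrap-around below relies on the AUX
 * buffer spanning a power-of-two number of pages.
 */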
static int tmc_set_etf_buffer(struct coresight_device *csdev,
			      struct perf_output_handle *handle)
{
	int ret = 0;
	unsigned long head;
	struct cs_buffers *buf = etm_perf_sink_config(handle);

	if (!buf)
		return -EINVAL;

	/* wrap head around to the amount of space we have */
	head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);

	/* find the page to write to */
	buf->cur = head / PAGE_SIZE;

	/* and offset within that page */
	buf->offset = head % PAGE_SIZE;

	local_set(&buf->data_size, 0);

	return ret;
}

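/*
 * Stop the TMC and copy the trace RAM content into the perf AUX pages,
 * returning the number of bytes copied so the perf core can advance the
 * handle. If the buffer wrapped, or if the RAM content is larger than
 * the space left in the ring buffer, the read pointer is moved forward
 * so only the most recent trace is kept, and barrier packets are
 * inserted at the start of the data so decoders can re-synchronise.
 */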
static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev,
					   struct perf_output_handle *handle,
					   void *sink_config)
{
	bool lost = false;
	int i, cur;
	const u32 *barrier;
	u32 *buf_ptr;
	u64 read_ptr, write_ptr;
	u32 status;
	unsigned long offset, to_read = 0, flags;
	struct cs_buffers *buf = sink_config;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	if (!buf)
		return 0;

	/* This shouldn't happen */
	if (WARN_ON_ONCE(drvdata->mode != CS_MODE_PERF))
		return 0;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	/* Don't do anything if another tracer is using this sink */
	if (atomic_read(csdev->refcnt) != 1)
		goto out;

	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);

	read_ptr = tmc_read_rrp(drvdata);
	write_ptr = tmc_read_rwp(drvdata);

	/*
	 * Get a hold of the status register and see if a wrap around
	 * has occurred. If so adjust things accordingly.
	 */
	status = readl_relaxed(drvdata->base + TMC_STS);
	if (status & TMC_STS_FULL) {
		lost = true;
		to_read = drvdata->size;
	} else {
		to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->size);
	}

	/*
	 * The TMC RAM buffer may be bigger than the space available in the
	 * perf ring buffer (handle->size). If so advance the RRP so that we
	 * get the latest trace data. In snapshot mode none of that matters
	 * since we are expected to clobber stale data in favour of the latest
	 * traces.
	 */
	if (!buf->snapshot && to_read > handle->size) {
		u32 mask = tmc_get_memwidth_mask(drvdata);

		/*
		 * Make sure the new size is aligned in accordance with the
		 * requirement explained in function tmc_get_memwidth_mask().
		 */
		to_read = handle->size & mask;
		/* Move the RAM read pointer up */
		read_ptr = (write_ptr + drvdata->size) - to_read;
		/* Make sure we are still within our limits */
		if (read_ptr > (drvdata->size - 1))
			read_ptr -= drvdata->size;
		/* Tell the HW */
		tmc_write_rrp(drvdata, read_ptr);
		lost = true;
	}

	/*
	 * Don't set the TRUNCATED flag in snapshot mode because 1) the
	 * captured buffer is expected to be truncated and 2) a full buffer
	 * prevents the event from being re-enabled by the perf core,
	 * resulting in stale data being sent to user space.
	 */
	if (!buf->snapshot && lost)
		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);

	cur = buf->cur;
	offset = buf->offset;
	barrier = barrier_pkt;

	/* copy the trace data out, one 32-bit word at a time */
	for (i = 0; i < to_read; i += 4) {
		buf_ptr = buf->data_pages[cur] + offset;
		*buf_ptr = readl_relaxed(drvdata->base + TMC_RRD);

		if (lost && *barrier) {
			*buf_ptr = *barrier;
			barrier++;
		}

		offset += 4;
		if (offset >= PAGE_SIZE) {
			offset = 0;
			cur++;
			/* wrap around at the end of the buffer */
			cur &= buf->nr_pages - 1;
		}
	}

	/*
	 * In snapshot mode we simply increment the head by the number of bytes
	 * that were written. User space function cs_etm_find_snapshot() will
	 * figure out how many bytes to get from the AUX buffer based on the
	 * position of the head.
	 */
	if (buf->snapshot)
		handle->head += to_read;

	CS_LOCK(drvdata->base);
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return to_read;
}

static const struct coresight_ops_sink tmc_etf_sink_ops = {
	.enable		= tmc_enable_etf_sink,
	.disable	= tmc_disable_etf_sink,
	.alloc_buffer	= tmc_alloc_etf_buffer,
	.free_buffer	= tmc_free_etf_buffer,
	.update_buffer	= tmc_update_etf_buffer,
};

static const struct coresight_ops_link tmc_etf_link_ops = {
	.enable		= tmc_enable_etf_link,
	.disable	= tmc_disable_etf_link,
};

const struct coresight_ops tmc_etb_cs_ops = {
	.sink_ops	= &tmc_etf_sink_ops,
};

const struct coresight_ops tmc_etf_cs_ops = {
	.sink_ops	= &tmc_etf_sink_ops,
	.link_ops	= &tmc_etf_link_ops,
};

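/*
 * Prepare the device for reading trace data through the /dev interface:
 * make sure the TMC is in circular buffer mode and not driven by perf,
 * stop the hardware if it is collecting for sysFS, and flag the device
 * as being read so that concurrent enable/disable requests back off.
 */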
int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
{
	enum tmc_mode mode;
	int ret = 0;
	unsigned long flags;

	/* config types are set at boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
			 drvdata->config_type != TMC_CONFIG_TYPE_ETF))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/* There is no point in reading a TMC in HW FIFO mode */
	mode = readl_relaxed(drvdata->base + TMC_MODE);
	if (mode != TMC_MODE_CIRCULAR_BUFFER) {
		ret = -EINVAL;
		goto out;
	}

	/* Don't interfere if operated from Perf */
	if (drvdata->mode == CS_MODE_PERF) {
		ret = -EINVAL;
		goto out;
	}

	/* If drvdata::buf is NULL the trace data has been read already */
	if (drvdata->buf == NULL) {
		ret = -EINVAL;
		goto out;
	}

	/* Disable the TMC if need be */
	if (drvdata->mode == CS_MODE_SYSFS)
		__tmc_etb_disable_hw(drvdata);

	drvdata->reading = true;
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return ret;
}

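/*
 * Undo tmc_read_prepare_etb() once user space is done with the buffer:
 * if a sysFS trace session is still active, zero the buffer and restart
 * the hardware; otherwise detach the buffer and free it outside the
 * spinlock.
 */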
int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
{
	char *buf = NULL;
	enum tmc_mode mode;
	unsigned long flags;

	/* config types are set at boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
			 drvdata->config_type != TMC_CONFIG_TYPE_ETF))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	/* There is no point in reading a TMC in HW FIFO mode */
	mode = readl_relaxed(drvdata->base + TMC_MODE);
	if (mode != TMC_MODE_CIRCULAR_BUFFER) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EINVAL;
	}

	/* Re-enable the TMC if need be */
	if (drvdata->mode == CS_MODE_SYSFS) {
		/*
		 * The trace run will continue with the same allocated trace
		 * buffer. As such zero-out the buffer so that we don't end
		 * up with stale data.
		 *
		 * Since the tracer is still enabled drvdata::buf
		 * can't be NULL.
		 */
		memset(drvdata->buf, 0, drvdata->size);
		__tmc_etb_enable_hw(drvdata);
	} else {
		/*
		 * The ETB/ETF is not tracing and the buffer was just read.
		 * As such prepare to free the trace buffer.
		 */
		buf = drvdata->buf;
		drvdata->buf = NULL;
	}

	drvdata->reading = false;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/*
	 * Free allocated memory outside of the spinlock. There is no need
	 * to assert the validity of 'buf' since calling kfree(NULL) is safe.
	 */
	kfree(buf);

	return 0;
}