Lines matching refs: drvdata

19 static void __tmc_etb_enable_hw(struct tmc_drvdata *drvdata)  in __tmc_etb_enable_hw()  argument
21 CS_UNLOCK(drvdata->base); in __tmc_etb_enable_hw()
24 tmc_wait_for_tmcready(drvdata); in __tmc_etb_enable_hw()
26 writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE); in __tmc_etb_enable_hw()
30 drvdata->base + TMC_FFCR); in __tmc_etb_enable_hw()
32 writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG); in __tmc_etb_enable_hw()
33 tmc_enable_hw(drvdata); in __tmc_etb_enable_hw()
35 CS_LOCK(drvdata->base); in __tmc_etb_enable_hw()
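
For context, a sketch of the complete __tmc_etb_enable_hw() as it reads in drivers/hwtracing/coresight/coresight-tmc-etf.c; the lines elided by the listing (comments and the FFCR flag set) are filled in from the upstream file and may differ slightly between kernel versions:

static void __tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
        CS_UNLOCK(drvdata->base);

        /* Wait for TMCSReady bit to be set */
        tmc_wait_for_tmcready(drvdata);

        /* Program the TMC as a circular buffer (ETB/sink mode) */
        writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
        /* FFCR flag combination as in upstream; exact set may vary by version */
        writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
                       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
                       TMC_FFCR_TRIGON_TRIGIN,
                       drvdata->base + TMC_FFCR);

        writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
        tmc_enable_hw(drvdata);

        CS_LOCK(drvdata->base);
}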
38 static int tmc_etb_enable_hw(struct tmc_drvdata *drvdata) in tmc_etb_enable_hw() argument
40 int rc = coresight_claim_device(drvdata->csdev); in tmc_etb_enable_hw()
45 __tmc_etb_enable_hw(drvdata); in tmc_etb_enable_hw()
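
The wrapper only touches the hardware once the CLAIM handshake succeeds; a minimal sketch, with the error path (not shown in the listing) filled in from upstream:

static int tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
        int rc = coresight_claim_device(drvdata->csdev);

        /* Assumption from upstream: bail out if the device is already claimed */
        if (rc)
                return rc;

        __tmc_etb_enable_hw(drvdata);
        return 0;
}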
49 static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata) in tmc_etb_dump_hw() argument
55 lost = readl_relaxed(drvdata->base + TMC_STS) & TMC_STS_FULL; in tmc_etb_dump_hw()
56 bufp = drvdata->buf; in tmc_etb_dump_hw()
57 drvdata->len = 0; in tmc_etb_dump_hw()
59 read_data = readl_relaxed(drvdata->base + TMC_RRD); in tmc_etb_dump_hw()
64 drvdata->len += 4; in tmc_etb_dump_hw()
68 coresight_insert_barrier_packet(drvdata->buf); in tmc_etb_dump_hw()
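
tmc_etb_dump_hw() drains the RAM Read Data register one word at a time until the TMC returns its end-of-data marker; a sketch, assuming the upstream 0xFFFFFFFF sentinel and memcpy body for the lines the listing omits:

static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
{
        char *bufp;
        u32 read_data, lost;

        /* Check whether the trace wrapped around, i.e. data was lost */
        lost = readl_relaxed(drvdata->base + TMC_STS) & TMC_STS_FULL;
        bufp = drvdata->buf;
        drvdata->len = 0;
        while (1) {
                read_data = readl_relaxed(drvdata->base + TMC_RRD);
                /* Assumed end-of-data marker, as in the upstream file */
                if (read_data == 0xFFFFFFFF)
                        break;
                memcpy(bufp, &read_data, 4);
                bufp += 4;
                drvdata->len += 4;
        }

        if (lost)
                coresight_insert_barrier_packet(drvdata->buf);
}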
72 static void __tmc_etb_disable_hw(struct tmc_drvdata *drvdata) in __tmc_etb_disable_hw() argument
74 CS_UNLOCK(drvdata->base); in __tmc_etb_disable_hw()
76 tmc_flush_and_stop(drvdata); in __tmc_etb_disable_hw()
81 if (drvdata->mode == CS_MODE_SYSFS) in __tmc_etb_disable_hw()
82 tmc_etb_dump_hw(drvdata); in __tmc_etb_disable_hw()
83 tmc_disable_hw(drvdata); in __tmc_etb_disable_hw()
85 CS_LOCK(drvdata->base); in __tmc_etb_disable_hw()
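
Putting the listed lines back together, __tmc_etb_disable_hw() flushes and stops the formatter, dumps the buffer when driven from sysFS, and only then disables the TMC; a sketch:

static void __tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
        CS_UNLOCK(drvdata->base);

        tmc_flush_and_stop(drvdata);
        /*
         * In sysFS mode the buffer content must be read out before the
         * TMC is disabled; in perf mode the update callback handles it.
         */
        if (drvdata->mode == CS_MODE_SYSFS)
                tmc_etb_dump_hw(drvdata);
        tmc_disable_hw(drvdata);

        CS_LOCK(drvdata->base);
}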
88 static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata) in tmc_etb_disable_hw() argument
90 __tmc_etb_disable_hw(drvdata); in tmc_etb_disable_hw()
91 coresight_disclaim_device(drvdata->csdev); in tmc_etb_disable_hw()
94 static void __tmc_etf_enable_hw(struct tmc_drvdata *drvdata) in __tmc_etf_enable_hw() argument
96 CS_UNLOCK(drvdata->base); in __tmc_etf_enable_hw()
99 tmc_wait_for_tmcready(drvdata); in __tmc_etf_enable_hw()
101 writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE); in __tmc_etf_enable_hw()
103 drvdata->base + TMC_FFCR); in __tmc_etf_enable_hw()
104 writel_relaxed(0x0, drvdata->base + TMC_BUFWM); in __tmc_etf_enable_hw()
105 tmc_enable_hw(drvdata); in __tmc_etf_enable_hw()
107 CS_LOCK(drvdata->base); in __tmc_etf_enable_hw()
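
In link (ETF) configuration the TMC is programmed as a hardware FIFO instead of a circular buffer; a sketch of the full helper, with the FFCR flags taken from the upstream file:

static void __tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
        CS_UNLOCK(drvdata->base);

        /* Wait for TMCSReady bit to be set */
        tmc_wait_for_tmcready(drvdata);

        /* Program the TMC as a HW FIFO sitting between source and sink */
        writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE);
        writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI,
                       drvdata->base + TMC_FFCR);
        writel_relaxed(0x0, drvdata->base + TMC_BUFWM);
        tmc_enable_hw(drvdata);

        CS_LOCK(drvdata->base);
}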
110 static int tmc_etf_enable_hw(struct tmc_drvdata *drvdata) in tmc_etf_enable_hw() argument
112 int rc = coresight_claim_device(drvdata->csdev); in tmc_etf_enable_hw()
117 __tmc_etf_enable_hw(drvdata); in tmc_etf_enable_hw()
121 static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata) in tmc_etf_disable_hw() argument
123 struct coresight_device *csdev = drvdata->csdev; in tmc_etf_disable_hw()
125 CS_UNLOCK(drvdata->base); in tmc_etf_disable_hw()
127 tmc_flush_and_stop(drvdata); in tmc_etf_disable_hw()
128 tmc_disable_hw(drvdata); in tmc_etf_disable_hw()
130 CS_LOCK(drvdata->base); in tmc_etf_disable_hw()
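
The ETF disable path also has to release the CLAIM tag; the disclaim call does not appear in the listing because it takes csdev rather than drvdata. A sketch, assuming the unlocked disclaim variant is used while CS_UNLOCK is in effect:

static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
{
        struct coresight_device *csdev = drvdata->csdev;

        CS_UNLOCK(drvdata->base);

        tmc_flush_and_stop(drvdata);
        tmc_disable_hw(drvdata);
        /* Assumption: disclaim while the coresight registers are unlocked */
        coresight_disclaim_device_unlocked(csdev);
        CS_LOCK(drvdata->base);
}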
138 ssize_t tmc_etb_get_sysfs_trace(struct tmc_drvdata *drvdata, in tmc_etb_get_sysfs_trace() argument
144 if (pos + actual > drvdata->len) in tmc_etb_get_sysfs_trace()
145 actual = drvdata->len - pos; in tmc_etb_get_sysfs_trace()
147 *bufpp = drvdata->buf + pos; in tmc_etb_get_sysfs_trace()
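
The sysFS read helper hands back a pointer into the captured buffer, clamped to the amount of valid trace data; a sketch, with the pos/len parameter names assumed from upstream:

ssize_t tmc_etb_get_sysfs_trace(struct tmc_drvdata *drvdata,
                                loff_t pos, size_t len, char **bufpp)
{
        ssize_t actual = len;

        /* Never hand out more than the captured trace length */
        if (pos + actual > drvdata->len)
                actual = drvdata->len - pos;
        if (actual > 0)
                *bufpp = drvdata->buf + pos;

        return actual;
}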
157 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); in tmc_enable_etf_sink_sysfs() local
163 spin_lock_irqsave(&drvdata->spinlock, flags); in tmc_enable_etf_sink_sysfs()
164 if (!drvdata->buf) { in tmc_enable_etf_sink_sysfs()
165 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_enable_etf_sink_sysfs()
168 buf = kzalloc(drvdata->size, GFP_KERNEL); in tmc_enable_etf_sink_sysfs()
173 spin_lock_irqsave(&drvdata->spinlock, flags); in tmc_enable_etf_sink_sysfs()
176 if (drvdata->reading) { in tmc_enable_etf_sink_sysfs()
186 if (drvdata->mode == CS_MODE_SYSFS) { in tmc_enable_etf_sink_sysfs()
200 if (drvdata->buf) { in tmc_enable_etf_sink_sysfs()
201 memset(drvdata->buf, 0, drvdata->size); in tmc_enable_etf_sink_sysfs()
204 drvdata->buf = buf; in tmc_enable_etf_sink_sysfs()
207 ret = tmc_etb_enable_hw(drvdata); in tmc_enable_etf_sink_sysfs()
209 drvdata->mode = CS_MODE_SYSFS; in tmc_enable_etf_sink_sysfs()
216 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_enable_etf_sink_sysfs()
230 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); in tmc_enable_etf_sink_perf() local
234 spin_lock_irqsave(&drvdata->spinlock, flags); in tmc_enable_etf_sink_perf()
237 if (drvdata->reading) in tmc_enable_etf_sink_perf()
243 if (drvdata->mode == CS_MODE_SYSFS) { in tmc_enable_etf_sink_perf()
251 if (drvdata->pid != -1 && drvdata->pid != pid) { in tmc_enable_etf_sink_perf()
264 if (drvdata->pid == pid) { in tmc_enable_etf_sink_perf()
269 ret = tmc_etb_enable_hw(drvdata); in tmc_enable_etf_sink_perf()
272 drvdata->pid = pid; in tmc_enable_etf_sink_perf()
273 drvdata->mode = CS_MODE_PERF; in tmc_enable_etf_sink_perf()
277 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_enable_etf_sink_perf()
310 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); in tmc_disable_etf_sink() local
312 spin_lock_irqsave(&drvdata->spinlock, flags); in tmc_disable_etf_sink()
314 if (drvdata->reading) { in tmc_disable_etf_sink()
315 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_disable_etf_sink()
320 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_disable_etf_sink()
325 WARN_ON_ONCE(drvdata->mode == CS_MODE_DISABLED); in tmc_disable_etf_sink()
326 tmc_etb_disable_hw(drvdata); in tmc_disable_etf_sink()
328 drvdata->pid = -1; in tmc_disable_etf_sink()
329 drvdata->mode = CS_MODE_DISABLED; in tmc_disable_etf_sink()
331 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_disable_etf_sink()
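
tmc_disable_etf_sink() refuses to tear the sink down while a sysFS read is in flight, then disables the hardware and dissociates the session from its pid; a simplified sketch with the sink reference-count handling elided:

static int tmc_disable_etf_sink(struct coresight_device *csdev)
{
        unsigned long flags;
        struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        spin_lock_irqsave(&drvdata->spinlock, flags);

        /* A pending sysFS read keeps the sink busy */
        if (drvdata->reading) {
                spin_unlock_irqrestore(&drvdata->spinlock, flags);
                return -EBUSY;
        }

        /* ... sink reference-count handling elided; see the upstream file ... */

        /* Complain if we (somehow) got out of sync */
        WARN_ON_ONCE(drvdata->mode == CS_MODE_DISABLED);
        tmc_etb_disable_hw(drvdata);
        /* Dissociate from the monitored process */
        drvdata->pid = -1;
        drvdata->mode = CS_MODE_DISABLED;

        spin_unlock_irqrestore(&drvdata->spinlock, flags);
        return 0;
}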
342 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); in tmc_enable_etf_link() local
345 spin_lock_irqsave(&drvdata->spinlock, flags); in tmc_enable_etf_link()
346 if (drvdata->reading) { in tmc_enable_etf_link()
347 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_enable_etf_link()
352 ret = tmc_etf_enable_hw(drvdata); in tmc_enable_etf_link()
354 drvdata->mode = CS_MODE_SYSFS; in tmc_enable_etf_link()
360 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_enable_etf_link()
371 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); in tmc_disable_etf_link() local
374 spin_lock_irqsave(&drvdata->spinlock, flags); in tmc_disable_etf_link()
375 if (drvdata->reading) { in tmc_disable_etf_link()
376 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_disable_etf_link()
381 tmc_etf_disable_hw(drvdata); in tmc_disable_etf_link()
382 drvdata->mode = CS_MODE_DISABLED; in tmc_disable_etf_link()
385 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_disable_etf_link()
456 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); in tmc_update_etf_buffer() local
462 if (WARN_ON_ONCE(drvdata->mode != CS_MODE_PERF)) in tmc_update_etf_buffer()
465 spin_lock_irqsave(&drvdata->spinlock, flags); in tmc_update_etf_buffer()
471 CS_UNLOCK(drvdata->base); in tmc_update_etf_buffer()
473 tmc_flush_and_stop(drvdata); in tmc_update_etf_buffer()
475 read_ptr = tmc_read_rrp(drvdata); in tmc_update_etf_buffer()
476 write_ptr = tmc_read_rwp(drvdata); in tmc_update_etf_buffer()
482 status = readl_relaxed(drvdata->base + TMC_STS); in tmc_update_etf_buffer()
485 to_read = drvdata->size; in tmc_update_etf_buffer()
487 to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->size); in tmc_update_etf_buffer()
498 u32 mask = tmc_get_memwidth_mask(drvdata); in tmc_update_etf_buffer()
506 read_ptr = (write_ptr + drvdata->size) - to_read; in tmc_update_etf_buffer()
508 if (read_ptr > (drvdata->size - 1)) in tmc_update_etf_buffer()
509 read_ptr -= drvdata->size; in tmc_update_etf_buffer()
511 tmc_write_rrp(drvdata, read_ptr); in tmc_update_etf_buffer()
531 *buf_ptr = readl_relaxed(drvdata->base + TMC_RRD); in tmc_update_etf_buffer()
556 CS_LOCK(drvdata->base); in tmc_update_etf_buffer()
558 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_update_etf_buffer()
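
The perf update path stops the TMC, computes how much trace is pending with the circular-buffer helpers, rewinds RRP when more data is available than the perf AUX buffer can hold, and then drains TMC_RRD into the ring buffer. A trimmed fragment of that pointer arithmetic (barrier-packet insertion and perf handle bookkeeping omitted; handle is assumed to be the perf_output_handle passed to the callback):

        /* Fragment: inside tmc_update_etf_buffer(), after tmc_flush_and_stop() */
        status = readl_relaxed(drvdata->base + TMC_STS);
        if (status & TMC_STS_FULL) {
                /* The buffer wrapped around: everything in it is valid */
                lost = true;
                to_read = drvdata->size;
        } else {
                to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->size);
        }

        /* If the perf AUX buffer is smaller, keep only the newest data */
        if (to_read > handle->size) {
                u32 mask = tmc_get_memwidth_mask(drvdata);

                /* Align the new size to the TMC memory width */
                to_read = handle->size & mask;
                /* Move the RAM read pointer up so exactly to_read bytes remain */
                read_ptr = (write_ptr + drvdata->size) - to_read;
                /* Make sure we are still within the buffer limits */
                if (read_ptr > (drvdata->size - 1))
                        read_ptr -= drvdata->size;
                /* Tell the hardware */
                tmc_write_rrp(drvdata, read_ptr);
                lost = true;
        }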
585 int tmc_read_prepare_etb(struct tmc_drvdata *drvdata) in tmc_read_prepare_etb() argument
592 if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB && in tmc_read_prepare_etb()
593 drvdata->config_type != TMC_CONFIG_TYPE_ETF)) in tmc_read_prepare_etb()
596 spin_lock_irqsave(&drvdata->spinlock, flags); in tmc_read_prepare_etb()
598 if (drvdata->reading) { in tmc_read_prepare_etb()
604 if (drvdata->mode == CS_MODE_PERF) { in tmc_read_prepare_etb()
610 if (drvdata->buf == NULL) { in tmc_read_prepare_etb()
616 if (drvdata->mode == CS_MODE_SYSFS) { in tmc_read_prepare_etb()
618 mode = readl_relaxed(drvdata->base + TMC_MODE); in tmc_read_prepare_etb()
623 __tmc_etb_disable_hw(drvdata); in tmc_read_prepare_etb()
626 drvdata->reading = true; in tmc_read_prepare_etb()
628 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_read_prepare_etb()
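
Reconstructed from the listed lines, tmc_read_prepare_etb() validates the configuration, rejects concurrent perf sessions or repeated reads, stops a sysFS capture that is still running, and marks the device as being read; a sketch with the error codes assumed from upstream:

int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
{
        enum tmc_mode mode;
        int ret = 0;
        unsigned long flags;

        /* config_type is fixed at probe time and never changes */
        if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
                         drvdata->config_type != TMC_CONFIG_TYPE_ETF))
                return -EINVAL;

        spin_lock_irqsave(&drvdata->spinlock, flags);

        if (drvdata->reading) {
                ret = -EBUSY;
                goto out;
        }

        /* Don't interfere with a session driven from perf */
        if (drvdata->mode == CS_MODE_PERF) {
                ret = -EINVAL;
                goto out;
        }

        /* If drvdata::buf is NULL the trace data has already been read */
        if (drvdata->buf == NULL) {
                ret = -EINVAL;
                goto out;
        }

        /* Disable the TMC if it is currently collecting in sysFS mode */
        if (drvdata->mode == CS_MODE_SYSFS) {
                /* There is no point in reading a TMC in HW FIFO mode */
                mode = readl_relaxed(drvdata->base + TMC_MODE);
                if (mode != TMC_MODE_CIRCULAR_BUFFER) {
                        ret = -EINVAL;
                        goto out;
                }
                __tmc_etb_disable_hw(drvdata);
        }

        drvdata->reading = true;
out:
        spin_unlock_irqrestore(&drvdata->spinlock, flags);
        return ret;
}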
633 int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata) in tmc_read_unprepare_etb() argument
640 if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB && in tmc_read_unprepare_etb()
641 drvdata->config_type != TMC_CONFIG_TYPE_ETF)) in tmc_read_unprepare_etb()
644 spin_lock_irqsave(&drvdata->spinlock, flags); in tmc_read_unprepare_etb()
647 if (drvdata->mode == CS_MODE_SYSFS) { in tmc_read_unprepare_etb()
649 mode = readl_relaxed(drvdata->base + TMC_MODE); in tmc_read_unprepare_etb()
651 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_read_unprepare_etb()
662 memset(drvdata->buf, 0, drvdata->size); in tmc_read_unprepare_etb()
663 __tmc_etb_enable_hw(drvdata); in tmc_read_unprepare_etb()
669 buf = drvdata->buf; in tmc_read_unprepare_etb()
670 drvdata->buf = NULL; in tmc_read_unprepare_etb()
673 drvdata->reading = false; in tmc_read_unprepare_etb()
674 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_read_unprepare_etb()
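
Its counterpart either restarts the sysFS capture on a zeroed buffer or detaches the buffer for freeing once tracing has stopped; a sketch, again with the omitted branches filled in from the upstream file:

int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
{
        char *buf = NULL;
        enum tmc_mode mode;
        unsigned long flags;

        if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
                         drvdata->config_type != TMC_CONFIG_TYPE_ETF))
                return -EINVAL;

        spin_lock_irqsave(&drvdata->spinlock, flags);

        /* Re-enable the TMC if a sysFS session is still running */
        if (drvdata->mode == CS_MODE_SYSFS) {
                /* There is no point in reading a TMC in HW FIFO mode */
                mode = readl_relaxed(drvdata->base + TMC_MODE);
                if (mode != TMC_MODE_CIRCULAR_BUFFER) {
                        spin_unlock_irqrestore(&drvdata->spinlock, flags);
                        return -EINVAL;
                }
                /* The same buffer is reused: wipe stale data first */
                memset(drvdata->buf, 0, drvdata->size);
                __tmc_etb_enable_hw(drvdata);
        } else {
                /* Tracing has stopped: hand the buffer back for freeing */
                buf = drvdata->buf;
                drvdata->buf = NULL;
        }

        drvdata->reading = false;
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        /* Free outside the spinlock; kfree(NULL) is a no-op */
        kfree(buf);
        return 0;
}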