Lines matching refs: drvdata (simplified sketches of the main usage patterns follow after the listing)

40 	struct tmc_drvdata	*drvdata;  member
597 static int tmc_etr_alloc_flat_buf(struct tmc_drvdata *drvdata, in tmc_etr_alloc_flat_buf() argument
602 struct device *real_dev = drvdata->csdev->dev.parent; in tmc_etr_alloc_flat_buf()
620 flat_buf->dev = &drvdata->csdev->dev; in tmc_etr_alloc_flat_buf()
677 static int tmc_etr_alloc_sg_buf(struct tmc_drvdata *drvdata, in tmc_etr_alloc_sg_buf() argument
682 struct device *dev = &drvdata->csdev->dev; in tmc_etr_alloc_sg_buf()
760 tmc_etr_get_catu_device(struct tmc_drvdata *drvdata) in tmc_etr_get_catu_device() argument
763 struct coresight_device *tmp, *etr = drvdata->csdev; in tmc_etr_get_catu_device()
778 static inline int tmc_etr_enable_catu(struct tmc_drvdata *drvdata, in tmc_etr_enable_catu() argument
781 struct coresight_device *catu = tmc_etr_get_catu_device(drvdata); in tmc_etr_enable_catu()
788 static inline void tmc_etr_disable_catu(struct tmc_drvdata *drvdata) in tmc_etr_disable_catu() argument
790 struct coresight_device *catu = tmc_etr_get_catu_device(drvdata); in tmc_etr_disable_catu()
793 helper_ops(catu)->disable(catu, drvdata->etr_buf); in tmc_etr_disable_catu()
815 struct tmc_drvdata *drvdata, in tmc_etr_mode_alloc_buf() argument
826 rc = etr_buf_ops[mode]->alloc(drvdata, etr_buf, in tmc_etr_mode_alloc_buf()
844 static struct etr_buf *tmc_alloc_etr_buf(struct tmc_drvdata *drvdata, in tmc_alloc_etr_buf() argument
852 struct device *dev = &drvdata->csdev->dev; in tmc_alloc_etr_buf()
854 has_etr_sg = tmc_etr_has_cap(drvdata, TMC_ETR_SG); in tmc_alloc_etr_buf()
856 has_catu = !!tmc_etr_get_catu_device(drvdata); in tmc_alloc_etr_buf()
880 rc = tmc_etr_mode_alloc_buf(ETR_MODE_FLAT, drvdata, in tmc_alloc_etr_buf()
883 rc = tmc_etr_mode_alloc_buf(ETR_MODE_ETR_SG, drvdata, in tmc_alloc_etr_buf()
886 rc = tmc_etr_mode_alloc_buf(ETR_MODE_CATU, drvdata, in tmc_alloc_etr_buf()
941 static void tmc_sync_etr_buf(struct tmc_drvdata *drvdata) in tmc_sync_etr_buf() argument
943 struct etr_buf *etr_buf = drvdata->etr_buf; in tmc_sync_etr_buf()
947 rrp = tmc_read_rrp(drvdata); in tmc_sync_etr_buf()
948 rwp = tmc_read_rwp(drvdata); in tmc_sync_etr_buf()
949 status = readl_relaxed(drvdata->base + TMC_STS); in tmc_sync_etr_buf()
956 dev_dbg(&drvdata->csdev->dev, in tmc_sync_etr_buf()
970 static void __tmc_etr_enable_hw(struct tmc_drvdata *drvdata) in __tmc_etr_enable_hw() argument
973 struct etr_buf *etr_buf = drvdata->etr_buf; in __tmc_etr_enable_hw()
975 CS_UNLOCK(drvdata->base); in __tmc_etr_enable_hw()
978 tmc_wait_for_tmcready(drvdata); in __tmc_etr_enable_hw()
980 writel_relaxed(etr_buf->size / 4, drvdata->base + TMC_RSZ); in __tmc_etr_enable_hw()
981 writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE); in __tmc_etr_enable_hw()
983 axictl = readl_relaxed(drvdata->base + TMC_AXICTL); in __tmc_etr_enable_hw()
988 if (tmc_etr_has_cap(drvdata, TMC_ETR_AXI_ARCACHE)) { in __tmc_etr_enable_hw()
996 writel_relaxed(axictl, drvdata->base + TMC_AXICTL); in __tmc_etr_enable_hw()
997 tmc_write_dba(drvdata, etr_buf->hwaddr); in __tmc_etr_enable_hw()
1003 if (tmc_etr_has_cap(drvdata, TMC_ETR_SAVE_RESTORE)) { in __tmc_etr_enable_hw()
1004 tmc_write_rrp(drvdata, etr_buf->hwaddr); in __tmc_etr_enable_hw()
1005 tmc_write_rwp(drvdata, etr_buf->hwaddr); in __tmc_etr_enable_hw()
1006 sts = readl_relaxed(drvdata->base + TMC_STS) & ~TMC_STS_FULL; in __tmc_etr_enable_hw()
1007 writel_relaxed(sts, drvdata->base + TMC_STS); in __tmc_etr_enable_hw()
1013 drvdata->base + TMC_FFCR); in __tmc_etr_enable_hw()
1014 writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG); in __tmc_etr_enable_hw()
1015 tmc_enable_hw(drvdata); in __tmc_etr_enable_hw()
1017 CS_LOCK(drvdata->base); in __tmc_etr_enable_hw()
1020 static int tmc_etr_enable_hw(struct tmc_drvdata *drvdata, in tmc_etr_enable_hw() argument
1030 WARN_ON(!tmc_etr_has_cap(drvdata, TMC_ETR_SG))) in tmc_etr_enable_hw()
1033 if (WARN_ON(drvdata->etr_buf)) in tmc_etr_enable_hw()
1040 rc = tmc_etr_enable_catu(drvdata, etr_buf); in tmc_etr_enable_hw()
1043 rc = coresight_claim_device(drvdata->csdev); in tmc_etr_enable_hw()
1045 drvdata->etr_buf = etr_buf; in tmc_etr_enable_hw()
1046 __tmc_etr_enable_hw(drvdata); in tmc_etr_enable_hw()
1062 ssize_t tmc_etr_get_sysfs_trace(struct tmc_drvdata *drvdata, in tmc_etr_get_sysfs_trace() argument
1067 struct etr_buf *etr_buf = drvdata->sysfs_buf; in tmc_etr_get_sysfs_trace()
1082 tmc_etr_setup_sysfs_buf(struct tmc_drvdata *drvdata) in tmc_etr_setup_sysfs_buf() argument
1084 return tmc_alloc_etr_buf(drvdata, drvdata->size, in tmc_etr_setup_sysfs_buf()
1095 static void tmc_etr_sync_sysfs_buf(struct tmc_drvdata *drvdata) in tmc_etr_sync_sysfs_buf() argument
1097 struct etr_buf *etr_buf = drvdata->etr_buf; in tmc_etr_sync_sysfs_buf()
1099 if (WARN_ON(drvdata->sysfs_buf != etr_buf)) { in tmc_etr_sync_sysfs_buf()
1100 tmc_etr_free_sysfs_buf(drvdata->sysfs_buf); in tmc_etr_sync_sysfs_buf()
1101 drvdata->sysfs_buf = NULL; in tmc_etr_sync_sysfs_buf()
1103 tmc_sync_etr_buf(drvdata); in tmc_etr_sync_sysfs_buf()
1114 static void __tmc_etr_disable_hw(struct tmc_drvdata *drvdata) in __tmc_etr_disable_hw() argument
1116 CS_UNLOCK(drvdata->base); in __tmc_etr_disable_hw()
1118 tmc_flush_and_stop(drvdata); in __tmc_etr_disable_hw()
1123 if (drvdata->mode == CS_MODE_SYSFS) in __tmc_etr_disable_hw()
1124 tmc_etr_sync_sysfs_buf(drvdata); in __tmc_etr_disable_hw()
1126 tmc_disable_hw(drvdata); in __tmc_etr_disable_hw()
1128 CS_LOCK(drvdata->base); in __tmc_etr_disable_hw()
1132 void tmc_etr_disable_hw(struct tmc_drvdata *drvdata) in tmc_etr_disable_hw() argument
1134 __tmc_etr_disable_hw(drvdata); in tmc_etr_disable_hw()
1136 tmc_etr_disable_catu(drvdata); in tmc_etr_disable_hw()
1137 coresight_disclaim_device(drvdata->csdev); in tmc_etr_disable_hw()
1139 drvdata->etr_buf = NULL; in tmc_etr_disable_hw()
1146 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); in tmc_enable_etr_sink_sysfs() local
1157 spin_lock_irqsave(&drvdata->spinlock, flags); in tmc_enable_etr_sink_sysfs()
1158 sysfs_buf = READ_ONCE(drvdata->sysfs_buf); in tmc_enable_etr_sink_sysfs()
1159 if (!sysfs_buf || (sysfs_buf->size != drvdata->size)) { in tmc_enable_etr_sink_sysfs()
1160 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_enable_etr_sink_sysfs()
1163 free_buf = new_buf = tmc_etr_setup_sysfs_buf(drvdata); in tmc_enable_etr_sink_sysfs()
1168 spin_lock_irqsave(&drvdata->spinlock, flags); in tmc_enable_etr_sink_sysfs()
1171 if (drvdata->reading || drvdata->mode == CS_MODE_PERF) { in tmc_enable_etr_sink_sysfs()
1181 if (drvdata->mode == CS_MODE_SYSFS) { in tmc_enable_etr_sink_sysfs()
1190 sysfs_buf = READ_ONCE(drvdata->sysfs_buf); in tmc_enable_etr_sink_sysfs()
1193 drvdata->sysfs_buf = new_buf; in tmc_enable_etr_sink_sysfs()
1196 ret = tmc_etr_enable_hw(drvdata, drvdata->sysfs_buf); in tmc_enable_etr_sink_sysfs()
1198 drvdata->mode = CS_MODE_SYSFS; in tmc_enable_etr_sink_sysfs()
1202 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_enable_etr_sink_sysfs()
1222 alloc_etr_buf(struct tmc_drvdata *drvdata, struct perf_event *event, in alloc_etr_buf() argument
1234 if ((nr_pages << PAGE_SHIFT) > drvdata->size) { in alloc_etr_buf()
1235 etr_buf = tmc_alloc_etr_buf(drvdata, (nr_pages << PAGE_SHIFT), in alloc_etr_buf()
1245 size = drvdata->size; in alloc_etr_buf()
1247 etr_buf = tmc_alloc_etr_buf(drvdata, size, 0, node, NULL); in alloc_etr_buf()
1260 get_perf_etr_buf_cpu_wide(struct tmc_drvdata *drvdata, in get_perf_etr_buf_cpu_wide() argument
1287 mutex_lock(&drvdata->idr_mutex); in get_perf_etr_buf_cpu_wide()
1288 etr_buf = idr_find(&drvdata->idr, pid); in get_perf_etr_buf_cpu_wide()
1291 mutex_unlock(&drvdata->idr_mutex); in get_perf_etr_buf_cpu_wide()
1296 mutex_unlock(&drvdata->idr_mutex); in get_perf_etr_buf_cpu_wide()
1298 etr_buf = alloc_etr_buf(drvdata, event, nr_pages, pages, snapshot); in get_perf_etr_buf_cpu_wide()
1303 mutex_lock(&drvdata->idr_mutex); in get_perf_etr_buf_cpu_wide()
1304 ret = idr_alloc(&drvdata->idr, etr_buf, pid, pid + 1, GFP_KERNEL); in get_perf_etr_buf_cpu_wide()
1305 mutex_unlock(&drvdata->idr_mutex); in get_perf_etr_buf_cpu_wide()
1324 get_perf_etr_buf_per_thread(struct tmc_drvdata *drvdata, in get_perf_etr_buf_per_thread() argument
1332 return alloc_etr_buf(drvdata, event, nr_pages, pages, snapshot); in get_perf_etr_buf_per_thread()
1336 get_perf_etr_buf(struct tmc_drvdata *drvdata, struct perf_event *event, in get_perf_etr_buf() argument
1340 return get_perf_etr_buf_per_thread(drvdata, event, nr_pages, in get_perf_etr_buf()
1343 return get_perf_etr_buf_cpu_wide(drvdata, event, nr_pages, in get_perf_etr_buf()
1348 tmc_etr_setup_perf_buf(struct tmc_drvdata *drvdata, struct perf_event *event, in tmc_etr_setup_perf_buf() argument
1361 etr_buf = get_perf_etr_buf(drvdata, event, nr_pages, pages, snapshot); in tmc_etr_setup_perf_buf()
1373 etr_perf->drvdata = drvdata; in tmc_etr_setup_perf_buf()
1385 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); in tmc_alloc_etr_buffer() local
1387 etr_perf = tmc_etr_setup_perf_buf(drvdata, event, in tmc_alloc_etr_buffer()
1405 struct tmc_drvdata *drvdata = etr_perf->drvdata; in tmc_free_etr_buffer() local
1411 mutex_lock(&drvdata->idr_mutex); in tmc_free_etr_buffer()
1414 mutex_unlock(&drvdata->idr_mutex); in tmc_free_etr_buffer()
1419 buf = idr_remove(&drvdata->idr, etr_perf->pid); in tmc_free_etr_buffer()
1420 mutex_unlock(&drvdata->idr_mutex); in tmc_free_etr_buffer()
1501 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); in tmc_update_etr_buffer() local
1505 spin_lock_irqsave(&drvdata->spinlock, flags); in tmc_update_etr_buffer()
1509 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_update_etr_buffer()
1513 if (WARN_ON(drvdata->perf_buf != etr_buf)) { in tmc_update_etr_buffer()
1515 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_update_etr_buffer()
1519 CS_UNLOCK(drvdata->base); in tmc_update_etr_buffer()
1521 tmc_flush_and_stop(drvdata); in tmc_update_etr_buffer()
1522 tmc_sync_etr_buf(drvdata); in tmc_update_etr_buffer()
1524 CS_LOCK(drvdata->base); in tmc_update_etr_buffer()
1525 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_update_etr_buffer()
1539 u32 mask = tmc_get_memwidth_mask(drvdata); in tmc_update_etr_buffer()
1583 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); in tmc_enable_etr_sink_perf() local
1587 spin_lock_irqsave(&drvdata->spinlock, flags); in tmc_enable_etr_sink_perf()
1589 if (drvdata->mode == CS_MODE_SYSFS) { in tmc_enable_etr_sink_perf()
1603 if (drvdata->pid != -1 && drvdata->pid != pid) { in tmc_enable_etr_sink_perf()
1614 if (drvdata->pid == pid) { in tmc_enable_etr_sink_perf()
1619 rc = tmc_etr_enable_hw(drvdata, etr_perf->etr_buf); in tmc_enable_etr_sink_perf()
1622 drvdata->pid = pid; in tmc_enable_etr_sink_perf()
1623 drvdata->mode = CS_MODE_PERF; in tmc_enable_etr_sink_perf()
1624 drvdata->perf_buf = etr_perf->etr_buf; in tmc_enable_etr_sink_perf()
1629 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_enable_etr_sink_perf()
1650 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); in tmc_disable_etr_sink() local
1652 spin_lock_irqsave(&drvdata->spinlock, flags); in tmc_disable_etr_sink()
1654 if (drvdata->reading) { in tmc_disable_etr_sink()
1655 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_disable_etr_sink()
1660 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_disable_etr_sink()
1665 WARN_ON_ONCE(drvdata->mode == CS_MODE_DISABLED); in tmc_disable_etr_sink()
1666 tmc_etr_disable_hw(drvdata); in tmc_disable_etr_sink()
1668 drvdata->pid = -1; in tmc_disable_etr_sink()
1669 drvdata->mode = CS_MODE_DISABLED; in tmc_disable_etr_sink()
1671 drvdata->perf_buf = NULL; in tmc_disable_etr_sink()
1673 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_disable_etr_sink()
1691 int tmc_read_prepare_etr(struct tmc_drvdata *drvdata) in tmc_read_prepare_etr() argument
1697 if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR)) in tmc_read_prepare_etr()
1700 spin_lock_irqsave(&drvdata->spinlock, flags); in tmc_read_prepare_etr()
1701 if (drvdata->reading) { in tmc_read_prepare_etr()
1711 if (!drvdata->sysfs_buf) { in tmc_read_prepare_etr()
1717 if (drvdata->mode == CS_MODE_SYSFS) in tmc_read_prepare_etr()
1718 __tmc_etr_disable_hw(drvdata); in tmc_read_prepare_etr()
1720 drvdata->reading = true; in tmc_read_prepare_etr()
1722 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_read_prepare_etr()
1727 int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata) in tmc_read_unprepare_etr() argument
1733 if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR)) in tmc_read_unprepare_etr()
1736 spin_lock_irqsave(&drvdata->spinlock, flags); in tmc_read_unprepare_etr()
1739 if (drvdata->mode == CS_MODE_SYSFS) { in tmc_read_unprepare_etr()
1745 __tmc_etr_enable_hw(drvdata); in tmc_read_unprepare_etr()
1751 sysfs_buf = drvdata->sysfs_buf; in tmc_read_unprepare_etr()
1752 drvdata->sysfs_buf = NULL; in tmc_read_unprepare_etr()
1755 drvdata->reading = false; in tmc_read_unprepare_etr()
1756 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_read_unprepare_etr()
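
The sketches below reconstruct the main drvdata usage patterns visible in the hits above. They are illustrative fragments written against the driver's own types and helpers as named in the listing, not standalone or drop-in code.

The hits in tmc_alloc_etr_buf() and tmc_etr_mode_alloc_buf() reflect a tiered buffer allocation: the driver probes which backing modes are available (built-in scatter-gather via tmc_etr_has_cap(drvdata, TMC_ETR_SG), an external CATU via tmc_etr_get_catu_device()), tries a flat DMA buffer first and falls back to ETR-SG and then CATU. A minimal sketch of that fallback chain; the real function adds IOMMU and size heuristics before choosing flat mode.

/*
 * Sketch of the buffer-mode fallback seen in tmc_alloc_etr_buf().
 * struct tmc_drvdata, struct etr_buf, tmc_etr_has_cap(),
 * tmc_etr_get_catu_device() and tmc_etr_mode_alloc_buf() are assumed to
 * exist as named in the listing above.
 */
static struct etr_buf *sketch_alloc_etr_buf(struct tmc_drvdata *drvdata,
					    ssize_t size, int node, void **pages)
{
	int rc = -ENOMEM;
	bool has_etr_sg = tmc_etr_has_cap(drvdata, TMC_ETR_SG);
	bool has_catu = !!tmc_etr_get_catu_device(drvdata);
	struct etr_buf *etr_buf;

	etr_buf = kzalloc(sizeof(*etr_buf), GFP_KERNEL);
	if (!etr_buf)
		return ERR_PTR(-ENOMEM);

	etr_buf->size = size;

	/* Prefer a physically contiguous (flat) buffer when possible ... */
	rc = tmc_etr_mode_alloc_buf(ETR_MODE_FLAT, drvdata, etr_buf, node, pages);
	/* ... fall back to the ETR's built-in scatter-gather mode ... */
	if (rc && has_etr_sg)
		rc = tmc_etr_mode_alloc_buf(ETR_MODE_ETR_SG, drvdata,
					    etr_buf, node, pages);
	/* ... and finally to an external CATU, if one is connected. */
	if (rc && has_catu)
		rc = tmc_etr_mode_alloc_buf(ETR_MODE_CATU, drvdata,
					    etr_buf, node, pages);
	if (rc) {
		kfree(etr_buf);
		return ERR_PTR(rc);
	}

	return etr_buf;
}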
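
tmc_etr_enable_hw() and tmc_etr_disable_hw() show how the data path is sequenced around the ETR: a CATU helper (if present) is brought up before the ETR is programmed and torn down after it, and the CoreSight claim tags bracket the whole window. A sketch of that pairing, with error handling reduced to the essentials; the real enable path also validates the buffer against the ETR's capabilities.

/*
 * Sketch of the enable/disable pairing in tmc_etr_enable_hw() and
 * tmc_etr_disable_hw(). Helper names follow the listing.
 */
static int sketch_etr_enable_hw(struct tmc_drvdata *drvdata,
				struct etr_buf *etr_buf)
{
	int rc;

	if (WARN_ON(drvdata->etr_buf))		/* already enabled */
		return -EBUSY;

	/* Bring up the CATU (if any) before the ETR starts pushing data. */
	rc = tmc_etr_enable_catu(drvdata, etr_buf);
	if (rc)
		return rc;

	/* Claim the component so an external debugger keeps its hands off. */
	rc = coresight_claim_device(drvdata->csdev);
	if (rc) {
		tmc_etr_disable_catu(drvdata);
		return rc;
	}

	drvdata->etr_buf = etr_buf;	/* publish the buffer ... */
	__tmc_etr_enable_hw(drvdata);	/* ... then program the registers */
	return 0;
}

static void sketch_etr_disable_hw(struct tmc_drvdata *drvdata)
{
	__tmc_etr_disable_hw(drvdata);	/* flush and stop the ETR first */
	tmc_etr_disable_catu(drvdata);	/* then tear down the CATU */
	coresight_disclaim_device(drvdata->csdev);
	drvdata->etr_buf = NULL;	/* data path is now fully idle */
}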
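
tmc_enable_etr_sink_sysfs() illustrates the locking discipline that recurs throughout the listing: drvdata->mode, drvdata->reading and drvdata->sysfs_buf are only read or written under drvdata->spinlock, while the sleeping buffer allocation happens with the lock dropped and the state re-checked afterwards. A trimmed sketch of that shape; the real function handles more cases (an already-active sysfs session, re-reading the buffer pointer after relocking) than shown here.

/*
 * Sketch of the drop-lock / allocate / re-check pattern in
 * tmc_enable_etr_sink_sysfs(). Field and helper names follow the listing.
 */
static int sketch_enable_etr_sysfs(struct tmc_drvdata *drvdata)
{
	int ret = 0;
	unsigned long flags;
	struct etr_buf *new_buf = NULL, *free_buf = NULL;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (!drvdata->sysfs_buf || drvdata->sysfs_buf->size != drvdata->size) {
		/* Allocation may sleep: do it with the spinlock dropped. */
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		free_buf = new_buf = tmc_etr_setup_sysfs_buf(drvdata);
		if (IS_ERR(new_buf))
			return PTR_ERR(new_buf);
		spin_lock_irqsave(&drvdata->spinlock, flags);
	}

	/* State may have changed while the lock was dropped: check again. */
	if (drvdata->reading || drvdata->mode == CS_MODE_PERF) {
		ret = -EBUSY;
		goto out;
	}

	if (new_buf) {
		/* Swap in the new buffer; free the old one once unlocked. */
		free_buf = drvdata->sysfs_buf;
		drvdata->sysfs_buf = new_buf;
	}

	ret = tmc_etr_enable_hw(drvdata, drvdata->sysfs_buf);
	if (!ret)
		drvdata->mode = CS_MODE_SYSFS;
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);
	if (free_buf)
		tmc_etr_free_sysfs_buf(free_buf);
	return ret;
}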
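
For CPU-wide perf sessions, get_perf_etr_buf_cpu_wide() keeps one etr_buf per owner PID in drvdata->idr, serialised by drvdata->idr_mutex: look the PID up first, allocate outside the mutex on a miss, then insert and retry if another event won the race. A condensed sketch of that lookup/insert dance, with the owner PID passed in directly for brevity; the refcount field on struct etr_buf and a tmc_free_etr_buf()-style teardown helper are assumptions about code that is not part of the listing above.

/*
 * Sketch of the per-PID buffer sharing in get_perf_etr_buf_cpu_wide().
 * drvdata->idr maps an owner PID to the etr_buf shared by that session;
 * drvdata->idr_mutex serialises lookups and insertions.
 */
static struct etr_buf *sketch_get_cpu_wide_buf(struct tmc_drvdata *drvdata,
					       struct perf_event *event,
					       pid_t pid, int nr_pages,
					       void **pages, bool snapshot)
{
	int ret;
	struct etr_buf *etr_buf;

retry:
	/* Fast path: another event owned by this PID already has a buffer. */
	mutex_lock(&drvdata->idr_mutex);
	etr_buf = idr_find(&drvdata->idr, pid);
	if (etr_buf) {
		refcount_inc(&etr_buf->refcount);
		mutex_unlock(&drvdata->idr_mutex);
		return etr_buf;
	}
	mutex_unlock(&drvdata->idr_mutex);

	/* Slow path: allocate without holding the mutex (may sleep). */
	etr_buf = alloc_etr_buf(drvdata, event, nr_pages, pages, snapshot);
	if (IS_ERR(etr_buf))
		return etr_buf;

	/* Publish the buffer; -ENOSPC means somebody else beat us to it. */
	mutex_lock(&drvdata->idr_mutex);
	ret = idr_alloc(&drvdata->idr, etr_buf, pid, pid + 1, GFP_KERNEL);
	mutex_unlock(&drvdata->idr_mutex);

	if (ret == -ENOSPC) {
		tmc_free_etr_buf(etr_buf);	/* drop ours, reuse theirs */
		goto retry;
	}

	return etr_buf;
}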
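
Finally, tmc_read_prepare_etr() and tmc_read_unprepare_etr() show how sysfs reads coexist with an active session: prepare quiesces the ETR (leaving the session logically enabled) and marks drvdata->reading, while unprepare either restarts the hardware if the sink is still in sysfs mode or frees the buffer if the sink was disabled during the read. A compact sketch of the pairing; the real functions also reject non-ETR configurations.

/*
 * Sketch of the read prepare/unprepare pairing from the listing.
 */
static int sketch_read_prepare_etr(struct tmc_drvdata *drvdata)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		ret = -EBUSY;		/* a read is already in progress */
		goto out;
	}
	if (!drvdata->sysfs_buf) {
		ret = -EINVAL;		/* nothing has been captured */
		goto out;
	}

	/* Quiesce the ETR so the buffer stops changing underneath us. */
	if (drvdata->mode == CS_MODE_SYSFS)
		__tmc_etr_disable_hw(drvdata);

	drvdata->reading = true;
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);
	return ret;
}

static int sketch_read_unprepare_etr(struct tmc_drvdata *drvdata)
{
	unsigned long flags;
	struct etr_buf *sysfs_buf = NULL;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->mode == CS_MODE_SYSFS) {
		/* Session still active: resume tracing into the same buffer. */
		__tmc_etr_enable_hw(drvdata);
	} else {
		/* Sink was disabled while being read: release the buffer. */
		sysfs_buf = drvdata->sysfs_buf;
		drvdata->sysfs_buf = NULL;
	}
	drvdata->reading = false;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Free outside the spinlock; freeing may sleep. */
	if (sysfs_buf)
		tmc_etr_free_sysfs_buf(sysfs_buf);
	return 0;
}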