Lines Matching refs:drvdata

40 struct tmc_drvdata *drvdata;  member
595 static int tmc_etr_alloc_flat_buf(struct tmc_drvdata *drvdata, in tmc_etr_alloc_flat_buf() argument
600 struct device *real_dev = drvdata->csdev->dev.parent; in tmc_etr_alloc_flat_buf()
618 flat_buf->dev = &drvdata->csdev->dev; in tmc_etr_alloc_flat_buf()
675 static int tmc_etr_alloc_sg_buf(struct tmc_drvdata *drvdata, in tmc_etr_alloc_sg_buf() argument
680 struct device *dev = &drvdata->csdev->dev; in tmc_etr_alloc_sg_buf()
758 tmc_etr_get_catu_device(struct tmc_drvdata *drvdata) in tmc_etr_get_catu_device() argument
761 struct coresight_device *tmp, *etr = drvdata->csdev; in tmc_etr_get_catu_device()
776 static inline int tmc_etr_enable_catu(struct tmc_drvdata *drvdata, in tmc_etr_enable_catu() argument
779 struct coresight_device *catu = tmc_etr_get_catu_device(drvdata); in tmc_etr_enable_catu()
786 static inline void tmc_etr_disable_catu(struct tmc_drvdata *drvdata) in tmc_etr_disable_catu() argument
788 struct coresight_device *catu = tmc_etr_get_catu_device(drvdata); in tmc_etr_disable_catu()
791 helper_ops(catu)->disable(catu, drvdata->etr_buf); in tmc_etr_disable_catu()
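
The enable/disable pair above follows the CoreSight helper-device pattern: the ETR looks up a CATU attached to its output (tmc_etr_get_catu_device()) and, if one is present, drives it through its helper ops. A minimal sketch of the enable side, assuming the helper_ops() accessor from coresight-priv.h and mirroring the disable call visible at line 791:

    static inline int tmc_etr_enable_catu(struct tmc_drvdata *drvdata,
                                          struct etr_buf *etr_buf)
    {
        struct coresight_device *catu = tmc_etr_get_catu_device(drvdata);

        /* No CATU between the ETR and memory: nothing to set up. */
        if (catu && helper_ops(catu)->enable)
            return helper_ops(catu)->enable(catu, etr_buf);
        return 0;
    }
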
813 struct tmc_drvdata *drvdata, in tmc_etr_mode_alloc_buf() argument
824 rc = etr_buf_ops[mode]->alloc(drvdata, etr_buf, in tmc_etr_mode_alloc_buf()
842 static struct etr_buf *tmc_alloc_etr_buf(struct tmc_drvdata *drvdata, in tmc_alloc_etr_buf() argument
850 struct device *dev = &drvdata->csdev->dev; in tmc_alloc_etr_buf()
852 has_etr_sg = tmc_etr_has_cap(drvdata, TMC_ETR_SG); in tmc_alloc_etr_buf()
854 has_catu = !!tmc_etr_get_catu_device(drvdata); in tmc_alloc_etr_buf()
878 rc = tmc_etr_mode_alloc_buf(ETR_MODE_FLAT, drvdata, in tmc_alloc_etr_buf()
881 rc = tmc_etr_mode_alloc_buf(ETR_MODE_ETR_SG, drvdata, in tmc_alloc_etr_buf()
884 rc = tmc_etr_mode_alloc_buf(ETR_MODE_CATU, drvdata, in tmc_alloc_etr_buf()
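
Lines 852-884 show the buffer-mode fallback inside tmc_alloc_etr_buf(): the driver first probes what the hardware can do (built-in scatter-gather via TMC_ETR_SG, a CATU helper), then tries allocation modes in order of preference, falling back only when the previous mode fails. A condensed sketch of that fallback, with the size/flags policy that picks the starting mode elided as an assumption:

    etr_buf = kzalloc(sizeof(*etr_buf), GFP_KERNEL);
    if (!etr_buf)
        return ERR_PTR(-ENOMEM);
    etr_buf->size = size;

    /* Prefer a flat DMA buffer, then the ETR's own SG tables, then CATU. */
    rc = tmc_etr_mode_alloc_buf(ETR_MODE_FLAT, drvdata, etr_buf, node, pages);
    if (rc && has_etr_sg)
        rc = tmc_etr_mode_alloc_buf(ETR_MODE_ETR_SG, drvdata, etr_buf, node, pages);
    if (rc && has_catu)
        rc = tmc_etr_mode_alloc_buf(ETR_MODE_CATU, drvdata, etr_buf, node, pages);

    if (rc) {
        kfree(etr_buf);
        return ERR_PTR(rc);
    }
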
939 static void tmc_sync_etr_buf(struct tmc_drvdata *drvdata) in tmc_sync_etr_buf() argument
941 struct etr_buf *etr_buf = drvdata->etr_buf; in tmc_sync_etr_buf()
945 rrp = tmc_read_rrp(drvdata); in tmc_sync_etr_buf()
946 rwp = tmc_read_rwp(drvdata); in tmc_sync_etr_buf()
947 status = readl_relaxed(drvdata->base + TMC_STS); in tmc_sync_etr_buf()
954 dev_dbg(&drvdata->csdev->dev, in tmc_sync_etr_buf()
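
tmc_sync_etr_buf() reconciles the software view of the buffer with the hardware: it samples RRP, RWP and TMC_STS, truncates the buffer when a memory error is flagged (the dev_dbg at line 954), records whether the write pointer wrapped, and hands the pointer arithmetic to the backend that allocated the buffer. A sketch assuming struct etr_buf carries len, full and an ops->sync() hook, as in the mainline driver:

    rrp = tmc_read_rrp(drvdata);
    rwp = tmc_read_rwp(drvdata);
    status = readl_relaxed(drvdata->base + TMC_STS);

    /* A memory error during the session makes the contents untrustworthy. */
    if (WARN_ON_ONCE(status & TMC_STS_MEMERR)) {
        dev_dbg(&drvdata->csdev->dev,
            "tmc memory error detected, truncating buffer\n");
        etr_buf->len = 0;
        etr_buf->full = false;
        return;
    }

    /* FULL set means RWP wrapped past RRP at least once. */
    etr_buf->full = !!(status & TMC_STS_FULL);

    /* Flat, ETR-SG and CATU backends each translate rrp/rwp differently. */
    etr_buf->ops->sync(etr_buf, rrp, rwp);
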
968 static void __tmc_etr_enable_hw(struct tmc_drvdata *drvdata) in __tmc_etr_enable_hw() argument
971 struct etr_buf *etr_buf = drvdata->etr_buf; in __tmc_etr_enable_hw()
973 CS_UNLOCK(drvdata->base); in __tmc_etr_enable_hw()
976 tmc_wait_for_tmcready(drvdata); in __tmc_etr_enable_hw()
978 writel_relaxed(etr_buf->size / 4, drvdata->base + TMC_RSZ); in __tmc_etr_enable_hw()
979 writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE); in __tmc_etr_enable_hw()
981 axictl = readl_relaxed(drvdata->base + TMC_AXICTL); in __tmc_etr_enable_hw()
986 if (tmc_etr_has_cap(drvdata, TMC_ETR_AXI_ARCACHE)) { in __tmc_etr_enable_hw()
994 writel_relaxed(axictl, drvdata->base + TMC_AXICTL); in __tmc_etr_enable_hw()
995 tmc_write_dba(drvdata, etr_buf->hwaddr); in __tmc_etr_enable_hw()
1001 if (tmc_etr_has_cap(drvdata, TMC_ETR_SAVE_RESTORE)) { in __tmc_etr_enable_hw()
1002 tmc_write_rrp(drvdata, etr_buf->hwaddr); in __tmc_etr_enable_hw()
1003 tmc_write_rwp(drvdata, etr_buf->hwaddr); in __tmc_etr_enable_hw()
1004 sts = readl_relaxed(drvdata->base + TMC_STS) & ~TMC_STS_FULL; in __tmc_etr_enable_hw()
1005 writel_relaxed(sts, drvdata->base + TMC_STS); in __tmc_etr_enable_hw()
1011 drvdata->base + TMC_FFCR); in __tmc_etr_enable_hw()
1012 writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG); in __tmc_etr_enable_hw()
1013 tmc_enable_hw(drvdata); in __tmc_etr_enable_hw()
1015 CS_LOCK(drvdata->base); in __tmc_etr_enable_hw()
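
The __tmc_etr_enable_hw() lines spell out the programming order: unlock the CoreSight lock registers, wait for the TMC to report ready, write the buffer size in 32-bit words (hence size / 4 into TMC_RSZ), select circular-buffer mode, set the AXI attributes and the buffer base address, optionally seed RRP/RWP and clear the FULL flag when the SAVE_RESTORE capability is present, then program FFCR and the trigger counter before enabling capture. A compressed sketch of that sequence, with the AXICTL bit manipulation and the FFCR flags abbreviated:

    CS_UNLOCK(drvdata->base);
    tmc_wait_for_tmcready(drvdata);

    writel_relaxed(etr_buf->size / 4, drvdata->base + TMC_RSZ);   /* words, not bytes */
    writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);

    /* Burst length, SG enable and cache attributes go into AXICTL ... */
    writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
    /* ... followed by the (possibly translated) buffer base address. */
    tmc_write_dba(drvdata, etr_buf->hwaddr);

    if (tmc_etr_has_cap(drvdata, TMC_ETR_SAVE_RESTORE)) {
        tmc_write_rrp(drvdata, etr_buf->hwaddr);
        tmc_write_rwp(drvdata, etr_buf->hwaddr);
        sts = readl_relaxed(drvdata->base + TMC_STS) & ~TMC_STS_FULL;
        writel_relaxed(sts, drvdata->base + TMC_STS);
    }

    /* Formatter/flush control (TMC_FFCR), trigger counter, then go. */
    writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
    tmc_enable_hw(drvdata);
    CS_LOCK(drvdata->base);
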
1018 static int tmc_etr_enable_hw(struct tmc_drvdata *drvdata, in tmc_etr_enable_hw() argument
1028 WARN_ON(!tmc_etr_has_cap(drvdata, TMC_ETR_SG))) in tmc_etr_enable_hw()
1031 if (WARN_ON(drvdata->etr_buf)) in tmc_etr_enable_hw()
1038 rc = tmc_etr_enable_catu(drvdata, etr_buf); in tmc_etr_enable_hw()
1041 rc = coresight_claim_device(drvdata->base); in tmc_etr_enable_hw()
1043 drvdata->etr_buf = etr_buf; in tmc_etr_enable_hw()
1044 __tmc_etr_enable_hw(drvdata); in tmc_etr_enable_hw()
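
tmc_etr_enable_hw() shows the ordering constraints around bringing the sink up: verify that an ETR-SG buffer is only used on hardware with the SG capability, refuse to enable while a buffer is already installed, enable the CATU helper first (it sits between the ETR and system memory), claim the device so the sysfs and perf paths cannot both own it, and only then install the buffer and program the registers. Condensed from lines 1028-1044:

    rc = tmc_etr_enable_catu(drvdata, etr_buf);
    if (rc)
        return rc;

    rc = coresight_claim_device(drvdata->base);
    if (!rc) {
        /* Claim succeeded: publish the buffer and program the hardware. */
        drvdata->etr_buf = etr_buf;
        __tmc_etr_enable_hw(drvdata);
    }
    return rc;
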
1060 ssize_t tmc_etr_get_sysfs_trace(struct tmc_drvdata *drvdata, in tmc_etr_get_sysfs_trace() argument
1065 struct etr_buf *etr_buf = drvdata->sysfs_buf; in tmc_etr_get_sysfs_trace()
1080 tmc_etr_setup_sysfs_buf(struct tmc_drvdata *drvdata) in tmc_etr_setup_sysfs_buf() argument
1082 return tmc_alloc_etr_buf(drvdata, drvdata->size, in tmc_etr_setup_sysfs_buf()
1093 static void tmc_etr_sync_sysfs_buf(struct tmc_drvdata *drvdata) in tmc_etr_sync_sysfs_buf() argument
1095 struct etr_buf *etr_buf = drvdata->etr_buf; in tmc_etr_sync_sysfs_buf()
1097 if (WARN_ON(drvdata->sysfs_buf != etr_buf)) { in tmc_etr_sync_sysfs_buf()
1098 tmc_etr_free_sysfs_buf(drvdata->sysfs_buf); in tmc_etr_sync_sysfs_buf()
1099 drvdata->sysfs_buf = NULL; in tmc_etr_sync_sysfs_buf()
1101 tmc_sync_etr_buf(drvdata); in tmc_etr_sync_sysfs_buf()
1112 static void __tmc_etr_disable_hw(struct tmc_drvdata *drvdata) in __tmc_etr_disable_hw() argument
1114 CS_UNLOCK(drvdata->base); in __tmc_etr_disable_hw()
1116 tmc_flush_and_stop(drvdata); in __tmc_etr_disable_hw()
1121 if (drvdata->mode == CS_MODE_SYSFS) in __tmc_etr_disable_hw()
1122 tmc_etr_sync_sysfs_buf(drvdata); in __tmc_etr_disable_hw()
1124 tmc_disable_hw(drvdata); in __tmc_etr_disable_hw()
1126 CS_LOCK(drvdata->base); in __tmc_etr_disable_hw()
1130 void tmc_etr_disable_hw(struct tmc_drvdata *drvdata) in tmc_etr_disable_hw() argument
1132 __tmc_etr_disable_hw(drvdata); in tmc_etr_disable_hw()
1134 tmc_etr_disable_catu(drvdata); in tmc_etr_disable_hw()
1135 coresight_disclaim_device(drvdata->base); in tmc_etr_disable_hw()
1137 drvdata->etr_buf = NULL; in tmc_etr_disable_hw()
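
Disabling mirrors that order in reverse: stop and flush the TMC first (syncing the sysfs buffer if that is the active mode), then tear down the CATU helper, release the claim tag, and finally drop the buffer pointer so a later session can install its own. Condensed from the lines above:

    __tmc_etr_disable_hw(drvdata);
    tmc_etr_disable_catu(drvdata);
    coresight_disclaim_device(drvdata->base);
    /* The buffer itself stays owned by the sysfs or perf path. */
    drvdata->etr_buf = NULL;
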
1144 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); in tmc_enable_etr_sink_sysfs() local
1155 spin_lock_irqsave(&drvdata->spinlock, flags); in tmc_enable_etr_sink_sysfs()
1156 sysfs_buf = READ_ONCE(drvdata->sysfs_buf); in tmc_enable_etr_sink_sysfs()
1157 if (!sysfs_buf || (sysfs_buf->size != drvdata->size)) { in tmc_enable_etr_sink_sysfs()
1158 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_enable_etr_sink_sysfs()
1161 free_buf = new_buf = tmc_etr_setup_sysfs_buf(drvdata); in tmc_enable_etr_sink_sysfs()
1166 spin_lock_irqsave(&drvdata->spinlock, flags); in tmc_enable_etr_sink_sysfs()
1169 if (drvdata->reading || drvdata->mode == CS_MODE_PERF) { in tmc_enable_etr_sink_sysfs()
1179 if (drvdata->mode == CS_MODE_SYSFS) { in tmc_enable_etr_sink_sysfs()
1188 sysfs_buf = READ_ONCE(drvdata->sysfs_buf); in tmc_enable_etr_sink_sysfs()
1191 drvdata->sysfs_buf = new_buf; in tmc_enable_etr_sink_sysfs()
1194 ret = tmc_etr_enable_hw(drvdata, drvdata->sysfs_buf); in tmc_enable_etr_sink_sysfs()
1196 drvdata->mode = CS_MODE_SYSFS; in tmc_enable_etr_sink_sysfs()
1200 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_enable_etr_sink_sysfs()
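
The sysfs enable path uses the usual allocate-outside-the-lock, swap-under-the-lock shape: peek at the current sysfs buffer under drvdata->spinlock, and if it is missing or its size no longer matches drvdata->size, drop the lock, allocate a replacement with tmc_etr_setup_sysfs_buf(), retake the lock, re-check that nobody is reading the device and perf has not claimed it, install the new buffer, and free whichever buffer ends up unused only after the lock is released. A reduced sketch of that shape; the short cut for an already active sysfs session and the csdev reference counting are not shown:

    struct etr_buf *new_buf = NULL, *free_buf = NULL, *sysfs_buf;

    spin_lock_irqsave(&drvdata->spinlock, flags);
    sysfs_buf = READ_ONCE(drvdata->sysfs_buf);
    if (!sysfs_buf || sysfs_buf->size != drvdata->size) {
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        /* Allocation may sleep, so it must happen outside the spinlock. */
        free_buf = new_buf = tmc_etr_setup_sysfs_buf(drvdata);
        if (IS_ERR(new_buf))
            return PTR_ERR(new_buf);

        spin_lock_irqsave(&drvdata->spinlock, flags);
    }

    if (drvdata->reading || drvdata->mode == CS_MODE_PERF) {
        ret = -EBUSY;
        goto out;
    }

    if (new_buf) {
        /* The previous buffer, if any, becomes the one to free below. */
        free_buf = drvdata->sysfs_buf;
        drvdata->sysfs_buf = new_buf;
    }

    ret = tmc_etr_enable_hw(drvdata, drvdata->sysfs_buf);
    if (!ret)
        drvdata->mode = CS_MODE_SYSFS;
out:
    spin_unlock_irqrestore(&drvdata->spinlock, flags);

    /* Free memory only after dropping the spinlock. */
    if (free_buf)
        tmc_etr_free_sysfs_buf(free_buf);
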
1220 alloc_etr_buf(struct tmc_drvdata *drvdata, struct perf_event *event, in alloc_etr_buf() argument
1232 if ((nr_pages << PAGE_SHIFT) > drvdata->size) { in alloc_etr_buf()
1233 etr_buf = tmc_alloc_etr_buf(drvdata, (nr_pages << PAGE_SHIFT), in alloc_etr_buf()
1243 size = drvdata->size; in alloc_etr_buf()
1245 etr_buf = tmc_alloc_etr_buf(drvdata, size, 0, node, NULL); in alloc_etr_buf()
1258 get_perf_etr_buf_cpu_wide(struct tmc_drvdata *drvdata, in get_perf_etr_buf_cpu_wide() argument
1285 mutex_lock(&drvdata->idr_mutex); in get_perf_etr_buf_cpu_wide()
1286 etr_buf = idr_find(&drvdata->idr, pid); in get_perf_etr_buf_cpu_wide()
1289 mutex_unlock(&drvdata->idr_mutex); in get_perf_etr_buf_cpu_wide()
1294 mutex_unlock(&drvdata->idr_mutex); in get_perf_etr_buf_cpu_wide()
1296 etr_buf = alloc_etr_buf(drvdata, event, nr_pages, pages, snapshot); in get_perf_etr_buf_cpu_wide()
1301 mutex_lock(&drvdata->idr_mutex); in get_perf_etr_buf_cpu_wide()
1302 ret = idr_alloc(&drvdata->idr, etr_buf, pid, pid + 1, GFP_KERNEL); in get_perf_etr_buf_cpu_wide()
1303 mutex_unlock(&drvdata->idr_mutex); in get_perf_etr_buf_cpu_wide()
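
For CPU-wide sessions the listing shows per-PID sharing of the ETR buffer through drvdata->idr under drvdata->idr_mutex: look up an existing etr_buf for the session owner's PID and take a reference, otherwise drop the mutex, allocate, retake the mutex and publish the buffer with idr_alloc(); if another CPU published first, idr_alloc() returns -ENOSPC and the lookup is retried. A sketch of that loop, assuming struct etr_buf carries a refcount_t refcount and that the driver's tmc_free_etr_buf() releases a buffer, as in mainline:

retry:
    mutex_lock(&drvdata->idr_mutex);
    etr_buf = idr_find(&drvdata->idr, pid);
    if (etr_buf) {
        refcount_inc(&etr_buf->refcount);
        mutex_unlock(&drvdata->idr_mutex);
        return etr_buf;
    }
    mutex_unlock(&drvdata->idr_mutex);

    /* Nothing published for this PID yet: allocate outside the mutex. */
    etr_buf = alloc_etr_buf(drvdata, event, nr_pages, pages, snapshot);
    if (IS_ERR(etr_buf))
        return etr_buf;
    refcount_set(&etr_buf->refcount, 1);

    mutex_lock(&drvdata->idr_mutex);
    ret = idr_alloc(&drvdata->idr, etr_buf, pid, pid + 1, GFP_KERNEL);
    mutex_unlock(&drvdata->idr_mutex);

    /* Another event for the same PID won the race: drop ours and retry. */
    if (ret == -ENOSPC) {
        tmc_free_etr_buf(etr_buf);
        goto retry;
    }
    return etr_buf;
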
1322 get_perf_etr_buf_per_thread(struct tmc_drvdata *drvdata, in get_perf_etr_buf_per_thread() argument
1330 return alloc_etr_buf(drvdata, event, nr_pages, pages, snapshot); in get_perf_etr_buf_per_thread()
1334 get_perf_etr_buf(struct tmc_drvdata *drvdata, struct perf_event *event, in get_perf_etr_buf() argument
1338 return get_perf_etr_buf_per_thread(drvdata, event, nr_pages, in get_perf_etr_buf()
1341 return get_perf_etr_buf_cpu_wide(drvdata, event, nr_pages, in get_perf_etr_buf()
1346 tmc_etr_setup_perf_buf(struct tmc_drvdata *drvdata, struct perf_event *event, in tmc_etr_setup_perf_buf() argument
1359 etr_buf = get_perf_etr_buf(drvdata, event, nr_pages, pages, snapshot); in tmc_etr_setup_perf_buf()
1371 etr_perf->drvdata = drvdata; in tmc_etr_setup_perf_buf()
1383 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); in tmc_alloc_etr_buffer() local
1385 etr_perf = tmc_etr_setup_perf_buf(drvdata, event, in tmc_alloc_etr_buffer()
1403 struct tmc_drvdata *drvdata = etr_perf->drvdata; in tmc_free_etr_buffer() local
1409 mutex_lock(&drvdata->idr_mutex); in tmc_free_etr_buffer()
1412 mutex_unlock(&drvdata->idr_mutex); in tmc_free_etr_buffer()
1417 buf = idr_remove(&drvdata->idr, etr_perf->pid); in tmc_free_etr_buffer()
1418 mutex_unlock(&drvdata->idr_mutex); in tmc_free_etr_buffer()
1499 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); in tmc_update_etr_buffer() local
1503 spin_lock_irqsave(&drvdata->spinlock, flags); in tmc_update_etr_buffer()
1507 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_update_etr_buffer()
1511 if (WARN_ON(drvdata->perf_buf != etr_buf)) { in tmc_update_etr_buffer()
1513 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_update_etr_buffer()
1517 CS_UNLOCK(drvdata->base); in tmc_update_etr_buffer()
1519 tmc_flush_and_stop(drvdata); in tmc_update_etr_buffer()
1520 tmc_sync_etr_buf(drvdata); in tmc_update_etr_buffer()
1522 CS_LOCK(drvdata->base); in tmc_update_etr_buffer()
1523 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_update_etr_buffer()
1537 u32 mask = tmc_get_memwidth_mask(drvdata); in tmc_update_etr_buffer()
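
When perf stops tracing, tmc_update_etr_buffer() stops the hardware and pulls the pointers while still holding drvdata->spinlock, after double-checking that the buffer perf handed in is the one currently wired to the ETR (drvdata->perf_buf). The memwidth mask fetched at line 1537 is then used outside the lock to align barrier-packet insertion to the TMC memory width. A sketch of the locked portion only; the lost-data bookkeeping and the sink refcount check are elided:

    spin_lock_irqsave(&drvdata->spinlock, flags);

    /* Refuse to touch a buffer the ETR is not actually writing to. */
    if (WARN_ON(drvdata->perf_buf != etr_buf)) {
        spin_unlock_irqrestore(&drvdata->spinlock, flags);
        return 0;
    }

    CS_UNLOCK(drvdata->base);
    tmc_flush_and_stop(drvdata);    /* drain the formatter, stop capture */
    tmc_sync_etr_buf(drvdata);      /* pull RRP/RWP into etr_buf */
    CS_LOCK(drvdata->base);

    spin_unlock_irqrestore(&drvdata->spinlock, flags);
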
1581 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); in tmc_enable_etr_sink_perf() local
1585 spin_lock_irqsave(&drvdata->spinlock, flags); in tmc_enable_etr_sink_perf()
1587 if (drvdata->mode == CS_MODE_SYSFS) { in tmc_enable_etr_sink_perf()
1601 if (drvdata->pid != -1 && drvdata->pid != pid) { in tmc_enable_etr_sink_perf()
1612 if (drvdata->pid == pid) { in tmc_enable_etr_sink_perf()
1617 rc = tmc_etr_enable_hw(drvdata, etr_perf->etr_buf); in tmc_enable_etr_sink_perf()
1620 drvdata->pid = pid; in tmc_enable_etr_sink_perf()
1621 drvdata->mode = CS_MODE_PERF; in tmc_enable_etr_sink_perf()
1622 drvdata->perf_buf = etr_perf->etr_buf; in tmc_enable_etr_sink_perf()
1627 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_enable_etr_sink_perf()
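
The perf enable path guards the sink with both the recorded mode and the owner PID: bail out if the sink is busy in sysfs mode, reject a different owner while drvdata->pid is set, treat a re-enable by the same owner (another CPU of the same session) as a no-op for the hardware, and otherwise program the ETR and record pid, mode and the active perf buffer. A sketch of those checks under drvdata->spinlock, with the csdev reference counting left out:

    spin_lock_irqsave(&drvdata->spinlock, flags);

    if (drvdata->mode == CS_MODE_SYSFS) {
        rc = -EBUSY;
        goto unlock_out;
    }

    /* The sink already belongs to a different perf session owner. */
    if (drvdata->pid != -1 && drvdata->pid != pid) {
        rc = -EBUSY;
        goto unlock_out;
    }

    /* Same owner enabling again: the hardware is already programmed. */
    if (drvdata->pid == pid) {
        rc = 0;
        goto unlock_out;
    }

    rc = tmc_etr_enable_hw(drvdata, etr_perf->etr_buf);
    if (!rc) {
        drvdata->pid = pid;
        drvdata->mode = CS_MODE_PERF;
        drvdata->perf_buf = etr_perf->etr_buf;
    }

unlock_out:
    spin_unlock_irqrestore(&drvdata->spinlock, flags);
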
1648 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); in tmc_disable_etr_sink() local
1650 spin_lock_irqsave(&drvdata->spinlock, flags); in tmc_disable_etr_sink()
1652 if (drvdata->reading) { in tmc_disable_etr_sink()
1653 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_disable_etr_sink()
1658 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_disable_etr_sink()
1663 WARN_ON_ONCE(drvdata->mode == CS_MODE_DISABLED); in tmc_disable_etr_sink()
1664 tmc_etr_disable_hw(drvdata); in tmc_disable_etr_sink()
1666 drvdata->pid = -1; in tmc_disable_etr_sink()
1667 drvdata->mode = CS_MODE_DISABLED; in tmc_disable_etr_sink()
1669 drvdata->perf_buf = NULL; in tmc_disable_etr_sink()
1671 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_disable_etr_sink()
1689 int tmc_read_prepare_etr(struct tmc_drvdata *drvdata) in tmc_read_prepare_etr() argument
1695 if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR)) in tmc_read_prepare_etr()
1698 spin_lock_irqsave(&drvdata->spinlock, flags); in tmc_read_prepare_etr()
1699 if (drvdata->reading) { in tmc_read_prepare_etr()
1709 if (!drvdata->sysfs_buf) { in tmc_read_prepare_etr()
1715 if (drvdata->mode == CS_MODE_SYSFS) in tmc_read_prepare_etr()
1716 __tmc_etr_disable_hw(drvdata); in tmc_read_prepare_etr()
1718 drvdata->reading = true; in tmc_read_prepare_etr()
1720 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_read_prepare_etr()
1725 int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata) in tmc_read_unprepare_etr() argument
1731 if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR)) in tmc_read_unprepare_etr()
1734 spin_lock_irqsave(&drvdata->spinlock, flags); in tmc_read_unprepare_etr()
1737 if (drvdata->mode == CS_MODE_SYSFS) { in tmc_read_unprepare_etr()
1743 __tmc_etr_enable_hw(drvdata); in tmc_read_unprepare_etr()
1749 sysfs_buf = drvdata->sysfs_buf; in tmc_read_unprepare_etr()
1750 drvdata->sysfs_buf = NULL; in tmc_read_unprepare_etr()
1753 drvdata->reading = false; in tmc_read_unprepare_etr()
1754 spin_unlock_irqrestore(&drvdata->spinlock, flags); in tmc_read_unprepare_etr()
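
The last two functions support reading the sysfs buffer through the character device while the sink may still be live. tmc_read_prepare_etr() takes the spinlock, rejects a second reader and a missing sysfs buffer, stops the hardware with __tmc_etr_disable_hw() when the sink is enabled in sysfs mode, and sets drvdata->reading. tmc_read_unprepare_etr() reverses that: re-arm the hardware if the sink is still in sysfs mode, otherwise detach the buffer and free it outside the lock. A sketch of the unprepare side, with error codes and the enable-time details left out:

    struct etr_buf *sysfs_buf = NULL;

    spin_lock_irqsave(&drvdata->spinlock, flags);

    /* If the ETR is still enabled through sysfs, hand the buffer back to it. */
    if (drvdata->mode == CS_MODE_SYSFS) {
        __tmc_etr_enable_hw(drvdata);
    } else {
        /* The session is gone: detach the buffer and free it later. */
        sysfs_buf = drvdata->sysfs_buf;
        drvdata->sysfs_buf = NULL;
    }

    drvdata->reading = false;
    spin_unlock_irqrestore(&drvdata->spinlock, flags);

    /* Free memory only after dropping the spinlock. */
    if (sysfs_buf)
        tmc_etr_free_sysfs_buf(sysfs_buf);
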