// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
 * Siemens System Memory Buffer driver.
 * Copyright(c) 2022, HiSilicon Limited.
 */
6
7 #include <linux/atomic.h>
8 #include <linux/acpi.h>
9 #include <linux/circ_buf.h>
10 #include <linux/err.h>
11 #include <linux/fs.h>
12 #include <linux/module.h>
13 #include <linux/mod_devicetable.h>
14 #include <linux/platform_device.h>
15
16 #include "coresight-etm-perf.h"
17 #include "coresight-priv.h"
18 #include "ultrasoc-smb.h"
19
20 DEFINE_CORESIGHT_DEVLIST(sink_devs, "ultra_smb");
21
22 #define ULTRASOC_SMB_DSM_UUID "82ae1283-7f6a-4cbe-aa06-53e8fb24db18"
23
smb_buffer_not_empty(struct smb_drv_data * drvdata)24 static bool smb_buffer_not_empty(struct smb_drv_data *drvdata)
25 {
26 u32 buf_status = readl(drvdata->base + SMB_LB_INT_STS_REG);
27
28 return FIELD_GET(SMB_LB_INT_STS_NOT_EMPTY_MSK, buf_status);
29 }
30
smb_update_data_size(struct smb_drv_data * drvdata)31 static void smb_update_data_size(struct smb_drv_data *drvdata)
32 {
33 struct smb_data_buffer *sdb = &drvdata->sdb;
34 u32 buf_wrptr;
35
36 buf_wrptr = readl(drvdata->base + SMB_LB_WR_ADDR_REG) -
37 sdb->buf_hw_base;
38
39 /* Buffer is full */
40 if (buf_wrptr == sdb->buf_rdptr && smb_buffer_not_empty(drvdata)) {
41 sdb->data_size = sdb->buf_size;
42 return;
43 }
44
45 /* The buffer mode is circular buffer mode */
46 sdb->data_size = CIRC_CNT(buf_wrptr, sdb->buf_rdptr,
47 sdb->buf_size);
48 }
49
50 /*
51 * The read pointer adds @nbytes bytes (may round up to the beginning)
52 * after the data is read or discarded, while needing to update the
53 * available data size.
54 */
/*
 * Consume @nbytes from the buffer: advance the software read pointer
 * (wrapping at the buffer end), mirror it to the hardware read address
 * register, and shrink the available data size accordingly.
 */
static void smb_update_read_ptr(struct smb_drv_data *drvdata, u32 nbytes)
{
	struct smb_data_buffer *sdb = &drvdata->sdb;

	sdb->buf_rdptr = (sdb->buf_rdptr + nbytes) % sdb->buf_size;
	writel(sdb->buf_hw_base + sdb->buf_rdptr,
	       drvdata->base + SMB_LB_RD_ADDR_REG);
	sdb->data_size -= nbytes;
}
66
/*
 * smb_reset_buffer - drain the hardware path and reset the logical buffer.
 *
 * Purges any trace data left in the hardware path, clears the buffer
 * status flags, and resynchronizes the software read pointer to the
 * hardware write pointer so stale data cannot corrupt the next session.
 */
static void smb_reset_buffer(struct smb_drv_data *drvdata)
{
	struct smb_data_buffer *sdb = &drvdata->sdb;
	u32 write_ptr;

	/*
	 * We must flush and discard any data left in the hardware path
	 * to avoid corrupting the next session.
	 * Note: The write pointer will never exceed the read pointer.
	 */
	writel(SMB_LB_PURGE_PURGED, drvdata->base + SMB_LB_PURGE_REG);

	/* Reset SMB logical buffer status flags */
	writel(SMB_LB_INT_STS_RESET, drvdata->base + SMB_LB_INT_STS_REG);

	write_ptr = readl(drvdata->base + SMB_LB_WR_ADDR_REG);

	/* Do nothing, no data left in the hardware path */
	if (!write_ptr || write_ptr == sdb->buf_rdptr + sdb->buf_hw_base)
		return;

	/*
	 * The SMB_LB_WR_ADDR_REG register is read-only, so instead
	 * synchronize the read pointer to the write pointer.
	 */
	writel(write_ptr, drvdata->base + SMB_LB_RD_ADDR_REG);
	sdb->buf_rdptr = write_ptr - sdb->buf_hw_base;
}
95
smb_open(struct inode * inode,struct file * file)96 static int smb_open(struct inode *inode, struct file *file)
97 {
98 struct smb_drv_data *drvdata = container_of(file->private_data,
99 struct smb_drv_data, miscdev);
100 int ret = 0;
101
102 mutex_lock(&drvdata->mutex);
103
104 if (drvdata->reading) {
105 ret = -EBUSY;
106 goto out;
107 }
108
109 if (atomic_read(&drvdata->csdev->refcnt)) {
110 ret = -EBUSY;
111 goto out;
112 }
113
114 smb_update_data_size(drvdata);
115
116 drvdata->reading = true;
117 out:
118 mutex_unlock(&drvdata->mutex);
119
120 return ret;
121 }
122
/*
 * smb_read - copy buffered trace data to user space.
 *
 * Copies at most @len bytes to @data. A single call never crosses the
 * physical end of the SMB buffer, so a wrapped buffer needs two read()
 * calls to drain completely. Once no data remains, the buffer is purged
 * and reset ready for the next session.
 *
 * Returns the number of bytes copied, 0 when no data is available, or
 * -EFAULT if the copy to user space fails.
 */
static ssize_t smb_read(struct file *file, char __user *data, size_t len,
			loff_t *ppos)
{
	struct smb_drv_data *drvdata = container_of(file->private_data,
					struct smb_drv_data, miscdev);
	struct smb_data_buffer *sdb = &drvdata->sdb;
	struct device *dev = &drvdata->csdev->dev;
	ssize_t to_copy = 0;

	if (!len)
		return 0;

	mutex_lock(&drvdata->mutex);

	if (!sdb->data_size)
		goto out;

	to_copy = min(sdb->data_size, len);

	/* Copy parts of trace data when read pointer wrap around SMB buffer */
	if (sdb->buf_rdptr + to_copy > sdb->buf_size)
		to_copy = sdb->buf_size - sdb->buf_rdptr;

	if (copy_to_user(data, sdb->buf_base + sdb->buf_rdptr, to_copy)) {
		dev_dbg(dev, "Failed to copy data to user\n");
		to_copy = -EFAULT;
		goto out;
	}

	/* Advance the file position and the buffer read pointer together. */
	*ppos += to_copy;

	smb_update_read_ptr(drvdata, to_copy);

	dev_dbg(dev, "%zu bytes copied\n", to_copy);
out:
	/* Buffer fully drained (or was empty): reset it for the next session. */
	if (!sdb->data_size)
		smb_reset_buffer(drvdata);
	mutex_unlock(&drvdata->mutex);

	return to_copy;
}
164
/* Close the misc device: clear the reading flag so the sink can be enabled. */
static int smb_release(struct inode *inode, struct file *file)
{
	struct smb_drv_data *drvdata = container_of(file->private_data,
					struct smb_drv_data, miscdev);

	mutex_lock(&drvdata->mutex);
	drvdata->reading = false;
	mutex_unlock(&drvdata->mutex);

	return 0;
}
176
/* File operations for draining the SMB buffer through the misc device. */
static const struct file_operations smb_fops = {
	.owner		= THIS_MODULE,
	.open		= smb_open,
	.read		= smb_read,
	.release	= smb_release,
	.llseek		= no_llseek,
};
184
/* sysfs: report the SMB trace buffer size in bytes (hexadecimal). */
static ssize_t buf_size_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct smb_drv_data *drvdata = dev_get_drvdata(dev->parent);

	return sysfs_emit(buf, "0x%lx\n", drvdata->sdb.buf_size);
}
static DEVICE_ATTR_RO(buf_size);
193
/* Management attributes: raw pointer/status registers plus buffer size. */
static struct attribute *smb_sink_attrs[] = {
	coresight_simple_reg32(read_pos, SMB_LB_RD_ADDR_REG),
	coresight_simple_reg32(write_pos, SMB_LB_WR_ADDR_REG),
	coresight_simple_reg32(buf_status, SMB_LB_INT_STS_REG),
	&dev_attr_buf_size.attr,
	NULL
};

/* Exposed under the "mgmt" sysfs group of the coresight device. */
static const struct attribute_group smb_sink_group = {
	.attrs = smb_sink_attrs,
	.name = "mgmt",
};

static const struct attribute_group *smb_sink_groups[] = {
	&smb_sink_group,
	NULL
};
211
/* Start trace collection by setting the global enable register. */
static void smb_enable_hw(struct smb_drv_data *drvdata)
{
	writel(SMB_GLB_EN_HW_ENABLE, drvdata->base + SMB_GLB_EN_REG);
}
216
/* Stop trace collection by clearing the global enable register. */
static void smb_disable_hw(struct smb_drv_data *drvdata)
{
	writel(0x0, drvdata->base + SMB_GLB_EN_REG);
}
221
smb_enable_sysfs(struct coresight_device * csdev)222 static void smb_enable_sysfs(struct coresight_device *csdev)
223 {
224 struct smb_drv_data *drvdata = dev_get_drvdata(csdev->dev.parent);
225
226 if (drvdata->mode != CS_MODE_DISABLED)
227 return;
228
229 smb_enable_hw(drvdata);
230 drvdata->mode = CS_MODE_SYSFS;
231 }
232
smb_enable_perf(struct coresight_device * csdev,void * data)233 static int smb_enable_perf(struct coresight_device *csdev, void *data)
234 {
235 struct smb_drv_data *drvdata = dev_get_drvdata(csdev->dev.parent);
236 struct perf_output_handle *handle = data;
237 struct cs_buffers *buf = etm_perf_sink_config(handle);
238 pid_t pid;
239
240 if (!buf)
241 return -EINVAL;
242
243 /* Get a handle on the pid of the target process */
244 pid = buf->pid;
245
246 /* Device is already in used by other session */
247 if (drvdata->pid != -1 && drvdata->pid != pid)
248 return -EBUSY;
249
250 if (drvdata->pid == -1) {
251 smb_enable_hw(drvdata);
252 drvdata->pid = pid;
253 drvdata->mode = CS_MODE_PERF;
254 }
255
256 return 0;
257 }
258
/*
 * smb_enable - coresight sink enable entry point (sysfs or perf mode).
 *
 * Fails with -EBUSY while user space is draining the buffer through the
 * misc device, or when the sink is already enabled in a different mode.
 * On success the csdev reference count is raised to track active users.
 */
static int smb_enable(struct coresight_device *csdev, enum cs_mode mode,
		      void *data)
{
	struct smb_drv_data *drvdata = dev_get_drvdata(csdev->dev.parent);
	int ret = 0;

	mutex_lock(&drvdata->mutex);

	/* Do nothing, the trace data is being read by another interface now */
	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/* Do nothing, the SMB is already enabled in a different mode */
	if (drvdata->mode != CS_MODE_DISABLED && drvdata->mode != mode) {
		ret = -EBUSY;
		goto out;
	}

	switch (mode) {
	case CS_MODE_SYSFS:
		smb_enable_sysfs(csdev);
		break;
	case CS_MODE_PERF:
		ret = smb_enable_perf(csdev, data);
		break;
	default:
		ret = -EINVAL;
	}

	if (ret)
		goto out;

	/* Count this user; smb_disable() drops the reference. */
	atomic_inc(&csdev->refcnt);

	dev_dbg(&csdev->dev, "Ultrasoc SMB enabled\n");
out:
	mutex_unlock(&drvdata->mutex);

	return ret;
}
301
/*
 * smb_disable - coresight sink disable entry point.
 *
 * Refuses with -EBUSY while user space is reading the buffer, or while
 * other sessions still hold a reference. Only the last user actually
 * stops the hardware and releases ownership (pid/mode) of the sink.
 */
static int smb_disable(struct coresight_device *csdev)
{
	struct smb_drv_data *drvdata = dev_get_drvdata(csdev->dev.parent);
	int ret = 0;

	mutex_lock(&drvdata->mutex);

	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/* Nonzero after the decrement means other users remain active. */
	if (atomic_dec_return(&csdev->refcnt)) {
		ret = -EBUSY;
		goto out;
	}

	/* Complain if we (somehow) got out of sync */
	WARN_ON_ONCE(drvdata->mode == CS_MODE_DISABLED);

	smb_disable_hw(drvdata);

	/* Dissociate from the target process. */
	drvdata->pid = -1;
	drvdata->mode = CS_MODE_DISABLED;

	dev_dbg(&csdev->dev, "Ultrasoc SMB disabled\n");
out:
	mutex_unlock(&drvdata->mutex);

	return ret;
}
334
/*
 * Allocate the per-session cs_buffers descriptor for a perf session.
 * The descriptor records the perf AUX pages, snapshot mode and the pid
 * of the session owner; it is freed by smb_free_buffer().
 */
static void *smb_alloc_buffer(struct coresight_device *csdev,
			      struct perf_event *event, void **pages,
			      int nr_pages, bool overwrite)
{
	int node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);
	struct cs_buffers *buf;

	/* Allocate on the node of the traced CPU to keep accesses local. */
	buf = kzalloc_node(sizeof(*buf), GFP_KERNEL, node);
	if (!buf)
		return NULL;

	buf->snapshot = overwrite;
	buf->nr_pages = nr_pages;
	buf->data_pages = pages;
	buf->pid = task_pid_nr(event->owner);

	return buf;
}
354
/* Release the cs_buffers descriptor allocated by smb_alloc_buffer(). */
static void smb_free_buffer(void *config)
{
	kfree(config);
}
361
/*
 * smb_sync_perf_buffer - drain the SMB buffer into the perf AUX pages.
 *
 * @head is the current write position in the perf ring buffer. Data is
 * copied chunk by chunk, where each chunk is bounded both by the current
 * perf page and by the wrap-around point of the SMB buffer. The perf
 * page index itself wraps modulo buf->nr_pages. When everything has
 * been copied the SMB buffer is purged and reset for the next session.
 */
static void smb_sync_perf_buffer(struct smb_drv_data *drvdata,
				 struct cs_buffers *buf,
				 unsigned long head)
{
	struct smb_data_buffer *sdb = &drvdata->sdb;
	char **dst_pages = (char **)buf->data_pages;
	unsigned long to_copy;
	long pg_idx, pg_offset;

	pg_idx = head >> PAGE_SHIFT;
	pg_offset = head & (PAGE_SIZE - 1);

	while (sdb->data_size) {
		unsigned long pg_space = PAGE_SIZE - pg_offset;

		/* Bound the chunk by the space left in the current perf page. */
		to_copy = min(sdb->data_size, pg_space);

		/* Copy parts of trace data when read pointer wrap around */
		if (sdb->buf_rdptr + to_copy > sdb->buf_size)
			to_copy = sdb->buf_size - sdb->buf_rdptr;

		memcpy(dst_pages[pg_idx] + pg_offset,
		       sdb->buf_base + sdb->buf_rdptr, to_copy);

		/* Move to the next perf page once this one is full. */
		pg_offset += to_copy;
		if (pg_offset >= PAGE_SIZE) {
			pg_offset = 0;
			pg_idx++;
			pg_idx %= buf->nr_pages;
		}
		smb_update_read_ptr(drvdata, to_copy);
	}

	smb_reset_buffer(drvdata);
}
397
/*
 * smb_update_buffer - harvest trace data at the end of a perf session.
 *
 * Stops the hardware, computes how much data the SMB holds and copies
 * it into the perf AUX pages. If the SMB holds more than the perf ring
 * buffer can take (handle->size), the oldest data is skipped so the
 * newest trace survives, and the TRUNCATED flag is raised for
 * non-snapshot sessions. Returns the number of bytes made available to
 * perf, or 0 when there is nothing to collect.
 */
static unsigned long smb_update_buffer(struct coresight_device *csdev,
				       struct perf_output_handle *handle,
				       void *sink_config)
{
	struct smb_drv_data *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct smb_data_buffer *sdb = &drvdata->sdb;
	struct cs_buffers *buf = sink_config;
	unsigned long data_size = 0;
	bool lost = false;

	if (!buf)
		return 0;

	mutex_lock(&drvdata->mutex);

	/* Don't do anything if another tracer is using this sink. */
	if (atomic_read(&csdev->refcnt) != 1)
		goto out;

	smb_disable_hw(drvdata);
	smb_update_data_size(drvdata);

	/*
	 * The SMB buffer may be bigger than the space available in the
	 * perf ring buffer (handle->size). If so advance the offset so
	 * that we get the latest trace data.
	 */
	if (sdb->data_size > handle->size) {
		smb_update_read_ptr(drvdata, sdb->data_size - handle->size);
		lost = true;
	}

	data_size = sdb->data_size;
	smb_sync_perf_buffer(drvdata, buf, handle->head);
	if (!buf->snapshot && lost)
		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
out:
	mutex_unlock(&drvdata->mutex);

	return data_size;
}
439
/* Sink callbacks wired into the coresight core. */
static const struct coresight_ops_sink smb_cs_ops = {
	.enable		= smb_enable,
	.disable	= smb_disable,
	.alloc_buffer	= smb_alloc_buffer,
	.free_buffer	= smb_free_buffer,
	.update_buffer	= smb_update_buffer,
};

static const struct coresight_ops cs_ops = {
	.sink_ops	= &smb_cs_ops,
};
451
smb_init_data_buffer(struct platform_device * pdev,struct smb_data_buffer * sdb)452 static int smb_init_data_buffer(struct platform_device *pdev,
453 struct smb_data_buffer *sdb)
454 {
455 struct resource *res;
456 void *base;
457
458 res = platform_get_resource(pdev, IORESOURCE_MEM, SMB_BUF_ADDR_RES);
459 if (!res) {
460 dev_err(&pdev->dev, "SMB device failed to get resource\n");
461 return -EINVAL;
462 }
463
464 sdb->buf_rdptr = 0;
465 sdb->buf_hw_base = FIELD_GET(SMB_BUF_ADDR_LO_MSK, res->start);
466 sdb->buf_size = resource_size(res);
467 if (sdb->buf_size == 0)
468 return -EINVAL;
469
470 /*
471 * This is a chunk of memory, use classic mapping with better
472 * performance.
473 */
474 base = devm_memremap(&pdev->dev, sdb->buf_hw_base, sdb->buf_size,
475 MEMREMAP_WB);
476 if (IS_ERR(base))
477 return PTR_ERR(base);
478
479 sdb->buf_base = base;
480
481 return 0;
482 }
483
/*
 * smb_init_hw - put the SMB into a known state and apply default config.
 *
 * Disables tracing, drains/resets the logical buffer, then programs the
 * default logical-buffer, global and interrupt configuration registers.
 */
static void smb_init_hw(struct smb_drv_data *drvdata)
{
	smb_disable_hw(drvdata);
	smb_reset_buffer(drvdata);

	writel(SMB_LB_CFG_LO_DEFAULT, drvdata->base + SMB_LB_CFG_LO_REG);
	writel(SMB_LB_CFG_HI_DEFAULT, drvdata->base + SMB_LB_CFG_HI_REG);
	writel(SMB_GLB_CFG_DEFAULT, drvdata->base + SMB_GLB_CFG_REG);
	writel(SMB_GLB_INT_CFG, drvdata->base + SMB_GLB_INT_REG);
	writel(SMB_LB_INT_CTRL_CFG, drvdata->base + SMB_LB_INT_CTRL_REG);
}
495
smb_register_sink(struct platform_device * pdev,struct smb_drv_data * drvdata)496 static int smb_register_sink(struct platform_device *pdev,
497 struct smb_drv_data *drvdata)
498 {
499 struct coresight_platform_data *pdata = NULL;
500 struct coresight_desc desc = { 0 };
501 int ret;
502
503 pdata = coresight_get_platform_data(&pdev->dev);
504 if (IS_ERR(pdata))
505 return PTR_ERR(pdata);
506
507 desc.type = CORESIGHT_DEV_TYPE_SINK;
508 desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
509 desc.ops = &cs_ops;
510 desc.pdata = pdata;
511 desc.dev = &pdev->dev;
512 desc.groups = smb_sink_groups;
513 desc.name = coresight_alloc_device_name(&sink_devs, &pdev->dev);
514 if (!desc.name) {
515 dev_err(&pdev->dev, "Failed to alloc coresight device name");
516 return -ENOMEM;
517 }
518 desc.access = CSDEV_ACCESS_IOMEM(drvdata->base);
519
520 drvdata->csdev = coresight_register(&desc);
521 if (IS_ERR(drvdata->csdev))
522 return PTR_ERR(drvdata->csdev);
523
524 drvdata->miscdev.name = desc.name;
525 drvdata->miscdev.minor = MISC_DYNAMIC_MINOR;
526 drvdata->miscdev.fops = &smb_fops;
527 ret = misc_register(&drvdata->miscdev);
528 if (ret) {
529 coresight_unregister(drvdata->csdev);
530 dev_err(&pdev->dev, "Failed to register misc, ret=%d\n", ret);
531 }
532
533 return ret;
534 }
535
/* Tear down the misc device first, then the coresight sink. */
static void smb_unregister_sink(struct smb_drv_data *drvdata)
{
	misc_deregister(&drvdata->miscdev);
	coresight_unregister(drvdata->csdev);
}
541
/*
 * Ask firmware to enable or disable the UltraSoc hardware on the trace
 * path via an ACPI _DSM call (revision 0; function 1 enables, 0
 * disables). Going through firmware keeps the UltraSoc packet format
 * from being exposed. Returns 0 on success or a negative errno.
 */
static int smb_config_inport(struct device *dev, bool enable)
{
	union acpi_object *obj;
	guid_t guid;

	if (guid_parse(ULTRASOC_SMB_DSM_UUID, &guid)) {
		dev_err(dev, "Get GUID failed\n");
		return -EINVAL;
	}

	obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &guid, 0,
				enable ? 1 : 0, NULL);
	if (!obj) {
		dev_err(dev, "ACPI handle failed\n");
		return -ENODEV;
	}

	ACPI_FREE(obj);

	return 0;
}
568
smb_probe(struct platform_device * pdev)569 static int smb_probe(struct platform_device *pdev)
570 {
571 struct device *dev = &pdev->dev;
572 struct smb_drv_data *drvdata;
573 int ret;
574
575 drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
576 if (!drvdata)
577 return -ENOMEM;
578
579 drvdata->base = devm_platform_ioremap_resource(pdev, SMB_REG_ADDR_RES);
580 if (IS_ERR(drvdata->base)) {
581 dev_err(dev, "Failed to ioremap resource\n");
582 return PTR_ERR(drvdata->base);
583 }
584
585 smb_init_hw(drvdata);
586
587 ret = smb_init_data_buffer(pdev, &drvdata->sdb);
588 if (ret) {
589 dev_err(dev, "Failed to init buffer, ret = %d\n", ret);
590 return ret;
591 }
592
593 mutex_init(&drvdata->mutex);
594 drvdata->pid = -1;
595
596 ret = smb_register_sink(pdev, drvdata);
597 if (ret) {
598 dev_err(dev, "Failed to register SMB sink\n");
599 return ret;
600 }
601
602 ret = smb_config_inport(dev, true);
603 if (ret) {
604 smb_unregister_sink(drvdata);
605 return ret;
606 }
607
608 platform_set_drvdata(pdev, drvdata);
609
610 return 0;
611 }
612
smb_remove(struct platform_device * pdev)613 static int smb_remove(struct platform_device *pdev)
614 {
615 struct smb_drv_data *drvdata = platform_get_drvdata(pdev);
616 int ret;
617
618 ret = smb_config_inport(&pdev->dev, false);
619 if (ret)
620 return ret;
621
622 smb_unregister_sink(drvdata);
623
624 return 0;
625 }
626
#ifdef CONFIG_ACPI
/* ACPI _HID match table: HiSilicon UltraSoc SMB device. */
static const struct acpi_device_id ultrasoc_smb_acpi_match[] = {
	{"HISI03A1", 0},
	{}
};
MODULE_DEVICE_TABLE(acpi, ultrasoc_smb_acpi_match);
#endif
634
/* Platform driver glue; binding via sysfs is suppressed on purpose. */
static struct platform_driver smb_driver = {
	.driver = {
		.name = "ultrasoc-smb",
		.acpi_match_table = ACPI_PTR(ultrasoc_smb_acpi_match),
		.suppress_bind_attrs = true,
	},
	.probe = smb_probe,
	.remove = smb_remove,
};
module_platform_driver(smb_driver);

MODULE_DESCRIPTION("UltraSoc SMB CoreSight driver");
MODULE_LICENSE("Dual MIT/GPL");
MODULE_AUTHOR("Jonathan Zhou <jonathan.zhouwen@huawei.com>");
MODULE_AUTHOR("Qi Liu <liuqi115@huawei.com>");