// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2015 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */
6
7 #include <linux/pid_namespace.h>
8 #include <linux/pm_runtime.h>
9 #include <linux/sysfs.h>
10 #include "coresight-etm4x.h"
11 #include "coresight-priv.h"
12 #include "coresight-syscfg.h"
13
/*
 * Program the ViewInst include/exclude control bits for the address
 * comparator pair selected by config->addr_idx.
 *
 * Returns 0 on success (or when the comparator is not an instruction
 * address comparator), -EINVAL if the index does not point at the low
 * half of a pair or the pair is not configured as an address range.
 */
static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude)
{
	struct etmv4_config *config = &drvdata->config;
	u8 idx = config->addr_idx;

	/*
	 * TRCACATRn.TYPE bits[1:0] give the type of comparison the trace
	 * unit performs; only instruction address comparisons take part
	 * in the ViewInst include/exclude logic.
	 */
	if (BMVAL(config->addr_acc[idx], 0, 1) != ETM_INSTR_ADDR)
		return 0;

	/* The index must select the low half of a comparator pair. */
	if (idx % 2)
		return -EINVAL;

	/* Both halves of the pair must be configured as a range. */
	if (config->addr_type[idx] != ETM_ADDR_TYPE_RANGE ||
	    config->addr_type[idx + 1] != ETM_ADDR_TYPE_RANGE)
		return -EINVAL;

	if (exclude) {
		/* Set the exclude bit, clear the include bit for the pair. */
		config->viiectlr |= BIT(idx / 2 + 16);
		config->viiectlr &= ~BIT(idx / 2);
	} else {
		/* Set the include bit, clear the exclude bit for the pair. */
		config->viiectlr |= BIT(idx / 2);
		config->viiectlr &= ~BIT(idx / 2 + 16);
	}

	return 0;
}
56
nr_pe_cmp_show(struct device * dev,struct device_attribute * attr,char * buf)57 static ssize_t nr_pe_cmp_show(struct device *dev,
58 struct device_attribute *attr,
59 char *buf)
60 {
61 unsigned long val;
62 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
63
64 val = drvdata->nr_pe_cmp;
65 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
66 }
67 static DEVICE_ATTR_RO(nr_pe_cmp);
68
nr_addr_cmp_show(struct device * dev,struct device_attribute * attr,char * buf)69 static ssize_t nr_addr_cmp_show(struct device *dev,
70 struct device_attribute *attr,
71 char *buf)
72 {
73 unsigned long val;
74 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
75
76 val = drvdata->nr_addr_cmp;
77 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
78 }
79 static DEVICE_ATTR_RO(nr_addr_cmp);
80
nr_cntr_show(struct device * dev,struct device_attribute * attr,char * buf)81 static ssize_t nr_cntr_show(struct device *dev,
82 struct device_attribute *attr,
83 char *buf)
84 {
85 unsigned long val;
86 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
87
88 val = drvdata->nr_cntr;
89 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
90 }
91 static DEVICE_ATTR_RO(nr_cntr);
92
nr_ext_inp_show(struct device * dev,struct device_attribute * attr,char * buf)93 static ssize_t nr_ext_inp_show(struct device *dev,
94 struct device_attribute *attr,
95 char *buf)
96 {
97 unsigned long val;
98 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
99
100 val = drvdata->nr_ext_inp;
101 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
102 }
103 static DEVICE_ATTR_RO(nr_ext_inp);
104
numcidc_show(struct device * dev,struct device_attribute * attr,char * buf)105 static ssize_t numcidc_show(struct device *dev,
106 struct device_attribute *attr,
107 char *buf)
108 {
109 unsigned long val;
110 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
111
112 val = drvdata->numcidc;
113 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
114 }
115 static DEVICE_ATTR_RO(numcidc);
116
numvmidc_show(struct device * dev,struct device_attribute * attr,char * buf)117 static ssize_t numvmidc_show(struct device *dev,
118 struct device_attribute *attr,
119 char *buf)
120 {
121 unsigned long val;
122 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
123
124 val = drvdata->numvmidc;
125 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
126 }
127 static DEVICE_ATTR_RO(numvmidc);
128
nrseqstate_show(struct device * dev,struct device_attribute * attr,char * buf)129 static ssize_t nrseqstate_show(struct device *dev,
130 struct device_attribute *attr,
131 char *buf)
132 {
133 unsigned long val;
134 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
135
136 val = drvdata->nrseqstate;
137 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
138 }
139 static DEVICE_ATTR_RO(nrseqstate);
140
nr_resource_show(struct device * dev,struct device_attribute * attr,char * buf)141 static ssize_t nr_resource_show(struct device *dev,
142 struct device_attribute *attr,
143 char *buf)
144 {
145 unsigned long val;
146 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
147
148 val = drvdata->nr_resource;
149 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
150 }
151 static DEVICE_ATTR_RO(nr_resource);
152
nr_ss_cmp_show(struct device * dev,struct device_attribute * attr,char * buf)153 static ssize_t nr_ss_cmp_show(struct device *dev,
154 struct device_attribute *attr,
155 char *buf)
156 {
157 unsigned long val;
158 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
159
160 val = drvdata->nr_ss_cmp;
161 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
162 }
163 static DEVICE_ATTR_RO(nr_ss_cmp);
164
reset_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)165 static ssize_t reset_store(struct device *dev,
166 struct device_attribute *attr,
167 const char *buf, size_t size)
168 {
169 int i;
170 unsigned long val;
171 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
172 struct etmv4_config *config = &drvdata->config;
173
174 if (kstrtoul(buf, 16, &val))
175 return -EINVAL;
176
177 spin_lock(&drvdata->spinlock);
178 if (val)
179 config->mode = 0x0;
180
181 /* Disable data tracing: do not trace load and store data transfers */
182 config->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE);
183 config->cfg &= ~(BIT(1) | BIT(2));
184
185 /* Disable data value and data address tracing */
186 config->mode &= ~(ETM_MODE_DATA_TRACE_ADDR |
187 ETM_MODE_DATA_TRACE_VAL);
188 config->cfg &= ~(BIT(16) | BIT(17));
189
190 /* Disable all events tracing */
191 config->eventctrl0 = 0x0;
192 config->eventctrl1 = 0x0;
193
194 /* Disable timestamp event */
195 config->ts_ctrl = 0x0;
196
197 /* Disable stalling */
198 config->stall_ctrl = 0x0;
199
200 /* Reset trace synchronization period to 2^8 = 256 bytes*/
201 if (drvdata->syncpr == false)
202 config->syncfreq = 0x8;
203
204 /*
205 * Enable ViewInst to trace everything with start-stop logic in
206 * started state. ARM recommends start-stop logic is set before
207 * each trace run.
208 */
209 config->vinst_ctrl = BIT(0);
210 if (drvdata->nr_addr_cmp > 0) {
211 config->mode |= ETM_MODE_VIEWINST_STARTSTOP;
212 /* SSSTATUS, bit[9] */
213 config->vinst_ctrl |= BIT(9);
214 }
215
216 /* No address range filtering for ViewInst */
217 config->viiectlr = 0x0;
218
219 /* No start-stop filtering for ViewInst */
220 config->vissctlr = 0x0;
221 config->vipcssctlr = 0x0;
222
223 /* Disable seq events */
224 for (i = 0; i < drvdata->nrseqstate-1; i++)
225 config->seq_ctrl[i] = 0x0;
226 config->seq_rst = 0x0;
227 config->seq_state = 0x0;
228
229 /* Disable external input events */
230 config->ext_inp = 0x0;
231
232 config->cntr_idx = 0x0;
233 for (i = 0; i < drvdata->nr_cntr; i++) {
234 config->cntrldvr[i] = 0x0;
235 config->cntr_ctrl[i] = 0x0;
236 config->cntr_val[i] = 0x0;
237 }
238
239 config->res_idx = 0x0;
240 for (i = 2; i < 2 * drvdata->nr_resource; i++)
241 config->res_ctrl[i] = 0x0;
242
243 config->ss_idx = 0x0;
244 for (i = 0; i < drvdata->nr_ss_cmp; i++) {
245 config->ss_ctrl[i] = 0x0;
246 config->ss_pe_cmp[i] = 0x0;
247 }
248
249 config->addr_idx = 0x0;
250 for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
251 config->addr_val[i] = 0x0;
252 config->addr_acc[i] = 0x0;
253 config->addr_type[i] = ETM_ADDR_TYPE_NONE;
254 }
255
256 config->ctxid_idx = 0x0;
257 for (i = 0; i < drvdata->numcidc; i++)
258 config->ctxid_pid[i] = 0x0;
259
260 config->ctxid_mask0 = 0x0;
261 config->ctxid_mask1 = 0x0;
262
263 config->vmid_idx = 0x0;
264 for (i = 0; i < drvdata->numvmidc; i++)
265 config->vmid_val[i] = 0x0;
266 config->vmid_mask0 = 0x0;
267 config->vmid_mask1 = 0x0;
268
269 drvdata->trcid = drvdata->cpu + 1;
270
271 spin_unlock(&drvdata->spinlock);
272
273 cscfg_csdev_reset_feats(to_coresight_device(dev));
274
275 return size;
276 }
277 static DEVICE_ATTR_WO(reset);
278
mode_show(struct device * dev,struct device_attribute * attr,char * buf)279 static ssize_t mode_show(struct device *dev,
280 struct device_attribute *attr,
281 char *buf)
282 {
283 unsigned long val;
284 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
285 struct etmv4_config *config = &drvdata->config;
286
287 val = config->mode;
288 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
289 }
290
mode_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)291 static ssize_t mode_store(struct device *dev,
292 struct device_attribute *attr,
293 const char *buf, size_t size)
294 {
295 unsigned long val, mode;
296 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
297 struct etmv4_config *config = &drvdata->config;
298
299 if (kstrtoul(buf, 16, &val))
300 return -EINVAL;
301
302 spin_lock(&drvdata->spinlock);
303 config->mode = val & ETMv4_MODE_ALL;
304
305 if (drvdata->instrp0 == true) {
306 /* start by clearing instruction P0 field */
307 config->cfg &= ~(BIT(1) | BIT(2));
308 if (config->mode & ETM_MODE_LOAD)
309 /* 0b01 Trace load instructions as P0 instructions */
310 config->cfg |= BIT(1);
311 if (config->mode & ETM_MODE_STORE)
312 /* 0b10 Trace store instructions as P0 instructions */
313 config->cfg |= BIT(2);
314 if (config->mode & ETM_MODE_LOAD_STORE)
315 /*
316 * 0b11 Trace load and store instructions
317 * as P0 instructions
318 */
319 config->cfg |= BIT(1) | BIT(2);
320 }
321
322 /* bit[3], Branch broadcast mode */
323 if ((config->mode & ETM_MODE_BB) && (drvdata->trcbb == true))
324 config->cfg |= BIT(3);
325 else
326 config->cfg &= ~BIT(3);
327
328 /* bit[4], Cycle counting instruction trace bit */
329 if ((config->mode & ETMv4_MODE_CYCACC) &&
330 (drvdata->trccci == true))
331 config->cfg |= BIT(4);
332 else
333 config->cfg &= ~BIT(4);
334
335 /* bit[6], Context ID tracing bit */
336 if ((config->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size))
337 config->cfg |= BIT(6);
338 else
339 config->cfg &= ~BIT(6);
340
341 if ((config->mode & ETM_MODE_VMID) && (drvdata->vmid_size))
342 config->cfg |= BIT(7);
343 else
344 config->cfg &= ~BIT(7);
345
346 /* bits[10:8], Conditional instruction tracing bit */
347 mode = ETM_MODE_COND(config->mode);
348 if (drvdata->trccond == true) {
349 config->cfg &= ~(BIT(8) | BIT(9) | BIT(10));
350 config->cfg |= mode << 8;
351 }
352
353 /* bit[11], Global timestamp tracing bit */
354 if ((config->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size))
355 config->cfg |= BIT(11);
356 else
357 config->cfg &= ~BIT(11);
358
359 /* bit[12], Return stack enable bit */
360 if ((config->mode & ETM_MODE_RETURNSTACK) &&
361 (drvdata->retstack == true))
362 config->cfg |= BIT(12);
363 else
364 config->cfg &= ~BIT(12);
365
366 /* bits[14:13], Q element enable field */
367 mode = ETM_MODE_QELEM(config->mode);
368 /* start by clearing QE bits */
369 config->cfg &= ~(BIT(13) | BIT(14));
370 /* if supported, Q elements with instruction counts are enabled */
371 if ((mode & BIT(0)) && (drvdata->q_support & BIT(0)))
372 config->cfg |= BIT(13);
373 /*
374 * if supported, Q elements with and without instruction
375 * counts are enabled
376 */
377 if ((mode & BIT(1)) && (drvdata->q_support & BIT(1)))
378 config->cfg |= BIT(14);
379
380 /* bit[11], AMBA Trace Bus (ATB) trigger enable bit */
381 if ((config->mode & ETM_MODE_ATB_TRIGGER) &&
382 (drvdata->atbtrig == true))
383 config->eventctrl1 |= BIT(11);
384 else
385 config->eventctrl1 &= ~BIT(11);
386
387 /* bit[12], Low-power state behavior override bit */
388 if ((config->mode & ETM_MODE_LPOVERRIDE) &&
389 (drvdata->lpoverride == true))
390 config->eventctrl1 |= BIT(12);
391 else
392 config->eventctrl1 &= ~BIT(12);
393
394 /* bit[8], Instruction stall bit */
395 if ((config->mode & ETM_MODE_ISTALL_EN) && (drvdata->stallctl == true))
396 config->stall_ctrl |= BIT(8);
397 else
398 config->stall_ctrl &= ~BIT(8);
399
400 /* bit[10], Prioritize instruction trace bit */
401 if (config->mode & ETM_MODE_INSTPRIO)
402 config->stall_ctrl |= BIT(10);
403 else
404 config->stall_ctrl &= ~BIT(10);
405
406 /* bit[13], Trace overflow prevention bit */
407 if ((config->mode & ETM_MODE_NOOVERFLOW) &&
408 (drvdata->nooverflow == true))
409 config->stall_ctrl |= BIT(13);
410 else
411 config->stall_ctrl &= ~BIT(13);
412
413 /* bit[9] Start/stop logic control bit */
414 if (config->mode & ETM_MODE_VIEWINST_STARTSTOP)
415 config->vinst_ctrl |= BIT(9);
416 else
417 config->vinst_ctrl &= ~BIT(9);
418
419 /* bit[10], Whether a trace unit must trace a Reset exception */
420 if (config->mode & ETM_MODE_TRACE_RESET)
421 config->vinst_ctrl |= BIT(10);
422 else
423 config->vinst_ctrl &= ~BIT(10);
424
425 /* bit[11], Whether a trace unit must trace a system error exception */
426 if ((config->mode & ETM_MODE_TRACE_ERR) &&
427 (drvdata->trc_error == true))
428 config->vinst_ctrl |= BIT(11);
429 else
430 config->vinst_ctrl &= ~BIT(11);
431
432 if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
433 etm4_config_trace_mode(config);
434
435 spin_unlock(&drvdata->spinlock);
436
437 return size;
438 }
439 static DEVICE_ATTR_RW(mode);
440
pe_show(struct device * dev,struct device_attribute * attr,char * buf)441 static ssize_t pe_show(struct device *dev,
442 struct device_attribute *attr,
443 char *buf)
444 {
445 unsigned long val;
446 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
447 struct etmv4_config *config = &drvdata->config;
448
449 val = config->pe_sel;
450 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
451 }
452
pe_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)453 static ssize_t pe_store(struct device *dev,
454 struct device_attribute *attr,
455 const char *buf, size_t size)
456 {
457 unsigned long val;
458 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
459 struct etmv4_config *config = &drvdata->config;
460
461 if (kstrtoul(buf, 16, &val))
462 return -EINVAL;
463
464 spin_lock(&drvdata->spinlock);
465 if (val > drvdata->nr_pe) {
466 spin_unlock(&drvdata->spinlock);
467 return -EINVAL;
468 }
469
470 config->pe_sel = val;
471 spin_unlock(&drvdata->spinlock);
472 return size;
473 }
474 static DEVICE_ATTR_RW(pe);
475
event_show(struct device * dev,struct device_attribute * attr,char * buf)476 static ssize_t event_show(struct device *dev,
477 struct device_attribute *attr,
478 char *buf)
479 {
480 unsigned long val;
481 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
482 struct etmv4_config *config = &drvdata->config;
483
484 val = config->eventctrl0;
485 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
486 }
487
event_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)488 static ssize_t event_store(struct device *dev,
489 struct device_attribute *attr,
490 const char *buf, size_t size)
491 {
492 unsigned long val;
493 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
494 struct etmv4_config *config = &drvdata->config;
495
496 if (kstrtoul(buf, 16, &val))
497 return -EINVAL;
498
499 spin_lock(&drvdata->spinlock);
500 switch (drvdata->nr_event) {
501 case 0x0:
502 /* EVENT0, bits[7:0] */
503 config->eventctrl0 = val & 0xFF;
504 break;
505 case 0x1:
506 /* EVENT1, bits[15:8] */
507 config->eventctrl0 = val & 0xFFFF;
508 break;
509 case 0x2:
510 /* EVENT2, bits[23:16] */
511 config->eventctrl0 = val & 0xFFFFFF;
512 break;
513 case 0x3:
514 /* EVENT3, bits[31:24] */
515 config->eventctrl0 = val;
516 break;
517 default:
518 break;
519 }
520 spin_unlock(&drvdata->spinlock);
521 return size;
522 }
523 static DEVICE_ATTR_RW(event);
524
event_instren_show(struct device * dev,struct device_attribute * attr,char * buf)525 static ssize_t event_instren_show(struct device *dev,
526 struct device_attribute *attr,
527 char *buf)
528 {
529 unsigned long val;
530 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
531 struct etmv4_config *config = &drvdata->config;
532
533 val = BMVAL(config->eventctrl1, 0, 3);
534 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
535 }
536
event_instren_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)537 static ssize_t event_instren_store(struct device *dev,
538 struct device_attribute *attr,
539 const char *buf, size_t size)
540 {
541 unsigned long val;
542 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
543 struct etmv4_config *config = &drvdata->config;
544
545 if (kstrtoul(buf, 16, &val))
546 return -EINVAL;
547
548 spin_lock(&drvdata->spinlock);
549 /* start by clearing all instruction event enable bits */
550 config->eventctrl1 &= ~(BIT(0) | BIT(1) | BIT(2) | BIT(3));
551 switch (drvdata->nr_event) {
552 case 0x0:
553 /* generate Event element for event 1 */
554 config->eventctrl1 |= val & BIT(1);
555 break;
556 case 0x1:
557 /* generate Event element for event 1 and 2 */
558 config->eventctrl1 |= val & (BIT(0) | BIT(1));
559 break;
560 case 0x2:
561 /* generate Event element for event 1, 2 and 3 */
562 config->eventctrl1 |= val & (BIT(0) | BIT(1) | BIT(2));
563 break;
564 case 0x3:
565 /* generate Event element for all 4 events */
566 config->eventctrl1 |= val & 0xF;
567 break;
568 default:
569 break;
570 }
571 spin_unlock(&drvdata->spinlock);
572 return size;
573 }
574 static DEVICE_ATTR_RW(event_instren);
575
event_ts_show(struct device * dev,struct device_attribute * attr,char * buf)576 static ssize_t event_ts_show(struct device *dev,
577 struct device_attribute *attr,
578 char *buf)
579 {
580 unsigned long val;
581 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
582 struct etmv4_config *config = &drvdata->config;
583
584 val = config->ts_ctrl;
585 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
586 }
587
event_ts_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)588 static ssize_t event_ts_store(struct device *dev,
589 struct device_attribute *attr,
590 const char *buf, size_t size)
591 {
592 unsigned long val;
593 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
594 struct etmv4_config *config = &drvdata->config;
595
596 if (kstrtoul(buf, 16, &val))
597 return -EINVAL;
598 if (!drvdata->ts_size)
599 return -EINVAL;
600
601 config->ts_ctrl = val & ETMv4_EVENT_MASK;
602 return size;
603 }
604 static DEVICE_ATTR_RW(event_ts);
605
syncfreq_show(struct device * dev,struct device_attribute * attr,char * buf)606 static ssize_t syncfreq_show(struct device *dev,
607 struct device_attribute *attr,
608 char *buf)
609 {
610 unsigned long val;
611 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
612 struct etmv4_config *config = &drvdata->config;
613
614 val = config->syncfreq;
615 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
616 }
617
syncfreq_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)618 static ssize_t syncfreq_store(struct device *dev,
619 struct device_attribute *attr,
620 const char *buf, size_t size)
621 {
622 unsigned long val;
623 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
624 struct etmv4_config *config = &drvdata->config;
625
626 if (kstrtoul(buf, 16, &val))
627 return -EINVAL;
628 if (drvdata->syncpr == true)
629 return -EINVAL;
630
631 config->syncfreq = val & ETMv4_SYNC_MASK;
632 return size;
633 }
634 static DEVICE_ATTR_RW(syncfreq);
635
cyc_threshold_show(struct device * dev,struct device_attribute * attr,char * buf)636 static ssize_t cyc_threshold_show(struct device *dev,
637 struct device_attribute *attr,
638 char *buf)
639 {
640 unsigned long val;
641 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
642 struct etmv4_config *config = &drvdata->config;
643
644 val = config->ccctlr;
645 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
646 }
647
cyc_threshold_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)648 static ssize_t cyc_threshold_store(struct device *dev,
649 struct device_attribute *attr,
650 const char *buf, size_t size)
651 {
652 unsigned long val;
653 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
654 struct etmv4_config *config = &drvdata->config;
655
656 if (kstrtoul(buf, 16, &val))
657 return -EINVAL;
658
659 /* mask off max threshold before checking min value */
660 val &= ETM_CYC_THRESHOLD_MASK;
661 if (val < drvdata->ccitmin)
662 return -EINVAL;
663
664 config->ccctlr = val;
665 return size;
666 }
667 static DEVICE_ATTR_RW(cyc_threshold);
668
bb_ctrl_show(struct device * dev,struct device_attribute * attr,char * buf)669 static ssize_t bb_ctrl_show(struct device *dev,
670 struct device_attribute *attr,
671 char *buf)
672 {
673 unsigned long val;
674 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
675 struct etmv4_config *config = &drvdata->config;
676
677 val = config->bb_ctrl;
678 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
679 }
680
bb_ctrl_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)681 static ssize_t bb_ctrl_store(struct device *dev,
682 struct device_attribute *attr,
683 const char *buf, size_t size)
684 {
685 unsigned long val;
686 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
687 struct etmv4_config *config = &drvdata->config;
688
689 if (kstrtoul(buf, 16, &val))
690 return -EINVAL;
691 if (drvdata->trcbb == false)
692 return -EINVAL;
693 if (!drvdata->nr_addr_cmp)
694 return -EINVAL;
695
696 /*
697 * Bit[8] controls include(1) / exclude(0), bits[0-7] select
698 * individual range comparators. If include then at least 1
699 * range must be selected.
700 */
701 if ((val & BIT(8)) && (BMVAL(val, 0, 7) == 0))
702 return -EINVAL;
703
704 config->bb_ctrl = val & GENMASK(8, 0);
705 return size;
706 }
707 static DEVICE_ATTR_RW(bb_ctrl);
708
event_vinst_show(struct device * dev,struct device_attribute * attr,char * buf)709 static ssize_t event_vinst_show(struct device *dev,
710 struct device_attribute *attr,
711 char *buf)
712 {
713 unsigned long val;
714 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
715 struct etmv4_config *config = &drvdata->config;
716
717 val = config->vinst_ctrl & ETMv4_EVENT_MASK;
718 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
719 }
720
event_vinst_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)721 static ssize_t event_vinst_store(struct device *dev,
722 struct device_attribute *attr,
723 const char *buf, size_t size)
724 {
725 unsigned long val;
726 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
727 struct etmv4_config *config = &drvdata->config;
728
729 if (kstrtoul(buf, 16, &val))
730 return -EINVAL;
731
732 spin_lock(&drvdata->spinlock);
733 val &= ETMv4_EVENT_MASK;
734 config->vinst_ctrl &= ~ETMv4_EVENT_MASK;
735 config->vinst_ctrl |= val;
736 spin_unlock(&drvdata->spinlock);
737 return size;
738 }
739 static DEVICE_ATTR_RW(event_vinst);
740
s_exlevel_vinst_show(struct device * dev,struct device_attribute * attr,char * buf)741 static ssize_t s_exlevel_vinst_show(struct device *dev,
742 struct device_attribute *attr,
743 char *buf)
744 {
745 unsigned long val;
746 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
747 struct etmv4_config *config = &drvdata->config;
748
749 val = (config->vinst_ctrl & TRCVICTLR_EXLEVEL_S_MASK) >> TRCVICTLR_EXLEVEL_S_SHIFT;
750 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
751 }
752
s_exlevel_vinst_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)753 static ssize_t s_exlevel_vinst_store(struct device *dev,
754 struct device_attribute *attr,
755 const char *buf, size_t size)
756 {
757 unsigned long val;
758 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
759 struct etmv4_config *config = &drvdata->config;
760
761 if (kstrtoul(buf, 16, &val))
762 return -EINVAL;
763
764 spin_lock(&drvdata->spinlock);
765 /* clear all EXLEVEL_S bits */
766 config->vinst_ctrl &= ~(TRCVICTLR_EXLEVEL_S_MASK);
767 /* enable instruction tracing for corresponding exception level */
768 val &= drvdata->s_ex_level;
769 config->vinst_ctrl |= (val << TRCVICTLR_EXLEVEL_S_SHIFT);
770 spin_unlock(&drvdata->spinlock);
771 return size;
772 }
773 static DEVICE_ATTR_RW(s_exlevel_vinst);
774
ns_exlevel_vinst_show(struct device * dev,struct device_attribute * attr,char * buf)775 static ssize_t ns_exlevel_vinst_show(struct device *dev,
776 struct device_attribute *attr,
777 char *buf)
778 {
779 unsigned long val;
780 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
781 struct etmv4_config *config = &drvdata->config;
782
783 /* EXLEVEL_NS, bits[23:20] */
784 val = (config->vinst_ctrl & TRCVICTLR_EXLEVEL_NS_MASK) >> TRCVICTLR_EXLEVEL_NS_SHIFT;
785 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
786 }
787
ns_exlevel_vinst_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)788 static ssize_t ns_exlevel_vinst_store(struct device *dev,
789 struct device_attribute *attr,
790 const char *buf, size_t size)
791 {
792 unsigned long val;
793 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
794 struct etmv4_config *config = &drvdata->config;
795
796 if (kstrtoul(buf, 16, &val))
797 return -EINVAL;
798
799 spin_lock(&drvdata->spinlock);
800 /* clear EXLEVEL_NS bits */
801 config->vinst_ctrl &= ~(TRCVICTLR_EXLEVEL_NS_MASK);
802 /* enable instruction tracing for corresponding exception level */
803 val &= drvdata->ns_ex_level;
804 config->vinst_ctrl |= (val << TRCVICTLR_EXLEVEL_NS_SHIFT);
805 spin_unlock(&drvdata->spinlock);
806 return size;
807 }
808 static DEVICE_ATTR_RW(ns_exlevel_vinst);
809
addr_idx_show(struct device * dev,struct device_attribute * attr,char * buf)810 static ssize_t addr_idx_show(struct device *dev,
811 struct device_attribute *attr,
812 char *buf)
813 {
814 unsigned long val;
815 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
816 struct etmv4_config *config = &drvdata->config;
817
818 val = config->addr_idx;
819 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
820 }
821
addr_idx_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)822 static ssize_t addr_idx_store(struct device *dev,
823 struct device_attribute *attr,
824 const char *buf, size_t size)
825 {
826 unsigned long val;
827 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
828 struct etmv4_config *config = &drvdata->config;
829
830 if (kstrtoul(buf, 16, &val))
831 return -EINVAL;
832 if (val >= drvdata->nr_addr_cmp * 2)
833 return -EINVAL;
834
835 /*
836 * Use spinlock to ensure index doesn't change while it gets
837 * dereferenced multiple times within a spinlock block elsewhere.
838 */
839 spin_lock(&drvdata->spinlock);
840 config->addr_idx = val;
841 spin_unlock(&drvdata->spinlock);
842 return size;
843 }
844 static DEVICE_ATTR_RW(addr_idx);
845
addr_instdatatype_show(struct device * dev,struct device_attribute * attr,char * buf)846 static ssize_t addr_instdatatype_show(struct device *dev,
847 struct device_attribute *attr,
848 char *buf)
849 {
850 ssize_t len;
851 u8 val, idx;
852 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
853 struct etmv4_config *config = &drvdata->config;
854
855 spin_lock(&drvdata->spinlock);
856 idx = config->addr_idx;
857 val = BMVAL(config->addr_acc[idx], 0, 1);
858 len = scnprintf(buf, PAGE_SIZE, "%s\n",
859 val == ETM_INSTR_ADDR ? "instr" :
860 (val == ETM_DATA_LOAD_ADDR ? "data_load" :
861 (val == ETM_DATA_STORE_ADDR ? "data_store" :
862 "data_load_store")));
863 spin_unlock(&drvdata->spinlock);
864 return len;
865 }
866
addr_instdatatype_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)867 static ssize_t addr_instdatatype_store(struct device *dev,
868 struct device_attribute *attr,
869 const char *buf, size_t size)
870 {
871 u8 idx;
872 char str[20] = "";
873 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
874 struct etmv4_config *config = &drvdata->config;
875
876 if (strlen(buf) >= 20)
877 return -EINVAL;
878 if (sscanf(buf, "%s", str) != 1)
879 return -EINVAL;
880
881 spin_lock(&drvdata->spinlock);
882 idx = config->addr_idx;
883 if (!strcmp(str, "instr"))
884 /* TYPE, bits[1:0] */
885 config->addr_acc[idx] &= ~(BIT(0) | BIT(1));
886
887 spin_unlock(&drvdata->spinlock);
888 return size;
889 }
890 static DEVICE_ATTR_RW(addr_instdatatype);
891
addr_single_show(struct device * dev,struct device_attribute * attr,char * buf)892 static ssize_t addr_single_show(struct device *dev,
893 struct device_attribute *attr,
894 char *buf)
895 {
896 u8 idx;
897 unsigned long val;
898 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
899 struct etmv4_config *config = &drvdata->config;
900
901 idx = config->addr_idx;
902 spin_lock(&drvdata->spinlock);
903 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
904 config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
905 spin_unlock(&drvdata->spinlock);
906 return -EPERM;
907 }
908 val = (unsigned long)config->addr_val[idx];
909 spin_unlock(&drvdata->spinlock);
910 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
911 }
912
addr_single_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)913 static ssize_t addr_single_store(struct device *dev,
914 struct device_attribute *attr,
915 const char *buf, size_t size)
916 {
917 u8 idx;
918 unsigned long val;
919 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
920 struct etmv4_config *config = &drvdata->config;
921
922 if (kstrtoul(buf, 16, &val))
923 return -EINVAL;
924
925 spin_lock(&drvdata->spinlock);
926 idx = config->addr_idx;
927 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
928 config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
929 spin_unlock(&drvdata->spinlock);
930 return -EPERM;
931 }
932
933 config->addr_val[idx] = (u64)val;
934 config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
935 spin_unlock(&drvdata->spinlock);
936 return size;
937 }
938 static DEVICE_ATTR_RW(addr_single);
939
addr_range_show(struct device * dev,struct device_attribute * attr,char * buf)940 static ssize_t addr_range_show(struct device *dev,
941 struct device_attribute *attr,
942 char *buf)
943 {
944 u8 idx;
945 unsigned long val1, val2;
946 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
947 struct etmv4_config *config = &drvdata->config;
948
949 spin_lock(&drvdata->spinlock);
950 idx = config->addr_idx;
951 if (idx % 2 != 0) {
952 spin_unlock(&drvdata->spinlock);
953 return -EPERM;
954 }
955 if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
956 config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
957 (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
958 config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
959 spin_unlock(&drvdata->spinlock);
960 return -EPERM;
961 }
962
963 val1 = (unsigned long)config->addr_val[idx];
964 val2 = (unsigned long)config->addr_val[idx + 1];
965 spin_unlock(&drvdata->spinlock);
966 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
967 }
968
addr_range_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)969 static ssize_t addr_range_store(struct device *dev,
970 struct device_attribute *attr,
971 const char *buf, size_t size)
972 {
973 u8 idx;
974 unsigned long val1, val2;
975 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
976 struct etmv4_config *config = &drvdata->config;
977 int elements, exclude;
978
979 elements = sscanf(buf, "%lx %lx %x", &val1, &val2, &exclude);
980
981 /* exclude is optional, but need at least two parameter */
982 if (elements < 2)
983 return -EINVAL;
984 /* lower address comparator cannot have a higher address value */
985 if (val1 > val2)
986 return -EINVAL;
987
988 spin_lock(&drvdata->spinlock);
989 idx = config->addr_idx;
990 if (idx % 2 != 0) {
991 spin_unlock(&drvdata->spinlock);
992 return -EPERM;
993 }
994
995 if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
996 config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
997 (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
998 config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
999 spin_unlock(&drvdata->spinlock);
1000 return -EPERM;
1001 }
1002
1003 config->addr_val[idx] = (u64)val1;
1004 config->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
1005 config->addr_val[idx + 1] = (u64)val2;
1006 config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
1007 /*
1008 * Program include or exclude control bits for vinst or vdata
1009 * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE
1010 * use supplied value, or default to bit set in 'mode'
1011 */
1012 if (elements != 3)
1013 exclude = config->mode & ETM_MODE_EXCLUDE;
1014 etm4_set_mode_exclude(drvdata, exclude ? true : false);
1015
1016 spin_unlock(&drvdata->spinlock);
1017 return size;
1018 }
1019 static DEVICE_ATTR_RW(addr_range);
1020
addr_start_show(struct device * dev,struct device_attribute * attr,char * buf)1021 static ssize_t addr_start_show(struct device *dev,
1022 struct device_attribute *attr,
1023 char *buf)
1024 {
1025 u8 idx;
1026 unsigned long val;
1027 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1028 struct etmv4_config *config = &drvdata->config;
1029
1030 spin_lock(&drvdata->spinlock);
1031 idx = config->addr_idx;
1032
1033 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1034 config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1035 spin_unlock(&drvdata->spinlock);
1036 return -EPERM;
1037 }
1038
1039 val = (unsigned long)config->addr_val[idx];
1040 spin_unlock(&drvdata->spinlock);
1041 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1042 }
1043
addr_start_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)1044 static ssize_t addr_start_store(struct device *dev,
1045 struct device_attribute *attr,
1046 const char *buf, size_t size)
1047 {
1048 u8 idx;
1049 unsigned long val;
1050 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1051 struct etmv4_config *config = &drvdata->config;
1052
1053 if (kstrtoul(buf, 16, &val))
1054 return -EINVAL;
1055
1056 spin_lock(&drvdata->spinlock);
1057 idx = config->addr_idx;
1058 if (!drvdata->nr_addr_cmp) {
1059 spin_unlock(&drvdata->spinlock);
1060 return -EINVAL;
1061 }
1062 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1063 config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1064 spin_unlock(&drvdata->spinlock);
1065 return -EPERM;
1066 }
1067
1068 config->addr_val[idx] = (u64)val;
1069 config->addr_type[idx] = ETM_ADDR_TYPE_START;
1070 config->vissctlr |= BIT(idx);
1071 spin_unlock(&drvdata->spinlock);
1072 return size;
1073 }
1074 static DEVICE_ATTR_RW(addr_start);
1075
addr_stop_show(struct device * dev,struct device_attribute * attr,char * buf)1076 static ssize_t addr_stop_show(struct device *dev,
1077 struct device_attribute *attr,
1078 char *buf)
1079 {
1080 u8 idx;
1081 unsigned long val;
1082 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1083 struct etmv4_config *config = &drvdata->config;
1084
1085 spin_lock(&drvdata->spinlock);
1086 idx = config->addr_idx;
1087
1088 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1089 config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1090 spin_unlock(&drvdata->spinlock);
1091 return -EPERM;
1092 }
1093
1094 val = (unsigned long)config->addr_val[idx];
1095 spin_unlock(&drvdata->spinlock);
1096 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1097 }
1098
addr_stop_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)1099 static ssize_t addr_stop_store(struct device *dev,
1100 struct device_attribute *attr,
1101 const char *buf, size_t size)
1102 {
1103 u8 idx;
1104 unsigned long val;
1105 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1106 struct etmv4_config *config = &drvdata->config;
1107
1108 if (kstrtoul(buf, 16, &val))
1109 return -EINVAL;
1110
1111 spin_lock(&drvdata->spinlock);
1112 idx = config->addr_idx;
1113 if (!drvdata->nr_addr_cmp) {
1114 spin_unlock(&drvdata->spinlock);
1115 return -EINVAL;
1116 }
1117 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1118 config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1119 spin_unlock(&drvdata->spinlock);
1120 return -EPERM;
1121 }
1122
1123 config->addr_val[idx] = (u64)val;
1124 config->addr_type[idx] = ETM_ADDR_TYPE_STOP;
1125 config->vissctlr |= BIT(idx + 16);
1126 spin_unlock(&drvdata->spinlock);
1127 return size;
1128 }
1129 static DEVICE_ATTR_RW(addr_stop);
1130
addr_ctxtype_show(struct device * dev,struct device_attribute * attr,char * buf)1131 static ssize_t addr_ctxtype_show(struct device *dev,
1132 struct device_attribute *attr,
1133 char *buf)
1134 {
1135 ssize_t len;
1136 u8 idx, val;
1137 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1138 struct etmv4_config *config = &drvdata->config;
1139
1140 spin_lock(&drvdata->spinlock);
1141 idx = config->addr_idx;
1142 /* CONTEXTTYPE, bits[3:2] */
1143 val = BMVAL(config->addr_acc[idx], 2, 3);
1144 len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" :
1145 (val == ETM_CTX_CTXID ? "ctxid" :
1146 (val == ETM_CTX_VMID ? "vmid" : "all")));
1147 spin_unlock(&drvdata->spinlock);
1148 return len;
1149 }
1150
addr_ctxtype_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)1151 static ssize_t addr_ctxtype_store(struct device *dev,
1152 struct device_attribute *attr,
1153 const char *buf, size_t size)
1154 {
1155 u8 idx;
1156 char str[10] = "";
1157 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1158 struct etmv4_config *config = &drvdata->config;
1159
1160 if (strlen(buf) >= 10)
1161 return -EINVAL;
1162 if (sscanf(buf, "%s", str) != 1)
1163 return -EINVAL;
1164
1165 spin_lock(&drvdata->spinlock);
1166 idx = config->addr_idx;
1167 if (!strcmp(str, "none"))
1168 /* start by clearing context type bits */
1169 config->addr_acc[idx] &= ~(BIT(2) | BIT(3));
1170 else if (!strcmp(str, "ctxid")) {
1171 /* 0b01 The trace unit performs a Context ID */
1172 if (drvdata->numcidc) {
1173 config->addr_acc[idx] |= BIT(2);
1174 config->addr_acc[idx] &= ~BIT(3);
1175 }
1176 } else if (!strcmp(str, "vmid")) {
1177 /* 0b10 The trace unit performs a VMID */
1178 if (drvdata->numvmidc) {
1179 config->addr_acc[idx] &= ~BIT(2);
1180 config->addr_acc[idx] |= BIT(3);
1181 }
1182 } else if (!strcmp(str, "all")) {
1183 /*
1184 * 0b11 The trace unit performs a Context ID
1185 * comparison and a VMID
1186 */
1187 if (drvdata->numcidc)
1188 config->addr_acc[idx] |= BIT(2);
1189 if (drvdata->numvmidc)
1190 config->addr_acc[idx] |= BIT(3);
1191 }
1192 spin_unlock(&drvdata->spinlock);
1193 return size;
1194 }
1195 static DEVICE_ATTR_RW(addr_ctxtype);
1196
addr_context_show(struct device * dev,struct device_attribute * attr,char * buf)1197 static ssize_t addr_context_show(struct device *dev,
1198 struct device_attribute *attr,
1199 char *buf)
1200 {
1201 u8 idx;
1202 unsigned long val;
1203 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1204 struct etmv4_config *config = &drvdata->config;
1205
1206 spin_lock(&drvdata->spinlock);
1207 idx = config->addr_idx;
1208 /* context ID comparator bits[6:4] */
1209 val = BMVAL(config->addr_acc[idx], 4, 6);
1210 spin_unlock(&drvdata->spinlock);
1211 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1212 }
1213
addr_context_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)1214 static ssize_t addr_context_store(struct device *dev,
1215 struct device_attribute *attr,
1216 const char *buf, size_t size)
1217 {
1218 u8 idx;
1219 unsigned long val;
1220 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1221 struct etmv4_config *config = &drvdata->config;
1222
1223 if (kstrtoul(buf, 16, &val))
1224 return -EINVAL;
1225 if ((drvdata->numcidc <= 1) && (drvdata->numvmidc <= 1))
1226 return -EINVAL;
1227 if (val >= (drvdata->numcidc >= drvdata->numvmidc ?
1228 drvdata->numcidc : drvdata->numvmidc))
1229 return -EINVAL;
1230
1231 spin_lock(&drvdata->spinlock);
1232 idx = config->addr_idx;
1233 /* clear context ID comparator bits[6:4] */
1234 config->addr_acc[idx] &= ~(BIT(4) | BIT(5) | BIT(6));
1235 config->addr_acc[idx] |= (val << 4);
1236 spin_unlock(&drvdata->spinlock);
1237 return size;
1238 }
1239 static DEVICE_ATTR_RW(addr_context);
1240
addr_exlevel_s_ns_show(struct device * dev,struct device_attribute * attr,char * buf)1241 static ssize_t addr_exlevel_s_ns_show(struct device *dev,
1242 struct device_attribute *attr,
1243 char *buf)
1244 {
1245 u8 idx;
1246 unsigned long val;
1247 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1248 struct etmv4_config *config = &drvdata->config;
1249
1250 spin_lock(&drvdata->spinlock);
1251 idx = config->addr_idx;
1252 val = BMVAL(config->addr_acc[idx], 8, 14);
1253 spin_unlock(&drvdata->spinlock);
1254 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1255 }
1256
addr_exlevel_s_ns_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)1257 static ssize_t addr_exlevel_s_ns_store(struct device *dev,
1258 struct device_attribute *attr,
1259 const char *buf, size_t size)
1260 {
1261 u8 idx;
1262 unsigned long val;
1263 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1264 struct etmv4_config *config = &drvdata->config;
1265
1266 if (kstrtoul(buf, 0, &val))
1267 return -EINVAL;
1268
1269 if (val & ~((GENMASK(14, 8) >> 8)))
1270 return -EINVAL;
1271
1272 spin_lock(&drvdata->spinlock);
1273 idx = config->addr_idx;
1274 /* clear Exlevel_ns & Exlevel_s bits[14:12, 11:8], bit[15] is res0 */
1275 config->addr_acc[idx] &= ~(GENMASK(14, 8));
1276 config->addr_acc[idx] |= (val << 8);
1277 spin_unlock(&drvdata->spinlock);
1278 return size;
1279 }
1280 static DEVICE_ATTR_RW(addr_exlevel_s_ns);
1281
/* Human-readable names for the ETM_ADDR_TYPE_* values, indexed by type. */
static const char * const addr_type_names[] = {
	"unused",
	"single",
	"range",
	"start",
	"stop"
};
1289
/*
 * addr_cmp_view: one-line summary of the currently selected address
 * comparator - its type, programmed value(s) and raw control register.
 * For a range comparator the whole pair is shown, normalised to the
 * even (pair-leading) index, along with include/exclude status.
 */
static ssize_t addr_cmp_view_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	u8 idx, addr_type;
	unsigned long addr_v, addr_v2, addr_ctrl;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;
	int size = 0;
	bool exclude = false;

	/* Snapshot everything under the lock, format after releasing it. */
	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	addr_v = config->addr_val[idx];
	addr_ctrl = config->addr_acc[idx];
	addr_type = config->addr_type[idx];
	if (addr_type == ETM_ADDR_TYPE_RANGE) {
		if (idx & 0x1) {
			/* Odd index: report the pair from its even base. */
			idx -= 1;
			addr_v2 = addr_v;
			addr_v = config->addr_val[idx];
		} else {
			addr_v2 = config->addr_val[idx + 1];
		}
		/* viiectlr bit[idx/2 + 16] set means the range is excluded. */
		exclude = config->viiectlr & BIT(idx / 2 + 16);
	}
	spin_unlock(&drvdata->spinlock);
	if (addr_type) {
		size = scnprintf(buf, PAGE_SIZE, "addr_cmp[%i] %s %#lx", idx,
				 addr_type_names[addr_type], addr_v);
		if (addr_type == ETM_ADDR_TYPE_RANGE) {
			size += scnprintf(buf + size, PAGE_SIZE - size,
					  " %#lx %s", addr_v2,
					  exclude ? "exclude" : "include");
		}
		size += scnprintf(buf + size, PAGE_SIZE - size,
				  " ctrl(%#lx)\n", addr_ctrl);
	} else {
		/* addr_type == ETM_ADDR_TYPE_NONE (0): comparator unused. */
		size = scnprintf(buf, PAGE_SIZE, "addr_cmp[%i] unused\n", idx);
	}
	return size;
}
static DEVICE_ATTR_RO(addr_cmp_view);
1332
vinst_pe_cmp_start_stop_show(struct device * dev,struct device_attribute * attr,char * buf)1333 static ssize_t vinst_pe_cmp_start_stop_show(struct device *dev,
1334 struct device_attribute *attr,
1335 char *buf)
1336 {
1337 unsigned long val;
1338 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1339 struct etmv4_config *config = &drvdata->config;
1340
1341 if (!drvdata->nr_pe_cmp)
1342 return -EINVAL;
1343 val = config->vipcssctlr;
1344 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1345 }
vinst_pe_cmp_start_stop_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)1346 static ssize_t vinst_pe_cmp_start_stop_store(struct device *dev,
1347 struct device_attribute *attr,
1348 const char *buf, size_t size)
1349 {
1350 unsigned long val;
1351 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1352 struct etmv4_config *config = &drvdata->config;
1353
1354 if (kstrtoul(buf, 16, &val))
1355 return -EINVAL;
1356 if (!drvdata->nr_pe_cmp)
1357 return -EINVAL;
1358
1359 spin_lock(&drvdata->spinlock);
1360 config->vipcssctlr = val;
1361 spin_unlock(&drvdata->spinlock);
1362 return size;
1363 }
1364 static DEVICE_ATTR_RW(vinst_pe_cmp_start_stop);
1365
seq_idx_show(struct device * dev,struct device_attribute * attr,char * buf)1366 static ssize_t seq_idx_show(struct device *dev,
1367 struct device_attribute *attr,
1368 char *buf)
1369 {
1370 unsigned long val;
1371 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1372 struct etmv4_config *config = &drvdata->config;
1373
1374 val = config->seq_idx;
1375 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1376 }
1377
seq_idx_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)1378 static ssize_t seq_idx_store(struct device *dev,
1379 struct device_attribute *attr,
1380 const char *buf, size_t size)
1381 {
1382 unsigned long val;
1383 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1384 struct etmv4_config *config = &drvdata->config;
1385
1386 if (kstrtoul(buf, 16, &val))
1387 return -EINVAL;
1388 if (val >= drvdata->nrseqstate - 1)
1389 return -EINVAL;
1390
1391 /*
1392 * Use spinlock to ensure index doesn't change while it gets
1393 * dereferenced multiple times within a spinlock block elsewhere.
1394 */
1395 spin_lock(&drvdata->spinlock);
1396 config->seq_idx = val;
1397 spin_unlock(&drvdata->spinlock);
1398 return size;
1399 }
1400 static DEVICE_ATTR_RW(seq_idx);
1401
seq_state_show(struct device * dev,struct device_attribute * attr,char * buf)1402 static ssize_t seq_state_show(struct device *dev,
1403 struct device_attribute *attr,
1404 char *buf)
1405 {
1406 unsigned long val;
1407 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1408 struct etmv4_config *config = &drvdata->config;
1409
1410 val = config->seq_state;
1411 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1412 }
1413
seq_state_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)1414 static ssize_t seq_state_store(struct device *dev,
1415 struct device_attribute *attr,
1416 const char *buf, size_t size)
1417 {
1418 unsigned long val;
1419 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1420 struct etmv4_config *config = &drvdata->config;
1421
1422 if (kstrtoul(buf, 16, &val))
1423 return -EINVAL;
1424 if (val >= drvdata->nrseqstate)
1425 return -EINVAL;
1426
1427 config->seq_state = val;
1428 return size;
1429 }
1430 static DEVICE_ATTR_RW(seq_state);
1431
seq_event_show(struct device * dev,struct device_attribute * attr,char * buf)1432 static ssize_t seq_event_show(struct device *dev,
1433 struct device_attribute *attr,
1434 char *buf)
1435 {
1436 u8 idx;
1437 unsigned long val;
1438 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1439 struct etmv4_config *config = &drvdata->config;
1440
1441 spin_lock(&drvdata->spinlock);
1442 idx = config->seq_idx;
1443 val = config->seq_ctrl[idx];
1444 spin_unlock(&drvdata->spinlock);
1445 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1446 }
1447
seq_event_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)1448 static ssize_t seq_event_store(struct device *dev,
1449 struct device_attribute *attr,
1450 const char *buf, size_t size)
1451 {
1452 u8 idx;
1453 unsigned long val;
1454 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1455 struct etmv4_config *config = &drvdata->config;
1456
1457 if (kstrtoul(buf, 16, &val))
1458 return -EINVAL;
1459
1460 spin_lock(&drvdata->spinlock);
1461 idx = config->seq_idx;
1462 /* Seq control has two masks B[15:8] F[7:0] */
1463 config->seq_ctrl[idx] = val & 0xFFFF;
1464 spin_unlock(&drvdata->spinlock);
1465 return size;
1466 }
1467 static DEVICE_ATTR_RW(seq_event);
1468
seq_reset_event_show(struct device * dev,struct device_attribute * attr,char * buf)1469 static ssize_t seq_reset_event_show(struct device *dev,
1470 struct device_attribute *attr,
1471 char *buf)
1472 {
1473 unsigned long val;
1474 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1475 struct etmv4_config *config = &drvdata->config;
1476
1477 val = config->seq_rst;
1478 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1479 }
1480
seq_reset_event_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)1481 static ssize_t seq_reset_event_store(struct device *dev,
1482 struct device_attribute *attr,
1483 const char *buf, size_t size)
1484 {
1485 unsigned long val;
1486 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1487 struct etmv4_config *config = &drvdata->config;
1488
1489 if (kstrtoul(buf, 16, &val))
1490 return -EINVAL;
1491 if (!(drvdata->nrseqstate))
1492 return -EINVAL;
1493
1494 config->seq_rst = val & ETMv4_EVENT_MASK;
1495 return size;
1496 }
1497 static DEVICE_ATTR_RW(seq_reset_event);
1498
cntr_idx_show(struct device * dev,struct device_attribute * attr,char * buf)1499 static ssize_t cntr_idx_show(struct device *dev,
1500 struct device_attribute *attr,
1501 char *buf)
1502 {
1503 unsigned long val;
1504 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1505 struct etmv4_config *config = &drvdata->config;
1506
1507 val = config->cntr_idx;
1508 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1509 }
1510
cntr_idx_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)1511 static ssize_t cntr_idx_store(struct device *dev,
1512 struct device_attribute *attr,
1513 const char *buf, size_t size)
1514 {
1515 unsigned long val;
1516 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1517 struct etmv4_config *config = &drvdata->config;
1518
1519 if (kstrtoul(buf, 16, &val))
1520 return -EINVAL;
1521 if (val >= drvdata->nr_cntr)
1522 return -EINVAL;
1523
1524 /*
1525 * Use spinlock to ensure index doesn't change while it gets
1526 * dereferenced multiple times within a spinlock block elsewhere.
1527 */
1528 spin_lock(&drvdata->spinlock);
1529 config->cntr_idx = val;
1530 spin_unlock(&drvdata->spinlock);
1531 return size;
1532 }
1533 static DEVICE_ATTR_RW(cntr_idx);
1534
cntrldvr_show(struct device * dev,struct device_attribute * attr,char * buf)1535 static ssize_t cntrldvr_show(struct device *dev,
1536 struct device_attribute *attr,
1537 char *buf)
1538 {
1539 u8 idx;
1540 unsigned long val;
1541 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1542 struct etmv4_config *config = &drvdata->config;
1543
1544 spin_lock(&drvdata->spinlock);
1545 idx = config->cntr_idx;
1546 val = config->cntrldvr[idx];
1547 spin_unlock(&drvdata->spinlock);
1548 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1549 }
1550
cntrldvr_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)1551 static ssize_t cntrldvr_store(struct device *dev,
1552 struct device_attribute *attr,
1553 const char *buf, size_t size)
1554 {
1555 u8 idx;
1556 unsigned long val;
1557 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1558 struct etmv4_config *config = &drvdata->config;
1559
1560 if (kstrtoul(buf, 16, &val))
1561 return -EINVAL;
1562 if (val > ETM_CNTR_MAX_VAL)
1563 return -EINVAL;
1564
1565 spin_lock(&drvdata->spinlock);
1566 idx = config->cntr_idx;
1567 config->cntrldvr[idx] = val;
1568 spin_unlock(&drvdata->spinlock);
1569 return size;
1570 }
1571 static DEVICE_ATTR_RW(cntrldvr);
1572
cntr_val_show(struct device * dev,struct device_attribute * attr,char * buf)1573 static ssize_t cntr_val_show(struct device *dev,
1574 struct device_attribute *attr,
1575 char *buf)
1576 {
1577 u8 idx;
1578 unsigned long val;
1579 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1580 struct etmv4_config *config = &drvdata->config;
1581
1582 spin_lock(&drvdata->spinlock);
1583 idx = config->cntr_idx;
1584 val = config->cntr_val[idx];
1585 spin_unlock(&drvdata->spinlock);
1586 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1587 }
1588
cntr_val_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)1589 static ssize_t cntr_val_store(struct device *dev,
1590 struct device_attribute *attr,
1591 const char *buf, size_t size)
1592 {
1593 u8 idx;
1594 unsigned long val;
1595 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1596 struct etmv4_config *config = &drvdata->config;
1597
1598 if (kstrtoul(buf, 16, &val))
1599 return -EINVAL;
1600 if (val > ETM_CNTR_MAX_VAL)
1601 return -EINVAL;
1602
1603 spin_lock(&drvdata->spinlock);
1604 idx = config->cntr_idx;
1605 config->cntr_val[idx] = val;
1606 spin_unlock(&drvdata->spinlock);
1607 return size;
1608 }
1609 static DEVICE_ATTR_RW(cntr_val);
1610
cntr_ctrl_show(struct device * dev,struct device_attribute * attr,char * buf)1611 static ssize_t cntr_ctrl_show(struct device *dev,
1612 struct device_attribute *attr,
1613 char *buf)
1614 {
1615 u8 idx;
1616 unsigned long val;
1617 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1618 struct etmv4_config *config = &drvdata->config;
1619
1620 spin_lock(&drvdata->spinlock);
1621 idx = config->cntr_idx;
1622 val = config->cntr_ctrl[idx];
1623 spin_unlock(&drvdata->spinlock);
1624 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1625 }
1626
cntr_ctrl_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)1627 static ssize_t cntr_ctrl_store(struct device *dev,
1628 struct device_attribute *attr,
1629 const char *buf, size_t size)
1630 {
1631 u8 idx;
1632 unsigned long val;
1633 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1634 struct etmv4_config *config = &drvdata->config;
1635
1636 if (kstrtoul(buf, 16, &val))
1637 return -EINVAL;
1638
1639 spin_lock(&drvdata->spinlock);
1640 idx = config->cntr_idx;
1641 config->cntr_ctrl[idx] = val;
1642 spin_unlock(&drvdata->spinlock);
1643 return size;
1644 }
1645 static DEVICE_ATTR_RW(cntr_ctrl);
1646
res_idx_show(struct device * dev,struct device_attribute * attr,char * buf)1647 static ssize_t res_idx_show(struct device *dev,
1648 struct device_attribute *attr,
1649 char *buf)
1650 {
1651 unsigned long val;
1652 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1653 struct etmv4_config *config = &drvdata->config;
1654
1655 val = config->res_idx;
1656 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1657 }
1658
res_idx_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)1659 static ssize_t res_idx_store(struct device *dev,
1660 struct device_attribute *attr,
1661 const char *buf, size_t size)
1662 {
1663 unsigned long val;
1664 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1665 struct etmv4_config *config = &drvdata->config;
1666
1667 if (kstrtoul(buf, 16, &val))
1668 return -EINVAL;
1669 /*
1670 * Resource selector pair 0 is always implemented and reserved,
1671 * namely an idx with 0 and 1 is illegal.
1672 */
1673 if ((val < 2) || (val >= 2 * drvdata->nr_resource))
1674 return -EINVAL;
1675
1676 /*
1677 * Use spinlock to ensure index doesn't change while it gets
1678 * dereferenced multiple times within a spinlock block elsewhere.
1679 */
1680 spin_lock(&drvdata->spinlock);
1681 config->res_idx = val;
1682 spin_unlock(&drvdata->spinlock);
1683 return size;
1684 }
1685 static DEVICE_ATTR_RW(res_idx);
1686
res_ctrl_show(struct device * dev,struct device_attribute * attr,char * buf)1687 static ssize_t res_ctrl_show(struct device *dev,
1688 struct device_attribute *attr,
1689 char *buf)
1690 {
1691 u8 idx;
1692 unsigned long val;
1693 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1694 struct etmv4_config *config = &drvdata->config;
1695
1696 spin_lock(&drvdata->spinlock);
1697 idx = config->res_idx;
1698 val = config->res_ctrl[idx];
1699 spin_unlock(&drvdata->spinlock);
1700 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1701 }
1702
res_ctrl_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)1703 static ssize_t res_ctrl_store(struct device *dev,
1704 struct device_attribute *attr,
1705 const char *buf, size_t size)
1706 {
1707 u8 idx;
1708 unsigned long val;
1709 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1710 struct etmv4_config *config = &drvdata->config;
1711
1712 if (kstrtoul(buf, 16, &val))
1713 return -EINVAL;
1714
1715 spin_lock(&drvdata->spinlock);
1716 idx = config->res_idx;
1717 /* For odd idx pair inversal bit is RES0 */
1718 if (idx % 2 != 0)
1719 /* PAIRINV, bit[21] */
1720 val &= ~BIT(21);
1721 config->res_ctrl[idx] = val & GENMASK(21, 0);
1722 spin_unlock(&drvdata->spinlock);
1723 return size;
1724 }
1725 static DEVICE_ATTR_RW(res_ctrl);
1726
sshot_idx_show(struct device * dev,struct device_attribute * attr,char * buf)1727 static ssize_t sshot_idx_show(struct device *dev,
1728 struct device_attribute *attr, char *buf)
1729 {
1730 unsigned long val;
1731 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1732 struct etmv4_config *config = &drvdata->config;
1733
1734 val = config->ss_idx;
1735 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1736 }
1737
sshot_idx_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)1738 static ssize_t sshot_idx_store(struct device *dev,
1739 struct device_attribute *attr,
1740 const char *buf, size_t size)
1741 {
1742 unsigned long val;
1743 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1744 struct etmv4_config *config = &drvdata->config;
1745
1746 if (kstrtoul(buf, 16, &val))
1747 return -EINVAL;
1748 if (val >= drvdata->nr_ss_cmp)
1749 return -EINVAL;
1750
1751 spin_lock(&drvdata->spinlock);
1752 config->ss_idx = val;
1753 spin_unlock(&drvdata->spinlock);
1754 return size;
1755 }
1756 static DEVICE_ATTR_RW(sshot_idx);
1757
sshot_ctrl_show(struct device * dev,struct device_attribute * attr,char * buf)1758 static ssize_t sshot_ctrl_show(struct device *dev,
1759 struct device_attribute *attr,
1760 char *buf)
1761 {
1762 unsigned long val;
1763 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1764 struct etmv4_config *config = &drvdata->config;
1765
1766 spin_lock(&drvdata->spinlock);
1767 val = config->ss_ctrl[config->ss_idx];
1768 spin_unlock(&drvdata->spinlock);
1769 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1770 }
1771
sshot_ctrl_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)1772 static ssize_t sshot_ctrl_store(struct device *dev,
1773 struct device_attribute *attr,
1774 const char *buf, size_t size)
1775 {
1776 u8 idx;
1777 unsigned long val;
1778 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1779 struct etmv4_config *config = &drvdata->config;
1780
1781 if (kstrtoul(buf, 16, &val))
1782 return -EINVAL;
1783
1784 spin_lock(&drvdata->spinlock);
1785 idx = config->ss_idx;
1786 config->ss_ctrl[idx] = val & GENMASK(24, 0);
1787 /* must clear bit 31 in related status register on programming */
1788 config->ss_status[idx] &= ~BIT(31);
1789 spin_unlock(&drvdata->spinlock);
1790 return size;
1791 }
1792 static DEVICE_ATTR_RW(sshot_ctrl);
1793
sshot_status_show(struct device * dev,struct device_attribute * attr,char * buf)1794 static ssize_t sshot_status_show(struct device *dev,
1795 struct device_attribute *attr, char *buf)
1796 {
1797 unsigned long val;
1798 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1799 struct etmv4_config *config = &drvdata->config;
1800
1801 spin_lock(&drvdata->spinlock);
1802 val = config->ss_status[config->ss_idx];
1803 spin_unlock(&drvdata->spinlock);
1804 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1805 }
1806 static DEVICE_ATTR_RO(sshot_status);
1807
sshot_pe_ctrl_show(struct device * dev,struct device_attribute * attr,char * buf)1808 static ssize_t sshot_pe_ctrl_show(struct device *dev,
1809 struct device_attribute *attr,
1810 char *buf)
1811 {
1812 unsigned long val;
1813 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1814 struct etmv4_config *config = &drvdata->config;
1815
1816 spin_lock(&drvdata->spinlock);
1817 val = config->ss_pe_cmp[config->ss_idx];
1818 spin_unlock(&drvdata->spinlock);
1819 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1820 }
1821
sshot_pe_ctrl_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)1822 static ssize_t sshot_pe_ctrl_store(struct device *dev,
1823 struct device_attribute *attr,
1824 const char *buf, size_t size)
1825 {
1826 u8 idx;
1827 unsigned long val;
1828 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1829 struct etmv4_config *config = &drvdata->config;
1830
1831 if (kstrtoul(buf, 16, &val))
1832 return -EINVAL;
1833
1834 spin_lock(&drvdata->spinlock);
1835 idx = config->ss_idx;
1836 config->ss_pe_cmp[idx] = val & GENMASK(7, 0);
1837 /* must clear bit 31 in related status register on programming */
1838 config->ss_status[idx] &= ~BIT(31);
1839 spin_unlock(&drvdata->spinlock);
1840 return size;
1841 }
1842 static DEVICE_ATTR_RW(sshot_pe_ctrl);
1843
ctxid_idx_show(struct device * dev,struct device_attribute * attr,char * buf)1844 static ssize_t ctxid_idx_show(struct device *dev,
1845 struct device_attribute *attr,
1846 char *buf)
1847 {
1848 unsigned long val;
1849 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1850 struct etmv4_config *config = &drvdata->config;
1851
1852 val = config->ctxid_idx;
1853 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1854 }
1855
ctxid_idx_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)1856 static ssize_t ctxid_idx_store(struct device *dev,
1857 struct device_attribute *attr,
1858 const char *buf, size_t size)
1859 {
1860 unsigned long val;
1861 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1862 struct etmv4_config *config = &drvdata->config;
1863
1864 if (kstrtoul(buf, 16, &val))
1865 return -EINVAL;
1866 if (val >= drvdata->numcidc)
1867 return -EINVAL;
1868
1869 /*
1870 * Use spinlock to ensure index doesn't change while it gets
1871 * dereferenced multiple times within a spinlock block elsewhere.
1872 */
1873 spin_lock(&drvdata->spinlock);
1874 config->ctxid_idx = val;
1875 spin_unlock(&drvdata->spinlock);
1876 return size;
1877 }
1878 static DEVICE_ATTR_RW(ctxid_idx);
1879
ctxid_pid_show(struct device * dev,struct device_attribute * attr,char * buf)1880 static ssize_t ctxid_pid_show(struct device *dev,
1881 struct device_attribute *attr,
1882 char *buf)
1883 {
1884 u8 idx;
1885 unsigned long val;
1886 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1887 struct etmv4_config *config = &drvdata->config;
1888
1889 /*
1890 * Don't use contextID tracing if coming from a PID namespace. See
1891 * comment in ctxid_pid_store().
1892 */
1893 if (task_active_pid_ns(current) != &init_pid_ns)
1894 return -EINVAL;
1895
1896 spin_lock(&drvdata->spinlock);
1897 idx = config->ctxid_idx;
1898 val = (unsigned long)config->ctxid_pid[idx];
1899 spin_unlock(&drvdata->spinlock);
1900 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1901 }
1902
ctxid_pid_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)1903 static ssize_t ctxid_pid_store(struct device *dev,
1904 struct device_attribute *attr,
1905 const char *buf, size_t size)
1906 {
1907 u8 idx;
1908 unsigned long pid;
1909 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1910 struct etmv4_config *config = &drvdata->config;
1911
1912 /*
1913 * When contextID tracing is enabled the tracers will insert the
1914 * value found in the contextID register in the trace stream. But if
1915 * a process is in a namespace the PID of that process as seen from the
1916 * namespace won't be what the kernel sees, something that makes the
1917 * feature confusing and can potentially leak kernel only information.
1918 * As such refuse to use the feature if @current is not in the initial
1919 * PID namespace.
1920 */
1921 if (task_active_pid_ns(current) != &init_pid_ns)
1922 return -EINVAL;
1923
1924 /*
1925 * only implemented when ctxid tracing is enabled, i.e. at least one
1926 * ctxid comparator is implemented and ctxid is greater than 0 bits
1927 * in length
1928 */
1929 if (!drvdata->ctxid_size || !drvdata->numcidc)
1930 return -EINVAL;
1931 if (kstrtoul(buf, 16, &pid))
1932 return -EINVAL;
1933
1934 spin_lock(&drvdata->spinlock);
1935 idx = config->ctxid_idx;
1936 config->ctxid_pid[idx] = (u64)pid;
1937 spin_unlock(&drvdata->spinlock);
1938 return size;
1939 }
1940 static DEVICE_ATTR_RW(ctxid_pid);
1941
ctxid_masks_show(struct device * dev,struct device_attribute * attr,char * buf)1942 static ssize_t ctxid_masks_show(struct device *dev,
1943 struct device_attribute *attr,
1944 char *buf)
1945 {
1946 unsigned long val1, val2;
1947 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1948 struct etmv4_config *config = &drvdata->config;
1949
1950 /*
1951 * Don't use contextID tracing if coming from a PID namespace. See
1952 * comment in ctxid_pid_store().
1953 */
1954 if (task_active_pid_ns(current) != &init_pid_ns)
1955 return -EINVAL;
1956
1957 spin_lock(&drvdata->spinlock);
1958 val1 = config->ctxid_mask0;
1959 val2 = config->ctxid_mask1;
1960 spin_unlock(&drvdata->spinlock);
1961 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
1962 }
1963
ctxid_masks_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)1964 static ssize_t ctxid_masks_store(struct device *dev,
1965 struct device_attribute *attr,
1966 const char *buf, size_t size)
1967 {
1968 u8 i, j, maskbyte;
1969 unsigned long val1, val2, mask;
1970 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1971 struct etmv4_config *config = &drvdata->config;
1972 int nr_inputs;
1973
1974 /*
1975 * Don't use contextID tracing if coming from a PID namespace. See
1976 * comment in ctxid_pid_store().
1977 */
1978 if (task_active_pid_ns(current) != &init_pid_ns)
1979 return -EINVAL;
1980
1981 /*
1982 * only implemented when ctxid tracing is enabled, i.e. at least one
1983 * ctxid comparator is implemented and ctxid is greater than 0 bits
1984 * in length
1985 */
1986 if (!drvdata->ctxid_size || !drvdata->numcidc)
1987 return -EINVAL;
1988 /* one mask if <= 4 comparators, two for up to 8 */
1989 nr_inputs = sscanf(buf, "%lx %lx", &val1, &val2);
1990 if ((drvdata->numcidc > 4) && (nr_inputs != 2))
1991 return -EINVAL;
1992
1993 spin_lock(&drvdata->spinlock);
1994 /*
1995 * each byte[0..3] controls mask value applied to ctxid
1996 * comparator[0..3]
1997 */
1998 switch (drvdata->numcidc) {
1999 case 0x1:
2000 /* COMP0, bits[7:0] */
2001 config->ctxid_mask0 = val1 & 0xFF;
2002 break;
2003 case 0x2:
2004 /* COMP1, bits[15:8] */
2005 config->ctxid_mask0 = val1 & 0xFFFF;
2006 break;
2007 case 0x3:
2008 /* COMP2, bits[23:16] */
2009 config->ctxid_mask0 = val1 & 0xFFFFFF;
2010 break;
2011 case 0x4:
2012 /* COMP3, bits[31:24] */
2013 config->ctxid_mask0 = val1;
2014 break;
2015 case 0x5:
2016 /* COMP4, bits[7:0] */
2017 config->ctxid_mask0 = val1;
2018 config->ctxid_mask1 = val2 & 0xFF;
2019 break;
2020 case 0x6:
2021 /* COMP5, bits[15:8] */
2022 config->ctxid_mask0 = val1;
2023 config->ctxid_mask1 = val2 & 0xFFFF;
2024 break;
2025 case 0x7:
2026 /* COMP6, bits[23:16] */
2027 config->ctxid_mask0 = val1;
2028 config->ctxid_mask1 = val2 & 0xFFFFFF;
2029 break;
2030 case 0x8:
2031 /* COMP7, bits[31:24] */
2032 config->ctxid_mask0 = val1;
2033 config->ctxid_mask1 = val2;
2034 break;
2035 default:
2036 break;
2037 }
2038 /*
2039 * If software sets a mask bit to 1, it must program relevant byte
2040 * of ctxid comparator value 0x0, otherwise behavior is unpredictable.
2041 * For example, if bit[3] of ctxid_mask0 is 1, we must clear bits[31:24]
2042 * of ctxid comparator0 value (corresponding to byte 0) register.
2043 */
2044 mask = config->ctxid_mask0;
2045 for (i = 0; i < drvdata->numcidc; i++) {
2046 /* mask value of corresponding ctxid comparator */
2047 maskbyte = mask & ETMv4_EVENT_MASK;
2048 /*
2049 * each bit corresponds to a byte of respective ctxid comparator
2050 * value register
2051 */
2052 for (j = 0; j < 8; j++) {
2053 if (maskbyte & 1)
2054 config->ctxid_pid[i] &= ~(0xFFUL << (j * 8));
2055 maskbyte >>= 1;
2056 }
2057 /* Select the next ctxid comparator mask value */
2058 if (i == 3)
2059 /* ctxid comparators[4-7] */
2060 mask = config->ctxid_mask1;
2061 else
2062 mask >>= 0x8;
2063 }
2064
2065 spin_unlock(&drvdata->spinlock);
2066 return size;
2067 }
2068 static DEVICE_ATTR_RW(ctxid_masks);
2069
vmid_idx_show(struct device * dev,struct device_attribute * attr,char * buf)2070 static ssize_t vmid_idx_show(struct device *dev,
2071 struct device_attribute *attr,
2072 char *buf)
2073 {
2074 unsigned long val;
2075 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2076 struct etmv4_config *config = &drvdata->config;
2077
2078 val = config->vmid_idx;
2079 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
2080 }
2081
vmid_idx_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)2082 static ssize_t vmid_idx_store(struct device *dev,
2083 struct device_attribute *attr,
2084 const char *buf, size_t size)
2085 {
2086 unsigned long val;
2087 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2088 struct etmv4_config *config = &drvdata->config;
2089
2090 if (kstrtoul(buf, 16, &val))
2091 return -EINVAL;
2092 if (val >= drvdata->numvmidc)
2093 return -EINVAL;
2094
2095 /*
2096 * Use spinlock to ensure index doesn't change while it gets
2097 * dereferenced multiple times within a spinlock block elsewhere.
2098 */
2099 spin_lock(&drvdata->spinlock);
2100 config->vmid_idx = val;
2101 spin_unlock(&drvdata->spinlock);
2102 return size;
2103 }
2104 static DEVICE_ATTR_RW(vmid_idx);
2105
vmid_val_show(struct device * dev,struct device_attribute * attr,char * buf)2106 static ssize_t vmid_val_show(struct device *dev,
2107 struct device_attribute *attr,
2108 char *buf)
2109 {
2110 unsigned long val;
2111 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2112 struct etmv4_config *config = &drvdata->config;
2113
2114 val = (unsigned long)config->vmid_val[config->vmid_idx];
2115 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
2116 }
2117
vmid_val_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)2118 static ssize_t vmid_val_store(struct device *dev,
2119 struct device_attribute *attr,
2120 const char *buf, size_t size)
2121 {
2122 unsigned long val;
2123 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2124 struct etmv4_config *config = &drvdata->config;
2125
2126 /*
2127 * only implemented when vmid tracing is enabled, i.e. at least one
2128 * vmid comparator is implemented and at least 8 bit vmid size
2129 */
2130 if (!drvdata->vmid_size || !drvdata->numvmidc)
2131 return -EINVAL;
2132 if (kstrtoul(buf, 16, &val))
2133 return -EINVAL;
2134
2135 spin_lock(&drvdata->spinlock);
2136 config->vmid_val[config->vmid_idx] = (u64)val;
2137 spin_unlock(&drvdata->spinlock);
2138 return size;
2139 }
2140 static DEVICE_ATTR_RW(vmid_val);
2141
vmid_masks_show(struct device * dev,struct device_attribute * attr,char * buf)2142 static ssize_t vmid_masks_show(struct device *dev,
2143 struct device_attribute *attr, char *buf)
2144 {
2145 unsigned long val1, val2;
2146 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2147 struct etmv4_config *config = &drvdata->config;
2148
2149 spin_lock(&drvdata->spinlock);
2150 val1 = config->vmid_mask0;
2151 val2 = config->vmid_mask1;
2152 spin_unlock(&drvdata->spinlock);
2153 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
2154 }
2155
vmid_masks_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)2156 static ssize_t vmid_masks_store(struct device *dev,
2157 struct device_attribute *attr,
2158 const char *buf, size_t size)
2159 {
2160 u8 i, j, maskbyte;
2161 unsigned long val1, val2, mask;
2162 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2163 struct etmv4_config *config = &drvdata->config;
2164 int nr_inputs;
2165
2166 /*
2167 * only implemented when vmid tracing is enabled, i.e. at least one
2168 * vmid comparator is implemented and at least 8 bit vmid size
2169 */
2170 if (!drvdata->vmid_size || !drvdata->numvmidc)
2171 return -EINVAL;
2172 /* one mask if <= 4 comparators, two for up to 8 */
2173 nr_inputs = sscanf(buf, "%lx %lx", &val1, &val2);
2174 if ((drvdata->numvmidc > 4) && (nr_inputs != 2))
2175 return -EINVAL;
2176
2177 spin_lock(&drvdata->spinlock);
2178
2179 /*
2180 * each byte[0..3] controls mask value applied to vmid
2181 * comparator[0..3]
2182 */
2183 switch (drvdata->numvmidc) {
2184 case 0x1:
2185 /* COMP0, bits[7:0] */
2186 config->vmid_mask0 = val1 & 0xFF;
2187 break;
2188 case 0x2:
2189 /* COMP1, bits[15:8] */
2190 config->vmid_mask0 = val1 & 0xFFFF;
2191 break;
2192 case 0x3:
2193 /* COMP2, bits[23:16] */
2194 config->vmid_mask0 = val1 & 0xFFFFFF;
2195 break;
2196 case 0x4:
2197 /* COMP3, bits[31:24] */
2198 config->vmid_mask0 = val1;
2199 break;
2200 case 0x5:
2201 /* COMP4, bits[7:0] */
2202 config->vmid_mask0 = val1;
2203 config->vmid_mask1 = val2 & 0xFF;
2204 break;
2205 case 0x6:
2206 /* COMP5, bits[15:8] */
2207 config->vmid_mask0 = val1;
2208 config->vmid_mask1 = val2 & 0xFFFF;
2209 break;
2210 case 0x7:
2211 /* COMP6, bits[23:16] */
2212 config->vmid_mask0 = val1;
2213 config->vmid_mask1 = val2 & 0xFFFFFF;
2214 break;
2215 case 0x8:
2216 /* COMP7, bits[31:24] */
2217 config->vmid_mask0 = val1;
2218 config->vmid_mask1 = val2;
2219 break;
2220 default:
2221 break;
2222 }
2223
2224 /*
2225 * If software sets a mask bit to 1, it must program relevant byte
2226 * of vmid comparator value 0x0, otherwise behavior is unpredictable.
2227 * For example, if bit[3] of vmid_mask0 is 1, we must clear bits[31:24]
2228 * of vmid comparator0 value (corresponding to byte 0) register.
2229 */
2230 mask = config->vmid_mask0;
2231 for (i = 0; i < drvdata->numvmidc; i++) {
2232 /* mask value of corresponding vmid comparator */
2233 maskbyte = mask & ETMv4_EVENT_MASK;
2234 /*
2235 * each bit corresponds to a byte of respective vmid comparator
2236 * value register
2237 */
2238 for (j = 0; j < 8; j++) {
2239 if (maskbyte & 1)
2240 config->vmid_val[i] &= ~(0xFFUL << (j * 8));
2241 maskbyte >>= 1;
2242 }
2243 /* Select the next vmid comparator mask value */
2244 if (i == 3)
2245 /* vmid comparators[4-7] */
2246 mask = config->vmid_mask1;
2247 else
2248 mask >>= 0x8;
2249 }
2250 spin_unlock(&drvdata->spinlock);
2251 return size;
2252 }
2253 static DEVICE_ATTR_RW(vmid_masks);
2254
cpu_show(struct device * dev,struct device_attribute * attr,char * buf)2255 static ssize_t cpu_show(struct device *dev,
2256 struct device_attribute *attr, char *buf)
2257 {
2258 int val;
2259 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2260
2261 val = drvdata->cpu;
2262 return scnprintf(buf, PAGE_SIZE, "%d\n", val);
2263
2264 }
2265 static DEVICE_ATTR_RO(cpu);
2266
/* sysfs attributes exposed in the device's top-level directory. */
static struct attribute *coresight_etmv4_attrs[] = {
	&dev_attr_nr_pe_cmp.attr,
	&dev_attr_nr_addr_cmp.attr,
	&dev_attr_nr_cntr.attr,
	&dev_attr_nr_ext_inp.attr,
	&dev_attr_numcidc.attr,
	&dev_attr_numvmidc.attr,
	&dev_attr_nrseqstate.attr,
	&dev_attr_nr_resource.attr,
	&dev_attr_nr_ss_cmp.attr,
	&dev_attr_reset.attr,
	&dev_attr_mode.attr,
	&dev_attr_pe.attr,
	&dev_attr_event.attr,
	&dev_attr_event_instren.attr,
	&dev_attr_event_ts.attr,
	&dev_attr_syncfreq.attr,
	&dev_attr_cyc_threshold.attr,
	&dev_attr_bb_ctrl.attr,
	&dev_attr_event_vinst.attr,
	&dev_attr_s_exlevel_vinst.attr,
	&dev_attr_ns_exlevel_vinst.attr,
	&dev_attr_addr_idx.attr,
	&dev_attr_addr_instdatatype.attr,
	&dev_attr_addr_single.attr,
	&dev_attr_addr_range.attr,
	&dev_attr_addr_start.attr,
	&dev_attr_addr_stop.attr,
	&dev_attr_addr_ctxtype.attr,
	&dev_attr_addr_context.attr,
	&dev_attr_addr_exlevel_s_ns.attr,
	&dev_attr_addr_cmp_view.attr,
	&dev_attr_vinst_pe_cmp_start_stop.attr,
	&dev_attr_sshot_idx.attr,
	&dev_attr_sshot_ctrl.attr,
	&dev_attr_sshot_pe_ctrl.attr,
	&dev_attr_sshot_status.attr,
	&dev_attr_seq_idx.attr,
	&dev_attr_seq_state.attr,
	&dev_attr_seq_event.attr,
	&dev_attr_seq_reset_event.attr,
	&dev_attr_cntr_idx.attr,
	&dev_attr_cntrldvr.attr,
	&dev_attr_cntr_val.attr,
	&dev_attr_cntr_ctrl.attr,
	&dev_attr_res_idx.attr,
	&dev_attr_res_ctrl.attr,
	&dev_attr_ctxid_idx.attr,
	&dev_attr_ctxid_pid.attr,
	&dev_attr_ctxid_masks.attr,
	&dev_attr_vmid_idx.attr,
	&dev_attr_vmid_val.attr,
	&dev_attr_vmid_masks.attr,
	&dev_attr_cpu.attr,
	NULL,
};
2323
/* Parameter block for reading an ETMv4 register on a remote CPU. */
struct etmv4_reg {
	struct coresight_device *csdev;	/* device whose register is accessed */
	u32 offset;			/* register offset to read */
	u32 data;			/* value read back by do_smp_cross_read() */
};
2329
do_smp_cross_read(void * data)2330 static void do_smp_cross_read(void *data)
2331 {
2332 struct etmv4_reg *reg = data;
2333
2334 reg->data = etm4x_relaxed_read32(®->csdev->access, reg->offset);
2335 }
2336
static u32 etmv4_cross_read(const struct etmv4_drvdata *drvdata, u32 offset)
{
	struct etmv4_reg reg = {
		.csdev = drvdata->csdev,
		.offset = offset,
	};

	/*
	 * smp cross call ensures the CPU will be powered up before
	 * accessing the ETMv4 trace core registers
	 */
	smp_call_function_single(drvdata->cpu, do_smp_cross_read, &reg, 1);
	return reg.data;
}
2351
coresight_etm4x_attr_to_offset(struct device_attribute * attr)2352 static inline u32 coresight_etm4x_attr_to_offset(struct device_attribute *attr)
2353 {
2354 struct dev_ext_attribute *eattr;
2355
2356 eattr = container_of(attr, struct dev_ext_attribute, attr);
2357 return (u32)(unsigned long)eattr->var;
2358 }
2359
coresight_etm4x_reg_show(struct device * dev,struct device_attribute * d_attr,char * buf)2360 static ssize_t coresight_etm4x_reg_show(struct device *dev,
2361 struct device_attribute *d_attr,
2362 char *buf)
2363 {
2364 u32 val, offset;
2365 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2366
2367 offset = coresight_etm4x_attr_to_offset(d_attr);
2368
2369 pm_runtime_get_sync(dev->parent);
2370 val = etmv4_cross_read(drvdata, offset);
2371 pm_runtime_put_sync(dev->parent);
2372
2373 return scnprintf(buf, PAGE_SIZE, "0x%x\n", val);
2374 }
2375
/*
 * Decide whether the management register at @offset exists on this device,
 * based on the register class (macro-generated case lists) and whether the
 * device is an ETE or an ETMv4x, and whether access is memory-mapped.
 */
static inline bool
etm4x_register_implemented(struct etmv4_drvdata *drvdata, u32 offset)
{
	switch (offset) {
	ETM_COMMON_SYSREG_LIST_CASES
		/*
		 * Common registers to ETE & ETM4x accessible via system
		 * instructions are always implemented.
		 */
		return true;

	ETM4x_ONLY_SYSREG_LIST_CASES
		/*
		 * We only support etm4x and ete. So if the device is not
		 * ETE, it must be ETMv4x.
		 */
		return !etm4x_is_ete(drvdata);

	ETM4x_MMAP_LIST_CASES
		/*
		 * Registers accessible only via memory-mapped registers
		 * must not be accessed via system instructions.
		 * We cannot access the drvdata->csdev here, as this
		 * function is called during the device creation, via
		 * coresight_register() and the csdev is not initialized
		 * until that is done. So rely on the drvdata->base to
		 * detect if we have a memory mapped access.
		 * Also ETE doesn't implement memory mapped access, thus
		 * it is sufficient to check that we are using mmio.
		 */
		return !!drvdata->base;

	ETE_ONLY_SYSREG_LIST_CASES
		return etm4x_is_ete(drvdata);
	}

	return false;
}
2414
2415 /*
2416 * Hide the ETM4x registers that may not be available on the
2417 * hardware.
2418 * There are certain management registers unavailable via system
2419 * instructions. Make those sysfs attributes hidden on such
2420 * systems.
2421 */
2422 static umode_t
coresight_etm4x_attr_reg_implemented(struct kobject * kobj,struct attribute * attr,int unused)2423 coresight_etm4x_attr_reg_implemented(struct kobject *kobj,
2424 struct attribute *attr, int unused)
2425 {
2426 struct device *dev = kobj_to_dev(kobj);
2427 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2428 struct device_attribute *d_attr;
2429 u32 offset;
2430
2431 d_attr = container_of(attr, struct device_attribute, attr);
2432 offset = coresight_etm4x_attr_to_offset(d_attr);
2433
2434 if (etm4x_register_implemented(drvdata, offset))
2435 return attr->mode;
2436 return 0;
2437 }
2438
/*
 * Build a read-only dev_ext_attribute whose ->var carries the register
 * offset, decoded later by coresight_etm4x_attr_to_offset().
 */
#define coresight_etm4x_reg(name, offset)			\
	&((struct dev_ext_attribute[]) {			\
	   {							\
		__ATTR(name, 0444, coresight_etm4x_reg_show, NULL),	\
		(void *)(unsigned long)offset			\
	   }							\
	})[0].attr.attr
2446
/* Raw management registers, exposed under the "mgmt" sysfs group. */
static struct attribute *coresight_etmv4_mgmt_attrs[] = {
	coresight_etm4x_reg(trcpdcr, TRCPDCR),
	coresight_etm4x_reg(trcpdsr, TRCPDSR),
	coresight_etm4x_reg(trclsr, TRCLSR),
	coresight_etm4x_reg(trcauthstatus, TRCAUTHSTATUS),
	coresight_etm4x_reg(trcdevid, TRCDEVID),
	coresight_etm4x_reg(trcdevtype, TRCDEVTYPE),
	coresight_etm4x_reg(trcpidr0, TRCPIDR0),
	coresight_etm4x_reg(trcpidr1, TRCPIDR1),
	coresight_etm4x_reg(trcpidr2, TRCPIDR2),
	coresight_etm4x_reg(trcpidr3, TRCPIDR3),
	coresight_etm4x_reg(trcoslsr, TRCOSLSR),
	coresight_etm4x_reg(trcconfig, TRCCONFIGR),
	coresight_etm4x_reg(trctraceid, TRCTRACEIDR),
	coresight_etm4x_reg(trcdevarch, TRCDEVARCH),
	NULL,
};
2464
/* ID registers describing implemented features, under "trcidr". */
static struct attribute *coresight_etmv4_trcidr_attrs[] = {
	coresight_etm4x_reg(trcidr0, TRCIDR0),
	coresight_etm4x_reg(trcidr1, TRCIDR1),
	coresight_etm4x_reg(trcidr2, TRCIDR2),
	coresight_etm4x_reg(trcidr3, TRCIDR3),
	coresight_etm4x_reg(trcidr4, TRCIDR4),
	coresight_etm4x_reg(trcidr5, TRCIDR5),
	/* trcidr[6,7] are reserved */
	coresight_etm4x_reg(trcidr8, TRCIDR8),
	coresight_etm4x_reg(trcidr9, TRCIDR9),
	coresight_etm4x_reg(trcidr10, TRCIDR10),
	coresight_etm4x_reg(trcidr11, TRCIDR11),
	coresight_etm4x_reg(trcidr12, TRCIDR12),
	coresight_etm4x_reg(trcidr13, TRCIDR13),
	NULL,
};
2481
/* Default (unnamed) attribute group. */
static const struct attribute_group coresight_etmv4_group = {
	.attrs = coresight_etmv4_attrs,
};

/* "mgmt" group; entries hidden when the register isn't implemented. */
static const struct attribute_group coresight_etmv4_mgmt_group = {
	.is_visible = coresight_etm4x_attr_reg_implemented,
	.attrs = coresight_etmv4_mgmt_attrs,
	.name = "mgmt",
};

/* "trcidr" group of ID registers. */
static const struct attribute_group coresight_etmv4_trcidr_group = {
	.attrs = coresight_etmv4_trcidr_attrs,
	.name = "trcidr",
};

/* Null-terminated list passed to coresight_register(). */
const struct attribute_group *coresight_etmv4_groups[] = {
	&coresight_etmv4_group,
	&coresight_etmv4_mgmt_group,
	&coresight_etmv4_trcidr_group,
	NULL,
};
2503