1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright(C) 2015 Linaro Limited. All rights reserved.
4 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
5 */
6
7 #include <linux/pid_namespace.h>
8 #include <linux/pm_runtime.h>
9 #include <linux/sysfs.h>
10 #include "coresight-etm4x.h"
11 #include "coresight-priv.h"
12
13 static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude)
14 {
15 u8 idx;
16 struct etmv4_config *config = &drvdata->config;
17
18 idx = config->addr_idx;
19
20 /*
21 * TRCACATRn.TYPE bit[1:0]: type of comparison
22 * the trace unit performs
23 */
24 if (BMVAL(config->addr_acc[idx], 0, 1) == ETM_INSTR_ADDR) {
25 if (idx % 2 != 0)
26 return -EINVAL;
27
28 /*
29 * We are performing instruction address comparison. Set the
30 * relevant bit of ViewInst Include/Exclude Control register
31 * for corresponding address comparator pair.
32 */
33 if (config->addr_type[idx] != ETM_ADDR_TYPE_RANGE ||
34 config->addr_type[idx + 1] != ETM_ADDR_TYPE_RANGE)
35 return -EINVAL;
36
37 if (exclude == true) {
38 /*
39 * Set exclude bit and unset the include bit
40 * corresponding to comparator pair
41 */
42 config->viiectlr |= BIT(idx / 2 + 16);
43 config->viiectlr &= ~BIT(idx / 2);
44 } else {
45 /*
46 * Set include bit and unset exclude bit
47 * corresponding to comparator pair
48 */
49 config->viiectlr |= BIT(idx / 2);
50 config->viiectlr &= ~BIT(idx / 2 + 16);
51 }
52 }
53 return 0;
54 }
55
56 static ssize_t nr_pe_cmp_show(struct device *dev,
57 struct device_attribute *attr,
58 char *buf)
59 {
60 unsigned long val;
61 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
62
63 val = drvdata->nr_pe_cmp;
64 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
65 }
66 static DEVICE_ATTR_RO(nr_pe_cmp);
67
68 static ssize_t nr_addr_cmp_show(struct device *dev,
69 struct device_attribute *attr,
70 char *buf)
71 {
72 unsigned long val;
73 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
74
75 val = drvdata->nr_addr_cmp;
76 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
77 }
78 static DEVICE_ATTR_RO(nr_addr_cmp);
79
80 static ssize_t nr_cntr_show(struct device *dev,
81 struct device_attribute *attr,
82 char *buf)
83 {
84 unsigned long val;
85 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
86
87 val = drvdata->nr_cntr;
88 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
89 }
90 static DEVICE_ATTR_RO(nr_cntr);
91
92 static ssize_t nr_ext_inp_show(struct device *dev,
93 struct device_attribute *attr,
94 char *buf)
95 {
96 unsigned long val;
97 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
98
99 val = drvdata->nr_ext_inp;
100 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
101 }
102 static DEVICE_ATTR_RO(nr_ext_inp);
103
104 static ssize_t numcidc_show(struct device *dev,
105 struct device_attribute *attr,
106 char *buf)
107 {
108 unsigned long val;
109 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
110
111 val = drvdata->numcidc;
112 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
113 }
114 static DEVICE_ATTR_RO(numcidc);
115
116 static ssize_t numvmidc_show(struct device *dev,
117 struct device_attribute *attr,
118 char *buf)
119 {
120 unsigned long val;
121 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
122
123 val = drvdata->numvmidc;
124 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
125 }
126 static DEVICE_ATTR_RO(numvmidc);
127
128 static ssize_t nrseqstate_show(struct device *dev,
129 struct device_attribute *attr,
130 char *buf)
131 {
132 unsigned long val;
133 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
134
135 val = drvdata->nrseqstate;
136 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
137 }
138 static DEVICE_ATTR_RO(nrseqstate);
139
140 static ssize_t nr_resource_show(struct device *dev,
141 struct device_attribute *attr,
142 char *buf)
143 {
144 unsigned long val;
145 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
146
147 val = drvdata->nr_resource;
148 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
149 }
150 static DEVICE_ATTR_RO(nr_resource);
151
152 static ssize_t nr_ss_cmp_show(struct device *dev,
153 struct device_attribute *attr,
154 char *buf)
155 {
156 unsigned long val;
157 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
158
159 val = drvdata->nr_ss_cmp;
160 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
161 }
162 static DEVICE_ATTR_RO(nr_ss_cmp);
163
164 static ssize_t reset_store(struct device *dev,
165 struct device_attribute *attr,
166 const char *buf, size_t size)
167 {
168 int i;
169 unsigned long val;
170 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
171 struct etmv4_config *config = &drvdata->config;
172
173 if (kstrtoul(buf, 16, &val))
174 return -EINVAL;
175
176 spin_lock(&drvdata->spinlock);
177 if (val)
178 config->mode = 0x0;
179
180 /* Disable data tracing: do not trace load and store data transfers */
181 config->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE);
182 config->cfg &= ~(BIT(1) | BIT(2));
183
184 /* Disable data value and data address tracing */
185 config->mode &= ~(ETM_MODE_DATA_TRACE_ADDR |
186 ETM_MODE_DATA_TRACE_VAL);
187 config->cfg &= ~(BIT(16) | BIT(17));
188
189 /* Disable all events tracing */
190 config->eventctrl0 = 0x0;
191 config->eventctrl1 = 0x0;
192
193 /* Disable timestamp event */
194 config->ts_ctrl = 0x0;
195
196 /* Disable stalling */
197 config->stall_ctrl = 0x0;
198
199 /* Reset trace synchronization period to 2^8 = 256 bytes */
200 if (drvdata->syncpr == false)
201 config->syncfreq = 0x8;
202
203 /*
204 * Enable ViewInst to trace everything with start-stop logic in
205 * started state. ARM recommends start-stop logic is set before
206 * each trace run.
207 */
208 config->vinst_ctrl |= BIT(0);
209 if (drvdata->nr_addr_cmp > 0) {
210 config->mode |= ETM_MODE_VIEWINST_STARTSTOP;
211 /* SSSTATUS, bit[9] */
212 config->vinst_ctrl |= BIT(9);
213 }
214
215 /* No address range filtering for ViewInst */
216 config->viiectlr = 0x0;
217
218 /* No start-stop filtering for ViewInst */
219 config->vissctlr = 0x0;
220
221 /* Disable seq events */
222 for (i = 0; i < drvdata->nrseqstate-1; i++)
223 config->seq_ctrl[i] = 0x0;
224 config->seq_rst = 0x0;
225 config->seq_state = 0x0;
226
227 /* Disable external input events */
228 config->ext_inp = 0x0;
229
230 config->cntr_idx = 0x0;
231 for (i = 0; i < drvdata->nr_cntr; i++) {
232 config->cntrldvr[i] = 0x0;
233 config->cntr_ctrl[i] = 0x0;
234 config->cntr_val[i] = 0x0;
235 }
236
237 config->res_idx = 0x0;
238 for (i = 0; i < drvdata->nr_resource; i++)
239 config->res_ctrl[i] = 0x0;
240
241 for (i = 0; i < drvdata->nr_ss_cmp; i++) {
242 config->ss_ctrl[i] = 0x0;
243 config->ss_pe_cmp[i] = 0x0;
244 }
245
246 config->addr_idx = 0x0;
247 for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
248 config->addr_val[i] = 0x0;
249 config->addr_acc[i] = 0x0;
250 config->addr_type[i] = ETM_ADDR_TYPE_NONE;
251 }
252
253 config->ctxid_idx = 0x0;
254 for (i = 0; i < drvdata->numcidc; i++)
255 config->ctxid_pid[i] = 0x0;
256
257 config->ctxid_mask0 = 0x0;
258 config->ctxid_mask1 = 0x0;
259
260 config->vmid_idx = 0x0;
261 for (i = 0; i < drvdata->numvmidc; i++)
262 config->vmid_val[i] = 0x0;
263 config->vmid_mask0 = 0x0;
264 config->vmid_mask1 = 0x0;
265
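/*
 * Restore the default trace ID: CPU number + 1, so that ID 0 (reserved
 * as the null trace source ID by the trace formatter) is never used.
 */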
266 drvdata->trcid = drvdata->cpu + 1;
267
268 spin_unlock(&drvdata->spinlock);
269
270 return size;
271 }
272 static DEVICE_ATTR_WO(reset);
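/*
 * Example usage (minimal sketch; the device name under
 * /sys/bus/coresight/devices/ is platform dependent, "etm0" is assumed):
 *   echo 1 > /sys/bus/coresight/devices/etm0/reset
 * restores the default configuration described above.
 */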
273
274 static ssize_t mode_show(struct device *dev,
275 struct device_attribute *attr,
276 char *buf)
277 {
278 unsigned long val;
279 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
280 struct etmv4_config *config = &drvdata->config;
281
282 val = config->mode;
283 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
284 }
285
286 static ssize_t mode_store(struct device *dev,
287 struct device_attribute *attr,
288 const char *buf, size_t size)
289 {
290 unsigned long val, mode;
291 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
292 struct etmv4_config *config = &drvdata->config;
293
294 if (kstrtoul(buf, 16, &val))
295 return -EINVAL;
296
297 spin_lock(&drvdata->spinlock);
298 config->mode = val & ETMv4_MODE_ALL;
299 etm4_set_mode_exclude(drvdata,
300 config->mode & ETM_MODE_EXCLUDE ? true : false);
301
302 if (drvdata->instrp0 == true) {
303 /* start by clearing instruction P0 field */
304 config->cfg &= ~(BIT(1) | BIT(2));
305 if (config->mode & ETM_MODE_LOAD)
306 /* 0b01 Trace load instructions as P0 instructions */
307 config->cfg |= BIT(1);
308 if (config->mode & ETM_MODE_STORE)
309 /* 0b10 Trace store instructions as P0 instructions */
310 config->cfg |= BIT(2);
311 if (config->mode & ETM_MODE_LOAD_STORE)
312 /*
313 * 0b11 Trace load and store instructions
314 * as P0 instructions
315 */
316 config->cfg |= BIT(1) | BIT(2);
317 }
318
319 /* bit[3], Branch broadcast mode */
320 if ((config->mode & ETM_MODE_BB) && (drvdata->trcbb == true))
321 config->cfg |= BIT(3);
322 else
323 config->cfg &= ~BIT(3);
324
325 /* bit[4], Cycle counting instruction trace bit */
326 if ((config->mode & ETMv4_MODE_CYCACC) &&
327 (drvdata->trccci == true))
328 config->cfg |= BIT(4);
329 else
330 config->cfg &= ~BIT(4);
331
332 /* bit[6], Context ID tracing bit */
333 if ((config->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size))
334 config->cfg |= BIT(6);
335 else
336 config->cfg &= ~BIT(6);
337
338 if ((config->mode & ETM_MODE_VMID) && (drvdata->vmid_size))
339 config->cfg |= BIT(7);
340 else
341 config->cfg &= ~BIT(7);
342
343 /* bits[10:8], Conditional instruction tracing bit */
344 mode = ETM_MODE_COND(config->mode);
345 if (drvdata->trccond == true) {
346 config->cfg &= ~(BIT(8) | BIT(9) | BIT(10));
347 config->cfg |= mode << 8;
348 }
349
350 /* bit[11], Global timestamp tracing bit */
351 if ((config->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size))
352 config->cfg |= BIT(11);
353 else
354 config->cfg &= ~BIT(11);
355
356 /* bit[12], Return stack enable bit */
357 if ((config->mode & ETM_MODE_RETURNSTACK) &&
358 (drvdata->retstack == true))
359 config->cfg |= BIT(12);
360 else
361 config->cfg &= ~BIT(12);
362
363 /* bits[14:13], Q element enable field */
364 mode = ETM_MODE_QELEM(config->mode);
365 /* start by clearing QE bits */
366 config->cfg &= ~(BIT(13) | BIT(14));
367 /* if supported, Q elements with instruction counts are enabled */
368 if ((mode & BIT(0)) && (drvdata->q_support & BIT(0)))
369 config->cfg |= BIT(13);
370 /*
371 * if supported, Q elements with and without instruction
372 * counts are enabled
373 */
374 if ((mode & BIT(1)) && (drvdata->q_support & BIT(1)))
375 config->cfg |= BIT(14);
376
377 /* bit[11], AMBA Trace Bus (ATB) trigger enable bit */
378 if ((config->mode & ETM_MODE_ATB_TRIGGER) &&
379 (drvdata->atbtrig == true))
380 config->eventctrl1 |= BIT(11);
381 else
382 config->eventctrl1 &= ~BIT(11);
383
384 /* bit[12], Low-power state behavior override bit */
385 if ((config->mode & ETM_MODE_LPOVERRIDE) &&
386 (drvdata->lpoverride == true))
387 config->eventctrl1 |= BIT(12);
388 else
389 config->eventctrl1 &= ~BIT(12);
390
391 /* bit[8], Instruction stall bit */
392 if (config->mode & ETM_MODE_ISTALL_EN)
393 config->stall_ctrl |= BIT(8);
394 else
395 config->stall_ctrl &= ~BIT(8);
396
397 /* bit[10], Prioritize instruction trace bit */
398 if (config->mode & ETM_MODE_INSTPRIO)
399 config->stall_ctrl |= BIT(10);
400 else
401 config->stall_ctrl &= ~BIT(10);
402
403 /* bit[13], Trace overflow prevention bit */
404 if ((config->mode & ETM_MODE_NOOVERFLOW) &&
405 (drvdata->nooverflow == true))
406 config->stall_ctrl |= BIT(13);
407 else
408 config->stall_ctrl &= ~BIT(13);
409
410 /* bit[9] Start/stop logic control bit */
411 if (config->mode & ETM_MODE_VIEWINST_STARTSTOP)
412 config->vinst_ctrl |= BIT(9);
413 else
414 config->vinst_ctrl &= ~BIT(9);
415
416 /* bit[10], Whether a trace unit must trace a Reset exception */
417 if (config->mode & ETM_MODE_TRACE_RESET)
418 config->vinst_ctrl |= BIT(10);
419 else
420 config->vinst_ctrl &= ~BIT(10);
421
422 /* bit[11], Whether a trace unit must trace a system error exception */
423 if ((config->mode & ETM_MODE_TRACE_ERR) &&
424 (drvdata->trc_error == true))
425 config->vinst_ctrl |= BIT(11);
426 else
427 config->vinst_ctrl &= ~BIT(11);
428
429 if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
430 etm4_config_trace_mode(config);
431
432 spin_unlock(&drvdata->spinlock);
433
434 return size;
435 }
436 static DEVICE_ATTR_RW(mode);
437
438 static ssize_t pe_show(struct device *dev,
439 struct device_attribute *attr,
440 char *buf)
441 {
442 unsigned long val;
443 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
444 struct etmv4_config *config = &drvdata->config;
445
446 val = config->pe_sel;
447 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
448 }
449
450 static ssize_t pe_store(struct device *dev,
451 struct device_attribute *attr,
452 const char *buf, size_t size)
453 {
454 unsigned long val;
455 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
456 struct etmv4_config *config = &drvdata->config;
457
458 if (kstrtoul(buf, 16, &val))
459 return -EINVAL;
460
461 spin_lock(&drvdata->spinlock);
462 if (val > drvdata->nr_pe) {
463 spin_unlock(&drvdata->spinlock);
464 return -EINVAL;
465 }
466
467 config->pe_sel = val;
468 spin_unlock(&drvdata->spinlock);
469 return size;
470 }
471 static DEVICE_ATTR_RW(pe);
472
473 static ssize_t event_show(struct device *dev,
474 struct device_attribute *attr,
475 char *buf)
476 {
477 unsigned long val;
478 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
479 struct etmv4_config *config = &drvdata->config;
480
481 val = config->eventctrl0;
482 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
483 }
484
485 static ssize_t event_store(struct device *dev,
486 struct device_attribute *attr,
487 const char *buf, size_t size)
488 {
489 unsigned long val;
490 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
491 struct etmv4_config *config = &drvdata->config;
492
493 if (kstrtoul(buf, 16, &val))
494 return -EINVAL;
495
496 spin_lock(&drvdata->spinlock);
497 switch (drvdata->nr_event) {
498 case 0x0:
499 /* EVENT0, bits[7:0] */
500 config->eventctrl0 = val & 0xFF;
501 break;
502 case 0x1:
503 /* EVENT1, bits[15:8] */
504 config->eventctrl0 = val & 0xFFFF;
505 break;
506 case 0x2:
507 /* EVENT2, bits[23:16] */
508 config->eventctrl0 = val & 0xFFFFFF;
509 break;
510 case 0x3:
511 /* EVENT3, bits[31:24] */
512 config->eventctrl0 = val;
513 break;
514 default:
515 break;
516 }
517 spin_unlock(&drvdata->spinlock);
518 return size;
519 }
520 static DEVICE_ATTR_RW(event);
521
522 static ssize_t event_instren_show(struct device *dev,
523 struct device_attribute *attr,
524 char *buf)
525 {
526 unsigned long val;
527 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
528 struct etmv4_config *config = &drvdata->config;
529
530 val = BMVAL(config->eventctrl1, 0, 3);
531 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
532 }
533
534 static ssize_t event_instren_store(struct device *dev,
535 struct device_attribute *attr,
536 const char *buf, size_t size)
537 {
538 unsigned long val;
539 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
540 struct etmv4_config *config = &drvdata->config;
541
542 if (kstrtoul(buf, 16, &val))
543 return -EINVAL;
544
545 spin_lock(&drvdata->spinlock);
546 /* start by clearing all instruction event enable bits */
547 config->eventctrl1 &= ~(BIT(0) | BIT(1) | BIT(2) | BIT(3));
548 switch (drvdata->nr_event) {
549 case 0x0:
550 /* generate Event element for event 1 */
551 config->eventctrl1 |= val & BIT(1);
552 break;
553 case 0x1:
554 /* generate Event element for event 1 and 2 */
555 config->eventctrl1 |= val & (BIT(0) | BIT(1));
556 break;
557 case 0x2:
558 /* generate Event element for event 1, 2 and 3 */
559 config->eventctrl1 |= val & (BIT(0) | BIT(1) | BIT(2));
560 break;
561 case 0x3:
562 /* generate Event element for all 4 events */
563 config->eventctrl1 |= val & 0xF;
564 break;
565 default:
566 break;
567 }
568 spin_unlock(&drvdata->spinlock);
569 return size;
570 }
571 static DEVICE_ATTR_RW(event_instren);
572
573 static ssize_t event_ts_show(struct device *dev,
574 struct device_attribute *attr,
575 char *buf)
576 {
577 unsigned long val;
578 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
579 struct etmv4_config *config = &drvdata->config;
580
581 val = config->ts_ctrl;
582 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
583 }
584
585 static ssize_t event_ts_store(struct device *dev,
586 struct device_attribute *attr,
587 const char *buf, size_t size)
588 {
589 unsigned long val;
590 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
591 struct etmv4_config *config = &drvdata->config;
592
593 if (kstrtoul(buf, 16, &val))
594 return -EINVAL;
595 if (!drvdata->ts_size)
596 return -EINVAL;
597
598 config->ts_ctrl = val & ETMv4_EVENT_MASK;
599 return size;
600 }
601 static DEVICE_ATTR_RW(event_ts);
602
603 static ssize_t syncfreq_show(struct device *dev,
604 struct device_attribute *attr,
605 char *buf)
606 {
607 unsigned long val;
608 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
609 struct etmv4_config *config = &drvdata->config;
610
611 val = config->syncfreq;
612 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
613 }
614
615 static ssize_t syncfreq_store(struct device *dev,
616 struct device_attribute *attr,
617 const char *buf, size_t size)
618 {
619 unsigned long val;
620 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
621 struct etmv4_config *config = &drvdata->config;
622
623 if (kstrtoul(buf, 16, &val))
624 return -EINVAL;
625 if (drvdata->syncpr == true)
626 return -EINVAL;
627
628 config->syncfreq = val & ETMv4_SYNC_MASK;
629 return size;
630 }
631 static DEVICE_ATTR_RW(syncfreq);
632
633 static ssize_t cyc_threshold_show(struct device *dev,
634 struct device_attribute *attr,
635 char *buf)
636 {
637 unsigned long val;
638 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
639 struct etmv4_config *config = &drvdata->config;
640
641 val = config->ccctlr;
642 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
643 }
644
645 static ssize_t cyc_threshold_store(struct device *dev,
646 struct device_attribute *attr,
647 const char *buf, size_t size)
648 {
649 unsigned long val;
650 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
651 struct etmv4_config *config = &drvdata->config;
652
653 if (kstrtoul(buf, 16, &val))
654 return -EINVAL;
655 if (val < drvdata->ccitmin)
656 return -EINVAL;
657
658 config->ccctlr = val & ETM_CYC_THRESHOLD_MASK;
659 return size;
660 }
661 static DEVICE_ATTR_RW(cyc_threshold);
662
663 static ssize_t bb_ctrl_show(struct device *dev,
664 struct device_attribute *attr,
665 char *buf)
666 {
667 unsigned long val;
668 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
669 struct etmv4_config *config = &drvdata->config;
670
671 val = config->bb_ctrl;
672 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
673 }
674
675 static ssize_t bb_ctrl_store(struct device *dev,
676 struct device_attribute *attr,
677 const char *buf, size_t size)
678 {
679 unsigned long val;
680 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
681 struct etmv4_config *config = &drvdata->config;
682
683 if (kstrtoul(buf, 16, &val))
684 return -EINVAL;
685 if (drvdata->trcbb == false)
686 return -EINVAL;
687 if (!drvdata->nr_addr_cmp)
688 return -EINVAL;
689 /*
690 * Bit[7:0] selects which address range comparator is used for
691 * branch broadcast control.
692 */
693 if (BMVAL(val, 0, 7) > drvdata->nr_addr_cmp)
694 return -EINVAL;
695
696 config->bb_ctrl = val;
697 return size;
698 }
699 static DEVICE_ATTR_RW(bb_ctrl);
700
701 static ssize_t event_vinst_show(struct device *dev,
702 struct device_attribute *attr,
703 char *buf)
704 {
705 unsigned long val;
706 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
707 struct etmv4_config *config = &drvdata->config;
708
709 val = config->vinst_ctrl & ETMv4_EVENT_MASK;
710 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
711 }
712
713 static ssize_t event_vinst_store(struct device *dev,
714 struct device_attribute *attr,
715 const char *buf, size_t size)
716 {
717 unsigned long val;
718 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
719 struct etmv4_config *config = &drvdata->config;
720
721 if (kstrtoul(buf, 16, &val))
722 return -EINVAL;
723
724 spin_lock(&drvdata->spinlock);
725 val &= ETMv4_EVENT_MASK;
726 config->vinst_ctrl &= ~ETMv4_EVENT_MASK;
727 config->vinst_ctrl |= val;
728 spin_unlock(&drvdata->spinlock);
729 return size;
730 }
731 static DEVICE_ATTR_RW(event_vinst);
732
733 static ssize_t s_exlevel_vinst_show(struct device *dev,
734 struct device_attribute *attr,
735 char *buf)
736 {
737 unsigned long val;
738 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
739 struct etmv4_config *config = &drvdata->config;
740
741 val = BMVAL(config->vinst_ctrl, 16, 19);
742 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
743 }
744
745 static ssize_t s_exlevel_vinst_store(struct device *dev,
746 struct device_attribute *attr,
747 const char *buf, size_t size)
748 {
749 unsigned long val;
750 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
751 struct etmv4_config *config = &drvdata->config;
752
753 if (kstrtoul(buf, 16, &val))
754 return -EINVAL;
755
756 spin_lock(&drvdata->spinlock);
757 /* clear all EXLEVEL_S bits (bit[18] is never implemented) */
758 config->vinst_ctrl &= ~(BIT(16) | BIT(17) | BIT(19));
759 /* enable instruction tracing for corresponding exception level */
760 val &= drvdata->s_ex_level;
761 config->vinst_ctrl |= (val << 16);
762 spin_unlock(&drvdata->spinlock);
763 return size;
764 }
765 static DEVICE_ATTR_RW(s_exlevel_vinst);
766
767 static ssize_t ns_exlevel_vinst_show(struct device *dev,
768 struct device_attribute *attr,
769 char *buf)
770 {
771 unsigned long val;
772 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
773 struct etmv4_config *config = &drvdata->config;
774
775 /* EXLEVEL_NS, bits[23:20] */
776 val = BMVAL(config->vinst_ctrl, 20, 23);
777 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
778 }
779
780 static ssize_t ns_exlevel_vinst_store(struct device *dev,
781 struct device_attribute *attr,
782 const char *buf, size_t size)
783 {
784 unsigned long val;
785 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
786 struct etmv4_config *config = &drvdata->config;
787
788 if (kstrtoul(buf, 16, &val))
789 return -EINVAL;
790
791 spin_lock(&drvdata->spinlock);
792 /* clear EXLEVEL_NS bits (bit[23] is never implemented) */
793 config->vinst_ctrl &= ~(BIT(20) | BIT(21) | BIT(22));
794 /* enable instruction tracing for corresponding exception level */
795 val &= drvdata->ns_ex_level;
796 config->vinst_ctrl |= (val << 20);
797 spin_unlock(&drvdata->spinlock);
798 return size;
799 }
800 static DEVICE_ATTR_RW(ns_exlevel_vinst);
801
802 static ssize_t addr_idx_show(struct device *dev,
803 struct device_attribute *attr,
804 char *buf)
805 {
806 unsigned long val;
807 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
808 struct etmv4_config *config = &drvdata->config;
809
810 val = config->addr_idx;
811 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
812 }
813
814 static ssize_t addr_idx_store(struct device *dev,
815 struct device_attribute *attr,
816 const char *buf, size_t size)
817 {
818 unsigned long val;
819 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
820 struct etmv4_config *config = &drvdata->config;
821
822 if (kstrtoul(buf, 16, &val))
823 return -EINVAL;
824 if (val >= drvdata->nr_addr_cmp * 2)
825 return -EINVAL;
826
827 /*
828 * Use spinlock to ensure index doesn't change while it gets
829 * dereferenced multiple times within a spinlock block elsewhere.
830 */
831 spin_lock(&drvdata->spinlock);
832 config->addr_idx = val;
833 spin_unlock(&drvdata->spinlock);
834 return size;
835 }
836 static DEVICE_ATTR_RW(addr_idx);
837
838 static ssize_t addr_instdatatype_show(struct device *dev,
839 struct device_attribute *attr,
840 char *buf)
841 {
842 ssize_t len;
843 u8 val, idx;
844 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
845 struct etmv4_config *config = &drvdata->config;
846
847 spin_lock(&drvdata->spinlock);
848 idx = config->addr_idx;
849 val = BMVAL(config->addr_acc[idx], 0, 1);
850 len = scnprintf(buf, PAGE_SIZE, "%s\n",
851 val == ETM_INSTR_ADDR ? "instr" :
852 (val == ETM_DATA_LOAD_ADDR ? "data_load" :
853 (val == ETM_DATA_STORE_ADDR ? "data_store" :
854 "data_load_store")));
855 spin_unlock(&drvdata->spinlock);
856 return len;
857 }
858
859 static ssize_t addr_instdatatype_store(struct device *dev,
860 struct device_attribute *attr,
861 const char *buf, size_t size)
862 {
863 u8 idx;
864 char str[20] = "";
865 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
866 struct etmv4_config *config = &drvdata->config;
867
868 if (strlen(buf) >= 20)
869 return -EINVAL;
870 if (sscanf(buf, "%s", str) != 1)
871 return -EINVAL;
872
873 spin_lock(&drvdata->spinlock);
874 idx = config->addr_idx;
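/*
 * Only "instr" is handled below: clearing the TYPE bits selects
 * instruction address comparison; any other string is silently ignored.
 */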
875 if (!strcmp(str, "instr"))
876 /* TYPE, bits[1:0] */
877 config->addr_acc[idx] &= ~(BIT(0) | BIT(1));
878
879 spin_unlock(&drvdata->spinlock);
880 return size;
881 }
882 static DEVICE_ATTR_RW(addr_instdatatype);
883
884 static ssize_t addr_single_show(struct device *dev,
885 struct device_attribute *attr,
886 char *buf)
887 {
888 u8 idx;
889 unsigned long val;
890 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
891 struct etmv4_config *config = &drvdata->config;
892
893 idx = config->addr_idx;
894 spin_lock(&drvdata->spinlock);
895 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
896 config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
897 spin_unlock(&drvdata->spinlock);
898 return -EPERM;
899 }
900 val = (unsigned long)config->addr_val[idx];
901 spin_unlock(&drvdata->spinlock);
902 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
903 }
904
905 static ssize_t addr_single_store(struct device *dev,
906 struct device_attribute *attr,
907 const char *buf, size_t size)
908 {
909 u8 idx;
910 unsigned long val;
911 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
912 struct etmv4_config *config = &drvdata->config;
913
914 if (kstrtoul(buf, 16, &val))
915 return -EINVAL;
916
917 spin_lock(&drvdata->spinlock);
918 idx = config->addr_idx;
919 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
920 config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
921 spin_unlock(&drvdata->spinlock);
922 return -EPERM;
923 }
924
925 config->addr_val[idx] = (u64)val;
926 config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
927 spin_unlock(&drvdata->spinlock);
928 return size;
929 }
930 static DEVICE_ATTR_RW(addr_single);
931
932 static ssize_t addr_range_show(struct device *dev,
933 struct device_attribute *attr,
934 char *buf)
935 {
936 u8 idx;
937 unsigned long val1, val2;
938 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
939 struct etmv4_config *config = &drvdata->config;
940
941 spin_lock(&drvdata->spinlock);
942 idx = config->addr_idx;
943 if (idx % 2 != 0) {
944 spin_unlock(&drvdata->spinlock);
945 return -EPERM;
946 }
947 if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
948 config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
949 (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
950 config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
951 spin_unlock(&drvdata->spinlock);
952 return -EPERM;
953 }
954
955 val1 = (unsigned long)config->addr_val[idx];
956 val2 = (unsigned long)config->addr_val[idx + 1];
957 spin_unlock(&drvdata->spinlock);
958 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
959 }
960
961 static ssize_t addr_range_store(struct device *dev,
962 struct device_attribute *attr,
963 const char *buf, size_t size)
964 {
965 u8 idx;
966 unsigned long val1, val2;
967 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
968 struct etmv4_config *config = &drvdata->config;
969
970 if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
971 return -EINVAL;
972 /* lower address comparator cannot have a higher address value */
973 if (val1 > val2)
974 return -EINVAL;
975
976 spin_lock(&drvdata->spinlock);
977 idx = config->addr_idx;
978 if (idx % 2 != 0) {
979 spin_unlock(&drvdata->spinlock);
980 return -EPERM;
981 }
982
983 if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
984 config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
985 (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
986 config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
987 spin_unlock(&drvdata->spinlock);
988 return -EPERM;
989 }
990
991 config->addr_val[idx] = (u64)val1;
992 config->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
993 config->addr_val[idx + 1] = (u64)val2;
994 config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
995 /*
996 * Program include or exclude control bits for vinst or vdata
997 * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE
998 */
999 etm4_set_mode_exclude(drvdata,
1000 config->mode & ETM_MODE_EXCLUDE ? true : false);
1001
1002 spin_unlock(&drvdata->spinlock);
1003 return size;
1004 }
1005 static DEVICE_ATTR_RW(addr_range);
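/*
 * Example usage (minimal sketch; "etm0" is an assumed device name):
 * select address comparator pair 0/1, then program the range with the
 * start and end addresses in hex:
 *   echo 0 > /sys/bus/coresight/devices/etm0/addr_idx
 *   echo 0x408000 0x409000 > /sys/bus/coresight/devices/etm0/addr_range
 */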
1006
1007 static ssize_t addr_start_show(struct device *dev,
1008 struct device_attribute *attr,
1009 char *buf)
1010 {
1011 u8 idx;
1012 unsigned long val;
1013 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1014 struct etmv4_config *config = &drvdata->config;
1015
1016 spin_lock(&drvdata->spinlock);
1017 idx = config->addr_idx;
1018
1019 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1020 config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1021 spin_unlock(&drvdata->spinlock);
1022 return -EPERM;
1023 }
1024
1025 val = (unsigned long)config->addr_val[idx];
1026 spin_unlock(&drvdata->spinlock);
1027 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1028 }
1029
1030 static ssize_t addr_start_store(struct device *dev,
1031 struct device_attribute *attr,
1032 const char *buf, size_t size)
1033 {
1034 u8 idx;
1035 unsigned long val;
1036 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1037 struct etmv4_config *config = &drvdata->config;
1038
1039 if (kstrtoul(buf, 16, &val))
1040 return -EINVAL;
1041
1042 spin_lock(&drvdata->spinlock);
1043 idx = config->addr_idx;
1044 if (!drvdata->nr_addr_cmp) {
1045 spin_unlock(&drvdata->spinlock);
1046 return -EINVAL;
1047 }
1048 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1049 config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1050 spin_unlock(&drvdata->spinlock);
1051 return -EPERM;
1052 }
1053
1054 config->addr_val[idx] = (u64)val;
1055 config->addr_type[idx] = ETM_ADDR_TYPE_START;
1056 config->vissctlr |= BIT(idx);
1057 /* SSSTATUS, bit[9] - turn on start/stop logic */
1058 config->vinst_ctrl |= BIT(9);
1059 spin_unlock(&drvdata->spinlock);
1060 return size;
1061 }
1062 static DEVICE_ATTR_RW(addr_start);
1063
1064 static ssize_t addr_stop_show(struct device *dev,
1065 struct device_attribute *attr,
1066 char *buf)
1067 {
1068 u8 idx;
1069 unsigned long val;
1070 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1071 struct etmv4_config *config = &drvdata->config;
1072
1073 spin_lock(&drvdata->spinlock);
1074 idx = config->addr_idx;
1075
1076 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1077 config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1078 spin_unlock(&drvdata->spinlock);
1079 return -EPERM;
1080 }
1081
1082 val = (unsigned long)config->addr_val[idx];
1083 spin_unlock(&drvdata->spinlock);
1084 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1085 }
1086
1087 static ssize_t addr_stop_store(struct device *dev,
1088 struct device_attribute *attr,
1089 const char *buf, size_t size)
1090 {
1091 u8 idx;
1092 unsigned long val;
1093 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1094 struct etmv4_config *config = &drvdata->config;
1095
1096 if (kstrtoul(buf, 16, &val))
1097 return -EINVAL;
1098
1099 spin_lock(&drvdata->spinlock);
1100 idx = config->addr_idx;
1101 if (!drvdata->nr_addr_cmp) {
1102 spin_unlock(&drvdata->spinlock);
1103 return -EINVAL;
1104 }
1105 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1106 config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1107 spin_unlock(&drvdata->spinlock);
1108 return -EPERM;
1109 }
1110
1111 config->addr_val[idx] = (u64)val;
1112 config->addr_type[idx] = ETM_ADDR_TYPE_STOP;
1113 config->vissctlr |= BIT(idx + 16);
1114 /* SSSTATUS, bit[9] - turn on start/stop logic */
1115 config->vinst_ctrl |= BIT(9);
1116 spin_unlock(&drvdata->spinlock);
1117 return size;
1118 }
1119 static DEVICE_ATTR_RW(addr_stop);
1120
1121 static ssize_t addr_ctxtype_show(struct device *dev,
1122 struct device_attribute *attr,
1123 char *buf)
1124 {
1125 ssize_t len;
1126 u8 idx, val;
1127 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1128 struct etmv4_config *config = &drvdata->config;
1129
1130 spin_lock(&drvdata->spinlock);
1131 idx = config->addr_idx;
1132 /* CONTEXTTYPE, bits[3:2] */
1133 val = BMVAL(config->addr_acc[idx], 2, 3);
1134 len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" :
1135 (val == ETM_CTX_CTXID ? "ctxid" :
1136 (val == ETM_CTX_VMID ? "vmid" : "all")));
1137 spin_unlock(&drvdata->spinlock);
1138 return len;
1139 }
1140
1141 static ssize_t addr_ctxtype_store(struct device *dev,
1142 struct device_attribute *attr,
1143 const char *buf, size_t size)
1144 {
1145 u8 idx;
1146 char str[10] = "";
1147 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1148 struct etmv4_config *config = &drvdata->config;
1149
1150 if (strlen(buf) >= 10)
1151 return -EINVAL;
1152 if (sscanf(buf, "%s", str) != 1)
1153 return -EINVAL;
1154
1155 spin_lock(&drvdata->spinlock);
1156 idx = config->addr_idx;
1157 if (!strcmp(str, "none"))
1158 /* start by clearing context type bits */
1159 config->addr_acc[idx] &= ~(BIT(2) | BIT(3));
1160 else if (!strcmp(str, "ctxid")) {
1161 /* 0b01 The trace unit performs a Context ID comparison */
1162 if (drvdata->numcidc) {
1163 config->addr_acc[idx] |= BIT(2);
1164 config->addr_acc[idx] &= ~BIT(3);
1165 }
1166 } else if (!strcmp(str, "vmid")) {
1167 /* 0b10 The trace unit performs a VMID comparison */
1168 if (drvdata->numvmidc) {
1169 config->addr_acc[idx] &= ~BIT(2);
1170 config->addr_acc[idx] |= BIT(3);
1171 }
1172 } else if (!strcmp(str, "all")) {
1173 /*
1174 * 0b11 The trace unit performs a Context ID
1175 * comparison and a VMID comparison
1176 */
1177 if (drvdata->numcidc)
1178 config->addr_acc[idx] |= BIT(2);
1179 if (drvdata->numvmidc)
1180 config->addr_acc[idx] |= BIT(3);
1181 }
1182 spin_unlock(&drvdata->spinlock);
1183 return size;
1184 }
1185 static DEVICE_ATTR_RW(addr_ctxtype);
1186
1187 static ssize_t addr_context_show(struct device *dev,
1188 struct device_attribute *attr,
1189 char *buf)
1190 {
1191 u8 idx;
1192 unsigned long val;
1193 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1194 struct etmv4_config *config = &drvdata->config;
1195
1196 spin_lock(&drvdata->spinlock);
1197 idx = config->addr_idx;
1198 /* context ID comparator bits[6:4] */
1199 val = BMVAL(config->addr_acc[idx], 4, 6);
1200 spin_unlock(&drvdata->spinlock);
1201 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1202 }
1203
1204 static ssize_t addr_context_store(struct device *dev,
1205 struct device_attribute *attr,
1206 const char *buf, size_t size)
1207 {
1208 u8 idx;
1209 unsigned long val;
1210 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1211 struct etmv4_config *config = &drvdata->config;
1212
1213 if (kstrtoul(buf, 16, &val))
1214 return -EINVAL;
1215 if ((drvdata->numcidc <= 1) && (drvdata->numvmidc <= 1))
1216 return -EINVAL;
1217 if (val >= (drvdata->numcidc >= drvdata->numvmidc ?
1218 drvdata->numcidc : drvdata->numvmidc))
1219 return -EINVAL;
1220
1221 spin_lock(&drvdata->spinlock);
1222 idx = config->addr_idx;
1223 /* clear context ID comparator bits[6:4] */
1224 config->addr_acc[idx] &= ~(BIT(4) | BIT(5) | BIT(6));
1225 config->addr_acc[idx] |= (val << 4);
1226 spin_unlock(&drvdata->spinlock);
1227 return size;
1228 }
1229 static DEVICE_ATTR_RW(addr_context);
1230
1231 static ssize_t seq_idx_show(struct device *dev,
1232 struct device_attribute *attr,
1233 char *buf)
1234 {
1235 unsigned long val;
1236 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1237 struct etmv4_config *config = &drvdata->config;
1238
1239 val = config->seq_idx;
1240 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1241 }
1242
1243 static ssize_t seq_idx_store(struct device *dev,
1244 struct device_attribute *attr,
1245 const char *buf, size_t size)
1246 {
1247 unsigned long val;
1248 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1249 struct etmv4_config *config = &drvdata->config;
1250
1251 if (kstrtoul(buf, 16, &val))
1252 return -EINVAL;
1253 if (val >= drvdata->nrseqstate - 1)
1254 return -EINVAL;
1255
1256 /*
1257 * Use spinlock to ensure index doesn't change while it gets
1258 * dereferenced multiple times within a spinlock block elsewhere.
1259 */
1260 spin_lock(&drvdata->spinlock);
1261 config->seq_idx = val;
1262 spin_unlock(&drvdata->spinlock);
1263 return size;
1264 }
1265 static DEVICE_ATTR_RW(seq_idx);
1266
1267 static ssize_t seq_state_show(struct device *dev,
1268 struct device_attribute *attr,
1269 char *buf)
1270 {
1271 unsigned long val;
1272 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1273 struct etmv4_config *config = &drvdata->config;
1274
1275 val = config->seq_state;
1276 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1277 }
1278
1279 static ssize_t seq_state_store(struct device *dev,
1280 struct device_attribute *attr,
1281 const char *buf, size_t size)
1282 {
1283 unsigned long val;
1284 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1285 struct etmv4_config *config = &drvdata->config;
1286
1287 if (kstrtoul(buf, 16, &val))
1288 return -EINVAL;
1289 if (val >= drvdata->nrseqstate)
1290 return -EINVAL;
1291
1292 config->seq_state = val;
1293 return size;
1294 }
1295 static DEVICE_ATTR_RW(seq_state);
1296
1297 static ssize_t seq_event_show(struct device *dev,
1298 struct device_attribute *attr,
1299 char *buf)
1300 {
1301 u8 idx;
1302 unsigned long val;
1303 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1304 struct etmv4_config *config = &drvdata->config;
1305
1306 spin_lock(&drvdata->spinlock);
1307 idx = config->seq_idx;
1308 val = config->seq_ctrl[idx];
1309 spin_unlock(&drvdata->spinlock);
1310 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1311 }
1312
1313 static ssize_t seq_event_store(struct device *dev,
1314 struct device_attribute *attr,
1315 const char *buf, size_t size)
1316 {
1317 u8 idx;
1318 unsigned long val;
1319 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1320 struct etmv4_config *config = &drvdata->config;
1321
1322 if (kstrtoul(buf, 16, &val))
1323 return -EINVAL;
1324
1325 spin_lock(&drvdata->spinlock);
1326 idx = config->seq_idx;
1327 /* RST, bits[7:0] */
1328 config->seq_ctrl[idx] = val & 0xFF;
1329 spin_unlock(&drvdata->spinlock);
1330 return size;
1331 }
1332 static DEVICE_ATTR_RW(seq_event);
1333
1334 static ssize_t seq_reset_event_show(struct device *dev,
1335 struct device_attribute *attr,
1336 char *buf)
1337 {
1338 unsigned long val;
1339 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1340 struct etmv4_config *config = &drvdata->config;
1341
1342 val = config->seq_rst;
1343 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1344 }
1345
1346 static ssize_t seq_reset_event_store(struct device *dev,
1347 struct device_attribute *attr,
1348 const char *buf, size_t size)
1349 {
1350 unsigned long val;
1351 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1352 struct etmv4_config *config = &drvdata->config;
1353
1354 if (kstrtoul(buf, 16, &val))
1355 return -EINVAL;
1356 if (!(drvdata->nrseqstate))
1357 return -EINVAL;
1358
1359 config->seq_rst = val & ETMv4_EVENT_MASK;
1360 return size;
1361 }
1362 static DEVICE_ATTR_RW(seq_reset_event);
1363
1364 static ssize_t cntr_idx_show(struct device *dev,
1365 struct device_attribute *attr,
1366 char *buf)
1367 {
1368 unsigned long val;
1369 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1370 struct etmv4_config *config = &drvdata->config;
1371
1372 val = config->cntr_idx;
1373 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1374 }
1375
1376 static ssize_t cntr_idx_store(struct device *dev,
1377 struct device_attribute *attr,
1378 const char *buf, size_t size)
1379 {
1380 unsigned long val;
1381 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1382 struct etmv4_config *config = &drvdata->config;
1383
1384 if (kstrtoul(buf, 16, &val))
1385 return -EINVAL;
1386 if (val >= drvdata->nr_cntr)
1387 return -EINVAL;
1388
1389 /*
1390 * Use spinlock to ensure index doesn't change while it gets
1391 * dereferenced multiple times within a spinlock block elsewhere.
1392 */
1393 spin_lock(&drvdata->spinlock);
1394 config->cntr_idx = val;
1395 spin_unlock(&drvdata->spinlock);
1396 return size;
1397 }
1398 static DEVICE_ATTR_RW(cntr_idx);
1399
1400 static ssize_t cntrldvr_show(struct device *dev,
1401 struct device_attribute *attr,
1402 char *buf)
1403 {
1404 u8 idx;
1405 unsigned long val;
1406 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1407 struct etmv4_config *config = &drvdata->config;
1408
1409 spin_lock(&drvdata->spinlock);
1410 idx = config->cntr_idx;
1411 val = config->cntrldvr[idx];
1412 spin_unlock(&drvdata->spinlock);
1413 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1414 }
1415
1416 static ssize_t cntrldvr_store(struct device *dev,
1417 struct device_attribute *attr,
1418 const char *buf, size_t size)
1419 {
1420 u8 idx;
1421 unsigned long val;
1422 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1423 struct etmv4_config *config = &drvdata->config;
1424
1425 if (kstrtoul(buf, 16, &val))
1426 return -EINVAL;
1427 if (val > ETM_CNTR_MAX_VAL)
1428 return -EINVAL;
1429
1430 spin_lock(&drvdata->spinlock);
1431 idx = config->cntr_idx;
1432 config->cntrldvr[idx] = val;
1433 spin_unlock(&drvdata->spinlock);
1434 return size;
1435 }
1436 static DEVICE_ATTR_RW(cntrldvr);
1437
1438 static ssize_t cntr_val_show(struct device *dev,
1439 struct device_attribute *attr,
1440 char *buf)
1441 {
1442 u8 idx;
1443 unsigned long val;
1444 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1445 struct etmv4_config *config = &drvdata->config;
1446
1447 spin_lock(&drvdata->spinlock);
1448 idx = config->cntr_idx;
1449 val = config->cntr_val[idx];
1450 spin_unlock(&drvdata->spinlock);
1451 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1452 }
1453
1454 static ssize_t cntr_val_store(struct device *dev,
1455 struct device_attribute *attr,
1456 const char *buf, size_t size)
1457 {
1458 u8 idx;
1459 unsigned long val;
1460 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1461 struct etmv4_config *config = &drvdata->config;
1462
1463 if (kstrtoul(buf, 16, &val))
1464 return -EINVAL;
1465 if (val > ETM_CNTR_MAX_VAL)
1466 return -EINVAL;
1467
1468 spin_lock(&drvdata->spinlock);
1469 idx = config->cntr_idx;
1470 config->cntr_val[idx] = val;
1471 spin_unlock(&drvdata->spinlock);
1472 return size;
1473 }
1474 static DEVICE_ATTR_RW(cntr_val);
1475
1476 static ssize_t cntr_ctrl_show(struct device *dev,
1477 struct device_attribute *attr,
1478 char *buf)
1479 {
1480 u8 idx;
1481 unsigned long val;
1482 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1483 struct etmv4_config *config = &drvdata->config;
1484
1485 spin_lock(&drvdata->spinlock);
1486 idx = config->cntr_idx;
1487 val = config->cntr_ctrl[idx];
1488 spin_unlock(&drvdata->spinlock);
1489 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1490 }
1491
1492 static ssize_t cntr_ctrl_store(struct device *dev,
1493 struct device_attribute *attr,
1494 const char *buf, size_t size)
1495 {
1496 u8 idx;
1497 unsigned long val;
1498 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1499 struct etmv4_config *config = &drvdata->config;
1500
1501 if (kstrtoul(buf, 16, &val))
1502 return -EINVAL;
1503
1504 spin_lock(&drvdata->spinlock);
1505 idx = config->cntr_idx;
1506 config->cntr_ctrl[idx] = val;
1507 spin_unlock(&drvdata->spinlock);
1508 return size;
1509 }
1510 static DEVICE_ATTR_RW(cntr_ctrl);
1511
1512 static ssize_t res_idx_show(struct device *dev,
1513 struct device_attribute *attr,
1514 char *buf)
1515 {
1516 unsigned long val;
1517 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1518 struct etmv4_config *config = &drvdata->config;
1519
1520 val = config->res_idx;
1521 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1522 }
1523
1524 static ssize_t res_idx_store(struct device *dev,
1525 struct device_attribute *attr,
1526 const char *buf, size_t size)
1527 {
1528 unsigned long val;
1529 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1530 struct etmv4_config *config = &drvdata->config;
1531
1532 if (kstrtoul(buf, 16, &val))
1533 return -EINVAL;
1534 /* Resource selector pair 0 is always implemented and reserved */
1535 if ((val == 0) || (val >= drvdata->nr_resource))
1536 return -EINVAL;
1537
1538 /*
1539 * Use spinlock to ensure index doesn't change while it gets
1540 * dereferenced multiple times within a spinlock block elsewhere.
1541 */
1542 spin_lock(&drvdata->spinlock);
1543 config->res_idx = val;
1544 spin_unlock(&drvdata->spinlock);
1545 return size;
1546 }
1547 static DEVICE_ATTR_RW(res_idx);
1548
1549 static ssize_t res_ctrl_show(struct device *dev,
1550 struct device_attribute *attr,
1551 char *buf)
1552 {
1553 u8 idx;
1554 unsigned long val;
1555 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1556 struct etmv4_config *config = &drvdata->config;
1557
1558 spin_lock(&drvdata->spinlock);
1559 idx = config->res_idx;
1560 val = config->res_ctrl[idx];
1561 spin_unlock(&drvdata->spinlock);
1562 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1563 }
1564
1565 static ssize_t res_ctrl_store(struct device *dev,
1566 struct device_attribute *attr,
1567 const char *buf, size_t size)
1568 {
1569 u8 idx;
1570 unsigned long val;
1571 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1572 struct etmv4_config *config = &drvdata->config;
1573
1574 if (kstrtoul(buf, 16, &val))
1575 return -EINVAL;
1576
1577 spin_lock(&drvdata->spinlock);
1578 idx = config->res_idx;
1579 /* For an odd idx the pair inversion bit is RES0 */
1580 if (idx % 2 != 0)
1581 /* PAIRINV, bit[21] */
1582 val &= ~BIT(21);
1583 config->res_ctrl[idx] = val;
1584 spin_unlock(&drvdata->spinlock);
1585 return size;
1586 }
1587 static DEVICE_ATTR_RW(res_ctrl);
1588
1589 static ssize_t ctxid_idx_show(struct device *dev,
1590 struct device_attribute *attr,
1591 char *buf)
1592 {
1593 unsigned long val;
1594 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1595 struct etmv4_config *config = &drvdata->config;
1596
1597 val = config->ctxid_idx;
1598 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1599 }
1600
1601 static ssize_t ctxid_idx_store(struct device *dev,
1602 struct device_attribute *attr,
1603 const char *buf, size_t size)
1604 {
1605 unsigned long val;
1606 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1607 struct etmv4_config *config = &drvdata->config;
1608
1609 if (kstrtoul(buf, 16, &val))
1610 return -EINVAL;
1611 if (val >= drvdata->numcidc)
1612 return -EINVAL;
1613
1614 /*
1615 * Use spinlock to ensure index doesn't change while it gets
1616 * dereferenced multiple times within a spinlock block elsewhere.
1617 */
1618 spin_lock(&drvdata->spinlock);
1619 config->ctxid_idx = val;
1620 spin_unlock(&drvdata->spinlock);
1621 return size;
1622 }
1623 static DEVICE_ATTR_RW(ctxid_idx);
1624
1625 static ssize_t ctxid_pid_show(struct device *dev,
1626 struct device_attribute *attr,
1627 char *buf)
1628 {
1629 u8 idx;
1630 unsigned long val;
1631 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1632 struct etmv4_config *config = &drvdata->config;
1633
1634 /*
1635 * Don't use contextID tracing if coming from a PID namespace. See
1636 * comment in ctxid_pid_store().
1637 */
1638 if (task_active_pid_ns(current) != &init_pid_ns)
1639 return -EINVAL;
1640
1641 spin_lock(&drvdata->spinlock);
1642 idx = config->ctxid_idx;
1643 val = (unsigned long)config->ctxid_pid[idx];
1644 spin_unlock(&drvdata->spinlock);
1645 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1646 }
1647
1648 static ssize_t ctxid_pid_store(struct device *dev,
1649 struct device_attribute *attr,
1650 const char *buf, size_t size)
1651 {
1652 u8 idx;
1653 unsigned long pid;
1654 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1655 struct etmv4_config *config = &drvdata->config;
1656
1657 /*
1658 * When contextID tracing is enabled the tracers will insert the
1659 * value found in the contextID register in the trace stream. But if
1660 * a process is in a namespace the PID of that process as seen from the
1661 * namespace won't be what the kernel sees, something that makes the
1662 * feature confusing and can potentially leak kernel only information.
1663 * As such refuse to use the feature if @current is not in the initial
1664 * PID namespace.
1665 */
1666 if (task_active_pid_ns(current) != &init_pid_ns)
1667 return -EINVAL;
1668
1669 /*
1670 * only implemented when ctxid tracing is enabled, i.e. at least one
1671 * ctxid comparator is implemented and ctxid is greater than 0 bits
1672 * in length
1673 */
1674 if (!drvdata->ctxid_size || !drvdata->numcidc)
1675 return -EINVAL;
1676 if (kstrtoul(buf, 16, &pid))
1677 return -EINVAL;
1678
1679 spin_lock(&drvdata->spinlock);
1680 idx = config->ctxid_idx;
1681 config->ctxid_pid[idx] = (u64)pid;
1682 spin_unlock(&drvdata->spinlock);
1683 return size;
1684 }
1685 static DEVICE_ATTR_RW(ctxid_pid);
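/*
 * Example usage (minimal sketch; "etm0" is an assumed device name):
 * program context ID comparator 0 with the PID, given in hex, of a task
 * in the initial PID namespace:
 *   echo 0 > /sys/bus/coresight/devices/etm0/ctxid_idx
 *   echo 0x1b3 > /sys/bus/coresight/devices/etm0/ctxid_pid
 */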
1686
1687 static ssize_t ctxid_masks_show(struct device *dev,
1688 struct device_attribute *attr,
1689 char *buf)
1690 {
1691 unsigned long val1, val2;
1692 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1693 struct etmv4_config *config = &drvdata->config;
1694
1695 /*
1696 * Don't use contextID tracing if coming from a PID namespace. See
1697 * comment in ctxid_pid_store().
1698 */
1699 if (task_active_pid_ns(current) != &init_pid_ns)
1700 return -EINVAL;
1701
1702 spin_lock(&drvdata->spinlock);
1703 val1 = config->ctxid_mask0;
1704 val2 = config->ctxid_mask1;
1705 spin_unlock(&drvdata->spinlock);
1706 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
1707 }
1708
1709 static ssize_t ctxid_masks_store(struct device *dev,
1710 struct device_attribute *attr,
1711 const char *buf, size_t size)
1712 {
1713 u8 i, j, maskbyte;
1714 unsigned long val1, val2, mask;
1715 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1716 struct etmv4_config *config = &drvdata->config;
1717
1718 /*
1719 * Don't use contextID tracing if coming from a PID namespace. See
1720 * comment in ctxid_pid_store().
1721 */
1722 if (task_active_pid_ns(current) != &init_pid_ns)
1723 return -EINVAL;
1724
1725 /*
1726 * only implemented when ctxid tracing is enabled, i.e. at least one
1727 * ctxid comparator is implemented and ctxid is greater than 0 bits
1728 * in length
1729 */
1730 if (!drvdata->ctxid_size || !drvdata->numcidc)
1731 return -EINVAL;
1732 if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
1733 return -EINVAL;
1734
1735 spin_lock(&drvdata->spinlock);
1736 /*
1737 * each byte[0..3] controls mask value applied to ctxid
1738 * comparator[0..3]
1739 */
1740 switch (drvdata->numcidc) {
1741 case 0x1:
1742 /* COMP0, bits[7:0] */
1743 config->ctxid_mask0 = val1 & 0xFF;
1744 break;
1745 case 0x2:
1746 /* COMP1, bits[15:8] */
1747 config->ctxid_mask0 = val1 & 0xFFFF;
1748 break;
1749 case 0x3:
1750 /* COMP2, bits[23:16] */
1751 config->ctxid_mask0 = val1 & 0xFFFFFF;
1752 break;
1753 case 0x4:
1754 /* COMP3, bits[31:24] */
1755 config->ctxid_mask0 = val1;
1756 break;
1757 case 0x5:
1758 /* COMP4, bits[7:0] */
1759 config->ctxid_mask0 = val1;
1760 config->ctxid_mask1 = val2 & 0xFF;
1761 break;
1762 case 0x6:
1763 /* COMP5, bits[15:8] */
1764 config->ctxid_mask0 = val1;
1765 config->ctxid_mask1 = val2 & 0xFFFF;
1766 break;
1767 case 0x7:
1768 /* COMP6, bits[23:16] */
1769 config->ctxid_mask0 = val1;
1770 config->ctxid_mask1 = val2 & 0xFFFFFF;
1771 break;
1772 case 0x8:
1773 /* COMP7, bits[31:24] */
1774 config->ctxid_mask0 = val1;
1775 config->ctxid_mask1 = val2;
1776 break;
1777 default:
1778 break;
1779 }
	/*
	 * If software sets a mask bit to 1, it must program the relevant
	 * byte of the ctxid comparator value register to 0x0, otherwise
	 * behavior is unpredictable.  For example, if bit[3] of ctxid_mask0
	 * is 1 (mask byte 0 covers comparator 0), bits[31:24] (byte 3) of
	 * the ctxid comparator0 value register must be cleared.
	 */
	mask = config->ctxid_mask0;
	for (i = 0; i < drvdata->numcidc; i++) {
		/* mask value of corresponding ctxid comparator */
		maskbyte = mask & ETMv4_EVENT_MASK;
		/*
		 * each bit corresponds to a byte of respective ctxid comparator
		 * value register
		 */
		for (j = 0; j < 8; j++) {
			if (maskbyte & 1)
				config->ctxid_pid[i] &= ~(0xFFUL << (j * 8));
			maskbyte >>= 1;
		}
		/* Select the next ctxid comparator mask value */
		if (i == 3)
			/* ctxid comparators[4-7] */
			mask = config->ctxid_mask1;
		else
			mask >>= 0x8;
	}

	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(ctxid_masks);
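
/*
 * Illustrative shell usage (example values): ctxid_masks takes two hex
 * words, the first covering comparators 0-3 (one mask byte each) and the
 * second covering comparators 4-7 when more than four comparators are
 * implemented. Setting mask byte 0 to 0x3 below masks bytes 0 and 1 of
 * comparator 0's value.
 *
 *   # echo 0x3 0x0 > ctxid_masks
 *   # cat ctxid_masks
 *   0x3 0x0
 */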

static ssize_t vmid_idx_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->vmid_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t vmid_idx_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->numvmidc)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->vmid_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(vmid_idx);

static ssize_t vmid_val_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = (unsigned long)config->vmid_val[config->vmid_idx];
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t vmid_val_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/*
	 * only implemented when vmid tracing is enabled, i.e. at least one
	 * vmid comparator is implemented and at least 8 bit vmid size
	 */
	if (!drvdata->vmid_size || !drvdata->numvmidc)
		return -EINVAL;
	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	config->vmid_val[config->vmid_idx] = (u64)val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(vmid_val);
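
/*
 * Illustrative shell usage (example values): select a VMID comparator
 * with vmid_idx, then program its value in hex through vmid_val.
 *
 *   # echo 0 > vmid_idx
 *   # echo 0xabc > vmid_val
 *   # cat vmid_val
 *   0xabc
 */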

static ssize_t vmid_masks_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	unsigned long val1, val2;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	val1 = config->vmid_mask0;
	val2 = config->vmid_mask1;
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
}

static ssize_t vmid_masks_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 i, j, maskbyte;
	unsigned long val1, val2, mask;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/*
	 * only implemented when vmid tracing is enabled, i.e. at least one
	 * vmid comparator is implemented and at least 8 bit vmid size
	 */
	if (!drvdata->vmid_size || !drvdata->numvmidc)
		return -EINVAL;
	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);

	/*
	 * each byte[0..3] controls mask value applied to vmid
	 * comparator[0..3]
	 */
	switch (drvdata->numvmidc) {
	case 0x1:
		/* COMP0, bits[7:0] */
		config->vmid_mask0 = val1 & 0xFF;
		break;
	case 0x2:
		/* COMP1, bits[15:8] */
		config->vmid_mask0 = val1 & 0xFFFF;
		break;
	case 0x3:
		/* COMP2, bits[23:16] */
		config->vmid_mask0 = val1 & 0xFFFFFF;
		break;
	case 0x4:
		/* COMP3, bits[31:24] */
		config->vmid_mask0 = val1;
		break;
	case 0x5:
		/* COMP4, bits[7:0] */
		config->vmid_mask0 = val1;
		config->vmid_mask1 = val2 & 0xFF;
		break;
	case 0x6:
		/* COMP5, bits[15:8] */
		config->vmid_mask0 = val1;
		config->vmid_mask1 = val2 & 0xFFFF;
		break;
	case 0x7:
		/* COMP6, bits[23:16] */
		config->vmid_mask0 = val1;
		config->vmid_mask1 = val2 & 0xFFFFFF;
		break;
	case 0x8:
		/* COMP7, bits[31:24] */
		config->vmid_mask0 = val1;
		config->vmid_mask1 = val2;
		break;
	default:
		break;
	}

	/*
	 * If software sets a mask bit to 1, it must program the relevant
	 * byte of the vmid comparator value register to 0x0, otherwise
	 * behavior is unpredictable.  For example, if bit[3] of vmid_mask0
	 * is 1 (mask byte 0 covers comparator 0), bits[31:24] (byte 3) of
	 * the vmid comparator0 value register must be cleared.
	 */
	mask = config->vmid_mask0;
	for (i = 0; i < drvdata->numvmidc; i++) {
		/* mask value of corresponding vmid comparator */
		maskbyte = mask & ETMv4_EVENT_MASK;
		/*
		 * each bit corresponds to a byte of respective vmid comparator
		 * value register
		 */
		for (j = 0; j < 8; j++) {
			if (maskbyte & 1)
				config->vmid_val[i] &= ~(0xFFUL << (j * 8));
			maskbyte >>= 1;
		}
		/* Select the next vmid comparator mask value */
		if (i == 3)
			/* vmid comparators[4-7] */
			mask = config->vmid_mask1;
		else
			mask >>= 0x8;
	}
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(vmid_masks);
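
/*
 * vmid_masks takes the same "mask0 mask1" hex pair as ctxid_masks above,
 * each mask byte controlling one VMID comparator and each bit within it
 * masking one byte of that comparator's value.
 */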

static ssize_t cpu_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	int val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->cpu;
	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}
static DEVICE_ATTR_RO(cpu);

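/*
 * Configuration and capability attributes exposed at the top level of the
 * ETMv4 sysfs directory. The show/store handlers above operate on the
 * software state in etmv4_config and etmv4_drvdata rather than on the
 * trace unit registers directly.
 */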
static struct attribute *coresight_etmv4_attrs[] = {
	&dev_attr_nr_pe_cmp.attr,
	&dev_attr_nr_addr_cmp.attr,
	&dev_attr_nr_cntr.attr,
	&dev_attr_nr_ext_inp.attr,
	&dev_attr_numcidc.attr,
	&dev_attr_numvmidc.attr,
	&dev_attr_nrseqstate.attr,
	&dev_attr_nr_resource.attr,
	&dev_attr_nr_ss_cmp.attr,
	&dev_attr_reset.attr,
	&dev_attr_mode.attr,
	&dev_attr_pe.attr,
	&dev_attr_event.attr,
	&dev_attr_event_instren.attr,
	&dev_attr_event_ts.attr,
	&dev_attr_syncfreq.attr,
	&dev_attr_cyc_threshold.attr,
	&dev_attr_bb_ctrl.attr,
	&dev_attr_event_vinst.attr,
	&dev_attr_s_exlevel_vinst.attr,
	&dev_attr_ns_exlevel_vinst.attr,
	&dev_attr_addr_idx.attr,
	&dev_attr_addr_instdatatype.attr,
	&dev_attr_addr_single.attr,
	&dev_attr_addr_range.attr,
	&dev_attr_addr_start.attr,
	&dev_attr_addr_stop.attr,
	&dev_attr_addr_ctxtype.attr,
	&dev_attr_addr_context.attr,
	&dev_attr_seq_idx.attr,
	&dev_attr_seq_state.attr,
	&dev_attr_seq_event.attr,
	&dev_attr_seq_reset_event.attr,
	&dev_attr_cntr_idx.attr,
	&dev_attr_cntrldvr.attr,
	&dev_attr_cntr_val.attr,
	&dev_attr_cntr_ctrl.attr,
	&dev_attr_res_idx.attr,
	&dev_attr_res_ctrl.attr,
	&dev_attr_ctxid_idx.attr,
	&dev_attr_ctxid_pid.attr,
	&dev_attr_ctxid_masks.attr,
	&dev_attr_vmid_idx.attr,
	&dev_attr_vmid_val.attr,
	&dev_attr_vmid_masks.attr,
	&dev_attr_cpu.attr,
	NULL,
};

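/*
 * Parameter bundle for reading a trace unit register on the CPU the ETM
 * is affine to: 'addr' carries the MMIO address in and 'data' carries the
 * value read back by do_smp_cross_read().
 */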
struct etmv4_reg {
	void __iomem *addr;
	u32 data;
};

static void do_smp_cross_read(void *data)
{
	struct etmv4_reg *reg = data;

	reg->data = readl_relaxed(reg->addr);
}

static u32 etmv4_cross_read(const struct device *dev, u32 offset)
{
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);
	struct etmv4_reg reg;

	reg.addr = drvdata->base + offset;
	/*
	 * smp cross call ensures the CPU will be powered up before
	 * accessing the ETMv4 trace core registers
	 */
	smp_call_function_single(drvdata->cpu, do_smp_cross_read, &reg, 1);
	return reg.data;
}

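/*
 * coresight_etm4x_reg() builds a read-only sysfs attribute for the register
 * at the given offset, while coresight_etm4x_cross_read() routes the read
 * through etmv4_cross_read() so it executes on the CPU the tracer is bound
 * to.
 */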
#define coresight_etm4x_reg(name, offset)			\
	coresight_simple_reg32(struct etmv4_drvdata, name, offset)

#define coresight_etm4x_cross_read(name, offset)			\
	coresight_simple_func(struct etmv4_drvdata, etmv4_cross_read,	\
			      name, offset)

coresight_etm4x_reg(trcpdcr, TRCPDCR);
coresight_etm4x_reg(trcpdsr, TRCPDSR);
coresight_etm4x_reg(trclsr, TRCLSR);
coresight_etm4x_reg(trcauthstatus, TRCAUTHSTATUS);
coresight_etm4x_reg(trcdevid, TRCDEVID);
coresight_etm4x_reg(trcdevtype, TRCDEVTYPE);
coresight_etm4x_reg(trcpidr0, TRCPIDR0);
coresight_etm4x_reg(trcpidr1, TRCPIDR1);
coresight_etm4x_reg(trcpidr2, TRCPIDR2);
coresight_etm4x_reg(trcpidr3, TRCPIDR3);
coresight_etm4x_cross_read(trcoslsr, TRCOSLSR);
coresight_etm4x_cross_read(trcconfig, TRCCONFIGR);
coresight_etm4x_cross_read(trctraceid, TRCTRACEIDR);

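/*
 * Management registers, exposed read-only under the "mgmt" sysfs group as
 * raw register values.
 */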
static struct attribute *coresight_etmv4_mgmt_attrs[] = {
	&dev_attr_trcoslsr.attr,
	&dev_attr_trcpdcr.attr,
	&dev_attr_trcpdsr.attr,
	&dev_attr_trclsr.attr,
	&dev_attr_trcconfig.attr,
	&dev_attr_trctraceid.attr,
	&dev_attr_trcauthstatus.attr,
	&dev_attr_trcdevid.attr,
	&dev_attr_trcdevtype.attr,
	&dev_attr_trcpidr0.attr,
	&dev_attr_trcpidr1.attr,
	&dev_attr_trcpidr2.attr,
	&dev_attr_trcpidr3.attr,
	NULL,
};

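/*
 * TRCIDRn ID registers describing the features this trace unit implements.
 * They are read through the cross-CPU helper since the trace registers are
 * only accessible while the associated CPU is powered up.
 */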
coresight_etm4x_cross_read(trcidr0, TRCIDR0);
coresight_etm4x_cross_read(trcidr1, TRCIDR1);
coresight_etm4x_cross_read(trcidr2, TRCIDR2);
coresight_etm4x_cross_read(trcidr3, TRCIDR3);
coresight_etm4x_cross_read(trcidr4, TRCIDR4);
coresight_etm4x_cross_read(trcidr5, TRCIDR5);
/* trcidr[6,7] are reserved */
coresight_etm4x_cross_read(trcidr8, TRCIDR8);
coresight_etm4x_cross_read(trcidr9, TRCIDR9);
coresight_etm4x_cross_read(trcidr10, TRCIDR10);
coresight_etm4x_cross_read(trcidr11, TRCIDR11);
coresight_etm4x_cross_read(trcidr12, TRCIDR12);
coresight_etm4x_cross_read(trcidr13, TRCIDR13);

static struct attribute *coresight_etmv4_trcidr_attrs[] = {
	&dev_attr_trcidr0.attr,
	&dev_attr_trcidr1.attr,
	&dev_attr_trcidr2.attr,
	&dev_attr_trcidr3.attr,
	&dev_attr_trcidr4.attr,
	&dev_attr_trcidr5.attr,
	/* trcidr[6,7] are reserved */
	&dev_attr_trcidr8.attr,
	&dev_attr_trcidr9.attr,
	&dev_attr_trcidr10.attr,
	&dev_attr_trcidr11.attr,
	&dev_attr_trcidr12.attr,
	&dev_attr_trcidr13.attr,
	NULL,
};

static const struct attribute_group coresight_etmv4_group = {
	.attrs = coresight_etmv4_attrs,
};

static const struct attribute_group coresight_etmv4_mgmt_group = {
	.attrs = coresight_etmv4_mgmt_attrs,
	.name = "mgmt",
};

static const struct attribute_group coresight_etmv4_trcidr_group = {
	.attrs = coresight_etmv4_trcidr_attrs,
	.name = "trcidr",
};

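/*
 * Resulting sysfs layout (illustrative; the device name is platform
 * dependent):
 *
 *   /sys/bus/coresight/devices/<etm-device>/
 *   |-- mode, reset, addr_idx, ctxid_idx, ...   (coresight_etmv4_group)
 *   |-- mgmt/trcconfig, mgmt/trctraceid, ...    (coresight_etmv4_mgmt_group)
 *   `-- trcidr/trcidr0 ... trcidr/trcidr13      (coresight_etmv4_trcidr_group)
 */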
const struct attribute_group *coresight_etmv4_groups[] = {
	&coresight_etmv4_group,
	&coresight_etmv4_mgmt_group,
	&coresight_etmv4_trcidr_group,
	NULL,
};