1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright(C) 2015 Linaro Limited. All rights reserved.
4 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
5 */
6
7 #include <linux/pid_namespace.h>
8 #include <linux/pm_runtime.h>
9 #include <linux/sysfs.h>
10 #include "coresight-etm4x.h"
11 #include "coresight-priv.h"
12
13 static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude)
14 {
15 u8 idx;
16 struct etmv4_config *config = &drvdata->config;
17
18 idx = config->addr_idx;
19
20 /*
21 * TRCACATRn.TYPE bit[1:0]: type of comparison
22 * the trace unit performs
23 */
24 if (BMVAL(config->addr_acc[idx], 0, 1) == ETM_INSTR_ADDR) {
25 if (idx % 2 != 0)
26 return -EINVAL;
27
28 /*
29 * We are performing instruction address comparison. Set the
30 * relevant bit of ViewInst Include/Exclude Control register
31 * for corresponding address comparator pair.
32 */
33 if (config->addr_type[idx] != ETM_ADDR_TYPE_RANGE ||
34 config->addr_type[idx + 1] != ETM_ADDR_TYPE_RANGE)
35 return -EINVAL;
36
37 if (exclude == true) {
38 /*
39 * Set exclude bit and unset the include bit
40 * corresponding to comparator pair
41 */
42 config->viiectlr |= BIT(idx / 2 + 16);
43 config->viiectlr &= ~BIT(idx / 2);
44 } else {
45 /*
46 * Set include bit and unset exclude bit
47 * corresponding to comparator pair
48 */
49 config->viiectlr |= BIT(idx / 2);
50 config->viiectlr &= ~BIT(idx / 2 + 16);
51 }
52 }
53 return 0;
54 }
55
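/*
 * The nr_* and num* read-only attributes below report the resource counts
 * (PE comparator inputs, address comparators, counters, external inputs,
 * context ID and VMID comparators, sequencer states, resource selectors
 * and single-shot comparators) that were discovered for this trace unit
 * at probe time and cached in struct etmv4_drvdata.
 */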
56 static ssize_t nr_pe_cmp_show(struct device *dev,
57 struct device_attribute *attr,
58 char *buf)
59 {
60 unsigned long val;
61 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
62
63 val = drvdata->nr_pe_cmp;
64 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
65 }
66 static DEVICE_ATTR_RO(nr_pe_cmp);
67
68 static ssize_t nr_addr_cmp_show(struct device *dev,
69 struct device_attribute *attr,
70 char *buf)
71 {
72 unsigned long val;
73 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
74
75 val = drvdata->nr_addr_cmp;
76 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
77 }
78 static DEVICE_ATTR_RO(nr_addr_cmp);
79
80 static ssize_t nr_cntr_show(struct device *dev,
81 struct device_attribute *attr,
82 char *buf)
83 {
84 unsigned long val;
85 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
86
87 val = drvdata->nr_cntr;
88 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
89 }
90 static DEVICE_ATTR_RO(nr_cntr);
91
92 static ssize_t nr_ext_inp_show(struct device *dev,
93 struct device_attribute *attr,
94 char *buf)
95 {
96 unsigned long val;
97 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
98
99 val = drvdata->nr_ext_inp;
100 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
101 }
102 static DEVICE_ATTR_RO(nr_ext_inp);
103
104 static ssize_t numcidc_show(struct device *dev,
105 struct device_attribute *attr,
106 char *buf)
107 {
108 unsigned long val;
109 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
110
111 val = drvdata->numcidc;
112 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
113 }
114 static DEVICE_ATTR_RO(numcidc);
115
116 static ssize_t numvmidc_show(struct device *dev,
117 struct device_attribute *attr,
118 char *buf)
119 {
120 unsigned long val;
121 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
122
123 val = drvdata->numvmidc;
124 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
125 }
126 static DEVICE_ATTR_RO(numvmidc);
127
128 static ssize_t nrseqstate_show(struct device *dev,
129 struct device_attribute *attr,
130 char *buf)
131 {
132 unsigned long val;
133 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
134
135 val = drvdata->nrseqstate;
136 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
137 }
138 static DEVICE_ATTR_RO(nrseqstate);
139
140 static ssize_t nr_resource_show(struct device *dev,
141 struct device_attribute *attr,
142 char *buf)
143 {
144 unsigned long val;
145 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
146
147 val = drvdata->nr_resource;
148 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
149 }
150 static DEVICE_ATTR_RO(nr_resource);
151
152 static ssize_t nr_ss_cmp_show(struct device *dev,
153 struct device_attribute *attr,
154 char *buf)
155 {
156 unsigned long val;
157 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
158
159 val = drvdata->nr_ss_cmp;
160 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
161 }
162 static DEVICE_ATTR_RO(nr_ss_cmp);
163
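/*
 * Writing to the "reset" attribute returns the software view of the trace
 * configuration held in drvdata->config to its default state; a non-zero
 * value additionally clears the mode bits accumulated via the "mode"
 * attribute.
 */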
164 static ssize_t reset_store(struct device *dev,
165 struct device_attribute *attr,
166 const char *buf, size_t size)
167 {
168 int i;
169 unsigned long val;
170 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
171 struct etmv4_config *config = &drvdata->config;
172
173 if (kstrtoul(buf, 16, &val))
174 return -EINVAL;
175
176 spin_lock(&drvdata->spinlock);
177 if (val)
178 config->mode = 0x0;
179
180 /* Disable data tracing: do not trace load and store data transfers */
181 config->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE);
182 config->cfg &= ~(BIT(1) | BIT(2));
183
184 /* Disable data value and data address tracing */
185 config->mode &= ~(ETM_MODE_DATA_TRACE_ADDR |
186 ETM_MODE_DATA_TRACE_VAL);
187 config->cfg &= ~(BIT(16) | BIT(17));
188
189 /* Disable all events tracing */
190 config->eventctrl0 = 0x0;
191 config->eventctrl1 = 0x0;
192
193 /* Disable timestamp event */
194 config->ts_ctrl = 0x0;
195
196 /* Disable stalling */
197 config->stall_ctrl = 0x0;
198
199 /* Reset trace synchronization period to 2^8 = 256 bytes */
200 if (drvdata->syncpr == false)
201 config->syncfreq = 0x8;
202
203 /*
204 * Enable ViewInst to trace everything with start-stop logic in
205 * started state. ARM recommends start-stop logic is set before
206 * each trace run.
207 */
208 config->vinst_ctrl |= BIT(0);
209 if (drvdata->nr_addr_cmp == true) {
210 config->mode |= ETM_MODE_VIEWINST_STARTSTOP;
211 /* SSSTATUS, bit[9] */
212 config->vinst_ctrl |= BIT(9);
213 }
214
215 /* No address range filtering for ViewInst */
216 config->viiectlr = 0x0;
217
218 /* No start-stop filtering for ViewInst */
219 config->vissctlr = 0x0;
220
221 /* Disable seq events */
222 for (i = 0; i < drvdata->nrseqstate-1; i++)
223 config->seq_ctrl[i] = 0x0;
224 config->seq_rst = 0x0;
225 config->seq_state = 0x0;
226
227 /* Disable external input events */
228 config->ext_inp = 0x0;
229
230 config->cntr_idx = 0x0;
231 for (i = 0; i < drvdata->nr_cntr; i++) {
232 config->cntrldvr[i] = 0x0;
233 config->cntr_ctrl[i] = 0x0;
234 config->cntr_val[i] = 0x0;
235 }
236
237 config->res_idx = 0x0;
238 for (i = 0; i < drvdata->nr_resource; i++)
239 config->res_ctrl[i] = 0x0;
240
241 for (i = 0; i < drvdata->nr_ss_cmp; i++) {
242 config->ss_ctrl[i] = 0x0;
243 config->ss_pe_cmp[i] = 0x0;
244 }
245
246 config->addr_idx = 0x0;
247 for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
248 config->addr_val[i] = 0x0;
249 config->addr_acc[i] = 0x0;
250 config->addr_type[i] = ETM_ADDR_TYPE_NONE;
251 }
252
253 config->ctxid_idx = 0x0;
254 for (i = 0; i < drvdata->numcidc; i++)
255 config->ctxid_pid[i] = 0x0;
256
257 config->ctxid_mask0 = 0x0;
258 config->ctxid_mask1 = 0x0;
259
260 config->vmid_idx = 0x0;
261 for (i = 0; i < drvdata->numvmidc; i++)
262 config->vmid_val[i] = 0x0;
263 config->vmid_mask0 = 0x0;
264 config->vmid_mask1 = 0x0;
265
266 drvdata->trcid = drvdata->cpu + 1;
267
268 spin_unlock(&drvdata->spinlock);
269
270 return size;
271 }
272 static DEVICE_ATTR_WO(reset);
273
274 static ssize_t mode_show(struct device *dev,
275 struct device_attribute *attr,
276 char *buf)
277 {
278 unsigned long val;
279 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
280 struct etmv4_config *config = &drvdata->config;
281
282 val = config->mode;
283 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
284 }
285
286 static ssize_t mode_store(struct device *dev,
287 struct device_attribute *attr,
288 const char *buf, size_t size)
289 {
290 unsigned long val, mode;
291 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
292 struct etmv4_config *config = &drvdata->config;
293
294 if (kstrtoul(buf, 16, &val))
295 return -EINVAL;
296
297 spin_lock(&drvdata->spinlock);
298 config->mode = val & ETMv4_MODE_ALL;
299
300 if (config->mode & ETM_MODE_EXCLUDE)
301 etm4_set_mode_exclude(drvdata, true);
302 else
303 etm4_set_mode_exclude(drvdata, false);
304
305 if (drvdata->instrp0 == true) {
306 /* start by clearing instruction P0 field */
307 config->cfg &= ~(BIT(1) | BIT(2));
308 if (config->mode & ETM_MODE_LOAD)
309 /* 0b01 Trace load instructions as P0 instructions */
310 config->cfg |= BIT(1);
311 if (config->mode & ETM_MODE_STORE)
312 /* 0b10 Trace store instructions as P0 instructions */
313 config->cfg |= BIT(2);
314 if (config->mode & ETM_MODE_LOAD_STORE)
315 /*
316 * 0b11 Trace load and store instructions
317 * as P0 instructions
318 */
319 config->cfg |= BIT(1) | BIT(2);
320 }
321
322 /* bit[3], Branch broadcast mode */
323 if ((config->mode & ETM_MODE_BB) && (drvdata->trcbb == true))
324 config->cfg |= BIT(3);
325 else
326 config->cfg &= ~BIT(3);
327
328 /* bit[4], Cycle counting instruction trace bit */
329 if ((config->mode & ETMv4_MODE_CYCACC) &&
330 (drvdata->trccci == true))
331 config->cfg |= BIT(4);
332 else
333 config->cfg &= ~BIT(4);
334
335 /* bit[6], Context ID tracing bit */
336 if ((config->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size))
337 config->cfg |= BIT(6);
338 else
339 config->cfg &= ~BIT(6);
340
341 if ((config->mode & ETM_MODE_VMID) && (drvdata->vmid_size))
342 config->cfg |= BIT(7);
343 else
344 config->cfg &= ~BIT(7);
345
346 /* bits[10:8], Conditional instruction tracing bit */
347 mode = ETM_MODE_COND(config->mode);
348 if (drvdata->trccond == true) {
349 config->cfg &= ~(BIT(8) | BIT(9) | BIT(10));
350 config->cfg |= mode << 8;
351 }
352
353 /* bit[11], Global timestamp tracing bit */
354 if ((config->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size))
355 config->cfg |= BIT(11);
356 else
357 config->cfg &= ~BIT(11);
358
359 /* bit[12], Return stack enable bit */
360 if ((config->mode & ETM_MODE_RETURNSTACK) &&
361 (drvdata->retstack == true))
362 config->cfg |= BIT(12);
363 else
364 config->cfg &= ~BIT(12);
365
366 /* bits[14:13], Q element enable field */
367 mode = ETM_MODE_QELEM(config->mode);
368 /* start by clearing QE bits */
369 config->cfg &= ~(BIT(13) | BIT(14));
370 /* if supported, Q elements with instruction counts are enabled */
371 if ((mode & BIT(0)) && (drvdata->q_support & BIT(0)))
372 config->cfg |= BIT(13);
373 /*
374 * if supported, Q elements with and without instruction
375 * counts are enabled
376 */
377 if ((mode & BIT(1)) && (drvdata->q_support & BIT(1)))
378 config->cfg |= BIT(14);
379
380 /* bit[11], AMBA Trace Bus (ATB) trigger enable bit */
381 if ((config->mode & ETM_MODE_ATB_TRIGGER) &&
382 (drvdata->atbtrig == true))
383 config->eventctrl1 |= BIT(11);
384 else
385 config->eventctrl1 &= ~BIT(11);
386
387 /* bit[12], Low-power state behavior override bit */
388 if ((config->mode & ETM_MODE_LPOVERRIDE) &&
389 (drvdata->lpoverride == true))
390 config->eventctrl1 |= BIT(12);
391 else
392 config->eventctrl1 &= ~BIT(12);
393
394 /* bit[8], Instruction stall bit */
395 if (config->mode & ETM_MODE_ISTALL_EN)
396 config->stall_ctrl |= BIT(8);
397 else
398 config->stall_ctrl &= ~BIT(8);
399
400 /* bit[10], Prioritize instruction trace bit */
401 if (config->mode & ETM_MODE_INSTPRIO)
402 config->stall_ctrl |= BIT(10);
403 else
404 config->stall_ctrl &= ~BIT(10);
405
406 /* bit[13], Trace overflow prevention bit */
407 if ((config->mode & ETM_MODE_NOOVERFLOW) &&
408 (drvdata->nooverflow == true))
409 config->stall_ctrl |= BIT(13);
410 else
411 config->stall_ctrl &= ~BIT(13);
412
413 /* bit[9] Start/stop logic control bit */
414 if (config->mode & ETM_MODE_VIEWINST_STARTSTOP)
415 config->vinst_ctrl |= BIT(9);
416 else
417 config->vinst_ctrl &= ~BIT(9);
418
419 /* bit[10], Whether a trace unit must trace a Reset exception */
420 if (config->mode & ETM_MODE_TRACE_RESET)
421 config->vinst_ctrl |= BIT(10);
422 else
423 config->vinst_ctrl &= ~BIT(10);
424
425 /* bit[11], Whether a trace unit must trace a system error exception */
426 if ((config->mode & ETM_MODE_TRACE_ERR) &&
427 (drvdata->trc_error == true))
428 config->vinst_ctrl |= BIT(11);
429 else
430 config->vinst_ctrl &= ~BIT(11);
431
432 if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
433 etm4_config_trace_mode(config);
434
435 spin_unlock(&drvdata->spinlock);
436
437 return size;
438 }
439 static DEVICE_ATTR_RW(mode);
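/*
 * Example usage from user space (the sysfs path is illustrative; the exact
 * device name depends on the platform):
 *
 *   echo 0x0 > /sys/bus/coresight/devices/etm0/mode
 *
 * The written value is parsed as hexadecimal, masked with ETMv4_MODE_ALL,
 * and then translated into the cfg, eventctrl1, stall_ctrl and vinst_ctrl
 * fields programmed above.
 */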
440
441 static ssize_t pe_show(struct device *dev,
442 struct device_attribute *attr,
443 char *buf)
444 {
445 unsigned long val;
446 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
447 struct etmv4_config *config = &drvdata->config;
448
449 val = config->pe_sel;
450 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
451 }
452
453 static ssize_t pe_store(struct device *dev,
454 struct device_attribute *attr,
455 const char *buf, size_t size)
456 {
457 unsigned long val;
458 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
459 struct etmv4_config *config = &drvdata->config;
460
461 if (kstrtoul(buf, 16, &val))
462 return -EINVAL;
463
464 spin_lock(&drvdata->spinlock);
465 if (val > drvdata->nr_pe) {
466 spin_unlock(&drvdata->spinlock);
467 return -EINVAL;
468 }
469
470 config->pe_sel = val;
471 spin_unlock(&drvdata->spinlock);
472 return size;
473 }
474 static DEVICE_ATTR_RW(pe);
475
476 static ssize_t event_show(struct device *dev,
477 struct device_attribute *attr,
478 char *buf)
479 {
480 unsigned long val;
481 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
482 struct etmv4_config *config = &drvdata->config;
483
484 val = config->eventctrl0;
485 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
486 }
487
488 static ssize_t event_store(struct device *dev,
489 struct device_attribute *attr,
490 const char *buf, size_t size)
491 {
492 unsigned long val;
493 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
494 struct etmv4_config *config = &drvdata->config;
495
496 if (kstrtoul(buf, 16, &val))
497 return -EINVAL;
498
499 spin_lock(&drvdata->spinlock);
500 switch (drvdata->nr_event) {
501 case 0x0:
502 /* EVENT0, bits[7:0] */
503 config->eventctrl0 = val & 0xFF;
504 break;
505 case 0x1:
506 /* EVENT1, bits[15:8] */
507 config->eventctrl0 = val & 0xFFFF;
508 break;
509 case 0x2:
510 /* EVENT2, bits[23:16] */
511 config->eventctrl0 = val & 0xFFFFFF;
512 break;
513 case 0x3:
514 /* EVENT3, bits[31:24] */
515 config->eventctrl0 = val;
516 break;
517 default:
518 break;
519 }
520 spin_unlock(&drvdata->spinlock);
521 return size;
522 }
523 static DEVICE_ATTR_RW(event);
524
525 static ssize_t event_instren_show(struct device *dev,
526 struct device_attribute *attr,
527 char *buf)
528 {
529 unsigned long val;
530 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
531 struct etmv4_config *config = &drvdata->config;
532
533 val = BMVAL(config->eventctrl1, 0, 3);
534 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
535 }
536
537 static ssize_t event_instren_store(struct device *dev,
538 struct device_attribute *attr,
539 const char *buf, size_t size)
540 {
541 unsigned long val;
542 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
543 struct etmv4_config *config = &drvdata->config;
544
545 if (kstrtoul(buf, 16, &val))
546 return -EINVAL;
547
548 spin_lock(&drvdata->spinlock);
549 /* start by clearing all instruction event enable bits */
550 config->eventctrl1 &= ~(BIT(0) | BIT(1) | BIT(2) | BIT(3));
551 switch (drvdata->nr_event) {
552 case 0x0:
553 /* generate Event element for event 1 */
554 config->eventctrl1 |= val & BIT(1);
555 break;
556 case 0x1:
557 /* generate Event element for event 1 and 2 */
558 config->eventctrl1 |= val & (BIT(0) | BIT(1));
559 break;
560 case 0x2:
561 /* generate Event element for event 1, 2 and 3 */
562 config->eventctrl1 |= val & (BIT(0) | BIT(1) | BIT(2));
563 break;
564 case 0x3:
565 /* generate Event element for all 4 events */
566 config->eventctrl1 |= val & 0xF;
567 break;
568 default:
569 break;
570 }
571 spin_unlock(&drvdata->spinlock);
572 return size;
573 }
574 static DEVICE_ATTR_RW(event_instren);
575
576 static ssize_t event_ts_show(struct device *dev,
577 struct device_attribute *attr,
578 char *buf)
579 {
580 unsigned long val;
581 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
582 struct etmv4_config *config = &drvdata->config;
583
584 val = config->ts_ctrl;
585 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
586 }
587
588 static ssize_t event_ts_store(struct device *dev,
589 struct device_attribute *attr,
590 const char *buf, size_t size)
591 {
592 unsigned long val;
593 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
594 struct etmv4_config *config = &drvdata->config;
595
596 if (kstrtoul(buf, 16, &val))
597 return -EINVAL;
598 if (!drvdata->ts_size)
599 return -EINVAL;
600
601 config->ts_ctrl = val & ETMv4_EVENT_MASK;
602 return size;
603 }
604 static DEVICE_ATTR_RW(event_ts);
605
606 static ssize_t syncfreq_show(struct device *dev,
607 struct device_attribute *attr,
608 char *buf)
609 {
610 unsigned long val;
611 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
612 struct etmv4_config *config = &drvdata->config;
613
614 val = config->syncfreq;
615 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
616 }
617
618 static ssize_t syncfreq_store(struct device *dev,
619 struct device_attribute *attr,
620 const char *buf, size_t size)
621 {
622 unsigned long val;
623 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
624 struct etmv4_config *config = &drvdata->config;
625
626 if (kstrtoul(buf, 16, &val))
627 return -EINVAL;
628 if (drvdata->syncpr == true)
629 return -EINVAL;
630
631 config->syncfreq = val & ETMv4_SYNC_MASK;
632 return size;
633 }
634 static DEVICE_ATTR_RW(syncfreq);
635
636 static ssize_t cyc_threshold_show(struct device *dev,
637 struct device_attribute *attr,
638 char *buf)
639 {
640 unsigned long val;
641 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
642 struct etmv4_config *config = &drvdata->config;
643
644 val = config->ccctlr;
645 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
646 }
647
648 static ssize_t cyc_threshold_store(struct device *dev,
649 struct device_attribute *attr,
650 const char *buf, size_t size)
651 {
652 unsigned long val;
653 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
654 struct etmv4_config *config = &drvdata->config;
655
656 if (kstrtoul(buf, 16, &val))
657 return -EINVAL;
658 if (val < drvdata->ccitmin)
659 return -EINVAL;
660
661 config->ccctlr = val & ETM_CYC_THRESHOLD_MASK;
662 return size;
663 }
664 static DEVICE_ATTR_RW(cyc_threshold);
665
666 static ssize_t bb_ctrl_show(struct device *dev,
667 struct device_attribute *attr,
668 char *buf)
669 {
670 unsigned long val;
671 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
672 struct etmv4_config *config = &drvdata->config;
673
674 val = config->bb_ctrl;
675 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
676 }
677
678 static ssize_t bb_ctrl_store(struct device *dev,
679 struct device_attribute *attr,
680 const char *buf, size_t size)
681 {
682 unsigned long val;
683 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
684 struct etmv4_config *config = &drvdata->config;
685
686 if (kstrtoul(buf, 16, &val))
687 return -EINVAL;
688 if (drvdata->trcbb == false)
689 return -EINVAL;
690 if (!drvdata->nr_addr_cmp)
691 return -EINVAL;
692 /*
693 * Bit[7:0] selects which address range comparator is used for
694 * branch broadcast control.
695 */
696 if (BMVAL(val, 0, 7) > drvdata->nr_addr_cmp)
697 return -EINVAL;
698
699 config->bb_ctrl = val;
700 return size;
701 }
702 static DEVICE_ATTR_RW(bb_ctrl);
703
704 static ssize_t event_vinst_show(struct device *dev,
705 struct device_attribute *attr,
706 char *buf)
707 {
708 unsigned long val;
709 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
710 struct etmv4_config *config = &drvdata->config;
711
712 val = config->vinst_ctrl & ETMv4_EVENT_MASK;
713 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
714 }
715
716 static ssize_t event_vinst_store(struct device *dev,
717 struct device_attribute *attr,
718 const char *buf, size_t size)
719 {
720 unsigned long val;
721 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
722 struct etmv4_config *config = &drvdata->config;
723
724 if (kstrtoul(buf, 16, &val))
725 return -EINVAL;
726
727 spin_lock(&drvdata->spinlock);
728 val &= ETMv4_EVENT_MASK;
729 config->vinst_ctrl &= ~ETMv4_EVENT_MASK;
730 config->vinst_ctrl |= val;
731 spin_unlock(&drvdata->spinlock);
732 return size;
733 }
734 static DEVICE_ATTR_RW(event_vinst);
735
736 static ssize_t s_exlevel_vinst_show(struct device *dev,
737 struct device_attribute *attr,
738 char *buf)
739 {
740 unsigned long val;
741 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
742 struct etmv4_config *config = &drvdata->config;
743
744 val = BMVAL(config->vinst_ctrl, 16, 19);
745 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
746 }
747
748 static ssize_t s_exlevel_vinst_store(struct device *dev,
749 struct device_attribute *attr,
750 const char *buf, size_t size)
751 {
752 unsigned long val;
753 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
754 struct etmv4_config *config = &drvdata->config;
755
756 if (kstrtoul(buf, 16, &val))
757 return -EINVAL;
758
759 spin_lock(&drvdata->spinlock);
760 /* clear all EXLEVEL_S bits (bit[18] is never implemented) */
761 config->vinst_ctrl &= ~(BIT(16) | BIT(17) | BIT(19));
762 /* enable instruction tracing for corresponding exception level */
763 val &= drvdata->s_ex_level;
764 config->vinst_ctrl |= (val << 16);
765 spin_unlock(&drvdata->spinlock);
766 return size;
767 }
768 static DEVICE_ATTR_RW(s_exlevel_vinst);
769
770 static ssize_t ns_exlevel_vinst_show(struct device *dev,
771 struct device_attribute *attr,
772 char *buf)
773 {
774 unsigned long val;
775 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
776 struct etmv4_config *config = &drvdata->config;
777
778 /* EXLEVEL_NS, bits[23:20] */
779 val = BMVAL(config->vinst_ctrl, 20, 23);
780 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
781 }
782
783 static ssize_t ns_exlevel_vinst_store(struct device *dev,
784 struct device_attribute *attr,
785 const char *buf, size_t size)
786 {
787 unsigned long val;
788 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
789 struct etmv4_config *config = &drvdata->config;
790
791 if (kstrtoul(buf, 16, &val))
792 return -EINVAL;
793
794 spin_lock(&drvdata->spinlock);
795 /* clear EXLEVEL_NS bits (bit[23] is never implemented) */
796 config->vinst_ctrl &= ~(BIT(20) | BIT(21) | BIT(22));
797 /* enable instruction tracing for corresponding exception level */
798 val &= drvdata->ns_ex_level;
799 config->vinst_ctrl |= (val << 20);
800 spin_unlock(&drvdata->spinlock);
801 return size;
802 }
803 static DEVICE_ATTR_RW(ns_exlevel_vinst);
804
805 static ssize_t addr_idx_show(struct device *dev,
806 struct device_attribute *attr,
807 char *buf)
808 {
809 unsigned long val;
810 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
811 struct etmv4_config *config = &drvdata->config;
812
813 val = config->addr_idx;
814 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
815 }
816
817 static ssize_t addr_idx_store(struct device *dev,
818 struct device_attribute *attr,
819 const char *buf, size_t size)
820 {
821 unsigned long val;
822 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
823 struct etmv4_config *config = &drvdata->config;
824
825 if (kstrtoul(buf, 16, &val))
826 return -EINVAL;
827 if (val >= drvdata->nr_addr_cmp * 2)
828 return -EINVAL;
829
830 /*
831 * Use spinlock to ensure index doesn't change while it gets
832 * dereferenced multiple times within a spinlock block elsewhere.
833 */
834 spin_lock(&drvdata->spinlock);
835 config->addr_idx = val;
836 spin_unlock(&drvdata->spinlock);
837 return size;
838 }
839 static DEVICE_ATTR_RW(addr_idx);
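/*
 * The addr_* attributes that follow all operate on the address comparator
 * currently selected through addr_idx, so a typical (illustrative) sequence
 * is to pick the comparator first and then program it:
 *
 *   echo 0x0 > addr_idx
 *   echo 'c0008000 c000ffff' > addr_range
 */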
840
841 static ssize_t addr_instdatatype_show(struct device *dev,
842 struct device_attribute *attr,
843 char *buf)
844 {
845 ssize_t len;
846 u8 val, idx;
847 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
848 struct etmv4_config *config = &drvdata->config;
849
850 spin_lock(&drvdata->spinlock);
851 idx = config->addr_idx;
852 val = BMVAL(config->addr_acc[idx], 0, 1);
853 len = scnprintf(buf, PAGE_SIZE, "%s\n",
854 val == ETM_INSTR_ADDR ? "instr" :
855 (val == ETM_DATA_LOAD_ADDR ? "data_load" :
856 (val == ETM_DATA_STORE_ADDR ? "data_store" :
857 "data_load_store")));
858 spin_unlock(&drvdata->spinlock);
859 return len;
860 }
861
862 static ssize_t addr_instdatatype_store(struct device *dev,
863 struct device_attribute *attr,
864 const char *buf, size_t size)
865 {
866 u8 idx;
867 char str[20] = "";
868 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
869 struct etmv4_config *config = &drvdata->config;
870
871 if (strlen(buf) >= 20)
872 return -EINVAL;
873 if (sscanf(buf, "%s", str) != 1)
874 return -EINVAL;
875
876 spin_lock(&drvdata->spinlock);
877 idx = config->addr_idx;
878 if (!strcmp(str, "instr"))
879 /* TYPE, bits[1:0] */
880 config->addr_acc[idx] &= ~(BIT(0) | BIT(1));
881
882 spin_unlock(&drvdata->spinlock);
883 return size;
884 }
885 static DEVICE_ATTR_RW(addr_instdatatype);
886
887 static ssize_t addr_single_show(struct device *dev,
888 struct device_attribute *attr,
889 char *buf)
890 {
891 u8 idx;
892 unsigned long val;
893 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
894 struct etmv4_config *config = &drvdata->config;
895
896 idx = config->addr_idx;
897 spin_lock(&drvdata->spinlock);
898 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
899 config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
900 spin_unlock(&drvdata->spinlock);
901 return -EPERM;
902 }
903 val = (unsigned long)config->addr_val[idx];
904 spin_unlock(&drvdata->spinlock);
905 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
906 }
907
908 static ssize_t addr_single_store(struct device *dev,
909 struct device_attribute *attr,
910 const char *buf, size_t size)
911 {
912 u8 idx;
913 unsigned long val;
914 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
915 struct etmv4_config *config = &drvdata->config;
916
917 if (kstrtoul(buf, 16, &val))
918 return -EINVAL;
919
920 spin_lock(&drvdata->spinlock);
921 idx = config->addr_idx;
922 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
923 config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
924 spin_unlock(&drvdata->spinlock);
925 return -EPERM;
926 }
927
928 config->addr_val[idx] = (u64)val;
929 config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
930 spin_unlock(&drvdata->spinlock);
931 return size;
932 }
933 static DEVICE_ATTR_RW(addr_single);
934
935 static ssize_t addr_range_show(struct device *dev,
936 struct device_attribute *attr,
937 char *buf)
938 {
939 u8 idx;
940 unsigned long val1, val2;
941 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
942 struct etmv4_config *config = &drvdata->config;
943
944 spin_lock(&drvdata->spinlock);
945 idx = config->addr_idx;
946 if (idx % 2 != 0) {
947 spin_unlock(&drvdata->spinlock);
948 return -EPERM;
949 }
950 if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
951 config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
952 (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
953 config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
954 spin_unlock(&drvdata->spinlock);
955 return -EPERM;
956 }
957
958 val1 = (unsigned long)config->addr_val[idx];
959 val2 = (unsigned long)config->addr_val[idx + 1];
960 spin_unlock(&drvdata->spinlock);
961 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
962 }
963
964 static ssize_t addr_range_store(struct device *dev,
965 struct device_attribute *attr,
966 const char *buf, size_t size)
967 {
968 u8 idx;
969 unsigned long val1, val2;
970 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
971 struct etmv4_config *config = &drvdata->config;
972
973 if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
974 return -EINVAL;
975 /* lower address comparator cannot have a higher address value */
976 if (val1 > val2)
977 return -EINVAL;
978
979 spin_lock(&drvdata->spinlock);
980 idx = config->addr_idx;
981 if (idx % 2 != 0) {
982 spin_unlock(&drvdata->spinlock);
983 return -EPERM;
984 }
985
986 if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
987 config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
988 (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
989 config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
990 spin_unlock(&drvdata->spinlock);
991 return -EPERM;
992 }
993
994 config->addr_val[idx] = (u64)val1;
995 config->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
996 config->addr_val[idx + 1] = (u64)val2;
997 config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
998 /*
999 * Program include or exclude control bits for vinst or vdata
1000 * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE
1001 */
1002 if (config->mode & ETM_MODE_EXCLUDE)
1003 etm4_set_mode_exclude(drvdata, true);
1004 else
1005 etm4_set_mode_exclude(drvdata, false);
1006
1007 spin_unlock(&drvdata->spinlock);
1008 return size;
1009 }
1010 static DEVICE_ATTR_RW(addr_range);
1011
1012 static ssize_t addr_start_show(struct device *dev,
1013 struct device_attribute *attr,
1014 char *buf)
1015 {
1016 u8 idx;
1017 unsigned long val;
1018 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1019 struct etmv4_config *config = &drvdata->config;
1020
1021 spin_lock(&drvdata->spinlock);
1022 idx = config->addr_idx;
1023
1024 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1025 config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1026 spin_unlock(&drvdata->spinlock);
1027 return -EPERM;
1028 }
1029
1030 val = (unsigned long)config->addr_val[idx];
1031 spin_unlock(&drvdata->spinlock);
1032 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1033 }
1034
1035 static ssize_t addr_start_store(struct device *dev,
1036 struct device_attribute *attr,
1037 const char *buf, size_t size)
1038 {
1039 u8 idx;
1040 unsigned long val;
1041 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1042 struct etmv4_config *config = &drvdata->config;
1043
1044 if (kstrtoul(buf, 16, &val))
1045 return -EINVAL;
1046
1047 spin_lock(&drvdata->spinlock);
1048 idx = config->addr_idx;
1049 if (!drvdata->nr_addr_cmp) {
1050 spin_unlock(&drvdata->spinlock);
1051 return -EINVAL;
1052 }
1053 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1054 config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1055 spin_unlock(&drvdata->spinlock);
1056 return -EPERM;
1057 }
1058
1059 config->addr_val[idx] = (u64)val;
1060 config->addr_type[idx] = ETM_ADDR_TYPE_START;
1061 config->vissctlr |= BIT(idx);
1062 /* SSSTATUS, bit[9] - turn on start/stop logic */
1063 config->vinst_ctrl |= BIT(9);
1064 spin_unlock(&drvdata->spinlock);
1065 return size;
1066 }
1067 static DEVICE_ATTR_RW(addr_start);
1068
1069 static ssize_t addr_stop_show(struct device *dev,
1070 struct device_attribute *attr,
1071 char *buf)
1072 {
1073 u8 idx;
1074 unsigned long val;
1075 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1076 struct etmv4_config *config = &drvdata->config;
1077
1078 spin_lock(&drvdata->spinlock);
1079 idx = config->addr_idx;
1080
1081 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1082 config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1083 spin_unlock(&drvdata->spinlock);
1084 return -EPERM;
1085 }
1086
1087 val = (unsigned long)config->addr_val[idx];
1088 spin_unlock(&drvdata->spinlock);
1089 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1090 }
1091
1092 static ssize_t addr_stop_store(struct device *dev,
1093 struct device_attribute *attr,
1094 const char *buf, size_t size)
1095 {
1096 u8 idx;
1097 unsigned long val;
1098 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1099 struct etmv4_config *config = &drvdata->config;
1100
1101 if (kstrtoul(buf, 16, &val))
1102 return -EINVAL;
1103
1104 spin_lock(&drvdata->spinlock);
1105 idx = config->addr_idx;
1106 if (!drvdata->nr_addr_cmp) {
1107 spin_unlock(&drvdata->spinlock);
1108 return -EINVAL;
1109 }
1110 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1111 config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1112 spin_unlock(&drvdata->spinlock);
1113 return -EPERM;
1114 }
1115
1116 config->addr_val[idx] = (u64)val;
1117 config->addr_type[idx] = ETM_ADDR_TYPE_STOP;
1118 config->vissctlr |= BIT(idx + 16);
1119 /* SSSTATUS, bit[9] - turn on start/stop logic */
1120 config->vinst_ctrl |= BIT(9);
1121 spin_unlock(&drvdata->spinlock);
1122 return size;
1123 }
1124 static DEVICE_ATTR_RW(addr_stop);
1125
1126 static ssize_t addr_ctxtype_show(struct device *dev,
1127 struct device_attribute *attr,
1128 char *buf)
1129 {
1130 ssize_t len;
1131 u8 idx, val;
1132 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1133 struct etmv4_config *config = &drvdata->config;
1134
1135 spin_lock(&drvdata->spinlock);
1136 idx = config->addr_idx;
1137 /* CONTEXTTYPE, bits[3:2] */
1138 val = BMVAL(config->addr_acc[idx], 2, 3);
1139 len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" :
1140 (val == ETM_CTX_CTXID ? "ctxid" :
1141 (val == ETM_CTX_VMID ? "vmid" : "all")));
1142 spin_unlock(&drvdata->spinlock);
1143 return len;
1144 }
1145
1146 static ssize_t addr_ctxtype_store(struct device *dev,
1147 struct device_attribute *attr,
1148 const char *buf, size_t size)
1149 {
1150 u8 idx;
1151 char str[10] = "";
1152 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1153 struct etmv4_config *config = &drvdata->config;
1154
1155 if (strlen(buf) >= 10)
1156 return -EINVAL;
1157 if (sscanf(buf, "%s", str) != 1)
1158 return -EINVAL;
1159
1160 spin_lock(&drvdata->spinlock);
1161 idx = config->addr_idx;
1162 if (!strcmp(str, "none"))
1163 /* start by clearing context type bits */
1164 config->addr_acc[idx] &= ~(BIT(2) | BIT(3));
1165 else if (!strcmp(str, "ctxid")) {
1166 /* 0b01 The trace unit performs a Context ID comparison */
1167 if (drvdata->numcidc) {
1168 config->addr_acc[idx] |= BIT(2);
1169 config->addr_acc[idx] &= ~BIT(3);
1170 }
1171 } else if (!strcmp(str, "vmid")) {
1172 /* 0b10 The trace unit performs a VMID comparison */
1173 if (drvdata->numvmidc) {
1174 config->addr_acc[idx] &= ~BIT(2);
1175 config->addr_acc[idx] |= BIT(3);
1176 }
1177 } else if (!strcmp(str, "all")) {
1178 /*
1179 * 0b11 The trace unit performs both a Context ID
1180 * comparison and a VMID comparison
1181 */
1182 if (drvdata->numcidc)
1183 config->addr_acc[idx] |= BIT(2);
1184 if (drvdata->numvmidc)
1185 config->addr_acc[idx] |= BIT(3);
1186 }
1187 spin_unlock(&drvdata->spinlock);
1188 return size;
1189 }
1190 static DEVICE_ATTR_RW(addr_ctxtype);
1191
1192 static ssize_t addr_context_show(struct device *dev,
1193 struct device_attribute *attr,
1194 char *buf)
1195 {
1196 u8 idx;
1197 unsigned long val;
1198 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1199 struct etmv4_config *config = &drvdata->config;
1200
1201 spin_lock(&drvdata->spinlock);
1202 idx = config->addr_idx;
1203 /* context ID comparator bits[6:4] */
1204 val = BMVAL(config->addr_acc[idx], 4, 6);
1205 spin_unlock(&drvdata->spinlock);
1206 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1207 }
1208
1209 static ssize_t addr_context_store(struct device *dev,
1210 struct device_attribute *attr,
1211 const char *buf, size_t size)
1212 {
1213 u8 idx;
1214 unsigned long val;
1215 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1216 struct etmv4_config *config = &drvdata->config;
1217
1218 if (kstrtoul(buf, 16, &val))
1219 return -EINVAL;
1220 if ((drvdata->numcidc <= 1) && (drvdata->numvmidc <= 1))
1221 return -EINVAL;
1222 if (val >= (drvdata->numcidc >= drvdata->numvmidc ?
1223 drvdata->numcidc : drvdata->numvmidc))
1224 return -EINVAL;
1225
1226 spin_lock(&drvdata->spinlock);
1227 idx = config->addr_idx;
1228 /* clear context ID comparator bits[6:4] */
1229 config->addr_acc[idx] &= ~(BIT(4) | BIT(5) | BIT(6));
1230 config->addr_acc[idx] |= (val << 4);
1231 spin_unlock(&drvdata->spinlock);
1232 return size;
1233 }
1234 static DEVICE_ATTR_RW(addr_context);
1235
1236 static ssize_t seq_idx_show(struct device *dev,
1237 struct device_attribute *attr,
1238 char *buf)
1239 {
1240 unsigned long val;
1241 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1242 struct etmv4_config *config = &drvdata->config;
1243
1244 val = config->seq_idx;
1245 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1246 }
1247
1248 static ssize_t seq_idx_store(struct device *dev,
1249 struct device_attribute *attr,
1250 const char *buf, size_t size)
1251 {
1252 unsigned long val;
1253 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1254 struct etmv4_config *config = &drvdata->config;
1255
1256 if (kstrtoul(buf, 16, &val))
1257 return -EINVAL;
1258 if (val >= drvdata->nrseqstate - 1)
1259 return -EINVAL;
1260
1261 /*
1262 * Use spinlock to ensure index doesn't change while it gets
1263 * dereferenced multiple times within a spinlock block elsewhere.
1264 */
1265 spin_lock(&drvdata->spinlock);
1266 config->seq_idx = val;
1267 spin_unlock(&drvdata->spinlock);
1268 return size;
1269 }
1270 static DEVICE_ATTR_RW(seq_idx);
1271
1272 static ssize_t seq_state_show(struct device *dev,
1273 struct device_attribute *attr,
1274 char *buf)
1275 {
1276 unsigned long val;
1277 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1278 struct etmv4_config *config = &drvdata->config;
1279
1280 val = config->seq_state;
1281 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1282 }
1283
1284 static ssize_t seq_state_store(struct device *dev,
1285 struct device_attribute *attr,
1286 const char *buf, size_t size)
1287 {
1288 unsigned long val;
1289 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1290 struct etmv4_config *config = &drvdata->config;
1291
1292 if (kstrtoul(buf, 16, &val))
1293 return -EINVAL;
1294 if (val >= drvdata->nrseqstate)
1295 return -EINVAL;
1296
1297 config->seq_state = val;
1298 return size;
1299 }
1300 static DEVICE_ATTR_RW(seq_state);
1301
1302 static ssize_t seq_event_show(struct device *dev,
1303 struct device_attribute *attr,
1304 char *buf)
1305 {
1306 u8 idx;
1307 unsigned long val;
1308 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1309 struct etmv4_config *config = &drvdata->config;
1310
1311 spin_lock(&drvdata->spinlock);
1312 idx = config->seq_idx;
1313 val = config->seq_ctrl[idx];
1314 spin_unlock(&drvdata->spinlock);
1315 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1316 }
1317
1318 static ssize_t seq_event_store(struct device *dev,
1319 struct device_attribute *attr,
1320 const char *buf, size_t size)
1321 {
1322 u8 idx;
1323 unsigned long val;
1324 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1325 struct etmv4_config *config = &drvdata->config;
1326
1327 if (kstrtoul(buf, 16, &val))
1328 return -EINVAL;
1329
1330 spin_lock(&drvdata->spinlock);
1331 idx = config->seq_idx;
1332 /* RST, bits[7:0] */
1333 config->seq_ctrl[idx] = val & 0xFF;
1334 spin_unlock(&drvdata->spinlock);
1335 return size;
1336 }
1337 static DEVICE_ATTR_RW(seq_event);
1338
1339 static ssize_t seq_reset_event_show(struct device *dev,
1340 struct device_attribute *attr,
1341 char *buf)
1342 {
1343 unsigned long val;
1344 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1345 struct etmv4_config *config = &drvdata->config;
1346
1347 val = config->seq_rst;
1348 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1349 }
1350
1351 static ssize_t seq_reset_event_store(struct device *dev,
1352 struct device_attribute *attr,
1353 const char *buf, size_t size)
1354 {
1355 unsigned long val;
1356 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1357 struct etmv4_config *config = &drvdata->config;
1358
1359 if (kstrtoul(buf, 16, &val))
1360 return -EINVAL;
1361 if (!(drvdata->nrseqstate))
1362 return -EINVAL;
1363
1364 config->seq_rst = val & ETMv4_EVENT_MASK;
1365 return size;
1366 }
1367 static DEVICE_ATTR_RW(seq_reset_event);
1368
1369 static ssize_t cntr_idx_show(struct device *dev,
1370 struct device_attribute *attr,
1371 char *buf)
1372 {
1373 unsigned long val;
1374 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1375 struct etmv4_config *config = &drvdata->config;
1376
1377 val = config->cntr_idx;
1378 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1379 }
1380
1381 static ssize_t cntr_idx_store(struct device *dev,
1382 struct device_attribute *attr,
1383 const char *buf, size_t size)
1384 {
1385 unsigned long val;
1386 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1387 struct etmv4_config *config = &drvdata->config;
1388
1389 if (kstrtoul(buf, 16, &val))
1390 return -EINVAL;
1391 if (val >= drvdata->nr_cntr)
1392 return -EINVAL;
1393
1394 /*
1395 * Use spinlock to ensure index doesn't change while it gets
1396 * dereferenced multiple times within a spinlock block elsewhere.
1397 */
1398 spin_lock(&drvdata->spinlock);
1399 config->cntr_idx = val;
1400 spin_unlock(&drvdata->spinlock);
1401 return size;
1402 }
1403 static DEVICE_ATTR_RW(cntr_idx);
1404
1405 static ssize_t cntrldvr_show(struct device *dev,
1406 struct device_attribute *attr,
1407 char *buf)
1408 {
1409 u8 idx;
1410 unsigned long val;
1411 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1412 struct etmv4_config *config = &drvdata->config;
1413
1414 spin_lock(&drvdata->spinlock);
1415 idx = config->cntr_idx;
1416 val = config->cntrldvr[idx];
1417 spin_unlock(&drvdata->spinlock);
1418 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1419 }
1420
1421 static ssize_t cntrldvr_store(struct device *dev,
1422 struct device_attribute *attr,
1423 const char *buf, size_t size)
1424 {
1425 u8 idx;
1426 unsigned long val;
1427 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1428 struct etmv4_config *config = &drvdata->config;
1429
1430 if (kstrtoul(buf, 16, &val))
1431 return -EINVAL;
1432 if (val > ETM_CNTR_MAX_VAL)
1433 return -EINVAL;
1434
1435 spin_lock(&drvdata->spinlock);
1436 idx = config->cntr_idx;
1437 config->cntrldvr[idx] = val;
1438 spin_unlock(&drvdata->spinlock);
1439 return size;
1440 }
1441 static DEVICE_ATTR_RW(cntrldvr);
1442
1443 static ssize_t cntr_val_show(struct device *dev,
1444 struct device_attribute *attr,
1445 char *buf)
1446 {
1447 u8 idx;
1448 unsigned long val;
1449 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1450 struct etmv4_config *config = &drvdata->config;
1451
1452 spin_lock(&drvdata->spinlock);
1453 idx = config->cntr_idx;
1454 val = config->cntr_val[idx];
1455 spin_unlock(&drvdata->spinlock);
1456 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1457 }
1458
1459 static ssize_t cntr_val_store(struct device *dev,
1460 struct device_attribute *attr,
1461 const char *buf, size_t size)
1462 {
1463 u8 idx;
1464 unsigned long val;
1465 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1466 struct etmv4_config *config = &drvdata->config;
1467
1468 if (kstrtoul(buf, 16, &val))
1469 return -EINVAL;
1470 if (val > ETM_CNTR_MAX_VAL)
1471 return -EINVAL;
1472
1473 spin_lock(&drvdata->spinlock);
1474 idx = config->cntr_idx;
1475 config->cntr_val[idx] = val;
1476 spin_unlock(&drvdata->spinlock);
1477 return size;
1478 }
1479 static DEVICE_ATTR_RW(cntr_val);
1480
1481 static ssize_t cntr_ctrl_show(struct device *dev,
1482 struct device_attribute *attr,
1483 char *buf)
1484 {
1485 u8 idx;
1486 unsigned long val;
1487 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1488 struct etmv4_config *config = &drvdata->config;
1489
1490 spin_lock(&drvdata->spinlock);
1491 idx = config->cntr_idx;
1492 val = config->cntr_ctrl[idx];
1493 spin_unlock(&drvdata->spinlock);
1494 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1495 }
1496
1497 static ssize_t cntr_ctrl_store(struct device *dev,
1498 struct device_attribute *attr,
1499 const char *buf, size_t size)
1500 {
1501 u8 idx;
1502 unsigned long val;
1503 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1504 struct etmv4_config *config = &drvdata->config;
1505
1506 if (kstrtoul(buf, 16, &val))
1507 return -EINVAL;
1508
1509 spin_lock(&drvdata->spinlock);
1510 idx = config->cntr_idx;
1511 config->cntr_ctrl[idx] = val;
1512 spin_unlock(&drvdata->spinlock);
1513 return size;
1514 }
1515 static DEVICE_ATTR_RW(cntr_ctrl);
1516
1517 static ssize_t res_idx_show(struct device *dev,
1518 struct device_attribute *attr,
1519 char *buf)
1520 {
1521 unsigned long val;
1522 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1523 struct etmv4_config *config = &drvdata->config;
1524
1525 val = config->res_idx;
1526 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1527 }
1528
1529 static ssize_t res_idx_store(struct device *dev,
1530 struct device_attribute *attr,
1531 const char *buf, size_t size)
1532 {
1533 unsigned long val;
1534 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1535 struct etmv4_config *config = &drvdata->config;
1536
1537 if (kstrtoul(buf, 16, &val))
1538 return -EINVAL;
1539 /* Resource selector pair 0 is always implemented and reserved */
1540 if ((val == 0) || (val >= drvdata->nr_resource))
1541 return -EINVAL;
1542
1543 /*
1544 * Use spinlock to ensure index doesn't change while it gets
1545 * dereferenced multiple times within a spinlock block elsewhere.
1546 */
1547 spin_lock(&drvdata->spinlock);
1548 config->res_idx = val;
1549 spin_unlock(&drvdata->spinlock);
1550 return size;
1551 }
1552 static DEVICE_ATTR_RW(res_idx);
1553
1554 static ssize_t res_ctrl_show(struct device *dev,
1555 struct device_attribute *attr,
1556 char *buf)
1557 {
1558 u8 idx;
1559 unsigned long val;
1560 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1561 struct etmv4_config *config = &drvdata->config;
1562
1563 spin_lock(&drvdata->spinlock);
1564 idx = config->res_idx;
1565 val = config->res_ctrl[idx];
1566 spin_unlock(&drvdata->spinlock);
1567 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1568 }
1569
1570 static ssize_t res_ctrl_store(struct device *dev,
1571 struct device_attribute *attr,
1572 const char *buf, size_t size)
1573 {
1574 u8 idx;
1575 unsigned long val;
1576 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1577 struct etmv4_config *config = &drvdata->config;
1578
1579 if (kstrtoul(buf, 16, &val))
1580 return -EINVAL;
1581
1582 spin_lock(&drvdata->spinlock);
1583 idx = config->res_idx;
1584 /* For an odd idx the pair inversion bit is RES0 */
1585 if (idx % 2 != 0)
1586 /* PAIRINV, bit[21] */
1587 val &= ~BIT(21);
1588 config->res_ctrl[idx] = val;
1589 spin_unlock(&drvdata->spinlock);
1590 return size;
1591 }
1592 static DEVICE_ATTR_RW(res_ctrl);
1593
1594 static ssize_t ctxid_idx_show(struct device *dev,
1595 struct device_attribute *attr,
1596 char *buf)
1597 {
1598 unsigned long val;
1599 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1600 struct etmv4_config *config = &drvdata->config;
1601
1602 val = config->ctxid_idx;
1603 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1604 }
1605
1606 static ssize_t ctxid_idx_store(struct device *dev,
1607 struct device_attribute *attr,
1608 const char *buf, size_t size)
1609 {
1610 unsigned long val;
1611 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1612 struct etmv4_config *config = &drvdata->config;
1613
1614 if (kstrtoul(buf, 16, &val))
1615 return -EINVAL;
1616 if (val >= drvdata->numcidc)
1617 return -EINVAL;
1618
1619 /*
1620 * Use spinlock to ensure index doesn't change while it gets
1621 * dereferenced multiple times within a spinlock block elsewhere.
1622 */
1623 spin_lock(&drvdata->spinlock);
1624 config->ctxid_idx = val;
1625 spin_unlock(&drvdata->spinlock);
1626 return size;
1627 }
1628 static DEVICE_ATTR_RW(ctxid_idx);
1629
1630 static ssize_t ctxid_pid_show(struct device *dev,
1631 struct device_attribute *attr,
1632 char *buf)
1633 {
1634 u8 idx;
1635 unsigned long val;
1636 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1637 struct etmv4_config *config = &drvdata->config;
1638
1639 /*
1640 * Don't use contextID tracing if coming from a PID namespace. See
1641 * comment in ctxid_pid_store().
1642 */
1643 if (task_active_pid_ns(current) != &init_pid_ns)
1644 return -EINVAL;
1645
1646 spin_lock(&drvdata->spinlock);
1647 idx = config->ctxid_idx;
1648 val = (unsigned long)config->ctxid_pid[idx];
1649 spin_unlock(&drvdata->spinlock);
1650 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1651 }
1652
1653 static ssize_t ctxid_pid_store(struct device *dev,
1654 struct device_attribute *attr,
1655 const char *buf, size_t size)
1656 {
1657 u8 idx;
1658 unsigned long pid;
1659 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1660 struct etmv4_config *config = &drvdata->config;
1661
1662 /*
1663 * When contextID tracing is enabled the tracers will insert the
1664 * value found in the contextID register in the trace stream. But if
1665 * a process is in a namespace the PID of that process as seen from the
1666 * namespace won't be what the kernel sees, something that makes the
1667 * feature confusing and can potentially leak kernel only information.
1668 * As such refuse to use the feature if @current is not in the initial
1669 * PID namespace.
1670 */
1671 if (task_active_pid_ns(current) != &init_pid_ns)
1672 return -EINVAL;
1673
1674 /*
1675 * only implemented when ctxid tracing is enabled, i.e. at least one
1676 * ctxid comparator is implemented and ctxid is greater than 0 bits
1677 * in length
1678 */
1679 if (!drvdata->ctxid_size || !drvdata->numcidc)
1680 return -EINVAL;
1681 if (kstrtoul(buf, 16, &pid))
1682 return -EINVAL;
1683
1684 spin_lock(&drvdata->spinlock);
1685 idx = config->ctxid_idx;
1686 config->ctxid_pid[idx] = (u64)pid;
1687 spin_unlock(&drvdata->spinlock);
1688 return size;
1689 }
1690 static DEVICE_ATTR_RW(ctxid_pid);
1691
1692 static ssize_t ctxid_masks_show(struct device *dev,
1693 struct device_attribute *attr,
1694 char *buf)
1695 {
1696 unsigned long val1, val2;
1697 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1698 struct etmv4_config *config = &drvdata->config;
1699
1700 /*
1701 * Don't use contextID tracing if coming from a PID namespace. See
1702 * comment in ctxid_pid_store().
1703 */
1704 if (task_active_pid_ns(current) != &init_pid_ns)
1705 return -EINVAL;
1706
1707 spin_lock(&drvdata->spinlock);
1708 val1 = config->ctxid_mask0;
1709 val2 = config->ctxid_mask1;
1710 spin_unlock(&drvdata->spinlock);
1711 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
1712 }
1713
1714 static ssize_t ctxid_masks_store(struct device *dev,
1715 struct device_attribute *attr,
1716 const char *buf, size_t size)
1717 {
1718 u8 i, j, maskbyte;
1719 unsigned long val1, val2, mask;
1720 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1721 struct etmv4_config *config = &drvdata->config;
1722
1723 /*
1724 * Don't use contextID tracing if coming from a PID namespace. See
1725 * comment in ctxid_pid_store().
1726 */
1727 if (task_active_pid_ns(current) != &init_pid_ns)
1728 return -EINVAL;
1729
	/*
	 * Only implemented when ctxid tracing is enabled, i.e. at least one
	 * ctxid comparator is implemented and the ctxid size is greater than
	 * 0 bits.
	 */
	if (!drvdata->ctxid_size || !drvdata->numcidc)
		return -EINVAL;
	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	/*
	 * each byte[0..3] controls mask value applied to ctxid
	 * comparator[0..3]
	 */
	switch (drvdata->numcidc) {
	case 0x1:
		/* COMP0, bits[7:0] */
		config->ctxid_mask0 = val1 & 0xFF;
		break;
	case 0x2:
		/* COMP1, bits[15:8] */
		config->ctxid_mask0 = val1 & 0xFFFF;
		break;
	case 0x3:
		/* COMP2, bits[23:16] */
		config->ctxid_mask0 = val1 & 0xFFFFFF;
		break;
	case 0x4:
		/* COMP3, bits[31:24] */
		config->ctxid_mask0 = val1;
		break;
	case 0x5:
		/* COMP4, bits[7:0] */
		config->ctxid_mask0 = val1;
		config->ctxid_mask1 = val2 & 0xFF;
		break;
	case 0x6:
		/* COMP5, bits[15:8] */
		config->ctxid_mask0 = val1;
		config->ctxid_mask1 = val2 & 0xFFFF;
		break;
	case 0x7:
		/* COMP6, bits[23:16] */
		config->ctxid_mask0 = val1;
		config->ctxid_mask1 = val2 & 0xFFFFFF;
		break;
	case 0x8:
		/* COMP7, bits[31:24] */
		config->ctxid_mask0 = val1;
		config->ctxid_mask1 = val2;
		break;
	default:
		break;
	}
	/*
	 * If software sets a mask bit to 1, it must program the corresponding
	 * byte of the ctxid comparator value to 0x0, otherwise the behavior
	 * is unpredictable. For example, if bit[3] of ctxid_mask0 (the mask
	 * byte for comparator 0) is 1, we must clear bits[31:24], i.e.
	 * byte 3, of the ctxid comparator 0 value register.
	 */
	mask = config->ctxid_mask0;
	for (i = 0; i < drvdata->numcidc; i++) {
		/* mask value of corresponding ctxid comparator */
		maskbyte = mask & ETMv4_EVENT_MASK;
		/*
		 * each bit corresponds to a byte of respective ctxid comparator
		 * value register
		 */
		for (j = 0; j < 8; j++) {
			if (maskbyte & 1)
				config->ctxid_pid[i] &= ~(0xFFUL << (j * 8));
			maskbyte >>= 1;
		}
		/* Select the next ctxid comparator mask value */
		if (i == 3)
			/* ctxid comparators[4-7] */
			mask = config->ctxid_mask1;
		else
			mask >>= 0x8;
	}

	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(ctxid_masks);
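
/*
 * Worked example of the mask layout handled above (illustrative only,
 * assuming at least two ctxid comparators are implemented): writing
 * "0x0300 0x0" to ctxid_masks puts 0x00 in the mask byte of comparator 0
 * and 0x03 in the mask byte of comparator 1. For comparator 1, mask
 * bits[1:0] being set means bytes 0 and 1 of its programmed context ID
 * value are ignored during comparison, so the loop above also clears
 * bits[15:0] of config->ctxid_pid[1] to keep the behavior predictable.
 */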

static ssize_t vmid_idx_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->vmid_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t vmid_idx_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->numvmidc)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->vmid_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(vmid_idx);

static ssize_t vmid_val_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = (unsigned long)config->vmid_val[config->vmid_idx];
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t vmid_val_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/*
	 * Only implemented when vmid tracing is enabled, i.e. at least one
	 * vmid comparator is implemented and the vmid size is at least
	 * 8 bits.
	 */
	if (!drvdata->vmid_size || !drvdata->numvmidc)
		return -EINVAL;
	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	config->vmid_val[config->vmid_idx] = (u64)val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(vmid_val);
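
/*
 * Illustrative usage of the VMID comparator attributes above (a sketch
 * only; the "etm0" device name is an assumption):
 *
 *   # select VMID comparator 0 and program it with VMID 0x2
 *   echo 0 > /sys/bus/coresight/devices/etm0/vmid_idx
 *   echo 0x2 > /sys/bus/coresight/devices/etm0/vmid_val
 *
 * As with the ctxid attributes, values are parsed as hexadecimal and the
 * writes fail with -EINVAL when no VMID comparators are implemented or
 * the VMID size is zero.
 */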

static ssize_t vmid_masks_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	unsigned long val1, val2;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	val1 = config->vmid_mask0;
	val2 = config->vmid_mask1;
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
}

static ssize_t vmid_masks_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 i, j, maskbyte;
	unsigned long val1, val2, mask;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/*
	 * Only implemented when vmid tracing is enabled, i.e. at least one
	 * vmid comparator is implemented and the vmid size is at least
	 * 8 bits.
	 */
	if (!drvdata->vmid_size || !drvdata->numvmidc)
		return -EINVAL;
	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);

	/*
	 * each byte[0..3] controls mask value applied to vmid
	 * comparator[0..3]
	 */
	switch (drvdata->numvmidc) {
	case 0x1:
		/* COMP0, bits[7:0] */
		config->vmid_mask0 = val1 & 0xFF;
		break;
	case 0x2:
		/* COMP1, bits[15:8] */
		config->vmid_mask0 = val1 & 0xFFFF;
		break;
	case 0x3:
		/* COMP2, bits[23:16] */
		config->vmid_mask0 = val1 & 0xFFFFFF;
		break;
	case 0x4:
		/* COMP3, bits[31:24] */
		config->vmid_mask0 = val1;
		break;
	case 0x5:
		/* COMP4, bits[7:0] */
		config->vmid_mask0 = val1;
		config->vmid_mask1 = val2 & 0xFF;
		break;
	case 0x6:
		/* COMP5, bits[15:8] */
		config->vmid_mask0 = val1;
		config->vmid_mask1 = val2 & 0xFFFF;
		break;
	case 0x7:
		/* COMP6, bits[23:16] */
		config->vmid_mask0 = val1;
		config->vmid_mask1 = val2 & 0xFFFFFF;
		break;
	case 0x8:
		/* COMP7, bits[31:24] */
		config->vmid_mask0 = val1;
		config->vmid_mask1 = val2;
		break;
	default:
		break;
	}

	/*
	 * If software sets a mask bit to 1, it must program the corresponding
	 * byte of the vmid comparator value to 0x0, otherwise the behavior
	 * is unpredictable. For example, if bit[3] of vmid_mask0 (the mask
	 * byte for comparator 0) is 1, we must clear bits[31:24], i.e.
	 * byte 3, of the vmid comparator 0 value register.
	 */
	mask = config->vmid_mask0;
	for (i = 0; i < drvdata->numvmidc; i++) {
		/* mask value of corresponding vmid comparator */
		maskbyte = mask & ETMv4_EVENT_MASK;
		/*
		 * each bit corresponds to a byte of respective vmid comparator
		 * value register
		 */
		for (j = 0; j < 8; j++) {
			if (maskbyte & 1)
				config->vmid_val[i] &= ~(0xFFUL << (j * 8));
			maskbyte >>= 1;
		}
		/* Select the next vmid comparator mask value */
		if (i == 3)
			/* vmid comparators[4-7] */
			mask = config->vmid_mask1;
		else
			mask >>= 0x8;
	}
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(vmid_masks);
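
/*
 * Illustrative write to the attribute above (sketch; device name assumed):
 *
 *   # mask byte 0 of VMID comparators 0 and 1
 *   echo 0x0101 0x0 > /sys/bus/coresight/devices/etm0/vmid_masks
 *
 * Two hex words are always expected; the second is only meaningful when
 * more than four VMID comparators are implemented (comparators 4-7).
 */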

static ssize_t cpu_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	int val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->cpu;
	return scnprintf(buf, PAGE_SIZE, "%d\n", val);

}
static DEVICE_ATTR_RO(cpu);

static struct attribute *coresight_etmv4_attrs[] = {
	&dev_attr_nr_pe_cmp.attr,
	&dev_attr_nr_addr_cmp.attr,
	&dev_attr_nr_cntr.attr,
	&dev_attr_nr_ext_inp.attr,
	&dev_attr_numcidc.attr,
	&dev_attr_numvmidc.attr,
	&dev_attr_nrseqstate.attr,
	&dev_attr_nr_resource.attr,
	&dev_attr_nr_ss_cmp.attr,
	&dev_attr_reset.attr,
	&dev_attr_mode.attr,
	&dev_attr_pe.attr,
	&dev_attr_event.attr,
	&dev_attr_event_instren.attr,
	&dev_attr_event_ts.attr,
	&dev_attr_syncfreq.attr,
	&dev_attr_cyc_threshold.attr,
	&dev_attr_bb_ctrl.attr,
	&dev_attr_event_vinst.attr,
	&dev_attr_s_exlevel_vinst.attr,
	&dev_attr_ns_exlevel_vinst.attr,
	&dev_attr_addr_idx.attr,
	&dev_attr_addr_instdatatype.attr,
	&dev_attr_addr_single.attr,
	&dev_attr_addr_range.attr,
	&dev_attr_addr_start.attr,
	&dev_attr_addr_stop.attr,
	&dev_attr_addr_ctxtype.attr,
	&dev_attr_addr_context.attr,
	&dev_attr_seq_idx.attr,
	&dev_attr_seq_state.attr,
	&dev_attr_seq_event.attr,
	&dev_attr_seq_reset_event.attr,
	&dev_attr_cntr_idx.attr,
	&dev_attr_cntrldvr.attr,
	&dev_attr_cntr_val.attr,
	&dev_attr_cntr_ctrl.attr,
	&dev_attr_res_idx.attr,
	&dev_attr_res_ctrl.attr,
	&dev_attr_ctxid_idx.attr,
	&dev_attr_ctxid_pid.attr,
	&dev_attr_ctxid_masks.attr,
	&dev_attr_vmid_idx.attr,
	&dev_attr_vmid_val.attr,
	&dev_attr_vmid_masks.attr,
	&dev_attr_cpu.attr,
	NULL,
};

struct etmv4_reg {
	void __iomem *addr;
	u32 data;
};

static void do_smp_cross_read(void *data)
{
	struct etmv4_reg *reg = data;

	reg->data = readl_relaxed(reg->addr);
}

static u32 etmv4_cross_read(const struct device *dev, u32 offset)
{
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);
	struct etmv4_reg reg;

	reg.addr = drvdata->base + offset;
	/*
	 * smp cross call ensures the CPU will be powered up before
	 * accessing the ETMv4 trace core registers
	 */
	smp_call_function_single(drvdata->cpu, do_smp_cross_read, &reg, 1);
	return reg.data;
}

#define coresight_etm4x_reg(name, offset)			\
	coresight_simple_reg32(struct etmv4_drvdata, name, offset)

#define coresight_etm4x_cross_read(name, offset)			\
	coresight_simple_func(struct etmv4_drvdata, etmv4_cross_read,	\
			      name, offset)
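
/*
 * A note on the helpers above (summary only; the authoritative definitions
 * of coresight_simple_reg32() and coresight_simple_func() live in
 * coresight-priv.h): each use below declares a read-only sysfs attribute
 * named after the register it is applied to. coresight_etm4x_cross_read()
 * routes the read through etmv4_cross_read() so it executes on the CPU
 * this ETM is bound to, keeping the trace unit powered while the register
 * is sampled.
 */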

coresight_etm4x_reg(trcpdcr, TRCPDCR);
coresight_etm4x_reg(trcpdsr, TRCPDSR);
coresight_etm4x_reg(trclsr, TRCLSR);
coresight_etm4x_reg(trcauthstatus, TRCAUTHSTATUS);
coresight_etm4x_reg(trcdevid, TRCDEVID);
coresight_etm4x_reg(trcdevtype, TRCDEVTYPE);
coresight_etm4x_reg(trcpidr0, TRCPIDR0);
coresight_etm4x_reg(trcpidr1, TRCPIDR1);
coresight_etm4x_reg(trcpidr2, TRCPIDR2);
coresight_etm4x_reg(trcpidr3, TRCPIDR3);
coresight_etm4x_cross_read(trcoslsr, TRCOSLSR);
coresight_etm4x_cross_read(trcconfig, TRCCONFIGR);
coresight_etm4x_cross_read(trctraceid, TRCTRACEIDR);

static struct attribute *coresight_etmv4_mgmt_attrs[] = {
	&dev_attr_trcoslsr.attr,
	&dev_attr_trcpdcr.attr,
	&dev_attr_trcpdsr.attr,
	&dev_attr_trclsr.attr,
	&dev_attr_trcconfig.attr,
	&dev_attr_trctraceid.attr,
	&dev_attr_trcauthstatus.attr,
	&dev_attr_trcdevid.attr,
	&dev_attr_trcdevtype.attr,
	&dev_attr_trcpidr0.attr,
	&dev_attr_trcpidr1.attr,
	&dev_attr_trcpidr2.attr,
	&dev_attr_trcpidr3.attr,
	NULL,
};

coresight_etm4x_cross_read(trcidr0, TRCIDR0);
coresight_etm4x_cross_read(trcidr1, TRCIDR1);
coresight_etm4x_cross_read(trcidr2, TRCIDR2);
coresight_etm4x_cross_read(trcidr3, TRCIDR3);
coresight_etm4x_cross_read(trcidr4, TRCIDR4);
coresight_etm4x_cross_read(trcidr5, TRCIDR5);
/* trcidr[6,7] are reserved */
coresight_etm4x_cross_read(trcidr8, TRCIDR8);
coresight_etm4x_cross_read(trcidr9, TRCIDR9);
coresight_etm4x_cross_read(trcidr10, TRCIDR10);
coresight_etm4x_cross_read(trcidr11, TRCIDR11);
coresight_etm4x_cross_read(trcidr12, TRCIDR12);
coresight_etm4x_cross_read(trcidr13, TRCIDR13);

static struct attribute *coresight_etmv4_trcidr_attrs[] = {
	&dev_attr_trcidr0.attr,
	&dev_attr_trcidr1.attr,
	&dev_attr_trcidr2.attr,
	&dev_attr_trcidr3.attr,
	&dev_attr_trcidr4.attr,
	&dev_attr_trcidr5.attr,
	/* trcidr[6,7] are reserved */
	&dev_attr_trcidr8.attr,
	&dev_attr_trcidr9.attr,
	&dev_attr_trcidr10.attr,
	&dev_attr_trcidr11.attr,
	&dev_attr_trcidr12.attr,
	&dev_attr_trcidr13.attr,
	NULL,
};

static const struct attribute_group coresight_etmv4_group = {
	.attrs = coresight_etmv4_attrs,
};

static const struct attribute_group coresight_etmv4_mgmt_group = {
	.attrs = coresight_etmv4_mgmt_attrs,
	.name = "mgmt",
};

static const struct attribute_group coresight_etmv4_trcidr_group = {
	.attrs = coresight_etmv4_trcidr_attrs,
	.name = "trcidr",
};

const struct attribute_group *coresight_etmv4_groups[] = {
	&coresight_etmv4_group,
	&coresight_etmv4_mgmt_group,
	&coresight_etmv4_trcidr_group,
	NULL,
};
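
/*
 * How the groups above are consumed (a sketch under assumptions, not code
 * from this file): coresight_etmv4_groups would typically be assigned to
 * the coresight device description at probe time, e.g.
 *
 *   desc.groups = coresight_etmv4_groups;
 *
 * so that the default attributes appear in the device's sysfs directory
 * and the named "mgmt" and "trcidr" groups show up as subdirectories of it.
 */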