// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2015 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <api/fs/fs.h>
#include <linux/bits.h>
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/coresight-pmu.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/zalloc.h>

#include "cs-etm.h"
#include "../../util/debug.h"
#include "../../util/record.h"
#include "../../util/auxtrace.h"
#include "../../util/cpumap.h"
#include "../../util/event.h"
#include "../../util/evlist.h"
#include "../../util/evsel.h"
#include "../../util/evsel_config.h"
#include "../../util/pmu.h"
#include "../../util/cs-etm.h"
#include <internal/lib.h> // page_size
#include "../../util/session.h"

#include <errno.h>
#include <stdlib.h>
#include <sys/stat.h>

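/*
 * Per-session recording state.  @wrapped holds one flag per AUX area
 * mmap so that cs_etm_find_snapshot() can remember whether the ring
 * buffer behind a given mmap has already wrapped around.
 */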
struct cs_etm_recording {
	struct auxtrace_record	itr;
	struct perf_pmu		*cs_etm_pmu;
	struct evlist		*evlist;
	int			wrapped_cnt;
	bool			*wrapped;
	bool			snapshot_mode;
	size_t			snapshot_size;
};

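/*
 * Read-only metadata files exposed by the driver, relative to the
 * per-CPU directories of the cs_etm PMU in sysfs (typically
 * /sys/bus/event_source/devices/cs_etm/cpu<N>/).
 */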
static const char *metadata_etmv3_ro[CS_ETM_PRIV_MAX] = {
	[CS_ETM_ETMCCER]	= "mgmt/etmccer",
	[CS_ETM_ETMIDR]		= "mgmt/etmidr",
};

static const char *metadata_etmv4_ro[CS_ETMV4_PRIV_MAX] = {
	[CS_ETMV4_TRCIDR0]		= "trcidr/trcidr0",
	[CS_ETMV4_TRCIDR1]		= "trcidr/trcidr1",
	[CS_ETMV4_TRCIDR2]		= "trcidr/trcidr2",
	[CS_ETMV4_TRCIDR8]		= "trcidr/trcidr8",
	[CS_ETMV4_TRCAUTHSTATUS]	= "mgmt/trcauthstatus",
};

static bool cs_etm_is_etmv4(struct auxtrace_record *itr, int cpu);

static int cs_etm_set_context_id(struct auxtrace_record *itr,
				 struct evsel *evsel, int cpu)
{
	struct cs_etm_recording *ptr;
	struct perf_pmu *cs_etm_pmu;
	char path[PATH_MAX];
	int err = -EINVAL;
	u32 val;

	ptr = container_of(itr, struct cs_etm_recording, itr);
	cs_etm_pmu = ptr->cs_etm_pmu;

	if (!cs_etm_is_etmv4(itr, cpu))
		goto out;

	/* Get a handle on TRCIDR2 */
	snprintf(path, PATH_MAX, "cpu%d/%s",
		 cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR2]);
	err = perf_pmu__scan_file(cs_etm_pmu, path, "%x", &val);

	/* There was a problem reading the file, bailing out */
	if (err != 1) {
		pr_err("%s: can't read file %s\n",
		       CORESIGHT_ETM_PMU_NAME, path);
		goto out;
	}

	/*
	 * TRCIDR2.CIDSIZE, bits [9:5], indicates whether Context ID tracing
	 * is supported:
	 *  0b00000 Context ID tracing is not supported.
	 *  0b00100 Maximum of 32-bit Context ID size.
	 *  All other values are reserved.
	 */
	val = BMVAL(val, 5, 9);
	if (val != 0x4) {
		err = -EINVAL;
		goto out;
	}

	/* All good, let the kernel know */
	evsel->core.attr.config |= (1 << ETM_OPT_CTXTID);
	err = 0;

out:
	return err;
}

static int cs_etm_set_timestamp(struct auxtrace_record *itr,
				struct evsel *evsel, int cpu)
{
	struct cs_etm_recording *ptr;
	struct perf_pmu *cs_etm_pmu;
	char path[PATH_MAX];
	int err = -EINVAL;
	u32 val;

	ptr = container_of(itr, struct cs_etm_recording, itr);
	cs_etm_pmu = ptr->cs_etm_pmu;

	if (!cs_etm_is_etmv4(itr, cpu))
		goto out;

	/* Get a handle on TRCIDR0 */
	snprintf(path, PATH_MAX, "cpu%d/%s",
		 cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR0]);
	err = perf_pmu__scan_file(cs_etm_pmu, path, "%x", &val);

	/* There was a problem reading the file, bailing out */
	if (err != 1) {
		pr_err("%s: can't read file %s\n",
		       CORESIGHT_ETM_PMU_NAME, path);
		goto out;
	}

	/*
	 * TRCIDR0.TSSIZE, bits [28:24], indicates whether global timestamping
	 * is supported:
	 *  0b00000 Global timestamping is not implemented.
	 *  0b00110 Implementation supports a maximum timestamp of 48 bits.
	 *  0b01000 Implementation supports a maximum timestamp of 64 bits.
	 */
	val &= GENMASK(28, 24);
	if (!val) {
		err = -EINVAL;
		goto out;
	}

	/* All good, let the kernel know */
	evsel->core.attr.config |= (1 << ETM_OPT_TS);
	err = 0;

out:
	return err;
}

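/*
 * Apply @option - a mask of ETM_OPT_* bits - to @evsel on every CPU
 * that is both online and part of the event's CPU map.  Option bits
 * other than ETM_OPT_CTXTID and ETM_OPT_TS are rejected.
 */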
static int cs_etm_set_option(struct auxtrace_record *itr,
			     struct evsel *evsel, u32 option)
{
	int i, err = -EINVAL;
	struct perf_cpu_map *event_cpus = evsel->evlist->core.cpus;
	struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);

	/* Set option of each CPU we have */
	for (i = 0; i < cpu__max_cpu(); i++) {
		if (!cpu_map__has(event_cpus, i) ||
		    !cpu_map__has(online_cpus, i))
			continue;

		if (option & ETM_OPT_CTXTID) {
			err = cs_etm_set_context_id(itr, evsel, i);
			if (err)
				goto out;
		}
		if (option & ETM_OPT_TS) {
			err = cs_etm_set_timestamp(itr, evsel, i);
			if (err)
				goto out;
		}
		if (option & ~(ETM_OPT_CTXTID | ETM_OPT_TS))
			/* Nothing else is currently supported */
			goto out;
	}

	err = 0;
out:
	perf_cpu_map__put(online_cpus);
	return err;
}

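/*
 * Parse the argument to perf record's -S/--snapshot option: an optional
 * snapshot size in bytes.  When no size is given, a default is computed
 * later by cs_etm_recording_options().
 */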
static int cs_etm_parse_snapshot_options(struct auxtrace_record *itr,
					 struct record_opts *opts,
					 const char *str)
{
	struct cs_etm_recording *ptr =
		container_of(itr, struct cs_etm_recording, itr);
	unsigned long long snapshot_size = 0;
	char *endptr;

	if (str) {
		snapshot_size = strtoull(str, &endptr, 0);
		if (*endptr || snapshot_size > SIZE_MAX)
			return -1;
	}

	opts->auxtrace_snapshot_mode = true;
	opts->auxtrace_snapshot_size = snapshot_size;
	ptr->snapshot_size = snapshot_size;

	return 0;
}

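/*
 * A sink is selected on the perf record command line with the '@'
 * notation, e.g. "perf record -e cs_etm/@tmc_etr0/u ..." (where
 * "tmc_etr0" is just an example sink name).  The driver exports a hash
 * for each sink under sinks/ in sysfs; the hash of the chosen sink is
 * placed in the lower 32 bits of perf_event_attr::config2 so that the
 * kernel can route trace data to it.
 */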
static int cs_etm_set_sink_attr(struct perf_pmu *pmu,
				struct evsel *evsel)
{
	char msg[BUFSIZ], path[PATH_MAX], *sink;
	struct perf_evsel_config_term *term;
	int ret = -EINVAL;
	u32 hash;

	if (evsel->core.attr.config2 & GENMASK(31, 0))
		return 0;

	list_for_each_entry(term, &evsel->config_terms, list) {
		if (term->type != PERF_EVSEL__CONFIG_TERM_DRV_CFG)
			continue;

		sink = term->val.drv_cfg;
		snprintf(path, PATH_MAX, "sinks/%s", sink);

		ret = perf_pmu__scan_file(pmu, path, "%x", &hash);
		if (ret != 1) {
			pr_err("failed to set sink \"%s\" on event %s with %d (%s)\n",
			       sink, perf_evsel__name(evsel), errno,
			       str_error_r(errno, msg, sizeof(msg)));
			return ret;
		}

		evsel->core.attr.config2 |= hash;
		return 0;
	}

	/*
	 * No sink was provided on the command line - for _now_ treat
	 * this as an error.
	 */
	return ret;
}

static int cs_etm_recording_options(struct auxtrace_record *itr,
				    struct evlist *evlist,
				    struct record_opts *opts)
{
	int ret;
	struct cs_etm_recording *ptr =
		container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
	struct evsel *evsel, *cs_etm_evsel = NULL;
	struct perf_cpu_map *cpus = evlist->core.cpus;
	bool privileged = perf_event_paranoid_check(-1);
	int err = 0;

	ptr->evlist = evlist;
	ptr->snapshot_mode = opts->auxtrace_snapshot_mode;

	if (perf_can_record_switch_events())
		opts->record_switch_events = true;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type == cs_etm_pmu->type) {
			if (cs_etm_evsel) {
				pr_err("There may be only one %s event\n",
				       CORESIGHT_ETM_PMU_NAME);
				return -EINVAL;
			}
			evsel->core.attr.freq = 0;
			evsel->core.attr.sample_period = 1;
			cs_etm_evsel = evsel;
			opts->full_auxtrace = true;
		}
	}

	/* Nothing else to do if no event of interest was found */
	if (!cs_etm_evsel)
		return 0;

	ret = cs_etm_set_sink_attr(cs_etm_pmu, cs_etm_evsel);
	if (ret)
		return ret;

	if (opts->use_clockid) {
		pr_err("Cannot use clockid (-k option) with %s\n",
		       CORESIGHT_ETM_PMU_NAME);
		return -EINVAL;
	}

	/* we are in snapshot mode */
	if (opts->auxtrace_snapshot_mode) {
		/*
		 * No size was given to '-S' or '-m,', so go with
		 * the default
		 */
		if (!opts->auxtrace_snapshot_size &&
		    !opts->auxtrace_mmap_pages) {
			if (privileged) {
				opts->auxtrace_mmap_pages = MiB(4) / page_size;
			} else {
				opts->auxtrace_mmap_pages =
							KiB(128) / page_size;
				if (opts->mmap_pages == UINT_MAX)
					opts->mmap_pages = KiB(256) / page_size;
			}
		} else if (!opts->auxtrace_mmap_pages && !privileged &&
			   opts->mmap_pages == UINT_MAX) {
			opts->mmap_pages = KiB(256) / page_size;
		}

		/*
		 * '-m,xyz' was specified but no snapshot size, so make the
		 * snapshot size as big as the auxtrace mmap area.
		 */
		if (!opts->auxtrace_snapshot_size) {
			opts->auxtrace_snapshot_size =
				opts->auxtrace_mmap_pages * (size_t)page_size;
		}

		/*
		 * '-Sxyz' was specified but no auxtrace mmap area, so make the
		 * auxtrace mmap area big enough to fit the requested snapshot
		 * size.
		 */
		if (!opts->auxtrace_mmap_pages) {
			size_t sz = opts->auxtrace_snapshot_size;

			sz = round_up(sz, page_size) / page_size;
			opts->auxtrace_mmap_pages = roundup_pow_of_two(sz);
		}

		/* Snapshot size can't be bigger than the auxtrace area */
		if (opts->auxtrace_snapshot_size >
				opts->auxtrace_mmap_pages * (size_t)page_size) {
			pr_err("Snapshot size %zu must not be greater than AUX area tracing mmap size %zu\n",
			       opts->auxtrace_snapshot_size,
			       opts->auxtrace_mmap_pages * (size_t)page_size);
			return -EINVAL;
		}

		/* Something went wrong somewhere - this shouldn't happen */
		if (!opts->auxtrace_snapshot_size ||
		    !opts->auxtrace_mmap_pages) {
			pr_err("Failed to calculate default snapshot size and/or AUX area tracing mmap pages\n");
			return -EINVAL;
		}
	}

	/* We are in full trace mode but '-m,xyz' wasn't specified */
	if (opts->full_auxtrace && !opts->auxtrace_mmap_pages) {
		if (privileged) {
			opts->auxtrace_mmap_pages = MiB(4) / page_size;
		} else {
			opts->auxtrace_mmap_pages = KiB(128) / page_size;
			if (opts->mmap_pages == UINT_MAX)
				opts->mmap_pages = KiB(256) / page_size;
		}
	}

	/* Validate auxtrace_mmap_pages provided by user */
	if (opts->auxtrace_mmap_pages) {
		unsigned int max_page = (KiB(128) / page_size);
		size_t sz = opts->auxtrace_mmap_pages * (size_t)page_size;

		if (!privileged &&
		    opts->auxtrace_mmap_pages > max_page) {
			opts->auxtrace_mmap_pages = max_page;
			pr_err("auxtrace too big, truncating to %d\n",
			       max_page);
		}

		if (!is_power_of_2(sz)) {
			pr_err("Invalid mmap size for %s: must be a power of 2\n",
			       CORESIGHT_ETM_PMU_NAME);
			return -EINVAL;
		}
	}

	if (opts->auxtrace_snapshot_mode)
		pr_debug2("%s snapshot size: %zu\n", CORESIGHT_ETM_PMU_NAME,
			  opts->auxtrace_snapshot_size);

	/*
	 * To obtain the auxtrace buffer file descriptor, the auxtrace
	 * event must come first.
	 */
	perf_evlist__to_front(evlist, cs_etm_evsel);

	/*
	 * In the case of per-cpu mmaps, we need the CPU on the
	 * AUX event.  We also need the Context ID in order to be notified
	 * when a context switch happens.
	 */
	if (!perf_cpu_map__empty(cpus)) {
		perf_evsel__set_sample_bit(cs_etm_evsel, CPU);

		err = cs_etm_set_option(itr, cs_etm_evsel,
					ETM_OPT_CTXTID | ETM_OPT_TS);
		if (err)
			goto out;
	}

	/* Add a dummy event to keep tracking */
	if (opts->full_auxtrace) {
		struct evsel *tracking_evsel;

		err = parse_events(evlist, "dummy:u", NULL);
		if (err)
			goto out;

		tracking_evsel = evlist__last(evlist);
		perf_evlist__set_tracking_event(evlist, tracking_evsel);

		tracking_evsel->core.attr.freq = 0;
		tracking_evsel->core.attr.sample_period = 1;

		/* In per-cpu case, always need the time of mmap events etc */
		if (!perf_cpu_map__empty(cpus))
			perf_evsel__set_sample_bit(tracking_evsel, TIME);
	}

out:
	return err;
}

static u64 cs_etm_get_config(struct auxtrace_record *itr)
{
	u64 config = 0;
	struct cs_etm_recording *ptr =
		container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
	struct evlist *evlist = ptr->evlist;
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type == cs_etm_pmu->type) {
			/*
			 * Variable perf_event_attr::config is assigned to
			 * ETMv3/PTM.  The bit fields have been made to match
			 * the ETMv3.5 ETMCR register specification.  See the
			 * PMU_FORMAT_ATTR() declarations in
			 * drivers/hwtracing/coresight/coresight-etm-perf.c
			 * for details.
			 */
			config = evsel->core.attr.config;
			break;
		}
	}

	return config;
}

#ifndef BIT
#define BIT(N) (1UL << (N))
#endif

static u64 cs_etmv4_get_config(struct auxtrace_record *itr)
{
	u64 config = 0;
	u64 config_opts = 0;

	/*
	 * The perf event variable config bits represent both
	 * the command line options and register programming
	 * bits in ETMv3/PTM.  For ETMv4 we must remap options
	 * to real bits.
	 */
	config_opts = cs_etm_get_config(itr);
	if (config_opts & BIT(ETM_OPT_CYCACC))
		config |= BIT(ETM4_CFG_BIT_CYCACC);
	if (config_opts & BIT(ETM_OPT_CTXTID))
		config |= BIT(ETM4_CFG_BIT_CTXTID);
	if (config_opts & BIT(ETM_OPT_TS))
		config |= BIT(ETM4_CFG_BIT_TS);
	if (config_opts & BIT(ETM_OPT_RETSTK))
		config |= BIT(ETM4_CFG_BIT_RETSTK);

	return config;
}

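/*
 * Size of the private area of the PERF_RECORD_AUXTRACE_INFO event: a
 * fixed header followed by one metadata block per traced CPU, whose
 * size depends on whether that CPU has an ETMv3/PTM or an ETMv4.
 */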
static size_t
cs_etm_info_priv_size(struct auxtrace_record *itr,
		      struct evlist *evlist)
{
	int i;
	int etmv3 = 0, etmv4 = 0;
	struct perf_cpu_map *event_cpus = evlist->core.cpus;
	struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);

	/* cpu map is not empty, we have specific CPUs to work with */
	if (!perf_cpu_map__empty(event_cpus)) {
		for (i = 0; i < cpu__max_cpu(); i++) {
			if (!cpu_map__has(event_cpus, i) ||
			    !cpu_map__has(online_cpus, i))
				continue;

			if (cs_etm_is_etmv4(itr, i))
				etmv4++;
			else
				etmv3++;
		}
	} else {
		/* get configuration for all CPUs in the system */
		for (i = 0; i < cpu__max_cpu(); i++) {
			if (!cpu_map__has(online_cpus, i))
				continue;

			if (cs_etm_is_etmv4(itr, i))
				etmv4++;
			else
				etmv3++;
		}
	}

	perf_cpu_map__put(online_cpus);

	return (CS_ETM_HEADER_SIZE +
	       (etmv4 * CS_ETMV4_PRIV_SIZE) +
	       (etmv3 * CS_ETMV3_PRIV_SIZE));
}

static bool cs_etm_is_etmv4(struct auxtrace_record *itr, int cpu)
{
	bool ret = false;
	char path[PATH_MAX];
	int scan;
	unsigned int val;
	struct cs_etm_recording *ptr =
		container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;

	/* Take any of the RO files for ETMv4 and see if it is present */
	snprintf(path, PATH_MAX, "cpu%d/%s",
		 cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR0]);
	scan = perf_pmu__scan_file(cs_etm_pmu, path, "%x", &val);

	/* The file was read successfully, we have a winner */
	if (scan == 1)
		ret = true;

	return ret;
}

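/*
 * Read a hex value from one of the read-only metadata files listed in
 * metadata_etmv3_ro[]/metadata_etmv4_ro[].  On error, complain and
 * return 0 so that the corresponding metadata slot stays empty.
 */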
static int cs_etm_get_ro(struct perf_pmu *pmu, int cpu, const char *path)
{
	char pmu_path[PATH_MAX];
	int scan;
	unsigned int val = 0;

	/* Get RO metadata from sysfs */
	snprintf(pmu_path, PATH_MAX, "cpu%d/%s", cpu, path);

	scan = perf_pmu__scan_file(pmu, pmu_path, "%x", &val);
	if (scan != 1)
		pr_err("%s: error reading: %s\n", __func__, pmu_path);

	return val;
}

static void cs_etm_get_metadata(int cpu, u32 *offset,
				struct auxtrace_record *itr,
				struct perf_record_auxtrace_info *info)
{
	u32 increment;
	u64 magic;
	struct cs_etm_recording *ptr =
		container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;

	/* first see what kind of tracer this cpu is affined to */
	if (cs_etm_is_etmv4(itr, cpu)) {
		magic = __perf_cs_etmv4_magic;
		/* Get trace configuration register */
		info->priv[*offset + CS_ETMV4_TRCCONFIGR] =
						cs_etmv4_get_config(itr);
		/* Get traceID from the framework */
		info->priv[*offset + CS_ETMV4_TRCTRACEIDR] =
						coresight_get_trace_id(cpu);
		/* Get read-only information from sysfs */
		info->priv[*offset + CS_ETMV4_TRCIDR0] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv4_ro[CS_ETMV4_TRCIDR0]);
		info->priv[*offset + CS_ETMV4_TRCIDR1] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv4_ro[CS_ETMV4_TRCIDR1]);
		info->priv[*offset + CS_ETMV4_TRCIDR2] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv4_ro[CS_ETMV4_TRCIDR2]);
		info->priv[*offset + CS_ETMV4_TRCIDR8] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv4_ro[CS_ETMV4_TRCIDR8]);
		info->priv[*offset + CS_ETMV4_TRCAUTHSTATUS] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv4_ro[CS_ETMV4_TRCAUTHSTATUS]);

		/* How much space was used */
		increment = CS_ETMV4_PRIV_MAX;
	} else {
		magic = __perf_cs_etmv3_magic;
		/* Get configuration register */
		info->priv[*offset + CS_ETM_ETMCR] = cs_etm_get_config(itr);
		/* Get traceID from the framework */
		info->priv[*offset + CS_ETM_ETMTRACEIDR] =
						coresight_get_trace_id(cpu);
		/* Get read-only information from sysfs */
		info->priv[*offset + CS_ETM_ETMCCER] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv3_ro[CS_ETM_ETMCCER]);
		info->priv[*offset + CS_ETM_ETMIDR] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv3_ro[CS_ETM_ETMIDR]);

		/* How much space was used */
		increment = CS_ETM_PRIV_MAX;
	}

	/* Build generic header portion */
	info->priv[*offset + CS_ETM_MAGIC] = magic;
	info->priv[*offset + CS_ETM_CPU] = cpu;
	/* Where the next CPU entry should start from */
	*offset += increment;
}

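/*
 * Fill the PERF_RECORD_AUXTRACE_INFO event: first a session-wide header
 * (version, PMU type, number of CPUs, snapshot mode), then one per-CPU
 * metadata block laid down by cs_etm_get_metadata().
 */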
static int cs_etm_info_fill(struct auxtrace_record *itr,
			    struct perf_session *session,
			    struct perf_record_auxtrace_info *info,
			    size_t priv_size)
{
	int i;
	u32 offset;
	u64 nr_cpu, type;
	struct perf_cpu_map *cpu_map;
	struct perf_cpu_map *event_cpus = session->evlist->core.cpus;
	struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);
	struct cs_etm_recording *ptr =
		container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;

	if (priv_size != cs_etm_info_priv_size(itr, session->evlist))
		return -EINVAL;

	if (!session->evlist->core.nr_mmaps)
		return -EINVAL;

	/* If the cpu_map is empty all online CPUs are involved */
	if (perf_cpu_map__empty(event_cpus)) {
		cpu_map = online_cpus;
	} else {
		/* Make sure all specified CPUs are online */
		for (i = 0; i < perf_cpu_map__nr(event_cpus); i++) {
			if (cpu_map__has(event_cpus, i) &&
			    !cpu_map__has(online_cpus, i))
				return -EINVAL;
		}

		cpu_map = event_cpus;
	}

	nr_cpu = perf_cpu_map__nr(cpu_map);
	/* Get PMU type as dynamically assigned by the core */
	type = cs_etm_pmu->type;

	/* First fill out the session header */
	info->type = PERF_AUXTRACE_CS_ETM;
	info->priv[CS_HEADER_VERSION_0] = 0;
	info->priv[CS_PMU_TYPE_CPUS] = type << 32;
	info->priv[CS_PMU_TYPE_CPUS] |= nr_cpu;
	info->priv[CS_ETM_SNAPSHOT] = ptr->snapshot_mode;

	offset = CS_ETM_SNAPSHOT + 1;

	for (i = 0; i < cpu__max_cpu() && offset < priv_size; i++)
		if (cpu_map__has(cpu_map, i))
			cs_etm_get_metadata(i, &offset, itr, info);

	perf_cpu_map__put(online_cpus);

	return 0;
}

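/*
 * Make sure @ptr->wrapped has at least @idx + 1 entries, growing it
 * with realloc() if needed.
 */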
static int cs_etm_alloc_wrapped_array(struct cs_etm_recording *ptr, int idx)
{
	bool *wrapped;
	int cnt = ptr->wrapped_cnt;

	/* Make @ptr->wrapped big enough to hold entry @idx */
	if (cnt <= idx)
		cnt = idx + 1;

	/*
	 * Free'ed in cs_etm_recording_free().  Using realloc() to avoid
	 * cross compilation problems where the host's system supports
	 * reallocarray() but not the target.
	 */
	wrapped = realloc(ptr->wrapped, cnt * sizeof(bool));
	if (!wrapped)
		return -ENOMEM;

	/* Newly allocated entries start out as "not wrapped" */
	memset(wrapped + ptr->wrapped_cnt, 0,
	       (cnt - ptr->wrapped_cnt) * sizeof(bool));
	ptr->wrapped_cnt = cnt;
	ptr->wrapped = wrapped;

	return 0;
}

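/*
 * Decide whether the AUX ring buffer has wrapped around: either @head
 * has numerically gone past the buffer size, or non-zero trace data is
 * found in the last few hundred bytes of the buffer.
 */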
static bool cs_etm_buffer_has_wrapped(unsigned char *buffer,
				      size_t buffer_size, u64 head)
{
	u64 i, watermark;
	u64 *buf = (u64 *)buffer;
	size_t buf_size = buffer_size;

	/*
	 * We want to look at the very last 512 bytes (chosen arbitrarily) in
	 * the ring buffer.
	 */
	watermark = buf_size - 512;

	/*
	 * @head is continuously increasing - if its value is equal to or
	 * greater than the size of the ring buffer, it has wrapped around.
	 */
	if (head >= buffer_size)
		return true;

	/*
	 * The value of @head is somewhere within the size of the ring buffer.
	 * This can be that there hasn't been enough data to fill the ring
	 * buffer yet or the trace time was so long that @head has numerically
	 * wrapped around.  To find out, we need to check if we have data at
	 * the very end of the ring buffer.  We can reliably do this because
	 * mmap'ed pages are zeroed out and there is a fresh mapping with
	 * every new session.
	 */

	/* @head is less than 512 bytes from the end of the ring buffer */
	if (head > watermark)
		watermark = head;

	/*
	 * Speed things up by using 64 bit transactions (see "u64 *buf" above)
	 */
	watermark >>= 3;
	buf_size >>= 3;

	/*
	 * If we find trace data at the end of the ring buffer, @head has
	 * been there and has numerically wrapped around at least once.
	 */
	for (i = watermark; i < buf_size; i++)
		if (buf[i])
			return true;

	return false;
}

static int cs_etm_find_snapshot(struct auxtrace_record *itr,
				int idx, struct auxtrace_mmap *mm,
				unsigned char *data,
				u64 *head, u64 *old)
{
	int err;
	bool wrapped;
	struct cs_etm_recording *ptr =
		container_of(itr, struct cs_etm_recording, itr);

	/*
	 * Allocate memory to keep track of wrapping if this is the first
	 * time we deal with this *mm.
	 */
	if (idx >= ptr->wrapped_cnt) {
		err = cs_etm_alloc_wrapped_array(ptr, idx);
		if (err)
			return err;
	}

	/*
	 * Check to see if *head has wrapped around.  If it hasn't, only the
	 * amount of data between *head and *old is snapshot'ed to avoid
	 * bloating the perf.data file with zeros.  But as soon as *head has
	 * wrapped around, the entire size of the AUX ring buffer is taken.
	 */
	wrapped = ptr->wrapped[idx];
	if (!wrapped && cs_etm_buffer_has_wrapped(data, mm->len, *head)) {
		wrapped = true;
		ptr->wrapped[idx] = true;
	}

	pr_debug3("%s: mmap index %d old head %zu new head %zu size %zu\n",
		  __func__, idx, (size_t)*old, (size_t)*head, mm->len);

	/* No wrap has occurred, we can just use *head and *old. */
	if (!wrapped)
		return 0;

	/*
	 * *head has wrapped around - adjust *head and *old to pick up the
	 * entire content of the AUX buffer.
	 */
	if (*head >= mm->len) {
		*old = *head - mm->len;
	} else {
		*head += mm->len;
		*old = *head - mm->len;
	}

	return 0;
}

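/*
 * In snapshot mode the tracer is stopped while the content of the AUX
 * buffer is captured, then started again: cs_etm_snapshot_start() and
 * cs_etm_snapshot_finish() simply disable/enable the ETM event.
 */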
static int cs_etm_snapshot_start(struct auxtrace_record *itr)
{
	struct cs_etm_recording *ptr =
		container_of(itr, struct cs_etm_recording, itr);
	struct evsel *evsel;

	evlist__for_each_entry(ptr->evlist, evsel) {
		if (evsel->core.attr.type == ptr->cs_etm_pmu->type)
			return evsel__disable(evsel);
	}
	return -EINVAL;
}

static int cs_etm_snapshot_finish(struct auxtrace_record *itr)
{
	struct cs_etm_recording *ptr =
		container_of(itr, struct cs_etm_recording, itr);
	struct evsel *evsel;

	evlist__for_each_entry(ptr->evlist, evsel) {
		if (evsel->core.attr.type == ptr->cs_etm_pmu->type)
			return evsel__enable(evsel);
	}
	return -EINVAL;
}

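/*
 * Random value identifying this session's AUX buffers in the perf.data
 * file.  Two rand() calls are combined since a single rand() yields at
 * most RAND_MAX and cannot cover the full 64-bit range.
 */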
static u64 cs_etm_reference(struct auxtrace_record *itr __maybe_unused)
{
	return (((u64) rand() <<  0) & 0x00000000FFFFFFFFull) |
	       (((u64) rand() << 32) & 0xFFFFFFFF00000000ull);
}

static void cs_etm_recording_free(struct auxtrace_record *itr)
{
	struct cs_etm_recording *ptr =
		container_of(itr, struct cs_etm_recording, itr);

	zfree(&ptr->wrapped);
	free(ptr);
}

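/*
 * Called once the data in AUX buffer @idx has been consumed; re-enable
 * the event behind it so that tracing can resume.
 */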
static int cs_etm_read_finish(struct auxtrace_record *itr, int idx)
{
	struct cs_etm_recording *ptr =
		container_of(itr, struct cs_etm_recording, itr);
	struct evsel *evsel;

	evlist__for_each_entry(ptr->evlist, evsel) {
		if (evsel->core.attr.type == ptr->cs_etm_pmu->type)
			return perf_evlist__enable_event_idx(ptr->evlist,
							     evsel, idx);
	}

	return -EINVAL;
}

struct auxtrace_record *cs_etm_record_init(int *err)
{
	struct perf_pmu *cs_etm_pmu;
	struct cs_etm_recording *ptr;

	cs_etm_pmu = perf_pmu__find(CORESIGHT_ETM_PMU_NAME);

	if (!cs_etm_pmu) {
		*err = -EINVAL;
		goto out;
	}

	ptr = zalloc(sizeof(struct cs_etm_recording));
	if (!ptr) {
		*err = -ENOMEM;
		goto out;
	}

	ptr->cs_etm_pmu = cs_etm_pmu;
	ptr->itr.parse_snapshot_options = cs_etm_parse_snapshot_options;
	ptr->itr.recording_options = cs_etm_recording_options;
	ptr->itr.info_priv_size = cs_etm_info_priv_size;
	ptr->itr.info_fill = cs_etm_info_fill;
	ptr->itr.find_snapshot = cs_etm_find_snapshot;
	ptr->itr.snapshot_start = cs_etm_snapshot_start;
	ptr->itr.snapshot_finish = cs_etm_snapshot_finish;
	ptr->itr.reference = cs_etm_reference;
	ptr->itr.free = cs_etm_recording_free;
	ptr->itr.read_finish = cs_etm_read_finish;

	*err = 0;
	return &ptr->itr;
out:
	return NULL;
}