// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011-2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from evlist.c builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 */

#include <sys/mman.h>
#include <inttypes.h>
#include <asm/bug.h>
#include <linux/zalloc.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h> // sysconf()
#include <perf/mmap.h>
#ifdef HAVE_LIBNUMA_SUPPORT
#include <numaif.h>
#endif
#include "cpumap.h"
#include "debug.h"
#include "event.h"
#include "mmap.h"
#include "../perf.h"
#include <internal/lib.h> /* page_size */
#include <linux/bitmap.h>

#define MASK_SIZE 1023
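/*
 * Print a CPU mask bitmap for debugging. The textual form is truncated to
 * MASK_SIZE characters; 'tag' identifies which mask is being dumped.
 */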
void mmap_cpu_mask__scnprintf(struct mmap_cpu_mask *mask, const char *tag)
{
	char buf[MASK_SIZE + 1];
	size_t len;

	len = bitmap_scnprintf(mask->bits, mask->nbits, buf, MASK_SIZE);
	buf[len] = '\0';
	pr_debug("%p: %s mask[%zd]: %s\n", mask, tag, mask->nbits, buf);
}

size_t mmap__mmap_len(struct mmap *map)
{
	return perf_mmap__mmap_len(&map->core);
}

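/*
 * Weak default stubs for AUX area tracing: they do nothing and are
 * overridden by the real implementations when auxtrace support is built in.
 */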
int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
			       struct auxtrace_mmap_params *mp __maybe_unused,
			       void *userpg __maybe_unused,
			       int fd __maybe_unused)
{
	return 0;
}

void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
{
}

void __weak auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp __maybe_unused,
				       off_t auxtrace_offset __maybe_unused,
				       unsigned int auxtrace_pages __maybe_unused,
				       bool auxtrace_overwrite __maybe_unused)
{
}

void __weak auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp __maybe_unused,
					  struct evlist *evlist __maybe_unused,
					  int idx __maybe_unused,
					  bool per_cpu __maybe_unused)
{
}

#ifdef HAVE_AIO_SUPPORT
static int perf_mmap__aio_enabled(struct mmap *map)
{
	return map->aio.nr_cblocks > 0;
}

#ifdef HAVE_LIBNUMA_SUPPORT
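/*
 * With libnuma available, the AIO data buffer is allocated with an anonymous
 * mmap() so its pages can later be bound to a NUMA node with mbind().
 */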
static int perf_mmap__aio_alloc(struct mmap *map, int idx)
{
	map->aio.data[idx] = mmap(NULL, mmap__mmap_len(map), PROT_READ|PROT_WRITE,
				  MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
	if (map->aio.data[idx] == MAP_FAILED) {
		map->aio.data[idx] = NULL;
		return -1;
	}

	return 0;
}

static void perf_mmap__aio_free(struct mmap *map, int idx)
{
	if (map->aio.data[idx]) {
		munmap(map->aio.data[idx], mmap__mmap_len(map));
		map->aio.data[idx] = NULL;
	}
}

static int perf_mmap__aio_bind(struct mmap *map, int idx, int cpu, int affinity)
{
	void *data;
	size_t mmap_len;
	unsigned long *node_mask;
	unsigned long node_index;
	int err = 0;

	if (affinity != PERF_AFFINITY_SYS && cpu__max_node() > 1) {
		data = map->aio.data[idx];
		mmap_len = mmap__mmap_len(map);
		node_index = cpu__get_node(cpu);
		node_mask = bitmap_alloc(node_index + 1);
		if (!node_mask) {
			pr_err("Failed to allocate node mask for mbind: error %m\n");
			return -1;
		}
		set_bit(node_index, node_mask);
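		/*
		 * mbind() treats maxnode as one more than the number of
		 * valid bits in the mask (the kernel decrements it before
		 * use), hence node_index + 1 + 1.
		 */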
		if (mbind(data, mmap_len, MPOL_BIND, node_mask, node_index + 1 + 1, 0)) {
			pr_err("Failed to bind [%p-%p] AIO buffer to node %lu: error %m\n",
				data, data + mmap_len, node_index);
			err = -1;
		}
		bitmap_free(node_mask);
	}

	return err;
}
#else /* !HAVE_LIBNUMA_SUPPORT */
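/*
 * Fallback versions without libnuma: plain malloc()/zfree() for the AIO
 * buffers and no node binding at all.
 */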
static int perf_mmap__aio_alloc(struct mmap *map, int idx)
{
	map->aio.data[idx] = malloc(mmap__mmap_len(map));
	if (map->aio.data[idx] == NULL)
		return -1;

	return 0;
}

static void perf_mmap__aio_free(struct mmap *map, int idx)
{
	zfree(&(map->aio.data[idx]));
}

static int perf_mmap__aio_bind(struct mmap *map __maybe_unused, int idx __maybe_unused,
		int cpu __maybe_unused, int affinity __maybe_unused)
{
	return 0;
}
#endif

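/*
 * Set up mp->nr_cblocks AIO control blocks and per-cblock data buffers for
 * asynchronous trace writing; the buffers are node-bound according to the
 * requested affinity.
 */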
static int perf_mmap__aio_mmap(struct mmap *map, struct mmap_params *mp)
{
	int delta_max, i, prio, ret;

	map->aio.nr_cblocks = mp->nr_cblocks;
	if (map->aio.nr_cblocks) {
		map->aio.aiocb = calloc(map->aio.nr_cblocks, sizeof(struct aiocb *));
		if (!map->aio.aiocb) {
			pr_debug2("failed to allocate aiocb for data buffer, error %m\n");
			return -1;
		}
		map->aio.cblocks = calloc(map->aio.nr_cblocks, sizeof(struct aiocb));
		if (!map->aio.cblocks) {
			pr_debug2("failed to allocate cblocks for data buffer, error %m\n");
			return -1;
		}
		map->aio.data = calloc(map->aio.nr_cblocks, sizeof(void *));
		if (!map->aio.data) {
			pr_debug2("failed to allocate data buffer, error %m\n");
			return -1;
		}
		delta_max = sysconf(_SC_AIO_PRIO_DELTA_MAX);
		for (i = 0; i < map->aio.nr_cblocks; ++i) {
			ret = perf_mmap__aio_alloc(map, i);
			if (ret == -1) {
				pr_debug2("failed to allocate data buffer area, error %m\n");
				return -1;
			}
			ret = perf_mmap__aio_bind(map, i, map->core.cpu, mp->affinity);
			if (ret == -1)
				return -1;
			/*
			 * Use a cblock.aio_fildes value different from -1
			 * to denote a started aio write operation on the
			 * cblock, so an explicit record__aio_sync() call is
			 * required before the cblock can be reused.
			 */
			map->aio.cblocks[i].aio_fildes = -1;
			/*
			 * Allocate cblocks with decreasing priority deltas
			 * to get faster aio write system calls: queued
			 * requests are kept in separate per-priority queues,
			 * so adding a new request iterates through a shorter
			 * per-priority list. Blocks with indices higher than
			 * _SC_AIO_PRIO_DELTA_MAX get priority 0.
			 */
			prio = delta_max - i;
			map->aio.cblocks[i].aio_reqprio = prio >= 0 ? prio : 0;
		}
	}

	return 0;
}

static void perf_mmap__aio_munmap(struct mmap *map)
{
	int i;

	for (i = 0; i < map->aio.nr_cblocks; ++i)
		perf_mmap__aio_free(map, i);
	if (map->aio.data)
		zfree(&map->aio.data);
	zfree(&map->aio.cblocks);
	zfree(&map->aio.aiocb);
}
#else /* !HAVE_AIO_SUPPORT */
static int perf_mmap__aio_enabled(struct mmap *map __maybe_unused)
{
	return 0;
}

static int perf_mmap__aio_mmap(struct mmap *map __maybe_unused,
			       struct mmap_params *mp __maybe_unused)
{
	return 0;
}

static void perf_mmap__aio_munmap(struct mmap *map __maybe_unused)
{
}
#endif

void mmap__munmap(struct mmap *map)
{
	bitmap_free(map->affinity_mask.bits);

	perf_mmap__aio_munmap(map);
	if (map->data != NULL) {
		munmap(map->data, mmap__mmap_len(map));
		map->data = NULL;
	}
	auxtrace_mmap__munmap(&map->auxtrace_mmap);
}

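/*
 * Set a bit in 'mask' for every online CPU that resides on the given NUMA
 * node.
 */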
static void build_node_mask(int node, struct mmap_cpu_mask *mask)
{
	int c, cpu, nr_cpus;
	const struct perf_cpu_map *cpu_map = NULL;

	cpu_map = cpu_map__online();
	if (!cpu_map)
		return;

	nr_cpus = perf_cpu_map__nr(cpu_map);
	for (c = 0; c < nr_cpus; c++) {
		cpu = cpu_map->map[c]; /* map c index to online cpu index */
		if (cpu__get_node(cpu) == node)
			set_bit(cpu, mask->bits);
	}
}

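/*
 * Allocate the per-mmap affinity mask, sized for the highest possible CPU:
 * for PERF_AFFINITY_NODE include all CPUs on the buffer's node, for
 * PERF_AFFINITY_CPU just the CPU the buffer is mapped on.
 */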
static int perf_mmap__setup_affinity_mask(struct mmap *map, struct mmap_params *mp)
{
	map->affinity_mask.nbits = cpu__max_cpu();
	map->affinity_mask.bits = bitmap_alloc(map->affinity_mask.nbits);
	if (!map->affinity_mask.bits)
		return -1;

	if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1)
		build_node_mask(cpu__get_node(map->core.cpu), &map->affinity_mask);
	else if (mp->affinity == PERF_AFFINITY_CPU)
		set_bit(map->core.cpu, map->affinity_mask.bits);

	return 0;
}

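/*
 * Map the perf event ring buffer for 'fd', then set up the optional pieces:
 * affinity mask, flush threshold, compression scratch buffer, AUX area mmap
 * and AIO buffers.
 */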
int mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu)
{
	if (perf_mmap__mmap(&map->core, &mp->core, fd, cpu)) {
		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
			  errno);
		return -1;
	}

	if (mp->affinity != PERF_AFFINITY_SYS &&
		perf_mmap__setup_affinity_mask(map, mp)) {
		pr_debug2("failed to alloc mmap affinity mask, error %d\n",
			  errno);
		return -1;
	}

	if (verbose == 2)
		mmap_cpu_mask__scnprintf(&map->affinity_mask, "mmap");

	map->core.flush = mp->flush;

	map->comp_level = mp->comp_level;

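	/*
	 * When compression is enabled and AIO is not, allocate a separate
	 * anonymous buffer to hold the compressed records.
	 */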
	if (map->comp_level && !perf_mmap__aio_enabled(map)) {
		map->data = mmap(NULL, mmap__mmap_len(map), PROT_READ|PROT_WRITE,
				 MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
		if (map->data == MAP_FAILED) {
			pr_debug2("failed to mmap data buffer, error %d\n",
				  errno);
			map->data = NULL;
			return -1;
		}
	}

	if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
				&mp->auxtrace_mp, map->core.base, fd))
		return -1;

	return perf_mmap__aio_mmap(map, mp);
}

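/*
 * Drain the ring buffer through the caller-supplied push() callback.
 * Returns 0 on success, 1 if there is nothing to read (-EAGAIN from
 * perf_mmap__read_init()) and -1 on error.
 */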
int perf_mmap__push(struct mmap *md, void *to,
		    int push(struct mmap *map, void *to, void *buf, size_t size))
{
	u64 head = perf_mmap__read_head(&md->core);
	unsigned char *data = md->core.base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	rc = perf_mmap__read_init(&md->core);
	if (rc < 0)
		return (rc == -EAGAIN) ? 1 : -1;

	size = md->core.end - md->core.start;

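	/*
	 * If the [start, end) region wraps around the end of the ring
	 * buffer, push the chunk up to the buffer end first; the remainder
	 * is then pushed from the buffer start below.
	 */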
	if ((md->core.start & md->core.mask) + size != (md->core.end & md->core.mask)) {
		buf = &data[md->core.start & md->core.mask];
		size = md->core.mask + 1 - (md->core.start & md->core.mask);
		md->core.start += size;

		if (push(md, to, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	buf = &data[md->core.start & md->core.mask];
	size = md->core.end - md->core.start;
	md->core.start += size;

	if (push(md, to, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->core.prev = head;
	perf_mmap__consume(&md->core);
out:
	return rc;
}