// SPDX-License-Identifier: GPL-2.0-only
#include <perf/cpumap.h>
#include <stdlib.h>
#include <linux/refcount.h>
#include <internal/cpumap.h>
#include <asm/bug.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <ctype.h>
#include <limits.h>
#include <assert.h>	/* assert() is used in cpu_map__trim_new() and perf_cpu_map__merge() */

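/*
 * Return a map with a single entry of -1, the value perf uses to mean
 * "any CPU", e.g. for events that are not bound to a particular CPU.
 */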
struct perf_cpu_map *perf_cpu_map__dummy_new(void)
{
	struct perf_cpu_map *cpus = malloc(sizeof(*cpus) + sizeof(int));

	if (cpus != NULL) {
		cpus->nr = 1;
		cpus->map[0] = -1;
		refcount_set(&cpus->refcnt, 1);
	}

	return cpus;
}

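/* Free @map; warn if it is still referenced. */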
static void cpu_map__delete(struct perf_cpu_map *map)
{
	if (map) {
		WARN_ONCE(refcount_read(&map->refcnt) != 0,
			  "cpu_map refcnt unbalanced\n");
		free(map);
	}
}

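/* Take an extra reference on @map; a NULL map is passed through. */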
struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map)
{
	if (map)
		refcount_inc(&map->refcnt);
	return map;
}

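/* Drop one reference and free @map when the count reaches zero. */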
void perf_cpu_map__put(struct perf_cpu_map *map)
{
	if (map && refcount_dec_and_test(&map->refcnt))
		cpu_map__delete(map);
}

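/*
 * Build the default map, covering CPUs 0..N-1 for the N CPUs reported
 * online by sysconf(_SC_NPROCESSORS_ONLN).
 */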
static struct perf_cpu_map *cpu_map__default_new(void)
{
	struct perf_cpu_map *cpus;
	int nr_cpus;

	nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
	if (nr_cpus < 0)
		return NULL;

	cpus = malloc(sizeof(*cpus) + nr_cpus * sizeof(int));
	if (cpus != NULL) {
		int i;

		for (i = 0; i < nr_cpus; ++i)
			cpus->map[i] = i;

		cpus->nr = nr_cpus;
		refcount_set(&cpus->refcnt, 1);
	}

	return cpus;
}

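/* qsort() comparator for ints, used to sort CPU numbers ascending. */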
static int cmp_int(const void *a, const void *b)
{
	return *(const int *)a - *(const int *)b;
}

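/*
 * Turn the scratch array @tmp_cpus into a map: sort the entries, drop
 * duplicates and set nr to the number of entries that survive.
 */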
static struct perf_cpu_map *cpu_map__trim_new(int nr_cpus, int *tmp_cpus)
{
	size_t payload_size = nr_cpus * sizeof(int);
	struct perf_cpu_map *cpus = malloc(sizeof(*cpus) + payload_size);
	int i, j;

	if (cpus != NULL) {
		memcpy(cpus->map, tmp_cpus, payload_size);
		qsort(cpus->map, nr_cpus, sizeof(int), cmp_int);
		/* Remove dups */
		j = 0;
		for (i = 0; i < nr_cpus; i++) {
			if (i == 0 || cpus->map[i] != cpus->map[i - 1])
				cpus->map[j++] = cpus->map[i];
		}
		cpus->nr = j;
		assert(j <= nr_cpus);
		refcount_set(&cpus->refcnt, 1);
	}

	return cpus;
}

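/*
 * Parse a CPU list in kernel sysfs format (e.g. "0-3,7") from @file.
 * An empty list falls back to the default map of all online CPUs.
 */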
struct perf_cpu_map *perf_cpu_map__read(FILE *file)
{
	struct perf_cpu_map *cpus = NULL;
	int nr_cpus = 0;
	int *tmp_cpus = NULL, *tmp;
	int max_entries = 0;
	int n, cpu, prev;
	char sep;

	sep = 0;
	prev = -1;
	for (;;) {
		n = fscanf(file, "%u%c", &cpu, &sep);
		if (n <= 0)
			break;
		if (prev >= 0) {
			int new_max = nr_cpus + cpu - prev - 1;

			WARN_ONCE(new_max >= MAX_NR_CPUS, "Perf can support %d CPUs. "
							  "Consider raising MAX_NR_CPUS\n", MAX_NR_CPUS);

			if (new_max >= max_entries) {
				max_entries = new_max + MAX_NR_CPUS / 2;
				tmp = realloc(tmp_cpus, max_entries * sizeof(int));
				if (tmp == NULL)
					goto out_free_tmp;
				tmp_cpus = tmp;
			}

			while (++prev < cpu)
				tmp_cpus[nr_cpus++] = prev;
		}
		if (nr_cpus == max_entries) {
			max_entries += MAX_NR_CPUS;
			tmp = realloc(tmp_cpus, max_entries * sizeof(int));
			if (tmp == NULL)
				goto out_free_tmp;
			tmp_cpus = tmp;
		}

		tmp_cpus[nr_cpus++] = cpu;
		if (n == 2 && sep == '-')
			prev = cpu;
		else
			prev = -1;
		if (n == 1 || sep == '\n')
			break;
	}

	if (nr_cpus > 0)
		cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
	else
		cpus = cpu_map__default_new();
out_free_tmp:
	free(tmp_cpus);
	return cpus;
}

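/*
 * Build a map of the online CPUs from sysfs, falling back to the
 * default map when /sys/devices/system/cpu/online cannot be opened.
 */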
static struct perf_cpu_map *cpu_map__read_all_cpu_map(void)
{
	struct perf_cpu_map *cpus = NULL;
	FILE *onlnf;

	onlnf = fopen("/sys/devices/system/cpu/online", "r");
	if (!onlnf)
		return cpu_map__default_new();

	cpus = perf_cpu_map__read(onlnf);
	fclose(onlnf);
	return cpus;
}

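/*
 * Build a map from a user-supplied CPU list string such as "0-3,7".
 * NULL means "all online CPUs", an empty string yields the dummy map
 * and a malformed list returns NULL.
 */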
struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list)
{
	struct perf_cpu_map *cpus = NULL;
	unsigned long start_cpu, end_cpu = 0;
	char *p = NULL;
	int i, nr_cpus = 0;
	int *tmp_cpus = NULL, *tmp;
	int max_entries = 0;

	if (!cpu_list)
		return cpu_map__read_all_cpu_map();

	/*
	 * Must handle the case of an empty cpumap to cover the
	 * TOPOLOGY header for NUMA nodes with no CPU
	 * (e.g., because of CPU hotplug).
	 */
	if (!isdigit(*cpu_list) && *cpu_list != '\0')
		goto out;

	while (isdigit(*cpu_list)) {
		p = NULL;
		start_cpu = strtoul(cpu_list, &p, 0);
		if (start_cpu >= INT_MAX
		    || (*p != '\0' && *p != ',' && *p != '-'))
			goto invalid;

		if (*p == '-') {
			cpu_list = ++p;
			p = NULL;
			end_cpu = strtoul(cpu_list, &p, 0);

			if (end_cpu >= INT_MAX || (*p != '\0' && *p != ','))
				goto invalid;

			if (end_cpu < start_cpu)
				goto invalid;
		} else {
			end_cpu = start_cpu;
		}

		WARN_ONCE(end_cpu >= MAX_NR_CPUS, "Perf can support %d CPUs. "
						  "Consider raising MAX_NR_CPUS\n", MAX_NR_CPUS);

		for (; start_cpu <= end_cpu; start_cpu++) {
			/* check for duplicates */
			for (i = 0; i < nr_cpus; i++)
				if (tmp_cpus[i] == (int)start_cpu)
					goto invalid;

			if (nr_cpus == max_entries) {
				max_entries += MAX_NR_CPUS;
				tmp = realloc(tmp_cpus, max_entries * sizeof(int));
				if (tmp == NULL)
					goto invalid;
				tmp_cpus = tmp;
			}
			tmp_cpus[nr_cpus++] = (int)start_cpu;
		}
		if (*p)
			++p;

		cpu_list = p;
	}

	if (nr_cpus > 0)
		cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
	else if (*cpu_list != '\0')
		cpus = cpu_map__default_new();
	else
		cpus = perf_cpu_map__dummy_new();
invalid:
	free(tmp_cpus);
out:
	return cpus;
}

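/* Return the CPU number at index @idx, or -1 when out of range. */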
int perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx)
{
	if (cpus && idx < cpus->nr)
		return cpus->map[idx];

	return -1;
}

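/* Number of entries in @cpus; a NULL map counts as a single entry. */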
int perf_cpu_map__nr(const struct perf_cpu_map *cpus)
{
	return cpus ? cpus->nr : 1;
}

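/* True when @map is NULL or only holds the dummy "any CPU" entry. */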
bool perf_cpu_map__empty(const struct perf_cpu_map *map)
{
	return map ? map->map[0] == -1 : true;
}

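/* Return the index of @cpu in @cpus, or -1 if it is not in the map. */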
int perf_cpu_map__idx(struct perf_cpu_map *cpus, int cpu)
{
	int i;

	for (i = 0; i < cpus->nr; ++i) {
		if (cpus->map[i] == cpu)
			return i;
	}

	return -1;
}

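/* Return the highest CPU number in @map, or -1 for an empty map. */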
int perf_cpu_map__max(struct perf_cpu_map *map)
{
	int i, max = -1;

	for (i = 0; i < map->nr; i++) {
		if (map->map[i] > max)
			max = map->map[i];
	}

	return max;
}

/*
 * Merge two cpumaps.
 *
 * orig either gets freed and replaced with a new map, or reused
 * with no reference count change (similar to "realloc"). other gains
 * a reference only when it is itself returned as the merged map.
 */

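/*
 * For example, merging maps built from "0-2" and "2-4" produces a map
 * for "0-4": the shared CPU is emitted once by the merge loop, and
 * cpu_map__trim_new() keeps the result sorted with no duplicates.
 */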
struct perf_cpu_map *perf_cpu_map__merge(struct perf_cpu_map *orig,
					 struct perf_cpu_map *other)
{
	int *tmp_cpus;
	int tmp_len;
	int i, j, k;
	struct perf_cpu_map *merged;

	if (!orig && !other)
		return NULL;
	if (!orig) {
		perf_cpu_map__get(other);
		return other;
	}
	if (!other)
		return orig;
	if (orig->nr == other->nr &&
	    !memcmp(orig->map, other->map, orig->nr * sizeof(int)))
		return orig;

	tmp_len = orig->nr + other->nr;
	tmp_cpus = malloc(tmp_len * sizeof(int));
	if (!tmp_cpus)
		return NULL;

	/* Standard merge algorithm from wikipedia */
	i = j = k = 0;
	while (i < orig->nr && j < other->nr) {
		if (orig->map[i] <= other->map[j]) {
			if (orig->map[i] == other->map[j])
				j++;
			tmp_cpus[k++] = orig->map[i++];
		} else
			tmp_cpus[k++] = other->map[j++];
	}

	while (i < orig->nr)
		tmp_cpus[k++] = orig->map[i++];

	while (j < other->nr)
		tmp_cpus[k++] = other->map[j++];
	assert(k <= tmp_len);

	merged = cpu_map__trim_new(k, tmp_cpus);
	free(tmp_cpus);
	perf_cpu_map__put(orig);
	return merged;
}