// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include "dso.h"
#include "session.h"
#include "thread.h"
#include "thread-stack.h"
#include "debug.h"
#include "namespaces.h"
#include "comm.h"
#include "map.h"
#include "symbol.h"
#include "unwind.h"
#include "callchain.h"

#include <api/fs/fs.h>

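/*
 * All threads of a process share one set of map groups: the group
 * leader (pid == tid) allocates a fresh one and every other thread
 * takes a reference on its leader's.  pid == -1 means the process is
 * unknown, so the thread gets map groups of its own.
 */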
int thread__init_map_groups(struct thread *thread, struct machine *machine)
{
	pid_t pid = thread->pid_;

	if (pid == thread->tid || pid == -1) {
		thread->mg = map_groups__new(machine);
	} else {
		struct thread *leader = __machine__findnew_thread(machine, pid, pid);
		if (leader) {
			thread->mg = map_groups__get(leader->mg);
			thread__put(leader);
		}
	}

	return thread->mg ? 0 : -1;
}

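/*
 * Allocate a thread with a single reference and a placeholder ":<tid>"
 * comm entry that stays in place until a real comm is set.
 */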
struct thread *thread__new(pid_t pid, pid_t tid)
{
	char *comm_str;
	struct comm *comm;
	struct thread *thread = zalloc(sizeof(*thread));

	if (thread != NULL) {
		thread->pid_ = pid;
		thread->tid = tid;
		thread->ppid = -1;
		thread->cpu = -1;
		INIT_LIST_HEAD(&thread->namespaces_list);
		INIT_LIST_HEAD(&thread->comm_list);
		init_rwsem(&thread->namespaces_lock);
		init_rwsem(&thread->comm_lock);

		comm_str = malloc(32);
		if (!comm_str)
			goto err_thread;

		snprintf(comm_str, 32, ":%d", tid);
		comm = comm__new(comm_str, 0, false);
		free(comm_str);
		if (!comm)
			goto err_thread;

		list_add(&comm->list, &thread->comm_list);
		refcount_set(&thread->refcnt, 1);
		RB_CLEAR_NODE(&thread->rb_node);
		/* Thread holds first ref to nsdata. */
		thread->nsinfo = nsinfo__new(pid);
		srccode_state_init(&thread->srccode_state);
	}

	return thread;

err_thread:
	free(thread);
	return NULL;
}

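/*
 * Free everything hanging off a thread: its stack, map groups,
 * namespaces and comm lists.  Normally reached via thread__put()
 * dropping the last reference, once the thread is off the machine's
 * rb tree.
 */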
void thread__delete(struct thread *thread)
{
	struct namespaces *namespaces, *tmp_namespaces;
	struct comm *comm, *tmp_comm;

	BUG_ON(!RB_EMPTY_NODE(&thread->rb_node));

	thread_stack__free(thread);

	if (thread->mg) {
		map_groups__put(thread->mg);
		thread->mg = NULL;
	}
	down_write(&thread->namespaces_lock);
	list_for_each_entry_safe(namespaces, tmp_namespaces,
				 &thread->namespaces_list, list) {
		list_del_init(&namespaces->list);
		namespaces__free(namespaces);
	}
	up_write(&thread->namespaces_lock);

	down_write(&thread->comm_lock);
	list_for_each_entry_safe(comm, tmp_comm, &thread->comm_list, list) {
		list_del_init(&comm->list);
		comm__free(comm);
	}
	up_write(&thread->comm_lock);

	nsinfo__zput(thread->nsinfo);
	srccode_state_free(&thread->srccode_state);

	exit_rwsem(&thread->namespaces_lock);
	exit_rwsem(&thread->comm_lock);
	free(thread);
}

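/* Grab a reference; NULL is passed through so calls can be chained. */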
struct thread *thread__get(struct thread *thread)
{
	if (thread)
		refcount_inc(&thread->refcnt);
	return thread;
}

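/* Drop a reference, deleting the thread when the last one goes away. */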
void thread__put(struct thread *thread)
{
	if (thread && refcount_dec_and_test(&thread->refcnt)) {
		/*
		 * Remove it from the dead threads list, as the last reference
		 * is gone, if it is on such a list.
		 *
		 * We may not be there anymore if, say, the machine where it
		 * was stored was already deleted, in which case it was already
		 * removed from the dead threads while some other piece of code
		 * still kept a reference.
		 *
		 * This is what 'perf sched' does: it finally drops the thread
		 * in perf_sched__lat(), where it calls perf_sched__read_events(),
		 * which processes the events by creating a session and deleting
		 * it, which ends up destroying the list heads for the dead
		 * threads, but before it does that it removes all threads from
		 * the list using list_del_init().
		 *
		 * So we need to check here if it is on a dead threads list and,
		 * if so, remove it before finally deleting the thread, to avoid
		 * a use-after-free situation.
		 */
		if (!list_empty(&thread->node))
			list_del_init(&thread->node);
		thread__delete(thread);
	}
}

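/*
 * The namespaces list is kept newest-first, so the currently active
 * namespaces are the first entry.  The __ variant expects the caller
 * to hold namespaces_lock; thread__namespaces() takes it for the read.
 */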
static struct namespaces *__thread__namespaces(const struct thread *thread)
{
	if (list_empty(&thread->namespaces_list))
		return NULL;

	return list_first_entry(&thread->namespaces_list, struct namespaces, list);
}

struct namespaces *thread__namespaces(struct thread *thread)
{
	struct namespaces *ns;

	down_read(&thread->namespaces_lock);
	ns = __thread__namespaces(thread);
	up_read(&thread->namespaces_lock);

	return ns;
}

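/*
 * Record the namespaces from a PERF_RECORD_NAMESPACES event at the head
 * of the list and, when this is a change (timestamp != 0) rather than
 * the initial state, close out the previously current entry.
 */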
static int __thread__set_namespaces(struct thread *thread, u64 timestamp,
				    struct perf_record_namespaces *event)
{
	struct namespaces *new, *curr = __thread__namespaces(thread);

	new = namespaces__new(event);
	if (!new)
		return -ENOMEM;

	list_add(&new->list, &thread->namespaces_list);

	if (timestamp && curr) {
		/*
		 * A setns syscall must have changed a few or all of the
		 * namespaces of this thread. Update the end time for the
		 * namespaces previously used.
		 */
		curr = list_next_entry(new, list);
		curr->end_time = timestamp;
	}

	return 0;
}

int thread__set_namespaces(struct thread *thread, u64 timestamp,
			   struct perf_record_namespaces *event)
{
	int ret;

	down_write(&thread->namespaces_lock);
	ret = __thread__set_namespaces(thread, timestamp, event);
	up_write(&thread->namespaces_lock);
	return ret;
}

struct comm *thread__comm(const struct thread *thread)
{
	if (list_empty(&thread->comm_list))
		return NULL;

	return list_first_entry(&thread->comm_list, struct comm, list);
}

struct comm *thread__exec_comm(const struct thread *thread)
{
	struct comm *comm, *last = NULL, *second_last = NULL;

	list_for_each_entry(comm, &thread->comm_list, list) {
		if (comm->exec)
			return comm;
		second_last = last;
		last = comm;
	}

	/*
	 * 'last' with no start time might be the parent's comm of a synthesized
	 * thread (created by processing a synthesized fork event). For a main
	 * thread, that is very probably wrong. Prefer a later comm to avoid
	 * that case.
	 */
	if (second_last && !last->start && thread->pid_ == thread->tid)
		return second_last;

	return last;
}

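/*
 * The first comm set on a thread overrides the default ":<tid>" entry
 * created by thread__new(); later ones are prepended to the list so
 * that thread__comm() keeps returning the most recent name.  An exec
 * invalidates any cached unwind access state.
 */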
static int ____thread__set_comm(struct thread *thread, const char *str,
				u64 timestamp, bool exec)
{
	struct comm *new, *curr = thread__comm(thread);

	/* Override the default :tid entry */
	if (!thread->comm_set) {
		int err = comm__override(curr, str, timestamp, exec);
		if (err)
			return err;
	} else {
		new = comm__new(str, timestamp, exec);
		if (!new)
			return -ENOMEM;
		list_add(&new->list, &thread->comm_list);

		if (exec)
			unwind__flush_access(thread->mg);
	}

	thread->comm_set = true;

	return 0;
}

int __thread__set_comm(struct thread *thread, const char *str, u64 timestamp,
		       bool exec)
{
	int ret;

	down_write(&thread->comm_lock);
	ret = ____thread__set_comm(thread, str, timestamp, exec);
	up_write(&thread->comm_lock);
	return ret;
}

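/*
 * Fall back to /proc/<pid>/task/<tid>/comm for live sessions.  The
 * snprintf() return value check guards against a truncated path before
 * attempting the procfs read.
 */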
int thread__set_comm_from_proc(struct thread *thread)
{
	char path[64];
	char *comm = NULL;
	size_t sz;
	int err = -1;

	if (!(snprintf(path, sizeof(path), "%d/task/%d/comm",
		       thread->pid_, thread->tid) >= (int)sizeof(path)) &&
	    procfs__read_str(path, &comm, &sz) == 0) {
		comm[sz - 1] = '\0';
		err = thread__set_comm(thread, comm, 0);
		/* thread__set_comm() copies the string, drop our buffer */
		free(comm);
	}

	return err;
}

static const char *__thread__comm_str(const struct thread *thread)
{
	const struct comm *comm = thread__comm(thread);

	if (!comm)
		return NULL;

	return comm__str(comm);
}

const char *thread__comm_str(struct thread *thread)
{
	const char *str;

	down_read(&thread->comm_lock);
	str = __thread__comm_str(thread);
	up_read(&thread->comm_lock);

	return str;
}

/* CHECKME: it should probably return the max comm len from its comm list instead */
int thread__comm_len(struct thread *thread)
{
	if (!thread->comm_len) {
		const char *comm = thread__comm_str(thread);
		if (!comm)
			return 0;
		thread->comm_len = strlen(comm);
	}

	return thread->comm_len;
}

size_t thread__fprintf(struct thread *thread, FILE *fp)
{
	return fprintf(fp, "Thread %d %s\n", thread->tid, thread__comm_str(thread)) +
	       map_groups__fprintf(thread->mg, fp);
}

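/*
 * Add a map to the thread's map groups, first making sure the unwinder
 * can use it and then evicting whatever older maps it overlaps.
 */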
int thread__insert_map(struct thread *thread, struct map *map)
{
	int ret;

	ret = unwind__prepare_access(thread->mg, map, NULL);
	if (ret)
		return ret;

	map_groups__fixup_overlappings(thread->mg, map, stderr);
	map_groups__insert(thread->mg, map);

	return 0;
}

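/*
 * Seed the DWARF unwinder with this thread's existing maps: walk them
 * until unwind__prepare_access() reports that unwinding state has been
 * initialized (or errors out).  Only needed for dwarf callchain users.
 */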
static int __thread__prepare_access(struct thread *thread)
{
	bool initialized = false;
	int err = 0;
	struct maps *maps = &thread->mg->maps;
	struct map *map;

	down_read(&maps->lock);

	for (map = maps__first(maps); map; map = map__next(map)) {
		err = unwind__prepare_access(thread->mg, map, &initialized);
		if (err || initialized)
			break;
	}

	up_read(&maps->lock);

	return err;
}

static int thread__prepare_access(struct thread *thread)
{
	int err = 0;

	if (dwarf_callchain_users)
		err = __thread__prepare_access(thread);

	return err;
}

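/*
 * On fork, a thread of an existing process just shares (and readies)
 * that process' map groups, while a new process gets a copy of its
 * parent's maps.
 */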
static int thread__clone_map_groups(struct thread *thread,
				    struct thread *parent,
				    bool do_maps_clone)
{
	/* This is a new thread, we share map groups for the process. */
	if (thread->pid_ == parent->pid_)
		return thread__prepare_access(thread);

	if (thread->mg == parent->mg) {
		pr_debug("broken map groups on thread %d/%d parent %d/%d\n",
			 thread->pid_, thread->tid, parent->pid_, parent->tid);
		return 0;
	}
	/* But this one is a new process, copy maps. */
	return do_maps_clone ? map_groups__clone(thread, parent->mg) : 0;
}

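/*
 * Propagate the parent's comm and maps to a newly forked thread, and
 * remember the parent's tid as ppid.
 */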
int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp, bool do_maps_clone)
{
	if (parent->comm_set) {
		const char *comm = thread__comm_str(parent);
		int err;
		if (!comm)
			return -ENOMEM;
		err = thread__set_comm(thread, comm, timestamp);
		if (err)
			return err;
	}

	thread->ppid = parent->tid;
	return thread__clone_map_groups(thread, parent, do_maps_clone);
}

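/*
 * When the sample does not say what cpumode an address belongs to, try
 * each one in turn until the address resolves to a map.
 */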
void thread__find_cpumode_addr_location(struct thread *thread, u64 addr,
					struct addr_location *al)
{
	size_t i;
	const u8 cpumodes[] = {
		PERF_RECORD_MISC_USER,
		PERF_RECORD_MISC_KERNEL,
		PERF_RECORD_MISC_GUEST_USER,
		PERF_RECORD_MISC_GUEST_KERNEL
	};

	for (i = 0; i < ARRAY_SIZE(cpumodes); i++) {
		thread__find_symbol(thread, cpumodes[i], addr, al);
		if (al->map)
			break;
	}
}

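/*
 * Return the group leader for @thread, with a new reference that the
 * caller must drop with thread__put().
 */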
struct thread *thread__main_thread(struct machine *machine, struct thread *thread)
{
	if (thread->pid_ == thread->tid)
		return thread__get(thread);

	if (thread->pid_ == -1)
		return NULL;

	return machine__find_thread(machine, thread->pid_, thread->pid_);
}

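/*
 * Copy @len bytes of the target's memory at @ip out of the DSO backing
 * the map that @ip falls in; returns the number of bytes read or a
 * negative value on error.
 */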
int thread__memcpy(struct thread *thread, struct machine *machine,
		   void *buf, u64 ip, int len, bool *is64bit)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	struct addr_location al;
	long offset;

	if (machine__kernel_ip(machine, ip))
		cpumode = PERF_RECORD_MISC_KERNEL;

	if (!thread__find_map(thread, cpumode, ip, &al) || !al.map->dso ||
	    al.map->dso->data.status == DSO_DATA_STATUS_ERROR ||
	    map__load(al.map) < 0)
		return -1;

	offset = al.map->map_ip(al.map, ip);
	if (is64bit)
		*is64bit = al.map->dso->is_64_bit;

	return dso__data_read_offset(al.map->dso, machine, offset, buf, len);
}