// SPDX-License-Identifier: GPL-2.0
#include "../perf.h"
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <linux/kernel.h>
#include "session.h"
#include "thread.h"
#include "thread-stack.h"
#include "util.h"
#include "debug.h"
#include "namespaces.h"
#include "comm.h"
#include "unwind.h"

#include <api/fs/fs.h>

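/*
 * Set up the map groups for a new thread: the thread group leader gets a
 * fresh map_groups, other threads share the leader's via a reference.
 */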
int thread__init_map_groups(struct thread *thread, struct machine *machine)
{
	pid_t pid = thread->pid_;

	if (pid == thread->tid || pid == -1) {
		thread->mg = map_groups__new(machine);
	} else {
		struct thread *leader = __machine__findnew_thread(machine, pid, pid);
		if (leader) {
			thread->mg = map_groups__get(leader->mg);
			thread__put(leader);
		}
	}

	return thread->mg ? 0 : -1;
}

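/*
 * Allocate a new thread and give it a default ":<tid>" comm until a real
 * one is known.  The thread starts out with a single reference.
 */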
struct thread *thread__new(pid_t pid, pid_t tid)
{
	char *comm_str;
	struct comm *comm;
	struct thread *thread = zalloc(sizeof(*thread));

	if (thread != NULL) {
		thread->pid_ = pid;
		thread->tid = tid;
		thread->ppid = -1;
		thread->cpu = -1;
		INIT_LIST_HEAD(&thread->namespaces_list);
		INIT_LIST_HEAD(&thread->comm_list);
		init_rwsem(&thread->namespaces_lock);
		init_rwsem(&thread->comm_lock);

		comm_str = malloc(32);
		if (!comm_str)
			goto err_thread;

		snprintf(comm_str, 32, ":%d", tid);
		comm = comm__new(comm_str, 0, false);
		free(comm_str);
		if (!comm)
			goto err_thread;

		list_add(&comm->list, &thread->comm_list);
		refcount_set(&thread->refcnt, 1);
		RB_CLEAR_NODE(&thread->rb_node);
		/* Thread holds first ref to nsdata. */
		thread->nsinfo = nsinfo__new(pid);
	}

	return thread;

err_thread:
	free(thread);
	return NULL;
}

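/*
 * Release everything attached to a thread: its thread stack, map groups,
 * namespaces and comm history.  Only called once the last reference has
 * been dropped, see thread__put().
 */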
void thread__delete(struct thread *thread)
{
	struct namespaces *namespaces, *tmp_namespaces;
	struct comm *comm, *tmp_comm;

	BUG_ON(!RB_EMPTY_NODE(&thread->rb_node));

	thread_stack__free(thread);

	if (thread->mg) {
		map_groups__put(thread->mg);
		thread->mg = NULL;
	}
	down_write(&thread->namespaces_lock);
	list_for_each_entry_safe(namespaces, tmp_namespaces,
				 &thread->namespaces_list, list) {
		list_del(&namespaces->list);
		namespaces__free(namespaces);
	}
	up_write(&thread->namespaces_lock);

	down_write(&thread->comm_lock);
	list_for_each_entry_safe(comm, tmp_comm, &thread->comm_list, list) {
		list_del(&comm->list);
		comm__free(comm);
	}
	up_write(&thread->comm_lock);

	unwind__finish_access(thread);
	nsinfo__zput(thread->nsinfo);

	exit_rwsem(&thread->namespaces_lock);
	exit_rwsem(&thread->comm_lock);
	free(thread);
}

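/* Grab a reference on @thread, may be called with a NULL thread. */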
struct thread *thread__get(struct thread *thread)
{
	if (thread)
		refcount_inc(&thread->refcnt);
	return thread;
}

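/* Drop a reference on @thread, freeing it when the last one goes away. */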
void thread__put(struct thread *thread)
{
	if (thread && refcount_dec_and_test(&thread->refcnt)) {
		/*
		 * Remove it from the dead_threads list, as the last
		 * reference is gone.
		 */
		list_del_init(&thread->node);
		thread__delete(thread);
	}
}

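/* Return the most recently set namespaces of @thread, or NULL if none. */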
struct namespaces *thread__namespaces(const struct thread *thread)
{
	if (list_empty(&thread->namespaces_list))
		return NULL;

	return list_first_entry(&thread->namespaces_list, struct namespaces, list);
}

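/* Caller must hold thread->namespaces_lock for writing. */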
static int __thread__set_namespaces(struct thread *thread, u64 timestamp,
				    struct namespaces_event *event)
{
	struct namespaces *new, *curr = thread__namespaces(thread);

	new = namespaces__new(event);
	if (!new)
		return -ENOMEM;

	list_add(&new->list, &thread->namespaces_list);

	if (timestamp && curr) {
		/*
		 * A setns syscall must have changed some or all of the
		 * namespaces of this thread.  Update the end time for the
		 * namespaces previously used.
		 */
		curr = list_next_entry(new, list);
		curr->end_time = timestamp;
	}

	return 0;
}

int thread__set_namespaces(struct thread *thread, u64 timestamp,
			   struct namespaces_event *event)
{
	int ret;

	down_write(&thread->namespaces_lock);
	ret = __thread__set_namespaces(thread, timestamp, event);
	up_write(&thread->namespaces_lock);
	return ret;
}

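/* Return the current (most recent) comm of @thread, or NULL if none is set. */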
struct comm *thread__comm(const struct thread *thread)
{
	if (list_empty(&thread->comm_list))
		return NULL;

	return list_first_entry(&thread->comm_list, struct comm, list);
}

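/*
 * Return the newest comm that was set by an exec, falling back to the
 * oldest comm on the list when no exec comm is present.
 */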
struct comm *thread__exec_comm(const struct thread *thread)
{
	struct comm *comm, *last = NULL;

	list_for_each_entry(comm, &thread->comm_list, list) {
		if (comm->exec)
			return comm;
		last = comm;
	}

	return last;
}

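/* Caller must hold thread->comm_lock for writing. */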
static int ____thread__set_comm(struct thread *thread, const char *str,
				u64 timestamp, bool exec)
{
	struct comm *new, *curr = thread__comm(thread);

	/* Override the default :tid entry */
	if (!thread->comm_set) {
		int err = comm__override(curr, str, timestamp, exec);
		if (err)
			return err;
	} else {
		new = comm__new(str, timestamp, exec);
		if (!new)
			return -ENOMEM;
		list_add(&new->list, &thread->comm_list);

		if (exec)
			unwind__flush_access(thread);
	}

	thread->comm_set = true;

	return 0;
}

int __thread__set_comm(struct thread *thread, const char *str, u64 timestamp,
		       bool exec)
{
	int ret;

	down_write(&thread->comm_lock);
	ret = ____thread__set_comm(thread, str, timestamp, exec);
	up_write(&thread->comm_lock);
	return ret;
}

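/* Set the thread's comm from /proc/<pid>/task/<tid>/comm. */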
int thread__set_comm_from_proc(struct thread *thread)
{
	char path[64];
	char *comm = NULL;
	size_t sz;
	int err = -1;

	if (!(snprintf(path, sizeof(path), "%d/task/%d/comm",
		       thread->pid_, thread->tid) >= (int)sizeof(path)) &&
	    procfs__read_str(path, &comm, &sz) == 0) {
		comm[sz - 1] = '\0';
		err = thread__set_comm(thread, comm, 0);
	}

	return err;
}

static const char *__thread__comm_str(const struct thread *thread)
{
	const struct comm *comm = thread__comm(thread);

	if (!comm)
		return NULL;

	return comm__str(comm);
}

const char *thread__comm_str(const struct thread *thread)
{
	const char *str;

	down_read((struct rw_semaphore *)&thread->comm_lock);
	str = __thread__comm_str(thread);
	up_read((struct rw_semaphore *)&thread->comm_lock);

	return str;
}

/* CHECKME: it would probably be better to return the max comm len from the comm list */
int thread__comm_len(struct thread *thread)
{
	if (!thread->comm_len) {
		const char *comm = thread__comm_str(thread);
		if (!comm)
			return 0;
		thread->comm_len = strlen(comm);
	}

	return thread->comm_len;
}

size_t thread__fprintf(struct thread *thread, FILE *fp)
{
	return fprintf(fp, "Thread %d %s\n", thread->tid, thread__comm_str(thread)) +
	       map_groups__fprintf(thread->mg, fp);
}

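/*
 * Insert @map into the thread's map groups, first setting up unwind
 * access for it and fixing up any overlap with existing maps.
 */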
int thread__insert_map(struct thread *thread, struct map *map)
{
	int ret;

	ret = unwind__prepare_access(thread, map, NULL);
	if (ret)
		return ret;

	map_groups__fixup_overlappings(thread->mg, map, stderr);
	map_groups__insert(thread->mg, map);

	return 0;
}

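/*
 * Walk the thread's maps and set up unwind access, stopping as soon as
 * one of the maps initializes the unwind state or an error occurs.
 */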
static int __thread__prepare_access(struct thread *thread)
{
	bool initialized = false;
	int err = 0;
	struct maps *maps = &thread->mg->maps;
	struct map *map;

	down_read(&maps->lock);

	for (map = maps__first(maps); map; map = map__next(map)) {
		err = unwind__prepare_access(thread, map, &initialized);
		if (err || initialized)
			break;
	}

	up_read(&maps->lock);

	return err;
}

static int thread__prepare_access(struct thread *thread)
{
	int err = 0;

	if (symbol_conf.use_callchain)
		err = __thread__prepare_access(thread);

	return err;
}

static int thread__clone_map_groups(struct thread *thread,
				    struct thread *parent)
{
	/* This is a new thread, we share the map groups for the process. */
	if (thread->pid_ == parent->pid_)
		return thread__prepare_access(thread);

	if (thread->mg == parent->mg) {
		pr_debug("broken map groups on thread %d/%d parent %d/%d\n",
			 thread->pid_, thread->tid, parent->pid_, parent->tid);
		return 0;
	}

	/* But this one is a new process, copy the maps. */
	if (map_groups__clone(thread, parent->mg) < 0)
		return -ENOMEM;

	return 0;
}

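/*
 * Handle a fork/clone: the child inherits the parent's comm and either
 * shares the parent's map groups (thread) or gets a copy of them (process).
 */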
int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp)
{
	if (parent->comm_set) {
		const char *comm = thread__comm_str(parent);
		int err;
		if (!comm)
			return -ENOMEM;
		err = thread__set_comm(thread, comm, timestamp);
		if (err)
			return err;
	}

	thread->ppid = parent->tid;
	return thread__clone_map_groups(thread, parent);
}

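/*
 * Resolve @addr trying each cpumode in turn (user, kernel, guest user,
 * guest kernel) until it falls into a known map.
 */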
void thread__find_cpumode_addr_location(struct thread *thread, u64 addr,
					struct addr_location *al)
{
	size_t i;
	const u8 cpumodes[] = {
		PERF_RECORD_MISC_USER,
		PERF_RECORD_MISC_KERNEL,
		PERF_RECORD_MISC_GUEST_USER,
		PERF_RECORD_MISC_GUEST_KERNEL
	};

	for (i = 0; i < ARRAY_SIZE(cpumodes); i++) {
		thread__find_symbol(thread, cpumodes[i], addr, al);
		if (al->map)
			break;
	}
}

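/*
 * Return the main thread (thread group leader) of the process @thread
 * belongs to, with a reference held, or NULL if it cannot be found.
 */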
struct thread *thread__main_thread(struct machine *machine, struct thread *thread)
{
	if (thread->pid_ == thread->tid)
		return thread__get(thread);

	if (thread->pid_ == -1)
		return NULL;

	return machine__find_thread(machine, thread->pid_, thread->pid_);
}