/*
 * latencytop.c: Latency display infrastructure
 *
 * (C) Copyright 2008 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

/*
 * CONFIG_LATENCYTOP enables a kernel latency tracking infrastructure that is
 * used by the "latencytop" userspace tool. The latency that is tracked is not
 * the 'traditional' interrupt latency (which is primarily caused by something
 * else consuming CPU), but instead, it is the latency an application encounters
 * because the kernel sleeps on its behalf for various reasons.
 *
 * This code tracks 2 levels of statistics:
 * 1) System level latency
 * 2) Per process latency
 *
 * The latency is stored in fixed-size data structures in an accumulated form;
 * if the "same" latency cause is hit twice, it is tracked as one entry in the
 * data structure. The hit count, the total accumulated latency and the
 * maximum latency are all tracked in this data structure. When the fixed-size
 * structure is full, no new causes are tracked until the buffer is flushed by
 * writing to the /proc file; the userspace tool does this on a regular basis.
 *
 * A latency cause is identified by a stringified backtrace at the point that
 * the scheduler gets invoked. The userland tool will use this string to
 * identify the cause of the latency in human readable form.
 *
 * The information is exported via /proc/latency_stats and /proc/<pid>/latency.
 * These files look like this:
 *
 * Latency Top version : v0.1
 * 70 59433 4897 i915_irq_wait drm_ioctl vfs_ioctl do_vfs_ioctl sys_ioctl
 * |    |    |    |
 * |    |    |    +----> the stringified backtrace
 * |    |    +---------> The maximum latency for this entry in microseconds
 * |    +--------------> The accumulated latency for this entry (microseconds)
 * +-------------------> The number of times this entry is hit
 *
 * (Note: the average latency is the accumulated latency divided by the
 * number of times the entry was hit.)
 */
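
/*
 * Example: parsing one record line of /proc/latency_stats in userspace.
 * This is a minimal illustrative sketch, not part of the kernel or of the
 * latencytop tool; the variable names and the buffer size are assumptions:
 *
 *	unsigned long count, time, max;
 *	char trace[256];
 *
 *	// e.g. line = "70 59433 4897 i915_irq_wait drm_ioctl ..."
 *	if (sscanf(line, "%lu %lu %lu %255[^\n]",
 *		   &count, &time, &max, trace) == 4) {
 *		unsigned long avg = count ? time / count : 0;
 *		printf("hit %lu times, avg %lu us, max %lu us: %s\n",
 *		       count, avg, max, trace);
 *	}
 */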

#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/proc_fs.h>
#include <linux/latencytop.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/stat.h>
#include <linux/list.h>
#include <linux/stacktrace.h>

static DEFINE_RAW_SPINLOCK(latency_lock);

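/*
 * Fixed-size table of system-level latency records, updated under
 * latency_lock; entries accumulate until userspace flushes them by
 * writing to /proc/latency_stats (see lstats_write() below).
 */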
#define MAXLR 128
static struct latency_record latency_record[MAXLR];

int latencytop_enabled;

void clear_all_latency_tracing(struct task_struct *p)
{
	unsigned long flags;

	if (!latencytop_enabled)
		return;

	raw_spin_lock_irqsave(&latency_lock, flags);
	memset(&p->latency_record, 0, sizeof(p->latency_record));
	p->latency_record_count = 0;
	raw_spin_unlock_irqrestore(&latency_lock, flags);
}

static void clear_global_latency_tracing(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&latency_lock, flags);
	memset(&latency_record, 0, sizeof(latency_record));
	raw_spin_unlock_irqrestore(&latency_lock, flags);
}

static void __sched
account_global_scheduler_latency(struct task_struct *tsk,
				 struct latency_record *lat)
{
	int firstnonnull = MAXLR + 1;
	int i;

	if (!latencytop_enabled)
		return;

	/* skip kernel threads for now */
	if (!tsk->mm)
		return;

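	/*
	 * See if we already have a record for this backtrace; if so, the
	 * new latency is accumulated into the existing entry below.
	 */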
	for (i = 0; i < MAXLR; i++) {
		int q, same = 1;

		/* Nothing stored: */
		if (!latency_record[i].backtrace[0]) {
			if (firstnonnull > i)
				firstnonnull = i;
			continue;
		}
		for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
			unsigned long record = lat->backtrace[q];

			if (latency_record[i].backtrace[q] != record) {
				same = 0;
				break;
			}

			/* 0 and ULONG_MAX entries mean end of backtrace: */
			if (record == 0 || record == ULONG_MAX)
				break;
		}
		if (same) {
			latency_record[i].count++;
			latency_record[i].time += lat->time;
			if (lat->time > latency_record[i].max)
				latency_record[i].max = lat->time;
			return;
		}
	}

	i = firstnonnull;
	if (i >= MAXLR - 1)
		return;

	/* Allocated a new one: */
	memcpy(&latency_record[i], lat, sizeof(struct latency_record));
}

/*
 * Helper to store a backtrace into a latency record entry
 */
static inline void store_stacktrace(struct task_struct *tsk,
				    struct latency_record *lat)
{
	struct stack_trace trace;

	memset(&trace, 0, sizeof(trace));
	trace.max_entries = LT_BACKTRACEDEPTH;
	trace.entries = &lat->backtrace[0];
	save_stack_trace_tsk(tsk, &trace);
}

/**
 * __account_scheduler_latency - record an observed latency
 * @tsk: the task struct of the task hitting the latency
 * @usecs: the duration of the latency in microseconds
 * @inter: 1 if the sleep was interruptible, 0 if uninterruptible
 *
 * This function is the main entry point for recording latency entries
 * as called by the scheduler.
 *
 * This function has a few special cases to deal with normal 'non-latency'
 * sleeps: specifically, interruptible sleep longer than 5 msec is skipped
 * since this usually is caused by waiting for events via select() and co.
 *
 * Negative latencies (caused by time going backwards) are also explicitly
 * skipped.
 */
void __sched
__account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
{
	unsigned long flags;
	int i, q;
	struct latency_record lat;

	/* Long interruptible waits are generally user requested... */
	if (inter && usecs > 5000)
		return;

	/* Negative sleeps are time going backwards */
	/* Zero-time sleeps are non-interesting */
	if (usecs <= 0)
		return;

	memset(&lat, 0, sizeof(lat));
	lat.count = 1;
	lat.time = usecs;
	lat.max = usecs;
	store_stacktrace(tsk, &lat);

	raw_spin_lock_irqsave(&latency_lock, flags);

	account_global_scheduler_latency(tsk, &lat);

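	/*
	 * Merge the latency into this task's own records if an entry with
	 * the same backtrace already exists:
	 */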
	for (i = 0; i < tsk->latency_record_count; i++) {
		struct latency_record *mylat;
		int same = 1;

		mylat = &tsk->latency_record[i];
		for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
			unsigned long record = lat.backtrace[q];

			if (mylat->backtrace[q] != record) {
				same = 0;
				break;
			}

			/* 0 and ULONG_MAX entries mean end of backtrace: */
			if (record == 0 || record == ULONG_MAX)
				break;
		}
		if (same) {
			mylat->count++;
			mylat->time += lat.time;
			if (lat.time > mylat->max)
				mylat->max = lat.time;
			goto out_unlock;
		}
	}

	/*
	 * Short term hack: once we have LT_SAVECOUNT (32) records we stop;
	 * in the future we should recycle old entries:
	 */
	if (tsk->latency_record_count >= LT_SAVECOUNT)
		goto out_unlock;

	/* Allocated a new one: */
	i = tsk->latency_record_count++;
	memcpy(&tsk->latency_record[i], &lat, sizeof(struct latency_record));

out_unlock:
	raw_spin_unlock_irqrestore(&latency_lock, flags);
}

static int lstats_show(struct seq_file *m, void *v)
{
	int i;

	seq_puts(m, "Latency Top version : v0.1\n");

	for (i = 0; i < MAXLR; i++) {
		struct latency_record *lr = &latency_record[i];

		if (lr->backtrace[0]) {
			int q;

			seq_printf(m, "%i %lu %lu",
				   lr->count, lr->time, lr->max);
			for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
				unsigned long bt = lr->backtrace[q];

				if (!bt)
					break;
				if (bt == ULONG_MAX)
					break;
				seq_printf(m, " %ps", (void *)bt);
			}
			seq_puts(m, "\n");
		}
	}
	return 0;
}

static ssize_t
lstats_write(struct file *file, const char __user *buf, size_t count,
	     loff_t *offs)
{
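	/*
	 * Any write to /proc/latency_stats flushes the accumulated
	 * system-level records; the latencytop tool does this on a regular
	 * basis (e.g. "echo > /proc/latency_stats").
	 */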
	clear_global_latency_tracing();

	return count;
}

static int lstats_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, lstats_show, NULL);
}

static const struct file_operations lstats_fops = {
	.open		= lstats_open,
	.read		= seq_read,
	.write		= lstats_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init init_lstats_procfs(void)
{
	proc_create("latency_stats", 0644, NULL, &lstats_fops);
	return 0;
}

int sysctl_latencytop(struct ctl_table *table, int write,
		      void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err;

	err = proc_dointvec(table, write, buffer, lenp, ppos);
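	/*
	 * latencytop relies on per-task scheduler statistics, so once it has
	 * been switched on (typically via the kernel.latencytop sysctl),
	 * make sure schedstats is enabled as well.
	 */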
	if (latencytop_enabled)
		force_schedstat_enabled();

	return err;
}

device_initcall(init_lstats_procfs);