// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) "kcov: " fmt

#define DISABLE_BRANCH_PROFILING
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/kcov.h>
#include <asm/setup.h>

/* Number of 64-bit words written per comparison: */
#define KCOV_WORDS_PER_CMP 4
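
/*
 * Each record in the comparison buffer is KCOV_WORDS_PER_CMP u64 words:
 * {type, arg1, arg2, PC}, where KCOV_CMP_SIZE(n) in the type encodes
 * log2 of the operand size in bytes and KCOV_CMP_CONST marks a compare
 * against a compile-time constant; see write_comp_data() below.
 */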

/*
 * kcov descriptor (one per opened debugfs file).
 * State transitions of the descriptor:
 * - initial state after open()
 * - then there must be a single ioctl(KCOV_INIT_TRACE) call
 * - then, mmap() call (several calls are allowed but not useful)
 * - then, ioctl(KCOV_ENABLE, arg), where arg is
 *	KCOV_TRACE_PC - to trace only the PCs
 *	or
 *	KCOV_TRACE_CMP - to trace only the comparison operands
 * - then, ioctl(KCOV_DISABLE) to disable the task.
 * Enabling/disabling ioctls can be repeated (only one task at a time
 * is allowed).
 */
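
/*
 * A minimal user-space sketch of that sequence (error handling omitted;
 * KCOV_INIT_TRACE/KCOV_ENABLE/KCOV_DISABLE and KCOV_TRACE_PC come from
 * the uapi <linux/kcov.h>, COVER_SIZE is a buffer size of the caller's
 * choosing):
 *
 *	int fd = open("/sys/kernel/debug/kcov", O_RDWR);
 *	ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
 *	unsigned long *cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
 *				    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC);
 *	__atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);
 *	... issue the syscall(s) under test ...
 *	unsigned long n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
 *	// cover[1..n] now hold the PCs executed by this task
 *	ioctl(fd, KCOV_DISABLE, 0);
 */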
struct kcov {
	/*
	 * Reference counter. We keep one for:
	 *  - opened file descriptor
	 *  - task with enabled coverage (we can't unwire it from another task)
	 */
	atomic_t refcount;
	/* The lock protects mode, size, area and t. */
	spinlock_t lock;
	enum kcov_mode mode;
	/* Size of arena (in long's for KCOV_MODE_TRACE). */
	unsigned size;
	/* Coverage buffer shared with user space. */
	void *area;
	/* Task for which we collect coverage, or NULL. */
	struct task_struct *t;
};

static bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
{
	unsigned int mode;

	/*
	 * We are interested in code coverage as a function of syscall inputs,
	 * so we ignore code executed in interrupts.
	 */
	if (!in_task())
		return false;
	mode = READ_ONCE(t->kcov_mode);
	/*
	 * There is some code that runs in interrupts but for which
	 * in_interrupt() returns false (e.g. preempt_schedule_irq()).
	 * READ_ONCE()/barrier() effectively provides load-acquire wrt
	 * interrupts; it is paired with the barrier()/WRITE_ONCE() in
	 * kcov_ioctl_locked().
	 */
	barrier();
	return mode == needed_mode;
}

static unsigned long canonicalize_ip(unsigned long ip)
{
#ifdef CONFIG_RANDOMIZE_BASE
	ip -= kaslr_offset();
#endif
	return ip;
}

/*
 * Entry point from instrumented code.
 * This is called once per basic-block/edge.
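 * The first word of the shared area holds the number of PCs stored so
 * far; each hit appends the caller's (KASLR-adjusted) return address.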
 */
void notrace __sanitizer_cov_trace_pc(void)
{
	struct task_struct *t;
	unsigned long *area;
	unsigned long ip = canonicalize_ip(_RET_IP_);
	unsigned long pos;

	t = current;
	if (!check_kcov_mode(KCOV_MODE_TRACE_PC, t))
		return;

	area = t->kcov_area;
	/* The first 64-bit word is the number of subsequent PCs. */
	pos = READ_ONCE(area[0]) + 1;
	if (likely(pos < t->kcov_size)) {
		area[pos] = ip;
		WRITE_ONCE(area[0], pos);
	}
}
EXPORT_SYMBOL(__sanitizer_cov_trace_pc);

#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
static void notrace write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
{
	struct task_struct *t;
	u64 *area;
	u64 count, start_index, end_pos, max_pos;

	t = current;
	if (!check_kcov_mode(KCOV_MODE_TRACE_CMP, t))
		return;

	ip = canonicalize_ip(ip);

	/*
	 * We write all comparison arguments and types as u64.
	 * The buffer was allocated for t->kcov_size unsigned longs.
	 */
	area = (u64 *)t->kcov_area;
	max_pos = t->kcov_size * sizeof(unsigned long);

	count = READ_ONCE(area[0]);

	/* Every record is KCOV_WORDS_PER_CMP 64-bit words. */
	start_index = 1 + count * KCOV_WORDS_PER_CMP;
	end_pos = (start_index + KCOV_WORDS_PER_CMP) * sizeof(u64);
	if (likely(end_pos <= max_pos)) {
		area[start_index] = type;
		area[start_index + 1] = arg1;
		area[start_index + 2] = arg2;
		area[start_index + 3] = ip;
		WRITE_ONCE(area[0], count + 1);
	}
}

void notrace __sanitizer_cov_trace_cmp1(u8 arg1, u8 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(0), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp1);

void notrace __sanitizer_cov_trace_cmp2(u16 arg1, u16 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(1), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp2);

void notrace __sanitizer_cov_trace_cmp4(u32 arg1, u32 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(2), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp4);

void notrace __sanitizer_cov_trace_cmp8(u64 arg1, u64 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(3), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp8);

void notrace __sanitizer_cov_trace_const_cmp1(u8 arg1, u8 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(0) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp1);

void notrace __sanitizer_cov_trace_const_cmp2(u16 arg1, u16 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(1) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp2);

void notrace __sanitizer_cov_trace_const_cmp4(u32 arg1, u32 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(2) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp4);

void notrace __sanitizer_cov_trace_const_cmp8(u64 arg1, u64 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(3) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp8);

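/*
 * The compiler passes the switch cases as a table: cases[0] is the
 * number of case values, cases[1] is the size of the switch operand
 * in bits, and the case values themselves start at cases[2].
 */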
void notrace __sanitizer_cov_trace_switch(u64 val, u64 *cases)
{
	u64 i;
	u64 count = cases[0];
	u64 size = cases[1];
	u64 type = KCOV_CMP_CONST;

	switch (size) {
	case 8:
		type |= KCOV_CMP_SIZE(0);
		break;
	case 16:
		type |= KCOV_CMP_SIZE(1);
		break;
	case 32:
		type |= KCOV_CMP_SIZE(2);
		break;
	case 64:
		type |= KCOV_CMP_SIZE(3);
		break;
	default:
		return;
	}
	for (i = 0; i < count; i++)
		write_comp_data(type, cases[i + 2], val, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_switch);
#endif /* ifdef CONFIG_KCOV_ENABLE_COMPARISONS */

static void kcov_get(struct kcov *kcov)
{
	atomic_inc(&kcov->refcount);
}

static void kcov_put(struct kcov *kcov)
{
	if (atomic_dec_and_test(&kcov->refcount)) {
		vfree(kcov->area);
		kfree(kcov);
	}
}

void kcov_task_init(struct task_struct *t)
{
	WRITE_ONCE(t->kcov_mode, KCOV_MODE_DISABLED);
	barrier();
	t->kcov_size = 0;
	t->kcov_area = NULL;
	t->kcov = NULL;
}

void kcov_task_exit(struct task_struct *t)
{
	struct kcov *kcov;

	kcov = t->kcov;
	if (kcov == NULL)
		return;
	spin_lock(&kcov->lock);
	if (WARN_ON(kcov->t != t)) {
		spin_unlock(&kcov->lock);
		return;
	}
	/* Just to not leave dangling references behind. */
	kcov_task_init(t);
	kcov->t = NULL;
	kcov->mode = KCOV_MODE_INIT;
	spin_unlock(&kcov->lock);
	kcov_put(kcov);
}

static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
{
	int res = 0;
	void *area;
	struct kcov *kcov = vma->vm_file->private_data;
	unsigned long size, off;
	struct page *page;

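	/*
	 * Allocate the area before taking the spinlock: vmalloc_user()
	 * may sleep, which is not allowed while holding a spinlock.
	 */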
	area = vmalloc_user(vma->vm_end - vma->vm_start);
	if (!area)
		return -ENOMEM;

	spin_lock(&kcov->lock);
	size = kcov->size * sizeof(unsigned long);
	if (kcov->mode != KCOV_MODE_INIT || vma->vm_pgoff != 0 ||
	    vma->vm_end - vma->vm_start != size) {
		res = -EINVAL;
		goto exit;
	}
	if (!kcov->area) {
		kcov->area = area;
		vma->vm_flags |= VM_DONTEXPAND;
		spin_unlock(&kcov->lock);
		for (off = 0; off < size; off += PAGE_SIZE) {
			page = vmalloc_to_page(kcov->area + off);
			if (vm_insert_page(vma, vma->vm_start + off, page))
				WARN_ONCE(1, "vm_insert_page() failed");
		}
		return 0;
	}
exit:
	spin_unlock(&kcov->lock);
	vfree(area);
	return res;
}

static int kcov_open(struct inode *inode, struct file *filep)
{
	struct kcov *kcov;

	kcov = kzalloc(sizeof(*kcov), GFP_KERNEL);
	if (!kcov)
		return -ENOMEM;
	kcov->mode = KCOV_MODE_DISABLED;
	atomic_set(&kcov->refcount, 1);
	spin_lock_init(&kcov->lock);
	filep->private_data = kcov;
	return nonseekable_open(inode, filep);
}

static int kcov_close(struct inode *inode, struct file *filep)
{
	kcov_put(filep->private_data);
	return 0;
}

/*
 * Fault in a lazily-faulted vmalloc area before it can be used by
 * __sanitizer_cov_trace_pc(), to avoid recursion issues if any code on the
 * vmalloc fault handling path is instrumented.
 */
static void kcov_fault_in_area(struct kcov *kcov)
{
	unsigned long stride = PAGE_SIZE / sizeof(unsigned long);
	unsigned long *area = kcov->area;
	unsigned long offset;

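	/* One read per page is enough to fault in the whole area. */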
	for (offset = 0; offset < kcov->size; offset += stride)
		READ_ONCE(area[offset]);
}

static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
			     unsigned long arg)
{
	struct task_struct *t;
	unsigned long size, unused;

	switch (cmd) {
	case KCOV_INIT_TRACE:
		/*
		 * Enable kcov in trace mode and setup buffer size.
		 * Must happen before anything else.
		 */
		if (kcov->mode != KCOV_MODE_DISABLED)
			return -EBUSY;
		/*
		 * Size must be at least 2 to hold the current position and
		 * one PC. Later we allocate size * sizeof(unsigned long)
		 * bytes of memory; that multiplication must not overflow.
		 */
		size = arg;
		if (size < 2 || size > INT_MAX / sizeof(unsigned long))
			return -EINVAL;
		kcov->size = size;
		kcov->mode = KCOV_MODE_INIT;
		return 0;
	case KCOV_ENABLE:
		/*
		 * Enable coverage for the current task.
		 * By this point the user must have enabled trace mode and
		 * mmapped the file. Coverage collection is disabled only
		 * at task exit or voluntarily by KCOV_DISABLE. After that
		 * it can be enabled for another task.
		 */
		if (kcov->mode != KCOV_MODE_INIT || !kcov->area)
			return -EINVAL;
		t = current;
		if (kcov->t != NULL || t->kcov != NULL)
			return -EBUSY;
		if (arg == KCOV_TRACE_PC)
			kcov->mode = KCOV_MODE_TRACE_PC;
		else if (arg == KCOV_TRACE_CMP)
#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
			kcov->mode = KCOV_MODE_TRACE_CMP;
#else
			return -ENOTSUPP;
#endif
		else
			return -EINVAL;
		kcov_fault_in_area(kcov);
		/* Cache in task struct for performance. */
		t->kcov_size = kcov->size;
		t->kcov_area = kcov->area;
		/* See comment in check_kcov_mode(). */
		barrier();
		WRITE_ONCE(t->kcov_mode, kcov->mode);
		t->kcov = kcov;
		kcov->t = t;
		/* This is put either in kcov_task_exit() or in KCOV_DISABLE. */
		kcov_get(kcov);
		return 0;
	case KCOV_DISABLE:
		/* Disable coverage for the current task. */
		unused = arg;
		if (unused != 0 || current->kcov != kcov)
			return -EINVAL;
		t = current;
		if (WARN_ON(kcov->t != t))
			return -EINVAL;
		kcov_task_init(t);
		kcov->t = NULL;
		kcov->mode = KCOV_MODE_INIT;
		kcov_put(kcov);
		return 0;
	default:
		return -ENOTTY;
	}
}

static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct kcov *kcov;
	int res;

	kcov = filep->private_data;
	spin_lock(&kcov->lock);
	res = kcov_ioctl_locked(kcov, cmd, arg);
	spin_unlock(&kcov->lock);
	return res;
}

static const struct file_operations kcov_fops = {
	.open		= kcov_open,
	.unlocked_ioctl	= kcov_ioctl,
	.compat_ioctl	= kcov_ioctl,
	.mmap		= kcov_mmap,
	.release	= kcov_close,
};

static int __init kcov_init(void)
{
	/*
	 * The kcov debugfs file won't ever get removed and thus,
	 * there is no need to protect it against removal races. The
	 * use of debugfs_create_file_unsafe() is actually safe here.
	 */
	if (!debugfs_create_file_unsafe("kcov", 0600, NULL, NULL, &kcov_fops)) {
		pr_err("failed to create kcov in debugfs\n");
		return -ENOMEM;
	}
	return 0;
}

device_initcall(kcov_init);