// SPDX-License-Identifier: GPL-2.0
/*
 * Test module to generate lockups
 */
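/*
 * Example usage (the parameter values below are purely illustrative;
 * see the module_param() definitions below for the full list):
 *
 *	modprobe test_lockup time_secs=1 iterations=60 state=R
 *	modprobe test_lockup time_secs=8 all_cpus=1 state=R disable_irq=1
 *
 * Loading always "fails" with -EAGAIN (or -EINTR if interrupted) once
 * the test completes, so the module never stays resident.
 */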
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/sched/clock.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/file.h>

static unsigned int time_secs;
module_param(time_secs, uint, 0600);
MODULE_PARM_DESC(time_secs, "lockup time in seconds, default 0");

static unsigned int time_nsecs;
module_param(time_nsecs, uint, 0600);
MODULE_PARM_DESC(time_nsecs, "nanoseconds part of lockup time, default 0");

static unsigned int cooldown_secs;
module_param(cooldown_secs, uint, 0600);
MODULE_PARM_DESC(cooldown_secs, "cooldown time between iterations in seconds, default 0");

static unsigned int cooldown_nsecs;
module_param(cooldown_nsecs, uint, 0600);
MODULE_PARM_DESC(cooldown_nsecs, "nanoseconds part of cooldown, default 0");

static unsigned int iterations = 1;
module_param(iterations, uint, 0600);
MODULE_PARM_DESC(iterations, "lockup iterations, default 1");

static bool all_cpus;
module_param(all_cpus, bool, 0400);
MODULE_PARM_DESC(all_cpus, "trigger lockup at all cpus at once");

static int wait_state;
static char *state = "R";
module_param(state, charp, 0400);
MODULE_PARM_DESC(state, "wait in 'R' running (default), 'D' uninterruptible, 'K' killable, 'S' interruptible state");

static bool use_hrtimer;
module_param(use_hrtimer, bool, 0400);
MODULE_PARM_DESC(use_hrtimer, "use high-resolution timer for sleeping");

static bool iowait;
module_param(iowait, bool, 0400);
MODULE_PARM_DESC(iowait, "account sleep time as iowait");

static bool lock_read;
module_param(lock_read, bool, 0400);
MODULE_PARM_DESC(lock_read, "lock read-write locks for read");

static bool lock_single;
module_param(lock_single, bool, 0400);
MODULE_PARM_DESC(lock_single, "acquire locks only at one cpu");

static bool reacquire_locks;
module_param(reacquire_locks, bool, 0400);
MODULE_PARM_DESC(reacquire_locks, "release and reacquire locks/irq/preempt between iterations");

static bool touch_softlockup;
module_param(touch_softlockup, bool, 0600);
MODULE_PARM_DESC(touch_softlockup, "touch soft-lockup watchdog between iterations");

static bool touch_hardlockup;
module_param(touch_hardlockup, bool, 0600);
MODULE_PARM_DESC(touch_hardlockup, "touch hard-lockup watchdog between iterations");

static bool call_cond_resched;
module_param(call_cond_resched, bool, 0600);
MODULE_PARM_DESC(call_cond_resched, "call cond_resched() between iterations");

static bool measure_lock_wait;
module_param(measure_lock_wait, bool, 0400);
MODULE_PARM_DESC(measure_lock_wait, "measure lock wait time");

static unsigned long lock_wait_threshold = ULONG_MAX;
module_param(lock_wait_threshold, ulong, 0400);
MODULE_PARM_DESC(lock_wait_threshold, "print lock wait time longer than this in nanoseconds, default off");

static bool test_disable_irq;
module_param_named(disable_irq, test_disable_irq, bool, 0400);
MODULE_PARM_DESC(disable_irq, "disable interrupts: generate hard-lockups");

static bool disable_softirq;
module_param(disable_softirq, bool, 0400);
MODULE_PARM_DESC(disable_softirq, "disable bottom-half irq handlers");

static bool disable_preempt;
module_param(disable_preempt, bool, 0400);
MODULE_PARM_DESC(disable_preempt, "disable preemption: generate soft-lockups");

static bool lock_rcu;
module_param(lock_rcu, bool, 0400);
MODULE_PARM_DESC(lock_rcu, "grab rcu_read_lock: generate rcu stalls");

static bool lock_mmap_sem;
module_param(lock_mmap_sem, bool, 0400);
MODULE_PARM_DESC(lock_mmap_sem, "lock mm->mmap_lock: block procfs interfaces");

static unsigned long lock_rwsem_ptr;
module_param_unsafe(lock_rwsem_ptr, ulong, 0400);
MODULE_PARM_DESC(lock_rwsem_ptr, "lock rw_semaphore at address");

static unsigned long lock_mutex_ptr;
module_param_unsafe(lock_mutex_ptr, ulong, 0400);
MODULE_PARM_DESC(lock_mutex_ptr, "lock mutex at address");

static unsigned long lock_spinlock_ptr;
module_param_unsafe(lock_spinlock_ptr, ulong, 0400);
MODULE_PARM_DESC(lock_spinlock_ptr, "lock spinlock at address");

static unsigned long lock_rwlock_ptr;
module_param_unsafe(lock_rwlock_ptr, ulong, 0400);
MODULE_PARM_DESC(lock_rwlock_ptr, "lock rwlock at address");

static unsigned int alloc_pages_nr;
module_param_unsafe(alloc_pages_nr, uint, 0600);
MODULE_PARM_DESC(alloc_pages_nr, "allocate and free pages under locks");

static unsigned int alloc_pages_order;
module_param(alloc_pages_order, uint, 0400);
MODULE_PARM_DESC(alloc_pages_order, "page order to allocate");

static gfp_t alloc_pages_gfp = GFP_KERNEL;
module_param_unsafe(alloc_pages_gfp, uint, 0400);
MODULE_PARM_DESC(alloc_pages_gfp, "allocate pages with this gfp_mask, default GFP_KERNEL");

static bool alloc_pages_atomic;
module_param(alloc_pages_atomic, bool, 0400);
MODULE_PARM_DESC(alloc_pages_atomic, "allocate pages with GFP_ATOMIC");

static bool reallocate_pages;
module_param(reallocate_pages, bool, 0400);
MODULE_PARM_DESC(reallocate_pages, "free and allocate pages between iterations");

static struct file *test_file;
static struct inode *test_inode;
static char test_file_path[256];
module_param_string(file_path, test_file_path, sizeof(test_file_path), 0400);
MODULE_PARM_DESC(file_path, "file path to test");

static bool test_lock_inode;
module_param_named(lock_inode, test_lock_inode, bool, 0400);
MODULE_PARM_DESC(lock_inode, "lock file -> inode -> i_rwsem");

static bool test_lock_mapping;
module_param_named(lock_mapping, test_lock_mapping, bool, 0400);
MODULE_PARM_DESC(lock_mapping, "lock file -> mapping -> i_mmap_rwsem");

static bool test_lock_sb_umount;
module_param_named(lock_sb_umount, test_lock_sb_umount, bool, 0400);
MODULE_PARM_DESC(lock_sb_umount, "lock file -> sb -> s_umount");

static atomic_t alloc_pages_failed = ATOMIC_INIT(0);

static atomic64_t max_lock_wait = ATOMIC64_INIT(0);

static struct task_struct *main_task;
static int master_cpu;

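/*
 * Take the configured set of locks in a fixed order: sleeping locks
 * (mutex, rw_semaphore, mmap_lock) first, then irq/softirq/preempt
 * disabling and rcu_read_lock, then the spinning locks.  With
 * lock_single only the master CPU touches the pointer-addressed locks.
 * Optionally records the maximum observed lock wait time in
 * max_lock_wait via a cmpxchg loop.
 */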
static void test_lock(bool master, bool verbose)
{
	u64 wait_start;

	if (measure_lock_wait)
		wait_start = local_clock();

	if (lock_mutex_ptr && master) {
		if (verbose)
			pr_notice("lock mutex %ps\n", (void *)lock_mutex_ptr);
		mutex_lock((struct mutex *)lock_mutex_ptr);
	}

	if (lock_rwsem_ptr && master) {
		if (verbose)
			pr_notice("lock rw_semaphore %ps\n",
				  (void *)lock_rwsem_ptr);
		if (lock_read)
			down_read((struct rw_semaphore *)lock_rwsem_ptr);
		else
			down_write((struct rw_semaphore *)lock_rwsem_ptr);
	}

	if (lock_mmap_sem && master) {
		if (verbose)
			pr_notice("lock mmap_lock pid=%d\n", main_task->pid);
		if (lock_read)
			mmap_read_lock(main_task->mm);
		else
			mmap_write_lock(main_task->mm);
	}

	if (test_disable_irq)
		local_irq_disable();

	if (disable_softirq)
		local_bh_disable();

	if (disable_preempt)
		preempt_disable();

	if (lock_rcu)
		rcu_read_lock();

	if (lock_spinlock_ptr && master) {
		if (verbose)
			pr_notice("lock spinlock %ps\n",
				  (void *)lock_spinlock_ptr);
		spin_lock((spinlock_t *)lock_spinlock_ptr);
	}

	if (lock_rwlock_ptr && master) {
		if (verbose)
			pr_notice("lock rwlock %ps\n",
				  (void *)lock_rwlock_ptr);
		if (lock_read)
			read_lock((rwlock_t *)lock_rwlock_ptr);
		else
			write_lock((rwlock_t *)lock_rwlock_ptr);
	}

	if (measure_lock_wait) {
		s64 cur_wait = local_clock() - wait_start;
		s64 max_wait = atomic64_read(&max_lock_wait);

		do {
			if (cur_wait < max_wait)
				break;
			max_wait = atomic64_cmpxchg(&max_lock_wait,
						    max_wait, cur_wait);
		} while (max_wait != cur_wait);

		if (cur_wait > lock_wait_threshold)
			pr_notice_ratelimited("lock wait %lld ns\n", cur_wait);
	}
}

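/*
 * Release everything taken in test_lock(), in the reverse order.
 */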
static void test_unlock(bool master, bool verbose)
{
	if (lock_rwlock_ptr && master) {
		if (lock_read)
			read_unlock((rwlock_t *)lock_rwlock_ptr);
		else
			write_unlock((rwlock_t *)lock_rwlock_ptr);
		if (verbose)
			pr_notice("unlock rwlock %ps\n",
				  (void *)lock_rwlock_ptr);
	}

	if (lock_spinlock_ptr && master) {
		spin_unlock((spinlock_t *)lock_spinlock_ptr);
		if (verbose)
			pr_notice("unlock spinlock %ps\n",
				  (void *)lock_spinlock_ptr);
	}

	if (lock_rcu)
		rcu_read_unlock();

	if (disable_preempt)
		preempt_enable();

	if (disable_softirq)
		local_bh_enable();

	if (test_disable_irq)
		local_irq_enable();

	if (lock_mmap_sem && master) {
		if (lock_read)
			mmap_read_unlock(main_task->mm);
		else
			mmap_write_unlock(main_task->mm);
		if (verbose)
			pr_notice("unlock mmap_lock pid=%d\n", main_task->pid);
	}

	if (lock_rwsem_ptr && master) {
		if (lock_read)
			up_read((struct rw_semaphore *)lock_rwsem_ptr);
		else
			up_write((struct rw_semaphore *)lock_rwsem_ptr);
		if (verbose)
			pr_notice("unlock rw_semaphore %ps\n",
				  (void *)lock_rwsem_ptr);
	}

	if (lock_mutex_ptr && master) {
		mutex_unlock((struct mutex *)lock_mutex_ptr);
		if (verbose)
			pr_notice("unlock mutex %ps\n",
				  (void *)lock_mutex_ptr);
	}
}

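/*
 * Allocate up to alloc_pages_nr pages of alloc_pages_order each and
 * chain them on the given list via page->lru; a failed allocation is
 * counted in alloc_pages_failed and stops the loop early.
 */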
static void test_alloc_pages(struct list_head *pages)
{
	struct page *page;
	unsigned int i;

	for (i = 0; i < alloc_pages_nr; i++) {
		page = alloc_pages(alloc_pages_gfp, alloc_pages_order);
		if (!page) {
			atomic_inc(&alloc_pages_failed);
			break;
		}
		list_add(&page->lru, pages);
	}
}

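/*
 * Free every page chained on the list by test_alloc_pages() and
 * reinitialize the list head for reuse.
 */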
static void test_free_pages(struct list_head *pages)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, pages, lru)
		__free_pages(page, alloc_pages_order);
	INIT_LIST_HEAD(pages);
}

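/*
 * Burn CPU time with mdelay()/ndelay() when waiting in TASK_RUNNING,
 * otherwise sleep in the requested state using either an hrtimer or
 * the jiffies-based schedule_timeout().
 */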
static void test_wait(unsigned int secs, unsigned int nsecs)
{
	if (wait_state == TASK_RUNNING) {
		if (secs)
			mdelay(secs * MSEC_PER_SEC);
		if (nsecs)
			ndelay(nsecs);
		return;
	}

	__set_current_state(wait_state);
	if (use_hrtimer) {
		ktime_t time;

		time = ns_to_ktime((u64)secs * NSEC_PER_SEC + nsecs);
		schedule_hrtimeout(&time, HRTIMER_MODE_REL);
	} else {
		schedule_timeout(secs * HZ + nsecs_to_jiffies(nsecs));
	}
}

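/*
 * Main test body: take the locks, then loop for the configured number
 * of iterations, optionally dropping and reacquiring locks, touching
 * the watchdogs, calling cond_resched() and reallocating pages between
 * iterations.  A signal pending on the loading task ends the loop early.
 */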
static void test_lockup(bool master)
{
	u64 lockup_start = local_clock();
	unsigned int iter = 0;
	LIST_HEAD(pages);

	pr_notice("Start on CPU%d\n", raw_smp_processor_id());

	test_lock(master, true);

	test_alloc_pages(&pages);

	while (iter++ < iterations && !signal_pending(main_task)) {

		if (iowait)
			current->in_iowait = 1;

		test_wait(time_secs, time_nsecs);

		if (iowait)
			current->in_iowait = 0;

		if (reallocate_pages)
			test_free_pages(&pages);

		if (reacquire_locks)
			test_unlock(master, false);

		if (touch_softlockup)
			touch_softlockup_watchdog();

		if (touch_hardlockup)
			touch_nmi_watchdog();

		if (call_cond_resched)
			cond_resched();

		test_wait(cooldown_secs, cooldown_nsecs);

		if (reacquire_locks)
			test_lock(master, false);

		if (reallocate_pages)
			test_alloc_pages(&pages);
	}

	pr_notice("Finish on CPU%d in %lld ns\n", raw_smp_processor_id(),
		  local_clock() - lockup_start);

	test_free_pages(&pages);

	test_unlock(master, true);
}

static DEFINE_PER_CPU(struct work_struct, test_works);

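/*
 * Per-CPU worker for all_cpus mode; with lock_single only the work
 * item queued on master_cpu acts as the lock-holding master.
 */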
static void test_work_fn(struct work_struct *work)
{
	test_lockup(!lock_single ||
		    work == per_cpu_ptr(&test_works, master_cpu));
}

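/*
 * Sanity-check a user-supplied lock address: it must not look like a
 * userspace address and must be readable without faulting at both its
 * first and last byte.
 */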
static bool test_kernel_ptr(unsigned long addr, int size)
{
	void *ptr = (void *)addr;
	char buf;

	if (!addr)
		return false;

	/* should be at least readable kernel address */
	if (access_ok((void __user *)ptr, 1) ||
	    access_ok((void __user *)ptr + size - 1, 1) ||
	    get_kernel_nofault(buf, ptr) ||
	    get_kernel_nofault(buf, ptr + size - 1)) {
		pr_err("invalid kernel ptr: %#lx\n", addr);
		return true;
	}

	return false;
}

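/*
 * Verify a lock-debugging magic value at addr + offset; used under
 * CONFIG_DEBUG_SPINLOCK to catch pointers that do not reference a
 * lock of the claimed type.
 */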
static bool __maybe_unused test_magic(unsigned long addr, int offset,
				      unsigned int expected)
{
	void *ptr = (void *)addr + offset;
	unsigned int magic = 0;

	if (!addr)
		return false;

	if (get_kernel_nofault(magic, ptr) || magic != expected) {
		pr_err("invalid magic at %#lx + %#x = %#x, expected %#x\n",
		       addr, offset, magic, expected);
		return true;
	}

	return false;
}

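/*
 * Validate parameters, resolve the optional file-based locks, then run
 * the test either on every online CPU via the high-priority workqueue
 * or directly in the loading context.  Always returns an error so the
 * module is unloaded automatically when the test finishes.
 */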
static int __init test_lockup_init(void)
{
	u64 test_start = local_clock();

	main_task = current;

	switch (state[0]) {
	case 'S':
		wait_state = TASK_INTERRUPTIBLE;
		break;
	case 'D':
		wait_state = TASK_UNINTERRUPTIBLE;
		break;
	case 'K':
		wait_state = TASK_KILLABLE;
		break;
	case 'R':
		wait_state = TASK_RUNNING;
		break;
	default:
		pr_err("unknown state=%s\n", state);
		return -EINVAL;
	}

	if (alloc_pages_atomic)
		alloc_pages_gfp = GFP_ATOMIC;

	if (test_kernel_ptr(lock_spinlock_ptr, sizeof(spinlock_t)) ||
	    test_kernel_ptr(lock_rwlock_ptr, sizeof(rwlock_t)) ||
	    test_kernel_ptr(lock_mutex_ptr, sizeof(struct mutex)) ||
	    test_kernel_ptr(lock_rwsem_ptr, sizeof(struct rw_semaphore)))
		return -EINVAL;

#ifdef CONFIG_DEBUG_SPINLOCK
	if (test_magic(lock_spinlock_ptr,
		       offsetof(spinlock_t, rlock.magic),
		       SPINLOCK_MAGIC) ||
	    test_magic(lock_rwlock_ptr,
		       offsetof(rwlock_t, magic),
		       RWLOCK_MAGIC) ||
	    test_magic(lock_mutex_ptr,
		       offsetof(struct mutex, wait_lock.rlock.magic),
		       SPINLOCK_MAGIC) ||
	    test_magic(lock_rwsem_ptr,
		       offsetof(struct rw_semaphore, wait_lock.magic),
		       SPINLOCK_MAGIC))
		return -EINVAL;
#endif

	if ((wait_state != TASK_RUNNING ||
	     (call_cond_resched && !reacquire_locks) ||
	     (alloc_pages_nr && gfpflags_allow_blocking(alloc_pages_gfp))) &&
	    (test_disable_irq || disable_softirq || disable_preempt ||
	     lock_rcu || lock_spinlock_ptr || lock_rwlock_ptr)) {
		pr_err("refuse to sleep in atomic context\n");
		return -EINVAL;
	}

	if (lock_mmap_sem && !main_task->mm) {
		pr_err("no mm to lock mmap_lock\n");
		return -EINVAL;
	}

	if (test_file_path[0]) {
		test_file = filp_open(test_file_path, O_RDONLY, 0);
		if (IS_ERR(test_file)) {
			pr_err("failed to open %s: %ld\n",
			       test_file_path, PTR_ERR(test_file));
			return PTR_ERR(test_file);
		}
		test_inode = file_inode(test_file);
	} else if (test_lock_inode ||
		   test_lock_mapping ||
		   test_lock_sb_umount) {
		pr_err("no file to lock\n");
		return -EINVAL;
	}

	if (test_lock_inode && test_inode)
		lock_rwsem_ptr = (unsigned long)&test_inode->i_rwsem;

	if (test_lock_mapping && test_file && test_file->f_mapping)
		lock_rwsem_ptr = (unsigned long)&test_file->f_mapping->i_mmap_rwsem;

	if (test_lock_sb_umount && test_inode)
		lock_rwsem_ptr = (unsigned long)&test_inode->i_sb->s_umount;

	pr_notice("START pid=%d time=%u +%u ns cooldown=%u +%u ns iterations=%u state=%s %s%s%s%s%s%s%s%s%s%s%s\n",
		  main_task->pid, time_secs, time_nsecs,
		  cooldown_secs, cooldown_nsecs, iterations, state,
		  all_cpus ? "all_cpus " : "",
		  iowait ? "iowait " : "",
		  test_disable_irq ? "disable_irq " : "",
		  disable_softirq ? "disable_softirq " : "",
		  disable_preempt ? "disable_preempt " : "",
		  lock_rcu ? "lock_rcu " : "",
		  lock_read ? "lock_read " : "",
		  touch_softlockup ? "touch_softlockup " : "",
		  touch_hardlockup ? "touch_hardlockup " : "",
		  call_cond_resched ? "call_cond_resched " : "",
		  reacquire_locks ? "reacquire_locks " : "");

	if (alloc_pages_nr)
		pr_notice("ALLOCATE PAGES nr=%u order=%u gfp=%pGg %s\n",
			  alloc_pages_nr, alloc_pages_order, &alloc_pages_gfp,
			  reallocate_pages ? "reallocate_pages " : "");

	if (all_cpus) {
		unsigned int cpu;

		cpus_read_lock();

		preempt_disable();
		master_cpu = smp_processor_id();
		for_each_online_cpu(cpu) {
			INIT_WORK(per_cpu_ptr(&test_works, cpu), test_work_fn);
			queue_work_on(cpu, system_highpri_wq,
				      per_cpu_ptr(&test_works, cpu));
		}
		preempt_enable();

		for_each_online_cpu(cpu)
			flush_work(per_cpu_ptr(&test_works, cpu));

		cpus_read_unlock();
	} else {
		test_lockup(true);
	}

	if (measure_lock_wait)
		pr_notice("Maximum lock wait: %lld ns\n",
			  atomic64_read(&max_lock_wait));

	if (alloc_pages_nr)
		pr_notice("Page allocation failed %u times\n",
			  atomic_read(&alloc_pages_failed));

	pr_notice("FINISH in %llu ns\n", local_clock() - test_start);

	if (test_file)
		fput(test_file);

	if (signal_pending(main_task))
		return -EINTR;

	return -EAGAIN;
}
module_init(test_lockup_init);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Konstantin Khlebnikov <khlebnikov@yandex-team.ru>");
MODULE_DESCRIPTION("Test module to generate lockups");