// SPDX-License-Identifier: GPL-2.0
/*
 * This is for all the tests related to logic bugs (e.g. bad dereferences,
 * bad alignment, bad loops, bad locking, bad scheduling, deep stacks, and
 * lockups) along with other things that don't fit well into existing LKDTM
 * test source files.
 */
#include "lkdtm.h"
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/uaccess.h>

struct lkdtm_list {
	struct list_head node;
};
/*
 * Make sure our attempts to overrun the kernel stack don't trigger
 * a compiler warning when CONFIG_FRAME_WARN is set. Then make sure we
 * recurse past the end of THREAD_SIZE by default.
 */
#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0)
#define REC_STACK_SIZE (_AC(CONFIG_FRAME_WARN, UL) / 2)
#else
#define REC_STACK_SIZE (THREAD_SIZE / 8)
#endif
#define REC_NUM_DEFAULT ((THREAD_SIZE / REC_STACK_SIZE) * 2)

static int recur_count = REC_NUM_DEFAULT;

static DEFINE_SPINLOCK(lock_me_up);

/*
 * Make sure compiler does not optimize this function or stack frame away:
 * - function marked noinline
 * - stack variables are marked volatile
 * - stack variables are written (memset()) and read (pr_info())
 * - function has external effects (pr_info())
 */
static int noinline recursive_loop(int remaining)
{
	volatile char buf[REC_STACK_SIZE];

	memset((void *)buf, remaining & 0xFF, sizeof(buf));
	pr_info("loop %d/%d ...\n", (int)buf[remaining % sizeof(buf)],
		recur_count);
	if (!remaining)
		return 0;
	else
		return recursive_loop(remaining - 1);
}

/* If the depth is negative, use the default, otherwise keep parameter. */
void __init lkdtm_bugs_init(int *recur_param)
{
	if (*recur_param < 0)
		*recur_param = recur_count;
	else
		recur_count = *recur_param;
}

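/* Trigger a kernel panic(), halting the system. */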
void lkdtm_PANIC(void)
{
	panic("dumptest");
}

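/* Trigger a BUG(), which oopses and kills the calling task. */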
void lkdtm_BUG(void)
{
	BUG();
}

static int warn_counter;

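/* Trigger a WARN_ON(); the kernel should warn and keep running. */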
void lkdtm_WARNING(void)
{
	WARN_ON(++warn_counter);
}

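/* Trigger a WARN() that includes a formatted message. */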
void lkdtm_WARNING_MESSAGE(void)
{
	WARN(1, "Warning message trigger count: %d\n", ++warn_counter);
}

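/* Trigger an exception by writing through a NULL pointer. */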
void lkdtm_EXCEPTION(void)
{
	*((volatile int *) 0) = 0;
}

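/* Spin forever without sleeping or yielding the CPU. */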
void lkdtm_LOOP(void)
{
	for (;;)
		;
}

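/*
 * Recurse until the kernel stack is exhausted; with CONFIG_VMAP_STACK
 * this should fault on the stack's guard page rather than silently
 * corrupting adjacent memory.
 */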
void lkdtm_EXHAUST_STACK(void)
{
	pr_info("Calling function with %lu frame size to depth %d ...\n",
		REC_STACK_SIZE, recur_count);
	recursive_loop(recur_count);
	pr_info("FAIL: survived without exhausting stack?!\n");
}

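/*
 * Clobber 64 bytes starting at the caller-provided stack address,
 * overflowing the caller's local variable (and its stack canary).
 */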
static noinline void __lkdtm_CORRUPT_STACK(void *stack)
{
	memset(stack, '\xff', 64);
}

/* This should trip the stack canary, not corrupt the return address. */
noinline void lkdtm_CORRUPT_STACK(void)
{
	/* Use default char array length that triggers stack protection. */
	char data[8] __aligned(sizeof(void *));

	__lkdtm_CORRUPT_STACK(&data);

	pr_info("Corrupted stack containing char array ...\n");
}

/* Same as above but will only get a canary with -fstack-protector-strong */
noinline void lkdtm_CORRUPT_STACK_STRONG(void)
{
	union {
		unsigned short shorts[4];
		unsigned long *ptr;
	} data __aligned(sizeof(void *));

	__lkdtm_CORRUPT_STACK(&data);

	pr_info("Corrupted stack containing union ...\n");
}

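/*
 * Load and store through a misaligned u32 pointer; this faults on
 * architectures that do not support unaligned accesses.
 */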
void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
{
	static u8 data[5] __attribute__((aligned(4))) = {1, 2, 3, 4, 5};
	u32 *p;
	u32 val = 0x12345678;

	p = (u32 *)(data + 1);
	if (*p == 0)
		val = 0x87654321;
	*p = val;
}

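/* Busy-wait with preemption disabled to trip the soft-lockup detector. */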
void lkdtm_SOFTLOCKUP(void)
{
	preempt_disable();
	for (;;)
		cpu_relax();
}

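/* Busy-wait with interrupts disabled to trip the hard-lockup detector. */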
void lkdtm_HARDLOCKUP(void)
{
	local_irq_disable();
	for (;;)
		cpu_relax();
}

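/* Take a spinlock and never release it; a second call will deadlock. */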
void lkdtm_SPINLOCKUP(void)
{
	/* Must be called twice to trigger. */
	spin_lock(&lock_me_up);
	/* Let sparse know we intended to exit holding the lock. */
	__release(&lock_me_up);
}

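/* Sleep uninterruptibly so the hung-task detector eventually fires. */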
void lkdtm_HUNG_TASK(void)
{
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule();
}

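/*
 * Simulate a "write what where" attack against list_add(): with
 * CONFIG_DEBUG_LIST the corrupted pointers should be rejected before
 * target[] is overwritten.
 */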
void lkdtm_CORRUPT_LIST_ADD(void)
{
	/*
	 * Initially, an empty list via LIST_HEAD:
	 *	test_head.next = &test_head
	 *	test_head.prev = &test_head
	 */
	LIST_HEAD(test_head);
	struct lkdtm_list good, bad;
	void *target[2] = { };
	void *redirection = &target;

	pr_info("attempting good list addition\n");

	/*
	 * Adding to the list performs these actions:
	 *	test_head.next->prev = &good.node
	 *	good.node.next = test_head.next
	 *	good.node.prev = &test_head
	 *	test_head.next = &good.node
	 */
	list_add(&good.node, &test_head);

	pr_info("attempting corrupted list addition\n");
	/*
	 * In simulating this "write what where" primitive, the "what" is
	 * the address of &bad.node, and the "where" is the address held
	 * by "redirection".
	 */
	test_head.next = redirection;
	list_add(&bad.node, &test_head);

	if (target[0] == NULL && target[1] == NULL)
		pr_err("Overwrite did not happen, but no BUG?!\n");
	else
		pr_err("list_add() corruption not detected!\n");
}

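/* Same attack as above, but against the list removal path. */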
void lkdtm_CORRUPT_LIST_DEL(void)
{
	LIST_HEAD(test_head);
	struct lkdtm_list item;
	void *target[2] = { };
	void *redirection = &target;

	list_add(&item.node, &test_head);

	pr_info("attempting good list removal\n");
	list_del(&item.node);

	pr_info("attempting corrupted list removal\n");
	list_add(&item.node, &test_head);

	/* As with the list_add() test above, this corrupts "next". */
	item.node.next = redirection;
	list_del(&item.node);

	if (target[0] == NULL && target[1] == NULL)
		pr_err("Overwrite did not happen, but no BUG?!\n");
	else
		pr_err("list_del() corruption not detected!\n");
}

/* Test if unbalanced set_fs(KERNEL_DS)/set_fs(USER_DS) check exists. */
void lkdtm_CORRUPT_USER_DS(void)
{
	pr_info("setting bad task size limit\n");
	set_fs(KERNEL_DS);

	/* Make sure we do not keep running with a KERNEL_DS! */
	force_sig(SIGKILL);
}

/* Test that VMAP_STACK is actually allocating with a leading guard page */
void lkdtm_STACK_GUARD_PAGE_LEADING(void)
{
	const unsigned char *stack = task_stack_page(current);
	const unsigned char *ptr = stack - 1;
	volatile unsigned char byte;

	pr_info("attempting bad read from page below current stack\n");

	byte = *ptr;

	pr_err("FAIL: accessed page before stack!\n");
}

/* Test that VMAP_STACK is actually allocating with a trailing guard page */
void lkdtm_STACK_GUARD_PAGE_TRAILING(void)
{
	const unsigned char *stack = task_stack_page(current);
	const unsigned char *ptr = stack + THREAD_SIZE;
	volatile unsigned char byte;

	pr_info("attempting bad read from page above current stack\n");

	byte = *ptr;

	pr_err("FAIL: accessed page after stack!\n");
}

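/*
 * Try to clear the SMEP bit in CR4, first through native_write_cr4()
 * (which CR4 pinning should refuse) and then by calling directly into
 * the mov-to-CR4 instruction inside it (which the post-write pinning
 * verification should detect and revert).
 */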
void lkdtm_UNSET_SMEP(void)
{
#ifdef CONFIG_X86_64
#define MOV_CR4_DEPTH	64
	void (*direct_write_cr4)(unsigned long val);
	unsigned char *insn;
	unsigned long cr4;
	int i;

	cr4 = native_read_cr4();

	if ((cr4 & X86_CR4_SMEP) != X86_CR4_SMEP) {
		pr_err("FAIL: SMEP not in use\n");
		return;
	}
	cr4 &= ~(X86_CR4_SMEP);

	pr_info("trying to clear SMEP normally\n");
	native_write_cr4(cr4);
	if (cr4 == native_read_cr4()) {
		pr_err("FAIL: pinning SMEP failed!\n");
		cr4 |= X86_CR4_SMEP;
		pr_info("restoring SMEP\n");
		native_write_cr4(cr4);
		return;
	}
	pr_info("ok: SMEP did not get cleared\n");

	/*
	 * To test the post-write pinning verification we need to call
	 * directly into the middle of native_write_cr4() where the
	 * cr4 write happens, skipping any pinning. This searches for
	 * the cr4 writing instruction.
	 */
	insn = (unsigned char *)native_write_cr4;
	for (i = 0; i < MOV_CR4_DEPTH; i++) {
		/* mov %rdi, %cr4 */
		if (insn[i] == 0x0f && insn[i+1] == 0x22 && insn[i+2] == 0xe7)
			break;
		/* mov %rdi,%rax; mov %rax, %cr4 */
		if (insn[i]   == 0x48 && insn[i+1] == 0x89 &&
		    insn[i+2] == 0xf8 && insn[i+3] == 0x0f &&
		    insn[i+4] == 0x22 && insn[i+5] == 0xe0)
			break;
	}
	if (i >= MOV_CR4_DEPTH) {
		pr_info("ok: cannot locate cr4 writing call gadget\n");
		return;
	}
	direct_write_cr4 = (void *)(insn + i);

	pr_info("trying to clear SMEP with call gadget\n");
	direct_write_cr4(cr4);
	if (native_read_cr4() & X86_CR4_SMEP) {
		pr_info("ok: SMEP removal was reverted\n");
	} else {
		pr_err("FAIL: cleared SMEP not detected!\n");
		cr4 |= X86_CR4_SMEP;
		pr_info("restoring SMEP\n");
		native_write_cr4(cr4);
	}
#else
	pr_err("FAIL: this test is x86_64-only\n");
#endif
}