// SPDX-License-Identifier: GPL-2.0
/*
 * This is for all the tests related to validating kernel memory
 * permissions: non-executable regions, non-writable regions, and
 * even non-readable regions.
 */
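/*
 * Note: each test below is normally reached through lkdtm's debugfs
 * trigger interface, e.g.
 *   echo EXEC_DATA > /sys/kernel/debug/provoke-crash/DIRECT
 * A "passing" test typically ends in an Oops once the corresponding
 * protection is in place, so run them one at a time.
 */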
#include "lkdtm.h"
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>

/* Whether or not to fill the target memory area with do_nothing(). */
#define CODE_WRITE	true
#define CODE_AS_IS	false

/* How many bytes to copy to be sure we've copied enough of do_nothing(). */
#define EXEC_SIZE 64

/* This is non-const, so it will end up in the .data section. */
static u8 data_area[EXEC_SIZE];

/* This is const, so it will end up in the .rodata section. */
static const unsigned long rodata = 0xAA55AA55;

/* This is marked __ro_after_init, so it should ultimately be .rodata. */
static unsigned long ro_after_init __ro_after_init = 0x55AA5500;

/*
 * This just returns to the caller. It is designed to be copied into
 * non-executable memory regions.
 */
static void do_nothing(void)
{
	return;
}

/* Must immediately follow do_nothing for size calculations to work out. */
static void do_overwritten(void)
{
	pr_info("do_overwritten wasn't overwritten!\n");
	return;
}

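/*
 * Optionally copies do_nothing() into the destination and then calls it as
 * a function. With the target region mapped non-executable, the indirect
 * call is expected to fault.
 */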
static noinline void execute_location(void *dst, bool write)
{
	void (*func)(void) = dst;

	pr_info("attempting ok execution at %p\n", do_nothing);
	do_nothing();

	if (write == CODE_WRITE) {
		memcpy(dst, do_nothing, EXEC_SIZE);
		flush_icache_range((unsigned long)dst,
				   (unsigned long)dst + EXEC_SIZE);
	}
	pr_info("attempting bad execution at %p\n", func);
	func();
}

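/*
 * Same idea as execute_location(), but the destination is a userspace
 * address: do_nothing() is copied there with access_process_vm() and the
 * result is called directly from kernel context. With SMEP/PXN-style
 * protections in place, executing user memory from the kernel should fault.
 */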
static void execute_user_location(void *dst)
{
	int copied;

	/* Intentionally crossing kernel/user memory boundary. */
	void (*func)(void) = dst;

	pr_info("attempting ok execution at %p\n", do_nothing);
	do_nothing();

	copied = access_process_vm(current, (unsigned long)dst, do_nothing,
				   EXEC_SIZE, FOLL_WRITE);
	if (copied < EXEC_SIZE)
		return;
	pr_info("attempting bad execution at %p\n", func);
	func();
}

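/*
 * Writes to a const object that lives in .rodata; this should fault on
 * kernels that map .rodata read-only.
 */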
void lkdtm_WRITE_RO(void)
{
	/* Explicitly cast away "const" for the test. */
	unsigned long *ptr = (unsigned long *)&rodata;

	pr_info("attempting bad rodata write at %p\n", ptr);
	*ptr ^= 0xabcd1234;
}

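/*
 * Writes to a variable marked __ro_after_init; such variables are writable
 * during __init but should be write-protected by the time this test can be
 * triggered, so the write below is expected to fault.
 */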
void lkdtm_WRITE_RO_AFTER_INIT(void)
{
	unsigned long *ptr = &ro_after_init;

	/*
	 * Verify we were written to during init. Since an Oops
	 * is considered a "success", a failure is to just skip the
	 * real test.
	 */
	if ((*ptr & 0xAA) != 0xAA) {
		pr_info("%p was NOT written during init!?\n", ptr);
		return;
	}

	pr_info("attempting bad ro_after_init write at %p\n", ptr);
	*ptr ^= 0xabcd1234;
}

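/*
 * Tries to overwrite the body of do_overwritten() with a copy of
 * do_nothing(); since kernel text should be mapped read-only, the memcpy()
 * is expected to fault before do_overwritten() can report failure.
 */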
void lkdtm_WRITE_KERN(void)
{
	size_t size;
	unsigned char *ptr;

	size = (unsigned long)do_overwritten - (unsigned long)do_nothing;
	ptr = (unsigned char *)do_overwritten;

	pr_info("attempting bad %zu byte write at %p\n", size, ptr);
	memcpy(ptr, (unsigned char *)do_nothing, size);
	flush_icache_range((unsigned long)ptr, (unsigned long)(ptr + size));

	do_overwritten();
}

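/*
 * The EXEC_* tests below attempt to execute code from regions that should
 * be mapped non-executable: .data, the stack, kmalloc()/vmalloc() memory,
 * and .rodata.
 */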
void lkdtm_EXEC_DATA(void)
{
	execute_location(data_area, CODE_WRITE);
}

void lkdtm_EXEC_STACK(void)
{
	u8 stack_area[EXEC_SIZE];

	execute_location(stack_area, CODE_WRITE);
}

void lkdtm_EXEC_KMALLOC(void)
{
	u32 *kmalloc_area = kmalloc(EXEC_SIZE, GFP_KERNEL);

	execute_location(kmalloc_area, CODE_WRITE);
	kfree(kmalloc_area);
}

void lkdtm_EXEC_VMALLOC(void)
{
	u32 *vmalloc_area = vmalloc(EXEC_SIZE);

	execute_location(vmalloc_area, CODE_WRITE);
	vfree(vmalloc_area);
}

void lkdtm_EXEC_RODATA(void)
{
	execute_location(lkdtm_rodata_do_nothing, CODE_AS_IS);
}

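/*
 * Maps an anonymous PROT_EXEC page in userspace and then tries to execute
 * it from kernel context; SMEP/PXN-style protections should make the call
 * fault even though the page is executable from userspace.
 */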
void lkdtm_EXEC_USERSPACE(void)
{
	unsigned long user_addr;

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		return;
	}
	execute_user_location((void *)user_addr);
	vm_munmap(user_addr, PAGE_SIZE);
}

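/*
 * Dereferences a userspace address directly from kernel context instead of
 * going through the uaccess helpers; with SMAP/PAN-style protections both
 * the read and the write below are expected to fault.
 */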
void lkdtm_ACCESS_USERSPACE(void)
{
	unsigned long user_addr, tmp = 0;
	unsigned long *ptr;

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		return;
	}

	if (copy_to_user((void __user *)user_addr, &tmp, sizeof(tmp))) {
		pr_warn("copy_to_user failed\n");
		vm_munmap(user_addr, PAGE_SIZE);
		return;
	}

	ptr = (unsigned long *)user_addr;

	pr_info("attempting bad read at %p\n", ptr);
	tmp = *ptr;
	tmp += 0xc0dec0de;

	pr_info("attempting bad write at %p\n", ptr);
	*ptr = tmp;

	vm_munmap(user_addr, PAGE_SIZE);
}

void __init lkdtm_perms_init(void)
{
	/* Make sure we can write to __ro_after_init values during __init */
	ro_after_init |= 0xAA;
}