1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_KASAN_H
3 #define _LINUX_KASAN_H
4
5 #include <linux/types.h>
6
7 struct kmem_cache;
8 struct page;
9 struct vm_struct;
10 struct task_struct;
11
12 #ifdef CONFIG_KASAN
13
14 #include <linux/pgtable.h>
15 #include <asm/kasan.h>
16
/*
 * kasan_data struct is used in KUnit tests for KASAN expected failures:
 * the test records whether a report is expected and the report path
 * records whether one was produced.
 * NOTE(review): field roles inferred from names — confirm against the
 * KUnit KASAN test harness.
 */
struct kunit_kasan_expectation {
	bool report_expected;
	bool report_found;
};
22
23 extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
24 extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE];
25 extern pmd_t kasan_early_shadow_pmd[PTRS_PER_PMD];
26 extern pud_t kasan_early_shadow_pud[PTRS_PER_PUD];
27 extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];
28
29 int kasan_populate_early_shadow(const void *shadow_start,
30 const void *shadow_end);
31
kasan_mem_to_shadow(const void * addr)32 static inline void *kasan_mem_to_shadow(const void *addr)
33 {
34 return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
35 + KASAN_SHADOW_OFFSET;
36 }
37
38 /* Enable reporting bugs after kasan_disable_current() */
39 extern void kasan_enable_current(void);
40
41 /* Disable reporting bugs for current task */
42 extern void kasan_disable_current(void);
43
44 void kasan_unpoison_shadow(const void *address, size_t size);
45
46 void kasan_unpoison_task_stack(struct task_struct *task);
47
48 void kasan_alloc_pages(struct page *page, unsigned int order);
49 void kasan_free_pages(struct page *page, unsigned int order);
50
51 void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
52 slab_flags_t *flags);
53
54 void kasan_poison_slab(struct page *page);
55 void kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
56 void kasan_poison_object_data(struct kmem_cache *cache, void *object);
57 void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
58 const void *object);
59
60 void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
61 gfp_t flags);
62 void kasan_kfree_large(void *ptr, unsigned long ip);
63 void kasan_poison_kfree(void *ptr, unsigned long ip);
64 void * __must_check kasan_kmalloc(struct kmem_cache *s, const void *object,
65 size_t size, gfp_t flags);
66 void * __must_check kasan_krealloc(const void *object, size_t new_size,
67 gfp_t flags);
68
69 void * __must_check kasan_slab_alloc(struct kmem_cache *s, void *object,
70 gfp_t flags);
71 bool kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip);
72
/*
 * Per-cache KASAN layout information: byte offsets of the allocation
 * and free metadata for objects of this cache.
 */
struct kasan_cache {
	int alloc_meta_offset;
	int free_meta_offset;
};
77
78 /*
79 * These functions provide a special case to support backing module
80 * allocations with real shadow memory. With KASAN vmalloc, the special
81 * case is unnecessary, as the work is handled in the generic case.
82 */
83 #ifndef CONFIG_KASAN_VMALLOC
84 int kasan_module_alloc(void *addr, size_t size);
85 void kasan_free_shadow(const struct vm_struct *vm);
86 #else
/* With KASAN_VMALLOC the generic vmalloc path backs module shadow: no-ops. */
static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
static inline void kasan_free_shadow(const struct vm_struct *vm) {}
89 #endif
90
91 int kasan_add_zero_shadow(void *start, unsigned long size);
92 void kasan_remove_zero_shadow(void *start, unsigned long size);
93
94 size_t __ksize(const void *);
/* Unpoison an entire object, using __ksize() to obtain its size. */
static inline void kasan_unpoison_slab(const void *ptr)
{
	kasan_unpoison_shadow(ptr, __ksize(ptr));
}
99 size_t kasan_metadata_size(struct kmem_cache *cache);
100
101 bool kasan_save_enable_multi_shot(void);
102 void kasan_restore_multi_shot(bool enabled);
103
104 #else /* CONFIG_KASAN */
105
/*
 * CONFIG_KASAN=n: every hook becomes a no-op stub so callers can stay
 * unconditional (no #ifdef at call sites).
 */
static inline void kasan_unpoison_shadow(const void *address, size_t size) {}

static inline void kasan_unpoison_task_stack(struct task_struct *task) {}

static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}

static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
static inline void kasan_free_pages(struct page *page, unsigned int order) {}

static inline void kasan_cache_create(struct kmem_cache *cache,
				      unsigned int *size,
				      slab_flags_t *flags) {}

static inline void kasan_poison_slab(struct page *page) {}
static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
					      void *object) {}
static inline void kasan_poison_object_data(struct kmem_cache *cache,
					    void *object) {}
/* Identity stub: hand the object straight back. */
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
					const void *object)
{
	return (void *)object;
}
130
/* kmalloc()/krealloc() stubs: pass the pointer through untouched. */
static inline void *kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags)
{
	return ptr;
}
static inline void kasan_kfree_large(void *ptr, unsigned long ip) {}
static inline void kasan_poison_kfree(void *ptr, unsigned long ip) {}
static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
				  size_t size, gfp_t flags)
{
	return (void *)object;
}
static inline void *kasan_krealloc(const void *object, size_t new_size,
				   gfp_t flags)
{
	return (void *)object;
}

static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
				     gfp_t flags)
{
	return object;
}
/* Stub: always reports false. */
static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
				   unsigned long ip)
{
	return false;
}
158
/* Module, zero-shadow and slab-metadata stubs for CONFIG_KASAN=n. */
static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
static inline void kasan_free_shadow(const struct vm_struct *vm) {}

static inline int kasan_add_zero_shadow(void *start, unsigned long size)
{
	return 0;
}
static inline void kasan_remove_zero_shadow(void *start,
					unsigned long size)
{}

static inline void kasan_unpoison_slab(const void *ptr) { }
static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
172
173 #endif /* CONFIG_KASAN */
174
175 #ifdef CONFIG_KASAN_GENERIC
176
177 #define KASAN_SHADOW_INIT 0
178
179 void kasan_cache_shrink(struct kmem_cache *cache);
180 void kasan_cache_shutdown(struct kmem_cache *cache);
181 void kasan_record_aux_stack(void *ptr);
182
183 #else /* CONFIG_KASAN_GENERIC */
184
/* CONFIG_KASAN_GENERIC=n: cache shrink/shutdown and aux-stack hooks vanish. */
static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}
188
189 #endif /* CONFIG_KASAN_GENERIC */
190
191 #ifdef CONFIG_KASAN_SW_TAGS
192
193 #define KASAN_SHADOW_INIT 0xFF
194
195 void kasan_init_tags(void);
196
197 void *kasan_reset_tag(const void *addr);
198
199 bool kasan_report(unsigned long addr, size_t size,
200 bool is_write, unsigned long ip);
201
202 #else /* CONFIG_KASAN_SW_TAGS */
203
static inline void kasan_init_tags(void) { }

/* Without SW tags there is no tag to strip: return the address unchanged. */
static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)addr;
}
210
211 #endif /* CONFIG_KASAN_SW_TAGS */
212
213 #ifdef CONFIG_KASAN_VMALLOC
214 int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
215 void kasan_poison_vmalloc(const void *start, unsigned long size);
216 void kasan_unpoison_vmalloc(const void *start, unsigned long size);
217 void kasan_release_vmalloc(unsigned long start, unsigned long end,
218 unsigned long free_region_start,
219 unsigned long free_region_end);
220 #else
/* CONFIG_KASAN_VMALLOC=n: vmalloc shadow management compiles away. */
static inline int kasan_populate_vmalloc(unsigned long start,
					unsigned long size)
{
	return 0;
}

static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
{ }
static inline void kasan_unpoison_vmalloc(const void *start, unsigned long size)
{ }
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) {}
235 #endif
236
237 #ifdef CONFIG_KASAN_INLINE
238 void kasan_non_canonical_hook(unsigned long addr);
239 #else /* CONFIG_KASAN_INLINE */
kasan_non_canonical_hook(unsigned long addr)240 static inline void kasan_non_canonical_hook(unsigned long addr) { }
241 #endif /* CONFIG_KASAN_INLINE */
242
243 #endif /* LINUX_KASAN_H */
244