Lines Matching +full:page +full:- +full:based

1 // SPDX-License-Identifier: GPL-2.0-only
29 #include <asm/page.h>
37 /* Fields set based on lines observed in the console. */
63 return -1; in kasan_suite_init()
70 * Temporarily enable multi-shot mode. Otherwise, KASAN would only in kasan_suite_init()
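The hits around lines 37-70 come from the suite setup: the real kasan_suite_init() hooks the console to observe KASAN reports and temporarily enables multi-shot reporting. Below is a minimal sketch of just the multi-shot part, assuming CONFIG_KASAN_KUNIT_TEST (which provides kasan_save_enable_multi_shot()/kasan_restore_multi_shot() and kasan_enabled()) and KUnit's suite_init/suite_exit hooks; the console-tracepoint plumbing is omitted and the function names are illustrative, not the upstream code.

static bool multishot;

static int kasan_suite_init_sketch(struct kunit_suite *suite)
{
        if (!kasan_enabled()) {
                pr_err("Can't run KASAN tests with KASAN disabled");
                return -1;
        }

        /*
         * Temporarily enable multi-shot mode so that every expected-failure
         * check can observe a report, not just the first one. The previous
         * setting is restored in suite_exit.
         */
        multishot = kasan_save_enable_multi_shot();
        return 0;
}

static void kasan_suite_exit_sketch(struct kunit_suite *suite)
{
        kasan_restore_multi_shot(multishot);
}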
94 * KUNIT_EXPECT_KASAN_FAIL() - check that the executed expression produces a
99 * For hardware tag-based KASAN, when a synchronous tag fault happens, tag
100 * checking is auto-disabled. When this happens, this test handler reenables
162 size_t size = 128 - KASAN_GRANULE_SIZE - 5; in kmalloc_oob_right()
176 * An aligned access into the first out-of-bounds granule that falls in kmalloc_oob_right()
181 /* Out-of-bounds access past the aligned kmalloc object. */ in kmalloc_oob_right()
197 KUNIT_EXPECT_KASAN_FAIL(test, *ptr = *(ptr - 1)); in kmalloc_oob_left()
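The kmalloc_oob_right()/kmalloc_oob_left() hits above show the basic pattern behind KUNIT_EXPECT_KASAN_FAIL(): perform one deliberately bad access and expect exactly one KASAN report. A condensed sketch, assuming the suite's KUNIT_EXPECT_KASAN_FAIL() macro, KASAN_GRANULE_SIZE from mm/kasan/kasan.h, and the usual <kunit/test.h>/<linux/slab.h> headers are in scope; the *_sketch names are illustrative.

static void kmalloc_oob_right_sketch(struct kunit *test)
{
        char *ptr;
        /* Leave a few bytes of redzone inside the last granule. */
        size_t size = 128 - KASAN_GRANULE_SIZE - 5;

        ptr = kmalloc(size, GFP_KERNEL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

        /* Keep the compiler from proving the accesses out of bounds. */
        OPTIMIZER_HIDE_VAR(ptr);

        /* Unaligned, just past the object: only generic KASAN is byte-precise. */
        if (IS_ENABLED(CONFIG_KASAN_GENERIC))
                KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'x');

        /* An aligned access into the first out-of-bounds granule. */
        KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + 5] = 'y');

        /* Out-of-bounds access past the aligned kmalloc object. */
        KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + KASAN_GRANULE_SIZE + 5] = 'z');

        kfree(ptr);
}

static void kmalloc_oob_left_sketch(struct kunit *test)
{
        char *ptr;

        ptr = kmalloc(15, GFP_KERNEL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

        OPTIMIZER_HIDE_VAR(ptr);
        /* Read one byte before the start of the object. */
        KUNIT_EXPECT_KASAN_FAIL(test, *ptr = *(ptr - 1));

        kfree(ptr);
}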
216 * fit into a slab cache and therefore is allocated via the page allocator
266 struct page *pages; in pagealloc_oob_right()
271 * With generic KASAN page allocations have no redzones, thus in pagealloc_oob_right()
272 * out-of-bounds detection is not guaranteed. in pagealloc_oob_right()
288 struct page *pages; in pagealloc_uaf()
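The pagealloc_oob_right()/pagealloc_uaf() hits exercise allocations served directly by the page allocator. A sketch under the same assumptions as above; KASAN_TEST_NEEDS_CONFIG_OFF() is the suite's own config-gating helper (treated here as given), needed because generic KASAN adds no redzones to page allocations.

static void pagealloc_oob_right_sketch(struct kunit *test)
{
        char *ptr;
        struct page *pages;
        size_t order = 4;
        size_t size = (1UL << (PAGE_SHIFT + order));

        /*
         * With generic KASAN page allocations have no redzones, so
         * out-of-bounds detection is not guaranteed; skip that mode.
         */
        KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

        pages = alloc_pages(GFP_KERNEL, order);
        ptr = page_address(pages);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

        KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
        free_pages((unsigned long)ptr, order);
}

static void pagealloc_uaf_sketch(struct kunit *test)
{
        char *ptr;
        struct page *pages;
        size_t order = 4;

        pages = alloc_pages(GFP_KERNEL, order);
        ptr = page_address(pages);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
        free_pages((unsigned long)ptr, order);

        /* The freed pages are poisoned, so any access must be reported. */
        KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
}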
302 size_t size = KMALLOC_MAX_CACHE_SIZE - 256; in kmalloc_large_oob_right()
306 * and does not trigger the page allocator fallback in SLUB. in kmalloc_large_oob_right()
323 middle = size1 + (size2 - size1) / 2; in krealloc_more_oob_helper()
331 /* Suppress -Warray-bounds warnings. */ in krealloc_more_oob_helper()
335 ptr2[size1 - 1] = 'x'; in krealloc_more_oob_helper()
338 ptr2[size2 - 1] = 'x'; in krealloc_more_oob_helper()
358 middle = size2 + (size1 - size2) / 2; in krealloc_less_oob_helper()
366 /* Suppress -Warray-bounds warnings. */ in krealloc_less_oob_helper()
370 ptr2[size2 - 1] = 'x'; in krealloc_less_oob_helper()
389 KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1 - 1] = 'x'); in krealloc_less_oob_helper()
424 * Check that krealloc() detects a use-after-free, returns NULL,
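The krealloc hits (lines 323-424) cover three behaviours: growing keeps every byte up to the new size accessible and moves the redzone, shrinking poisons the tail, and krealloc() on an already-freed pointer is itself a use-after-free that returns NULL. A sketch of the grow and use-after-free cases under the same assumptions (round_up() comes from the kernel's math helpers; the shrink case is analogous and not shown).

static void krealloc_grow_sketch(struct kunit *test)
{
        char *ptr1, *ptr2;
        size_t size1 = 201, size2 = 235;

        ptr1 = kmalloc(size1, GFP_KERNEL);
        ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

        /* Suppress -Warray-bounds warnings. */
        OPTIMIZER_HIDE_VAR(ptr2);

        /* Every byte up to the new size must be accessible... */
        ptr2[size1 - 1] = 'x';
        ptr2[size2 - 1] = 'x';

        /* ...the unaligned byte right after it is caught by generic mode... */
        if (IS_ENABLED(CONFIG_KASAN_GENERIC))
                KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');

        /* ...and the first aligned offset past size2 is redzoned in all modes. */
        KUNIT_EXPECT_KASAN_FAIL(test,
                ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');

        kfree(ptr2);
}

static void krealloc_uaf_sketch(struct kunit *test)
{
        char *ptr1, *ptr2;

        ptr1 = kmalloc(201, GFP_KERNEL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
        kfree(ptr1);

        /* krealloc() of a freed pointer is itself a use-after-free... */
        KUNIT_EXPECT_KASAN_FAIL(test, ptr2 = krealloc(ptr1, 235, GFP_KERNEL));
        /* ...it returns NULL and must not unpoison the freed object. */
        KUNIT_ASSERT_NULL(test, ptr2);
        KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)ptr1);
}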
453 ptr1 = kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL); in kmalloc_oob_16()
494 size_t size = 128 - KASAN_GRANULE_SIZE; in kmalloc_oob_memset_2()
502 KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 1, 0, 2)); in kmalloc_oob_memset_2()
509 size_t size = 128 - KASAN_GRANULE_SIZE; in kmalloc_oob_memset_4()
517 KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 3, 0, 4)); in kmalloc_oob_memset_4()
524 size_t size = 128 - KASAN_GRANULE_SIZE; in kmalloc_oob_memset_8()
532 KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 7, 0, 8)); in kmalloc_oob_memset_8()
539 size_t size = 128 - KASAN_GRANULE_SIZE; in kmalloc_oob_memset_16()
547 KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 15, 0, 16)); in kmalloc_oob_memset_16()
554 size_t size = 128 - KASAN_GRANULE_SIZE; in kmalloc_oob_in_memset()
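The memset hits (lines 494-554) check that out-of-bounds writes are caught inside the mem*() intrinsics themselves, both when the tail of the write lands past the object and when the length itself overflows it. A sketch under the same assumptions; note the upstream tests additionally skip configurations where the memory intrinsics are not instrumented.

static void kmalloc_oob_memset_sketch(struct kunit *test)
{
        char *ptr;
        size_t size = 128 - KASAN_GRANULE_SIZE;
        size_t memset_size = 2;

        ptr = kmalloc(size, GFP_KERNEL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

        OPTIMIZER_HIDE_VAR(ptr);
        OPTIMIZER_HIDE_VAR(memset_size);

        /* The last byte of the write lands one byte past the object. */
        KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 1, 0, memset_size));

        /* Here the start is fine but the length overflows the object. */
        KUNIT_EXPECT_KASAN_FAIL(test,
                memset(ptr, 0, size + KASAN_GRANULE_SIZE));

        kfree(ptr);
}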
572 size_t invalid_size = -2; in kmalloc_memmove_negative_size()
577 * Hardware tag-based mode doesn't check memmove for negative size. in kmalloc_memmove_negative_size()
578 * As a result, this test introduces a side-effect memory corruption, in kmalloc_memmove_negative_size()
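The negative-size memmove hit passes a wrapped-around (huge) size to memmove(); hardware tag-based KASAN does not check that case, so the test skips it to avoid real corruption. A sketch, again treating the suite's KASAN_TEST_NEEDS_CONFIG_OFF() helper as given.

static void memmove_negative_size_sketch(struct kunit *test)
{
        char *ptr;
        size_t size = 64;
        size_t invalid_size = -2;

        /*
         * Hardware tag-based mode doesn't check memmove for negative size;
         * running there would only corrupt memory, so skip that mode.
         */
        KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_HW_TAGS);

        ptr = kmalloc(size, GFP_KERNEL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

        memset((char *)ptr, 0, 64);
        OPTIMIZER_HIDE_VAR(ptr);
        OPTIMIZER_HIDE_VAR(invalid_size);

        /* (size_t)-2 wraps to a huge length, overflowing the 64-byte object. */
        KUNIT_EXPECT_KASAN_FAIL(test,
                memmove((char *)ptr, (char *)ptr + 4, invalid_size));

        kfree(ptr);
}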
661 * For tag-based KASAN ptr1 and ptr2 tags might happen to be the same. in kmalloc_uaf2()
676 * Check that KASAN detects use-after-free when another object was allocated in
677 * the same slot. Relevant for the tag-based modes, which do not use quarantine.
684 /* This test is specifically crafted for tag-based modes. */ in kmalloc_uaf3()
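The kmalloc_uaf2()/kmalloc_uaf3() hits check that a use-after-free is still reported after the slot has been handed out again, which matters for the tag-based modes that rely on retagging rather than quarantine. A simplified sketch of the slot-reuse case; the retry loop mirrors the upstream comment that the two allocations may occasionally receive the same tag.

static void kmalloc_uaf_reuse_sketch(struct kunit *test)
{
        char *ptr1, *ptr2;
        size_t size = 43;
        int counter = 0;

again:
        ptr1 = kmalloc(size, GFP_KERNEL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
        kfree(ptr1);

        /* The same slot is likely returned, now with a fresh tag. */
        ptr2 = kmalloc(size, GFP_KERNEL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

        /*
         * For tag-based KASAN ptr1 and ptr2 tags might happen to be the
         * same (the comparison includes the tag bits); allow a bounded
         * number of retries to get different tags.
         */
        if (!IS_ENABLED(CONFIG_KASAN_GENERIC) && ptr1 == ptr2 &&
            counter++ < 16) {
                kfree(ptr2);
                goto again;
        }

        /* Accessing through the stale pointer must still be reported. */
        KUNIT_EXPECT_KASAN_FAIL(test, ptr1[40] = 'x');

        kfree(ptr2);
}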
702 struct page *page; in kfree_via_page() local
708 page = virt_to_page(ptr); in kfree_via_page()
710 kfree(page_address(page) + offset); in kfree_via_page()
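kfree_via_page() goes the other way: it rebuilds the object's address from its struct page and frees through that, and KASAN must accept this as a valid free. A sketch with the same assumptions (offset_in_page() comes from <linux/mm.h>).

static void kfree_via_page_sketch(struct kunit *test)
{
        char *ptr;
        size_t size = 8;
        struct page *page;
        unsigned long offset;

        ptr = kmalloc(size, GFP_KERNEL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

        /* Rebuild the object address from its struct page and free it. */
        page = virt_to_page(ptr);
        offset = offset_in_page(ptr);
        kfree(page_address(page) + offset);
}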
794 p[i][0] = p[i][size - 1] = 42; in kmem_cache_bulk()
805 * Deliberate out-of-bounds access. To prevent CONFIG_UBSAN_LOCAL_BOUNDS in kasan_global_oob_right()
828 char *p = array - 3; in kasan_global_oob_left()
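The kasan_global_oob_right()/kasan_global_oob_left() hits rely on compile-time redzones around globals, which only the generic mode emits; the volatile pointer keeps the compiler (and CONFIG_UBSAN_LOCAL_BOUNDS) from rejecting the obviously bad access at build time. A sketch; global_array and the config-gating helper are assumed names here.

static char global_array[10];

static void kasan_global_oob_sketch(struct kunit *test)
{
        /*
         * Go through a volatile pointer so neither the compiler nor
         * CONFIG_UBSAN_LOCAL_BOUNDS can prove the access out of bounds.
         */
        char *volatile array = global_array;
        char *p = array + ARRAY_SIZE(global_array) + 3;

        /* Only the generic mode instruments globals. */
        KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

        KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}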
843 size_t size = 128 - KASAN_GRANULE_SIZE - 5; in ksize_unpoisons_memory()
856 ptr[size - 1] = 'x'; in ksize_unpoisons_memory()
862 KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size - 1]); in ksize_unpoisons_memory()
868 * Check that a use-after-free is detected by ksize() and via normal accesses
874 int size = 128 - KASAN_GRANULE_SIZE; in ksize_uaf()
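The ksize hits check two things: ksize() reports the usable slot size but must not unpoison the slack beyond the requested size, and once the object is freed even ksize() itself counts as a use-after-free. A combined sketch under the same assumptions.

static void ksize_sketch(struct kunit *test)
{
        char *ptr;
        size_t size = 128 - KASAN_GRANULE_SIZE - 5;
        size_t real_size;

        ptr = kmalloc(size, GFP_KERNEL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

        /* ksize() reports the usable slot size, which exceeds the request. */
        real_size = ksize(ptr);
        KUNIT_EXPECT_GT(test, real_size, size);

        OPTIMIZER_HIDE_VAR(ptr);

        /* Accesses within the requested size are fine... */
        ptr[0] = 'x';
        ptr[size - 1] = 'x';

        /* ...but the slack past the request stays poisoned. */
        KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size + 5]);
        KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size - 1]);

        kfree(ptr);

        /* After kfree(), even ksize() itself is a use-after-free. */
        KUNIT_EXPECT_KASAN_FAIL(test, ksize(ptr));
        KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
}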
904 char *p = array - 1; in kasan_alloca_oob_left()
1123 * below accesses are still out-of-bounds, since bitops are defined to in kasan_bitops_generic()
1140 /* This test is specifically crafted for tag-based modes. */ in kasan_bitops_tags()
1143 /* kmalloc-64 cache will be used and the last 16 bytes will be the redzone. */ in kasan_bitops_tags()
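The bitops hits exploit the fact that bit operations read and write the whole long containing the bit, so a bit index just past the allocation is an out-of-bounds access on the neighbouring word. A sketch of the generic-mode variant (the tag-based variant in the listing instead sizes the buffer so the last 16 bytes of the kmalloc-64 slot are redzone).

static void kasan_bitops_sketch(struct kunit *test)
{
        unsigned long *bits;

        /* This variant relies on generic KASAN's byte-granular redzones. */
        KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

        /*
         * Allocate one byte more than a long: the accesses below then stay
         * inside the same slab object's slack/redzone instead of corrupting
         * a neighbouring allocation.
         */
        bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);

        /* Bit BITS_PER_LONG lives in the second long, which is mostly redzone. */
        KUNIT_EXPECT_KASAN_FAIL(test, set_bit(BITS_PER_LONG, bits));

        /* This bit index is past the allocated bytes entirely. */
        KUNIT_EXPECT_KASAN_FAIL(test,
                test_and_set_bit(BITS_PER_LONG + BITS_PER_BYTE, bits));

        kfree(bits);
}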
1185 ((volatile struct kasan_rcu_info *)fp)->i; in rcu_uaf_reclaim()
1199 call_rcu(&global_rcu_ptr->rcu, rcu_uaf_reclaim); in rcu_uaf()
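The rcu_uaf hits show how a use-after-free inside an RCU callback is exercised: the callback frees the object and then reads it, and rcu_barrier() keeps the whole sequence inside the expectation window. A simplified sketch (the upstream test also goes through rcu_dereference_protected() to keep sparse happy).

static struct kasan_rcu_info {
        int i;
        struct rcu_head rcu;
} *global_rcu_ptr;

static void rcu_uaf_reclaim(struct rcu_head *rp)
{
        struct kasan_rcu_info *fp =
                container_of(rp, struct kasan_rcu_info, rcu);

        kfree(fp);
        /* Use-after-free: fp was just freed above. */
        ((volatile struct kasan_rcu_info *)fp)->i;
}

static void rcu_uaf_sketch(struct kunit *test)
{
        struct kasan_rcu_info *ptr;

        ptr = kmalloc(sizeof(struct kasan_rcu_info), GFP_KERNEL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
        global_rcu_ptr = ptr;

        /*
         * rcu_barrier() waits for the callback, so the report is produced
         * inside the KUNIT_EXPECT_KASAN_FAIL() window.
         */
        KUNIT_EXPECT_KASAN_FAIL(test,
                call_rcu(&global_rcu_ptr->rcu, rcu_uaf_reclaim);
                rcu_barrier());
}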
1224 ((volatile struct work_struct *)work)->data); in workqueue_uaf()
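workqueue_uaf() follows the same idea with a work item that frees itself; once the queue has been flushed, reading any field of the item is a use-after-free. A sketch; the workqueue name is chosen here for illustration.

static void workqueue_uaf_work(struct work_struct *work)
{
        /* Free the work item from within its own handler. */
        kfree(work);
}

static void workqueue_uaf_sketch(struct kunit *test)
{
        struct workqueue_struct *workqueue;
        struct work_struct *work;

        workqueue = create_workqueue("kasan_test_wq");
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, workqueue);

        work = kmalloc(sizeof(struct work_struct), GFP_KERNEL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, work);

        INIT_WORK(work, workqueue_uaf_work);
        queue_work(workqueue, work);
        /* destroy_workqueue() flushes, so the work has run and freed itself. */
        destroy_workqueue(workqueue);

        /* Reading any field of the freed work item is a use-after-free. */
        KUNIT_EXPECT_KASAN_FAIL(test,
                ((volatile struct work_struct *)work)->data);
}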
1231 /* This test is intended for tag-based modes. */ in vmalloc_helpers_tags()
1265 struct page *page; in vmalloc_oob() local
1266 size_t size = PAGE_SIZE / 2 - KASAN_GRANULE_SIZE - 5; in vmalloc_oob()
1276 * We have to be careful not to hit the guard page in vmalloc tests. in vmalloc_oob()
1280 /* Make sure in-bounds accesses are valid. */ in vmalloc_oob()
1282 v_ptr[size - 1] = 0; in vmalloc_oob()
1291 /* An aligned access into the first out-of-bounds granule. */ in vmalloc_oob()
1294 /* Check that in-bounds accesses to the physical page are valid. */ in vmalloc_oob()
1295 page = vmalloc_to_page(v_ptr); in vmalloc_oob()
1296 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, page); in vmalloc_oob()
1297 p_ptr = page_address(page); in vmalloc_oob()
1304 * We can't check for use-after-unmap bugs in this nor in the following in vmalloc_oob()
1305 * vmalloc tests, as the page might be fully unmapped and accessing it in vmalloc_oob()
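The vmalloc_oob() hits pick a size of less than a page so the out-of-bounds accesses stay on the mapped page and never reach the vmalloc guard page, which would oops instead of producing a KASAN report. A sketch, gated on CONFIG_KASAN_VMALLOC via the suite's helper.

static void vmalloc_oob_sketch(struct kunit *test)
{
        char *v_ptr, *p_ptr;
        struct page *page;
        size_t size = PAGE_SIZE / 2 - KASAN_GRANULE_SIZE - 5;

        KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);

        v_ptr = vmalloc(size);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);

        OPTIMIZER_HIDE_VAR(v_ptr);

        /* Make sure in-bounds accesses are valid. */
        v_ptr[0] = 0;
        v_ptr[size - 1] = 0;

        /* Unaligned access just past the request: generic mode is precise. */
        if (IS_ENABLED(CONFIG_KASAN_GENERIC))
                KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)v_ptr)[size]);

        /* An aligned access into the first out-of-bounds granule. */
        KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)v_ptr)[size + 5]);

        /* In-bounds accesses to the backing physical page stay valid. */
        page = vmalloc_to_page(v_ptr);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, page);
        p_ptr = page_address(page);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);
        p_ptr[0] = 0;

        vfree(v_ptr);
        /*
         * Use-after-unmap is not checked: the page may be fully unmapped
         * after vfree() and touching it would crash, not report.
         */
}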
1313 struct page *p_page, *v_page; in vmap_tags()
1316 * This test is specifically crafted for the software tag-based mode, in vmap_tags()
1317 * the only tag-based mode that poisons vmap mappings. in vmap_tags()
1332 * We can't check for out-of-bounds bugs in this nor in the following in vmap_tags()
1333 * vmalloc tests, as allocations have page granularity and accessing in vmap_tags()
1334 * the guard page will crash the kernel. in vmap_tags()
1340 /* Make sure that in-bounds accesses through both pointers work. */ in vmap_tags()
1344 /* Make sure vmalloc_to_page() correctly recovers the page pointer. */ in vmap_tags()
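vmap_tags() only runs under the software tag-based mode with CONFIG_KASAN_VMALLOC, the one tag-based mode that poisons vmap mappings; it maps an already-allocated page and checks that both the linear-map and vmap pointers work and that vmalloc_to_page() round-trips. A sketch; the upstream test additionally checks the assigned pointer-tag range via mm/kasan-internal helpers, which is omitted here.

static void vmap_tags_sketch(struct kunit *test)
{
        char *p_ptr, *v_ptr;
        struct page *p_page, *v_page;

        KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS);
        KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);

        p_page = alloc_pages(GFP_KERNEL, 1);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_page);
        p_ptr = page_address(p_page);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);

        v_ptr = vmap(&p_page, 1, VM_MAP, PAGE_KERNEL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);

        /*
         * Out-of-bounds checks are skipped here: vmap has page granularity
         * and touching the guard page would crash the kernel.
         */

        /* Make sure that in-bounds accesses through both pointers work. */
        *p_ptr = 0;
        *v_ptr = 0;

        /* Make sure vmalloc_to_page() correctly recovers the page pointer. */
        v_page = vmalloc_to_page(v_ptr);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_page);
        KUNIT_EXPECT_PTR_EQ(test, p_page, v_page);

        vunmap(v_ptr);
        free_pages((unsigned long)p_ptr, 1);
}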
1356 struct page *page; in vm_map_ram_tags() local
1359 * This test is specifically crafted for the software tag-based mode, in vm_map_ram_tags()
1360 * the only tag-based mode that poisons vm_map_ram mappings. in vm_map_ram_tags()
1364 page = alloc_pages(GFP_KERNEL, 1); in vm_map_ram_tags()
1365 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, page); in vm_map_ram_tags()
1366 p_ptr = page_address(page); in vm_map_ram_tags()
1369 v_ptr = vm_map_ram(&page, 1, -1); in vm_map_ram_tags()
1375 /* Make sure that in-bounds accesses through both pointers work. */ in vm_map_ram_tags()
1389 * This test is specifically crafted for the software tag-based mode, in vmalloc_percpu()
1390 * the only tag-based mode that poisons percpu mappings. in vmalloc_percpu()
1402 /* Make sure that in-bounds accesses don't crash the kernel. */ in vmalloc_percpu()
1411 * KASAN_TAG_KERNEL) range (note: excluding the match-all tag) for tag-based
1417 struct page *pages; in match_all_not_assigned()
1454 /* Check that 0xff works as a match-all pointer tag for tag-based modes. */
1480 /* Check that there are no match-all memory tags for tag-based modes. */