Searched refs:KFENCE_POOL_SIZE (Results 1 – 4 of 4) sorted by relevance
559 for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) { in kfence_init_pool()
636 for (char *p = (char *)addr; p < __kfence_pool + KFENCE_POOL_SIZE; p += PAGE_SIZE) { in kfence_init_pool_early()
646 memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool)); in kfence_init_pool_early()
661 free_size = KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool); in kfence_init_pool_late()
827 __kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE); in kfence_alloc_pool()
849 pr_info("initialized - using %lu bytes for %d objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE, in kfence_init_enable()
851 (void *)(__kfence_pool + KFENCE_POOL_SIZE)); in kfence_init_enable()
872 const unsigned long nr_pages = KFENCE_POOL_SIZE / PAGE_SIZE; in kfence_init_late()
885 __kfence_pool = alloc_pages_exact(KFENCE_POOL_SIZE, GFP_KERNEL); in kfence_init_late()
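Read together, these hits trace the pool's lifecycle: kfence_alloc_pool() reserves KFENCE_POOL_SIZE bytes from memblock early in boot, kfence_init_pool() walks the pool one page at a time, kfence_init_late() covers late enabling via alloc_pages_exact(), and the memblock_free_late()/free_size hits release the unused remainder of the pool. Below is a minimal userspace sketch of that allocate-then-walk pattern; malloc() stands in for memblock_alloc()/alloc_pages_exact(), and the 255-object/4 KiB-page constants are assumed defaults, not taken from the search results.

/*
 * Userspace sketch: reserve one contiguous pool of KFENCE_POOL_SIZE
 * bytes and visit it one page at a time, mirroring the allocation and
 * init loops found above.  Not kernel code.
 */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE                 4096UL  /* assumed 4 KiB pages */
#define CONFIG_KFENCE_NUM_OBJECTS 255     /* assumed default */
#define KFENCE_POOL_SIZE ((CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 * PAGE_SIZE)

int main(void)
{
	char *pool = malloc(KFENCE_POOL_SIZE);  /* kernel: memblock_alloc() early, alloc_pages_exact() late */
	unsigned long nr_pages = KFENCE_POOL_SIZE / PAGE_SIZE;
	unsigned long i;

	if (!pool)
		return 1;

	/* Walk every page in the pool, as kfence_init_pool() does. */
	for (i = 0; i < nr_pages; i++) {
		char *page = pool + i * PAGE_SIZE;
		(void)page;  /* the kernel sets up its guard/object page layout here */
	}

	printf("pool: %lu bytes, %lu pages for %d objects\n",
	       KFENCE_POOL_SIZE, nr_pages, CONFIG_KFENCE_NUM_OBJECTS);
	free(pool);
	return 0;
}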
27 #define KFENCE_POOL_SIZE ((CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 * PAGE_SIZE) macro
58 return unlikely((unsigned long)((char *)addr - __kfence_pool) < KFENCE_POOL_SIZE && __kfence_pool); in is_kfence_address()
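These two hits are the definition and the fast-path check. With the default CONFIG_KFENCE_NUM_OBJECTS=255 and 4 KiB pages, KFENCE_POOL_SIZE = (255 + 1) * 2 * 4096 = 2 MiB: each object gets a data page plus an adjacent guard page, and the + 1 adds one extra pair. The is_kfence_address() hit folds both bounds checks into a single comparison: for an address below __kfence_pool the unsigned subtraction wraps to a huge value, so one compare against KFENCE_POOL_SIZE rejects it, and the trailing && __kfence_pool handles a pool that was never allocated. A compilable userspace sketch of the same arithmetic and check (the malloc()'d pool and the 255/4 KiB defaults are assumptions for illustration):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE                 4096UL  /* assumed 4 KiB pages */
#define CONFIG_KFENCE_NUM_OBJECTS 255     /* assumed default */
#define KFENCE_POOL_SIZE ((CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 * PAGE_SIZE)

static char *__kfence_pool;  /* stand-in for the kernel's pool base pointer */

/* Same expression as the hit at line 58, minus the unlikely() hint. */
static bool is_kfence_address(const void *addr)
{
	return (unsigned long)((char *)addr - __kfence_pool) < KFENCE_POOL_SIZE &&
	       __kfence_pool;
}

int main(void)
{
	int on_stack;

	__kfence_pool = malloc(KFENCE_POOL_SIZE);
	if (!__kfence_pool)
		return 1;

	printf("pool size: %lu bytes\n", KFENCE_POOL_SIZE);              /* 2097152 (2 MiB) */
	printf("inside:  %d\n", is_kfence_address(__kfence_pool + 64));  /* 1 */
	printf("outside: %d\n", is_kfence_address(&on_stack));           /* 0: not in the pool */

	free(__kfence_pool);
	return 0;
}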
30 unsigned long pool_pages = KFENCE_POOL_SIZE >> PAGE_SHIFT; in kfence_split_mapping()
307 address_markers[KFENCE_END_NR].start_address = kfence_start + KFENCE_POOL_SIZE; in pt_dump_init()
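The last hit registers the end-of-pool marker for the page-table dump: the KFENCE region shown in the dump runs from kfence_start to kfence_start + KFENCE_POOL_SIZE. A simplified sketch of that bookkeeping follows; struct addr_marker, the marker indices, the base address, and the 2 MiB size are schematic stand-ins, not the real ptdump definitions.

#include <stdio.h>

#define KFENCE_POOL_SIZE (2UL << 20)  /* assumed: (255 + 1) * 2 * 4 KiB */

struct addr_marker {
	unsigned long start_address;
	const char *name;
};

enum { KFENCE_START_NR, KFENCE_END_NR, NR_MARKERS };

static struct addr_marker address_markers[NR_MARKERS] = {
	[KFENCE_START_NR] = { 0, "KFENCE pool start" },
	[KFENCE_END_NR]   = { 0, "KFENCE pool end" },
};

int main(void)
{
	unsigned long kfence_start = 0xffff800010000000UL;  /* made-up base */

	/* Same pattern as the hit above: end marker = start + pool size. */
	address_markers[KFENCE_START_NR].start_address = kfence_start;
	address_markers[KFENCE_END_NR].start_address = kfence_start + KFENCE_POOL_SIZE;

	printf("%s: 0x%lx\n%s: 0x%lx\n",
	       address_markers[KFENCE_START_NR].name,
	       address_markers[KFENCE_START_NR].start_address,
	       address_markers[KFENCE_END_NR].name,
	       address_markers[KFENCE_END_NR].start_address);
	return 0;
}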