/Linux-v5.15/mm/

init-mm.c
    29  struct mm_struct init_mm = {    (variable)
    34  .write_protect_seq = SEQCNT_ZERO(init_mm.write_protect_seq),
    35  MMAP_LOCK_INITIALIZER(init_mm)
    36  .page_table_lock = __SPIN_LOCK_UNLOCKED(init_mm.page_table_lock),
    37  .arg_lock = __SPIN_LOCK_UNLOCKED(init_mm.arg_lock),
    38  .mmlist = LIST_HEAD_INIT(init_mm.mmlist),
    41  INIT_MM_CONTEXT(init_mm)
    47  init_mm.start_code = (unsigned long)start_code;    (in setup_initial_init_mm())
    48  init_mm.end_code = (unsigned long)end_code;    (in setup_initial_init_mm())
    49  init_mm.end_data = (unsigned long)end_data;    (in setup_initial_init_mm())
    [all …]
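The hits above are the static definition of the kernel's own address-space descriptor, plus the early helper that records the kernel image layout in it. For orientation, here is a condensed, hedged sketch of that file; the fields not quoted in the hits (mm_rb, pgd, mm_users, mm_count, user_ns, cpu_bitmap) are recalled from the upstream source and may differ in detail from v5.15.

```c
#include <linux/mm_types.h>
#include <linux/atomic.h>
#include <linux/pgtable.h>
#include <linux/user_namespace.h>

/*
 * Condensed sketch of mm/init-mm.c: the kernel's address space is a
 * statically initialised mm_struct whose page tables are swapper_pg_dir.
 * Fields not shown in the search hits are recalled from the upstream
 * file and may not match v5.15 exactly.
 */
struct mm_struct init_mm = {
	.mm_rb		= RB_ROOT,
	.pgd		= swapper_pg_dir,
	.mm_users	= ATOMIC_INIT(2),
	.mm_count	= ATOMIC_INIT(1),
	.write_protect_seq = SEQCNT_ZERO(init_mm.write_protect_seq),
	MMAP_LOCK_INITIALIZER(init_mm)
	.page_table_lock = __SPIN_LOCK_UNLOCKED(init_mm.page_table_lock),
	.arg_lock	= __SPIN_LOCK_UNLOCKED(init_mm.arg_lock),
	.mmlist		= LIST_HEAD_INIT(init_mm.mmlist),
	.user_ns	= &init_user_ns,
	.cpu_bitmap	= CPU_BITS_NONE,
	INIT_MM_CONTEXT(init_mm)
};

/* Called early (from each architecture's setup_arch()) to record the
 * kernel image layout in init_mm, as the setup_initial_init_mm() hits show. */
void __init setup_initial_init_mm(void *start_code, void *end_code,
				  void *end_data, void *brk)
{
	init_mm.start_code = (unsigned long)start_code;
	init_mm.end_code = (unsigned long)end_code;
	init_mm.end_data = (unsigned long)end_data;
	init_mm.brk = (unsigned long)brk;
}
```

Because init_mm is never allocated or freed, it can be initialised entirely at compile time; its page_table_lock and mmap lock are what the kernel-mapping helpers in the rest of this listing take.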
|
sparse-vmemmap.c
    63  pte_t *pgtable = pte_alloc_one_kernel(&init_mm);    (in split_vmemmap_huge_pmd())
    68  pmd_populate_kernel(&init_mm, &__pmd, pgtable);    (in split_vmemmap_huge_pmd())
    76  set_pte_at(&init_mm, addr, pte, entry);    (in split_vmemmap_huge_pmd())
    81  pmd_populate_kernel(&init_mm, pmd, pgtable);    (in split_vmemmap_huge_pmd())
    245 set_pte_at(&init_mm, addr, pte, entry);    (in vmemmap_remap_pte())
    262 set_pte_at(&init_mm, addr, pte, mk_pte(page, pgprot));    (in vmemmap_restore_pte())
    303 mmap_write_lock(&init_mm);    (in vmemmap_remap_free())
    305 mmap_write_downgrade(&init_mm);    (in vmemmap_remap_free())
    323 mmap_read_unlock(&init_mm);    (in vmemmap_remap_free())
    380 mmap_read_lock(&init_mm);    (in vmemmap_remap_alloc())
    [all …]
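sparse-vmemmap.c shows the standard recipe for demoting a kernel huge PMD: allocate a PTE table with pte_alloc_one_kernel(&init_mm), pre-fill it with set_pte_at(), then swing the PMD over with pmd_populate_kernel(), all against init_mm and under its locks. A hedged, simplified sketch of that recipe follows; the function name is illustrative and PAGE_KERNEL stands in for whatever protections the original huge mapping carried.

```c
#include <linux/mm.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/*
 * Replace a huge PMD mapping of the kernel range [start, start + PMD_SIZE)
 * with a newly allocated PTE table covering the same physical pages.
 * Sketch only, not the in-tree helper.
 */
static int split_kernel_huge_pmd(pmd_t *pmd, unsigned long start)
{
	pte_t *pgtable = pte_alloc_one_kernel(&init_mm);
	unsigned long addr = start;
	unsigned long pfn = pmd_pfn(*pmd);
	int i;

	if (!pgtable)
		return -ENOMEM;

	/* Build PTEs for every base page the huge PMD used to cover. */
	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE, pfn++) {
		pte_t entry = pfn_pte(pfn, PAGE_KERNEL);

		set_pte_at(&init_mm, addr, pgtable + i, entry);
	}

	/* Swing the PMD from the huge page to the new PTE table. */
	spin_lock(&init_mm.page_table_lock);
	pmd_populate_kernel(&init_mm, pmd, pgtable);
	spin_unlock(&init_mm.page_table_lock);

	flush_tlb_kernel_range(start, start + PMD_SIZE);
	return 0;
}
```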
|
/Linux-v5.15/mm/kasan/

init.c
    103 set_pte_at(&init_mm, addr, pte, zero_pte);    (in zero_pte_populate())
    119 pmd_populate_kernel(&init_mm, pmd,    (in zero_pmd_populate())
    128 p = pte_alloc_one_kernel(&init_mm);    (in zero_pmd_populate())
    134 pmd_populate_kernel(&init_mm, pmd, p);    (in zero_pmd_populate())
    153 pud_populate(&init_mm, pud,    (in zero_pud_populate())
    156 pmd_populate_kernel(&init_mm, pmd,    (in zero_pud_populate())
    165 p = pmd_alloc(&init_mm, pud, addr);    (in zero_pud_populate())
    169 pud_populate(&init_mm, pud,    (in zero_pud_populate())
    191 p4d_populate(&init_mm, p4d,    (in zero_p4d_populate())
    194 pud_populate(&init_mm, pud,    (in zero_p4d_populate())
    [all …]
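The mm/kasan/init.c hits are the early shadow setup: each level of the kernel page tables is pointed at shared zero tables so unpoisoned shadow memory is backed by a single page. Below is a sketch of the leaf step, closely modelled on zero_pte_populate(); details may differ from the v5.15 source.

```c
#include <linux/kasan.h>
#include <linux/pfn.h>
#include <linux/pgtable.h>

/*
 * Map every PTE in [addr, end) to the single, shared, zero-filled
 * kasan_early_shadow_page, read-only.  All not-yet-poisoned shadow
 * lookups then resolve to the same physical page.
 */
static void zero_pte_populate(pmd_t *pmd, unsigned long addr,
			      unsigned long end)
{
	pte_t zero_pte = pfn_pte(PFN_DOWN(__pa(kasan_early_shadow_page)),
				 PAGE_KERNEL);
	pte_t *pte = pte_offset_kernel(pmd, addr);

	zero_pte = pte_wrprotect(zero_pte);	/* shared page: read-only */

	while (addr + PAGE_SIZE <= end) {
		set_pte_at(&init_mm, addr, pte, zero_pte);
		addr += PAGE_SIZE;
		pte = pte_offset_kernel(pmd, addr);
	}
}
```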
|
shadow.c
    273 spin_lock(&init_mm.page_table_lock);    (in kasan_populate_vmalloc_pte())
    275 set_pte_at(&init_mm, addr, ptep, pte);    (in kasan_populate_vmalloc_pte())
    278 spin_unlock(&init_mm.page_table_lock);    (in kasan_populate_vmalloc_pte())
    297 ret = apply_to_page_range(&init_mm, shadow_start,    (in kasan_populate_vmalloc())
    371 spin_lock(&init_mm.page_table_lock);    (in kasan_depopulate_vmalloc_pte())
    374 pte_clear(&init_mm, addr, ptep);    (in kasan_depopulate_vmalloc_pte())
    377 spin_unlock(&init_mm.page_table_lock);    (in kasan_depopulate_vmalloc_pte())
    485 apply_to_existing_page_range(&init_mm,    (in kasan_release_vmalloc())
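kasan_populate_vmalloc() shows the apply_to_page_range() pattern: the core walks the kernel page tables for a range (allocating intermediate tables as needed) and calls a per-PTE callback, which serialises the actual install on init_mm.page_table_lock. A hedged sketch of that pattern, with illustrative names:

```c
#include <linux/mm.h>
#include <linux/gfp.h>

/* Per-PTE callback: back one kernel page with freshly allocated memory. */
static int populate_pte(pte_t *ptep, unsigned long addr, void *unused)
{
	struct page *page;
	pte_t pte;

	if (likely(!pte_none(*ptep)))
		return 0;			/* already mapped */

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return -ENOMEM;

	pte = mk_pte(page, PAGE_KERNEL);

	spin_lock(&init_mm.page_table_lock);
	if (likely(pte_none(*ptep)))		/* re-check under the lock */
		set_pte_at(&init_mm, addr, ptep, pte);
	else
		__free_page(page);		/* somebody else won the race */
	spin_unlock(&init_mm.page_table_lock);
	return 0;
}

/* Back every page of a kernel VA range with newly allocated memory. */
static int populate_range(unsigned long start, unsigned long size)
{
	return apply_to_page_range(&init_mm, start, size, populate_pte, NULL);
}
```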
|
/Linux-v5.15/arch/powerpc/mm/

pageattr.c
    33  spin_lock(&init_mm.page_table_lock);    (in change_page_attr())
    56  pte_update(&init_mm, addr, ptep, ~0UL, pte_val(pte), 0);    (in change_page_attr())
    64  spin_unlock(&init_mm.page_table_lock);    (in change_page_attr())
    96  return apply_to_existing_page_range(&init_mm, start, size,    (in change_memory_attr())
    111 spin_lock(&init_mm.page_table_lock);    (in set_page_attr())
    113 set_pte_at(&init_mm, addr, ptep, pte_modify(*ptep, prot));    (in set_page_attr())
    116 spin_unlock(&init_mm.page_table_lock);    (in set_page_attr())
    129 return apply_to_existing_page_range(&init_mm, start, sz, set_page_attr,    (in set_memory_attr())
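The powerpc helpers use the complementary apply_to_existing_page_range(), which only visits PTEs that are already present, to rewrite the protections of an existing kernel mapping with pte_modify(). A hedged sketch; the names and the per-PTE flush policy are illustrative, not a copy of the in-tree code.

```c
#include <linux/mm.h>
#include <linux/pgtable.h>
#include <asm/tlbflush.h>

/* Per-PTE callback: rewrite the protection bits of an existing kernel
 * PTE under init_mm.page_table_lock, then flush the stale TLB entry. */
static int change_prot_pte(pte_t *ptep, unsigned long addr, void *data)
{
	pgprot_t prot = *(pgprot_t *)data;

	spin_lock(&init_mm.page_table_lock);
	set_pte_at(&init_mm, addr, ptep, pte_modify(*ptep, prot));
	spin_unlock(&init_mm.page_table_lock);

	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	return 0;
}

/* Change protections for [start, start + size) of already-mapped kernel
 * memory; holes with no mapping are simply skipped. */
static int change_kernel_prot(unsigned long start, unsigned long size,
			      pgprot_t prot)
{
	return apply_to_existing_page_range(&init_mm, start, size,
					    change_prot_pte, &prot);
}
```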
|
/Linux-v5.15/arch/powerpc/mm/book3s64/

radix_pgtable.c
    86  p4d_populate(&init_mm, p4dp, pudp);    (in early_map_kernel_page())
    96  pud_populate(&init_mm, pudp, pmdp);    (in early_map_kernel_page())
    106 pmd_populate_kernel(&init_mm, pmdp, ptep);    (in early_map_kernel_page())
    111 set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));    (in early_map_kernel_page())
    152 pudp = pud_alloc(&init_mm, p4dp, ea);    (in __map_kernel_page())
    159 pmdp = pmd_alloc(&init_mm, pudp, ea);    (in __map_kernel_page())
    171 set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));    (in __map_kernel_page())
    203 pudp = pud_alloc(&init_mm, p4dp, idx);    (in radix__change_memory_range())
    210 pmdp = pmd_alloc(&init_mm, pudp, idx);    (in radix__change_memory_range())
    221 radix__pte_update(&init_mm, idx, ptep, clear, 0, 0);    (in radix__change_memory_range())
    [all …]
|
/Linux-v5.15/arch/openrisc/kernel/

dma.c
    77  mmap_read_lock(&init_mm);    (in arch_dma_set_uncached())
    78  error = walk_page_range(&init_mm, va, va + size, &set_nocache_walk_ops,    (in arch_dma_set_uncached())
    80  mmap_read_unlock(&init_mm);    (in arch_dma_set_uncached())
    91  mmap_read_lock(&init_mm);    (in arch_dma_clear_uncached())
    93  WARN_ON(walk_page_range(&init_mm, va, va + size,    (in arch_dma_clear_uncached())
    95  mmap_read_unlock(&init_mm);    (in arch_dma_clear_uncached())
|
/Linux-v5.15/arch/powerpc/mm/nohash/

book3e_pgtable.c
    85  pudp = pud_alloc(&init_mm, p4dp, ea);    (in map_kernel_page())
    88  pmdp = pmd_alloc(&init_mm, pudp, ea);    (in map_kernel_page())
    99  p4d_populate(&init_mm, p4dp, pmdp);    (in map_kernel_page())
    104 pud_populate(&init_mm, pudp, pmdp);    (in map_kernel_page())
    109 pmd_populate_kernel(&init_mm, pmdp, ptep);    (in map_kernel_page())
    113 set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, prot));    (in map_kernel_page())
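map_kernel_page() is the textbook top-down walk: allocate any missing intermediate tables with p4d_alloc()/pud_alloc()/pmd_alloc() against init_mm and finish with set_pte_at(). A hedged, architecture-neutral sketch of the same walk (the in-tree versions additionally handle the period before the slab allocator is available):

```c
#include <linux/mm.h>
#include <asm/pgalloc.h>

/*
 * Map one page of kernel virtual address space at 'ea' to physical
 * address 'pa', allocating intermediate page tables on demand.
 */
static int map_kernel_page_sketch(unsigned long ea, phys_addr_t pa, pgprot_t prot)
{
	pgd_t *pgdp = pgd_offset_k(ea);
	p4d_t *p4dp = p4d_alloc(&init_mm, pgdp, ea);
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	if (!p4dp)
		return -ENOMEM;
	pudp = pud_alloc(&init_mm, p4dp, ea);
	if (!pudp)
		return -ENOMEM;
	pmdp = pmd_alloc(&init_mm, pudp, ea);
	if (!pmdp)
		return -ENOMEM;
	ptep = pte_alloc_kernel(pmdp, ea);
	if (!ptep)
		return -ENOMEM;

	set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, prot));
	return 0;
}
```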
|
/Linux-v5.15/arch/x86/mm/

init_64.c
    248 pgd_populate(&init_mm, pgd, p4d);    (in fill_p4d())
    260 p4d_populate(&init_mm, p4d, pud);    (in fill_pud())
    272 pud_populate(&init_mm, pud, pmd);    (in fill_pmd())
    284 pmd_populate_kernel(&init_mm, pmd, pte);    (in fill_pte())
    529 spin_lock(&init_mm.page_table_lock);    (in phys_pmd_init())
    534 spin_unlock(&init_mm.page_table_lock);    (in phys_pmd_init())
    560 spin_lock(&init_mm.page_table_lock);    (in phys_pmd_init())
    565 spin_unlock(&init_mm.page_table_lock);    (in phys_pmd_init())
    573 spin_lock(&init_mm.page_table_lock);    (in phys_pmd_init())
    574 pmd_populate_kernel_init(&init_mm, pmd, pte, init);    (in phys_pmd_init())
    [all …]
|
kasan_init_64.c
    56  pmd_populate_kernel(&init_mm, pmd, p);    (in kasan_populate_pmd())
    69  set_pte_at(&init_mm, addr, pte, entry);    (in kasan_populate_pmd())
    92  pud_populate(&init_mm, pud, p);    (in kasan_populate_pud())
    112 p4d_populate(&init_mm, p4d, p);    (in kasan_populate_p4d())
    132 pgd_populate(&init_mm, pgd, p);    (in kasan_populate_pgd())
    259 p4d_populate(&init_mm, p4d, p);    (in kasan_shallow_populate_p4ds())
    277 pgd_populate(&init_mm, pgd, p);    (in kasan_shallow_populate_pgds())
|
tlb.c
    307 if (loaded_mm == &init_mm)    (in leave_mm())
    313 switch_mm(NULL, &init_mm, NULL);    (in leave_mm())
    562 if (WARN_ON_ONCE(real_prev != &init_mm &&    (in switch_mm_irqs_off())
    604 if (real_prev != &init_mm) {    (in switch_mm_irqs_off())
    613 if (next != &init_mm)    (in switch_mm_irqs_off())
    664 if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm)    (in enter_lazy_tlb())
    687 u64 tlb_gen = atomic64_read(&init_mm.context.tlb_gen);    (in initialize_tlbstate_and_flush())
    753 if (unlikely(loaded_mm == &init_mm))    (in flush_tlb_func())
    769 switch_mm_irqs_off(NULL, &init_mm, NULL);    (in flush_tlb_func())
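x86 treats init_mm as the "lazy TLB" target: a CPU that no longer needs a user address space switches to init_mm, whose kernel half is identical everywhere, so later TLB shootdowns for the old mm can skip it. A hedged, stripped-down sketch of the check-and-switch distilled from the leave_mm() hits (the in-tree function carries extra sanity checks omitted here):

```c
#include <linux/mm_types.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

/* If this CPU is not already running on init_mm, switch to it so that
 * only the global kernel mappings stay loaded. */
static void leave_mm_sketch(void)
{
	struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);

	if (loaded_mm == &init_mm)
		return;			/* already on the kernel page tables */

	switch_mm(NULL, &init_mm, NULL);
}
```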
|
/Linux-v5.15/arch/riscv/mm/

pageattr.c
    121 mmap_read_lock(&init_mm);    (in __set_memory())
    122 ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL,    (in __set_memory())
    124 mmap_read_unlock(&init_mm);    (in __set_memory())
    169 mmap_read_lock(&init_mm);    (in set_direct_map_invalid_noflush())
    170 ret = walk_page_range(&init_mm, start, end, &pageattr_ops, &masks);    (in set_direct_map_invalid_noflush())
    171 mmap_read_unlock(&init_mm);    (in set_direct_map_invalid_noflush())
    186 mmap_read_lock(&init_mm);    (in set_direct_map_default_noflush())
    187 ret = walk_page_range(&init_mm, start, end, &pageattr_ops, &masks);    (in set_direct_map_default_noflush())
    188 mmap_read_unlock(&init_mm);    (in set_direct_map_default_noflush())
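riscv changes kernel page attributes by walking the page tables directly. Because init_mm has no VMAs, __set_memory() uses walk_page_range_novma(), still under init_mm's mmap lock. A hedged sketch of the pattern; the mask structure and callback follow the in-tree names but are reconstructed and simplified here.

```c
#include <linux/mm.h>
#include <linux/pagewalk.h>
#include <asm/tlbflush.h>

struct pageattr_masks {
	pgprot_t set_mask;
	pgprot_t clear_mask;
};

/* Leaf callback: set/clear protection bits on one kernel PTE. */
static int pageattr_pte_entry(pte_t *ptep, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	struct pageattr_masks *masks = walk->private;
	unsigned long val = pte_val(*ptep);

	val &= ~pgprot_val(masks->clear_mask);
	val |= pgprot_val(masks->set_mask);
	set_pte_at(&init_mm, addr, ptep, __pte(val));
	return 0;
}

static const struct mm_walk_ops pageattr_ops = {
	.pte_entry = pageattr_pte_entry,
};

/* Apply the masks to [start, end); init_mm has no VMAs, hence _novma. */
static int set_kernel_pageattr(unsigned long start, unsigned long end,
			       struct pageattr_masks *masks)
{
	int ret;

	mmap_read_lock(&init_mm);
	ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops,
				    NULL, masks);
	mmap_read_unlock(&init_mm);

	flush_tlb_kernel_range(start, end);
	return ret;
}
```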
|
/Linux-v5.15/arch/s390/mm/

kasan_init.c
    119 pgd_populate(&init_mm, pg_dir,    (in kasan_early_pgtable_populate())
    125 pgd_populate(&init_mm, pg_dir, p4_dir);    (in kasan_early_pgtable_populate())
    138 p4d_populate(&init_mm, p4_dir,    (in kasan_early_pgtable_populate())
    144 p4d_populate(&init_mm, p4_dir, pu_dir);    (in kasan_early_pgtable_populate())
    152 pud_populate(&init_mm, pu_dir,    (in kasan_early_pgtable_populate())
    158 pud_populate(&init_mm, pu_dir, pm_dir);    (in kasan_early_pgtable_populate())
    166 pmd_populate(&init_mm, pm_dir, kasan_early_shadow_pte);    (in kasan_early_pgtable_populate())
    184 pmd_populate(&init_mm, pm_dir, pt_dir);    (in kasan_early_pgtable_populate())
    316 init_mm.pgd = early_pg_dir;    (in kasan_early_init())
    393 pg_dir_dst = pgd_offset_raw(init_mm.pgd, KASAN_SHADOW_START);    (in kasan_copy_shadow_mapping())
|
init.c
    102 init_mm.pgd = swapper_pg_dir;    (in paging_init())
    110 init_mm.context.asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;    (in paging_init())
    111 S390_lowcore.kernel_asce = init_mm.context.asce;    (in paging_init())
    113 crst_table_init((unsigned long *) init_mm.pgd, pgd_type);    (in paging_init())
    197 cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);    (in mem_init())
    198 cpumask_set_cpu(0, mm_cpumask(&init_mm));    (in mem_init())
|
/Linux-v5.15/arch/arm64/include/asm/

mmu_context.h
    105 if (mm != &init_mm && !system_uses_ttbr0_pan())    (in cpu_uninstall_idmap())
    115 cpu_switch_mm(lm_alias(idmap_pg_dir), &init_mm);    (in cpu_install_idmap())
    179 if (mm == &init_mm)    (in update_saved_ttbr0())
    201 update_saved_ttbr0(tsk, &init_mm);    (in enter_lazy_tlb())
    210 if (next == &init_mm) {    (in __switch_mm())
|
pgalloc.h
    32  pudval |= (mm == &init_mm) ? PUD_TABLE_UXN : PUD_TABLE_PXN;    (in pud_populate())
    53  p4dval |= (mm == &init_mm) ? P4D_TABLE_UXN : P4D_TABLE_PXN;    (in p4d_populate())
    79  VM_BUG_ON(mm != &init_mm);    (in pmd_populate_kernel())
    86  VM_BUG_ON(mm == &init_mm);    (in pmd_populate())
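arm64 derives execute-never table attributes from whether a table belongs to the kernel: tables hung off init_mm are marked UXN (user space may never execute through them), user tables are marked PXN (the kernel may never execute through them). A sketch of the PUD-level helper, modelled on the v5.15 header:

```c
#include <asm/pgalloc.h>

/*
 * Link a PMD table into a PUD entry, choosing UXN for kernel tables and
 * PXN for user tables (sketch of arm64's pud_populate()).
 */
static inline void pud_populate_sketch(struct mm_struct *mm, pud_t *pudp,
				       pmd_t *pmdp)
{
	pudval_t pudval = PUD_TYPE_TABLE;

	/* Kernel tables: user-execute-never.  User tables: priv-execute-never. */
	pudval |= (mm == &init_mm) ? PUD_TABLE_UXN : PUD_TABLE_PXN;
	__pud_populate(pudp, __pa(pmdp), pudval);
}
```

The pmd_populate_kernel()/pmd_populate() pair in the last two hits enforces the same split with VM_BUG_ON(): the kernel variant must be handed init_mm, the user variant must not.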
|
/Linux-v5.15/arch/arm/mm/

kasan_init.c
    81  set_pte_at(&init_mm, addr, ptep, entry);    (in kasan_pte_populate())
    109 pmd_populate_kernel(&init_mm, pmdp, p);    (in kasan_pmd_populate())
    141 pgd_populate(&init_mm, pgdp, p);    (in kasan_pgd_populate())
    234 cpu_switch_mm(tmp_pgd_table, &init_mm);    (in kasan_init())
    279 set_pte_at(&init_mm, KASAN_SHADOW_START + i*PAGE_SIZE,    (in kasan_init())
    285 cpu_switch_mm(swapper_pg_dir, &init_mm);    (in kasan_init())
|
idmap.c
    31  pmd = pmd_alloc_one(&init_mm, addr);    (in idmap_add_pmd())
    43  pud_populate(&init_mm, pud, pmd);    (in idmap_add_pmd())
    107 idmap_pgd = pgd_alloc(&init_mm);    (in init_static_idmap())
    130 cpu_switch_mm(idmap_pgd, &init_mm);    (in setup_mm_for_reboot())
|
/Linux-v5.15/arch/powerpc/mm/kasan/

kasan_init_32.c
    28  __set_pte_at(&init_mm, va, ptep, pfn_pte(PHYS_PFN(pa), prot), 0);    (in kasan_populate_pte())
    50  pmd_populate_kernel(&init_mm, pmd, new);    (in kasan_init_shadow_page_tables())
    76  __set_pte_at(&init_mm, k_cur, pte_offset_kernel(pmd, k_cur), pte, 0);    (in kasan_init_region())
    95  __set_pte_at(&init_mm, k_cur, ptep, pte, 0);    (in kasan_update_early_region())
    190 pmd_populate_kernel(&init_mm, pmd, kasan_early_shadow_pte);    (in kasan_early_init())
|
/Linux-v5.15/arch/x86/xen/

grant-table.c
    45  set_pte_at(&init_mm, addr, gnttab_shared_vm_area.ptes[i],    (in arch_gnttab_map_shared())
    67  set_pte_at(&init_mm, addr, gnttab_status_vm_area.ptes[i],    (in arch_gnttab_map_status())
    89  set_pte_at(&init_mm, addr, ptes[i], __pte(0));    (in arch_gnttab_unmap())
    110 if (apply_to_page_range(&init_mm, (unsigned long)area->area->addr,    (in arch_gnttab_valloc())
|
/Linux-v5.15/arch/nios2/mm/

ioremap.c
    84  dir = pgd_offset(&init_mm, address);    (in remap_area_pages())
    94  p4d = p4d_alloc(&init_mm, dir, address);    (in remap_area_pages())
    97  pud = pud_alloc(&init_mm, p4d, address);    (in remap_area_pages())
    100 pmd = pmd_alloc(&init_mm, pud, address);    (in remap_area_pages())
|
/Linux-v5.15/arch/x86/kernel/

espfix_64.c
    121 p4d = p4d_alloc(&init_mm, pgd, ESPFIX_BASE_ADDR);    (in init_espfix_bsp())
    122 p4d_populate(&init_mm, p4d, espfix_pud_page);    (in init_espfix_bsp())
    171 paravirt_alloc_pmd(&init_mm, __pa(pmd_p) >> PAGE_SHIFT);    (in init_espfix_ap())
    183 paravirt_alloc_pte(&init_mm, __pa(pte_p) >> PAGE_SHIFT);    (in init_espfix_ap())
|
/Linux-v5.15/arch/powerpc/include/asm/

kfence.h
    24  pte_update(&init_mm, addr, kpte, _PAGE_PRESENT, 0, 0);    (in kfence_protect_page())
    27  pte_update(&init_mm, addr, kpte, 0, _PAGE_PRESENT, 0);    (in kfence_protect_page())
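The powerpc KFENCE hook toggles _PAGE_PRESENT on the linear-map PTE of a guarded page, so any stray access faults while the page is protected. A hedged sketch built around those two pte_update() calls; the virt_to_kpte() lookup and the flush placement are assumptions of this sketch, not a copy of the in-tree header.

```c
#include <linux/pgtable.h>
#include <asm/tlbflush.h>

/*
 * Protect/unprotect one KFENCE page by clearing/setting _PAGE_PRESENT in
 * its linear-map PTE; a protected page then faults on any access.
 */
static bool kfence_protect_page_sketch(unsigned long addr, bool protect)
{
	pte_t *kpte = virt_to_kpte(addr);	/* assumption: linear-map PTE exists */

	if (!kpte || pte_none(*kpte))
		return false;

	if (protect)
		pte_update(&init_mm, addr, kpte, _PAGE_PRESENT, 0, 0);
	else
		pte_update(&init_mm, addr, kpte, 0, _PAGE_PRESENT, 0);

	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	return true;
}
```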
|
/Linux-v5.15/arch/nds32/include/asm/

pgalloc.h
    41  BUG_ON(mm != &init_mm);    (in pmd_populate_kernel())
    56  BUG_ON(mm == &init_mm);    (in pmd_populate())
|
/Linux-v5.15/arch/parisc/mm/

fixmap.c
    29  set_pte_at(&init_mm, vaddr, pte, __mk_pte(phys, PAGE_KERNEL_RWX));    (in set_fixmap())
    41  pte_clear(&init_mm, vaddr, pte);    (in clear_fixmap())
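parisc implements set_fixmap()/clear_fixmap() as plain kernel PTE updates on init_mm. A hedged sketch of that pair, assuming the fixmap's page tables already exist (the in-tree set_fixmap() allocates the PTE table on demand and uses PAGE_KERNEL_RWX for its slots):

```c
#include <linux/pfn.h>
#include <linux/pgtable.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>

/* Install a fixmap slot: map the fixed virtual address for 'idx' to the
 * physical address 'phys' in the kernel page tables. */
void set_fixmap_sketch(enum fixed_addresses idx, phys_addr_t phys)
{
	unsigned long vaddr = __fix_to_virt(idx);
	pte_t *pte = virt_to_kpte(vaddr);	/* assumes the PTE table exists */

	set_pte_at(&init_mm, vaddr, pte, pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
	flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
}

/* Tear the slot back down. */
void clear_fixmap_sketch(enum fixed_addresses idx)
{
	unsigned long vaddr = __fix_to_virt(idx);
	pte_t *pte = virt_to_kpte(vaddr);

	pte_clear(&init_mm, vaddr, pte);
	flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
}
```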
|