/Linux-v4.19/arch/xtensa/kernel/ |
D | setup.c |
    335  initrd_is_mapped = mem_reserve(__pa(initrd_start),  in setup_arch()
    336  __pa(initrd_end)) == 0;  in setup_arch()
    343  mem_reserve(__pa(_stext), __pa(_end));  in setup_arch()
    346  mem_reserve(__pa(&_WindowVectors_text_start),  in setup_arch()
    347  __pa(&_WindowVectors_text_end));  in setup_arch()
    349  mem_reserve(__pa(&_DebugInterruptVector_text_start),  in setup_arch()
    350  __pa(&_DebugInterruptVector_text_end));  in setup_arch()
    352  mem_reserve(__pa(&_KernelExceptionVector_text_start),  in setup_arch()
    353  __pa(&_KernelExceptionVector_text_end));  in setup_arch()
    355  mem_reserve(__pa(&_UserExceptionVector_text_start),  in setup_arch()
    [all …]
|
/Linux-v4.19/arch/parisc/kernel/ |
D | firmware.c |
    166  __pa(pdc_result), 0);  in set_firmware_width_unlocked()
    247  retval = mem_pdc_call(PDC_INSTR, 0UL, __pa(pdc_result));  in pdc_instr()
    272  __pa(pdc_result), __pa(pdc_result2), len);  in pdc_chassis_info()
    296  retval = mem_pdc_call(PDC_PAT_CHASSIS_LOG, PDC_PAT_CHASSIS_WRITE_LOG, __pa(&state), __pa(&data));  in pdc_pat_chassis_send_log()
    329  retval = mem_pdc_call(PDC_CHASSIS, PDC_CHASSIS_WARN, __pa(pdc_result));  in pdc_chassis_warn()
    340  ret = mem_pdc_call(PDC_COPROC, PDC_COPROC_CFG, __pa(pdc_result));  in pdc_coproc_cfg_unlocked()
    387  retval = mem_pdc_call(PDC_IODC, PDC_IODC_READ, __pa(pdc_result), hpa,  in pdc_iodc_read()
    388  index, __pa(pdc_result2), iodc_data_size);  in pdc_iodc_read()
    414  retval = mem_pdc_call(PDC_SYSTEM_MAP, PDC_FIND_MODULE, __pa(pdc_result),  in pdc_system_map_find_mods()
    415  __pa(pdc_result2), mod_index);  in pdc_system_map_find_mods()
    [all …]
|
/Linux-v4.19/arch/x86/kernel/ |
D | head32.c |
    74   #ifdef __pa  in mk_early_pgtbl_32()
    75   #undef __pa  in mk_early_pgtbl_32()
    77   #define __pa(x) ((unsigned long)(x) - PAGE_OFFSET)  in mk_early_pgtbl_32()  macro
    82   const unsigned long limit = __pa(_end) +  in mk_early_pgtbl_32()
    85   pmd_t pl2, *pl2p = (pmd_t *)__pa(initial_pg_pmd);  in mk_early_pgtbl_32()
    88   pgd_t pl2, *pl2p = (pgd_t *)__pa(initial_page_table);  in mk_early_pgtbl_32()
    92   ptep = (pte_t *)__pa(__brk_base);  in mk_early_pgtbl_32()
    112  ptr = (unsigned long *)__pa(&max_pfn_mapped);  in mk_early_pgtbl_32()
    116  ptr = (unsigned long *)__pa(&_brk_end);  in mk_early_pgtbl_32()
|
D | machine_kexec_32.c |
    103  set_pgd(pgd, __pgd(__pa(pmd) | _PAGE_PRESENT));  in machine_kexec_page_table_set_one()
    109  set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));  in machine_kexec_page_table_set_one()
    125  (unsigned long)control_page, __pa(control_page));  in machine_kexec_prepare_page_tables()
    131  __pa(control_page), __pa(control_page));  in machine_kexec_prepare_page_tables()
    215  page_list[PA_CONTROL_PAGE] = __pa(control_page);  in machine_kexec()
    217  page_list[PA_PGD] = __pa(image->arch.pgd);  in machine_kexec()
|
D | espfix_64.c |
    178  pud = __pud(__pa(pmd_p) | (PGTABLE_PROT & ptemask));  in init_espfix_ap()
    179  paravirt_alloc_pmd(&init_mm, __pa(pmd_p) >> PAGE_SHIFT);  in init_espfix_ap()
    190  pmd = __pmd(__pa(pte_p) | (PGTABLE_PROT & ptemask));  in init_espfix_ap()
    191  paravirt_alloc_pte(&init_mm, __pa(pte_p) >> PAGE_SHIFT);  in init_espfix_ap()
    202  pte = __pte(__pa(stack_page) | ((__PAGE_KERNEL_RO | _PAGE_ENC) & ptemask));  in init_espfix_ap()
|
/Linux-v4.19/arch/ia64/hp/sim/boot/ |
D | fw-emu.c |
    285  efi_systab->fw_vendor = __pa("H\0e\0w\0l\0e\0t\0t\0-\0P\0a\0c\0k\0a\0r\0d\0\0");  in sys_fw_init()
    287  efi_systab->runtime = (void *) __pa(efi_runtime);  in sys_fw_init()
    289  efi_systab->tables = __pa(efi_tables);  in sys_fw_init()
    294  efi_runtime->get_time = (void *)__pa(&fw_efi_get_time);  in sys_fw_init()
    295  efi_runtime->set_time = (void *)__pa(&efi_unimplemented);  in sys_fw_init()
    296  efi_runtime->get_wakeup_time = (void *)__pa(&efi_unimplemented);  in sys_fw_init()
    297  efi_runtime->set_wakeup_time = (void *)__pa(&efi_unimplemented);  in sys_fw_init()
    298  efi_runtime->set_virtual_address_map = (void *)__pa(&efi_unimplemented);  in sys_fw_init()
    299  efi_runtime->get_variable = (void *)__pa(&efi_unimplemented);  in sys_fw_init()
    300  efi_runtime->get_next_variable = (void *)__pa(&efi_unimplemented);  in sys_fw_init()
    [all …]
|
/Linux-v4.19/arch/arm/mach-omap2/ |
D | omap-secure.c |
    54   outer_clean_range(__pa(param), __pa(param + 5));  in omap_secure_dispatcher()
    55   ret = omap_smc2(idx, flag, __pa(param));  in omap_secure_dispatcher()
    86   param[1] = __pa(addr); /* Physical address for saving */  in omap3_save_secure_ram()
    91   ret = save_secure_ram_context(__pa(param));  in omap3_save_secure_ram()
    129  outer_clean_range(__pa(param), __pa(param + 5));  in rx51_secure_dispatcher()
    130  ret = omap_smc3(idx, process, flag, __pa(param));  in rx51_secure_dispatcher()
|
/Linux-v4.19/arch/x86/include/asm/ |
D | pgalloc.h |
    79   paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);  in pmd_populate_kernel()
    80   set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));  in pmd_populate_kernel()
    132  paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);  in pud_populate()
    133  set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));  in pud_populate()
    140  paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);  in p4d_populate()
    141  set_p4d(p4d, __p4d(_PAGE_TABLE | __pa(pud)));  in p4d_populate()
    172  paravirt_alloc_p4d(mm, __pa(p4d) >> PAGE_SHIFT);  in pgd_populate()
    173  set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(p4d)));  in pgd_populate()
|
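Every hit in the pgalloc.h entry above follows the same two-step pattern: the freshly allocated lower-level table is reported to the paravirt hook as a page frame number (__pa(...) >> PAGE_SHIFT), and its physical address is OR-ed with _PAGE_TABLE to form the entry that set_pmd()/set_pud()/set_p4d()/set_pgd() installs. The sketch below only models that arithmetic in plain user-space C; PAGE_OFFSET, the flag values and the fake_pa() helper are illustrative assumptions, not the kernel's real implementation.

    /*
     * Minimal user-space model of the "table physical address | flags"
     * pattern seen in pmd_populate_kernel() and friends above.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT    12
    #define _PAGE_PRESENT 0x001UL
    #define _PAGE_RW      0x002UL
    #define _PAGE_TABLE   (_PAGE_PRESENT | _PAGE_RW)   /* simplified flag set */
    #define PAGE_OFFSET   0xffff888000000000ULL        /* assumed linear-map base */

    /* Stand-in for __pa(): with a linear map, "subtract the map's base". */
    static uint64_t fake_pa(uint64_t kvaddr)
    {
            return kvaddr - PAGE_OFFSET;
    }

    /* Build an upper-level entry the way set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE)) does. */
    static uint64_t make_table_entry(uint64_t table_pa)
    {
            return table_pa | _PAGE_TABLE;
    }

    int main(void)
    {
            uint64_t pte_table = PAGE_OFFSET + 0x1234000ULL;  /* pretend vaddr of a new pte page */
            uint64_t pa  = fake_pa(pte_table);
            uint64_t ent = make_table_entry(pa);

            printf("table pa %#llx, pfn %#llx (paravirt hook), entry %#llx\n",
                   (unsigned long long)pa,
                   (unsigned long long)(pa >> PAGE_SHIFT),
                   (unsigned long long)ent);
            return 0;
    }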
D | page.h |
    41   #ifndef __pa
    42   #define __pa(x) __phys_addr((unsigned long)(x))  macro
    63   #define __boot_pa(x) __pa(x)
    69   #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
|
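The x86 page.h entry above shows __pa() deferring to __phys_addr(), with virt_to_page() derived from it by shifting the physical address down to a page frame number. The toy program below mirrors only that arithmetic chain (kernel virtual address -> physical address -> PFN -> mem_map entry); the 32-bit style PAGE_OFFSET, the placeholder struct page and the tiny mem_map[] are invented for the example and are not the real macro expansion.

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT  12
    #define PAGE_SIZE   (1UL << PAGE_SHIFT)
    #define PAGE_OFFSET 0xc0000000UL          /* assumed 32-bit style linear-map base */

    struct page { int flags; };               /* placeholder for the real struct page */
    static struct page mem_map[16];           /* pretend the first 16 physical pages  */

    static unsigned long toy_pa(unsigned long kaddr) { return kaddr - PAGE_OFFSET; }
    static unsigned long toy_va(unsigned long paddr) { return paddr + PAGE_OFFSET; }

    /* Mirrors virt_to_page(kaddr): pfn_to_page(__pa(kaddr) >> PAGE_SHIFT). */
    static struct page *toy_virt_to_page(unsigned long kaddr)
    {
            return &mem_map[toy_pa(kaddr) >> PAGE_SHIFT];
    }

    int main(void)
    {
            unsigned long kaddr = PAGE_OFFSET + 3 * PAGE_SIZE + 0x10;

            printf("pa %#lx, pfn %lu, mem_map index %td, round-trip va %#lx\n",
                   toy_pa(kaddr), toy_pa(kaddr) >> PAGE_SHIFT,
                   toy_virt_to_page(kaddr) - mem_map, toy_va(toy_pa(kaddr)));
            return 0;
    }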
/Linux-v4.19/arch/parisc/mm/ |
D | init.c |
    95   search_addr = PAGE_ALIGN(__pa((unsigned long) &_end));  in get_memblock()
    338  memblock_reserve(__pa(KERNEL_BINARY_TEXT_START),  in setup_bootmem()
    354  if (__pa(initrd_start) < mem_max) {  in setup_bootmem()
    357  if (__pa(initrd_end) > mem_max) {  in setup_bootmem()
    358  initrd_reserve = mem_max - __pa(initrd_start);  in setup_bootmem()
    363  …printk(KERN_INFO "initrd: reserving %08lx-%08lx (mem_max %08lx)\n", __pa(initrd_start), __pa(initr…  in setup_bootmem()
    365  memblock_reserve(__pa(initrd_start), initrd_reserve);  in setup_bootmem()
    418  ro_start = __pa((unsigned long)_text);  in map_pages()
    419  ro_end = __pa((unsigned long)&data_start);  in map_pages()
    420  kernel_end = __pa((unsigned long)&_end);  in map_pages()
    [all …]
|
/Linux-v4.19/arch/arm/mm/ |
D | pmsa-v8.c |
    254  subtract_range(mem, ARRAY_SIZE(mem), __pa(KERNEL_START), __pa(KERNEL_END));  in pmsav8_setup()
    255  subtract_range(io, ARRAY_SIZE(io), __pa(KERNEL_START), __pa(KERNEL_END));  in pmsav8_setup()
    259  subtract_range(mem, ARRAY_SIZE(mem), CONFIG_XIP_PHYS_ADDR, __pa(_exiprom));  in pmsav8_setup()
    260  subtract_range(io, ARRAY_SIZE(io), CONFIG_XIP_PHYS_ADDR, __pa(_exiprom));  in pmsav8_setup()
    276  err |= pmsav8_setup_fixed(PMSAv8_XIP_REGION, CONFIG_XIP_PHYS_ADDR, __pa(_exiprom));  in pmsav8_setup()
    279  err |= pmsav8_setup_fixed(region++, __pa(KERNEL_START), __pa(KERNEL_END));  in pmsav8_setup()
|
/Linux-v4.19/arch/x86/xen/ |
D | p2m.c |
    337  paravirt_alloc_pte(&init_mm, __pa(p2m_missing_pte) >> PAGE_SHIFT);  in xen_rebuild_p2m_list()
    339  paravirt_alloc_pte(&init_mm, __pa(p2m_identity_pte) >> PAGE_SHIFT);  in xen_rebuild_p2m_list()
    342  pfn_pte(PFN_DOWN(__pa(p2m_missing)), PAGE_KERNEL_RO));  in xen_rebuild_p2m_list()
    344  pfn_pte(PFN_DOWN(__pa(p2m_identity)), PAGE_KERNEL_RO));  in xen_rebuild_p2m_list()
    381  pfn_pte(PFN_DOWN(__pa(mfns)), PAGE_KERNEL));  in xen_rebuild_p2m_list()
    391  pfn_pte(PFN_DOWN(__pa(mfns)), PAGE_KERNEL_RO));  in xen_rebuild_p2m_list()
    401  set_pmd(pmdp, __pmd(__pa(ptep) | _KERNPG_TABLE));  in xen_rebuild_p2m_list()
    450  if (pte_pfn(*ptep) == PFN_DOWN(__pa(p2m_identity)))  in get_phys_to_machine()
    488  paravirt_alloc_pte(&init_mm, __pa(pte_newpg[i]) >> PAGE_SHIFT);  in alloc_p2m_pmd()
    500  __pmd(__pa(pte_newpg[i]) | _KERNPG_TABLE));  in alloc_p2m_pmd()
    [all …]
|
D | mmu_pv.c |
    798   xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));  in __xen_pgd_pin()
    803   PFN_DOWN(__pa(user_pgd)));  in __xen_pgd_pin()
    812   xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));  in __xen_pgd_pin()
    917   xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));  in __xen_pgd_unpin()
    925   PFN_DOWN(__pa(user_pgd)));  in __xen_pgd_unpin()
    993   if (this_cpu_read(xen_current_cr3) == __pa(mm->pgd))  in drop_mm_ref_this_cpu()
    1012  if (per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))  in xen_drop_mm_ref()
    1028  if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))  in xen_drop_mm_ref()
    1120  unsigned long pa = __pa(pgtbl) & PHYSICAL_PAGE_MASK;  in xen_cleanmfnmap_free_pgtbl()
    1241  memblock_free(__pa(addr), size);  in xen_pagetable_p2m_free()
    [all …]
|
/Linux-v4.19/arch/um/kernel/ |
D | physmem.c |
    83   unsigned long pfn = PFN_UP(__pa(reserve_end));  in setup_physmem()
    112  os_seek_file(physmem_fd, __pa(__syscall_stub_start));  in setup_physmem()
    117  free_bootmem(__pa(reserve_end) + bootmap_size,  in setup_physmem()
    129  else if (phys < __pa(end_iomem)) {  in phys_mapping()
    142  else if (phys < __pa(end_iomem) + highmem) {  in phys_mapping()
    214  region->phys = __pa(region->virt);  in setup_iomem()
|
D | mem.c |
    48   map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0);  in mem_init()
    49   free_bootmem(__pa(brk_end), uml_reserved - brk_end);  in mem_init()
    69   (unsigned long) __pa(pte)));  in one_page_table_init()
    79   set_pud(pud, __pud(_KERNPG_TABLE + (unsigned long) __pa(pmd_table)));  in one_md_table_init()
    129  p = __pa(v);  in fixaddr_user_init()
|
/Linux-v4.19/arch/x86/mm/ |
D | kasan_init_64.c |
    32   __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid);  in early_alloc()
    35   __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid);  in early_alloc()
    50   if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL))  in kasan_populate_pmd()
    53   memblock_free(__pa(p), PMD_SIZE);  in kasan_populate_pmd()
    69   entry = pfn_pte(PFN_DOWN(__pa(p)), PAGE_KERNEL);  in kasan_populate_pmd()
    87   if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL))  in kasan_populate_pud()
    90   memblock_free(__pa(p), PUD_SIZE);  in kasan_populate_pud()
    322  __pgd(__pa(tmp_p4d_table) | _KERNPG_TABLE));  in kasan_init()
    363  early_pfn_to_nid(__pa(_stext)));  in kasan_init()
    384  pte = __pte(__pa(kasan_zero_page) | pgprot_val(prot));  in kasan_init()
|
D | pageattr.c |
    743   unsigned long pfn = PFN_DOWN(__pa(address));  in __split_large_page()
    934   set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));  in alloc_pte_page()
    944   set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));  in alloc_pmd_page()
    1138  set_pgd(pgd_entry, __pgd(__pa(p4d) | _KERNPG_TABLE));  in populate_pgd()
    1150  set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE));  in populate_pgd()
    1202  cpa->pfn = __pa(vaddr) >> PAGE_SHIFT;  in __cpa_process_fault()
    1598  ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,  in set_memory_uc()
    1610  free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);  in set_memory_uc()
    1624  ret = reserve_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE,  in _set_memory_array()
    1650  free_memtype(__pa(addr[j]), __pa(addr[j]) + PAGE_SIZE);  in _set_memory_array()
    [all …]
|
/Linux-v4.19/arch/hexagon/include/asm/ |
D | page.h |
    99   #define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET)  macro
    106  #define virt_to_page(kaddr) pfn_to_page(PFN_DOWN(__pa(kaddr)))
    113  #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
    143  #define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
|
/Linux-v4.19/arch/ia64/hp/sim/ |
D | simscsi.c |
    135  req.addr = __pa(sg_virt(sl));  in simscsi_sg_readwrite()
    141  ia64_ssc(stat.fd, 1, __pa(&req), offset, mode);  in simscsi_sg_readwrite()
    142  ia64_ssc(__pa(&stat), 0, 0, 0, SSC_WAIT_COMPLETION);  in simscsi_sg_readwrite()
    182  req.addr = __pa(&buf);  in simscsi_get_disk_size()
    184  ia64_ssc(fd, 1, __pa(&req), ((sectors | bit) - 1)*512, SSC_READ);  in simscsi_get_disk_size()
    186  ia64_ssc(__pa(&stat), 0, 0, 0, SSC_WAIT_COMPLETION);  in simscsi_get_disk_size()
    230  desc[target_id] = ia64_ssc(__pa(fname), SSC_READ_ACCESS|SSC_WRITE_ACCESS,  in simscsi_queuecommand_lck()
|
/Linux-v4.19/arch/ia64/mm/ |
D | init.c |
    440  map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);  in create_mem_map_page_table()
    441  map_end = vmem_map + (__pa(end) >> PAGE_SHIFT);  in create_mem_map_page_table()
    445  node = paddr_to_nid(__pa(start));  in create_mem_map_page_table()
    462  set_pte(pte, pfn_pte(__pa(alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE)) >> PAGE_SHIFT,  in create_mem_map_page_table()
    482  map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);  in virtual_memmap_init()
    483  map_end = vmem_map + (__pa(end) >> PAGE_SHIFT);  in virtual_memmap_init()
    567  memblock_add_node(__pa(start), end - start, nid);  in register_active_ranges()
    576  pfn_start = (PAGE_ALIGN(__pa(start))) >> PAGE_SHIFT;  in find_max_min_low_pfn()
    577  pfn_end = (PAGE_ALIGN(__pa(end - 1))) >> PAGE_SHIFT;  in find_max_min_low_pfn()
    579  pfn_start = GRANULEROUNDDOWN(__pa(start)) >> PAGE_SHIFT;  in find_max_min_low_pfn()
    [all …]
|
/Linux-v4.19/arch/parisc/include/asm/ |
D | pgalloc.h |
    38   + (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT));  in pgd_alloc()
    62   (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));  in pgd_populate()
    113  + (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT));  in pmd_populate_kernel()
    117  + (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT));  in pmd_populate_kernel()
|
/Linux-v4.19/drivers/i2c/busses/ |
D | i2c-opal.c |
    102  req.buffer_ra = cpu_to_be64(__pa(msgs[0].buf));  in i2c_opal_master_xfer()
    113  req.buffer_ra = cpu_to_be64(__pa(msgs[1].buf));  in i2c_opal_master_xfer()
    138  req.buffer_ra = cpu_to_be64(__pa(&data->byte));  in i2c_opal_smbus_xfer()
    146  req.buffer_ra = cpu_to_be64(__pa(&data->byte));  in i2c_opal_smbus_xfer()
    158  req.buffer_ra = cpu_to_be64(__pa(local));  in i2c_opal_smbus_xfer()
    166  req.buffer_ra = cpu_to_be64(__pa(&data->block[1]));  in i2c_opal_smbus_xfer()
|
/Linux-v4.19/arch/s390/kernel/ |
D | suspend.c |
    154  unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin));  in pfn_is_nosave()
    155  unsigned long nosave_end_pfn = PFN_DOWN(__pa(&__nosave_end));  in pfn_is_nosave()
    156  unsigned long end_rodata_pfn = PFN_DOWN(__pa(__end_rodata)) - 1;  in pfn_is_nosave()
    157  unsigned long stext_pfn = PFN_DOWN(__pa(_stext));  in pfn_is_nosave()
|
/Linux-v4.19/arch/sparc/include/asm/ |
D | page_64.h |
    147  #define __pa(x) ((unsigned long)(x) - PAGE_OFFSET)  macro
    152  #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr)>>PAGE_SHIFT)
    154  #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
    156  #define virt_to_phys __pa
|
/Linux-v4.19/arch/powerpc/include/asm/ |
D | page.h |
    140  #define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
    232  #define __pa(x) ((unsigned long)(x) - VIRT_PHYS_OFFSET)  macro
    240  #define __pa(x) ((unsigned long)(x) & 0x0fffffffffffffffUL)  macro
    244  #define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + MEMORY_START)  macro
|
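The powerpc page.h entry shows three configuration-dependent definitions of __pa(): subtracting VIRT_PHYS_OFFSET, masking off the top nibble, or subtracting PAGE_OFFSET and adding MEMORY_START. When the kernel linear map is based at 0xc000000000000000 (a common 64-bit layout, taken here as an assumption rather than from the listing), the mask form and the plain subtraction compute the same physical address, which the small user-space check below verifies; it is not kernel code.

    #include <assert.h>
    #include <stdio.h>

    #define PAGE_OFFSET 0xc000000000000000ULL   /* assumed 64-bit linear-map base      */
    #define PA_MASK     0x0fffffffffffffffULL   /* mask form shown at line 240 above   */

    int main(void)
    {
            unsigned long long vaddrs[] = {
                    PAGE_OFFSET,
                    PAGE_OFFSET + 0x1000ULL,
                    PAGE_OFFSET + 0x123456780ULL,
            };

            for (unsigned int i = 0; i < sizeof(vaddrs) / sizeof(vaddrs[0]); i++) {
                    unsigned long long by_mask = vaddrs[i] & PA_MASK;      /* __pa() via masking     */
                    unsigned long long by_sub  = vaddrs[i] - PAGE_OFFSET;  /* __pa() via subtraction */

                    assert(by_mask == by_sub);
                    printf("va %#llx -> pa %#llx\n", vaddrs[i], by_mask);
            }
            return 0;
    }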