/Linux-v5.4/arch/powerpc/kvm/ |
D | book3s_32_mmu.c |
     69  static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
     75  static u32 find_sr(struct kvm_vcpu *vcpu, gva_t eaddr)  in find_sr() argument
     77  return kvmppc_get_sr(vcpu, (eaddr >> 28) & 0xf);  in find_sr()
     80  static u64 kvmppc_mmu_book3s_32_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,  in kvmppc_mmu_book3s_32_ea_to_vp() argument
     86  if (!kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, &pte, data, false))  in kvmppc_mmu_book3s_32_ea_to_vp()
     89  kvmppc_mmu_book3s_32_esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);  in kvmppc_mmu_book3s_32_ea_to_vp()
     90  return (((u64)eaddr >> 12) & 0xffff) | (vsid << 16);  in kvmppc_mmu_book3s_32_ea_to_vp()
     99  u32 sre, gva_t eaddr,  in kvmppc_mmu_book3s_32_get_pteg() argument
    106  page = (eaddr & 0x0FFFFFFF) >> 12;  in kvmppc_mmu_book3s_32_get_pteg()
    117  kvmppc_get_pc(vcpu), eaddr, vcpu_book3s->sdr1, pteg,  in kvmppc_mmu_book3s_32_get_pteg()
    [all …]
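
The book3s_32_mmu.c hits above show how a 32-bit Book3S effective address is decomposed: the top four bits pick a segment register, bits 12-27 give the page index within the 256 MB segment, and the VSID is folded in to form the virtual page number. A minimal user-space sketch of that composition (the segment-register array and VSID width here are assumptions, not the kernel's data structures):

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: mirrors the shifts visible in the hits above. */
#define SR_INDEX(ea)    (((ea) >> 28) & 0xf)               /* which of the 16 segment registers */
#define PAGE_IN_SEG(ea) (((uint64_t)(ea) >> 12) & 0xffff)  /* page index inside a 256 MB segment */

/* Hypothetical stand-in for the per-vcpu segment-register file. */
static uint32_t segment_regs[16];

static uint64_t ea_to_virtual_page(uint32_t ea)
{
    uint64_t vsid = segment_regs[SR_INDEX(ea)] & 0xffffff;  /* assume VSID in the low 24 bits */

    /* Same composition as the ea_to_vp hit above:
     * virtual page = page-within-segment | (vsid << 16). */
    return PAGE_IN_SEG(ea) | (vsid << 16);
}

int main(void)
{
    segment_regs[0x3] = 0x123456;   /* pretend VSID for segment 3 */
    printf("vp = 0x%llx\n",
           (unsigned long long)ea_to_virtual_page(0x30005000u));
    return 0;
}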
|
D | book3s_64_mmu.c |
     43  gva_t eaddr)  in kvmppc_mmu_book3s_64_find_slbe() argument
     46  u64 esid = GET_ESID(eaddr);  in kvmppc_mmu_book3s_64_find_slbe()
     47  u64 esid_1t = GET_ESID_1T(eaddr);  in kvmppc_mmu_book3s_64_find_slbe()
     63  eaddr, esid, esid_1t);  in kvmppc_mmu_book3s_64_find_slbe()
     87  static u64 kvmppc_slb_calc_vpn(struct kvmppc_slb *slb, gva_t eaddr)  in kvmppc_slb_calc_vpn() argument
     89  eaddr &= kvmppc_slb_offset_mask(slb);  in kvmppc_slb_calc_vpn()
     91  return (eaddr >> VPN_SHIFT) |  in kvmppc_slb_calc_vpn()
     95  static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,  in kvmppc_mmu_book3s_64_ea_to_vp() argument
    100  slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);  in kvmppc_mmu_book3s_64_ea_to_vp()
    104  return kvmppc_slb_calc_vpn(slb, eaddr);  in kvmppc_mmu_book3s_64_ea_to_vp()
    [all …]
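
On 64-bit Book3S the segment lookup goes through the SLB instead: the ESID is the effective address shifted by the segment size (256 MB or 1 TB), and the VPN combines the in-segment offset with the matching entry's VSID. A sketch under assumed shift values (SID_SHIFT = 28, SID_SHIFT_1T = 40, VPN_SHIFT = 12); the structures are simplified stand-ins, not the kernel's kvmppc_slb:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define SID_SHIFT      28   /* 256 MB segments (assumed value) */
#define SID_SHIFT_1T   40   /* 1 TB segments (assumed value) */
#define VPN_SHIFT      12

struct slb_entry {
    bool     valid;
    bool     large_seg;   /* true: 1 TB segment, false: 256 MB */
    uint64_t esid;        /* effective segment id */
    uint64_t vsid;        /* virtual segment id */
};

/* Linear search over a tiny illustrative SLB, mirroring the
 * esid/esid_1t comparison in the find_slbe hits above. */
static struct slb_entry *find_slbe(struct slb_entry *slb, int n, uint64_t ea)
{
    uint64_t esid    = ea >> SID_SHIFT;
    uint64_t esid_1t = ea >> SID_SHIFT_1T;

    for (int i = 0; i < n; i++) {
        uint64_t cmp = slb[i].large_seg ? esid_1t : esid;

        if (slb[i].valid && slb[i].esid == cmp)
            return &slb[i];
    }
    return NULL;
}

static uint64_t slb_calc_vpn(const struct slb_entry *slb, uint64_t ea)
{
    unsigned sid_shift = slb->large_seg ? SID_SHIFT_1T : SID_SHIFT;
    uint64_t seg_mask  = (1ULL << sid_shift) - 1;

    /* Keep only the offset within the segment, then fold in the VSID,
     * as the kvmppc_slb_calc_vpn hit does with its offset mask. */
    return ((ea & seg_mask) >> VPN_SHIFT) | (slb->vsid << (sid_shift - VPN_SHIFT));
}

int main(void)
{
    struct slb_entry slb[1] = {
        { .valid = true, .large_seg = false, .esid = 0x10, .vsid = 0xabcd },
    };
    struct slb_entry *e = find_slbe(slb, 1, 0x100123000ULL);

    if (e)
        printf("vpn = 0x%llx\n", (unsigned long long)slb_calc_vpn(e, 0x100123000ULL));
    return 0;
}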
|
D | trace_pr.h |
     39  __field( unsigned long, eaddr )
     49  __entry->eaddr = orig_pte->eaddr;
     57  __entry->flag_w, __entry->flag_x, __entry->eaddr,
     70  __field( ulong, eaddr )
     79  __entry->eaddr = pte->pte.eaddr;
     88  __entry->host_vpn, __entry->pfn, __entry->eaddr,
     99  __field( ulong, eaddr )
    108  __entry->eaddr = pte->pte.eaddr;
    117  __entry->host_vpn, __entry->pfn, __entry->eaddr,
|
D | book3s_32_mmu_host.c |
     59  asm volatile ("tlbie %0" : : "r" (pte->pte.eaddr) : "memory");  in kvmppc_mmu_invalidate_pte()
    106  static u32 *kvmppc_mmu_get_pteg(struct kvm_vcpu *vcpu, u32 vsid, u32 eaddr,  in kvmppc_mmu_get_pteg() argument
    112  page = (eaddr & ~ESID_MASK) >> 12;  in kvmppc_mmu_get_pteg()
    138  u32 eaddr = orig_pte->eaddr;  in kvmppc_mmu_map_page() local
    158  vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);  in kvmppc_mmu_map_page()
    161  kvmppc_mmu_map_segment(vcpu, eaddr);  in kvmppc_mmu_map_page()
    168  ((eaddr & ~ESID_MASK) >> VPN_SHIFT);  in kvmppc_mmu_map_page()
    176  pteg = kvmppc_mmu_get_pteg(vcpu, vsid, eaddr, primary);  in kvmppc_mmu_map_page()
    194  pteg0 = ((eaddr & 0x0fffffff) >> 22) | (vsid << 7) | PTE_V |  in kvmppc_mmu_map_page()
    243  orig_pte->eaddr, (ulong)pteg, vpn,  in kvmppc_mmu_map_page()
    [all …]
|
D | e500_mmu.c |
     81  gva_t eaddr, int tlbsel, unsigned int pid, int as)  in kvmppc_e500_tlb_index() argument
     88  set_base = gtlb0_set_base(vcpu_e500, eaddr);  in kvmppc_e500_tlb_index()
     91  if (eaddr < vcpu_e500->tlb1_min_eaddr ||  in kvmppc_e500_tlb_index()
     92  eaddr > vcpu_e500->tlb1_max_eaddr)  in kvmppc_e500_tlb_index()
    104  if (eaddr < get_tlb_eaddr(tlbe))  in kvmppc_e500_tlb_index()
    107  if (eaddr > get_tlb_end(tlbe))  in kvmppc_e500_tlb_index()
    127  gva_t eaddr, int as)  in kvmppc_e500_deliver_tlb_miss() argument
    143  vcpu->arch.shared->mas2 = (eaddr & MAS2_EPN)  in kvmppc_e500_deliver_tlb_miss()
    155  gva_t eaddr;  in kvmppc_recalc_tlb1map_range() local
    169  eaddr = get_tlb_eaddr(tlbe);  in kvmppc_recalc_tlb1map_range()
    [all …]
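
kvmppc_e500_tlb_index() above rejects an address quickly when it lies outside the cached tlb1_min_eaddr/tlb1_max_eaddr bounds, then compares it against each entry's start and end. A self-contained sketch of that lookup pattern (the entry layout and bounds caching are illustrative, not the e500 structures):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative guest-TLB entry: each entry covers [eaddr, end]. */
struct gtlb_entry {
    uint64_t eaddr;   /* first effective address covered */
    uint64_t end;     /* last effective address covered */
    int      valid;
};

/* Cached bounds over all valid entries, like tlb1_min_eaddr/tlb1_max_eaddr,
 * so most misses are rejected without scanning. */
struct gtlb {
    struct gtlb_entry *e;
    size_t             n;
    uint64_t           min_eaddr, max_eaddr;
};

static int gtlb_lookup(const struct gtlb *t, uint64_t ea)
{
    if (ea < t->min_eaddr || ea > t->max_eaddr)
        return -1;                       /* outside every entry: fast miss */

    for (size_t i = 0; i < t->n; i++) {
        if (!t->e[i].valid)
            continue;
        if (ea < t->e[i].eaddr)          /* below this entry */
            continue;
        if (ea > t->e[i].end)            /* above this entry */
            continue;
        return (int)i;                   /* hit */
    }
    return -1;
}

int main(void)
{
    struct gtlb_entry ent[2] = {
        { 0x10000000, 0x10ffffff, 1 },
        { 0xc0000000, 0xc3ffffff, 1 },
    };
    struct gtlb t = { ent, 2, 0x10000000, 0xc3ffffff };

    printf("index = %d\n", gtlb_lookup(&t, 0xc1234567));
    return 0;
}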
|
D | book3s_mmu_hpte.c |
     26  static inline u64 kvmppc_mmu_hash_pte(u64 eaddr)  in kvmppc_mmu_hash_pte() argument
     28  return hash_64(eaddr >> PTE_SIZE, HPTEG_HASH_BITS_PTE);  in kvmppc_mmu_hash_pte()
     31  static inline u64 kvmppc_mmu_hash_pte_long(u64 eaddr)  in kvmppc_mmu_hash_pte_long() argument
     33  return hash_64((eaddr & 0x0ffff000) >> PTE_SIZE,  in kvmppc_mmu_hash_pte_long()
     66  index = kvmppc_mmu_hash_pte(pte->pte.eaddr);  in kvmppc_mmu_hpte_cache_map()
     70  index = kvmppc_mmu_hash_pte_long(pte->pte.eaddr);  in kvmppc_mmu_hpte_cache_map()
    163  if ((pte->pte.eaddr & ~0xfffUL) == guest_ea)  in kvmppc_mmu_pte_flush_page()
    183  if ((pte->pte.eaddr & 0x0ffff000UL) == guest_ea)  in kvmppc_mmu_pte_flush_long()
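
book3s_mmu_hpte.c buckets shadow PTEs by hashing the effective address with the kernel's hash_64(). The sketch below re-implements a hash_64-style multiplicative hash in user space and shows the two bucket functions; the shift, mask, and bucket-count constants here are assumptions chosen for illustration:

#include <stdint.h>
#include <stdio.h>

/* User-space stand-in for hash_64(): multiply by a 64-bit odd constant
 * and keep the top 'bits' bits.  Constant and bucket counts below are
 * assumptions, not the kernel's values. */
static inline uint64_t hash_64(uint64_t val, unsigned int bits)
{
    return (val * 0x61C8864680B583EBULL) >> (64 - bits);
}

#define PTE_SHIFT        12   /* assume 4 KB shadow-PTE granularity */
#define HASH_BITS_PTE    4    /* 16 buckets, illustrative only */
#define HASH_BITS_LONG   3    /*  8 buckets for the "long" variant */

/* Per-page bucket: hash the page-aligned effective address. */
static uint64_t hash_pte(uint64_t eaddr)
{
    return hash_64(eaddr >> PTE_SHIFT, HASH_BITS_PTE);
}

/* "Long" bucket: hash only the low segment-offset bits, so translations
 * sharing them land in one bucket and can be flushed together. */
static uint64_t hash_pte_long(uint64_t eaddr)
{
    return hash_64((eaddr & 0x0ffff000) >> PTE_SHIFT, HASH_BITS_LONG);
}

int main(void)
{
    uint64_t ea = 0x0000000012345000ULL;

    printf("bucket=%llu long-bucket=%llu\n",
           (unsigned long long)hash_pte(ea),
           (unsigned long long)hash_pte_long(ea));
    return 0;
}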
|
D | book3s_64_mmu_host.c |
    106  vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);  in kvmppc_mmu_map_page()
    109  ret = kvmppc_mmu_map_segment(vcpu, orig_pte->eaddr);  in kvmppc_mmu_map_page()
    115  vsid, orig_pte->eaddr);  in kvmppc_mmu_map_page()
    121  vpn = hpt_vpn(orig_pte->eaddr, map->host_vsid, MMU_SEGSIZE_256M);  in kvmppc_mmu_map_page()
    217  vcpu->arch.mmu.esid_to_vsid(vcpu, pte->eaddr >> SID_SHIFT, &vsid);  in kvmppc_mmu_unmap_page()
    310  int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)  in kvmppc_mmu_map_segment() argument
    313  u64 esid = eaddr >> SID_SHIFT;  in kvmppc_mmu_map_segment()
    314  u64 slb_esid = (eaddr & ESID_MASK) | SLB_ESID_V;  in kvmppc_mmu_map_segment()
    321  slb_index = kvmppc_mmu_next_segment(vcpu, eaddr & ESID_MASK);  in kvmppc_mmu_map_segment()
|
D | book3s_64_mmu_radix.c |
     31  gva_t eaddr, void *to, void *from,  in __kvmhv_copy_tofrom_guest_radix() argument
     40  return plpar_hcall_norets(H_COPY_TOFROM_GUEST, lpid, pid, eaddr,  in __kvmhv_copy_tofrom_guest_radix()
     47  from = (void *) (eaddr | (quadrant << 62));  in __kvmhv_copy_tofrom_guest_radix()
     49  to = (void *) (eaddr | (quadrant << 62));  in __kvmhv_copy_tofrom_guest_radix()
     84  static long kvmhv_copy_tofrom_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,  in kvmhv_copy_tofrom_guest_radix() argument
     91  if (eaddr & (0x3FFUL << 52))  in kvmhv_copy_tofrom_guest_radix()
     99  if (((eaddr >> 62) & 0x3) == 0x3)  in kvmhv_copy_tofrom_guest_radix()
    102  eaddr &= ~(0xFFFUL << 52);  in kvmhv_copy_tofrom_guest_radix()
    104  return __kvmhv_copy_tofrom_guest_radix(lpid, pid, eaddr, to, from, n);  in kvmhv_copy_tofrom_guest_radix()
    107  long kvmhv_copy_from_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr, void *to,  in kvmhv_copy_from_guest_radix() argument
    [all …]
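
The radix copy-to/from-guest hits encode which quadrant of the address space is targeted in the top two bits of the effective address, after checking that bits 52-61 are clear. A hedged sketch of that encoding (the bit positions mirror the snippets; everything else is simplified):

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: bits 62-63 select the quadrant, bits 52-61 must be
 * zero, and quadrant 3 is rejected, as in the snippets above. */
static int make_quadrant_addr(uint64_t eaddr, unsigned int quadrant,
                              uint64_t *out)
{
    if (quadrant > 2)                    /* quadrant 3 is rejected */
        return -1;
    if (eaddr & (0x3FFULL << 52))        /* reserved bits must be clear */
        return -1;

    eaddr &= ~(0xFFFULL << 52);          /* strip any stale top bits */
    *out = eaddr | ((uint64_t)quadrant << 62);
    return 0;
}

int main(void)
{
    uint64_t q;

    if (!make_quadrant_addr(0x0000123456789000ULL, 1, &q))
        printf("quadrant address = 0x%016llx\n", (unsigned long long)q);
    return 0;
}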
|
D | e500_mmu_host.c |
    105  static u32 get_host_mas0(unsigned long eaddr)  in get_host_mas0() argument
    115  asm volatile("tlbsx 0, %0" : : "b" (eaddr & ~CONFIG_PAGE_OFFSET));  in get_host_mas0()
    586  void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,  in kvmppc_mmu_map() argument
    606  &priv->ref, eaddr, &stlbe);  in kvmppc_mmu_map()
    613  kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, gtlbe, &stlbe,  in kvmppc_mmu_map()
    631  hva_t eaddr;  in kvmppc_load_last_inst() local
    708  eaddr = (unsigned long)kmap_atomic(page);  in kvmppc_load_last_inst()
    709  *instr = *(u32 *)(eaddr | (unsigned long)(addr & ~PAGE_MASK));  in kvmppc_load_last_inst()
    710  kunmap_atomic((u32 *)eaddr);  in kvmppc_load_last_inst()
|
D | booke.c |
   1236  unsigned long eaddr = vcpu->arch.fault_dear;  in kvmppc_handle_exit() local
   1243  (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {  in kvmppc_handle_exit()
   1253  gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);  in kvmppc_handle_exit()
   1267  gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);  in kvmppc_handle_exit()
   1277  kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);  in kvmppc_handle_exit()
   1284  vcpu->arch.vaddr_accessed = eaddr;  in kvmppc_handle_exit()
   1294  unsigned long eaddr = vcpu->arch.regs.nip;  in kvmppc_handle_exit() local
   1302  gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);  in kvmppc_handle_exit()
   1315  gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);  in kvmppc_handle_exit()
   1325  kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);  in kvmppc_handle_exit()
   [all …]
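
The booke.c exit handler takes the faulting effective address (fault_dear for data, the guest nip for instructions), looks up the guest TLB entry, translates it to a guest physical address, and installs a shadow mapping; if there is no guest entry, the miss is reflected to the guest. A condensed sketch of that flow with hypothetical stand-ins for the lookup and translate helpers:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical guest-TLB entry used only to exercise the flow. */
struct fake_gtlb { uint64_t eaddr, gpaddr, size; };

static struct fake_gtlb gtlb[] = {
    { 0x10000000, 0x40000000, 0x1000000 },   /* one illustrative entry */
};

static int dtlb_index(uint64_t ea)
{
    for (unsigned i = 0; i < sizeof(gtlb) / sizeof(gtlb[0]); i++)
        if (ea >= gtlb[i].eaddr && ea < gtlb[i].eaddr + gtlb[i].size)
            return (int)i;
    return -1;                                /* guest TLB miss */
}

static uint64_t xlate(int idx, uint64_t ea)
{
    return gtlb[idx].gpaddr + (ea - gtlb[idx].eaddr);
}

static void handle_dtlb_miss(uint64_t fault_ea)
{
    int idx = dtlb_index(fault_ea);

    if (idx < 0) {
        /* No guest mapping: reflect the miss to the guest kernel. */
        printf("deliver DTLB miss to guest for 0x%llx\n",
               (unsigned long long)fault_ea);
        return;
    }

    /* Guest mapping exists: translate and install a shadow mapping,
     * as the xlate + map hits above do. */
    printf("map ea 0x%llx -> gpa 0x%llx (gtlb %d)\n",
           (unsigned long long)fault_ea,
           (unsigned long long)xlate(idx, fault_ea), idx);
}

int main(void)
{
    handle_dtlb_miss(0x10004000);
    handle_dtlb_miss(0xdead0000);
    return 0;
}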
|
D | e500mc.c |
     59  gva_t eaddr;  in kvmppc_e500_tlbil_one() local
     68  eaddr = get_tlb_eaddr(gtlbe);  in kvmppc_e500_tlbil_one()
     75  asm volatile("tlbsx 0, %[eaddr]\n" : : [eaddr] "r" (eaddr));  in kvmppc_e500_tlbil_one()
|
D | e500.c |
    238  u32 val, eaddr;  in kvmppc_e500_tlbil_one() local
    270  eaddr = get_tlb_eaddr(gtlbe);  in kvmppc_e500_tlbil_one()
    275  asm volatile("tlbsx 0, %[eaddr]" : : [eaddr] "r" (eaddr));  in kvmppc_e500_tlbil_one()
|
D | book3s.c |
    452  int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,  in kvmppc_xlate() argument
    461  r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite);  in kvmppc_xlate()
    463  pte->eaddr = eaddr;  in kvmppc_xlate()
    464  pte->raddr = eaddr & KVM_PAM;  in kvmppc_xlate()
    465  pte->vpage = VSID_REAL | eaddr >> 12;  in kvmppc_xlate()
    474  ((eaddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))  in kvmppc_xlate()
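
When kvmppc_xlate() finds translation disabled, it falls back to a 1:1 mapping: the real address is the effective address masked to the physical-address range and the virtual page gets a real-mode tag. A sketch with placeholder mask and tag values (FAKE_PAM and FAKE_VSID_REAL are invented, not the kernel's KVM_PAM/VSID_REAL):

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define FAKE_PAM        0x0fffffffffffffffULL   /* assumed physical-address mask */
#define FAKE_VSID_REAL  (1ULL << 60)            /* assumed real-mode vpage tag */

struct xlate_pte {
    uint64_t eaddr;
    uint64_t raddr;
    uint64_t vpage;
    bool     may_read, may_write, may_execute;
};

/* Translation-off fallback: identity map within the PAM, tagged vpage. */
static void xlate_real_mode(uint64_t eaddr, struct xlate_pte *pte)
{
    pte->eaddr = eaddr;
    pte->raddr = eaddr & FAKE_PAM;
    pte->vpage = FAKE_VSID_REAL | (eaddr >> 12);
    pte->may_read = pte->may_write = pte->may_execute = true;
}

int main(void)
{
    struct xlate_pte pte;

    xlate_real_mode(0x0000000002003000ULL, &pte);
    printf("raddr=0x%llx vpage=0x%llx\n",
           (unsigned long long)pte.raddr, (unsigned long long)pte.vpage);
    return 0;
}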
|
/Linux-v5.4/arch/sh/mm/ |
D | cache-sh5.c |
     34  sh64_setup_dtlb_cache_slot(unsigned long eaddr, unsigned long asid,  in sh64_setup_dtlb_cache_slot() argument
     38  sh64_setup_tlb_slot(dtlb_cache_slot, eaddr, asid, paddr);  in sh64_setup_dtlb_cache_slot()
     87  static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long eaddr)  in sh64_icache_inv_user_page() argument
     95  addr = eaddr;  in sh64_icache_inv_user_page()
    159  unsigned long eaddr;  in sh64_icache_inv_user_page_range() local
    188  eaddr = aligned_start;  in sh64_icache_inv_user_page_range()
    189  while (eaddr < vma_end) {  in sh64_icache_inv_user_page_range()
    190  sh64_icache_inv_user_page(vma, eaddr);  in sh64_icache_inv_user_page_range()
    191  eaddr += PAGE_SIZE;  in sh64_icache_inv_user_page_range()
    244  unsigned long long eaddr, eaddr0, eaddr1;  in sh64_dcache_purge_sets() local
    [all …]
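
sh64_icache_inv_user_page_range() walks the affected range one page at a time, invalidating each page's icache lines. A minimal sketch of that loop, assuming a 4 KB page size and using a logging stub in place of the per-page invalidate:

#include <stdio.h>

#define PAGE_SIZE_SKETCH 4096UL   /* assumed page size for illustration */

/* Stand-in for the per-page invalidate: here it only logs. */
static void inv_one_page(unsigned long eaddr)
{
    printf("invalidate icache page at 0x%lx\n", eaddr);
}

/* Mirrors the loop in the hits above: align the start down to a page
 * boundary, then walk page by page up to the end of the range. */
static void inv_range(unsigned long start, unsigned long end)
{
    unsigned long eaddr = start & ~(PAGE_SIZE_SKETCH - 1);

    while (eaddr < end) {
        inv_one_page(eaddr);
        eaddr += PAGE_SIZE_SKETCH;
    }
}

int main(void)
{
    inv_range(0x10001234, 0x10003000);
    return 0;
}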
|
D | tlb-sh5.c |
    120  void sh64_setup_tlb_slot(unsigned long long config_addr, unsigned long eaddr,  in sh64_setup_tlb_slot() argument
    125  pteh = neff_sign_extend(eaddr);  in sh64_setup_tlb_slot()
|
/Linux-v5.4/arch/unicore32/mm/ |
D | alignment.c |
    290  unsigned long eaddr, newaddr;  in do_alignment_ldmstm() local
    301  newaddr = eaddr = regs->uregs[rn];  in do_alignment_ldmstm()
    307  eaddr = newaddr;  in do_alignment_ldmstm()
    310  eaddr += 4;  in do_alignment_ldmstm()
    316  if (addr != eaddr) {  in do_alignment_ldmstm()
    319  instruction_pointer(regs), instr, addr, eaddr);  in do_alignment_ldmstm()
    333  uregs[rd + reg_correction], eaddr);  in do_alignment_ldmstm()
    336  uregs[rd + reg_correction], eaddr);  in do_alignment_ldmstm()
    337  eaddr += 4;  in do_alignment_ldmstm()
|
/Linux-v5.4/arch/powerpc/include/asm/ |
D | kvm_book3s.h |
    155  extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
    156  extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong seg_size);
    161  extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr,
    181  gva_t eaddr, void *to, void *from,
    183  extern long kvmhv_copy_from_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
    185  extern long kvmhv_copy_to_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
    187  extern int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
    190  extern int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr,
    193  extern int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
    228  extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
|
D | kvm_host.h |
    379  ulong eaddr;  member
    396  int (*slbfee)(struct kvm_vcpu *vcpu, gva_t eaddr, ulong *ret_slb);
    402  int (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr,
    407  u64 (*ea_to_vp)(struct kvm_vcpu *vcpu, gva_t eaddr, bool data);
    475  unsigned long eaddr;  member
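
The kvm_host.h hits are members of a per-MMU-flavour callback table: each backend supplies hooks such as xlate() and ea_to_vp() that all take the guest effective address. A simplified sketch of such a function-pointer table with a trivial identity backend (names and signatures are illustrative, not the kernel's struct):

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

struct sketch_pte {
    uint64_t eaddr;
    uint64_t raddr;
};

struct sketch_vcpu;

/* Callback table one MMU backend fills in. */
struct sketch_mmu_ops {
    int      (*xlate)(struct sketch_vcpu *vcpu, uint64_t eaddr,
                      struct sketch_pte *pte, bool data, bool iswrite);
    uint64_t (*ea_to_vp)(struct sketch_vcpu *vcpu, uint64_t eaddr, bool data);
};

struct sketch_vcpu {
    const struct sketch_mmu_ops *mmu;
};

/* A trivial identity backend so the table can be exercised. */
static int identity_xlate(struct sketch_vcpu *vcpu, uint64_t eaddr,
                          struct sketch_pte *pte, bool data, bool iswrite)
{
    (void)vcpu; (void)data; (void)iswrite;
    pte->eaddr = eaddr;
    pte->raddr = eaddr;
    return 0;
}

static uint64_t identity_ea_to_vp(struct sketch_vcpu *vcpu, uint64_t eaddr, bool data)
{
    (void)vcpu; (void)data;
    return eaddr >> 12;
}

static const struct sketch_mmu_ops identity_ops = {
    .xlate    = identity_xlate,
    .ea_to_vp = identity_ea_to_vp,
};

int main(void)
{
    struct sketch_vcpu vcpu = { .mmu = &identity_ops };
    struct sketch_pte pte;

    vcpu.mmu->xlate(&vcpu, 0x1000, &pte, true, false);
    printf("raddr=0x%llx vp=0x%llx\n",
           (unsigned long long)pte.raddr,
           (unsigned long long)vcpu.mmu->ea_to_vp(&vcpu, 0x1000, true));
    return 0;
}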
|
D | kvm_ppc.h |
     89  extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
     91  extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
    112  extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
    113  extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
    115  gva_t eaddr);
    118  extern int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr,
    320  int (*load_from_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
    322  int (*store_to_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
|
/Linux-v5.4/arch/arm/mm/ |
D | alignment.c |
    503  unsigned long eaddr, newaddr;  in do_alignment_ldmstm() local
    517  newaddr = eaddr = regs->uregs[rn];  in do_alignment_ldmstm()
    523  eaddr = newaddr;  in do_alignment_ldmstm()
    526  eaddr += 4;  in do_alignment_ldmstm()
    540  if (addr != eaddr) {  in do_alignment_ldmstm()
    543  instruction_pointer(regs), instr, addr, eaddr);  in do_alignment_ldmstm()
    555  get32t_unaligned_check(val, eaddr);  in do_alignment_ldmstm()
    558  put32t_unaligned_check(regs->uregs[rd], eaddr);  in do_alignment_ldmstm()
    559  eaddr += 4;  in do_alignment_ldmstm()
    568  get32_unaligned_check(val, eaddr);  in do_alignment_ldmstm()
    [all …]
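
do_alignment_ldmstm() recomputes the addresses an unaligned LDM/STM would have touched: one 32-bit word per set bit in the register list, with eaddr advancing by 4 each time, and the start address derived from the base register and the increment/decrement, before/after addressing mode. A sketch of that address walk (the decoding is simplified and the memory access is replaced by a printf; this is not the kernel's decoder):

#include <stdint.h>
#include <stdio.h>

/* Count set bits in the 16-bit register list: one word per register. */
static unsigned int count_regs(uint16_t reglist)
{
    unsigned int n = 0;

    for (uint16_t m = reglist; m; m &= m - 1)
        n++;
    return n;
}

/* Walk the transfer addresses of an LDM: lowest register always goes to
 * the lowest address; 'up' and 'pre' model the U and P bits. */
static void walk_ldm(uint32_t base, uint16_t reglist, int up, int pre,
                     uint32_t *regs /* r0..r15 */)
{
    uint32_t nwords = count_regs(reglist);
    uint32_t eaddr = up ? base : base - 4 * nwords;

    if (pre == up)          /* increment-before / decrement-after start one word later */
        eaddr += 4;

    for (int rd = 0; rd < 16; rd++) {
        if (!(reglist & (1u << rd)))
            continue;
        printf("load r%-2d from 0x%08x\n", rd, eaddr);
        regs[rd] = eaddr;   /* stand-in for the unaligned memory access */
        eaddr += 4;
    }
}

int main(void)
{
    uint32_t regs[16] = { 0 };

    walk_ldm(0x20001001, 0x000f /* r0-r3 */, 1 /* up */, 0 /* post */, regs);
    return 0;
}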
|
/Linux-v5.4/fs/freevxfs/ |
D | vxfs_olt.c |
     82  char *oaddr, *eaddr;  in vxfs_read_olt() local
    105  eaddr = bp->b_data + (infp->vsi_oltsize * sbp->s_blocksize);  in vxfs_read_olt()
    107  while (oaddr < eaddr) {  in vxfs_read_olt()
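
vxfs_read_olt() scans the object-location table by keeping two char pointers: oaddr walks the records and eaddr marks one byte past the end of the buffer. A sketch of the same walk over an invented record layout (struct fake_record is not the VxFS on-disk format):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fake_record {
    uint32_t size;     /* total size of this record, including header */
    uint32_t type;
};

/* Walk records between oaddr and eaddr, advancing by each record's size. */
static void walk_records(char *data, size_t total)
{
    char *oaddr = data;
    char *eaddr = data + total;

    while (oaddr + sizeof(struct fake_record) <= eaddr) {
        struct fake_record rec;

        memcpy(&rec, oaddr, sizeof(rec));
        if (rec.size < sizeof(rec) || oaddr + rec.size > eaddr)
            break;                      /* malformed record: stop */
        printf("record type %u, %u bytes\n", rec.type, rec.size);
        oaddr += rec.size;
    }
}

int main(void)
{
    char buf[32] = { 0 };
    struct fake_record r = { .size = 16, .type = 7 };

    memcpy(buf, &r, sizeof(r));
    r = (struct fake_record){ .size = 16, .type = 9 };
    memcpy(buf + 16, &r, sizeof(r));
    walk_records(buf, sizeof(buf));
    return 0;
}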
|
/Linux-v5.4/arch/powerpc/platforms/pseries/ |
D | ras.c |
    501  unsigned long eaddr = 0, paddr = 0;  in mce_handle_error() local
    580  eaddr = be64_to_cpu(mce_log->effective_address);  in mce_handle_error()
    587  pfn = addr_to_pfn(regs, eaddr);  in mce_handle_error()
    608  eaddr = be64_to_cpu(mce_log->effective_address);  in mce_handle_error()
    625  eaddr = be64_to_cpu(mce_log->effective_address);  in mce_handle_error()
    642  eaddr = be64_to_cpu(mce_log->effective_address);  in mce_handle_error()
    687  &mce_err, regs->nip, eaddr, paddr);  in mce_handle_error()
|
/Linux-v5.4/drivers/gpu/drm/amd/amdgpu/ |
D | amdgpu_vm.c |
   2117  uint64_t eaddr;  in amdgpu_vm_bo_map() local
   2125  eaddr = saddr + size - 1;  in amdgpu_vm_bo_map()
   2126  if (saddr >= eaddr ||  in amdgpu_vm_bo_map()
   2131  eaddr /= AMDGPU_GPU_PAGE_SIZE;  in amdgpu_vm_bo_map()
   2133  tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);  in amdgpu_vm_bo_map()
   2137  "0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,  in amdgpu_vm_bo_map()
   2147  mapping->last = eaddr;  in amdgpu_vm_bo_map()
   2181  uint64_t eaddr;  in amdgpu_vm_bo_replace_map() local
   2190  eaddr = saddr + size - 1;  in amdgpu_vm_bo_replace_map()
   2191  if (saddr >= eaddr ||  in amdgpu_vm_bo_replace_map()
   [all …]
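
amdgpu_vm_bo_map() stores mappings as inclusive page ranges: the end address is saddr + size - 1, and both ends are divided by the GPU page size before the interval-tree overlap check. A sketch of that range computation with an assumed 4 KB GPU page size and simplified validation:

#include <stdint.h>
#include <stdio.h>

#define GPU_PAGE_SIZE 4096ULL   /* assumed GPU page size for illustration */

/* Compute the inclusive [first, last] page range for a mapping request,
 * rejecting empty or wrapping ranges. */
static int compute_mapping_range(uint64_t saddr, uint64_t size,
                                 uint64_t *first_pfn, uint64_t *last_pfn)
{
    uint64_t eaddr;

    if (size == 0 || saddr + size < saddr)   /* empty or wrapping range */
        return -1;

    eaddr = saddr + size - 1;
    *first_pfn = saddr / GPU_PAGE_SIZE;
    *last_pfn  = eaddr / GPU_PAGE_SIZE;      /* inclusive last page */
    return 0;
}

int main(void)
{
    uint64_t first, last;

    if (!compute_mapping_range(0x100000, 0x3000, &first, &last))
        printf("pages %llu..%llu\n",
               (unsigned long long)first, (unsigned long long)last);
    return 0;
}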
|
/Linux-v5.4/drivers/slimbus/ |
D | core.c |
    179  struct slim_eaddr *eaddr,  in slim_alloc_device() argument
    189  sbdev->e_addr = *eaddr;  in slim_alloc_device()
    352  struct slim_eaddr *eaddr)  in find_slim_device() argument
    357  dev = device_find_child(ctrl->dev, eaddr, slim_match_dev);  in find_slim_device()
|
/Linux-v5.4/arch/sh/include/asm/ |
D | tlb_64.h |
     56  void sh64_setup_tlb_slot(unsigned long long config_addr, unsigned long eaddr,
|