| /Linux-v5.4/arch/powerpc/kvm/ |
| D | book3s_64_mmu.c |
     46   u64 esid = GET_ESID(eaddr);   in kvmppc_mmu_book3s_64_find_slbe()  local
     50   u64 cmp_esid = esid;   in kvmppc_mmu_book3s_64_find_slbe()
     58   if (vcpu->arch.slb[i].esid == cmp_esid)   in kvmppc_mmu_book3s_64_find_slbe()
     63   eaddr, esid, esid_1t);   in kvmppc_mmu_book3s_64_find_slbe()
     70   vcpu->arch.slb[i].esid,   in kvmppc_mmu_book3s_64_find_slbe()
    380   u64 esid, esid_1t;   in kvmppc_mmu_book3s_64_slbmte()  local
    386   esid = GET_ESID(rb);   in kvmppc_mmu_book3s_64_slbmte()
    397   slbe->esid = slbe->tb ? esid_1t : esid;   in kvmppc_mmu_book3s_64_slbmte()
    424   kvmppc_mmu_map_segment(vcpu, esid << SID_SHIFT);   in kvmppc_mmu_book3s_64_slbmte()
    476   dprintk("KVM MMU: slbie(0x%llx, 0x%llx)\n", ea, slbe->esid);   in kvmppc_mmu_book3s_64_slbie()
    [all …]
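The book3s_64_mmu.c hits above show the guest SLB being searched by ESID and refilled on slbmte. A minimal sketch of that style of lookup, using a simplified entry layout rather than KVM's real kvmppc_slb, and assuming the usual Book3S segment shifts (SID_SHIFT = 28 for 256MB, SID_SHIFT_1T = 40 for 1TB):

/*
 * Sketch only: search an SLB-like array by ESID for both 256MB and
 * 1TB segments. Structure and helper names are illustrative.
 */
#include <stdint.h>
#include <stddef.h>

#define SID_SHIFT    28
#define SID_SHIFT_1T 40

struct slb_entry {
        int      valid;
        int      tb;        /* set for a 1TB segment */
        uint64_t esid;      /* stored as an ESID, not a raw EA */
        uint64_t vsid;
};

static struct slb_entry *find_slbe(struct slb_entry *slb, size_t n,
                                   uint64_t eaddr)
{
        uint64_t esid = eaddr >> SID_SHIFT;
        uint64_t esid_1t = eaddr >> SID_SHIFT_1T;
        size_t i;

        for (i = 0; i < n; i++) {
                uint64_t cmp = slb[i].tb ? esid_1t : esid;

                if (slb[i].valid && slb[i].esid == cmp)
                        return &slb[i];
        }
        return NULL;
}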
|
| D | book3s_64_mmu_host.c |
    269   static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)   in kvmppc_mmu_next_segment()  argument
    279   if (!(svcpu->slb[i].esid & SLB_ESID_V))   in kvmppc_mmu_next_segment()
    281   else if ((svcpu->slb[i].esid & ESID_MASK) == esid) {   in kvmppc_mmu_next_segment()
    313   u64 esid = eaddr >> SID_SHIFT;   in kvmppc_mmu_map_segment()  local
    323   if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {   in kvmppc_mmu_map_segment()
    325   svcpu->slb[slb_index].esid = 0;   in kvmppc_mmu_map_segment()
    334   map->guest_esid = esid;   in kvmppc_mmu_map_segment()
    346   svcpu->slb[slb_index].esid = slb_esid;   in kvmppc_mmu_map_segment()
    363   if ((svcpu->slb[i].esid & SLB_ESID_V) &&   in kvmppc_mmu_flush_segment()
    364   (svcpu->slb[i].esid & seg_mask) == ea) {   in kvmppc_mmu_flush_segment()
    [all …]
|
| D | book3s_32_mmu_host.c |
    304   u32 esid = eaddr >> SID_SHIFT;   in kvmppc_mmu_map_segment()  local
    311   if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {   in kvmppc_mmu_map_segment()
    313   svcpu->sr[esid] = SR_INVALID;   in kvmppc_mmu_map_segment()
    322   map->guest_esid = esid;   in kvmppc_mmu_map_segment()
    324   svcpu->sr[esid] = sr;   in kvmppc_mmu_map_segment()
    326   dprintk_sr("MMU: mtsr %d, 0x%x\n", esid, sr);   in kvmppc_mmu_map_segment()
|
| D | book3s_32_mmu.c |
     72   static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
    353   static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,   in kvmppc_mmu_book3s_32_esid_to_vsid()  argument
    356   ulong ea = esid << SID_SHIFT;   in kvmppc_mmu_book3s_32_esid_to_vsid()
    358   u64 gvsid = esid;   in kvmppc_mmu_book3s_32_esid_to_vsid()
    372   *vsid = VSID_REAL | esid;   in kvmppc_mmu_book3s_32_esid_to_vsid()
|
| D | book3s_hv_ras.c |
     56   unsigned long rb = be64_to_cpu(slb->save_area[i].esid);   in reload_slb()
|
| /Linux-v5.4/arch/powerpc/mm/book3s64/ |
| D | slb.c |
     89   WRITE_ONCE(p->save_area[index].esid, 0);   in slb_shadow_update()
     91   WRITE_ONCE(p->save_area[index].esid, cpu_to_be64(mk_esid_data(ea, ssize, index)));   in slb_shadow_update()
     96   WRITE_ONCE(get_slb_shadow()->save_area[index].esid, cpu_to_be64(index));   in slb_shadow_clear()
    130   "r" (be64_to_cpu(p->save_area[index].esid)));   in __slb_restore_bolted_realmode()
    179   "r" (be64_to_cpu(p->save_area[KSTACK_INDEX].esid))   in slb_flush_and_restore_bolted()
    203   slb_ptr->esid = e;   in slb_save_contents()
    222   e = slb_ptr->esid;   in slb_dump_contents()
    267   static bool preload_hit(struct thread_info *ti, unsigned long esid)   in preload_hit()  argument
    275   if (esid == ti->slb_preload_esid[idx])   in preload_hit()
    284   unsigned long esid;   in preload_add()  local
    [all …]
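The preload_hit()/preload_add() hits in slb.c refer to a small per-thread cache of recently used ESIDs consulted before touching the SLB. A sketch of that check; the ring size and field names below are assumptions for illustration, not the kernel's exact thread_info layout:

/*
 * Sketch: linear scan of a small ring of cached ESIDs, oldest first.
 */
#include <stdbool.h>
#include <stdint.h>

#define SLB_PRELOAD_NR 16

struct slb_preload {
        uint8_t  nr;                     /* number of cached ESIDs    */
        uint8_t  tail;                   /* index of the oldest entry */
        uint32_t esid[SLB_PRELOAD_NR];   /* preloaded ESIDs           */
};

static bool preload_hit(const struct slb_preload *p, uint32_t esid)
{
        uint8_t i;

        for (i = 0; i < p->nr; i++) {
                uint8_t idx = (p->tail + i) % SLB_PRELOAD_NR;

                if (p->esid[idx] == esid)
                        return true;
        }
        return false;
}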
|
| /Linux-v5.4/drivers/misc/cxl/ |
| D | fault.c |
     25   (sste->esid_data == cpu_to_be64(slb->esid)));   in sste_matches()
     41   hash = (slb->esid >> SID_SHIFT_1T) & mask;   in find_free_sste()
     43   hash = (slb->esid >> SID_SHIFT) & mask;   in find_free_sste()
     75   sste - ctx->sstp, slb->vsid, slb->esid);   in cxl_load_segment()
     76   trace_cxl_ste_write(ctx, sste - ctx->sstp, slb->esid, slb->vsid);   in cxl_load_segment()
     79   sste->esid_data = cpu_to_be64(slb->esid);   in cxl_load_segment()
    332   if (last_esid == slb.esid)   in cxl_prefault_vma()
    336   last_esid = slb.esid;   in cxl_prefault_vma()
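The find_free_sste() lines show the CXL driver hashing the ESID word (shifted by segment size) to pick a group of segment table entries. A bare sketch of that indexing; deriving the mask from a power-of-two group count is an assumption for illustration:

#include <stdint.h>

#define SID_SHIFT    28
#define SID_SHIFT_1T 40

/* Return the STE group index for an ESID word, per segment size. */
static unsigned int sste_group(uint64_t esid_word, int is_1t,
                               unsigned int nr_groups)
{
        unsigned int mask = nr_groups - 1;   /* nr_groups assumed power of two */
        uint64_t hash = is_1t ? (esid_word >> SID_SHIFT_1T)
                              : (esid_word >> SID_SHIFT);

        return (unsigned int)(hash & mask);
}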
|
| /Linux-v5.4/arch/powerpc/include/asm/ |
| D | copro.h |
     13   u64 esid, vsid;   member
|
| D | lppaca.h |
    144   __be64 esid;   member
|
| D | kvm_book3s_asm.h |
    160   u64 esid;
|
| D | kvm_host.h |
    406   int (*esid_to_vsid)(struct kvm_vcpu *vcpu, ulong esid, u64 *vsid);
    412   u64 esid;   member
|
| D | kvm_book3s.h |
    102   u64 esid;   member
|
| /Linux-v5.4/drivers/s390/cio/ |
| D | device_id.c |
    136   cdev->private->flags.esid = 0;   in snsid_init()
    161   cdev->private->flags.esid = 1;   in snsid_check()
|
| D | io_sch.h |
    157   unsigned int esid:1;   /* Ext. SenseID supported by HW */   member
|
| D | device_ops.c |
    440   if (cdev->private->flags.esid == 0)   in ccw_device_get_ciw()
|
| /Linux-v5.4/arch/powerpc/platforms/cell/ |
| D | spu_base.c |
    140   __func__, slbe, slb->vsid, slb->esid);   in spu_load_slb()
    148   out_be64(&priv2->slb_esid_RW, slb->esid);   in spu_load_slb()
    221   slb->esid = (ea & ESID_MASK) | SLB_ESID_V;   in __spu_kernel_slb()
    235   if (!((slbs[i].esid ^ ea) & ESID_MASK))   in __slb_present()
|
| /Linux-v5.4/tools/testing/selftests/powerpc/vphn/asm/ |
| D | lppaca.h |
    144   __be64 esid;   member
|
| /Linux-v5.4/arch/powerpc/xmon/ |
| D | xmon.c |
   2409   u64 esid, vsid;   in dump_one_paca()  local
   2414   esid = be64_to_cpu(p->slb_shadow_ptr->save_area[i].esid);   in dump_one_paca()
   2417   if (esid || vsid) {   in dump_one_paca()
   2419   22, "slb_shadow", i, esid, vsid);   in dump_one_paca()
   3515   unsigned long esid,vsid;   in dump_segments()  local
   3521   asm volatile("slbmfee %0,%1" : "=r" (esid) : "r" (i));   in dump_segments()
   3524   if (!esid && !vsid)   in dump_segments()
   3527   printf("%02d %016lx %016lx", i, esid, vsid);   in dump_segments()
   3529   if (!(esid & SLB_ESID_V)) {   in dump_segments()
   3537   GET_ESID_1T(esid),   in dump_segments()
   [all …]
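dump_segments() reads the hardware SLB directly with slbmfee/slbmfev, skips empty slots, and flags entries whose valid bit is clear. A bare sketch of that loop, assuming a Book3S-64 context where those instructions are legal; printf stands in for xmon's own output routine:

#include <stdio.h>

#define SLB_ESID_V 0x0000000008000000UL   /* valid bit in the ESID word */

static void dump_slb(int nr_entries)
{
        int i;

        for (i = 0; i < nr_entries; i++) {
                unsigned long esid, vsid;

                asm volatile("slbmfee %0,%1" : "=r" (esid) : "r" (i));
                asm volatile("slbmfev %0,%1" : "=r" (vsid) : "r" (i));

                if (!esid && !vsid)
                        continue;   /* empty slot */

                printf("%02d %016lx %016lx%s\n", i, esid, vsid,
                       (esid & SLB_ESID_V) ? "" : "  (invalid)");
        }
}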
|
| /Linux-v5.4/arch/powerpc/mm/ |
| D | copro_fault.c |
    139   slb->esid = (ea & (ssize == MMU_SEGSIZE_1T ? ESID_MASK_1T : ESID_MASK)) | SLB_ESID_V;   in copro_calculate_slb()
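copro_calculate_slb() builds the ESID word by masking the effective address to its segment boundary and setting the valid bit. A small sketch with the masks spelled out; the constant values below follow the usual mmu-hash.h definitions but are repeated here only for illustration:

#include <stdint.h>

#define ESID_MASK     0xfffffffff0000000ULL   /* 256MB segment base */
#define ESID_MASK_1T  0xffffff0000000000ULL   /* 1TB segment base   */
#define SLB_ESID_V    0x0000000008000000ULL   /* valid bit          */

static uint64_t make_esid_word(uint64_t ea, int is_1t)
{
        uint64_t mask = is_1t ? ESID_MASK_1T : ESID_MASK;

        /* segment base | valid bit */
        return (ea & mask) | SLB_ESID_V;
}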
|
| /Linux-v5.4/arch/powerpc/include/asm/book3s/64/ |
| D | mmu-hash.h |
    509   u64 esid;   member
|
| /Linux-v5.4/arch/powerpc/kernel/ |
| D | asm-offsets.c |
    241   OFFSET(SLBSHADOW_STACKESID, slb_shadow, save_area[SLB_NUM_BOLTED - 1].esid);   in main()
|