Lines matching refs: vcpu_e500. Each line below gives the source line number, the matching code fragment, and the enclosing function; "argument" marks a line where vcpu_e500 is declared as a function parameter, "local" a line where it is declared as a local variable.
37 struct kvmppc_vcpu_e500 *vcpu_e500) in gtlb0_get_next_victim() argument
41 victim = vcpu_e500->gtlb_nv[0]++; in gtlb0_get_next_victim()
42 if (unlikely(vcpu_e500->gtlb_nv[0] >= vcpu_e500->gtlb_params[0].ways)) in gtlb0_get_next_victim()
43 vcpu_e500->gtlb_nv[0] = 0; in gtlb0_get_next_victim()
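
The three fragments above are the whole victim-selection policy for TLB0: gtlb_nv[0] is a per-vCPU next-victim counter that is post-incremented and wrapped at the way count, i.e. plain round-robin replacement within a set. A minimal standalone model (the struct and names are simplified stand-ins, not the kernel's types):

    #include <stdio.h>

    /* Round-robin way selection for a set-associative TLB0: the
     * next-victim counter walks ways 0..ways-1 and wraps, like
     * gtlb_nv[0] in the fragments above. */
    struct gtlb0 {
        unsigned int nv;    /* next victim way (gtlb_nv[0]) */
        unsigned int ways;  /* gtlb_params[0].ways */
    };

    static unsigned int gtlb0_next_victim(struct gtlb0 *t)
    {
        unsigned int victim = t->nv++;
        if (t->nv >= t->ways)
            t->nv = 0;      /* wrap so replacement cycles the ways */
        return victim;
    }

    int main(void)
    {
        struct gtlb0 t = { .nv = 0, .ways = 2 };
        for (int i = 0; i < 5; i++)
            printf("victim way %u\n", gtlb0_next_victim(&t));
        return 0;           /* prints 0 1 0 1 0 for a 2-way TLB0 */
    }
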
58 static int gtlb0_set_base(struct kvmppc_vcpu_e500 *vcpu_e500, gva_t addr) in gtlb0_set_base() argument
60 return tlb0_set_base(addr, vcpu_e500->gtlb_params[0].sets, in gtlb0_set_base()
61 vcpu_e500->gtlb_params[0].ways); in gtlb0_set_base()
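
gtlb0_set_base() only forwards to tlb0_set_base(), whose body is not in this match list. Presumably it hashes the page number into a set index and scales by the way count so the result indexes the flat entry array; a sketch under that assumption (PAGE_SHIFT value assumed):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12  /* 4 KiB base pages, assumed here */

    /* Plausible body for tlb0_set_base(): pick a set from the page
     * number, then scale by the way count so the return value is the
     * index of that set's first entry in a flat array laid out
     * set-by-set. Assumes 'sets' is a power of two. */
    static int tlb0_set_base(uint64_t addr, int sets, int ways)
    {
        int set = (addr >> PAGE_SHIFT) & (sets - 1);
        return set * ways;
    }

    int main(void)
    {
        /* 512-entry, 4-way TLB0 gives 128 sets; addresses one page
         * apart land in adjacent sets */
        printf("%d\n", tlb0_set_base(0x10000000, 128, 4));  /* 0 */
        printf("%d\n", tlb0_set_base(0x10001000, 128, 4));  /* 4 */
        return 0;
    }
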
66 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); in get_tlb_esel() local
70 esel &= vcpu_e500->gtlb_params[0].ways - 1; in get_tlb_esel()
71 esel += gtlb0_set_base(vcpu_e500, vcpu->arch.shared->mas2); in get_tlb_esel()
73 esel &= vcpu_e500->gtlb_params[tlbsel].entries - 1; in get_tlb_esel()
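
get_tlb_esel() shows how a guest-supplied MAS0[ESEL] is normalized: for TLB0 it selects only a way, with the set taken from the effective address in MAS2, while for the fully associative TLB1 it indexes the whole array. A standalone sketch of that decode (simplified names, power-of-two geometry assumed):

    #include <stdint.h>

    #define PAGE_SHIFT 12

    /* Normalize a guest MAS0[ESEL] into an index into the flat
     * per-TLB entry array, per the fragments above. */
    static int norm_esel(int esel, int tlbsel, uint64_t mas2_epn,
                         int ways, int sets, int entries)
    {
        if (tlbsel == 0) {
            int set = (mas2_epn >> PAGE_SHIFT) & (sets - 1);
            return (esel & (ways - 1)) + set * ways;
        }
        return esel & (entries - 1);  /* TLB1: flat, fully associative */
    }

    int main(void)
    {
        /* MAS0[ESEL]=7 against a 2-set, 4-way TLB0 with MAS2 EPN
         * 0x1000: way 7&3=3 in set 1, so flat index 7 */
        return norm_esel(7, 0, 0x1000, 4, 2, 8) == 7 ? 0 : 1;
    }
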
80 static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500, in kvmppc_e500_tlb_index() argument
83 int size = vcpu_e500->gtlb_params[tlbsel].entries; in kvmppc_e500_tlb_index()
88 set_base = gtlb0_set_base(vcpu_e500, eaddr); in kvmppc_e500_tlb_index()
89 size = vcpu_e500->gtlb_params[0].ways; in kvmppc_e500_tlb_index()
91 if (eaddr < vcpu_e500->tlb1_min_eaddr || in kvmppc_e500_tlb_index()
92 eaddr > vcpu_e500->tlb1_max_eaddr) in kvmppc_e500_tlb_index()
97 offset = vcpu_e500->gtlb_offset[tlbsel]; in kvmppc_e500_tlb_index()
101 &vcpu_e500->gtlb_arch[offset + set_base + i]; in kvmppc_e500_tlb_index()
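
kvmppc_e500_tlb_index() is the central guest-TLB lookup: TLB0 scans only the ways of the set the address hashes to, while TLB1 scans every entry but is rejected up front when eaddr falls outside the cached [tlb1_min_eaddr, tlb1_max_eaddr] window. A simplified standalone model (the match test is reduced to a range check; the kernel also filters on PID, AS, and the valid bit):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    struct entry { uint64_t eaddr, size; int valid; };

    struct gtlb {
        struct entry *arch;           /* flat array: TLB0 sets, then TLB1 */
        int offset[2];                /* start of each TLB within 'arch' */
        int entries[2], ways, sets;
        uint64_t tlb1_min, tlb1_max;  /* tlb1_min_eaddr / tlb1_max_eaddr */
    };

    static int tlb_index(const struct gtlb *t, uint64_t eaddr, int tlbsel)
    {
        int base = 0, size = t->entries[tlbsel];

        if (tlbsel == 0) {
            /* scan only the ways of the set this address hashes to */
            base = (int)((eaddr >> PAGE_SHIFT) & (t->sets - 1)) * t->ways;
            size = t->ways;
        } else if (eaddr < t->tlb1_min || eaddr > t->tlb1_max) {
            return -1;  /* cheap reject: outside every TLB1 mapping */
        }

        for (int i = 0; i < size; i++) {
            const struct entry *e = &t->arch[t->offset[tlbsel] + base + i];
            if (e->valid && eaddr >= e->eaddr && eaddr < e->eaddr + e->size)
                return base + i;  /* esel of the matching entry */
        }
        return -1;
    }

    int main(void)
    {
        struct entry e[6] = { 0 };
        struct gtlb t = {
            .arch = e, .offset = { 0, 4 }, .entries = { 4, 2 },
            .ways = 2, .sets = 2, .tlb1_min = ~0ULL, .tlb1_max = 0,
        };
        /* 0x1000 hashes to set 1 (indices 2..3 of the flat array) */
        e[2] = (struct entry){ .eaddr = 0x1000, .size = 0x1000, .valid = 1 };
        printf("esel %d\n", tlb_index(&t, 0x1800, 0));  /* prints 2 */
        return 0;
    }
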
129 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); in kvmppc_e500_deliver_tlb_miss() local
135 victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0; in kvmppc_e500_deliver_tlb_miss()
139 | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]); in kvmppc_e500_deliver_tlb_miss()
151 static void kvmppc_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500) in kvmppc_recalc_tlb1map_range() argument
153 int size = vcpu_e500->gtlb_params[1].entries; in kvmppc_recalc_tlb1map_range()
158 vcpu_e500->tlb1_min_eaddr = ~0UL; in kvmppc_recalc_tlb1map_range()
159 vcpu_e500->tlb1_max_eaddr = 0; in kvmppc_recalc_tlb1map_range()
160 offset = vcpu_e500->gtlb_offset[1]; in kvmppc_recalc_tlb1map_range()
164 &vcpu_e500->gtlb_arch[offset + i]; in kvmppc_recalc_tlb1map_range()
170 vcpu_e500->tlb1_min_eaddr = in kvmppc_recalc_tlb1map_range()
171 min(vcpu_e500->tlb1_min_eaddr, eaddr); in kvmppc_recalc_tlb1map_range()
174 vcpu_e500->tlb1_max_eaddr = in kvmppc_recalc_tlb1map_range()
175 max(vcpu_e500->tlb1_max_eaddr, eaddr); in kvmppc_recalc_tlb1map_range()
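
kvmppc_recalc_tlb1map_range() rebuilds that window by scanning every valid TLB1 entry; starting from the inverted range (~0UL, 0) means an empty TLB1 yields a window that rejects every lookup. A standalone sketch (entry spans are simplified; the kernel derives them from the entry's page-size field):

    #include <stdint.h>

    struct entry { uint64_t eaddr, size; int valid; };

    /* Rebuild the TLB1 effective-address window: start from an
     * inverted (empty) range, then widen it over every valid entry. */
    static void recalc_tlb1_range(const struct entry *tlb1, int n,
                                  uint64_t *min, uint64_t *max)
    {
        *min = ~0ULL;  /* empty window rejects all lookups */
        *max = 0;

        for (int i = 0; i < n; i++) {
            uint64_t end;
            if (!tlb1[i].valid)
                continue;
            end = tlb1[i].eaddr + tlb1[i].size - 1;
            if (tlb1[i].eaddr < *min)
                *min = tlb1[i].eaddr;
            if (end > *max)
                *max = end;
        }
    }

    int main(void)
    {
        struct entry tlb1[2] = {
            { 0x100000, 0x10000, 1 },
            { 0x400000, 0x04000, 1 },
        };
        uint64_t lo, hi;
        recalc_tlb1_range(tlb1, 2, &lo, &hi);
        return (lo == 0x100000 && hi == 0x403fff) ? 0 : 1;
    }
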
179 static int kvmppc_need_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500, in kvmppc_need_recalc_tlb1map_range() argument
188 return vcpu_e500->tlb1_min_eaddr == start || in kvmppc_need_recalc_tlb1map_range()
189 vcpu_e500->tlb1_max_eaddr == end; in kvmppc_need_recalc_tlb1map_range()
197 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); in kvmppc_set_tlb1map_range() local
206 vcpu_e500->tlb1_min_eaddr = min(vcpu_e500->tlb1_min_eaddr, start); in kvmppc_set_tlb1map_range()
207 vcpu_e500->tlb1_max_eaddr = max(vcpu_e500->tlb1_max_eaddr, end); in kvmppc_set_tlb1map_range()
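
The two maintenance paths around that window are asymmetric: inserting a mapping can only widen it, so min/max are updated in place, while removing one forces the full rescan above only when the dropped entry sat on a boundary. The invalidate fragments below wire exactly this check to kvmppc_recalc_tlb1map_range(). A standalone sketch of both helpers (simplified names):

    #include <stdint.h>
    #include <stdio.h>

    static void widen_on_insert(uint64_t start, uint64_t end,
                                uint64_t *min, uint64_t *max)
    {
        if (start < *min) *min = start;  /* insertion can only grow... */
        if (end > *max)   *max = end;    /* ...the window, no rescan */
    }

    static int need_rescan_on_drop(uint64_t start, uint64_t end,
                                   uint64_t min, uint64_t max)
    {
        /* only an entry that defined a boundary can shrink the window */
        return min == start || max == end;
    }

    int main(void)
    {
        uint64_t lo = ~0ULL, hi = 0;
        widen_on_insert(0x100000, 0x10ffff, &lo, &hi);
        widen_on_insert(0x400000, 0x403fff, &lo, &hi);
        printf("rescan: %d\n",
               need_rescan_on_drop(0x400000, 0x403fff, lo, hi));  /* 1 */
        return 0;
    }
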
211 struct kvmppc_vcpu_e500 *vcpu_e500, in kvmppc_e500_gtlbe_invalidate() argument
215 get_entry(vcpu_e500, tlbsel, esel); in kvmppc_e500_gtlbe_invalidate()
220 if (tlbsel == 1 && kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe)) in kvmppc_e500_gtlbe_invalidate()
221 kvmppc_recalc_tlb1map_range(vcpu_e500); in kvmppc_e500_gtlbe_invalidate()
228 int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value) in kvmppc_e500_emul_mt_mmucsr0() argument
233 for (esel = 0; esel < vcpu_e500->gtlb_params[0].entries; esel++) in kvmppc_e500_emul_mt_mmucsr0()
234 kvmppc_e500_gtlbe_invalidate(vcpu_e500, 0, esel); in kvmppc_e500_emul_mt_mmucsr0()
236 for (esel = 0; esel < vcpu_e500->gtlb_params[1].entries; esel++) in kvmppc_e500_emul_mt_mmucsr0()
237 kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel); in kvmppc_e500_emul_mt_mmucsr0()
240 kvmppc_core_flush_tlb(&vcpu_e500->vcpu); in kvmppc_e500_emul_mt_mmucsr0()
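
A guest write to MMUCSR0 with the flash-invalidate bits set wipes the corresponding TLB wholesale: the emulation walks every guest entry of TLB0 and/or TLB1 and invalidates it, then drops all host shadow mappings in one kvmppc_core_flush_tlb() call. A standalone sketch; the TLB0FI/TLB1FI bit values follow my reading of the e500 MMUCSR0 layout and should be treated as illustrative:

    #define MMUCSR0_TLB1FI 0x2UL  /* flash-invalidate TLB1 (assumed) */
    #define MMUCSR0_TLB0FI 0x4UL  /* flash-invalidate TLB0 (assumed) */

    struct entry { int valid; };

    static void mt_mmucsr0(unsigned long value,
                           struct entry *tlb0, int n0,
                           struct entry *tlb1, int n1)
    {
        if (value & MMUCSR0_TLB0FI)
            for (int i = 0; i < n0; i++)
                tlb0[i].valid = 0;  /* invalidate every TLB0 entry */
        if (value & MMUCSR0_TLB1FI)
            for (int i = 0; i < n1; i++)
                tlb1[i].valid = 0;  /* invalidate every TLB1 entry */
        /* the kernel then flushes all host shadow mappings in one call */
    }

    int main(void)
    {
        struct entry t0[4] = { {1}, {1}, {1}, {1} }, t1[2] = { {1}, {1} };
        mt_mmucsr0(MMUCSR0_TLB0FI | MMUCSR0_TLB1FI, t0, 4, t1, 2);
        return t0[0].valid || t1[1].valid;  /* 0 on success */
    }
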
247 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); in kvmppc_e500_emul_tlbivax() local
258 for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries; in kvmppc_e500_emul_tlbivax()
260 kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel); in kvmppc_e500_emul_tlbivax()
263 esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, in kvmppc_e500_emul_tlbivax()
266 kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel); in kvmppc_e500_emul_tlbivax()
270 kvmppc_core_flush_tlb(&vcpu_e500->vcpu); in kvmppc_e500_emul_tlbivax()
275 static void tlbilx_all(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, in tlbilx_all() argument
282 for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries; esel++) { in tlbilx_all()
283 tlbe = get_entry(vcpu_e500, tlbsel, esel); in tlbilx_all()
286 inval_gtlbe_on_host(vcpu_e500, tlbsel, esel); in tlbilx_all()
287 kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel); in tlbilx_all()
292 static void tlbilx_one(struct kvmppc_vcpu_e500 *vcpu_e500, int pid, in tlbilx_one() argument
298 esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, -1); in tlbilx_one()
300 inval_gtlbe_on_host(vcpu_e500, tlbsel, esel); in tlbilx_one()
301 kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel); in tlbilx_one()
309 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); in kvmppc_e500_emul_tlbilx() local
313 tlbilx_all(vcpu_e500, 0, pid, type); in kvmppc_e500_emul_tlbilx()
314 tlbilx_all(vcpu_e500, 1, pid, type); in kvmppc_e500_emul_tlbilx()
316 tlbilx_one(vcpu_e500, pid, ea); in kvmppc_e500_emul_tlbilx()
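
kvmppc_e500_emul_tlbilx() dispatches on the instruction's T field: the "all" forms sweep both TLBs for entries matching the PID, while the single-entry form invalidates just the entry translating ea. A simplified standalone model (PID matching reduced to an equality test; the kernel also drops the host shadow copy before each guest entry):

    #include <stdint.h>

    struct entry { uint64_t eaddr, size; int pid, valid; };

    /* Sweep form: drop every valid entry belonging to 'pid'. */
    static void tlbilx_all(struct entry *tlb, int n, int pid)
    {
        for (int i = 0; i < n; i++)
            if (tlb[i].valid && tlb[i].pid == pid)
                tlb[i].valid = 0;
    }

    /* Single-entry form: drop only the entry translating 'ea'. */
    static void tlbilx_one(struct entry *tlb, int n, int pid, uint64_t ea)
    {
        for (int i = 0; i < n; i++)
            if (tlb[i].valid && tlb[i].pid == pid &&
                ea >= tlb[i].eaddr && ea < tlb[i].eaddr + tlb[i].size) {
                tlb[i].valid = 0;
                return;
            }
    }

    int main(void)
    {
        struct entry tlb[2] = {
            { 0x1000, 0x1000, 1, 1 },
            { 0x2000, 0x1000, 1, 1 },
        };
        tlbilx_one(tlb, 2, 1, 0x2800);  /* kills only the 2nd entry */
        tlbilx_all(tlb, 2, 1);          /* kills anything left for pid 1 */
        return tlb[0].valid || tlb[1].valid;  /* 0 on success */
    }
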
324 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); in kvmppc_e500_emul_tlbre() local
331 gtlbe = get_entry(vcpu_e500, tlbsel, esel); in kvmppc_e500_emul_tlbre()
333 vcpu->arch.shared->mas0 |= MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]); in kvmppc_e500_emul_tlbre()
343 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); in kvmppc_e500_emul_tlbsx() local
350 esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as); in kvmppc_e500_emul_tlbsx()
352 gtlbe = get_entry(vcpu_e500, tlbsel, esel); in kvmppc_e500_emul_tlbsx()
358 esel &= vcpu_e500->gtlb_params[tlbsel].ways - 1; in kvmppc_e500_emul_tlbsx()
361 | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]); in kvmppc_e500_emul_tlbsx()
370 victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0; in kvmppc_e500_emul_tlbsx()
374 | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]); in kvmppc_e500_emul_tlbsx()
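
tlbsx programs the MAS registers either way: on a hit they are pointed at the matching entry (for TLB0, esel reduced to a way index), and on a miss they are preloaded with a round-robin victim slot so the guest's miss handler can tlbwe straight into it; MAS0[NV] carries the next-victim hint in both cases. A deliberately simplified standalone model of that outcome ('struct mas' is an illustrative stand-in for the shared MAS0/MAS1 state, not the real layout):

    /* Illustrative model of the tlbsx result. */
    struct mas { int tlbsel, esel, nv, valid; };

    static struct mas tlbsx_result(int hit_esel, int hit_tlbsel,
                                   int ways, unsigned int next_victim)
    {
        struct mas m;
        if (hit_esel >= 0) {                  /* search hit */
            m.tlbsel = hit_tlbsel;
            m.esel = hit_esel & (ways - 1);   /* way within the set */
            m.valid = 1;
        } else {                              /* miss: preload a victim */
            m.tlbsel = 0;  /* TLB0 here; the kernel takes it from MAS4 */
            m.esel = (int)next_victim;
            m.valid = 0;
        }
        m.nv = (int)next_victim;              /* MAS0[NV] hint */
        return m;
    }

    int main(void)
    {
        struct mas hit = tlbsx_result(7, 0, 4, 1);
        struct mas miss = tlbsx_result(-1, 0, 4, 1);
        return (hit.esel == 3 && !miss.valid) ? 0 : 1;
    }
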
392 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); in kvmppc_e500_emul_tlbwe() local
401 gtlbe = get_entry(vcpu_e500, tlbsel, esel); in kvmppc_e500_emul_tlbwe()
404 inval_gtlbe_on_host(vcpu_e500, tlbsel, esel); in kvmppc_e500_emul_tlbwe()
406 kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe)) in kvmppc_e500_emul_tlbwe()
426 kvmppc_recalc_tlb1map_range(vcpu_e500); in kvmppc_e500_emul_tlbwe()
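
The tlbwe path orders its work carefully: the slot's old translation is dropped from the host shadow TLB before the guest entry is overwritten (otherwise a stale host mapping would survive with no guest entry describing it), and the TLB1 window is fixed up after the write, via the full rescan when the old entry sat on a boundary and the cheap widen otherwise. A sketch of that ordering with no-op stand-ins for inval_gtlbe_on_host() and the window helpers sketched earlier:

    #include <stdint.h>

    struct entry { uint64_t eaddr, size; int valid; };

    static void drop_host_shadow(struct entry *e) { (void)e; }
    static void rescan_tlb1_window(void) { }
    static void widen_tlb1_window(const struct entry *e) { (void)e; }

    static void tlbwe(struct entry *slot, struct entry new_e,
                      int is_tlb1, int old_on_boundary)
    {
        if (slot->valid)
            drop_host_shadow(slot);  /* old translation dies first */

        *slot = new_e;               /* install the guest's entry */

        if (is_tlb1) {
            if (old_on_boundary)
                rescan_tlb1_window();      /* window may have shrunk */
            else
                widen_tlb1_window(&new_e); /* can only have grown */
        }
    }

    int main(void)
    {
        struct entry slot = { 0x1000, 0x1000, 1 };
        tlbwe(&slot, (struct entry){ 0x4000, 0x1000, 1 }, 1, 0);
        return slot.eaddr == 0x4000 ? 0 : 1;
    }
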
456 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); in kvmppc_e500_tlb_search() local
460 esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel, pid, as); in kvmppc_e500_tlb_search()
526 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); in kvmppc_mmu_xlate() local
530 gtlbe = get_entry(vcpu_e500, tlbsel_of(index), esel_of(index)); in kvmppc_mmu_xlate()
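
kvmppc_e500_tlb_search() returns tlbsel and esel folded into a single int, which kvmppc_mmu_xlate() unpacks with tlbsel_of()/esel_of(). The actual encoding lives in a header outside this match list; a plausible one, consistent with how the fragments use it:

    /* Hypothetical packing: TLB selector in the high half, entry
     * selector in the low half. */
    #define index_of(tlbsel, esel)  (((tlbsel) << 16) | ((esel) & 0xFFFF))
    #define tlbsel_of(index)        (((index) >> 16) & 0x3)
    #define esel_of(index)          ((index) & 0xFFFF)

    int main(void)
    {
        int index = index_of(1, 42);
        return (tlbsel_of(index) == 1 && esel_of(index) == 42) ? 0 : 1;
    }
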
538 static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500) in free_gtlb() argument
542 kvmppc_core_flush_tlb(&vcpu_e500->vcpu); in free_gtlb()
543 kfree(vcpu_e500->g2h_tlb1_map); in free_gtlb()
544 kfree(vcpu_e500->gtlb_priv[0]); in free_gtlb()
545 kfree(vcpu_e500->gtlb_priv[1]); in free_gtlb()
547 if (vcpu_e500->shared_tlb_pages) { in free_gtlb()
548 vfree((void *)(round_down((uintptr_t)vcpu_e500->gtlb_arch, in free_gtlb()
551 for (i = 0; i < vcpu_e500->num_shared_tlb_pages; i++) { in free_gtlb()
552 set_page_dirty_lock(vcpu_e500->shared_tlb_pages[i]); in free_gtlb()
553 put_page(vcpu_e500->shared_tlb_pages[i]); in free_gtlb()
556 vcpu_e500->num_shared_tlb_pages = 0; in free_gtlb()
558 kfree(vcpu_e500->shared_tlb_pages); in free_gtlb()
559 vcpu_e500->shared_tlb_pages = NULL; in free_gtlb()
561 kfree(vcpu_e500->gtlb_arch); in free_gtlb()
564 vcpu_e500->gtlb_arch = NULL; in free_gtlb()
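
free_gtlb() has two ownership cases for the entry array. When it was mapped from pinned user pages (shared_tlb_pages), the kernel alias is vfree()d, rounding down because gtlb_arch may point into the middle of the first page, and each page is marked dirty before its reference is dropped so the guest-visible TLB contents survive in the user mapping; otherwise the array was plain kernel memory and is kfree()d. A sketch of the shared-page teardown order, with no-op stand-ins for vfree(), set_page_dirty_lock(), and put_page():

    #include <stdint.h>

    #define PAGE_SIZE 4096UL

    struct page;  /* opaque, as in the kernel */

    static void vfree_(void *p) { (void)p; }
    static void set_page_dirty_lock_(struct page *p) { (void)p; }
    static void put_page_(struct page *p) { (void)p; }

    static void free_shared_tlb(void *gtlb_arch,
                                struct page **pages, int n)
    {
        /* unmap the kernel alias; round down to the page boundary
         * because gtlb_arch may not start at offset 0 of the mapping */
        vfree_((void *)((uintptr_t)gtlb_arch & ~(PAGE_SIZE - 1)));

        for (int i = 0; i < n; i++) {
            set_page_dirty_lock_(pages[i]); /* keep contents for user */
            put_page_(pages[i]);            /* drop the pin */
        }
    }

    int main(void)
    {
        free_shared_tlb((void *)0, (struct page **)0, 0);
        return 0;
    }
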
734 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); in kvm_vcpu_ioctl_config_tlb() local
815 free_gtlb(vcpu_e500); in kvm_vcpu_ioctl_config_tlb()
817 vcpu_e500->gtlb_priv[0] = privs[0]; in kvm_vcpu_ioctl_config_tlb()
818 vcpu_e500->gtlb_priv[1] = privs[1]; in kvm_vcpu_ioctl_config_tlb()
819 vcpu_e500->g2h_tlb1_map = g2h_bitmap; in kvm_vcpu_ioctl_config_tlb()
821 vcpu_e500->gtlb_arch = (struct kvm_book3e_206_tlb_entry *) in kvm_vcpu_ioctl_config_tlb()
824 vcpu_e500->gtlb_params[0].entries = params.tlb_sizes[0]; in kvm_vcpu_ioctl_config_tlb()
825 vcpu_e500->gtlb_params[1].entries = params.tlb_sizes[1]; in kvm_vcpu_ioctl_config_tlb()
827 vcpu_e500->gtlb_offset[0] = 0; in kvm_vcpu_ioctl_config_tlb()
828 vcpu_e500->gtlb_offset[1] = params.tlb_sizes[0]; in kvm_vcpu_ioctl_config_tlb()
833 vcpu_e500->shared_tlb_pages = pages; in kvm_vcpu_ioctl_config_tlb()
834 vcpu_e500->num_shared_tlb_pages = num_pages; in kvm_vcpu_ioctl_config_tlb()
836 vcpu_e500->gtlb_params[0].ways = params.tlb_ways[0]; in kvm_vcpu_ioctl_config_tlb()
837 vcpu_e500->gtlb_params[0].sets = sets; in kvm_vcpu_ioctl_config_tlb()
839 vcpu_e500->gtlb_params[1].ways = params.tlb_sizes[1]; in kvm_vcpu_ioctl_config_tlb()
840 vcpu_e500->gtlb_params[1].sets = 1; in kvm_vcpu_ioctl_config_tlb()
842 kvmppc_recalc_tlb1map_range(vcpu_e500); in kvm_vcpu_ioctl_config_tlb()
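
The KVM_CONFIG_TLB path derives the whole guest-TLB geometry from the user's parameters: both TLBs live back-to-back in one flat shared array, so TLB1's offset is simply TLB0's size; TLB0's set count follows from entries and ways, and TLB1 is configured fully associative (ways equal to entries, one set). A standalone sketch of that derivation (field names mirror the fragments, not the uapi struct):

    #include <stdio.h>

    struct tlb_params { int entries, ways, sets; };

    static void config_geometry(const int sizes[2], int ways0,
                                struct tlb_params p[2], int offset[2])
    {
        p[0].entries = sizes[0];
        p[0].ways    = ways0;
        p[0].sets    = sizes[0] / ways0;  /* set-associative TLB0 */

        p[1].entries = sizes[1];
        p[1].ways    = sizes[1];          /* fully associative TLB1 */
        p[1].sets    = 1;

        offset[0] = 0;                    /* TLB0 opens gtlb_arch */
        offset[1] = sizes[0];             /* TLB1 follows directly */
    }

    int main(void)
    {
        int sizes[2] = { 512, 64 }, offset[2];
        struct tlb_params p[2];
        config_geometry(sizes, 4, p, offset);
        printf("TLB0: %d sets; TLB1 at offset %d\n",
               p[0].sets, offset[1]);     /* 128 sets; offset 512 */
        return 0;
    }
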
859 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); in kvm_vcpu_ioctl_dirty_tlb() local
860 kvmppc_recalc_tlb1map_range(vcpu_e500); in kvm_vcpu_ioctl_dirty_tlb()
898 int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500) in kvmppc_e500_tlb_init() argument
900 struct kvm_vcpu *vcpu = &vcpu_e500->vcpu; in kvmppc_e500_tlb_init()
902 if (e500_mmu_host_init(vcpu_e500)) in kvmppc_e500_tlb_init()
905 vcpu_e500->gtlb_params[0].entries = KVM_E500_TLB0_SIZE; in kvmppc_e500_tlb_init()
906 vcpu_e500->gtlb_params[1].entries = KVM_E500_TLB1_SIZE; in kvmppc_e500_tlb_init()
908 vcpu_e500->gtlb_params[0].ways = KVM_E500_TLB0_WAY_NUM; in kvmppc_e500_tlb_init()
909 vcpu_e500->gtlb_params[0].sets = in kvmppc_e500_tlb_init()
912 vcpu_e500->gtlb_params[1].ways = KVM_E500_TLB1_SIZE; in kvmppc_e500_tlb_init()
913 vcpu_e500->gtlb_params[1].sets = 1; in kvmppc_e500_tlb_init()
915 vcpu_e500->gtlb_arch = kmalloc_array(KVM_E500_TLB0_SIZE + in kvmppc_e500_tlb_init()
917 sizeof(*vcpu_e500->gtlb_arch), in kvmppc_e500_tlb_init()
919 if (!vcpu_e500->gtlb_arch) in kvmppc_e500_tlb_init()
922 vcpu_e500->gtlb_offset[0] = 0; in kvmppc_e500_tlb_init()
923 vcpu_e500->gtlb_offset[1] = KVM_E500_TLB0_SIZE; in kvmppc_e500_tlb_init()
925 vcpu_e500->gtlb_priv[0] = kcalloc(vcpu_e500->gtlb_params[0].entries, in kvmppc_e500_tlb_init()
928 if (!vcpu_e500->gtlb_priv[0]) in kvmppc_e500_tlb_init()
931 vcpu_e500->gtlb_priv[1] = kcalloc(vcpu_e500->gtlb_params[1].entries, in kvmppc_e500_tlb_init()
934 if (!vcpu_e500->gtlb_priv[1]) in kvmppc_e500_tlb_init()
937 vcpu_e500->g2h_tlb1_map = kcalloc(vcpu_e500->gtlb_params[1].entries, in kvmppc_e500_tlb_init()
938 sizeof(*vcpu_e500->g2h_tlb1_map), in kvmppc_e500_tlb_init()
940 if (!vcpu_e500->g2h_tlb1_map) in kvmppc_e500_tlb_init()
943 vcpu_mmu_init(vcpu, vcpu_e500->gtlb_params); in kvmppc_e500_tlb_init()
945 kvmppc_recalc_tlb1map_range(vcpu_e500); in kvmppc_e500_tlb_init()
948 free_gtlb(vcpu_e500); in kvmppc_e500_tlb_init()
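
kvmppc_e500_tlb_init() allocates the entry array, both per-entry private arrays, and the guest-to-host TLB1 map in sequence, and any failure unwinds through free_gtlb(), the single cleanup path also visible at line 948. A standalone sketch of that allocate-then-unwind pattern (element sizes are arbitrary placeholders for the kernel's structure sizes):

    #include <stdlib.h>

    struct gtlb { void *arch, *priv0, *priv1, *g2h; };

    /* single teardown path, like free_gtlb(); free(NULL) is a no-op,
     * so partially-initialized state unwinds safely */
    static void gtlb_free(struct gtlb *t)
    {
        free(t->g2h);
        free(t->priv1);
        free(t->priv0);
        free(t->arch);
        t->arch = t->priv0 = t->priv1 = t->g2h = NULL;
    }

    static int gtlb_init(struct gtlb *t, int n0, int n1)
    {
        t->arch  = calloc(n0 + n1, 64);  /* flat entry array, both TLBs */
        t->priv0 = calloc(n0, 16);       /* per-entry private data, TLB0 */
        t->priv1 = calloc(n1, 16);       /* per-entry private data, TLB1 */
        t->g2h   = calloc(n1, 8);        /* guest-to-host TLB1 map */
        if (!t->arch || !t->priv0 || !t->priv1 || !t->g2h)
            goto err;
        return 0;
    err:
        gtlb_free(t);  /* one exit path undoes whatever succeeded */
        return -1;
    }

    int main(void)
    {
        struct gtlb t;
        if (gtlb_init(&t, 512, 64))
            return 1;
        gtlb_free(&t);
        return 0;
    }
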
952 void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500) in kvmppc_e500_tlb_uninit() argument
954 free_gtlb(vcpu_e500); in kvmppc_e500_tlb_uninit()
955 e500_mmu_host_uninit(vcpu_e500); in kvmppc_e500_tlb_uninit()