// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/preempt.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>
#include <linux/cma.h>
#include <linux/bitops.h>

#include <asm/cputable.h>
#include <asm/interrupt.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/machdep.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/dbell.h>
#include <asm/cputhreads.h>
#include <asm/io.h>
#include <asm/opal.h>
#include <asm/smp.h>

#define KVM_CMA_CHUNK_ORDER	18

#include "book3s_xics.h"
#include "book3s_xive.h"

/*
 * Hash page table alignment on newer CPUs (CPU_FTR_ARCH_206)
 * should be a power of 2.
 */
#define HPT_ALIGN_PAGES		((1 << 18) >> PAGE_SHIFT) /* 256k */
/*
 * By default we reserve 5% of memory for hash page table allocation.
 */
static unsigned long kvm_cma_resv_ratio = 5;

static struct cma *kvm_cma;

static int __init early_parse_kvm_cma_resv(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	if (!p)
		return -EINVAL;
	return kstrtoul(p, 0, &kvm_cma_resv_ratio);
}
early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);

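/*
 * Allocate physically contiguous memory for a guest hashed page table
 * from the CMA region reserved at boot by kvm_cma_reserve().
 */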
struct page *kvm_alloc_hpt_cma(unsigned long nr_pages)
{
	VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);

	return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES),
			 false);
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt_cma);

void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages)
{
	cma_release(kvm_cma, page, nr_pages);
}
EXPORT_SYMBOL_GPL(kvm_free_hpt_cma);

/**
 * kvm_cma_reserve() - reserve area for kvm hash pagetable
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the memblock allocator has been
 * activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init kvm_cma_reserve(void)
{
	unsigned long align_size;
	phys_addr_t selected_size;

	/*
	 * We need the CMA reservation only when we are in HV mode.
	 */
	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return;

	selected_size = PAGE_ALIGN(memblock_phys_mem_size() * kvm_cma_resv_ratio / 100);
	if (selected_size) {
		pr_info("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);
		align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
		cma_declare_contiguous(0, selected_size, 0, align_size,
			KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, "kvm_cma",
			&kvm_cma);
	}
}

/*
 * Real-mode H_CONFER implementation.
 * We check if we are the only vcpu out of this virtual core
 * still running in the guest and not ceded.  If so, we pop up
 * to the virtual-mode implementation; if not, just return to
 * the guest.
 */
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
			    unsigned int yield_count)
{
	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
	int ptid = local_paca->kvm_hstate.ptid;
	int threads_running;
	int threads_ceded;
	int threads_conferring;
	u64 stop = get_tb() + 10 * tb_ticks_per_usec;
	int rv = H_SUCCESS; /* => don't yield */

	set_bit(ptid, &vc->conferring_threads);
	while ((get_tb() < stop) && !VCORE_IS_EXITING(vc)) {
		threads_running = VCORE_ENTRY_MAP(vc);
		threads_ceded = vc->napping_threads;
		threads_conferring = vc->conferring_threads;
		if ((threads_ceded | threads_conferring) == threads_running) {
			rv = H_TOO_HARD; /* => do yield */
			break;
		}
	}
	clear_bit(ptid, &vc->conferring_threads);
	return rv;
}

/*
 * When running HV mode KVM we need to block certain operations while KVM VMs
 * exist in the system. We use a counter of VMs to track this.
 *
 * One of the operations we need to block is onlining of secondaries, so we
 * protect hv_vm_count with cpus_read_lock/unlock().
 */
static atomic_t hv_vm_count;

void kvm_hv_vm_activated(void)
{
	cpus_read_lock();
	atomic_inc(&hv_vm_count);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_activated);

void kvm_hv_vm_deactivated(void)
{
	cpus_read_lock();
	atomic_dec(&hv_vm_count);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_deactivated);

bool kvm_hv_mode_active(void)
{
	return atomic_read(&hv_vm_count) != 0;
}

extern int hcall_real_table[], hcall_real_table_end[];

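/*
 * Return 1 if the hcall 'cmd' has a real-mode handler, i.e. a non-zero
 * entry in hcall_real_table (which is indexed by hcall number / 4).
 */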
int kvmppc_hcall_impl_hv_realmode(unsigned long cmd)
{
	cmd /= 4;
	if (cmd < hcall_real_table_end - hcall_real_table &&
	    hcall_real_table[cmd])
		return 1;

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_hcall_impl_hv_realmode);

int kvmppc_hwrng_present(void)
{
	return ppc_md.get_random_seed != NULL;
}
EXPORT_SYMBOL_GPL(kvmppc_hwrng_present);

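/*
 * Real-mode H_RANDOM implementation: return a random number from the
 * platform RNG in GPR4, or fail with H_HARDWARE if none is available.
 */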
long kvmppc_rm_h_random(struct kvm_vcpu *vcpu)
{
	if (ppc_md.get_random_seed &&
	    ppc_md.get_random_seed(&vcpu->arch.regs.gpr[4]))
		return H_SUCCESS;

	return H_HARDWARE;
}

/*
 * Send an interrupt or message to another CPU.
 * The caller needs to include any barrier needed to order writes
 * to memory vs. the IPI/message.
 */
void kvmhv_rm_send_ipi(int cpu)
{
	void __iomem *xics_phys;
	unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);

	/* On POWER9 we can use msgsnd for any destination cpu. */
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		msg |= get_hard_smp_processor_id(cpu);
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return;
	}

	/* On POWER8 for IPIs to threads in the same core, use msgsnd. */
	if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
	    cpu_first_thread_sibling(cpu) ==
	    cpu_first_thread_sibling(raw_smp_processor_id())) {
		msg |= cpu_thread_in_core(cpu);
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return;
	}

	/* We should never reach this */
	if (WARN_ON_ONCE(xics_on_xive()))
		return;

	/* Else poke the target with an IPI */
	xics_phys = paca_ptrs[cpu]->kvm_hstate.xics_phys;
	if (xics_phys)
		__raw_rm_writeb(IPI_PRIORITY, xics_phys + XICS_MFRR);
	else
		opal_int_set_mfrr(get_hard_smp_processor_id(cpu), IPI_PRIORITY);
}

/*
 * The following functions are called from the assembly code
 * in book3s_hv_rmhandlers.S.
 */
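/*
 * Send an IPI to each thread of the vcore whose bit is set in 'active',
 * starting from the vcore's first physical CPU.
 */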
static void kvmhv_interrupt_vcore(struct kvmppc_vcore *vc, int active)
{
	int cpu = vc->pcpu;

	/* Order setting of exit map vs. msgsnd/IPI */
	smp_mb();
	for (; active; active >>= 1, ++cpu)
		if (active & 1)
			kvmhv_rm_send_ipi(cpu);
}

void kvmhv_commence_exit(int trap)
{
	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
	int ptid = local_paca->kvm_hstate.ptid;
	struct kvm_split_mode *sip = local_paca->kvm_hstate.kvm_split_mode;
	int me, ee, i;

	/*
	 * Set our bit in the threads-exiting-guest map in the 0xff00
	 * bits of vcore->entry_exit_map.
	 */
	me = 0x100 << ptid;
	do {
		ee = vc->entry_exit_map;
	} while (cmpxchg(&vc->entry_exit_map, ee, ee | me) != ee);

	/* Are we the first here? */
	if ((ee >> 8) != 0)
		return;

	/*
	 * Trigger the other threads in this vcore to exit the guest.
	 * If this is a hypervisor decrementer interrupt then they
	 * will be already on their way out of the guest.
	 */
	if (trap != BOOK3S_INTERRUPT_HV_DECREMENTER)
		kvmhv_interrupt_vcore(vc, ee & ~(1 << ptid));

	/*
	 * If we are doing dynamic micro-threading, interrupt the other
	 * subcores to pull them out of their guests too.
	 */
	if (!sip)
		return;

	for (i = 0; i < MAX_SUBCORES; ++i) {
		vc = sip->vc[i];
		if (!vc)
			break;
		do {
			ee = vc->entry_exit_map;
			/* Already asked to exit? */
			if ((ee >> 8) != 0)
				break;
		} while (cmpxchg(&vc->entry_exit_map, ee,
				 ee | VCORE_EXIT_REQ) != ee);
		if ((ee >> 8) == 0)
			kvmhv_interrupt_vcore(vc, ee);
	}
}

struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;
EXPORT_SYMBOL_GPL(kvmppc_host_rm_ops_hv);

#ifdef CONFIG_KVM_XICS
static struct kvmppc_irq_map *get_irqmap(struct kvmppc_passthru_irqmap *pimap,
					 u32 xisr)
{
	int i;

	/*
	 * We access the mapped array here without a lock.  That
	 * is safe because we never reduce the number of entries
	 * in the array and we never change the v_hwirq field of
	 * an entry once it is set.
	 *
	 * We have also carefully ordered the stores in the writer
	 * and the loads here in the reader, so that if we find a matching
	 * hwirq here, the associated GSI and irq_desc fields are valid.
	 */
	for (i = 0; i < pimap->n_mapped; i++) {
		if (xisr == pimap->mapped[i].r_hwirq) {
			/*
			 * Order subsequent reads in the caller to serialize
			 * with the writer.
			 */
			smp_rmb();
			return &pimap->mapped[i];
		}
	}
	return NULL;
}

/*
 * If we have an interrupt that's not an IPI, check if we have a
 * passthrough adapter and if so, check if this external interrupt
 * is for the adapter.
 * We will attempt to deliver the IRQ directly to the target VCPU's
 * ICP, the virtual ICP (based on affinity - the xive value in ICS).
 *
 * If the delivery fails or if this is not for a passthrough adapter,
 * return to the host to handle this interrupt. We earlier saved a
 * copy of the XIRR in the PACA; it will be picked up by the host
 * ICP driver.
 */
static int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
	struct kvmppc_passthru_irqmap *pimap;
	struct kvmppc_irq_map *irq_map;
	struct kvm_vcpu *vcpu;

	vcpu = local_paca->kvm_hstate.kvm_vcpu;
	if (!vcpu)
		return 1;
	pimap = kvmppc_get_passthru_irqmap(vcpu->kvm);
	if (!pimap)
		return 1;
	irq_map = get_irqmap(pimap, xisr);
	if (!irq_map)
		return 1;

	/* We're handling this interrupt, generic code doesn't need to */
	local_paca->kvm_hstate.saved_xirr = 0;

	return kvmppc_deliver_irq_passthru(vcpu, xirr, irq_map, pimap, again);
}

#else
static inline int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
	return 1;
}
#endif

/*
 * Determine what sort of external interrupt is pending (if any).
 * Returns:
 *	 0 if no interrupt is pending
 *	 1 if an interrupt is pending that needs to be handled by the host
 *	 2 if a passthrough interrupt needs completion in the host
 *	-1 if there was a guest wakeup IPI (which has now been cleared)
 *	-2 if there is a PCI passthrough external interrupt that was handled
 */
static long kvmppc_read_one_intr(bool *again);

long kvmppc_read_intr(void)
{
	long ret = 0;
	long rc;
	bool again;

	if (xive_enabled())
		return 1;

	do {
		again = false;
		rc = kvmppc_read_one_intr(&again);
		if (rc && (ret == 0 || rc > ret))
			ret = rc;
	} while (again);
	return ret;
}

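/*
 * Read one pending interrupt from the XICS ICP in real mode and
 * classify it, using the return values documented above for
 * kvmppc_read_intr().
 */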
static long kvmppc_read_one_intr(bool *again)
{
	void __iomem *xics_phys;
	u32 h_xirr;
	__be32 xirr;
	u32 xisr;
	u8 host_ipi;
	int64_t rc;

	if (xive_enabled())
		return 1;

	/* see if a host IPI is pending */
	host_ipi = READ_ONCE(local_paca->kvm_hstate.host_ipi);
	if (host_ipi)
		return 1;

	/* Now read the interrupt from the ICP */
	xics_phys = local_paca->kvm_hstate.xics_phys;
	rc = 0;
	if (!xics_phys)
		rc = opal_int_get_xirr(&xirr, false);
	else
		xirr = __raw_rm_readl(xics_phys + XICS_XIRR);
	if (rc < 0)
		return 1;

	/*
	 * Save XIRR for later. Since we get control in reverse endian
	 * on LE systems, save it byte reversed and fetch it back in
	 * host endian. Note that xirr is the value read from the
	 * XIRR register, while h_xirr is the host endian version.
	 */
	h_xirr = be32_to_cpu(xirr);
	local_paca->kvm_hstate.saved_xirr = h_xirr;
	xisr = h_xirr & 0xffffff;
	/*
	 * Ensure that the store/load complete to guarantee all side
	 * effects of loading from XIRR have completed.
	 */
	smp_mb();

	/* if nothing pending in the ICP */
	if (!xisr)
		return 0;

	/* We found something in the ICP...
	 *
	 * If it is an IPI, clear the MFRR and EOI it.
	 */
	if (xisr == XICS_IPI) {
		rc = 0;
		if (xics_phys) {
			__raw_rm_writeb(0xff, xics_phys + XICS_MFRR);
			__raw_rm_writel(xirr, xics_phys + XICS_XIRR);
		} else {
			opal_int_set_mfrr(hard_smp_processor_id(), 0xff);
			rc = opal_int_eoi(h_xirr);
		}
		/* If rc > 0, there is another interrupt pending */
		*again = rc > 0;

		/*
		 * Need to ensure side effects of above stores
		 * complete before proceeding.
		 */
		smp_mb();

		/*
		 * We need to re-check host IPI now in case it got set in the
		 * meantime. If it's clear, we bounce the interrupt to the
		 * guest.
		 */
		host_ipi = READ_ONCE(local_paca->kvm_hstate.host_ipi);
		if (unlikely(host_ipi != 0)) {
			/*
			 * We raced with the host, we need to resend
			 * that IPI, bummer.
			 */
			if (xics_phys)
				__raw_rm_writeb(IPI_PRIORITY,
						xics_phys + XICS_MFRR);
			else
				opal_int_set_mfrr(hard_smp_processor_id(),
						  IPI_PRIORITY);
			/* Let side effects complete */
			smp_mb();
			return 1;
		}

		/* OK, it's an IPI for us */
		local_paca->kvm_hstate.saved_xirr = 0;
		return -1;
	}

	return kvmppc_check_passthru(xisr, xirr, again);
}

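/* Take the vcpu out of the ceded state and cancel its decrementer timer. */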
static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
{
	vcpu->arch.ceded = 0;
	if (vcpu->arch.timer_running) {
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
		vcpu->arch.timer_running = 0;
	}
}

void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
{
	/* Guest must always run with ME enabled, HV disabled. */
	msr = (msr | MSR_ME) & ~MSR_HV;

	/*
	 * Check for illegal transactional state bit combination
	 * and if we find it, force the TS field to a safe state.
	 */
	if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
		msr &= ~MSR_TS_MASK;
	vcpu->arch.shregs.msr = msr;
	kvmppc_end_cede(vcpu);
}
EXPORT_SYMBOL_GPL(kvmppc_set_msr_hv);

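/*
 * Deliver an interrupt to the guest: save the current PC and MSR in
 * SRR0/SRR1 and redirect execution to the interrupt vector with the
 * guest's interrupt MSR, preserving the transactional state and
 * applying the LPCR[AIL]=3 offset where applicable.
 */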
static void inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
{
	unsigned long msr, pc, new_msr, new_pc;

	msr = kvmppc_get_msr(vcpu);
	pc = kvmppc_get_pc(vcpu);
	new_msr = vcpu->arch.intr_msr;
	new_pc = vec;

	/* If transactional, change to suspend mode on IRQ delivery */
	if (MSR_TM_TRANSACTIONAL(msr))
		new_msr |= MSR_TS_S;
	else
		new_msr |= msr & MSR_TS_MASK;

	/*
	 * Perform MSR and PC adjustment for LPCR[AIL]=3 if it is set and
	 * applicable. AIL=2 is not supported.
	 *
	 * AIL does not apply to SRESET, MCE, or HMI (which is never
	 * delivered to the guest), and does not apply if IR=0 or DR=0.
	 */
	if (vec != BOOK3S_INTERRUPT_SYSTEM_RESET &&
	    vec != BOOK3S_INTERRUPT_MACHINE_CHECK &&
	    (vcpu->arch.vcore->lpcr & LPCR_AIL) == LPCR_AIL_3 &&
	    (msr & (MSR_IR|MSR_DR)) == (MSR_IR|MSR_DR)) {
		new_msr |= MSR_IR | MSR_DR;
		new_pc += 0xC000000000004000ULL;
	}

	kvmppc_set_srr0(vcpu, pc);
	kvmppc_set_srr1(vcpu, (msr & SRR1_MSR_BITS) | srr1_flags);
	kvmppc_set_pc(vcpu, new_pc);
	vcpu->arch.shregs.msr = new_msr;
}

void kvmppc_inject_interrupt_hv(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
{
	inject_interrupt(vcpu, vec, srr1_flags);
	kvmppc_end_cede(vcpu);
}
EXPORT_SYMBOL_GPL(kvmppc_inject_interrupt_hv);

/*
 * Is there a PRIV_DOORBELL pending for the guest (on POWER9)?
 * Can we inject a Decrementer or an External interrupt?
 */
void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu)
{
	int ext;
	unsigned long lpcr;

	WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300));

	/* Insert EXTERNAL bit into LPCR at the MER bit position */
	ext = (vcpu->arch.pending_exceptions >> BOOK3S_IRQPRIO_EXTERNAL) & 1;
	lpcr = mfspr(SPRN_LPCR);
	lpcr |= ext << LPCR_MER_SH;
	mtspr(SPRN_LPCR, lpcr);
	isync();

	if (vcpu->arch.shregs.msr & MSR_EE) {
		if (ext) {
			inject_interrupt(vcpu, BOOK3S_INTERRUPT_EXTERNAL, 0);
		} else {
			long int dec = mfspr(SPRN_DEC);
			if (!(lpcr & LPCR_LD))
				dec = (int) dec;
			if (dec < 0)
				inject_interrupt(vcpu,
					BOOK3S_INTERRUPT_DECREMENTER, 0);
		}
	}

	if (vcpu->arch.doorbell_request) {
		mtspr(SPRN_DPDES, 1);
		vcpu->arch.vcore->dpdes = 1;
		smp_wmb();
		vcpu->arch.doorbell_request = 0;
	}
}

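/*
 * Flush guest translations from the TLB on this CPU by issuing one
 * tlbiel per TLB set.
 */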
static void flush_guest_tlb(struct kvm *kvm)
{
	unsigned long rb, set;

	rb = PPC_BIT(52);	/* IS = 2 */
	for (set = 0; set < kvm->arch.tlb_sets; ++set) {
		/* R=0 PRS=0 RIC=0 */
		asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
			     : : "r" (rb), "i" (0), "i" (0), "i" (0),
			       "r" (0) : "memory");
		rb += PPC_BIT(51);	/* increment set number */
	}
	asm volatile("ptesync": : :"memory");
}

void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu)
{
	if (cpumask_test_cpu(pcpu, &kvm->arch.need_tlb_flush)) {
		flush_guest_tlb(kvm);

		/* Clear the bit after the TLB flush */
		cpumask_clear_cpu(pcpu, &kvm->arch.need_tlb_flush);
	}
}
EXPORT_SYMBOL_GPL(kvmppc_check_need_tlb_flush);