1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
4  * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
5  *
6  * Authors:
7  *    Paul Mackerras <paulus@au1.ibm.com>
8  *    Alexander Graf <agraf@suse.de>
9  *    Kevin Wolf <mail@kevin-wolf.de>
10  *
11  * Description: KVM functions specific to running on Book 3S
12  * processors in hypervisor mode (specifically POWER7 and later).
13  *
14  * This file is derived from arch/powerpc/kvm/book3s.c,
15  * by Alexander Graf <agraf@suse.de>.
16  */
17 
18 #include <linux/kvm_host.h>
19 #include <linux/kernel.h>
20 #include <linux/err.h>
21 #include <linux/slab.h>
22 #include <linux/preempt.h>
23 #include <linux/sched/signal.h>
24 #include <linux/sched/stat.h>
25 #include <linux/delay.h>
26 #include <linux/export.h>
27 #include <linux/fs.h>
28 #include <linux/anon_inodes.h>
29 #include <linux/cpu.h>
30 #include <linux/cpumask.h>
31 #include <linux/spinlock.h>
32 #include <linux/page-flags.h>
33 #include <linux/srcu.h>
34 #include <linux/miscdevice.h>
35 #include <linux/debugfs.h>
36 #include <linux/gfp.h>
37 #include <linux/vmalloc.h>
38 #include <linux/highmem.h>
39 #include <linux/hugetlb.h>
40 #include <linux/kvm_irqfd.h>
41 #include <linux/irqbypass.h>
42 #include <linux/module.h>
43 #include <linux/compiler.h>
44 #include <linux/of.h>
45 
46 #include <asm/ftrace.h>
47 #include <asm/reg.h>
48 #include <asm/ppc-opcode.h>
49 #include <asm/asm-prototypes.h>
50 #include <asm/archrandom.h>
51 #include <asm/debug.h>
52 #include <asm/disassemble.h>
53 #include <asm/cputable.h>
54 #include <asm/cacheflush.h>
55 #include <linux/uaccess.h>
56 #include <asm/io.h>
57 #include <asm/kvm_ppc.h>
58 #include <asm/kvm_book3s.h>
59 #include <asm/mmu_context.h>
60 #include <asm/lppaca.h>
61 #include <asm/processor.h>
62 #include <asm/cputhreads.h>
63 #include <asm/page.h>
64 #include <asm/hvcall.h>
65 #include <asm/switch_to.h>
66 #include <asm/smp.h>
67 #include <asm/dbell.h>
68 #include <asm/hmi.h>
69 #include <asm/pnv-pci.h>
70 #include <asm/mmu.h>
71 #include <asm/opal.h>
72 #include <asm/xics.h>
73 #include <asm/xive.h>
74 #include <asm/hw_breakpoint.h>
75 #include <asm/kvm_book3s_uvmem.h>
76 #include <asm/ultravisor.h>
77 #include <asm/dtl.h>
78 
79 #include "book3s.h"
80 
81 #define CREATE_TRACE_POINTS
82 #include "trace_hv.h"
83 
84 /* #define EXIT_DEBUG */
85 /* #define EXIT_DEBUG_SIMPLE */
86 /* #define EXIT_DEBUG_INT */
87 
88 /* Used to indicate that a guest page fault needs to be handled */
89 #define RESUME_PAGE_FAULT	(RESUME_GUEST | RESUME_FLAG_ARCH1)
90 /* Used to indicate that a guest passthrough interrupt needs to be handled */
91 #define RESUME_PASSTHROUGH	(RESUME_GUEST | RESUME_FLAG_ARCH2)
92 
93 /* Used as a "null" value for timebase values */
94 #define TB_NIL	(~(u64)0)
95 
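/*
 * Bitmap of hcalls enabled by default for new VMs.  Hypercall numbers
 * are multiples of 4, so the bitmap is indexed by opcode / 4.
 */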
96 static DECLARE_BITMAP(default_enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
97 
98 static int dynamic_mt_modes = 6;
99 module_param(dynamic_mt_modes, int, 0644);
100 MODULE_PARM_DESC(dynamic_mt_modes, "Set of allowed dynamic micro-threading modes: 0 (= none), 2, 4, or 6 (= 2 or 4)");
101 static int target_smt_mode;
102 module_param(target_smt_mode, int, 0644);
103 MODULE_PARM_DESC(target_smt_mode, "Target threads per core (0 = max)");
104 
105 static bool indep_threads_mode = true;
106 module_param(indep_threads_mode, bool, S_IRUGO | S_IWUSR);
107 MODULE_PARM_DESC(indep_threads_mode, "Independent-threads mode (only on POWER9)");
108 
109 static bool one_vm_per_core;
110 module_param(one_vm_per_core, bool, S_IRUGO | S_IWUSR);
111 MODULE_PARM_DESC(one_vm_per_core, "Only run vCPUs from the same VM on a core (requires indep_threads_mode=N)");
112 
113 #ifdef CONFIG_KVM_XICS
114 static const struct kernel_param_ops module_param_ops = {
115 	.set = param_set_int,
116 	.get = param_get_int,
117 };
118 
119 module_param_cb(kvm_irq_bypass, &module_param_ops, &kvm_irq_bypass, 0644);
120 MODULE_PARM_DESC(kvm_irq_bypass, "Bypass passthrough interrupt optimization");
121 
122 module_param_cb(h_ipi_redirect, &module_param_ops, &h_ipi_redirect, 0644);
123 MODULE_PARM_DESC(h_ipi_redirect, "Redirect H_IPI wakeup to a free host core");
124 #endif
125 
126 /* If set, guests are allowed to create and control nested guests */
127 static bool nested = true;
128 module_param(nested, bool, S_IRUGO | S_IWUSR);
129 MODULE_PARM_DESC(nested, "Enable nested virtualization (only on POWER9)");
130 
131 static inline bool nesting_enabled(struct kvm *kvm)
132 {
133 	return kvm->arch.nested_enable && kvm_is_radix(kvm);
134 }
135 
136 /* If set, the threads on each CPU core have to be in the same MMU mode */
137 static bool no_mixing_hpt_and_radix;
138 
139 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
140 
141 /*
142  * RWMR values for POWER8.  These control the rate at which PURR
143  * and SPURR count and should be set according to the number of
144  * online threads in the vcore being run.
145  */
146 #define RWMR_RPA_P8_1THREAD	0x164520C62609AECAUL
147 #define RWMR_RPA_P8_2THREAD	0x7FFF2908450D8DA9UL
148 #define RWMR_RPA_P8_3THREAD	0x164520C62609AECAUL
149 #define RWMR_RPA_P8_4THREAD	0x199A421245058DA9UL
150 #define RWMR_RPA_P8_5THREAD	0x164520C62609AECAUL
151 #define RWMR_RPA_P8_6THREAD	0x164520C62609AECAUL
152 #define RWMR_RPA_P8_7THREAD	0x164520C62609AECAUL
153 #define RWMR_RPA_P8_8THREAD	0x164520C62609AECAUL
154 
155 static unsigned long p8_rwmr_values[MAX_SMT_THREADS + 1] = {
156 	RWMR_RPA_P8_1THREAD,
157 	RWMR_RPA_P8_1THREAD,
158 	RWMR_RPA_P8_2THREAD,
159 	RWMR_RPA_P8_3THREAD,
160 	RWMR_RPA_P8_4THREAD,
161 	RWMR_RPA_P8_5THREAD,
162 	RWMR_RPA_P8_6THREAD,
163 	RWMR_RPA_P8_7THREAD,
164 	RWMR_RPA_P8_8THREAD,
165 };
166 
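/*
 * Return the next non-NULL entry in the vcore's runnable_threads array,
 * starting just after index *ip, and update *ip to the slot found.
 */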
167 static inline struct kvm_vcpu *next_runnable_thread(struct kvmppc_vcore *vc,
168 		int *ip)
169 {
170 	int i = *ip;
171 	struct kvm_vcpu *vcpu;
172 
173 	while (++i < MAX_SMT_THREADS) {
174 		vcpu = READ_ONCE(vc->runnable_threads[i]);
175 		if (vcpu) {
176 			*ip = i;
177 			return vcpu;
178 		}
179 	}
180 	return NULL;
181 }
182 
183 /* Used to traverse the list of runnable threads for a given vcore */
184 #define for_each_runnable_thread(i, vcpu, vc) \
185 	for (i = -1; (vcpu = next_runnable_thread(vc, &i)); )
186 
187 static bool kvmppc_ipi_thread(int cpu)
188 {
189 	unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
190 
191 	/* If we're a nested hypervisor, fall back to ordinary IPIs for now */
192 	if (kvmhv_on_pseries())
193 		return false;
194 
195 	/* On POWER9 we can use msgsnd to IPI any cpu */
196 	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
197 		msg |= get_hard_smp_processor_id(cpu);
198 		smp_mb();
199 		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
200 		return true;
201 	}
202 
203 	/* On POWER8 for IPIs to threads in the same core, use msgsnd */
204 	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
205 		preempt_disable();
206 		if (cpu_first_thread_sibling(cpu) ==
207 		    cpu_first_thread_sibling(smp_processor_id())) {
208 			msg |= cpu_thread_in_core(cpu);
209 			smp_mb();
210 			__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
211 			preempt_enable();
212 			return true;
213 		}
214 		preempt_enable();
215 	}
216 
217 #if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
218 	if (cpu >= 0 && cpu < nr_cpu_ids) {
219 		if (paca_ptrs[cpu]->kvm_hstate.xics_phys) {
220 			xics_wake_cpu(cpu);
221 			return true;
222 		}
223 		opal_int_set_mfrr(get_hard_smp_processor_id(cpu), IPI_PRIORITY);
224 		return true;
225 	}
226 #endif
227 
228 	return false;
229 }
230 
231 static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
232 {
233 	int cpu;
234 	struct rcuwait *waitp;
235 
236 	waitp = kvm_arch_vcpu_get_wait(vcpu);
237 	if (rcuwait_wake_up(waitp))
238 		++vcpu->stat.halt_wakeup;
239 
240 	cpu = READ_ONCE(vcpu->arch.thread_cpu);
241 	if (cpu >= 0 && kvmppc_ipi_thread(cpu))
242 		return;
243 
244 	/* CPU points to the first thread of the core */
245 	cpu = vcpu->cpu;
246 	if (cpu >= 0 && cpu < nr_cpu_ids && cpu_online(cpu))
247 		smp_send_reschedule(cpu);
248 }
249 
250 /*
251  * We use the vcpu_load/put functions to measure stolen time.
252  * Stolen time is counted as time when either the vcpu is able to
253  * run as part of a virtual core, but the task running the vcore
254  * is preempted or sleeping, or when the vcpu needs something done
255  * in the kernel by the task running the vcpu, but that task is
256  * preempted or sleeping.  Those two things have to be counted
257  * separately, since one of the vcpu tasks will take on the job
258  * of running the core, and the other vcpu tasks in the vcore will
259  * sleep waiting for it to do that, but that sleep shouldn't count
260  * as stolen time.
261  *
262  * Hence we accumulate stolen time when the vcpu can run as part of
263  * a vcore using vc->stolen_tb, and the stolen time when the vcpu
264  * needs its task to do other things in the kernel (for example,
265  * service a page fault) in busy_stolen.  We don't accumulate
266  * stolen time for a vcore when it is inactive, or for a vcpu
267  * when it is in state RUNNING or NOTREADY.  NOTREADY is a bit of
268  * a misnomer; it means that the vcpu task is not executing in
269  * the KVM_VCPU_RUN ioctl, i.e. it is in userspace or elsewhere in
270  * the kernel.  We don't have any way of dividing up that time
271  * between time that the vcpu is genuinely stopped, time that
272  * the task is actively working on behalf of the vcpu, and time
273  * that the task is preempted, so we don't count any of it as
274  * stolen.
275  *
276  * Updates to busy_stolen are protected by arch.tbacct_lock;
277  * updates to vc->stolen_tb are protected by the vcore->stoltb_lock
278  * lock.  The stolen times are measured in units of timebase ticks.
279  * (Note that the != TB_NIL checks below are purely defensive;
280  * they should never fail.)
281  */
282 
283 static void kvmppc_core_start_stolen(struct kvmppc_vcore *vc)
284 {
285 	unsigned long flags;
286 
287 	spin_lock_irqsave(&vc->stoltb_lock, flags);
288 	vc->preempt_tb = mftb();
289 	spin_unlock_irqrestore(&vc->stoltb_lock, flags);
290 }
291 
292 static void kvmppc_core_end_stolen(struct kvmppc_vcore *vc)
293 {
294 	unsigned long flags;
295 
296 	spin_lock_irqsave(&vc->stoltb_lock, flags);
297 	if (vc->preempt_tb != TB_NIL) {
298 		vc->stolen_tb += mftb() - vc->preempt_tb;
299 		vc->preempt_tb = TB_NIL;
300 	}
301 	spin_unlock_irqrestore(&vc->stoltb_lock, flags);
302 }
303 
304 static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
305 {
306 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
307 	unsigned long flags;
308 
309 	/*
310 	 * We can test vc->runner without taking the vcore lock,
311 	 * because only this task ever sets vc->runner to this
312 	 * vcpu, and once it is set to this vcpu, only this task
313 	 * ever sets it to NULL.
314 	 */
315 	if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING)
316 		kvmppc_core_end_stolen(vc);
317 
318 	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
319 	if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST &&
320 	    vcpu->arch.busy_preempt != TB_NIL) {
321 		vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt;
322 		vcpu->arch.busy_preempt = TB_NIL;
323 	}
324 	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
325 }
326 
327 static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
328 {
329 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
330 	unsigned long flags;
331 
332 	if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING)
333 		kvmppc_core_start_stolen(vc);
334 
335 	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
336 	if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
337 		vcpu->arch.busy_preempt = mftb();
338 	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
339 }
340 
341 static void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr)
342 {
343 	vcpu->arch.pvr = pvr;
344 }
345 
346 /* Dummy value used in computing PCR value below */
347 #define PCR_ARCH_31    (PCR_ARCH_300 << 1)
348 
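/*
 * Set the guest's architecture compatibility level by computing the PCR
 * bits needed to make the vcore behave like the requested processor
 * generation (arch_compat == 0 means the host's own level).
 */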
349 static int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
350 {
351 	unsigned long host_pcr_bit = 0, guest_pcr_bit = 0;
352 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
353 
354 	/* We can (emulate) our own architecture version and anything older */
355 	if (cpu_has_feature(CPU_FTR_ARCH_31))
356 		host_pcr_bit = PCR_ARCH_31;
357 	else if (cpu_has_feature(CPU_FTR_ARCH_300))
358 		host_pcr_bit = PCR_ARCH_300;
359 	else if (cpu_has_feature(CPU_FTR_ARCH_207S))
360 		host_pcr_bit = PCR_ARCH_207;
361 	else if (cpu_has_feature(CPU_FTR_ARCH_206))
362 		host_pcr_bit = PCR_ARCH_206;
363 	else
364 		host_pcr_bit = PCR_ARCH_205;
365 
366 	/* Determine lowest PCR bit needed to run guest in given PVR level */
367 	guest_pcr_bit = host_pcr_bit;
368 	if (arch_compat) {
369 		switch (arch_compat) {
370 		case PVR_ARCH_205:
371 			guest_pcr_bit = PCR_ARCH_205;
372 			break;
373 		case PVR_ARCH_206:
374 		case PVR_ARCH_206p:
375 			guest_pcr_bit = PCR_ARCH_206;
376 			break;
377 		case PVR_ARCH_207:
378 			guest_pcr_bit = PCR_ARCH_207;
379 			break;
380 		case PVR_ARCH_300:
381 			guest_pcr_bit = PCR_ARCH_300;
382 			break;
383 		case PVR_ARCH_31:
384 			guest_pcr_bit = PCR_ARCH_31;
385 			break;
386 		default:
387 			return -EINVAL;
388 		}
389 	}
390 
391 	/* Check requested PCR bits don't exceed our capabilities */
392 	if (guest_pcr_bit > host_pcr_bit)
393 		return -EINVAL;
394 
395 	spin_lock(&vc->lock);
396 	vc->arch_compat = arch_compat;
397 	/*
398 	 * Set all PCR bits for which guest_pcr_bit <= bit < host_pcr_bit
399 	 * Also set all reserved PCR bits
400 	 */
401 	vc->pcr = (host_pcr_bit - guest_pcr_bit) | PCR_MASK;
402 	spin_unlock(&vc->lock);
403 
404 	return 0;
405 }
406 
407 static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
408 {
409 	int r;
410 
411 	pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id);
412 	pr_err("pc  = %.16lx  msr = %.16llx  trap = %x\n",
413 	       vcpu->arch.regs.nip, vcpu->arch.shregs.msr, vcpu->arch.trap);
414 	for (r = 0; r < 16; ++r)
415 		pr_err("r%2d = %.16lx  r%d = %.16lx\n",
416 		       r, kvmppc_get_gpr(vcpu, r),
417 		       r+16, kvmppc_get_gpr(vcpu, r+16));
418 	pr_err("ctr = %.16lx  lr  = %.16lx\n",
419 	       vcpu->arch.regs.ctr, vcpu->arch.regs.link);
420 	pr_err("srr0 = %.16llx srr1 = %.16llx\n",
421 	       vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1);
422 	pr_err("sprg0 = %.16llx sprg1 = %.16llx\n",
423 	       vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1);
424 	pr_err("sprg2 = %.16llx sprg3 = %.16llx\n",
425 	       vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3);
426 	pr_err("cr = %.8lx  xer = %.16lx  dsisr = %.8x\n",
427 	       vcpu->arch.regs.ccr, vcpu->arch.regs.xer, vcpu->arch.shregs.dsisr);
428 	pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar);
429 	pr_err("fault dar = %.16lx dsisr = %.8x\n",
430 	       vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
431 	pr_err("SLB (%d entries):\n", vcpu->arch.slb_max);
432 	for (r = 0; r < vcpu->arch.slb_max; ++r)
433 		pr_err("  ESID = %.16llx VSID = %.16llx\n",
434 		       vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv);
435 	pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.8x\n",
436 	       vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1,
437 	       vcpu->arch.last_inst);
438 }
439 
440 static struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
441 {
442 	return kvm_get_vcpu_by_id(kvm, id);
443 }
444 
445 static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
446 {
447 	vpa->__old_status |= LPPACA_OLD_SHARED_PROC;
448 	vpa->yield_count = cpu_to_be32(1);
449 }
450 
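/*
 * Record a pending (re)registration of a VPA-style area; the actual
 * pinning of the guest page is deferred to kvmppc_update_vpa().
 */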
451 static int set_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *v,
452 		   unsigned long addr, unsigned long len)
453 {
454 	/* check address is cacheline aligned */
455 	if (addr & (L1_CACHE_BYTES - 1))
456 		return -EINVAL;
457 	spin_lock(&vcpu->arch.vpa_update_lock);
458 	if (v->next_gpa != addr || v->len != len) {
459 		v->next_gpa = addr;
460 		v->len = addr ? len : 0;
461 		v->update_pending = 1;
462 	}
463 	spin_unlock(&vcpu->arch.vpa_update_lock);
464 	return 0;
465 }
466 
467 /* Length for a per-processor buffer is passed in at offset 4 in the buffer */
468 struct reg_vpa {
469 	u32 dummy;
470 	union {
471 		__be16 hword;
472 		__be32 word;
473 	} length;
474 };
475 
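/*
 * A VPA-style area counts as registered if an update to a non-zero
 * address is pending, or if it is currently pinned.
 */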
476 static int vpa_is_registered(struct kvmppc_vpa *vpap)
477 {
478 	if (vpap->update_pending)
479 		return vpap->next_gpa != 0;
480 	return vpap->pinned_addr != NULL;
481 }
482 
483 static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
484 				       unsigned long flags,
485 				       unsigned long vcpuid, unsigned long vpa)
486 {
487 	struct kvm *kvm = vcpu->kvm;
488 	unsigned long len, nb;
489 	void *va;
490 	struct kvm_vcpu *tvcpu;
491 	int err;
492 	int subfunc;
493 	struct kvmppc_vpa *vpap;
494 
495 	tvcpu = kvmppc_find_vcpu(kvm, vcpuid);
496 	if (!tvcpu)
497 		return H_PARAMETER;
498 
499 	subfunc = (flags >> H_VPA_FUNC_SHIFT) & H_VPA_FUNC_MASK;
500 	if (subfunc == H_VPA_REG_VPA || subfunc == H_VPA_REG_DTL ||
501 	    subfunc == H_VPA_REG_SLB) {
502 		/* Registering new area - address must be cache-line aligned */
503 		if ((vpa & (L1_CACHE_BYTES - 1)) || !vpa)
504 			return H_PARAMETER;
505 
506 		/* convert logical addr to kernel addr and read length */
507 		va = kvmppc_pin_guest_page(kvm, vpa, &nb);
508 		if (va == NULL)
509 			return H_PARAMETER;
510 		if (subfunc == H_VPA_REG_VPA)
511 			len = be16_to_cpu(((struct reg_vpa *)va)->length.hword);
512 		else
513 			len = be32_to_cpu(((struct reg_vpa *)va)->length.word);
514 		kvmppc_unpin_guest_page(kvm, va, vpa, false);
515 
516 		/* Check length */
517 		if (len > nb || len < sizeof(struct reg_vpa))
518 			return H_PARAMETER;
519 	} else {
520 		vpa = 0;
521 		len = 0;
522 	}
523 
524 	err = H_PARAMETER;
525 	vpap = NULL;
526 	spin_lock(&tvcpu->arch.vpa_update_lock);
527 
528 	switch (subfunc) {
529 	case H_VPA_REG_VPA:		/* register VPA */
530 		/*
531 		 * The size of our lppaca is 1kB because of the way we align
532 		 * it for the guest to avoid crossing a 4kB boundary. We only
533 		 * use 640 bytes of the structure though, so we should accept
534 		 * clients that set a size of 640.
535 		 */
536 		BUILD_BUG_ON(sizeof(struct lppaca) != 640);
537 		if (len < sizeof(struct lppaca))
538 			break;
539 		vpap = &tvcpu->arch.vpa;
540 		err = 0;
541 		break;
542 
543 	case H_VPA_REG_DTL:		/* register DTL */
544 		if (len < sizeof(struct dtl_entry))
545 			break;
546 		len -= len % sizeof(struct dtl_entry);
547 
548 		/* Check that they have previously registered a VPA */
549 		err = H_RESOURCE;
550 		if (!vpa_is_registered(&tvcpu->arch.vpa))
551 			break;
552 
553 		vpap = &tvcpu->arch.dtl;
554 		err = 0;
555 		break;
556 
557 	case H_VPA_REG_SLB:		/* register SLB shadow buffer */
558 		/* Check that they have previously registered a VPA */
559 		err = H_RESOURCE;
560 		if (!vpa_is_registered(&tvcpu->arch.vpa))
561 			break;
562 
563 		vpap = &tvcpu->arch.slb_shadow;
564 		err = 0;
565 		break;
566 
567 	case H_VPA_DEREG_VPA:		/* deregister VPA */
568 		/* Check they don't still have a DTL or SLB buf registered */
569 		err = H_RESOURCE;
570 		if (vpa_is_registered(&tvcpu->arch.dtl) ||
571 		    vpa_is_registered(&tvcpu->arch.slb_shadow))
572 			break;
573 
574 		vpap = &tvcpu->arch.vpa;
575 		err = 0;
576 		break;
577 
578 	case H_VPA_DEREG_DTL:		/* deregister DTL */
579 		vpap = &tvcpu->arch.dtl;
580 		err = 0;
581 		break;
582 
583 	case H_VPA_DEREG_SLB:		/* deregister SLB shadow buffer */
584 		vpap = &tvcpu->arch.slb_shadow;
585 		err = 0;
586 		break;
587 	}
588 
589 	if (vpap) {
590 		vpap->next_gpa = vpa;
591 		vpap->len = len;
592 		vpap->update_pending = 1;
593 	}
594 
595 	spin_unlock(&tvcpu->arch.vpa_update_lock);
596 
597 	return err;
598 }
599 
600 static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap)
601 {
602 	struct kvm *kvm = vcpu->kvm;
603 	void *va;
604 	unsigned long nb;
605 	unsigned long gpa;
606 
607 	/*
608 	 * We need to pin the page pointed to by vpap->next_gpa,
609 	 * but we can't call kvmppc_pin_guest_page under the lock
610 	 * as it does get_user_pages() and down_read().  So we
611 	 * have to drop the lock, pin the page, then get the lock
612 	 * again and check that a new area didn't get registered
613 	 * in the meantime.
614 	 */
615 	for (;;) {
616 		gpa = vpap->next_gpa;
617 		spin_unlock(&vcpu->arch.vpa_update_lock);
618 		va = NULL;
619 		nb = 0;
620 		if (gpa)
621 			va = kvmppc_pin_guest_page(kvm, gpa, &nb);
622 		spin_lock(&vcpu->arch.vpa_update_lock);
623 		if (gpa == vpap->next_gpa)
624 			break;
625 		/* sigh... unpin that one and try again */
626 		if (va)
627 			kvmppc_unpin_guest_page(kvm, va, gpa, false);
628 	}
629 
630 	vpap->update_pending = 0;
631 	if (va && nb < vpap->len) {
632 		/*
633 		 * If it's now too short, it must be that userspace
634 		 * has changed the mappings underlying guest memory,
635 		 * so unregister the region.
636 		 */
637 		kvmppc_unpin_guest_page(kvm, va, gpa, false);
638 		va = NULL;
639 	}
640 	if (vpap->pinned_addr)
641 		kvmppc_unpin_guest_page(kvm, vpap->pinned_addr, vpap->gpa,
642 					vpap->dirty);
643 	vpap->gpa = gpa;
644 	vpap->pinned_addr = va;
645 	vpap->dirty = false;
646 	if (va)
647 		vpap->pinned_end = va + vpap->len;
648 }
649 
650 static void kvmppc_update_vpas(struct kvm_vcpu *vcpu)
651 {
652 	if (!(vcpu->arch.vpa.update_pending ||
653 	      vcpu->arch.slb_shadow.update_pending ||
654 	      vcpu->arch.dtl.update_pending))
655 		return;
656 
657 	spin_lock(&vcpu->arch.vpa_update_lock);
658 	if (vcpu->arch.vpa.update_pending) {
659 		kvmppc_update_vpa(vcpu, &vcpu->arch.vpa);
660 		if (vcpu->arch.vpa.pinned_addr)
661 			init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
662 	}
663 	if (vcpu->arch.dtl.update_pending) {
664 		kvmppc_update_vpa(vcpu, &vcpu->arch.dtl);
665 		vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr;
666 		vcpu->arch.dtl_index = 0;
667 	}
668 	if (vcpu->arch.slb_shadow.update_pending)
669 		kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow);
670 	spin_unlock(&vcpu->arch.vpa_update_lock);
671 }
672 
673 /*
674  * Return the accumulated stolen time for the vcore up until `now'.
675  * The caller should hold the vcore lock.
676  */
677 static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now)
678 {
679 	u64 p;
680 	unsigned long flags;
681 
682 	spin_lock_irqsave(&vc->stoltb_lock, flags);
683 	p = vc->stolen_tb;
684 	if (vc->vcore_state != VCORE_INACTIVE &&
685 	    vc->preempt_tb != TB_NIL)
686 		p += now - vc->preempt_tb;
687 	spin_unlock_irqrestore(&vc->stoltb_lock, flags);
688 	return p;
689 }
690 
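/*
 * Write a dispatch trace log entry for this vcpu, charging it with the
 * stolen time accumulated since the last dispatch, and advance the
 * dtl_idx the guest sees in its VPA.
 */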
691 static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
692 				    struct kvmppc_vcore *vc)
693 {
694 	struct dtl_entry *dt;
695 	struct lppaca *vpa;
696 	unsigned long stolen;
697 	unsigned long core_stolen;
698 	u64 now;
699 	unsigned long flags;
700 
701 	dt = vcpu->arch.dtl_ptr;
702 	vpa = vcpu->arch.vpa.pinned_addr;
703 	now = mftb();
704 	core_stolen = vcore_stolen_time(vc, now);
705 	stolen = core_stolen - vcpu->arch.stolen_logged;
706 	vcpu->arch.stolen_logged = core_stolen;
707 	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
708 	stolen += vcpu->arch.busy_stolen;
709 	vcpu->arch.busy_stolen = 0;
710 	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
711 	if (!dt || !vpa)
712 		return;
713 	memset(dt, 0, sizeof(struct dtl_entry));
714 	dt->dispatch_reason = 7;
715 	dt->processor_id = cpu_to_be16(vc->pcpu + vcpu->arch.ptid);
716 	dt->timebase = cpu_to_be64(now + vc->tb_offset);
717 	dt->enqueue_to_dispatch_time = cpu_to_be32(stolen);
718 	dt->srr0 = cpu_to_be64(kvmppc_get_pc(vcpu));
719 	dt->srr1 = cpu_to_be64(vcpu->arch.shregs.msr);
720 	++dt;
721 	if (dt == vcpu->arch.dtl.pinned_end)
722 		dt = vcpu->arch.dtl.pinned_addr;
723 	vcpu->arch.dtl_ptr = dt;
724 	/* order writing *dt vs. writing vpa->dtl_idx */
725 	smp_wmb();
726 	vpa->dtl_idx = cpu_to_be64(++vcpu->arch.dtl_index);
727 	vcpu->arch.dtl.dirty = true;
728 }
729 
730 /* See if there is a doorbell interrupt pending for a vcpu */
731 static bool kvmppc_doorbell_pending(struct kvm_vcpu *vcpu)
732 {
733 	int thr;
734 	struct kvmppc_vcore *vc;
735 
736 	if (vcpu->arch.doorbell_request)
737 		return true;
738 	/*
739 	 * Ensure that the read of vcore->dpdes comes after the read
740 	 * of vcpu->doorbell_request.  This barrier matches the
741 	 * smp_wmb() in kvmppc_guest_entry_inject().
742 	 */
743 	smp_rmb();
744 	vc = vcpu->arch.vcore;
745 	thr = vcpu->vcpu_id - vc->first_vcpuid;
746 	return !!(vc->dpdes & (1 << thr));
747 }
748 
749 static bool kvmppc_power8_compatible(struct kvm_vcpu *vcpu)
750 {
751 	if (vcpu->arch.vcore->arch_compat >= PVR_ARCH_207)
752 		return true;
753 	if ((!vcpu->arch.vcore->arch_compat) &&
754 	    cpu_has_feature(CPU_FTR_ARCH_207S))
755 		return true;
756 	return false;
757 }
758 
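/*
 * Handle the H_SET_MODE resources emulated in the kernel (CIABR and
 * DAWR0); anything we can't handle here returns H_TOO_HARD so the
 * hcall is passed up to userspace.
 */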
759 static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags,
760 			     unsigned long resource, unsigned long value1,
761 			     unsigned long value2)
762 {
763 	switch (resource) {
764 	case H_SET_MODE_RESOURCE_SET_CIABR:
765 		if (!kvmppc_power8_compatible(vcpu))
766 			return H_P2;
767 		if (value2)
768 			return H_P4;
769 		if (mflags)
770 			return H_UNSUPPORTED_FLAG_START;
771 		/* Guests can't breakpoint the hypervisor */
772 		if ((value1 & CIABR_PRIV) == CIABR_PRIV_HYPER)
773 			return H_P3;
774 		vcpu->arch.ciabr  = value1;
775 		return H_SUCCESS;
776 	case H_SET_MODE_RESOURCE_SET_DAWR0:
777 		if (!kvmppc_power8_compatible(vcpu))
778 			return H_P2;
779 		if (!ppc_breakpoint_available())
780 			return H_P2;
781 		if (mflags)
782 			return H_UNSUPPORTED_FLAG_START;
783 		if (value2 & DABRX_HYP)
784 			return H_P4;
785 		vcpu->arch.dawr  = value1;
786 		vcpu->arch.dawrx = value2;
787 		return H_SUCCESS;
788 	case H_SET_MODE_RESOURCE_ADDR_TRANS_MODE:
789 		/* KVM does not support mflags=2 (AIL=2) */
790 		if (mflags != 0 && mflags != 3)
791 			return H_UNSUPPORTED_FLAG_START;
792 		return H_TOO_HARD;
793 	default:
794 		return H_TOO_HARD;
795 	}
796 }
797 
798 /* Copy guest memory in place - must reside within a single memslot */
799 static int kvmppc_copy_guest(struct kvm *kvm, gpa_t to, gpa_t from,
800 				  unsigned long len)
801 {
802 	struct kvm_memory_slot *to_memslot = NULL;
803 	struct kvm_memory_slot *from_memslot = NULL;
804 	unsigned long to_addr, from_addr;
805 	int r;
806 
807 	/* Get HPA for from address */
808 	from_memslot = gfn_to_memslot(kvm, from >> PAGE_SHIFT);
809 	if (!from_memslot)
810 		return -EFAULT;
811 	if ((from + len) >= ((from_memslot->base_gfn + from_memslot->npages)
812 			     << PAGE_SHIFT))
813 		return -EINVAL;
814 	from_addr = gfn_to_hva_memslot(from_memslot, from >> PAGE_SHIFT);
815 	if (kvm_is_error_hva(from_addr))
816 		return -EFAULT;
817 	from_addr |= (from & (PAGE_SIZE - 1));
818 
819 	/* Get HPA for to address */
820 	to_memslot = gfn_to_memslot(kvm, to >> PAGE_SHIFT);
821 	if (!to_memslot)
822 		return -EFAULT;
823 	if ((to + len) >= ((to_memslot->base_gfn + to_memslot->npages)
824 			   << PAGE_SHIFT))
825 		return -EINVAL;
826 	to_addr = gfn_to_hva_memslot(to_memslot, to >> PAGE_SHIFT);
827 	if (kvm_is_error_hva(to_addr))
828 		return -EFAULT;
829 	to_addr |= (to & (PAGE_SIZE - 1));
830 
831 	/* Perform copy */
832 	r = raw_copy_in_user((void __user *)to_addr, (void __user *)from_addr,
833 			     len);
834 	if (r)
835 		return -EFAULT;
836 	mark_page_dirty(kvm, to >> PAGE_SHIFT);
837 	return 0;
838 }
839 
840 static long kvmppc_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags,
841 			       unsigned long dest, unsigned long src)
842 {
843 	u64 pg_sz = SZ_4K;		/* 4K page size */
844 	u64 pg_mask = SZ_4K - 1;
845 	int ret;
846 
847 	/* Check for invalid flags (H_PAGE_SET_LOANED covers all CMO flags) */
848 	if (flags & ~(H_ICACHE_INVALIDATE | H_ICACHE_SYNCHRONIZE |
849 		      H_ZERO_PAGE | H_COPY_PAGE | H_PAGE_SET_LOANED))
850 		return H_PARAMETER;
851 
852 	/* dest (and src if copy_page flag set) must be page aligned */
853 	if ((dest & pg_mask) || ((flags & H_COPY_PAGE) && (src & pg_mask)))
854 		return H_PARAMETER;
855 
856 	/* zero and/or copy the page as determined by the flags */
857 	if (flags & H_COPY_PAGE) {
858 		ret = kvmppc_copy_guest(vcpu->kvm, dest, src, pg_sz);
859 		if (ret < 0)
860 			return H_PARAMETER;
861 	} else if (flags & H_ZERO_PAGE) {
862 		ret = kvm_clear_guest(vcpu->kvm, dest, pg_sz);
863 		if (ret < 0)
864 			return H_PARAMETER;
865 	}
866 
867 	/* We can ignore the remaining flags */
868 
869 	return H_SUCCESS;
870 }
871 
872 static int kvm_arch_vcpu_yield_to(struct kvm_vcpu *target)
873 {
874 	struct kvmppc_vcore *vcore = target->arch.vcore;
875 
876 	/*
877 	 * We expect to have been called by the real mode handler
878 	 * (kvmppc_rm_h_confer()) which would have directly returned
879 	 * H_SUCCESS if the source vcore wasn't idle (e.g. if it may
880 	 * have useful work to do and should not confer) so we don't
881 	 * recheck that here.
882 	 */
883 
884 	spin_lock(&vcore->lock);
885 	if (target->arch.state == KVMPPC_VCPU_RUNNABLE &&
886 	    vcore->vcore_state != VCORE_INACTIVE &&
887 	    vcore->runner)
888 		target = vcore->runner;
889 	spin_unlock(&vcore->lock);
890 
891 	return kvm_vcpu_yield_to(target);
892 }
893 
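/* Read the guest's yield count from its pinned VPA, if it has one. */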
894 static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu)
895 {
896 	int yield_count = 0;
897 	struct lppaca *lppaca;
898 
899 	spin_lock(&vcpu->arch.vpa_update_lock);
900 	lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr;
901 	if (lppaca)
902 		yield_count = be32_to_cpu(lppaca->yield_count);
903 	spin_unlock(&vcpu->arch.vpa_update_lock);
904 	return yield_count;
905 }
906 
907 int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
908 {
909 	unsigned long req = kvmppc_get_gpr(vcpu, 3);
910 	unsigned long target, ret = H_SUCCESS;
911 	int yield_count;
912 	struct kvm_vcpu *tvcpu;
913 	int idx, rc;
914 
915 	if (req <= MAX_HCALL_OPCODE &&
916 	    !test_bit(req/4, vcpu->kvm->arch.enabled_hcalls))
917 		return RESUME_HOST;
918 
919 	switch (req) {
920 	case H_CEDE:
921 		break;
922 	case H_PROD:
923 		target = kvmppc_get_gpr(vcpu, 4);
924 		tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
925 		if (!tvcpu) {
926 			ret = H_PARAMETER;
927 			break;
928 		}
929 		tvcpu->arch.prodded = 1;
930 		smp_mb();
931 		if (tvcpu->arch.ceded)
932 			kvmppc_fast_vcpu_kick_hv(tvcpu);
933 		break;
934 	case H_CONFER:
935 		target = kvmppc_get_gpr(vcpu, 4);
936 		if (target == -1)
937 			break;
938 		tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
939 		if (!tvcpu) {
940 			ret = H_PARAMETER;
941 			break;
942 		}
943 		yield_count = kvmppc_get_gpr(vcpu, 5);
944 		if (kvmppc_get_yield_count(tvcpu) != yield_count)
945 			break;
946 		kvm_arch_vcpu_yield_to(tvcpu);
947 		break;
948 	case H_REGISTER_VPA:
949 		ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4),
950 					kvmppc_get_gpr(vcpu, 5),
951 					kvmppc_get_gpr(vcpu, 6));
952 		break;
953 	case H_RTAS:
954 		if (list_empty(&vcpu->kvm->arch.rtas_tokens))
955 			return RESUME_HOST;
956 
957 		idx = srcu_read_lock(&vcpu->kvm->srcu);
958 		rc = kvmppc_rtas_hcall(vcpu);
959 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
960 
961 		if (rc == -ENOENT)
962 			return RESUME_HOST;
963 		else if (rc == 0)
964 			break;
965 
966 		/* Send the error out to userspace via KVM_RUN */
967 		return rc;
968 	case H_LOGICAL_CI_LOAD:
969 		ret = kvmppc_h_logical_ci_load(vcpu);
970 		if (ret == H_TOO_HARD)
971 			return RESUME_HOST;
972 		break;
973 	case H_LOGICAL_CI_STORE:
974 		ret = kvmppc_h_logical_ci_store(vcpu);
975 		if (ret == H_TOO_HARD)
976 			return RESUME_HOST;
977 		break;
978 	case H_SET_MODE:
979 		ret = kvmppc_h_set_mode(vcpu, kvmppc_get_gpr(vcpu, 4),
980 					kvmppc_get_gpr(vcpu, 5),
981 					kvmppc_get_gpr(vcpu, 6),
982 					kvmppc_get_gpr(vcpu, 7));
983 		if (ret == H_TOO_HARD)
984 			return RESUME_HOST;
985 		break;
986 	case H_XIRR:
987 	case H_CPPR:
988 	case H_EOI:
989 	case H_IPI:
990 	case H_IPOLL:
991 	case H_XIRR_X:
992 		if (kvmppc_xics_enabled(vcpu)) {
993 			if (xics_on_xive()) {
994 				ret = H_NOT_AVAILABLE;
995 				return RESUME_GUEST;
996 			}
997 			ret = kvmppc_xics_hcall(vcpu, req);
998 			break;
999 		}
1000 		return RESUME_HOST;
1001 	case H_SET_DABR:
1002 		ret = kvmppc_h_set_dabr(vcpu, kvmppc_get_gpr(vcpu, 4));
1003 		break;
1004 	case H_SET_XDABR:
1005 		ret = kvmppc_h_set_xdabr(vcpu, kvmppc_get_gpr(vcpu, 4),
1006 						kvmppc_get_gpr(vcpu, 5));
1007 		break;
1008 #ifdef CONFIG_SPAPR_TCE_IOMMU
1009 	case H_GET_TCE:
1010 		ret = kvmppc_h_get_tce(vcpu, kvmppc_get_gpr(vcpu, 4),
1011 						kvmppc_get_gpr(vcpu, 5));
1012 		if (ret == H_TOO_HARD)
1013 			return RESUME_HOST;
1014 		break;
1015 	case H_PUT_TCE:
1016 		ret = kvmppc_h_put_tce(vcpu, kvmppc_get_gpr(vcpu, 4),
1017 						kvmppc_get_gpr(vcpu, 5),
1018 						kvmppc_get_gpr(vcpu, 6));
1019 		if (ret == H_TOO_HARD)
1020 			return RESUME_HOST;
1021 		break;
1022 	case H_PUT_TCE_INDIRECT:
1023 		ret = kvmppc_h_put_tce_indirect(vcpu, kvmppc_get_gpr(vcpu, 4),
1024 						kvmppc_get_gpr(vcpu, 5),
1025 						kvmppc_get_gpr(vcpu, 6),
1026 						kvmppc_get_gpr(vcpu, 7));
1027 		if (ret == H_TOO_HARD)
1028 			return RESUME_HOST;
1029 		break;
1030 	case H_STUFF_TCE:
1031 		ret = kvmppc_h_stuff_tce(vcpu, kvmppc_get_gpr(vcpu, 4),
1032 						kvmppc_get_gpr(vcpu, 5),
1033 						kvmppc_get_gpr(vcpu, 6),
1034 						kvmppc_get_gpr(vcpu, 7));
1035 		if (ret == H_TOO_HARD)
1036 			return RESUME_HOST;
1037 		break;
1038 #endif
1039 	case H_RANDOM:
1040 		if (!powernv_get_random_long(&vcpu->arch.regs.gpr[4]))
1041 			ret = H_HARDWARE;
1042 		break;
1043 
1044 	case H_SET_PARTITION_TABLE:
1045 		ret = H_FUNCTION;
1046 		if (nesting_enabled(vcpu->kvm))
1047 			ret = kvmhv_set_partition_table(vcpu);
1048 		break;
1049 	case H_ENTER_NESTED:
1050 		ret = H_FUNCTION;
1051 		if (!nesting_enabled(vcpu->kvm))
1052 			break;
1053 		ret = kvmhv_enter_nested_guest(vcpu);
1054 		if (ret == H_INTERRUPT) {
1055 			kvmppc_set_gpr(vcpu, 3, 0);
1056 			vcpu->arch.hcall_needed = 0;
1057 			return -EINTR;
1058 		} else if (ret == H_TOO_HARD) {
1059 			kvmppc_set_gpr(vcpu, 3, 0);
1060 			vcpu->arch.hcall_needed = 0;
1061 			return RESUME_HOST;
1062 		}
1063 		break;
1064 	case H_TLB_INVALIDATE:
1065 		ret = H_FUNCTION;
1066 		if (nesting_enabled(vcpu->kvm))
1067 			ret = kvmhv_do_nested_tlbie(vcpu);
1068 		break;
1069 	case H_COPY_TOFROM_GUEST:
1070 		ret = H_FUNCTION;
1071 		if (nesting_enabled(vcpu->kvm))
1072 			ret = kvmhv_copy_tofrom_guest_nested(vcpu);
1073 		break;
1074 	case H_PAGE_INIT:
1075 		ret = kvmppc_h_page_init(vcpu, kvmppc_get_gpr(vcpu, 4),
1076 					 kvmppc_get_gpr(vcpu, 5),
1077 					 kvmppc_get_gpr(vcpu, 6));
1078 		break;
1079 	case H_SVM_PAGE_IN:
1080 		ret = H_UNSUPPORTED;
1081 		if (kvmppc_get_srr1(vcpu) & MSR_S)
1082 			ret = kvmppc_h_svm_page_in(vcpu->kvm,
1083 						   kvmppc_get_gpr(vcpu, 4),
1084 						   kvmppc_get_gpr(vcpu, 5),
1085 						   kvmppc_get_gpr(vcpu, 6));
1086 		break;
1087 	case H_SVM_PAGE_OUT:
1088 		ret = H_UNSUPPORTED;
1089 		if (kvmppc_get_srr1(vcpu) & MSR_S)
1090 			ret = kvmppc_h_svm_page_out(vcpu->kvm,
1091 						    kvmppc_get_gpr(vcpu, 4),
1092 						    kvmppc_get_gpr(vcpu, 5),
1093 						    kvmppc_get_gpr(vcpu, 6));
1094 		break;
1095 	case H_SVM_INIT_START:
1096 		ret = H_UNSUPPORTED;
1097 		if (kvmppc_get_srr1(vcpu) & MSR_S)
1098 			ret = kvmppc_h_svm_init_start(vcpu->kvm);
1099 		break;
1100 	case H_SVM_INIT_DONE:
1101 		ret = H_UNSUPPORTED;
1102 		if (kvmppc_get_srr1(vcpu) & MSR_S)
1103 			ret = kvmppc_h_svm_init_done(vcpu->kvm);
1104 		break;
1105 	case H_SVM_INIT_ABORT:
1106 		/*
1107 		 * Even if that call is made by the Ultravisor, the SRR1 value
1108 		 * is the guest context one, with the secure bit clear as it has
1109 		 * not yet been secured. So we can't check it here.
1110 		 * Instead the kvm->arch.secure_guest flag is checked inside
1111 		 * kvmppc_h_svm_init_abort().
1112 		 */
1113 		ret = kvmppc_h_svm_init_abort(vcpu->kvm);
1114 		break;
1115 
1116 	default:
1117 		return RESUME_HOST;
1118 	}
1119 	kvmppc_set_gpr(vcpu, 3, ret);
1120 	vcpu->arch.hcall_needed = 0;
1121 	return RESUME_GUEST;
1122 }
1123 
1124 /*
1125  * Handle H_CEDE in the nested virtualization case where we haven't
1126  * called the real-mode hcall handlers in book3s_hv_rmhandlers.S.
1127  * This has to be done early, not in kvmppc_pseries_do_hcall(), so
1128  * that the cede logic in kvmppc_run_single_vcpu() works properly.
1129  */
1130 static void kvmppc_nested_cede(struct kvm_vcpu *vcpu)
1131 {
1132 	vcpu->arch.shregs.msr |= MSR_EE;
1133 	vcpu->arch.ceded = 1;
1134 	smp_mb();
1135 	if (vcpu->arch.prodded) {
1136 		vcpu->arch.prodded = 0;
1137 		smp_mb();
1138 		vcpu->arch.ceded = 0;
1139 	}
1140 }
1141 
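/*
 * Return non-zero if the given hcall is handled in the kernel for HV
 * guests, either here or in the real-mode handlers.
 */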
1142 static int kvmppc_hcall_impl_hv(unsigned long cmd)
1143 {
1144 	switch (cmd) {
1145 	case H_CEDE:
1146 	case H_PROD:
1147 	case H_CONFER:
1148 	case H_REGISTER_VPA:
1149 	case H_SET_MODE:
1150 	case H_LOGICAL_CI_LOAD:
1151 	case H_LOGICAL_CI_STORE:
1152 #ifdef CONFIG_KVM_XICS
1153 	case H_XIRR:
1154 	case H_CPPR:
1155 	case H_EOI:
1156 	case H_IPI:
1157 	case H_IPOLL:
1158 	case H_XIRR_X:
1159 #endif
1160 	case H_PAGE_INIT:
1161 		return 1;
1162 	}
1163 
1164 	/* See if it's in the real-mode table */
1165 	return kvmppc_hcall_impl_hv_realmode(cmd);
1166 }
1167 
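/*
 * A software-breakpoint-capable debugger is attached: fetch the failing
 * instruction and either report a KVM_EXIT_DEBUG exit for our breakpoint
 * instruction or queue an illegal-instruction program interrupt.
 */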
1168 static int kvmppc_emulate_debug_inst(struct kvm_vcpu *vcpu)
1169 {
1170 	u32 last_inst;
1171 
1172 	if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst) !=
1173 					EMULATE_DONE) {
1174 		/*
1175 		 * Fetch failed, so return to guest and
1176 		 * try executing it again.
1177 		 */
1178 		return RESUME_GUEST;
1179 	}
1180 
1181 	if (last_inst == KVMPPC_INST_SW_BREAKPOINT) {
1182 		vcpu->run->exit_reason = KVM_EXIT_DEBUG;
1183 		vcpu->run->debug.arch.address = kvmppc_get_pc(vcpu);
1184 		return RESUME_HOST;
1185 	} else {
1186 		kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
1187 		return RESUME_GUEST;
1188 	}
1189 }
1190 
1191 static void do_nothing(void *x)
1192 {
1193 }
1194 
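/*
 * Assemble the DPDES value the guest should see by collecting the
 * pending-doorbell state of each sibling vcpu in the emulated core.
 */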
1195 static unsigned long kvmppc_read_dpdes(struct kvm_vcpu *vcpu)
1196 {
1197 	int thr, cpu, pcpu, nthreads;
1198 	struct kvm_vcpu *v;
1199 	unsigned long dpdes;
1200 
1201 	nthreads = vcpu->kvm->arch.emul_smt_mode;
1202 	dpdes = 0;
1203 	cpu = vcpu->vcpu_id & ~(nthreads - 1);
1204 	for (thr = 0; thr < nthreads; ++thr, ++cpu) {
1205 		v = kvmppc_find_vcpu(vcpu->kvm, cpu);
1206 		if (!v)
1207 			continue;
1208 		/*
1209 		 * If the vcpu is currently running on a physical cpu thread,
1210 		 * interrupt it in order to pull it out of the guest briefly,
1211 		 * which will update its vcore->dpdes value.
1212 		 */
1213 		pcpu = READ_ONCE(v->cpu);
1214 		if (pcpu >= 0)
1215 			smp_call_function_single(pcpu, do_nothing, NULL, 1);
1216 		if (kvmppc_doorbell_pending(v))
1217 			dpdes |= 1 << thr;
1218 	}
1219 	return dpdes;
1220 }
1221 
1222 /*
1223  * On POWER9, emulate doorbell-related instructions in order to
1224  * give the guest the illusion of running on a multi-threaded core.
1225  * The instructions emulated are msgsndp, msgclrp, mfspr TIR,
1226  * and mfspr DPDES.
1227  */
1228 static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu)
1229 {
1230 	u32 inst, rb, thr;
1231 	unsigned long arg;
1232 	struct kvm *kvm = vcpu->kvm;
1233 	struct kvm_vcpu *tvcpu;
1234 
1235 	if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst) != EMULATE_DONE)
1236 		return RESUME_GUEST;
1237 	if (get_op(inst) != 31)
1238 		return EMULATE_FAIL;
1239 	rb = get_rb(inst);
1240 	thr = vcpu->vcpu_id & (kvm->arch.emul_smt_mode - 1);
1241 	switch (get_xop(inst)) {
1242 	case OP_31_XOP_MSGSNDP:
1243 		arg = kvmppc_get_gpr(vcpu, rb);
1244 		if (((arg >> 27) & 0xf) != PPC_DBELL_SERVER)
1245 			break;
1246 		arg &= 0x3f;
1247 		if (arg >= kvm->arch.emul_smt_mode)
1248 			break;
1249 		tvcpu = kvmppc_find_vcpu(kvm, vcpu->vcpu_id - thr + arg);
1250 		if (!tvcpu)
1251 			break;
1252 		if (!tvcpu->arch.doorbell_request) {
1253 			tvcpu->arch.doorbell_request = 1;
1254 			kvmppc_fast_vcpu_kick_hv(tvcpu);
1255 		}
1256 		break;
1257 	case OP_31_XOP_MSGCLRP:
1258 		arg = kvmppc_get_gpr(vcpu, rb);
1259 		if (((arg >> 27) & 0xf) != PPC_DBELL_SERVER)
1260 			break;
1261 		vcpu->arch.vcore->dpdes = 0;
1262 		vcpu->arch.doorbell_request = 0;
1263 		break;
1264 	case OP_31_XOP_MFSPR:
1265 		switch (get_sprn(inst)) {
1266 		case SPRN_TIR:
1267 			arg = thr;
1268 			break;
1269 		case SPRN_DPDES:
1270 			arg = kvmppc_read_dpdes(vcpu);
1271 			break;
1272 		default:
1273 			return EMULATE_FAIL;
1274 		}
1275 		kvmppc_set_gpr(vcpu, get_rt(inst), arg);
1276 		break;
1277 	default:
1278 		return EMULATE_FAIL;
1279 	}
1280 	kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
1281 	return RESUME_GUEST;
1282 }
1283 
1284 static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
1285 				 struct task_struct *tsk)
1286 {
1287 	struct kvm_run *run = vcpu->run;
1288 	int r = RESUME_HOST;
1289 
1290 	vcpu->stat.sum_exits++;
1291 
1292 	/*
1293 	 * This can happen if an interrupt occurs in the last stages
1294 	 * of guest entry or the first stages of guest exit (i.e. after
1295 	 * setting paca->kvm_hstate.in_guest to KVM_GUEST_MODE_GUEST_HV
1296 	 * and before setting it to KVM_GUEST_MODE_HOST_HV).
1297 	 * That can happen due to a bug, or due to a machine check
1298 	 * occurring at just the wrong time.
1299 	 */
1300 	if (vcpu->arch.shregs.msr & MSR_HV) {
1301 		printk(KERN_EMERG "KVM trap in HV mode!\n");
1302 		printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
1303 			vcpu->arch.trap, kvmppc_get_pc(vcpu),
1304 			vcpu->arch.shregs.msr);
1305 		kvmppc_dump_regs(vcpu);
1306 		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1307 		run->hw.hardware_exit_reason = vcpu->arch.trap;
1308 		return RESUME_HOST;
1309 	}
1310 	run->exit_reason = KVM_EXIT_UNKNOWN;
1311 	run->ready_for_interrupt_injection = 1;
1312 	switch (vcpu->arch.trap) {
1313 	/* We're good on these - the host merely wanted to get our attention */
1314 	case BOOK3S_INTERRUPT_HV_DECREMENTER:
1315 		vcpu->stat.dec_exits++;
1316 		r = RESUME_GUEST;
1317 		break;
1318 	case BOOK3S_INTERRUPT_EXTERNAL:
1319 	case BOOK3S_INTERRUPT_H_DOORBELL:
1320 	case BOOK3S_INTERRUPT_H_VIRT:
1321 		vcpu->stat.ext_intr_exits++;
1322 		r = RESUME_GUEST;
1323 		break;
1324 	/* SR/HMI/PMI are HV interrupts that host has handled. Resume guest.*/
1325 	case BOOK3S_INTERRUPT_HMI:
1326 	case BOOK3S_INTERRUPT_PERFMON:
1327 	case BOOK3S_INTERRUPT_SYSTEM_RESET:
1328 		r = RESUME_GUEST;
1329 		break;
1330 	case BOOK3S_INTERRUPT_MACHINE_CHECK:
1331 		/* Print the MCE event to host console. */
1332 		machine_check_print_event_info(&vcpu->arch.mce_evt, false, true);
1333 
1334 		/*
1335 		 * If the guest can do FWNMI, exit to userspace so it can
1336 		 * deliver a FWNMI to the guest.
1337 		 * Otherwise we synthesize a machine check for the guest
1338 		 * so that it knows that the machine check occurred.
1339 		 */
1340 		if (!vcpu->kvm->arch.fwnmi_enabled) {
1341 			ulong flags = vcpu->arch.shregs.msr & 0x083c0000;
1342 			kvmppc_core_queue_machine_check(vcpu, flags);
1343 			r = RESUME_GUEST;
1344 			break;
1345 		}
1346 
1347 		/* Exit to userspace with KVM_EXIT_NMI as exit reason */
1348 		run->exit_reason = KVM_EXIT_NMI;
1349 		run->hw.hardware_exit_reason = vcpu->arch.trap;
1350 		/* Clear out the old NMI status from run->flags */
1351 		run->flags &= ~KVM_RUN_PPC_NMI_DISP_MASK;
1352 		/* Now set the NMI status */
1353 		if (vcpu->arch.mce_evt.disposition == MCE_DISPOSITION_RECOVERED)
1354 			run->flags |= KVM_RUN_PPC_NMI_DISP_FULLY_RECOV;
1355 		else
1356 			run->flags |= KVM_RUN_PPC_NMI_DISP_NOT_RECOV;
1357 
1358 		r = RESUME_HOST;
1359 		break;
1360 	case BOOK3S_INTERRUPT_PROGRAM:
1361 	{
1362 		ulong flags;
1363 		/*
1364 		 * Normally program interrupts are delivered directly
1365 		 * to the guest by the hardware, but we can get here
1366 		 * as a result of a hypervisor emulation interrupt
1367 		 * (e40) getting turned into a 700 by BML RTAS.
1368 		 */
1369 		flags = vcpu->arch.shregs.msr & 0x1f0000ull;
1370 		kvmppc_core_queue_program(vcpu, flags);
1371 		r = RESUME_GUEST;
1372 		break;
1373 	}
1374 	case BOOK3S_INTERRUPT_SYSCALL:
1375 	{
1376 		/* hcall - punt to userspace */
1377 		int i;
1378 
1379 		/* hypercall with MSR_PR has already been handled in rmode,
1380 		 * and never reaches here.
1381 		 */
1382 
1383 		run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3);
1384 		for (i = 0; i < 9; ++i)
1385 			run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i);
1386 		run->exit_reason = KVM_EXIT_PAPR_HCALL;
1387 		vcpu->arch.hcall_needed = 1;
1388 		r = RESUME_HOST;
1389 		break;
1390 	}
1391 	/*
1392 	 * We get these next two if the guest accesses a page which it thinks
1393 	 * it has mapped but which is not actually present, either because
1394 	 * it is for an emulated I/O device or because the corresponding
1395 	 * host page has been paged out.  Any other HDSI/HISI interrupts
1396 	 * have been handled already.
1397 	 */
1398 	case BOOK3S_INTERRUPT_H_DATA_STORAGE:
1399 		r = RESUME_PAGE_FAULT;
1400 		break;
1401 	case BOOK3S_INTERRUPT_H_INST_STORAGE:
1402 		vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
1403 		vcpu->arch.fault_dsisr = vcpu->arch.shregs.msr &
1404 			DSISR_SRR1_MATCH_64S;
1405 		if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE)
1406 			vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
1407 		r = RESUME_PAGE_FAULT;
1408 		break;
1409 	/*
1410 	 * This occurs if the guest executes an illegal instruction.
1411 	 * If guest debug is disabled, generate a program interrupt
1412 	 * to the guest. If guest debug is enabled, check whether the
1413 	 * instruction is a software breakpoint and return to the guest
1414 	 * or the host accordingly.
1415 	 */
1416 	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
1417 		if (vcpu->arch.emul_inst != KVM_INST_FETCH_FAILED)
1418 			vcpu->arch.last_inst = kvmppc_need_byteswap(vcpu) ?
1419 				swab32(vcpu->arch.emul_inst) :
1420 				vcpu->arch.emul_inst;
1421 		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) {
1422 			r = kvmppc_emulate_debug_inst(vcpu);
1423 		} else {
1424 			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
1425 			r = RESUME_GUEST;
1426 		}
1427 		break;
1428 	/*
1429 	 * This occurs if the guest (kernel or userspace) does something that
1430 	 * is prohibited by HFSCR.
1431 	 * On POWER9, this could be a doorbell instruction that we need
1432 	 * to emulate.
1433 	 * Otherwise, we just generate a program interrupt to the guest.
1434 	 */
1435 	case BOOK3S_INTERRUPT_H_FAC_UNAVAIL:
1436 		r = EMULATE_FAIL;
1437 		if (((vcpu->arch.hfscr >> 56) == FSCR_MSGP_LG) &&
1438 		    cpu_has_feature(CPU_FTR_ARCH_300))
1439 			r = kvmppc_emulate_doorbell_instr(vcpu);
1440 		if (r == EMULATE_FAIL) {
1441 			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
1442 			r = RESUME_GUEST;
1443 		}
1444 		break;
1445 
1446 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1447 	case BOOK3S_INTERRUPT_HV_SOFTPATCH:
1448 		/*
1449 		 * This occurs for various TM-related instructions that
1450 		 * we need to emulate on POWER9 DD2.2.  We have already
1451 		 * handled the cases where the guest was in real-suspend
1452 		 * mode and was transitioning to transactional state.
1453 		 */
1454 		r = kvmhv_p9_tm_emulation(vcpu);
1455 		break;
1456 #endif
1457 
1458 	case BOOK3S_INTERRUPT_HV_RM_HARD:
1459 		r = RESUME_PASSTHROUGH;
1460 		break;
1461 	default:
1462 		kvmppc_dump_regs(vcpu);
1463 		printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
1464 			vcpu->arch.trap, kvmppc_get_pc(vcpu),
1465 			vcpu->arch.shregs.msr);
1466 		run->hw.hardware_exit_reason = vcpu->arch.trap;
1467 		r = RESUME_HOST;
1468 		break;
1469 	}
1470 
1471 	return r;
1472 }
1473 
1474 static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
1475 {
1476 	int r;
1477 	int srcu_idx;
1478 
1479 	vcpu->stat.sum_exits++;
1480 
1481 	/*
1482 	 * This can happen if an interrupt occurs in the last stages
1483 	 * of guest entry or the first stages of guest exit (i.e. after
1484 	 * setting paca->kvm_hstate.in_guest to KVM_GUEST_MODE_GUEST_HV
1485 	 * and before setting it to KVM_GUEST_MODE_HOST_HV).
1486 	 * That can happen due to a bug, or due to a machine check
1487 	 * occurring at just the wrong time.
1488 	 */
1489 	if (vcpu->arch.shregs.msr & MSR_HV) {
1490 		pr_emerg("KVM trap in HV mode while nested!\n");
1491 		pr_emerg("trap=0x%x | pc=0x%lx | msr=0x%llx\n",
1492 			 vcpu->arch.trap, kvmppc_get_pc(vcpu),
1493 			 vcpu->arch.shregs.msr);
1494 		kvmppc_dump_regs(vcpu);
1495 		return RESUME_HOST;
1496 	}
1497 	switch (vcpu->arch.trap) {
1498 	/* We're good on these - the host merely wanted to get our attention */
1499 	case BOOK3S_INTERRUPT_HV_DECREMENTER:
1500 		vcpu->stat.dec_exits++;
1501 		r = RESUME_GUEST;
1502 		break;
1503 	case BOOK3S_INTERRUPT_EXTERNAL:
1504 		vcpu->stat.ext_intr_exits++;
1505 		r = RESUME_HOST;
1506 		break;
1507 	case BOOK3S_INTERRUPT_H_DOORBELL:
1508 	case BOOK3S_INTERRUPT_H_VIRT:
1509 		vcpu->stat.ext_intr_exits++;
1510 		r = RESUME_GUEST;
1511 		break;
1512 	/* SR/HMI/PMI are HV interrupts that host has handled. Resume guest.*/
1513 	case BOOK3S_INTERRUPT_HMI:
1514 	case BOOK3S_INTERRUPT_PERFMON:
1515 	case BOOK3S_INTERRUPT_SYSTEM_RESET:
1516 		r = RESUME_GUEST;
1517 		break;
1518 	case BOOK3S_INTERRUPT_MACHINE_CHECK:
1519 		/* Pass the machine check to the L1 guest */
1520 		r = RESUME_HOST;
1521 		/* Print the MCE event to host console. */
1522 		machine_check_print_event_info(&vcpu->arch.mce_evt, false, true);
1523 		break;
1524 	/*
1525 	 * We get these next two if the guest accesses a page which it thinks
1526 	 * it has mapped but which is not actually present, either because
1527 	 * it is for an emulated I/O device or because the corresponding
1528 	 * host page has been paged out.
1529 	 */
1530 	case BOOK3S_INTERRUPT_H_DATA_STORAGE:
1531 		srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
1532 		r = kvmhv_nested_page_fault(vcpu);
1533 		srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
1534 		break;
1535 	case BOOK3S_INTERRUPT_H_INST_STORAGE:
1536 		vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
1537 		vcpu->arch.fault_dsisr = kvmppc_get_msr(vcpu) &
1538 					 DSISR_SRR1_MATCH_64S;
1539 		if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE)
1540 			vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
1541 		srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
1542 		r = kvmhv_nested_page_fault(vcpu);
1543 		srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
1544 		break;
1545 
1546 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1547 	case BOOK3S_INTERRUPT_HV_SOFTPATCH:
1548 		/*
1549 		 * This occurs for various TM-related instructions that
1550 		 * we need to emulate on POWER9 DD2.2.  We have already
1551 		 * handled the cases where the guest was in real-suspend
1552 		 * mode and was transitioning to transactional state.
1553 		 */
1554 		r = kvmhv_p9_tm_emulation(vcpu);
1555 		break;
1556 #endif
1557 
1558 	case BOOK3S_INTERRUPT_HV_RM_HARD:
1559 		vcpu->arch.trap = 0;
1560 		r = RESUME_GUEST;
1561 		if (!xics_on_xive())
1562 			kvmppc_xics_rm_complete(vcpu, 0);
1563 		break;
1564 	default:
1565 		r = RESUME_HOST;
1566 		break;
1567 	}
1568 
1569 	return r;
1570 }
1571 
1572 static int kvm_arch_vcpu_ioctl_get_sregs_hv(struct kvm_vcpu *vcpu,
1573 					    struct kvm_sregs *sregs)
1574 {
1575 	int i;
1576 
1577 	memset(sregs, 0, sizeof(struct kvm_sregs));
1578 	sregs->pvr = vcpu->arch.pvr;
1579 	for (i = 0; i < vcpu->arch.slb_max; i++) {
1580 		sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige;
1581 		sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
1582 	}
1583 
1584 	return 0;
1585 }
1586 
1587 static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu,
1588 					    struct kvm_sregs *sregs)
1589 {
1590 	int i, j;
1591 
1592 	/* Only accept the same PVR as the host's, since we can't spoof it */
1593 	if (sregs->pvr != vcpu->arch.pvr)
1594 		return -EINVAL;
1595 
1596 	j = 0;
1597 	for (i = 0; i < vcpu->arch.slb_nr; i++) {
1598 		if (sregs->u.s.ppc64.slb[i].slbe & SLB_ESID_V) {
1599 			vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe;
1600 			vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv;
1601 			++j;
1602 		}
1603 	}
1604 	vcpu->arch.slb_max = j;
1605 
1606 	return 0;
1607 }
1608 
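/*
 * Update the vcore's LPCR on behalf of userspace; only a limited set of
 * bits may change, and preserve_top32 supports the legacy 32-bit ONE_REG
 * interface, which must not clear the upper LPCR bits.
 */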
1609 static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
1610 		bool preserve_top32)
1611 {
1612 	struct kvm *kvm = vcpu->kvm;
1613 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
1614 	u64 mask;
1615 
1616 	spin_lock(&vc->lock);
1617 	/*
1618 	 * If ILE (interrupt little-endian) has changed, update the
1619 	 * MSR_LE bit in the intr_msr for each vcpu in this vcore.
1620 	 */
1621 	if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) {
1622 		struct kvm_vcpu *vcpu;
1623 		int i;
1624 
1625 		kvm_for_each_vcpu(i, vcpu, kvm) {
1626 			if (vcpu->arch.vcore != vc)
1627 				continue;
1628 			if (new_lpcr & LPCR_ILE)
1629 				vcpu->arch.intr_msr |= MSR_LE;
1630 			else
1631 				vcpu->arch.intr_msr &= ~MSR_LE;
1632 		}
1633 	}
1634 
1635 	/*
1636 	 * Userspace can only modify DPFD (default prefetch depth),
1637 	 * ILE (interrupt little-endian) and TC (translation control).
1638 	 * On POWER8 and POWER9 userspace can also modify AIL (alt. interrupt loc.).
1639 	 */
1640 	mask = LPCR_DPFD | LPCR_ILE | LPCR_TC;
1641 	if (cpu_has_feature(CPU_FTR_ARCH_207S))
1642 		mask |= LPCR_AIL;
1643 	/*
1644 	 * On POWER9, allow userspace to enable large decrementer for the
1645 	 * guest, whether or not the host has it enabled.
1646 	 */
1647 	if (cpu_has_feature(CPU_FTR_ARCH_300))
1648 		mask |= LPCR_LD;
1649 
1650 	/* Broken 32-bit version of LPCR must not clear top bits */
1651 	if (preserve_top32)
1652 		mask &= 0xFFFFFFFF;
1653 	vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
1654 	spin_unlock(&vc->lock);
1655 }
1656 
1657 static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
1658 				 union kvmppc_one_reg *val)
1659 {
1660 	int r = 0;
1661 	long int i;
1662 
1663 	switch (id) {
1664 	case KVM_REG_PPC_DEBUG_INST:
1665 		*val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT);
1666 		break;
1667 	case KVM_REG_PPC_HIOR:
1668 		*val = get_reg_val(id, 0);
1669 		break;
1670 	case KVM_REG_PPC_DABR:
1671 		*val = get_reg_val(id, vcpu->arch.dabr);
1672 		break;
1673 	case KVM_REG_PPC_DABRX:
1674 		*val = get_reg_val(id, vcpu->arch.dabrx);
1675 		break;
1676 	case KVM_REG_PPC_DSCR:
1677 		*val = get_reg_val(id, vcpu->arch.dscr);
1678 		break;
1679 	case KVM_REG_PPC_PURR:
1680 		*val = get_reg_val(id, vcpu->arch.purr);
1681 		break;
1682 	case KVM_REG_PPC_SPURR:
1683 		*val = get_reg_val(id, vcpu->arch.spurr);
1684 		break;
1685 	case KVM_REG_PPC_AMR:
1686 		*val = get_reg_val(id, vcpu->arch.amr);
1687 		break;
1688 	case KVM_REG_PPC_UAMOR:
1689 		*val = get_reg_val(id, vcpu->arch.uamor);
1690 		break;
1691 	case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCR1:
1692 		i = id - KVM_REG_PPC_MMCR0;
1693 		*val = get_reg_val(id, vcpu->arch.mmcr[i]);
1694 		break;
1695 	case KVM_REG_PPC_MMCR2:
1696 		*val = get_reg_val(id, vcpu->arch.mmcr[2]);
1697 		break;
1698 	case KVM_REG_PPC_MMCRA:
1699 		*val = get_reg_val(id, vcpu->arch.mmcra);
1700 		break;
1701 	case KVM_REG_PPC_MMCRS:
1702 		*val = get_reg_val(id, vcpu->arch.mmcrs);
1703 		break;
1704 	case KVM_REG_PPC_MMCR3:
1705 		*val = get_reg_val(id, vcpu->arch.mmcr[3]);
1706 		break;
1707 	case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
1708 		i = id - KVM_REG_PPC_PMC1;
1709 		*val = get_reg_val(id, vcpu->arch.pmc[i]);
1710 		break;
1711 	case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2:
1712 		i = id - KVM_REG_PPC_SPMC1;
1713 		*val = get_reg_val(id, vcpu->arch.spmc[i]);
1714 		break;
1715 	case KVM_REG_PPC_SIAR:
1716 		*val = get_reg_val(id, vcpu->arch.siar);
1717 		break;
1718 	case KVM_REG_PPC_SDAR:
1719 		*val = get_reg_val(id, vcpu->arch.sdar);
1720 		break;
1721 	case KVM_REG_PPC_SIER:
1722 		*val = get_reg_val(id, vcpu->arch.sier[0]);
1723 		break;
1724 	case KVM_REG_PPC_SIER2:
1725 		*val = get_reg_val(id, vcpu->arch.sier[1]);
1726 		break;
1727 	case KVM_REG_PPC_SIER3:
1728 		*val = get_reg_val(id, vcpu->arch.sier[2]);
1729 		break;
1730 	case KVM_REG_PPC_IAMR:
1731 		*val = get_reg_val(id, vcpu->arch.iamr);
1732 		break;
1733 	case KVM_REG_PPC_PSPB:
1734 		*val = get_reg_val(id, vcpu->arch.pspb);
1735 		break;
1736 	case KVM_REG_PPC_DPDES:
1737 		/*
1738 		 * On POWER9, where we are emulating msgsndp etc.,
1739 		 * we return 1 bit for each vcpu, which can come from
1740 		 * either vcore->dpdes or doorbell_request.
1741 		 * On POWER8, doorbell_request is 0.
1742 		 */
1743 		*val = get_reg_val(id, vcpu->arch.vcore->dpdes |
1744 				   vcpu->arch.doorbell_request);
1745 		break;
1746 	case KVM_REG_PPC_VTB:
1747 		*val = get_reg_val(id, vcpu->arch.vcore->vtb);
1748 		break;
1749 	case KVM_REG_PPC_DAWR:
1750 		*val = get_reg_val(id, vcpu->arch.dawr);
1751 		break;
1752 	case KVM_REG_PPC_DAWRX:
1753 		*val = get_reg_val(id, vcpu->arch.dawrx);
1754 		break;
1755 	case KVM_REG_PPC_CIABR:
1756 		*val = get_reg_val(id, vcpu->arch.ciabr);
1757 		break;
1758 	case KVM_REG_PPC_CSIGR:
1759 		*val = get_reg_val(id, vcpu->arch.csigr);
1760 		break;
1761 	case KVM_REG_PPC_TACR:
1762 		*val = get_reg_val(id, vcpu->arch.tacr);
1763 		break;
1764 	case KVM_REG_PPC_TCSCR:
1765 		*val = get_reg_val(id, vcpu->arch.tcscr);
1766 		break;
1767 	case KVM_REG_PPC_PID:
1768 		*val = get_reg_val(id, vcpu->arch.pid);
1769 		break;
1770 	case KVM_REG_PPC_ACOP:
1771 		*val = get_reg_val(id, vcpu->arch.acop);
1772 		break;
1773 	case KVM_REG_PPC_WORT:
1774 		*val = get_reg_val(id, vcpu->arch.wort);
1775 		break;
1776 	case KVM_REG_PPC_TIDR:
1777 		*val = get_reg_val(id, vcpu->arch.tid);
1778 		break;
1779 	case KVM_REG_PPC_PSSCR:
1780 		*val = get_reg_val(id, vcpu->arch.psscr);
1781 		break;
1782 	case KVM_REG_PPC_VPA_ADDR:
1783 		spin_lock(&vcpu->arch.vpa_update_lock);
1784 		*val = get_reg_val(id, vcpu->arch.vpa.next_gpa);
1785 		spin_unlock(&vcpu->arch.vpa_update_lock);
1786 		break;
1787 	case KVM_REG_PPC_VPA_SLB:
1788 		spin_lock(&vcpu->arch.vpa_update_lock);
1789 		val->vpaval.addr = vcpu->arch.slb_shadow.next_gpa;
1790 		val->vpaval.length = vcpu->arch.slb_shadow.len;
1791 		spin_unlock(&vcpu->arch.vpa_update_lock);
1792 		break;
1793 	case KVM_REG_PPC_VPA_DTL:
1794 		spin_lock(&vcpu->arch.vpa_update_lock);
1795 		val->vpaval.addr = vcpu->arch.dtl.next_gpa;
1796 		val->vpaval.length = vcpu->arch.dtl.len;
1797 		spin_unlock(&vcpu->arch.vpa_update_lock);
1798 		break;
1799 	case KVM_REG_PPC_TB_OFFSET:
1800 		*val = get_reg_val(id, vcpu->arch.vcore->tb_offset);
1801 		break;
1802 	case KVM_REG_PPC_LPCR:
1803 	case KVM_REG_PPC_LPCR_64:
1804 		*val = get_reg_val(id, vcpu->arch.vcore->lpcr);
1805 		break;
1806 	case KVM_REG_PPC_PPR:
1807 		*val = get_reg_val(id, vcpu->arch.ppr);
1808 		break;
1809 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1810 	case KVM_REG_PPC_TFHAR:
1811 		*val = get_reg_val(id, vcpu->arch.tfhar);
1812 		break;
1813 	case KVM_REG_PPC_TFIAR:
1814 		*val = get_reg_val(id, vcpu->arch.tfiar);
1815 		break;
1816 	case KVM_REG_PPC_TEXASR:
1817 		*val = get_reg_val(id, vcpu->arch.texasr);
1818 		break;
1819 	case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31:
1820 		i = id - KVM_REG_PPC_TM_GPR0;
1821 		*val = get_reg_val(id, vcpu->arch.gpr_tm[i]);
1822 		break;
1823 	case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63:
1824 	{
1825 		int j;
1826 		i = id - KVM_REG_PPC_TM_VSR0;
1827 		if (i < 32)
1828 			for (j = 0; j < TS_FPRWIDTH; j++)
1829 				val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j];
1830 		else {
1831 			if (cpu_has_feature(CPU_FTR_ALTIVEC))
1832 				val->vval = vcpu->arch.vr_tm.vr[i-32];
1833 			else
1834 				r = -ENXIO;
1835 		}
1836 		break;
1837 	}
1838 	case KVM_REG_PPC_TM_CR:
1839 		*val = get_reg_val(id, vcpu->arch.cr_tm);
1840 		break;
1841 	case KVM_REG_PPC_TM_XER:
1842 		*val = get_reg_val(id, vcpu->arch.xer_tm);
1843 		break;
1844 	case KVM_REG_PPC_TM_LR:
1845 		*val = get_reg_val(id, vcpu->arch.lr_tm);
1846 		break;
1847 	case KVM_REG_PPC_TM_CTR:
1848 		*val = get_reg_val(id, vcpu->arch.ctr_tm);
1849 		break;
1850 	case KVM_REG_PPC_TM_FPSCR:
1851 		*val = get_reg_val(id, vcpu->arch.fp_tm.fpscr);
1852 		break;
1853 	case KVM_REG_PPC_TM_AMR:
1854 		*val = get_reg_val(id, vcpu->arch.amr_tm);
1855 		break;
1856 	case KVM_REG_PPC_TM_PPR:
1857 		*val = get_reg_val(id, vcpu->arch.ppr_tm);
1858 		break;
1859 	case KVM_REG_PPC_TM_VRSAVE:
1860 		*val = get_reg_val(id, vcpu->arch.vrsave_tm);
1861 		break;
1862 	case KVM_REG_PPC_TM_VSCR:
1863 		if (cpu_has_feature(CPU_FTR_ALTIVEC))
1864 			*val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]);
1865 		else
1866 			r = -ENXIO;
1867 		break;
1868 	case KVM_REG_PPC_TM_DSCR:
1869 		*val = get_reg_val(id, vcpu->arch.dscr_tm);
1870 		break;
1871 	case KVM_REG_PPC_TM_TAR:
1872 		*val = get_reg_val(id, vcpu->arch.tar_tm);
1873 		break;
1874 #endif
1875 	case KVM_REG_PPC_ARCH_COMPAT:
1876 		*val = get_reg_val(id, vcpu->arch.vcore->arch_compat);
1877 		break;
1878 	case KVM_REG_PPC_DEC_EXPIRY:
1879 		*val = get_reg_val(id, vcpu->arch.dec_expires +
1880 				   vcpu->arch.vcore->tb_offset);
1881 		break;
1882 	case KVM_REG_PPC_ONLINE:
1883 		*val = get_reg_val(id, vcpu->arch.online);
1884 		break;
1885 	case KVM_REG_PPC_PTCR:
1886 		*val = get_reg_val(id, vcpu->kvm->arch.l1_ptcr);
1887 		break;
1888 	default:
1889 		r = -EINVAL;
1890 		break;
1891 	}
1892 
1893 	return r;
1894 }
1895 
1896 static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
1897 				 union kvmppc_one_reg *val)
1898 {
1899 	int r = 0;
1900 	long int i;
1901 	unsigned long addr, len;
1902 
1903 	switch (id) {
1904 	case KVM_REG_PPC_HIOR:
1905 		/* Only allow this to be set to zero */
1906 		if (set_reg_val(id, *val))
1907 			r = -EINVAL;
1908 		break;
1909 	case KVM_REG_PPC_DABR:
1910 		vcpu->arch.dabr = set_reg_val(id, *val);
1911 		break;
1912 	case KVM_REG_PPC_DABRX:
1913 		vcpu->arch.dabrx = set_reg_val(id, *val) & ~DABRX_HYP;
1914 		break;
1915 	case KVM_REG_PPC_DSCR:
1916 		vcpu->arch.dscr = set_reg_val(id, *val);
1917 		break;
1918 	case KVM_REG_PPC_PURR:
1919 		vcpu->arch.purr = set_reg_val(id, *val);
1920 		break;
1921 	case KVM_REG_PPC_SPURR:
1922 		vcpu->arch.spurr = set_reg_val(id, *val);
1923 		break;
1924 	case KVM_REG_PPC_AMR:
1925 		vcpu->arch.amr = set_reg_val(id, *val);
1926 		break;
1927 	case KVM_REG_PPC_UAMOR:
1928 		vcpu->arch.uamor = set_reg_val(id, *val);
1929 		break;
1930 	case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCR1:
1931 		i = id - KVM_REG_PPC_MMCR0;
1932 		vcpu->arch.mmcr[i] = set_reg_val(id, *val);
1933 		break;
1934 	case KVM_REG_PPC_MMCR2:
1935 		vcpu->arch.mmcr[2] = set_reg_val(id, *val);
1936 		break;
1937 	case KVM_REG_PPC_MMCRA:
1938 		vcpu->arch.mmcra = set_reg_val(id, *val);
1939 		break;
1940 	case KVM_REG_PPC_MMCRS:
1941 		vcpu->arch.mmcrs = set_reg_val(id, *val);
1942 		break;
1943 	case KVM_REG_PPC_MMCR3:
1944 		vcpu->arch.mmcr[3] = set_reg_val(id, *val);
1945 		break;
1946 	case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
1947 		i = id - KVM_REG_PPC_PMC1;
1948 		vcpu->arch.pmc[i] = set_reg_val(id, *val);
1949 		break;
1950 	case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2:
1951 		i = id - KVM_REG_PPC_SPMC1;
1952 		vcpu->arch.spmc[i] = set_reg_val(id, *val);
1953 		break;
1954 	case KVM_REG_PPC_SIAR:
1955 		vcpu->arch.siar = set_reg_val(id, *val);
1956 		break;
1957 	case KVM_REG_PPC_SDAR:
1958 		vcpu->arch.sdar = set_reg_val(id, *val);
1959 		break;
1960 	case KVM_REG_PPC_SIER:
1961 		vcpu->arch.sier[0] = set_reg_val(id, *val);
1962 		break;
1963 	case KVM_REG_PPC_SIER2:
1964 		vcpu->arch.sier[1] = set_reg_val(id, *val);
1965 		break;
1966 	case KVM_REG_PPC_SIER3:
1967 		vcpu->arch.sier[2] = set_reg_val(id, *val);
1968 		break;
1969 	case KVM_REG_PPC_IAMR:
1970 		vcpu->arch.iamr = set_reg_val(id, *val);
1971 		break;
1972 	case KVM_REG_PPC_PSPB:
1973 		vcpu->arch.pspb = set_reg_val(id, *val);
1974 		break;
1975 	case KVM_REG_PPC_DPDES:
1976 		vcpu->arch.vcore->dpdes = set_reg_val(id, *val);
1977 		break;
1978 	case KVM_REG_PPC_VTB:
1979 		vcpu->arch.vcore->vtb = set_reg_val(id, *val);
1980 		break;
1981 	case KVM_REG_PPC_DAWR:
1982 		vcpu->arch.dawr = set_reg_val(id, *val);
1983 		break;
1984 	case KVM_REG_PPC_DAWRX:
1985 		vcpu->arch.dawrx = set_reg_val(id, *val) & ~DAWRX_HYP;
1986 		break;
1987 	case KVM_REG_PPC_CIABR:
1988 		vcpu->arch.ciabr = set_reg_val(id, *val);
1989 		/* Don't allow setting breakpoints in hypervisor code */
1990 		if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER)
1991 			vcpu->arch.ciabr &= ~CIABR_PRIV;	/* disable */
1992 		break;
1993 	case KVM_REG_PPC_CSIGR:
1994 		vcpu->arch.csigr = set_reg_val(id, *val);
1995 		break;
1996 	case KVM_REG_PPC_TACR:
1997 		vcpu->arch.tacr = set_reg_val(id, *val);
1998 		break;
1999 	case KVM_REG_PPC_TCSCR:
2000 		vcpu->arch.tcscr = set_reg_val(id, *val);
2001 		break;
2002 	case KVM_REG_PPC_PID:
2003 		vcpu->arch.pid = set_reg_val(id, *val);
2004 		break;
2005 	case KVM_REG_PPC_ACOP:
2006 		vcpu->arch.acop = set_reg_val(id, *val);
2007 		break;
2008 	case KVM_REG_PPC_WORT:
2009 		vcpu->arch.wort = set_reg_val(id, *val);
2010 		break;
2011 	case KVM_REG_PPC_TIDR:
2012 		vcpu->arch.tid = set_reg_val(id, *val);
2013 		break;
2014 	case KVM_REG_PPC_PSSCR:
2015 		vcpu->arch.psscr = set_reg_val(id, *val) & PSSCR_GUEST_VIS;
2016 		break;
2017 	case KVM_REG_PPC_VPA_ADDR:
2018 		addr = set_reg_val(id, *val);
2019 		r = -EINVAL;
2020 		if (!addr && (vcpu->arch.slb_shadow.next_gpa ||
2021 			      vcpu->arch.dtl.next_gpa))
2022 			break;
2023 		r = set_vpa(vcpu, &vcpu->arch.vpa, addr, sizeof(struct lppaca));
2024 		break;
2025 	case KVM_REG_PPC_VPA_SLB:
2026 		addr = val->vpaval.addr;
2027 		len = val->vpaval.length;
2028 		r = -EINVAL;
2029 		if (addr && !vcpu->arch.vpa.next_gpa)
2030 			break;
2031 		r = set_vpa(vcpu, &vcpu->arch.slb_shadow, addr, len);
2032 		break;
2033 	case KVM_REG_PPC_VPA_DTL:
2034 		addr = val->vpaval.addr;
2035 		len = val->vpaval.length;
2036 		r = -EINVAL;
2037 		if (addr && (len < sizeof(struct dtl_entry) ||
2038 			     !vcpu->arch.vpa.next_gpa))
2039 			break;
2040 		len -= len % sizeof(struct dtl_entry);
2041 		r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
2042 		break;
2043 	case KVM_REG_PPC_TB_OFFSET:
2044 		/* round up to multiple of 2^24 */
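		/*
		 * The offset is applied by writing SPRN_TBU40, which sets
		 * only the upper 40 bits of the timebase, so the low 24 bits
		 * of the offset cannot be represented.
		 */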
2045 		vcpu->arch.vcore->tb_offset =
2046 			ALIGN(set_reg_val(id, *val), 1UL << 24);
2047 		break;
2048 	case KVM_REG_PPC_LPCR:
2049 		kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), true);
2050 		break;
2051 	case KVM_REG_PPC_LPCR_64:
2052 		kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), false);
2053 		break;
2054 	case KVM_REG_PPC_PPR:
2055 		vcpu->arch.ppr = set_reg_val(id, *val);
2056 		break;
2057 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2058 	case KVM_REG_PPC_TFHAR:
2059 		vcpu->arch.tfhar = set_reg_val(id, *val);
2060 		break;
2061 	case KVM_REG_PPC_TFIAR:
2062 		vcpu->arch.tfiar = set_reg_val(id, *val);
2063 		break;
2064 	case KVM_REG_PPC_TEXASR:
2065 		vcpu->arch.texasr = set_reg_val(id, *val);
2066 		break;
2067 	case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31:
2068 		i = id - KVM_REG_PPC_TM_GPR0;
2069 		vcpu->arch.gpr_tm[i] = set_reg_val(id, *val);
2070 		break;
2071 	case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63:
2072 	{
2073 		int j;
2074 		i = id - KVM_REG_PPC_TM_VSR0;
2075 		if (i < 32)
2076 			for (j = 0; j < TS_FPRWIDTH; j++)
2077 				vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j];
2078 		else
2079 			if (cpu_has_feature(CPU_FTR_ALTIVEC))
2080 				vcpu->arch.vr_tm.vr[i-32] = val->vval;
2081 			else
2082 				r = -ENXIO;
2083 		break;
2084 	}
2085 	case KVM_REG_PPC_TM_CR:
2086 		vcpu->arch.cr_tm = set_reg_val(id, *val);
2087 		break;
2088 	case KVM_REG_PPC_TM_XER:
2089 		vcpu->arch.xer_tm = set_reg_val(id, *val);
2090 		break;
2091 	case KVM_REG_PPC_TM_LR:
2092 		vcpu->arch.lr_tm = set_reg_val(id, *val);
2093 		break;
2094 	case KVM_REG_PPC_TM_CTR:
2095 		vcpu->arch.ctr_tm = set_reg_val(id, *val);
2096 		break;
2097 	case KVM_REG_PPC_TM_FPSCR:
2098 		vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val);
2099 		break;
2100 	case KVM_REG_PPC_TM_AMR:
2101 		vcpu->arch.amr_tm = set_reg_val(id, *val);
2102 		break;
2103 	case KVM_REG_PPC_TM_PPR:
2104 		vcpu->arch.ppr_tm = set_reg_val(id, *val);
2105 		break;
2106 	case KVM_REG_PPC_TM_VRSAVE:
2107 		vcpu->arch.vrsave_tm = set_reg_val(id, *val);
2108 		break;
2109 	case KVM_REG_PPC_TM_VSCR:
2110 		if (cpu_has_feature(CPU_FTR_ALTIVEC))
2111 			vcpu->arch.vr_tm.vscr.u[3] = set_reg_val(id, *val);
2112 		else
2113 			r = -ENXIO;
2114 		break;
2115 	case KVM_REG_PPC_TM_DSCR:
2116 		vcpu->arch.dscr_tm = set_reg_val(id, *val);
2117 		break;
2118 	case KVM_REG_PPC_TM_TAR:
2119 		vcpu->arch.tar_tm = set_reg_val(id, *val);
2120 		break;
2121 #endif
2122 	case KVM_REG_PPC_ARCH_COMPAT:
2123 		r = kvmppc_set_arch_compat(vcpu, set_reg_val(id, *val));
2124 		break;
2125 	case KVM_REG_PPC_DEC_EXPIRY:
2126 		vcpu->arch.dec_expires = set_reg_val(id, *val) -
2127 			vcpu->arch.vcore->tb_offset;
2128 		break;
2129 	case KVM_REG_PPC_ONLINE:
2130 		i = set_reg_val(id, *val);
2131 		if (i && !vcpu->arch.online)
2132 			atomic_inc(&vcpu->arch.vcore->online_count);
2133 		else if (!i && vcpu->arch.online)
2134 			atomic_dec(&vcpu->arch.vcore->online_count);
2135 		vcpu->arch.online = i;
2136 		break;
2137 	case KVM_REG_PPC_PTCR:
2138 		vcpu->kvm->arch.l1_ptcr = set_reg_val(id, *val);
2139 		break;
2140 	default:
2141 		r = -EINVAL;
2142 		break;
2143 	}
2144 
2145 	return r;
2146 }
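
/*
 * The get/set handlers above are reached from userspace through the
 * KVM_GET_ONE_REG/KVM_SET_ONE_REG vcpu ioctls.  A minimal userspace-side
 * sketch (illustrative only, assuming a vcpu fd from KVM_CREATE_VCPU):
 *
 *	__u64 dscr = 0x10;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_PPC_DSCR,
 *		.addr = (__u64)(unsigned long)&dscr,
 *	};
 *	ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
 *
 * The generic vcpu ioctl code copies the value in and dispatches through
 * kvmppc_set_one_reg(), which ends up here for HV guests.
 */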
2147 
2148 /*
2149  * On POWER9, threads are independent and can be in different partitions.
2150  * Therefore we consider each thread to be a subcore.
2151  * There is a restriction that all threads have to be in the same
2152  * MMU mode (radix or HPT), unfortunately, but since we only support
2153  * HPT guests on an HPT host so far, that isn't an impediment yet.
2154  */
2155 static int threads_per_vcore(struct kvm *kvm)
2156 {
2157 	if (kvm->arch.threads_indep)
2158 		return 1;
2159 	return threads_per_subcore;
2160 }
2161 
2162 static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int id)
2163 {
2164 	struct kvmppc_vcore *vcore;
2165 
2166 	vcore = kzalloc(sizeof(struct kvmppc_vcore), GFP_KERNEL);
2167 
2168 	if (vcore == NULL)
2169 		return NULL;
2170 
2171 	spin_lock_init(&vcore->lock);
2172 	spin_lock_init(&vcore->stoltb_lock);
2173 	rcuwait_init(&vcore->wait);
2174 	vcore->preempt_tb = TB_NIL;
2175 	vcore->lpcr = kvm->arch.lpcr;
2176 	vcore->first_vcpuid = id;
2177 	vcore->kvm = kvm;
2178 	INIT_LIST_HEAD(&vcore->preempt_list);
2179 
2180 	return vcore;
2181 }
2182 
2183 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
2184 static struct debugfs_timings_element {
2185 	const char *name;
2186 	size_t offset;
2187 } timings[] = {
2188 	{"rm_entry",	offsetof(struct kvm_vcpu, arch.rm_entry)},
2189 	{"rm_intr",	offsetof(struct kvm_vcpu, arch.rm_intr)},
2190 	{"rm_exit",	offsetof(struct kvm_vcpu, arch.rm_exit)},
2191 	{"guest",	offsetof(struct kvm_vcpu, arch.guest_time)},
2192 	{"cede",	offsetof(struct kvm_vcpu, arch.cede_time)},
2193 };
2194 
2195 #define N_TIMINGS	(ARRAY_SIZE(timings))
2196 
2197 struct debugfs_timings_state {
2198 	struct kvm_vcpu	*vcpu;
2199 	unsigned int	buflen;
2200 	char		buf[N_TIMINGS * 100];
2201 };
2202 
2203 static int debugfs_timings_open(struct inode *inode, struct file *file)
2204 {
2205 	struct kvm_vcpu *vcpu = inode->i_private;
2206 	struct debugfs_timings_state *p;
2207 
2208 	p = kzalloc(sizeof(*p), GFP_KERNEL);
2209 	if (!p)
2210 		return -ENOMEM;
2211 
2212 	kvm_get_kvm(vcpu->kvm);
2213 	p->vcpu = vcpu;
2214 	file->private_data = p;
2215 
2216 	return nonseekable_open(inode, file);
2217 }
2218 
2219 static int debugfs_timings_release(struct inode *inode, struct file *file)
2220 {
2221 	struct debugfs_timings_state *p = file->private_data;
2222 
2223 	kvm_put_kvm(p->vcpu->kvm);
2224 	kfree(p);
2225 	return 0;
2226 }
2227 
2228 static ssize_t debugfs_timings_read(struct file *file, char __user *buf,
2229 				    size_t len, loff_t *ppos)
2230 {
2231 	struct debugfs_timings_state *p = file->private_data;
2232 	struct kvm_vcpu *vcpu = p->vcpu;
2233 	char *s, *buf_end;
2234 	struct kvmhv_tb_accumulator tb;
2235 	u64 count;
2236 	loff_t pos;
2237 	ssize_t n;
2238 	int i, loops;
2239 	bool ok;
2240 
2241 	if (!p->buflen) {
2242 		s = p->buf;
2243 		buf_end = s + sizeof(p->buf);
2244 		for (i = 0; i < N_TIMINGS; ++i) {
2245 			struct kvmhv_tb_accumulator *acc;
2246 
2247 			acc = (struct kvmhv_tb_accumulator *)
2248 				((unsigned long)vcpu + timings[i].offset);
2249 			ok = false;
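			/*
			 * Lock-free snapshot: the updater makes seqcount odd
			 * while it modifies the accumulator and even when it
			 * is done, so retry until we read the same even count
			 * before and after copying it.
			 */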
2250 			for (loops = 0; loops < 1000; ++loops) {
2251 				count = acc->seqcount;
2252 				if (!(count & 1)) {
2253 					smp_rmb();
2254 					tb = *acc;
2255 					smp_rmb();
2256 					if (count == acc->seqcount) {
2257 						ok = true;
2258 						break;
2259 					}
2260 				}
2261 				udelay(1);
2262 			}
2263 			if (!ok)
2264 				snprintf(s, buf_end - s, "%s: stuck\n",
2265 					timings[i].name);
2266 			else
2267 				snprintf(s, buf_end - s,
2268 					"%s: %llu %llu %llu %llu\n",
2269 					timings[i].name, count / 2,
2270 					tb_to_ns(tb.tb_total),
2271 					tb_to_ns(tb.tb_min),
2272 					tb_to_ns(tb.tb_max));
2273 			s += strlen(s);
2274 		}
2275 		p->buflen = s - p->buf;
2276 	}
2277 
2278 	pos = *ppos;
2279 	if (pos >= p->buflen)
2280 		return 0;
2281 	if (len > p->buflen - pos)
2282 		len = p->buflen - pos;
2283 	n = copy_to_user(buf, p->buf + pos, len);
2284 	if (n) {
2285 		if (n == len)
2286 			return -EFAULT;
2287 		len -= n;
2288 	}
2289 	*ppos = pos + len;
2290 	return len;
2291 }
2292 
2293 static ssize_t debugfs_timings_write(struct file *file, const char __user *buf,
2294 				     size_t len, loff_t *ppos)
2295 {
2296 	return -EACCES;
2297 }
2298 
2299 static const struct file_operations debugfs_timings_ops = {
2300 	.owner	 = THIS_MODULE,
2301 	.open	 = debugfs_timings_open,
2302 	.release = debugfs_timings_release,
2303 	.read	 = debugfs_timings_read,
2304 	.write	 = debugfs_timings_write,
2305 	.llseek	 = generic_file_llseek,
2306 };
2307 
2308 /* Create a debugfs directory for the vcpu */
2309 static void debugfs_vcpu_init(struct kvm_vcpu *vcpu, unsigned int id)
2310 {
2311 	char buf[16];
2312 	struct kvm *kvm = vcpu->kvm;
2313 
2314 	snprintf(buf, sizeof(buf), "vcpu%u", id);
2315 	vcpu->arch.debugfs_dir = debugfs_create_dir(buf, kvm->arch.debugfs_dir);
2316 	debugfs_create_file("timings", 0444, vcpu->arch.debugfs_dir, vcpu,
2317 			    &debugfs_timings_ops);
2318 }
2319 
2320 #else /* CONFIG_KVM_BOOK3S_HV_EXIT_TIMING */
2321 static void debugfs_vcpu_init(struct kvm_vcpu *vcpu, unsigned int id)
2322 {
2323 }
2324 #endif /* CONFIG_KVM_BOOK3S_HV_EXIT_TIMING */
2325 
2326 static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu)
2327 {
2328 	int err;
2329 	int core;
2330 	struct kvmppc_vcore *vcore;
2331 	struct kvm *kvm;
2332 	unsigned int id;
2333 
2334 	kvm = vcpu->kvm;
2335 	id = vcpu->vcpu_id;
2336 
2337 	vcpu->arch.shared = &vcpu->arch.shregs;
2338 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
2339 	/*
2340 	 * The shared struct is never shared on HV,
2341 	 * so we can always use host endianness
2342 	 */
2343 #ifdef __BIG_ENDIAN__
2344 	vcpu->arch.shared_big_endian = true;
2345 #else
2346 	vcpu->arch.shared_big_endian = false;
2347 #endif
2348 #endif
2349 	vcpu->arch.mmcr[0] = MMCR0_FC;
2350 	vcpu->arch.ctrl = CTRL_RUNLATCH;
2351 	/* default to host PVR, since we can't spoof it */
2352 	kvmppc_set_pvr_hv(vcpu, mfspr(SPRN_PVR));
2353 	spin_lock_init(&vcpu->arch.vpa_update_lock);
2354 	spin_lock_init(&vcpu->arch.tbacct_lock);
2355 	vcpu->arch.busy_preempt = TB_NIL;
2356 	vcpu->arch.intr_msr = MSR_SF | MSR_ME;
2357 
2358 	/*
2359 	 * Set the default HFSCR for the guest from the host value.
2360 	 * This value is only used on POWER9.
2361 	 * On POWER9, we want to virtualize the doorbell facility, so we
2362 	 * don't set the HFSCR_MSGP bit; those instructions then trap and
2363 	 * we emulate them.
2364 	 */
2365 	vcpu->arch.hfscr = HFSCR_TAR | HFSCR_EBB | HFSCR_PM | HFSCR_BHRB |
2366 		HFSCR_DSCR | HFSCR_VECVSX | HFSCR_FP | HFSCR_PREFIX;
2367 	if (cpu_has_feature(CPU_FTR_HVMODE)) {
2368 		vcpu->arch.hfscr &= mfspr(SPRN_HFSCR);
2369 		if (cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
2370 			vcpu->arch.hfscr |= HFSCR_TM;
2371 	}
2372 	if (cpu_has_feature(CPU_FTR_TM_COMP))
2373 		vcpu->arch.hfscr |= HFSCR_TM;
2374 
2375 	kvmppc_mmu_book3s_hv_init(vcpu);
2376 
2377 	vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
2378 
2379 	init_waitqueue_head(&vcpu->arch.cpu_run);
2380 
2381 	mutex_lock(&kvm->lock);
2382 	vcore = NULL;
2383 	err = -EINVAL;
2384 	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
2385 		if (id >= (KVM_MAX_VCPUS * kvm->arch.emul_smt_mode)) {
2386 			pr_devel("KVM: VCPU ID too high\n");
2387 			core = KVM_MAX_VCORES;
2388 		} else {
2389 			BUG_ON(kvm->arch.smt_mode != 1);
2390 			core = kvmppc_pack_vcpu_id(kvm, id);
2391 		}
2392 	} else {
2393 		core = id / kvm->arch.smt_mode;
2394 	}
2395 	if (core < KVM_MAX_VCORES) {
2396 		vcore = kvm->arch.vcores[core];
2397 		if (vcore && cpu_has_feature(CPU_FTR_ARCH_300)) {
2398 			pr_devel("KVM: collision on id %u\n", id);
2399 			vcore = NULL;
2400 		} else if (!vcore) {
2401 			/*
2402 			 * Take mmu_setup_lock for mutual exclusion
2403 			 * with kvmppc_update_lpcr().
2404 			 */
2405 			err = -ENOMEM;
2406 			vcore = kvmppc_vcore_create(kvm,
2407 					id & ~(kvm->arch.smt_mode - 1));
2408 			mutex_lock(&kvm->arch.mmu_setup_lock);
2409 			kvm->arch.vcores[core] = vcore;
2410 			kvm->arch.online_vcores++;
2411 			mutex_unlock(&kvm->arch.mmu_setup_lock);
2412 		}
2413 	}
2414 	mutex_unlock(&kvm->lock);
2415 
2416 	if (!vcore)
2417 		return err;
2418 
2419 	spin_lock(&vcore->lock);
2420 	++vcore->num_threads;
2421 	spin_unlock(&vcore->lock);
2422 	vcpu->arch.vcore = vcore;
2423 	vcpu->arch.ptid = vcpu->vcpu_id - vcore->first_vcpuid;
2424 	vcpu->arch.thread_cpu = -1;
2425 	vcpu->arch.prev_cpu = -1;
2426 
2427 	vcpu->arch.cpu_type = KVM_CPU_3S_64;
2428 	kvmppc_sanity_check(vcpu);
2429 
2430 	debugfs_vcpu_init(vcpu, id);
2431 
2432 	return 0;
2433 }
2434 
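/*
 * Reached via the KVM_CAP_PPC_SMT VM capability.  An illustrative userspace
 * sketch (assuming a vm fd from KVM_CREATE_VM; args[0] is the requested
 * threads per emulated core, args[1] the flags checked below):
 *
 *	struct kvm_enable_cap cap = {
 *		.cap  = KVM_CAP_PPC_SMT,
 *		.args = { 4, 0 },
 *	};
 *	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
 */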
2435 static int kvmhv_set_smt_mode(struct kvm *kvm, unsigned long smt_mode,
2436 			      unsigned long flags)
2437 {
2438 	int err;
2439 	int esmt = 0;
2440 
2441 	if (flags)
2442 		return -EINVAL;
2443 	if (smt_mode > MAX_SMT_THREADS || !is_power_of_2(smt_mode))
2444 		return -EINVAL;
2445 	if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
2446 		/*
2447 		 * On POWER8 (or POWER7), the threading mode is "strict",
2448 		 * so we pack smt_mode vcpus per vcore.
2449 		 */
2450 		if (smt_mode > threads_per_subcore)
2451 			return -EINVAL;
2452 	} else {
2453 		/*
2454 		 * On POWER9, the threading mode is "loose",
2455 		 * so each vcpu gets its own vcore.
2456 		 */
2457 		esmt = smt_mode;
2458 		smt_mode = 1;
2459 	}
2460 	mutex_lock(&kvm->lock);
2461 	err = -EBUSY;
2462 	if (!kvm->arch.online_vcores) {
2463 		kvm->arch.smt_mode = smt_mode;
2464 		kvm->arch.emul_smt_mode = esmt;
2465 		err = 0;
2466 	}
2467 	mutex_unlock(&kvm->lock);
2468 
2469 	return err;
2470 }
2471 
2472 static void unpin_vpa(struct kvm *kvm, struct kvmppc_vpa *vpa)
2473 {
2474 	if (vpa->pinned_addr)
2475 		kvmppc_unpin_guest_page(kvm, vpa->pinned_addr, vpa->gpa,
2476 					vpa->dirty);
2477 }
2478 
2479 static void kvmppc_core_vcpu_free_hv(struct kvm_vcpu *vcpu)
2480 {
2481 	spin_lock(&vcpu->arch.vpa_update_lock);
2482 	unpin_vpa(vcpu->kvm, &vcpu->arch.dtl);
2483 	unpin_vpa(vcpu->kvm, &vcpu->arch.slb_shadow);
2484 	unpin_vpa(vcpu->kvm, &vcpu->arch.vpa);
2485 	spin_unlock(&vcpu->arch.vpa_update_lock);
2486 }
2487 
2488 static int kvmppc_core_check_requests_hv(struct kvm_vcpu *vcpu)
2489 {
2490 	/* Indicate we want to get back into the guest */
2491 	return 1;
2492 }
2493 
2494 static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
2495 {
2496 	unsigned long dec_nsec, now;
2497 
2498 	now = get_tb();
2499 	if (now > vcpu->arch.dec_expires) {
2500 		/* decrementer has already gone negative */
2501 		kvmppc_core_queue_dec(vcpu);
2502 		kvmppc_core_prepare_to_enter(vcpu);
2503 		return;
2504 	}
2505 	dec_nsec = tb_to_ns(vcpu->arch.dec_expires - now);
2506 	hrtimer_start(&vcpu->arch.dec_timer, dec_nsec, HRTIMER_MODE_REL);
2507 	vcpu->arch.timer_running = 1;
2508 }
2509 
2510 extern int __kvmppc_vcore_entry(void);
2511 
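/*
 * Take a vcpu out of the runnable set of its virtual core, accounting the
 * time it spent runnable but not running as busy/stolen time.
 */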
2512 static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
2513 				   struct kvm_vcpu *vcpu)
2514 {
2515 	u64 now;
2516 
2517 	if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
2518 		return;
2519 	spin_lock_irq(&vcpu->arch.tbacct_lock);
2520 	now = mftb();
2521 	vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) -
2522 		vcpu->arch.stolen_logged;
2523 	vcpu->arch.busy_preempt = now;
2524 	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
2525 	spin_unlock_irq(&vcpu->arch.tbacct_lock);
2526 	--vc->n_runnable;
2527 	WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], NULL);
2528 }
2529 
2530 static int kvmppc_grab_hwthread(int cpu)
2531 {
2532 	struct paca_struct *tpaca;
2533 	long timeout = 10000;
2534 
2535 	tpaca = paca_ptrs[cpu];
2536 
2537 	/* Ensure the thread won't go into the kernel if it wakes */
2538 	tpaca->kvm_hstate.kvm_vcpu = NULL;
2539 	tpaca->kvm_hstate.kvm_vcore = NULL;
2540 	tpaca->kvm_hstate.napping = 0;
2541 	smp_wmb();
2542 	tpaca->kvm_hstate.hwthread_req = 1;
2543 
2544 	/*
2545 	 * If the thread is already executing in the kernel (e.g. handling
2546 	 * a stray interrupt), wait for it to get back to nap mode.
2547 	 * The smp_mb() is to ensure that our setting of hwthread_req
2548 	 * is visible before we look at hwthread_state, so if this
2549 	 * races with the code at system_reset_pSeries and the thread
2550 	 * misses our setting of hwthread_req, we are sure to see its
2551 	 * setting of hwthread_state, and vice versa.
2552 	 */
2553 	smp_mb();
2554 	while (tpaca->kvm_hstate.hwthread_state == KVM_HWTHREAD_IN_KERNEL) {
2555 		if (--timeout <= 0) {
2556 			pr_err("KVM: couldn't grab cpu %d\n", cpu);
2557 			return -EBUSY;
2558 		}
2559 		udelay(1);
2560 	}
2561 	return 0;
2562 }
2563 
2564 static void kvmppc_release_hwthread(int cpu)
2565 {
2566 	struct paca_struct *tpaca;
2567 
2568 	tpaca = paca_ptrs[cpu];
2569 	tpaca->kvm_hstate.hwthread_req = 0;
2570 	tpaca->kvm_hstate.kvm_vcpu = NULL;
2571 	tpaca->kvm_hstate.kvm_vcore = NULL;
2572 	tpaca->kvm_hstate.kvm_split_mode = NULL;
2573 }
2574 
2575 static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu)
2576 {
2577 	struct kvm_nested_guest *nested = vcpu->arch.nested;
2578 	cpumask_t *cpu_in_guest;
2579 	int i;
2580 
2581 	cpu = cpu_first_thread_sibling(cpu);
2582 	if (nested) {
2583 		cpumask_set_cpu(cpu, &nested->need_tlb_flush);
2584 		cpu_in_guest = &nested->cpu_in_guest;
2585 	} else {
2586 		cpumask_set_cpu(cpu, &kvm->arch.need_tlb_flush);
2587 		cpu_in_guest = &kvm->arch.cpu_in_guest;
2588 	}
2589 	/*
2590 	 * Make sure setting of bit in need_tlb_flush precedes
2591 	 * testing of cpu_in_guest bits.  The matching barrier on
2592 	 * the other side is the first smp_mb() in kvmppc_run_core().
2593 	 */
2594 	smp_mb();
2595 	for (i = 0; i < threads_per_core; ++i)
2596 		if (cpumask_test_cpu(cpu + i, cpu_in_guest))
2597 			smp_call_function_single(cpu + i, do_nothing, NULL, 1);
2598 }
2599 
2600 static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu)
2601 {
2602 	struct kvm_nested_guest *nested = vcpu->arch.nested;
2603 	struct kvm *kvm = vcpu->kvm;
2604 	int prev_cpu;
2605 
2606 	if (!cpu_has_feature(CPU_FTR_HVMODE))
2607 		return;
2608 
2609 	if (nested)
2610 		prev_cpu = nested->prev_cpu[vcpu->arch.nested_vcpu_id];
2611 	else
2612 		prev_cpu = vcpu->arch.prev_cpu;
2613 
2614 	/*
2615 	 * With radix, the guest can do TLB invalidations itself,
2616 	 * and it could choose to use the local form (tlbiel) if
2617 	 * it is invalidating a translation that has only ever been
2618 	 * used on one vcpu.  However, that doesn't mean it has
2619 	 * only ever been used on one physical cpu, since vcpus
2620 	 * can move around between pcpus.  To cope with this, when
2621 	 * a vcpu moves from one pcpu to another, we need to tell
2622 	 * any vcpus running on the same core as this vcpu previously
2623 	 * ran to flush the TLB.  The TLB is shared between threads,
2624 	 * so we use a single bit in .need_tlb_flush for all 4 threads.
2625 	 */
2626 	if (prev_cpu != pcpu) {
2627 		if (prev_cpu >= 0 &&
2628 		    cpu_first_thread_sibling(prev_cpu) !=
2629 		    cpu_first_thread_sibling(pcpu))
2630 			radix_flush_cpu(kvm, prev_cpu, vcpu);
2631 		if (nested)
2632 			nested->prev_cpu[vcpu->arch.nested_vcpu_id] = pcpu;
2633 		else
2634 			vcpu->arch.prev_cpu = pcpu;
2635 	}
2636 }
2637 
2638 static void kvmppc_start_thread(struct kvm_vcpu *vcpu, struct kvmppc_vcore *vc)
2639 {
2640 	int cpu;
2641 	struct paca_struct *tpaca;
2642 	struct kvm *kvm = vc->kvm;
2643 
2644 	cpu = vc->pcpu;
2645 	if (vcpu) {
2646 		if (vcpu->arch.timer_running) {
2647 			hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
2648 			vcpu->arch.timer_running = 0;
2649 		}
2650 		cpu += vcpu->arch.ptid;
2651 		vcpu->cpu = vc->pcpu;
2652 		vcpu->arch.thread_cpu = cpu;
2653 		cpumask_set_cpu(cpu, &kvm->arch.cpu_in_guest);
2654 	}
2655 	tpaca = paca_ptrs[cpu];
2656 	tpaca->kvm_hstate.kvm_vcpu = vcpu;
2657 	tpaca->kvm_hstate.ptid = cpu - vc->pcpu;
2658 	tpaca->kvm_hstate.fake_suspend = 0;
2659 	/* Order stores to hstate.kvm_vcpu etc. before store to kvm_vcore */
2660 	smp_wmb();
2661 	tpaca->kvm_hstate.kvm_vcore = vc;
2662 	if (cpu != smp_processor_id())
2663 		kvmppc_ipi_thread(cpu);
2664 }
2665 
2666 static void kvmppc_wait_for_nap(int n_threads)
2667 {
2668 	int cpu = smp_processor_id();
2669 	int i, loops;
2670 
2671 	if (n_threads <= 1)
2672 		return;
2673 	for (loops = 0; loops < 1000000; ++loops) {
2674 		/*
2675 		 * Check if all threads are finished.
2676 		 * We set the vcore pointer when starting a thread
2677 		 * and the thread clears it when finished, so we look
2678 		 * for any threads that still have a non-NULL vcore ptr.
2679 		 */
2680 		for (i = 1; i < n_threads; ++i)
2681 			if (paca_ptrs[cpu + i]->kvm_hstate.kvm_vcore)
2682 				break;
2683 		if (i == n_threads) {
2684 			HMT_medium();
2685 			return;
2686 		}
2687 		HMT_low();
2688 	}
2689 	HMT_medium();
2690 	for (i = 1; i < n_threads; ++i)
2691 		if (paca_ptrs[cpu + i]->kvm_hstate.kvm_vcore)
2692 			pr_err("KVM: CPU %d seems to be stuck\n", cpu + i);
2693 }
2694 
2695 /*
2696  * Check that we are on thread 0 and that any other threads in
2697  * this core are off-line.  Then grab the threads so they can't
2698  * enter the kernel.
2699  */
2700 static int on_primary_thread(void)
2701 {
2702 	int cpu = smp_processor_id();
2703 	int thr;
2704 
2705 	/* Are we on a primary subcore? */
2706 	if (cpu_thread_in_subcore(cpu))
2707 		return 0;
2708 
2709 	thr = 0;
2710 	while (++thr < threads_per_subcore)
2711 		if (cpu_online(cpu + thr))
2712 			return 0;
2713 
2714 	/* Grab all hw threads so they can't go into the kernel */
2715 	for (thr = 1; thr < threads_per_subcore; ++thr) {
2716 		if (kvmppc_grab_hwthread(cpu + thr)) {
2717 			/* Couldn't grab one; let the others go */
2718 			do {
2719 				kvmppc_release_hwthread(cpu + thr);
2720 			} while (--thr > 0);
2721 			return 0;
2722 		}
2723 	}
2724 	return 1;
2725 }
2726 
2727 /*
2728  * A list of virtual cores for each physical CPU.
2729  * These are vcores that could run but their runner VCPU tasks are
2730  * (or may be) preempted.
2731  */
2732 struct preempted_vcore_list {
2733 	struct list_head	list;
2734 	spinlock_t		lock;
2735 };
2736 
2737 static DEFINE_PER_CPU(struct preempted_vcore_list, preempted_vcores);
2738 
2739 static void init_vcore_lists(void)
2740 {
2741 	int cpu;
2742 
2743 	for_each_possible_cpu(cpu) {
2744 		struct preempted_vcore_list *lp = &per_cpu(preempted_vcores, cpu);
2745 		spin_lock_init(&lp->lock);
2746 		INIT_LIST_HEAD(&lp->list);
2747 	}
2748 }
2749 
2750 static void kvmppc_vcore_preempt(struct kvmppc_vcore *vc)
2751 {
2752 	struct preempted_vcore_list *lp = this_cpu_ptr(&preempted_vcores);
2753 
2754 	vc->vcore_state = VCORE_PREEMPT;
2755 	vc->pcpu = smp_processor_id();
2756 	if (vc->num_threads < threads_per_vcore(vc->kvm)) {
2757 		spin_lock(&lp->lock);
2758 		list_add_tail(&vc->preempt_list, &lp->list);
2759 		spin_unlock(&lp->lock);
2760 	}
2761 
2762 	/* Start accumulating stolen time */
2763 	kvmppc_core_start_stolen(vc);
2764 }
2765 
2766 static void kvmppc_vcore_end_preempt(struct kvmppc_vcore *vc)
2767 {
2768 	struct preempted_vcore_list *lp;
2769 
2770 	kvmppc_core_end_stolen(vc);
2771 	if (!list_empty(&vc->preempt_list)) {
2772 		lp = &per_cpu(preempted_vcores, vc->pcpu);
2773 		spin_lock(&lp->lock);
2774 		list_del_init(&vc->preempt_list);
2775 		spin_unlock(&lp->lock);
2776 	}
2777 	vc->vcore_state = VCORE_INACTIVE;
2778 }
2779 
2780 /*
2781  * This stores information about the virtual cores currently
2782  * assigned to a physical core.
2783  */
2784 struct core_info {
2785 	int		n_subcores;
2786 	int		max_subcore_threads;
2787 	int		total_threads;
2788 	int		subcore_threads[MAX_SUBCORES];
2789 	struct kvmppc_vcore *vc[MAX_SUBCORES];
2790 };
2791 
2792 /*
2793  * This mapping means subcores 0 and 1 can use threads 0-3 and 4-7
2794  * respectively in 2-way micro-threading (split-core) mode on POWER8.
2795  */
2796 static int subcore_thread_map[MAX_SUBCORES] = { 0, 4, 2, 6 };
2797 
2798 static void init_core_info(struct core_info *cip, struct kvmppc_vcore *vc)
2799 {
2800 	memset(cip, 0, sizeof(*cip));
2801 	cip->n_subcores = 1;
2802 	cip->max_subcore_threads = vc->num_threads;
2803 	cip->total_threads = vc->num_threads;
2804 	cip->subcore_threads[0] = vc->num_threads;
2805 	cip->vc[0] = vc;
2806 }
2807 
2808 static bool subcore_config_ok(int n_subcores, int n_threads)
2809 {
2810 	/*
2811 	 * POWER9 "SMT4" cores are permanently in what is effectively a 4-way
2812 	 * split-core mode, with one thread per subcore.
2813 	 */
2814 	if (cpu_has_feature(CPU_FTR_ARCH_300))
2815 		return n_subcores <= 4 && n_threads == 1;
2816 
2817 	/* On POWER8, can only dynamically split if unsplit to begin with */
2818 	if (n_subcores > 1 && threads_per_subcore < MAX_SMT_THREADS)
2819 		return false;
2820 	if (n_subcores > MAX_SUBCORES)
2821 		return false;
2822 	if (n_subcores > 1) {
2823 		if (!(dynamic_mt_modes & 2))
2824 			n_subcores = 4;
2825 		if (n_subcores > 2 && !(dynamic_mt_modes & 4))
2826 			return false;
2827 	}
2828 
2829 	return n_subcores * roundup_pow_of_two(n_threads) <= MAX_SMT_THREADS;
2830 }
2831 
2832 static void init_vcore_to_run(struct kvmppc_vcore *vc)
2833 {
2834 	vc->entry_exit_map = 0;
2835 	vc->in_guest = 0;
2836 	vc->napping_threads = 0;
2837 	vc->conferring_threads = 0;
2838 	vc->tb_offset_applied = 0;
2839 }
2840 
2841 static bool can_dynamic_split(struct kvmppc_vcore *vc, struct core_info *cip)
2842 {
2843 	int n_threads = vc->num_threads;
2844 	int sub;
2845 
2846 	if (!cpu_has_feature(CPU_FTR_ARCH_207S))
2847 		return false;
2848 
2849 	/* In one_vm_per_core mode, require all vcores to be from the same vm */
2850 	if (one_vm_per_core && vc->kvm != cip->vc[0]->kvm)
2851 		return false;
2852 
2853 	/* Some POWER9 chips require all threads to be in the same MMU mode */
2854 	if (no_mixing_hpt_and_radix &&
2855 	    kvm_is_radix(vc->kvm) != kvm_is_radix(cip->vc[0]->kvm))
2856 		return false;
2857 
2858 	if (n_threads < cip->max_subcore_threads)
2859 		n_threads = cip->max_subcore_threads;
2860 	if (!subcore_config_ok(cip->n_subcores + 1, n_threads))
2861 		return false;
2862 	cip->max_subcore_threads = n_threads;
2863 
2864 	sub = cip->n_subcores;
2865 	++cip->n_subcores;
2866 	cip->total_threads += vc->num_threads;
2867 	cip->subcore_threads[sub] = vc->num_threads;
2868 	cip->vc[sub] = vc;
2869 	init_vcore_to_run(vc);
2870 	list_del_init(&vc->preempt_list);
2871 
2872 	return true;
2873 }
2874 
2875 /*
2876  * Work out whether it is possible to piggyback the execution of
2877  * vcore *pvc onto the execution of the other vcores described in *cip.
2878  */
2879 static bool can_piggyback(struct kvmppc_vcore *pvc, struct core_info *cip,
2880 			  int target_threads)
2881 {
2882 	if (cip->total_threads + pvc->num_threads > target_threads)
2883 		return false;
2884 
2885 	return can_dynamic_split(pvc, cip);
2886 }
2887 
2888 static void prepare_threads(struct kvmppc_vcore *vc)
2889 {
2890 	int i;
2891 	struct kvm_vcpu *vcpu;
2892 
2893 	for_each_runnable_thread(i, vcpu, vc) {
2894 		if (signal_pending(vcpu->arch.run_task))
2895 			vcpu->arch.ret = -EINTR;
2896 		else if (vcpu->arch.vpa.update_pending ||
2897 			 vcpu->arch.slb_shadow.update_pending ||
2898 			 vcpu->arch.dtl.update_pending)
2899 			vcpu->arch.ret = RESUME_GUEST;
2900 		else
2901 			continue;
2902 		kvmppc_remove_runnable(vc, vcpu);
2903 		wake_up(&vcpu->arch.cpu_run);
2904 	}
2905 }
2906 
2907 static void collect_piggybacks(struct core_info *cip, int target_threads)
2908 {
2909 	struct preempted_vcore_list *lp = this_cpu_ptr(&preempted_vcores);
2910 	struct kvmppc_vcore *pvc, *vcnext;
2911 
2912 	spin_lock(&lp->lock);
2913 	list_for_each_entry_safe(pvc, vcnext, &lp->list, preempt_list) {
2914 		if (!spin_trylock(&pvc->lock))
2915 			continue;
2916 		prepare_threads(pvc);
2917 		if (!pvc->n_runnable || !pvc->kvm->arch.mmu_ready) {
2918 			list_del_init(&pvc->preempt_list);
2919 			if (pvc->runner == NULL) {
2920 				pvc->vcore_state = VCORE_INACTIVE;
2921 				kvmppc_core_end_stolen(pvc);
2922 			}
2923 			spin_unlock(&pvc->lock);
2924 			continue;
2925 		}
2926 		if (!can_piggyback(pvc, cip, target_threads)) {
2927 			spin_unlock(&pvc->lock);
2928 			continue;
2929 		}
2930 		kvmppc_core_end_stolen(pvc);
2931 		pvc->vcore_state = VCORE_PIGGYBACK;
2932 		if (cip->total_threads >= target_threads)
2933 			break;
2934 	}
2935 	spin_unlock(&lp->lock);
2936 }
2937 
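/*
 * Re-check, after interrupts have been hard-disabled, that no runnable vcpu
 * in any of the collected vcores has a signal pending and that every vcore's
 * MMU is still set up; returns true if guest entry must be abandoned.
 */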
2938 static bool recheck_signals_and_mmu(struct core_info *cip)
2939 {
2940 	int sub, i;
2941 	struct kvm_vcpu *vcpu;
2942 	struct kvmppc_vcore *vc;
2943 
2944 	for (sub = 0; sub < cip->n_subcores; ++sub) {
2945 		vc = cip->vc[sub];
2946 		if (!vc->kvm->arch.mmu_ready)
2947 			return true;
2948 		for_each_runnable_thread(i, vcpu, vc)
2949 			if (signal_pending(vcpu->arch.run_task))
2950 				return true;
2951 	}
2952 	return false;
2953 }
2954 
2955 static void post_guest_process(struct kvmppc_vcore *vc, bool is_master)
2956 {
2957 	int still_running = 0, i;
2958 	u64 now;
2959 	long ret;
2960 	struct kvm_vcpu *vcpu;
2961 
2962 	spin_lock(&vc->lock);
2963 	now = get_tb();
2964 	for_each_runnable_thread(i, vcpu, vc) {
2965 		/*
2966 		 * It's safe to unlock the vcore in the loop here, because
2967 		 * for_each_runnable_thread() is safe against removal of
2968 		 * the vcpu, and the vcore state is VCORE_EXITING here,
2969 		 * so any vcpus becoming runnable will have their arch.trap
2970 		 * set to zero and can't actually run in the guest.
2971 		 */
2972 		spin_unlock(&vc->lock);
2973 		/* cancel pending dec exception if dec is positive */
2974 		if (now < vcpu->arch.dec_expires &&
2975 		    kvmppc_core_pending_dec(vcpu))
2976 			kvmppc_core_dequeue_dec(vcpu);
2977 
2978 		trace_kvm_guest_exit(vcpu);
2979 
2980 		ret = RESUME_GUEST;
2981 		if (vcpu->arch.trap)
2982 			ret = kvmppc_handle_exit_hv(vcpu,
2983 						    vcpu->arch.run_task);
2984 
2985 		vcpu->arch.ret = ret;
2986 		vcpu->arch.trap = 0;
2987 
2988 		spin_lock(&vc->lock);
2989 		if (is_kvmppc_resume_guest(vcpu->arch.ret)) {
2990 			if (vcpu->arch.pending_exceptions)
2991 				kvmppc_core_prepare_to_enter(vcpu);
2992 			if (vcpu->arch.ceded)
2993 				kvmppc_set_timer(vcpu);
2994 			else
2995 				++still_running;
2996 		} else {
2997 			kvmppc_remove_runnable(vc, vcpu);
2998 			wake_up(&vcpu->arch.cpu_run);
2999 		}
3000 	}
3001 	if (!is_master) {
3002 		if (still_running > 0) {
3003 			kvmppc_vcore_preempt(vc);
3004 		} else if (vc->runner) {
3005 			vc->vcore_state = VCORE_PREEMPT;
3006 			kvmppc_core_start_stolen(vc);
3007 		} else {
3008 			vc->vcore_state = VCORE_INACTIVE;
3009 		}
3010 		if (vc->n_runnable > 0 && vc->runner == NULL) {
3011 			/* make sure there's a candidate runner awake */
3012 			i = -1;
3013 			vcpu = next_runnable_thread(vc, &i);
3014 			wake_up(&vcpu->arch.cpu_run);
3015 		}
3016 	}
3017 	spin_unlock(&vc->lock);
3018 }
3019 
3020 /*
3021  * Clear core from the list of active host cores as we are about to
3022  * enter the guest. Only do this if it is the primary thread of the
3023  * core (not if a subcore) that is entering the guest.
3024  */
3025 static inline int kvmppc_clear_host_core(unsigned int cpu)
3026 {
3027 	int core;
3028 
3029 	if (!kvmppc_host_rm_ops_hv || cpu_thread_in_core(cpu))
3030 		return 0;
3031 	/*
3032 	 * The memory barrier can be omitted here because we do an smp_wmb()
3033 	 * later in kvmppc_start_thread() and the state only needs to be
3034 	 * visible to other CPUs after we enter the guest.
3035 	 */
3036 	core = cpu >> threads_shift;
3037 	kvmppc_host_rm_ops_hv->rm_core[core].rm_state.in_host = 0;
3038 	return 0;
3039 }
3040 
3041 /*
3042  * Advertise this core as an active host core since we exited the guest.
3043  * Only need to do this if it is the primary thread of the core that is
3044  * exiting.
3045  */
3046 static inline int kvmppc_set_host_core(unsigned int cpu)
3047 {
3048 	int core;
3049 
3050 	if (!kvmppc_host_rm_ops_hv || cpu_thread_in_core(cpu))
3051 		return 0;
3052 
3053 	/*
3054 	 * Memory barrier can be omitted here because we do a spin_unlock
3055 	 * immediately after this which provides the memory barrier.
3056 	 */
3057 	core = cpu >> threads_shift;
3058 	kvmppc_host_rm_ops_hv->rm_core[core].rm_state.in_host = 1;
3059 	return 0;
3060 }
3061 
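/*
 * Record in the lazy interrupt-masking state which host interrupt caused the
 * guest exit, so that it is replayed once interrupts are re-enabled (a host
 * system reset is replayed immediately instead).
 */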
3062 static void set_irq_happened(int trap)
3063 {
3064 	switch (trap) {
3065 	case BOOK3S_INTERRUPT_EXTERNAL:
3066 		local_paca->irq_happened |= PACA_IRQ_EE;
3067 		break;
3068 	case BOOK3S_INTERRUPT_H_DOORBELL:
3069 		local_paca->irq_happened |= PACA_IRQ_DBELL;
3070 		break;
3071 	case BOOK3S_INTERRUPT_HMI:
3072 		local_paca->irq_happened |= PACA_IRQ_HMI;
3073 		break;
3074 	case BOOK3S_INTERRUPT_SYSTEM_RESET:
3075 		replay_system_reset();
3076 		break;
3077 	}
3078 }
3079 
3080 /*
3081  * Run a set of guest threads on a physical core.
3082  * Called with vc->lock held.
3083  */
3084 static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
3085 {
3086 	struct kvm_vcpu *vcpu;
3087 	int i;
3088 	int srcu_idx;
3089 	struct core_info core_info;
3090 	struct kvmppc_vcore *pvc;
3091 	struct kvm_split_mode split_info, *sip;
3092 	int split, subcore_size, active;
3093 	int sub;
3094 	bool thr0_done;
3095 	unsigned long cmd_bit, stat_bit;
3096 	int pcpu, thr;
3097 	int target_threads;
3098 	int controlled_threads;
3099 	int trap;
3100 	bool is_power8;
3101 	bool hpt_on_radix;
3102 
3103 	/*
3104 	 * Remove from the list any threads that have a signal pending
3105 	 * or need a VPA update done
3106 	 */
3107 	prepare_threads(vc);
3108 
3109 	/* if the runner is no longer runnable, let the caller pick a new one */
3110 	if (vc->runner->arch.state != KVMPPC_VCPU_RUNNABLE)
3111 		return;
3112 
3113 	/*
3114 	 * Initialize *vc.
3115 	 */
3116 	init_vcore_to_run(vc);
3117 	vc->preempt_tb = TB_NIL;
3118 
3119 	/*
3120 	 * Number of threads that we will be controlling: the same as
3121 	 * the number of threads per subcore, except on POWER9,
3122 	 * where it's 1 because the threads are (mostly) independent.
3123 	 */
3124 	controlled_threads = threads_per_vcore(vc->kvm);
3125 
3126 	/*
3127 	 * Make sure we are running on primary threads, and that secondary
3128 	 * threads are offline.  Also check that the number of threads in this
3129 	 * guest does not exceed the current system threads per guest.
3130 	 * On POWER9, we must not be in independent-threads mode if
3131 	 * this is an HPT guest on a radix host machine, where the
3132 	 * CPU threads may not be in different MMU modes.
3133 	 */
3134 	hpt_on_radix = no_mixing_hpt_and_radix && radix_enabled() &&
3135 		!kvm_is_radix(vc->kvm);
3136 	if (((controlled_threads > 1) &&
3137 	     ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) ||
3138 	    (hpt_on_radix && vc->kvm->arch.threads_indep)) {
3139 		for_each_runnable_thread(i, vcpu, vc) {
3140 			vcpu->arch.ret = -EBUSY;
3141 			kvmppc_remove_runnable(vc, vcpu);
3142 			wake_up(&vcpu->arch.cpu_run);
3143 		}
3144 		goto out;
3145 	}
3146 
3147 	/*
3148 	 * See if we could run any other vcores on the physical core
3149 	 * along with this one.
3150 	 */
3151 	init_core_info(&core_info, vc);
3152 	pcpu = smp_processor_id();
3153 	target_threads = controlled_threads;
3154 	if (target_smt_mode && target_smt_mode < target_threads)
3155 		target_threads = target_smt_mode;
3156 	if (vc->num_threads < target_threads)
3157 		collect_piggybacks(&core_info, target_threads);
3158 
3159 	/*
3160 	 * On radix, arrange for TLB flushing if necessary.
3161 	 * This has to be done before disabling interrupts since
3162 	 * it uses smp_call_function().
3163 	 */
3164 	pcpu = smp_processor_id();
3165 	if (kvm_is_radix(vc->kvm)) {
3166 		for (sub = 0; sub < core_info.n_subcores; ++sub)
3167 			for_each_runnable_thread(i, vcpu, core_info.vc[sub])
3168 				kvmppc_prepare_radix_vcpu(vcpu, pcpu);
3169 	}
3170 
3171 	/*
3172 	 * Hard-disable interrupts, and check resched flag and signals.
3173 	 * If we need to reschedule or deliver a signal, clean up
3174 	 * and return without going into the guest(s).
3175 	 * If the mmu_ready flag has been cleared, don't go into the
3176 	 * guest because that means a HPT resize operation is in progress.
3177 	 */
3178 	local_irq_disable();
3179 	hard_irq_disable();
3180 	if (lazy_irq_pending() || need_resched() ||
3181 	    recheck_signals_and_mmu(&core_info)) {
3182 		local_irq_enable();
3183 		vc->vcore_state = VCORE_INACTIVE;
3184 		/* Unlock all except the primary vcore */
3185 		for (sub = 1; sub < core_info.n_subcores; ++sub) {
3186 			pvc = core_info.vc[sub];
3187 			/* Put back on to the preempted vcores list */
3188 			kvmppc_vcore_preempt(pvc);
3189 			spin_unlock(&pvc->lock);
3190 		}
3191 		for (i = 0; i < controlled_threads; ++i)
3192 			kvmppc_release_hwthread(pcpu + i);
3193 		return;
3194 	}
3195 
3196 	kvmppc_clear_host_core(pcpu);
3197 
3198 	/* Decide on micro-threading (split-core) mode */
3199 	subcore_size = threads_per_subcore;
3200 	cmd_bit = stat_bit = 0;
3201 	split = core_info.n_subcores;
3202 	sip = NULL;
3203 	is_power8 = cpu_has_feature(CPU_FTR_ARCH_207S)
3204 		&& !cpu_has_feature(CPU_FTR_ARCH_300);
3205 
3206 	if (split > 1 || hpt_on_radix) {
3207 		sip = &split_info;
3208 		memset(&split_info, 0, sizeof(split_info));
3209 		for (sub = 0; sub < core_info.n_subcores; ++sub)
3210 			split_info.vc[sub] = core_info.vc[sub];
3211 
3212 		if (is_power8) {
3213 			if (split == 2 && (dynamic_mt_modes & 2)) {
3214 				cmd_bit = HID0_POWER8_1TO2LPAR;
3215 				stat_bit = HID0_POWER8_2LPARMODE;
3216 			} else {
3217 				split = 4;
3218 				cmd_bit = HID0_POWER8_1TO4LPAR;
3219 				stat_bit = HID0_POWER8_4LPARMODE;
3220 			}
3221 			subcore_size = MAX_SMT_THREADS / split;
3222 			split_info.rpr = mfspr(SPRN_RPR);
3223 			split_info.pmmar = mfspr(SPRN_PMMAR);
3224 			split_info.ldbar = mfspr(SPRN_LDBAR);
3225 			split_info.subcore_size = subcore_size;
3226 		} else {
3227 			split_info.subcore_size = 1;
3228 			if (hpt_on_radix) {
3229 				/* Use the split_info for LPCR/LPIDR changes */
3230 				split_info.lpcr_req = vc->lpcr;
3231 				split_info.lpidr_req = vc->kvm->arch.lpid;
3232 				split_info.host_lpcr = vc->kvm->arch.host_lpcr;
3233 				split_info.do_set = 1;
3234 			}
3235 		}
3236 
3237 		/* order writes to split_info before kvm_split_mode pointer */
3238 		smp_wmb();
3239 	}
3240 
3241 	for (thr = 0; thr < controlled_threads; ++thr) {
3242 		struct paca_struct *paca = paca_ptrs[pcpu + thr];
3243 
3244 		paca->kvm_hstate.tid = thr;
3245 		paca->kvm_hstate.napping = 0;
3246 		paca->kvm_hstate.kvm_split_mode = sip;
3247 	}
3248 
3249 	/* Initiate micro-threading (split-core) on POWER8 if required */
3250 	if (cmd_bit) {
3251 		unsigned long hid0 = mfspr(SPRN_HID0);
3252 
3253 		hid0 |= cmd_bit | HID0_POWER8_DYNLPARDIS;
3254 		mb();
3255 		mtspr(SPRN_HID0, hid0);
3256 		isync();
3257 		for (;;) {
3258 			hid0 = mfspr(SPRN_HID0);
3259 			if (hid0 & stat_bit)
3260 				break;
3261 			cpu_relax();
3262 		}
3263 	}
3264 
3265 	/*
3266 	 * On POWER8, set RWMR register.
3267 	 * Since it only affects PURR and SPURR, it doesn't affect
3268 	 * the host, so we don't save/restore the host value.
3269 	 */
3270 	if (is_power8) {
3271 		unsigned long rwmr_val = RWMR_RPA_P8_8THREAD;
3272 		int n_online = atomic_read(&vc->online_count);
3273 
3274 		/*
3275 		 * Use the 8-thread value if we're doing split-core
3276 		 * or if the vcore's online count looks bogus.
3277 		 */
3278 		if (split == 1 && threads_per_subcore == MAX_SMT_THREADS &&
3279 		    n_online >= 1 && n_online <= MAX_SMT_THREADS)
3280 			rwmr_val = p8_rwmr_values[n_online];
3281 		mtspr(SPRN_RWMR, rwmr_val);
3282 	}
3283 
3284 	/* Start all the threads */
3285 	active = 0;
3286 	for (sub = 0; sub < core_info.n_subcores; ++sub) {
3287 		thr = is_power8 ? subcore_thread_map[sub] : sub;
3288 		thr0_done = false;
3289 		active |= 1 << thr;
3290 		pvc = core_info.vc[sub];
3291 		pvc->pcpu = pcpu + thr;
3292 		for_each_runnable_thread(i, vcpu, pvc) {
3293 			kvmppc_start_thread(vcpu, pvc);
3294 			kvmppc_create_dtl_entry(vcpu, pvc);
3295 			trace_kvm_guest_enter(vcpu);
3296 			if (!vcpu->arch.ptid)
3297 				thr0_done = true;
3298 			active |= 1 << (thr + vcpu->arch.ptid);
3299 		}
3300 		/*
3301 		 * We need to start the first thread of each subcore
3302 		 * even if it doesn't have a vcpu.
3303 		 */
3304 		if (!thr0_done)
3305 			kvmppc_start_thread(NULL, pvc);
3306 	}
3307 
3308 	/*
3309 	 * Ensure that split_info.do_nap is set after setting
3310 	 * the vcore pointer in the PACA of the secondaries.
3311 	 */
3312 	smp_mb();
3313 
3314 	/*
3315 	 * When doing micro-threading, poke the inactive threads as well.
3316 	 * This gets them to the nap instruction after kvm_do_nap,
3317 	 * which reduces the time taken to unsplit later.
3318 	 * For POWER9 HPT guest on radix host, we need all the secondary
3319 	 * threads woken up so they can do the LPCR/LPIDR change.
3320 	 */
3321 	if (cmd_bit || hpt_on_radix) {
3322 		split_info.do_nap = 1;	/* ask secondaries to nap when done */
3323 		for (thr = 1; thr < threads_per_subcore; ++thr)
3324 			if (!(active & (1 << thr)))
3325 				kvmppc_ipi_thread(pcpu + thr);
3326 	}
3327 
3328 	vc->vcore_state = VCORE_RUNNING;
3329 	preempt_disable();
3330 
3331 	trace_kvmppc_run_core(vc, 0);
3332 
3333 	for (sub = 0; sub < core_info.n_subcores; ++sub)
3334 		spin_unlock(&core_info.vc[sub]->lock);
3335 
3336 	guest_enter_irqoff();
3337 
3338 	srcu_idx = srcu_read_lock(&vc->kvm->srcu);
3339 
3340 	this_cpu_disable_ftrace();
3341 
3342 	/*
3343 	 * Interrupts will be enabled once we get into the guest,
3344 	 * so tell lockdep that we're about to enable interrupts.
3345 	 */
3346 	trace_hardirqs_on();
3347 
3348 	trap = __kvmppc_vcore_entry();
3349 
3350 	trace_hardirqs_off();
3351 
3352 	this_cpu_enable_ftrace();
3353 
3354 	srcu_read_unlock(&vc->kvm->srcu, srcu_idx);
3355 
3356 	set_irq_happened(trap);
3357 
3358 	spin_lock(&vc->lock);
3359 	/* prevent other vcpu threads from doing kvmppc_start_thread() now */
3360 	vc->vcore_state = VCORE_EXITING;
3361 
3362 	/* wait for secondary threads to finish writing their state to memory */
3363 	kvmppc_wait_for_nap(controlled_threads);
3364 
3365 	/* Return to whole-core mode if we split the core earlier */
3366 	if (cmd_bit) {
3367 		unsigned long hid0 = mfspr(SPRN_HID0);
3368 		unsigned long loops = 0;
3369 
3370 		hid0 &= ~HID0_POWER8_DYNLPARDIS;
3371 		stat_bit = HID0_POWER8_2LPARMODE | HID0_POWER8_4LPARMODE;
3372 		mb();
3373 		mtspr(SPRN_HID0, hid0);
3374 		isync();
3375 		for (;;) {
3376 			hid0 = mfspr(SPRN_HID0);
3377 			if (!(hid0 & stat_bit))
3378 				break;
3379 			cpu_relax();
3380 			++loops;
3381 		}
3382 	} else if (hpt_on_radix) {
3383 		/* Wait for all threads to have seen final sync */
3384 		for (thr = 1; thr < controlled_threads; ++thr) {
3385 			struct paca_struct *paca = paca_ptrs[pcpu + thr];
3386 
3387 			while (paca->kvm_hstate.kvm_split_mode) {
3388 				HMT_low();
3389 				barrier();
3390 			}
3391 			HMT_medium();
3392 		}
3393 	}
3394 	split_info.do_nap = 0;
3395 
3396 	kvmppc_set_host_core(pcpu);
3397 
3398 	local_irq_enable();
3399 	guest_exit();
3400 
3401 	/* Let secondaries go back to the offline loop */
3402 	for (i = 0; i < controlled_threads; ++i) {
3403 		kvmppc_release_hwthread(pcpu + i);
3404 		if (sip && sip->napped[i])
3405 			kvmppc_ipi_thread(pcpu + i);
3406 		cpumask_clear_cpu(pcpu + i, &vc->kvm->arch.cpu_in_guest);
3407 	}
3408 
3409 	spin_unlock(&vc->lock);
3410 
3411 	/* make sure updates to secondary vcpu structs are visible now */
3412 	smp_mb();
3413 
3414 	preempt_enable();
3415 
3416 	for (sub = 0; sub < core_info.n_subcores; ++sub) {
3417 		pvc = core_info.vc[sub];
3418 		post_guest_process(pvc, pvc == vc);
3419 	}
3420 
3421 	spin_lock(&vc->lock);
3422 
3423  out:
3424 	vc->vcore_state = VCORE_INACTIVE;
3425 	trace_kvmppc_run_core(vc, 1);
3426 }
3427 
3428 /*
3429  * Load up hypervisor-mode registers on P9.
3430  */
3431 static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
3432 				     unsigned long lpcr)
3433 {
3434 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
3435 	s64 hdec;
3436 	u64 tb, purr, spurr;
3437 	int trap;
3438 	unsigned long host_hfscr = mfspr(SPRN_HFSCR);
3439 	unsigned long host_ciabr = mfspr(SPRN_CIABR);
3440 	unsigned long host_dawr = mfspr(SPRN_DAWR0);
3441 	unsigned long host_dawrx = mfspr(SPRN_DAWRX0);
3442 	unsigned long host_psscr = mfspr(SPRN_PSSCR);
3443 	unsigned long host_pidr = mfspr(SPRN_PID);
3444 
3445 	/*
3446 	 * P8 and P9 suppress the HDEC exception when LPCR[HDICE] = 0,
3447 	 * so set HDICE before writing HDEC.
3448 	 */
3449 	mtspr(SPRN_LPCR, vcpu->kvm->arch.host_lpcr | LPCR_HDICE);
3450 	isync();
3451 
3452 	hdec = time_limit - mftb();
3453 	if (hdec < 0) {
3454 		mtspr(SPRN_LPCR, vcpu->kvm->arch.host_lpcr);
3455 		isync();
3456 		return BOOK3S_INTERRUPT_HV_DECREMENTER;
3457 	}
3458 	mtspr(SPRN_HDEC, hdec);
3459 
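	/*
	 * Apply the guest's timebase offset.  TBU40 only sets the top 40
	 * bits of the timebase; if the low 24 bits are now behind those of
	 * the intended value, bump the top bits by one (0x1000000) so the
	 * resulting timebase is not behind the requested one.
	 */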
3460 	if (vc->tb_offset) {
3461 		u64 new_tb = mftb() + vc->tb_offset;
3462 		mtspr(SPRN_TBU40, new_tb);
3463 		tb = mftb();
3464 		if ((tb & 0xffffff) < (new_tb & 0xffffff))
3465 			mtspr(SPRN_TBU40, new_tb + 0x1000000);
3466 		vc->tb_offset_applied = vc->tb_offset;
3467 	}
3468 
3469 	if (vc->pcr)
3470 		mtspr(SPRN_PCR, vc->pcr | PCR_MASK);
3471 	mtspr(SPRN_DPDES, vc->dpdes);
3472 	mtspr(SPRN_VTB, vc->vtb);
3473 
3474 	local_paca->kvm_hstate.host_purr = mfspr(SPRN_PURR);
3475 	local_paca->kvm_hstate.host_spurr = mfspr(SPRN_SPURR);
3476 	mtspr(SPRN_PURR, vcpu->arch.purr);
3477 	mtspr(SPRN_SPURR, vcpu->arch.spurr);
3478 
3479 	if (dawr_enabled()) {
3480 		mtspr(SPRN_DAWR0, vcpu->arch.dawr);
3481 		mtspr(SPRN_DAWRX0, vcpu->arch.dawrx);
3482 	}
3483 	mtspr(SPRN_CIABR, vcpu->arch.ciabr);
3484 	mtspr(SPRN_IC, vcpu->arch.ic);
3485 	mtspr(SPRN_PID, vcpu->arch.pid);
3486 
3487 	mtspr(SPRN_PSSCR, vcpu->arch.psscr | PSSCR_EC |
3488 	      (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));
3489 
3490 	mtspr(SPRN_HFSCR, vcpu->arch.hfscr);
3491 
3492 	mtspr(SPRN_SPRG0, vcpu->arch.shregs.sprg0);
3493 	mtspr(SPRN_SPRG1, vcpu->arch.shregs.sprg1);
3494 	mtspr(SPRN_SPRG2, vcpu->arch.shregs.sprg2);
3495 	mtspr(SPRN_SPRG3, vcpu->arch.shregs.sprg3);
3496 
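	/* Allow the guest to set any AMR/IAMR key bits (AMOR is the override mask) */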
3497 	mtspr(SPRN_AMOR, ~0UL);
3498 
3499 	mtspr(SPRN_LPCR, lpcr);
3500 	isync();
3501 
3502 	kvmppc_xive_push_vcpu(vcpu);
3503 
3504 	mtspr(SPRN_SRR0, vcpu->arch.shregs.srr0);
3505 	mtspr(SPRN_SRR1, vcpu->arch.shregs.srr1);
3506 
3507 	trap = __kvmhv_vcpu_entry_p9(vcpu);
3508 
3509 	/* Advance host PURR/SPURR by the amount used by guest */
3510 	purr = mfspr(SPRN_PURR);
3511 	spurr = mfspr(SPRN_SPURR);
3512 	mtspr(SPRN_PURR, local_paca->kvm_hstate.host_purr +
3513 	      purr - vcpu->arch.purr);
3514 	mtspr(SPRN_SPURR, local_paca->kvm_hstate.host_spurr +
3515 	      spurr - vcpu->arch.spurr);
3516 	vcpu->arch.purr = purr;
3517 	vcpu->arch.spurr = spurr;
3518 
3519 	vcpu->arch.ic = mfspr(SPRN_IC);
3520 	vcpu->arch.pid = mfspr(SPRN_PID);
3521 	vcpu->arch.psscr = mfspr(SPRN_PSSCR) & PSSCR_GUEST_VIS;
3522 
3523 	vcpu->arch.shregs.sprg0 = mfspr(SPRN_SPRG0);
3524 	vcpu->arch.shregs.sprg1 = mfspr(SPRN_SPRG1);
3525 	vcpu->arch.shregs.sprg2 = mfspr(SPRN_SPRG2);
3526 	vcpu->arch.shregs.sprg3 = mfspr(SPRN_SPRG3);
3527 
3528 	/* Preserve PSSCR[FAKE_SUSPEND] until we've called kvmppc_save_tm_hv */
3529 	mtspr(SPRN_PSSCR, host_psscr |
3530 	      (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));
3531 	mtspr(SPRN_HFSCR, host_hfscr);
3532 	mtspr(SPRN_CIABR, host_ciabr);
3533 	mtspr(SPRN_DAWR0, host_dawr);
3534 	mtspr(SPRN_DAWRX0, host_dawrx);
3535 	mtspr(SPRN_PID, host_pidr);
3536 
3537 	/*
3538 	 * Since this is radix, do an eieio; tlbsync; ptesync sequence in
3539 	 * case we interrupted the guest between a tlbie and a ptesync.
3540 	 */
3541 	asm volatile("eieio; tlbsync; ptesync");
3542 
3543 	/*
3544 	 * cp_abort is required if the processor supports local copy-paste
3545 	 * to clear the copy buffer that was under control of the guest.
3546 	 */
3547 	if (cpu_has_feature(CPU_FTR_ARCH_31))
3548 		asm volatile(PPC_CP_ABORT);
3549 
3550 	mtspr(SPRN_LPID, vcpu->kvm->arch.host_lpid);	/* restore host LPID */
3551 	isync();
3552 
3553 	vc->dpdes = mfspr(SPRN_DPDES);
3554 	vc->vtb = mfspr(SPRN_VTB);
3555 	mtspr(SPRN_DPDES, 0);
3556 	if (vc->pcr)
3557 		mtspr(SPRN_PCR, PCR_MASK);
3558 
3559 	if (vc->tb_offset_applied) {
3560 		u64 new_tb = mftb() - vc->tb_offset_applied;
3561 		mtspr(SPRN_TBU40, new_tb);
3562 		tb = mftb();
3563 		if ((tb & 0xffffff) < (new_tb & 0xffffff))
3564 			mtspr(SPRN_TBU40, new_tb + 0x1000000);
3565 		vc->tb_offset_applied = 0;
3566 	}
3567 
3568 	mtspr(SPRN_HDEC, 0x7fffffff);
3569 	mtspr(SPRN_LPCR, vcpu->kvm->arch.host_lpcr);
3570 
3571 	return trap;
3572 }
3573 
3574 /*
3575  * Virtual-mode guest entry for POWER9 and later when the host and
3576  * guest are both using the radix MMU.  The LPIDR has already been set.
3577  */
3578 static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
3579 			 unsigned long lpcr)
3580 {
3581 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
3582 	unsigned long host_dscr = mfspr(SPRN_DSCR);
3583 	unsigned long host_tidr = mfspr(SPRN_TIDR);
3584 	unsigned long host_iamr = mfspr(SPRN_IAMR);
3585 	unsigned long host_amr = mfspr(SPRN_AMR);
3586 	s64 dec;
3587 	u64 tb;
3588 	int trap, save_pmu;
3589 
3590 	dec = mfspr(SPRN_DEC);
3591 	tb = mftb();
3592 	if (dec < 0)
3593 		return BOOK3S_INTERRUPT_HV_DECREMENTER;
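	/*
	 * Note when the host decrementer is due to expire and don't let the
	 * guest run past that point.
	 */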
3594 	local_paca->kvm_hstate.dec_expires = dec + tb;
3595 	if (local_paca->kvm_hstate.dec_expires < time_limit)
3596 		time_limit = local_paca->kvm_hstate.dec_expires;
3597 
3598 	vcpu->arch.ceded = 0;
3599 
3600 	kvmhv_save_host_pmu();		/* saves it to PACA kvm_hstate */
3601 
3602 	kvmppc_subcore_enter_guest();
3603 
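	/*
	 * Mark thread 0 as having entered the guest: the low byte of
	 * entry_exit_map records entries and the next byte records exits,
	 * hence the 0x101 written on the way out.
	 */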
3604 	vc->entry_exit_map = 1;
3605 	vc->in_guest = 1;
3606 
3607 	if (vcpu->arch.vpa.pinned_addr) {
3608 		struct lppaca *lp = vcpu->arch.vpa.pinned_addr;
3609 		u32 yield_count = be32_to_cpu(lp->yield_count) + 1;
3610 		lp->yield_count = cpu_to_be32(yield_count);
3611 		vcpu->arch.vpa.dirty = 1;
3612 	}
3613 
3614 	if (cpu_has_feature(CPU_FTR_TM) ||
3615 	    cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
3616 		kvmppc_restore_tm_hv(vcpu, vcpu->arch.shregs.msr, true);
3617 
3618 	kvmhv_load_guest_pmu(vcpu);
3619 
3620 	msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX);
3621 	load_fp_state(&vcpu->arch.fp);
3622 #ifdef CONFIG_ALTIVEC
3623 	load_vr_state(&vcpu->arch.vr);
3624 #endif
3625 	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
3626 
3627 	mtspr(SPRN_DSCR, vcpu->arch.dscr);
3628 	mtspr(SPRN_IAMR, vcpu->arch.iamr);
3629 	mtspr(SPRN_PSPB, vcpu->arch.pspb);
3630 	mtspr(SPRN_FSCR, vcpu->arch.fscr);
3631 	mtspr(SPRN_TAR, vcpu->arch.tar);
3632 	mtspr(SPRN_EBBHR, vcpu->arch.ebbhr);
3633 	mtspr(SPRN_EBBRR, vcpu->arch.ebbrr);
3634 	mtspr(SPRN_BESCR, vcpu->arch.bescr);
3635 	mtspr(SPRN_WORT, vcpu->arch.wort);
3636 	mtspr(SPRN_TIDR, vcpu->arch.tid);
3637 	mtspr(SPRN_DAR, vcpu->arch.shregs.dar);
3638 	mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr);
3639 	mtspr(SPRN_AMR, vcpu->arch.amr);
3640 	mtspr(SPRN_UAMOR, vcpu->arch.uamor);
3641 
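	/* If the guest has cleared its run latch (CTRL[RUN]), mirror that in CTRL */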
3642 	if (!(vcpu->arch.ctrl & 1))
3643 		mtspr(SPRN_CTRLT, mfspr(SPRN_CTRLF) & ~1);
3644 
3645 	mtspr(SPRN_DEC, vcpu->arch.dec_expires - mftb());
3646 
3647 	if (kvmhv_on_pseries()) {
3648 		/*
3649 		 * We need to save and restore the guest-visible part of the
3650 		 * PSSCR (i.e. using SPRN_PSSCR_PR) since the hypervisor
3651 		 * doesn't do this for us. Note this is only required on
3652 		 * pseries; otherwise kvmhv_load_hv_regs_and_go() below does it.
3653 		 */
3654 		unsigned long host_psscr;
3655 		/* call our hypervisor to load up HV regs and go */
3656 		struct hv_guest_state hvregs;
3657 
3658 		host_psscr = mfspr(SPRN_PSSCR_PR);
3659 		mtspr(SPRN_PSSCR_PR, vcpu->arch.psscr);
3660 		kvmhv_save_hv_regs(vcpu, &hvregs);
3661 		hvregs.lpcr = lpcr;
3662 		vcpu->arch.regs.msr = vcpu->arch.shregs.msr;
3663 		hvregs.version = HV_GUEST_STATE_VERSION;
3664 		if (vcpu->arch.nested) {
3665 			hvregs.lpid = vcpu->arch.nested->shadow_lpid;
3666 			hvregs.vcpu_token = vcpu->arch.nested_vcpu_id;
3667 		} else {
3668 			hvregs.lpid = vcpu->kvm->arch.lpid;
3669 			hvregs.vcpu_token = vcpu->vcpu_id;
3670 		}
3671 		hvregs.hdec_expiry = time_limit;
3672 		trap = plpar_hcall_norets(H_ENTER_NESTED, __pa(&hvregs),
3673 					  __pa(&vcpu->arch.regs));
3674 		kvmhv_restore_hv_return_state(vcpu, &hvregs);
3675 		vcpu->arch.shregs.msr = vcpu->arch.regs.msr;
3676 		vcpu->arch.shregs.dar = mfspr(SPRN_DAR);
3677 		vcpu->arch.shregs.dsisr = mfspr(SPRN_DSISR);
3678 		vcpu->arch.psscr = mfspr(SPRN_PSSCR_PR);
3679 		mtspr(SPRN_PSSCR_PR, host_psscr);
3680 
3681 		/* H_CEDE has to be handled now, not later */
3682 		if (trap == BOOK3S_INTERRUPT_SYSCALL && !vcpu->arch.nested &&
3683 		    kvmppc_get_gpr(vcpu, 3) == H_CEDE) {
3684 			kvmppc_nested_cede(vcpu);
3685 			kvmppc_set_gpr(vcpu, 3, 0);
3686 			trap = 0;
3687 		}
3688 	} else {
3689 		trap = kvmhv_load_hv_regs_and_go(vcpu, time_limit, lpcr);
3690 	}
3691 
3692 	vcpu->arch.slb_max = 0;
3693 	dec = mfspr(SPRN_DEC);
3694 	if (!(lpcr & LPCR_LD)) /* Sign extend if not using large decrementer */
3695 		dec = (s32) dec;
3696 	tb = mftb();
3697 	vcpu->arch.dec_expires = dec + tb;
3698 	vcpu->cpu = -1;
3699 	vcpu->arch.thread_cpu = -1;
3700 	vcpu->arch.ctrl = mfspr(SPRN_CTRLF);
3701 
3702 	vcpu->arch.iamr = mfspr(SPRN_IAMR);
3703 	vcpu->arch.pspb = mfspr(SPRN_PSPB);
3704 	vcpu->arch.fscr = mfspr(SPRN_FSCR);
3705 	vcpu->arch.tar = mfspr(SPRN_TAR);
3706 	vcpu->arch.ebbhr = mfspr(SPRN_EBBHR);
3707 	vcpu->arch.ebbrr = mfspr(SPRN_EBBRR);
3708 	vcpu->arch.bescr = mfspr(SPRN_BESCR);
3709 	vcpu->arch.wort = mfspr(SPRN_WORT);
3710 	vcpu->arch.tid = mfspr(SPRN_TIDR);
3711 	vcpu->arch.amr = mfspr(SPRN_AMR);
3712 	vcpu->arch.uamor = mfspr(SPRN_UAMOR);
3713 	vcpu->arch.dscr = mfspr(SPRN_DSCR);
3714 
3715 	mtspr(SPRN_PSPB, 0);
3716 	mtspr(SPRN_WORT, 0);
3717 	mtspr(SPRN_UAMOR, 0);
3718 	mtspr(SPRN_DSCR, host_dscr);
3719 	mtspr(SPRN_TIDR, host_tidr);
3720 	mtspr(SPRN_IAMR, host_iamr);
3721 	mtspr(SPRN_PSPB, 0);
3722 
3723 	if (host_amr != vcpu->arch.amr)
3724 		mtspr(SPRN_AMR, host_amr);
3725 
3726 	msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX);
3727 	store_fp_state(&vcpu->arch.fp);
3728 #ifdef CONFIG_ALTIVEC
3729 	store_vr_state(&vcpu->arch.vr);
3730 #endif
3731 	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
3732 
3733 	if (cpu_has_feature(CPU_FTR_TM) ||
3734 	    cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
3735 		kvmppc_save_tm_hv(vcpu, vcpu->arch.shregs.msr, true);
3736 
3737 	save_pmu = 1;
3738 	if (vcpu->arch.vpa.pinned_addr) {
3739 		struct lppaca *lp = vcpu->arch.vpa.pinned_addr;
3740 		u32 yield_count = be32_to_cpu(lp->yield_count) + 1;
3741 		lp->yield_count = cpu_to_be32(yield_count);
3742 		vcpu->arch.vpa.dirty = 1;
3743 		save_pmu = lp->pmcregs_in_use;
3744 	}
3745 	/* Must save pmu if this guest is capable of running nested guests */
3746 	save_pmu |= nesting_enabled(vcpu->kvm);
3747 
3748 	kvmhv_save_guest_pmu(vcpu, save_pmu);
3749 
3750 	vc->entry_exit_map = 0x101;
3751 	vc->in_guest = 0;
3752 
3753 	mtspr(SPRN_DEC, local_paca->kvm_hstate.dec_expires - mftb());
3754 	mtspr(SPRN_SPRG_VDSO_WRITE, local_paca->sprg_vdso);
3755 
3756 	kvmhv_load_host_pmu();
3757 
3758 	kvmppc_subcore_exit_guest();
3759 
3760 	return trap;
3761 }
3762 
3763 /*
3764  * Wait for some other vcpu thread to execute us, and
3765  * wake us up when we need to handle something in the host.
3766  */
3767 static void kvmppc_wait_for_exec(struct kvmppc_vcore *vc,
3768 				 struct kvm_vcpu *vcpu, int wait_state)
3769 {
3770 	DEFINE_WAIT(wait);
3771 
3772 	prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state);
3773 	if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
3774 		spin_unlock(&vc->lock);
3775 		schedule();
3776 		spin_lock(&vc->lock);
3777 	}
3778 	finish_wait(&vcpu->arch.cpu_run, &wait);
3779 }
3780 
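/*
 * Scale the vcore's halt-polling interval up by halt_poll_ns_grow,
 * starting from halt_poll_ns_grow_start; shrink_halt_poll_ns() below
 * does the reverse when polling stops paying off.
 */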
3781 static void grow_halt_poll_ns(struct kvmppc_vcore *vc)
3782 {
3783 	if (!halt_poll_ns_grow)
3784 		return;
3785 
3786 	vc->halt_poll_ns *= halt_poll_ns_grow;
3787 	if (vc->halt_poll_ns < halt_poll_ns_grow_start)
3788 		vc->halt_poll_ns = halt_poll_ns_grow_start;
3789 }
3790 
3791 static void shrink_halt_poll_ns(struct kvmppc_vcore *vc)
3792 {
3793 	if (halt_poll_ns_shrink == 0)
3794 		vc->halt_poll_ns = 0;
3795 	else
3796 		vc->halt_poll_ns /= halt_poll_ns_shrink;
3797 }
3798 
3799 #ifdef CONFIG_KVM_XICS
3800 static inline bool xive_interrupt_pending(struct kvm_vcpu *vcpu)
3801 {
3802 	if (!xics_on_xive())
3803 		return false;
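	/*
	 * With XIVE, lower priority values are more favoured, so an
	 * interrupt is pending if PIPR compares below the current CPPR.
	 */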
3804 	return vcpu->arch.irq_pending || vcpu->arch.xive_saved_state.pipr <
3805 		vcpu->arch.xive_saved_state.cppr;
3806 }
3807 #else
3808 static inline bool xive_interrupt_pending(struct kvm_vcpu *vcpu)
3809 {
3810 	return false;
3811 }
3812 #endif /* CONFIG_KVM_XICS */
3813 
3814 static bool kvmppc_vcpu_woken(struct kvm_vcpu *vcpu)
3815 {
3816 	if (vcpu->arch.pending_exceptions || vcpu->arch.prodded ||
3817 	    kvmppc_doorbell_pending(vcpu) || xive_interrupt_pending(vcpu))
3818 		return true;
3819 
3820 	return false;
3821 }
3822 
3823 /*
3824  * Check to see if any of the runnable vcpus on the vcore have pending
3825  * exceptions or are no longer ceded
3826  */
3827 static int kvmppc_vcore_check_block(struct kvmppc_vcore *vc)
3828 {
3829 	struct kvm_vcpu *vcpu;
3830 	int i;
3831 
3832 	for_each_runnable_thread(i, vcpu, vc) {
3833 		if (!vcpu->arch.ceded || kvmppc_vcpu_woken(vcpu))
3834 			return 1;
3835 	}
3836 
3837 	return 0;
3838 }
3839 
3840 /*
3841  * All the vcpus in this vcore are idle, so wait for a decrementer
3842  * or external interrupt to one of the vcpus.  vc->lock is held.
3843  */
3844 static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
3845 {
3846 	ktime_t cur, start_poll, start_wait;
3847 	int do_sleep = 1;
3848 	u64 block_ns;
3849 
3850 	/* Poll for pending exceptions and ceded state */
3851 	cur = start_poll = ktime_get();
3852 	if (vc->halt_poll_ns) {
3853 		ktime_t stop = ktime_add_ns(start_poll, vc->halt_poll_ns);
3854 		++vc->runner->stat.halt_attempted_poll;
3855 
3856 		vc->vcore_state = VCORE_POLLING;
3857 		spin_unlock(&vc->lock);
3858 
3859 		do {
3860 			if (kvmppc_vcore_check_block(vc)) {
3861 				do_sleep = 0;
3862 				break;
3863 			}
3864 			cur = ktime_get();
3865 		} while (single_task_running() && ktime_before(cur, stop));
3866 
3867 		spin_lock(&vc->lock);
3868 		vc->vcore_state = VCORE_INACTIVE;
3869 
3870 		if (!do_sleep) {
3871 			++vc->runner->stat.halt_successful_poll;
3872 			goto out;
3873 		}
3874 	}
3875 
3876 	prepare_to_rcuwait(&vc->wait);
3877 	set_current_state(TASK_INTERRUPTIBLE);
3878 	if (kvmppc_vcore_check_block(vc)) {
3879 		finish_rcuwait(&vc->wait);
3880 		do_sleep = 0;
3881 		/* If we polled, count this as a successful poll */
3882 		if (vc->halt_poll_ns)
3883 			++vc->runner->stat.halt_successful_poll;
3884 		goto out;
3885 	}
3886 
3887 	start_wait = ktime_get();
3888 
3889 	vc->vcore_state = VCORE_SLEEPING;
3890 	trace_kvmppc_vcore_blocked(vc, 0);
3891 	spin_unlock(&vc->lock);
3892 	schedule();
3893 	finish_rcuwait(&vc->wait);
3894 	spin_lock(&vc->lock);
3895 	vc->vcore_state = VCORE_INACTIVE;
3896 	trace_kvmppc_vcore_blocked(vc, 1);
3897 	++vc->runner->stat.halt_successful_wait;
3898 
3899 	cur = ktime_get();
3900 
3901 out:
3902 	block_ns = ktime_to_ns(cur) - ktime_to_ns(start_poll);
3903 
3904 	/* Attribute wait time */
3905 	if (do_sleep) {
3906 		vc->runner->stat.halt_wait_ns +=
3907 			ktime_to_ns(cur) - ktime_to_ns(start_wait);
3908 		/* Attribute failed poll time */
3909 		if (vc->halt_poll_ns)
3910 			vc->runner->stat.halt_poll_fail_ns +=
3911 				ktime_to_ns(start_wait) -
3912 				ktime_to_ns(start_poll);
3913 	} else {
3914 		/* Attribute successful poll time */
3915 		if (vc->halt_poll_ns)
3916 			vc->runner->stat.halt_poll_success_ns +=
3917 				ktime_to_ns(cur) -
3918 				ktime_to_ns(start_poll);
3919 	}
3920 
3921 	/* Adjust poll time */
3922 	if (halt_poll_ns) {
3923 		if (block_ns <= vc->halt_poll_ns)
3924 			;
3925 		/* We slept and blocked for longer than the max halt time */
3926 		else if (vc->halt_poll_ns && block_ns > halt_poll_ns)
3927 			shrink_halt_poll_ns(vc);
3928 		/* We slept and our poll time is too small */
3929 		else if (vc->halt_poll_ns < halt_poll_ns &&
3930 				block_ns < halt_poll_ns)
3931 			grow_halt_poll_ns(vc);
3932 		if (vc->halt_poll_ns > halt_poll_ns)
3933 			vc->halt_poll_ns = halt_poll_ns;
3934 	} else
3935 		vc->halt_poll_ns = 0;
3936 
3937 	trace_kvmppc_vcore_wakeup(do_sleep, block_ns);
3938 }
3939 
3940 /*
3941  * This never fails for a radix guest, as none of the operations it does
3942  * for a radix guest can fail or have a way to report failure.
3943  * kvmhv_run_single_vcpu() relies on this fact.
3944  */
3945 static int kvmhv_setup_mmu(struct kvm_vcpu *vcpu)
3946 {
3947 	int r = 0;
3948 	struct kvm *kvm = vcpu->kvm;
3949 
3950 	mutex_lock(&kvm->arch.mmu_setup_lock);
3951 	if (!kvm->arch.mmu_ready) {
3952 		if (!kvm_is_radix(kvm))
3953 			r = kvmppc_hv_setup_htab_rma(vcpu);
3954 		if (!r) {
3955 			if (cpu_has_feature(CPU_FTR_ARCH_300))
3956 				kvmppc_setup_partition_table(kvm);
3957 			kvm->arch.mmu_ready = 1;
3958 		}
3959 	}
3960 	mutex_unlock(&kvm->arch.mmu_setup_lock);
3961 	return r;
3962 }
3963 
3964 static int kvmppc_run_vcpu(struct kvm_vcpu *vcpu)
3965 {
3966 	struct kvm_run *run = vcpu->run;
3967 	int n_ceded, i, r;
3968 	struct kvmppc_vcore *vc;
3969 	struct kvm_vcpu *v;
3970 
3971 	trace_kvmppc_run_vcpu_enter(vcpu);
3972 
3973 	run->exit_reason = 0;
3974 	vcpu->arch.ret = RESUME_GUEST;
3975 	vcpu->arch.trap = 0;
3976 	kvmppc_update_vpas(vcpu);
3977 
3978 	/*
3979 	 * Synchronize with other threads in this virtual core
3980 	 */
3981 	vc = vcpu->arch.vcore;
3982 	spin_lock(&vc->lock);
3983 	vcpu->arch.ceded = 0;
3984 	vcpu->arch.run_task = current;
3985 	vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb());
3986 	vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
3987 	vcpu->arch.busy_preempt = TB_NIL;
3988 	WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], vcpu);
3989 	++vc->n_runnable;
3990 
3991 	/*
3992 	 * This happens the first time this is called for a vcpu.
3993 	 * If the vcore is already running, we may be able to start
3994 	 * this thread straight away and have it join in.
3995 	 */
3996 	if (!signal_pending(current)) {
3997 		if ((vc->vcore_state == VCORE_PIGGYBACK ||
3998 		     vc->vcore_state == VCORE_RUNNING) &&
3999 			   !VCORE_IS_EXITING(vc)) {
4000 			kvmppc_create_dtl_entry(vcpu, vc);
4001 			kvmppc_start_thread(vcpu, vc);
4002 			trace_kvm_guest_enter(vcpu);
4003 		} else if (vc->vcore_state == VCORE_SLEEPING) {
4004 			rcuwait_wake_up(&vc->wait);
4005 		}
4006 
4007 	}
4008 
4009 	while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
4010 	       !signal_pending(current)) {
4011 		/* See if the MMU is ready to go */
4012 		if (!vcpu->kvm->arch.mmu_ready) {
4013 			spin_unlock(&vc->lock);
4014 			r = kvmhv_setup_mmu(vcpu);
4015 			spin_lock(&vc->lock);
4016 			if (r) {
4017 				run->exit_reason = KVM_EXIT_FAIL_ENTRY;
4018 				run->fail_entry.
4019 					hardware_entry_failure_reason = 0;
4020 				vcpu->arch.ret = r;
4021 				break;
4022 			}
4023 		}
4024 
4025 		if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL)
4026 			kvmppc_vcore_end_preempt(vc);
4027 
4028 		if (vc->vcore_state != VCORE_INACTIVE) {
4029 			kvmppc_wait_for_exec(vc, vcpu, TASK_INTERRUPTIBLE);
4030 			continue;
4031 		}
4032 		for_each_runnable_thread(i, v, vc) {
4033 			kvmppc_core_prepare_to_enter(v);
4034 			if (signal_pending(v->arch.run_task)) {
4035 				kvmppc_remove_runnable(vc, v);
4036 				v->stat.signal_exits++;
4037 				v->run->exit_reason = KVM_EXIT_INTR;
4038 				v->arch.ret = -EINTR;
4039 				wake_up(&v->arch.cpu_run);
4040 			}
4041 		}
4042 		if (!vc->n_runnable || vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
4043 			break;
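		/*
		 * Count the runnable vcpus that have ceded and have nothing
		 * pending; if that is all of them, put the whole vcore to
		 * sleep rather than running it.
		 */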
4044 		n_ceded = 0;
4045 		for_each_runnable_thread(i, v, vc) {
4046 			if (!kvmppc_vcpu_woken(v))
4047 				n_ceded += v->arch.ceded;
4048 			else
4049 				v->arch.ceded = 0;
4050 		}
4051 		vc->runner = vcpu;
4052 		if (n_ceded == vc->n_runnable) {
4053 			kvmppc_vcore_blocked(vc);
4054 		} else if (need_resched()) {
4055 			kvmppc_vcore_preempt(vc);
4056 			/* Let something else run */
4057 			cond_resched_lock(&vc->lock);
4058 			if (vc->vcore_state == VCORE_PREEMPT)
4059 				kvmppc_vcore_end_preempt(vc);
4060 		} else {
4061 			kvmppc_run_core(vc);
4062 		}
4063 		vc->runner = NULL;
4064 	}
4065 
4066 	while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
4067 	       (vc->vcore_state == VCORE_RUNNING ||
4068 		vc->vcore_state == VCORE_EXITING ||
4069 		vc->vcore_state == VCORE_PIGGYBACK))
4070 		kvmppc_wait_for_exec(vc, vcpu, TASK_UNINTERRUPTIBLE);
4071 
4072 	if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL)
4073 		kvmppc_vcore_end_preempt(vc);
4074 
4075 	if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
4076 		kvmppc_remove_runnable(vc, vcpu);
4077 		vcpu->stat.signal_exits++;
4078 		run->exit_reason = KVM_EXIT_INTR;
4079 		vcpu->arch.ret = -EINTR;
4080 	}
4081 
4082 	if (vc->n_runnable && vc->vcore_state == VCORE_INACTIVE) {
4083 		/* Wake up some vcpu to run the core */
4084 		i = -1;
4085 		v = next_runnable_thread(vc, &i);
4086 		wake_up(&v->arch.cpu_run);
4087 	}
4088 
4089 	trace_kvmppc_run_vcpu_exit(vcpu);
4090 	spin_unlock(&vc->lock);
4091 	return vcpu->arch.ret;
4092 }
4093 
4094 int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
4095 			  unsigned long lpcr)
4096 {
4097 	struct kvm_run *run = vcpu->run;
4098 	int trap, r, pcpu;
4099 	int srcu_idx, lpid;
4100 	struct kvmppc_vcore *vc;
4101 	struct kvm *kvm = vcpu->kvm;
4102 	struct kvm_nested_guest *nested = vcpu->arch.nested;
4103 
4104 	trace_kvmppc_run_vcpu_enter(vcpu);
4105 
4106 	run->exit_reason = 0;
4107 	vcpu->arch.ret = RESUME_GUEST;
4108 	vcpu->arch.trap = 0;
4109 
4110 	vc = vcpu->arch.vcore;
4111 	vcpu->arch.ceded = 0;
4112 	vcpu->arch.run_task = current;
4113 	vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb());
4114 	vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
4115 	vcpu->arch.busy_preempt = TB_NIL;
4116 	vcpu->arch.last_inst = KVM_INST_FETCH_FAILED;
4117 	vc->runnable_threads[0] = vcpu;
4118 	vc->n_runnable = 1;
4119 	vc->runner = vcpu;
4120 
4121 	/* See if the MMU is ready to go */
4122 	if (!kvm->arch.mmu_ready)
4123 		kvmhv_setup_mmu(vcpu);
4124 
4125 	if (need_resched())
4126 		cond_resched();
4127 
4128 	kvmppc_update_vpas(vcpu);
4129 
4130 	init_vcore_to_run(vc);
4131 	vc->preempt_tb = TB_NIL;
4132 
4133 	preempt_disable();
4134 	pcpu = smp_processor_id();
4135 	vc->pcpu = pcpu;
4136 	kvmppc_prepare_radix_vcpu(vcpu, pcpu);
4137 
4138 	local_irq_disable();
4139 	hard_irq_disable();
4140 	if (signal_pending(current))
4141 		goto sigpend;
4142 	if (lazy_irq_pending() || need_resched() || !kvm->arch.mmu_ready)
4143 		goto out;
4144 
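	/*
	 * For a normal guest, fold any pending doorbell into DPDES and use
	 * LPCR[MER] to force an external interrupt once the guest enables
	 * MSR[EE].  For a nested guest, pending interrupts are left for the
	 * L1 hypervisor to deliver, so we exit back to the host instead.
	 */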
4145 	if (!nested) {
4146 		kvmppc_core_prepare_to_enter(vcpu);
4147 		if (vcpu->arch.doorbell_request) {
4148 			vc->dpdes = 1;
4149 			smp_wmb();
4150 			vcpu->arch.doorbell_request = 0;
4151 		}
4152 		if (test_bit(BOOK3S_IRQPRIO_EXTERNAL,
4153 			     &vcpu->arch.pending_exceptions))
4154 			lpcr |= LPCR_MER;
4155 	} else if (vcpu->arch.pending_exceptions ||
4156 		   vcpu->arch.doorbell_request ||
4157 		   xive_interrupt_pending(vcpu)) {
4158 		vcpu->arch.ret = RESUME_HOST;
4159 		goto out;
4160 	}
4161 
4162 	kvmppc_clear_host_core(pcpu);
4163 
4164 	local_paca->kvm_hstate.tid = 0;
4165 	local_paca->kvm_hstate.napping = 0;
4166 	local_paca->kvm_hstate.kvm_split_mode = NULL;
4167 	kvmppc_start_thread(vcpu, vc);
4168 	kvmppc_create_dtl_entry(vcpu, vc);
4169 	trace_kvm_guest_enter(vcpu);
4170 
4171 	vc->vcore_state = VCORE_RUNNING;
4172 	trace_kvmppc_run_core(vc, 0);
4173 
4174 	if (cpu_has_feature(CPU_FTR_HVMODE)) {
4175 		lpid = nested ? nested->shadow_lpid : kvm->arch.lpid;
4176 		mtspr(SPRN_LPID, lpid);
4177 		isync();
4178 		kvmppc_check_need_tlb_flush(kvm, pcpu, nested);
4179 	}
4180 
4181 	guest_enter_irqoff();
4182 
4183 	srcu_idx = srcu_read_lock(&kvm->srcu);
4184 
4185 	this_cpu_disable_ftrace();
4186 
4187 	/* Tell lockdep that we're about to enable interrupts */
4188 	trace_hardirqs_on();
4189 
4190 	trap = kvmhv_p9_guest_entry(vcpu, time_limit, lpcr);
4191 	vcpu->arch.trap = trap;
4192 
4193 	trace_hardirqs_off();
4194 
4195 	this_cpu_enable_ftrace();
4196 
4197 	srcu_read_unlock(&kvm->srcu, srcu_idx);
4198 
4199 	if (cpu_has_feature(CPU_FTR_HVMODE)) {
4200 		mtspr(SPRN_LPID, kvm->arch.host_lpid);
4201 		isync();
4202 	}
4203 
4204 	set_irq_happened(trap);
4205 
4206 	kvmppc_set_host_core(pcpu);
4207 
4208 	local_irq_enable();
4209 	guest_exit();
4210 
4211 	cpumask_clear_cpu(pcpu, &kvm->arch.cpu_in_guest);
4212 
4213 	preempt_enable();
4214 
4215 	/*
4216 	 * cancel pending decrementer exception if DEC is now positive, or if
4217 	 * entering a nested guest in which case the decrementer is now owned
4218 	 * by L2 and the L1 decrementer is provided in hdec_expires
4219 	 */
4220 	if (kvmppc_core_pending_dec(vcpu) &&
4221 			((get_tb() < vcpu->arch.dec_expires) ||
4222 			 (trap == BOOK3S_INTERRUPT_SYSCALL &&
4223 			  kvmppc_get_gpr(vcpu, 3) == H_ENTER_NESTED)))
4224 		kvmppc_core_dequeue_dec(vcpu);
4225 
4226 	trace_kvm_guest_exit(vcpu);
4227 	r = RESUME_GUEST;
4228 	if (trap) {
4229 		if (!nested)
4230 			r = kvmppc_handle_exit_hv(vcpu, current);
4231 		else
4232 			r = kvmppc_handle_nested_exit(vcpu);
4233 	}
4234 	vcpu->arch.ret = r;
4235 
4236 	if (is_kvmppc_resume_guest(r) && vcpu->arch.ceded &&
4237 	    !kvmppc_vcpu_woken(vcpu)) {
4238 		kvmppc_set_timer(vcpu);
4239 		while (vcpu->arch.ceded && !kvmppc_vcpu_woken(vcpu)) {
4240 			if (signal_pending(current)) {
4241 				vcpu->stat.signal_exits++;
4242 				run->exit_reason = KVM_EXIT_INTR;
4243 				vcpu->arch.ret = -EINTR;
4244 				break;
4245 			}
4246 			spin_lock(&vc->lock);
4247 			kvmppc_vcore_blocked(vc);
4248 			spin_unlock(&vc->lock);
4249 		}
4250 	}
4251 	vcpu->arch.ceded = 0;
4252 
4253 	vc->vcore_state = VCORE_INACTIVE;
4254 	trace_kvmppc_run_core(vc, 1);
4255 
4256  done:
4257 	kvmppc_remove_runnable(vc, vcpu);
4258 	trace_kvmppc_run_vcpu_exit(vcpu);
4259 
4260 	return vcpu->arch.ret;
4261 
4262  sigpend:
4263 	vcpu->stat.signal_exits++;
4264 	run->exit_reason = KVM_EXIT_INTR;
4265 	vcpu->arch.ret = -EINTR;
4266  out:
4267 	local_irq_enable();
4268 	preempt_enable();
4269 	goto done;
4270 }
4271 
4272 static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
4273 {
4274 	struct kvm_run *run = vcpu->run;
4275 	int r;
4276 	int srcu_idx;
4277 	unsigned long ebb_regs[3] = {};	/* shut up GCC */
4278 	unsigned long user_tar = 0;
4279 	unsigned int user_vrsave;
4280 	struct kvm *kvm;
4281 
4282 	if (!vcpu->arch.sane) {
4283 		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
4284 		return -EINVAL;
4285 	}
4286 
4287 	/*
4288 	 * Don't allow entry with a suspended transaction, because
4289 	 * the guest entry/exit code will lose it.
4290 	 * If the guest has TM enabled, save away their TM-related SPRs
4291 	 * (they will get restored by the TM unavailable interrupt).
4292 	 */
4293 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
4294 	if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs &&
4295 	    (current->thread.regs->msr & MSR_TM)) {
4296 		if (MSR_TM_ACTIVE(current->thread.regs->msr)) {
4297 			run->exit_reason = KVM_EXIT_FAIL_ENTRY;
4298 			run->fail_entry.hardware_entry_failure_reason = 0;
4299 			return -EINVAL;
4300 		}
4301 		/* Enable TM so we can read the TM SPRs */
4302 		mtmsr(mfmsr() | MSR_TM);
4303 		current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
4304 		current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
4305 		current->thread.tm_texasr = mfspr(SPRN_TEXASR);
4306 		current->thread.regs->msr &= ~MSR_TM;
4307 	}
4308 #endif
4309 
4310 	/*
4311 	 * Force online to 1 for the sake of old userspace which doesn't
4312 	 * set it.
4313 	 */
4314 	if (!vcpu->arch.online) {
4315 		atomic_inc(&vcpu->arch.vcore->online_count);
4316 		vcpu->arch.online = 1;
4317 	}
4318 
4319 	kvmppc_core_prepare_to_enter(vcpu);
4320 
4321 	/* No need to go into the guest when all we'll do is come back out */
4322 	if (signal_pending(current)) {
4323 		run->exit_reason = KVM_EXIT_INTR;
4324 		return -EINTR;
4325 	}
4326 
4327 	kvm = vcpu->kvm;
4328 	atomic_inc(&kvm->arch.vcpus_running);
4329 	/* Order vcpus_running vs. mmu_ready, see kvmppc_alloc_reset_hpt */
4330 	smp_mb();
4331 
4332 	flush_all_to_thread(current);
4333 
4334 	/* Save userspace EBB and other register values */
4335 	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
4336 		ebb_regs[0] = mfspr(SPRN_EBBHR);
4337 		ebb_regs[1] = mfspr(SPRN_EBBRR);
4338 		ebb_regs[2] = mfspr(SPRN_BESCR);
4339 		user_tar = mfspr(SPRN_TAR);
4340 	}
4341 	user_vrsave = mfspr(SPRN_VRSAVE);
4342 
4343 	vcpu->arch.waitp = &vcpu->arch.vcore->wait;
4344 	vcpu->arch.pgdir = kvm->mm->pgd;
4345 	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
4346 
4347 	do {
4348 		/*
4349 		 * The early POWER9 chips that can't mix radix and HPT threads
4350 		 * on the same core also need the workaround for the problem
4351 		 * where the TLB would prefetch entries in the guest exit path
4352 		 * for radix guests using the guest PIDR value and LPID 0.
4353 		 * The workaround is in the old path (kvmppc_run_vcpu())
4354 		 * but not the new path (kvmhv_run_single_vcpu()).
4355 		 */
4356 		if (kvm->arch.threads_indep && kvm_is_radix(kvm) &&
4357 		    !no_mixing_hpt_and_radix)
4358 			r = kvmhv_run_single_vcpu(vcpu, ~(u64)0,
4359 						  vcpu->arch.vcore->lpcr);
4360 		else
4361 			r = kvmppc_run_vcpu(vcpu);
4362 
4363 		if (run->exit_reason == KVM_EXIT_PAPR_HCALL &&
4364 		    !(vcpu->arch.shregs.msr & MSR_PR)) {
4365 			trace_kvm_hcall_enter(vcpu);
4366 			r = kvmppc_pseries_do_hcall(vcpu);
4367 			trace_kvm_hcall_exit(vcpu, r);
4368 			kvmppc_core_prepare_to_enter(vcpu);
4369 		} else if (r == RESUME_PAGE_FAULT) {
4370 			srcu_idx = srcu_read_lock(&kvm->srcu);
4371 			r = kvmppc_book3s_hv_page_fault(vcpu,
4372 				vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
4373 			srcu_read_unlock(&kvm->srcu, srcu_idx);
4374 		} else if (r == RESUME_PASSTHROUGH) {
4375 			if (WARN_ON(xics_on_xive()))
4376 				r = H_SUCCESS;
4377 			else
4378 				r = kvmppc_xics_rm_complete(vcpu, 0);
4379 		}
4380 	} while (is_kvmppc_resume_guest(r));
4381 
4382 	/* Restore userspace EBB and other register values */
4383 	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
4384 		mtspr(SPRN_EBBHR, ebb_regs[0]);
4385 		mtspr(SPRN_EBBRR, ebb_regs[1]);
4386 		mtspr(SPRN_BESCR, ebb_regs[2]);
4387 		mtspr(SPRN_TAR, user_tar);
4388 		mtspr(SPRN_FSCR, current->thread.fscr);
4389 	}
4390 	mtspr(SPRN_VRSAVE, user_vrsave);
4391 
4392 	vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
4393 	atomic_dec(&kvm->arch.vcpus_running);
4394 	return r;
4395 }
4396 
4397 static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps,
4398 				     int shift, int sllp)
4399 {
4400 	(*sps)->page_shift = shift;
4401 	(*sps)->slb_enc = sllp;
4402 	(*sps)->enc[0].page_shift = shift;
4403 	(*sps)->enc[0].pte_enc = kvmppc_pgsize_lp_encoding(shift, shift);
4404 	/*
4405 	 * Add 16MB MPSS support (may get filtered out by userspace)
4406 	 */
4407 	if (shift != 24) {
4408 		int penc = kvmppc_pgsize_lp_encoding(shift, 24);
4409 		if (penc != -1) {
4410 			(*sps)->enc[1].page_shift = 24;
4411 			(*sps)->enc[1].pte_enc = penc;
4412 		}
4413 	}
4414 	(*sps)++;
4415 }
4416 
4417 static int kvm_vm_ioctl_get_smmu_info_hv(struct kvm *kvm,
4418 					 struct kvm_ppc_smmu_info *info)
4419 {
4420 	struct kvm_ppc_one_seg_page_size *sps;
4421 
4422 	/*
4423 	 * POWER7, POWER8 and POWER9 all support 32 storage keys for data.
4424 	 * POWER7 doesn't support keys for instruction accesses,
4425 	 * POWER8 and POWER9 do.
4426 	 */
4427 	info->data_keys = 32;
4428 	info->instr_keys = cpu_has_feature(CPU_FTR_ARCH_207S) ? 32 : 0;
4429 
4430 	/* POWER7, 8 and 9 all have 1T segments and 32-entry SLB */
4431 	info->flags = KVM_PPC_PAGE_SIZES_REAL | KVM_PPC_1T_SEGMENTS;
4432 	info->slb_size = 32;
4433 
4434 	/* We only support these sizes for now, and no multi-size segments */
4435 	sps = &info->sps[0];
4436 	kvmppc_add_seg_page_size(&sps, 12, 0);
4437 	kvmppc_add_seg_page_size(&sps, 16, SLB_VSID_L | SLB_VSID_LP_01);
4438 	kvmppc_add_seg_page_size(&sps, 24, SLB_VSID_L);
4439 
4440 	/* If running as a nested hypervisor, we don't support HPT guests */
4441 	if (kvmhv_on_pseries())
4442 		info->flags |= KVM_PPC_NO_HASH;
4443 
4444 	return 0;
4445 }
4446 
4447 /*
4448  * Get (and clear) the dirty memory log for a memory slot.
4449  */
4450 static int kvm_vm_ioctl_get_dirty_log_hv(struct kvm *kvm,
4451 					 struct kvm_dirty_log *log)
4452 {
4453 	struct kvm_memslots *slots;
4454 	struct kvm_memory_slot *memslot;
4455 	int i, r;
4456 	unsigned long n;
4457 	unsigned long *buf, *p;
4458 	struct kvm_vcpu *vcpu;
4459 
4460 	mutex_lock(&kvm->slots_lock);
4461 
4462 	r = -EINVAL;
4463 	if (log->slot >= KVM_USER_MEM_SLOTS)
4464 		goto out;
4465 
4466 	slots = kvm_memslots(kvm);
4467 	memslot = id_to_memslot(slots, log->slot);
4468 	r = -ENOENT;
4469 	if (!memslot || !memslot->dirty_bitmap)
4470 		goto out;
4471 
4472 	/*
4473 	 * Use second half of bitmap area because both HPT and radix
4474 	 * accumulate bits in the first half.
4475 	 */
4476 	n = kvm_dirty_bitmap_bytes(memslot);
4477 	buf = memslot->dirty_bitmap + n / sizeof(long);
4478 	memset(buf, 0, n);
4479 
4480 	if (kvm_is_radix(kvm))
4481 		r = kvmppc_hv_get_dirty_log_radix(kvm, memslot, buf);
4482 	else
4483 		r = kvmppc_hv_get_dirty_log_hpt(kvm, memslot, buf);
4484 	if (r)
4485 		goto out;
4486 
4487 	/*
4488 	 * We accumulate dirty bits in the first half of the
4489 	 * memslot's dirty_bitmap area, for when pages are paged
4490 	 * out or modified by the host directly.  Pick up these
4491 	 * bits and add them to the map.
4492 	 */
4493 	p = memslot->dirty_bitmap;
4494 	for (i = 0; i < n / sizeof(long); ++i)
4495 		buf[i] |= xchg(&p[i], 0);
4496 
4497 	/* Harvest dirty bits from VPA and DTL updates */
4498 	/* Note: we never modify the SLB shadow buffer areas */
4499 	kvm_for_each_vcpu(i, vcpu, kvm) {
4500 		spin_lock(&vcpu->arch.vpa_update_lock);
4501 		kvmppc_harvest_vpa_dirty(&vcpu->arch.vpa, memslot, buf);
4502 		kvmppc_harvest_vpa_dirty(&vcpu->arch.dtl, memslot, buf);
4503 		spin_unlock(&vcpu->arch.vpa_update_lock);
4504 	}
4505 
4506 	r = -EFAULT;
4507 	if (copy_to_user(log->dirty_bitmap, buf, n))
4508 		goto out;
4509 
4510 	r = 0;
4511 out:
4512 	mutex_unlock(&kvm->slots_lock);
4513 	return r;
4514 }
4515 
4516 static void kvmppc_core_free_memslot_hv(struct kvm_memory_slot *slot)
4517 {
4518 	vfree(slot->arch.rmap);
4519 	slot->arch.rmap = NULL;
4520 }
4521 
4522 static int kvmppc_core_prepare_memory_region_hv(struct kvm *kvm,
4523 					struct kvm_memory_slot *slot,
4524 					const struct kvm_userspace_memory_region *mem,
4525 					enum kvm_mr_change change)
4526 {
4527 	unsigned long npages = mem->memory_size >> PAGE_SHIFT;
4528 
4529 	if (change == KVM_MR_CREATE) {
4530 		slot->arch.rmap = vzalloc(array_size(npages,
4531 					  sizeof(*slot->arch.rmap)));
4532 		if (!slot->arch.rmap)
4533 			return -ENOMEM;
4534 	}
4535 
4536 	return 0;
4537 }
4538 
4539 static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm,
4540 				const struct kvm_userspace_memory_region *mem,
4541 				const struct kvm_memory_slot *old,
4542 				const struct kvm_memory_slot *new,
4543 				enum kvm_mr_change change)
4544 {
4545 	unsigned long npages = mem->memory_size >> PAGE_SHIFT;
4546 
4547 	/*
4548 	 * If we are making a new memslot, it might make
4549 	 * some address that was previously cached as emulated
4550 	 * MMIO be no longer emulated MMIO, so invalidate
4551 	 * all the caches of emulated MMIO translations.
4552 	 */
4553 	if (npages)
4554 		atomic64_inc(&kvm->arch.mmio_update);
4555 
4556 	/*
4557 	 * For change == KVM_MR_MOVE or KVM_MR_DELETE, higher levels
4558 	 * have already called kvm_arch_flush_shadow_memslot() to
4559 	 * flush shadow mappings.  For KVM_MR_CREATE we have no
4560 	 * previous mappings.  So the only case to handle is
4561 	 * KVM_MR_FLAGS_ONLY when the KVM_MEM_LOG_DIRTY_PAGES bit
4562 	 * has been changed.
4563 	 * For radix guests, we flush on setting KVM_MEM_LOG_DIRTY_PAGES
4564 	 * to get rid of any THP PTEs in the partition-scoped page tables
4565 	 * so we can track dirtiness at the page level; we flush when
4566 	 * clearing KVM_MEM_LOG_DIRTY_PAGES so that we can go back to
4567 	 * using THP PTEs.
4568 	 */
4569 	if (change == KVM_MR_FLAGS_ONLY && kvm_is_radix(kvm) &&
4570 	    ((new->flags ^ old->flags) & KVM_MEM_LOG_DIRTY_PAGES))
4571 		kvmppc_radix_flush_memslot(kvm, old);
4572 	/*
4573 	 * If UV hasn't yet called H_SVM_INIT_START, don't register memslots.
4574 	 */
4575 	if (!kvm->arch.secure_guest)
4576 		return;
4577 
4578 	switch (change) {
4579 	case KVM_MR_CREATE:
4580 		/*
4581 		 * @TODO kvmppc_uvmem_memslot_create() can fail and
4582 		 * return error. Fix this.
4583 		 */
4584 		kvmppc_uvmem_memslot_create(kvm, new);
4585 		break;
4586 	case KVM_MR_DELETE:
4587 		kvmppc_uvmem_memslot_delete(kvm, old);
4588 		break;
4589 	default:
4590 		/* TODO: Handle KVM_MR_MOVE */
4591 		break;
4592 	}
4593 }
4594 
4595 /*
4596  * Update LPCR values in kvm->arch and in vcores.
4597  * Caller must hold kvm->arch.mmu_setup_lock (for mutual exclusion
4598  * of kvm->arch.lpcr update).
4599  */
4600 void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, unsigned long mask)
4601 {
4602 	long int i;
4603 	u32 cores_done = 0;
4604 
4605 	if ((kvm->arch.lpcr & mask) == lpcr)
4606 		return;
4607 
4608 	kvm->arch.lpcr = (kvm->arch.lpcr & ~mask) | lpcr;
4609 
4610 	for (i = 0; i < KVM_MAX_VCORES; ++i) {
4611 		struct kvmppc_vcore *vc = kvm->arch.vcores[i];
4612 		if (!vc)
4613 			continue;
4614 		spin_lock(&vc->lock);
4615 		vc->lpcr = (vc->lpcr & ~mask) | lpcr;
4616 		spin_unlock(&vc->lock);
4617 		if (++cores_done >= kvm->arch.online_vcores)
4618 			break;
4619 	}
4620 }
4621 
4622 void kvmppc_setup_partition_table(struct kvm *kvm)
4623 {
4624 	unsigned long dw0, dw1;
4625 
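	/* dw0 and dw1 are the two doublewords of this LPID's partition-table entry */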
4626 	if (!kvm_is_radix(kvm)) {
4627 		/* PS field - page size for VRMA */
4628 		dw0 = ((kvm->arch.vrma_slb_v & SLB_VSID_L) >> 1) |
4629 			((kvm->arch.vrma_slb_v & SLB_VSID_LP) << 1);
4630 		/* HTABSIZE and HTABORG fields */
4631 		dw0 |= kvm->arch.sdr1;
4632 
4633 		/* Second dword as set by userspace */
4634 		dw1 = kvm->arch.process_table;
4635 	} else {
4636 		dw0 = PATB_HR | radix__get_tree_size() |
4637 			__pa(kvm->arch.pgtable) | RADIX_PGD_INDEX_SIZE;
4638 		dw1 = PATB_GR | kvm->arch.process_table;
4639 	}
4640 	kvmhv_set_ptbl_entry(kvm->arch.lpid, dw0, dw1);
4641 }
4642 
4643 /*
4644  * Set up HPT (hashed page table) and RMA (real-mode area).
4645  * Must be called with kvm->arch.mmu_setup_lock held.
4646  */
4647 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
4648 {
4649 	int err = 0;
4650 	struct kvm *kvm = vcpu->kvm;
4651 	unsigned long hva;
4652 	struct kvm_memory_slot *memslot;
4653 	struct vm_area_struct *vma;
4654 	unsigned long lpcr = 0, senc;
4655 	unsigned long psize, porder;
4656 	int srcu_idx;
4657 
4658 	/* Allocate hashed page table (if not done already) and reset it */
4659 	if (!kvm->arch.hpt.virt) {
4660 		int order = KVM_DEFAULT_HPT_ORDER;
4661 		struct kvm_hpt_info info;
4662 
4663 		err = kvmppc_allocate_hpt(&info, order);
4664 		/* If we get here, it means userspace didn't specify a
4665 		 * size explicitly.  So, try successively smaller
4666 		 * sizes if the default failed. */
4667 		while ((err == -ENOMEM) && --order >= PPC_MIN_HPT_ORDER)
4668 			err  = kvmppc_allocate_hpt(&info, order);
4669 
4670 		if (err < 0) {
4671 			pr_err("KVM: Couldn't alloc HPT\n");
4672 			goto out;
4673 		}
4674 
4675 		kvmppc_set_hpt(kvm, &info);
4676 	}
4677 
4678 	/* Look up the memslot for guest physical address 0 */
4679 	srcu_idx = srcu_read_lock(&kvm->srcu);
4680 	memslot = gfn_to_memslot(kvm, 0);
4681 
4682 	/* We must have some memory at 0 by now */
4683 	err = -EINVAL;
4684 	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
4685 		goto out_srcu;
4686 
4687 	/* Look up the VMA for the start of this memory slot */
4688 	hva = memslot->userspace_addr;
4689 	mmap_read_lock(kvm->mm);
4690 	vma = find_vma(kvm->mm, hva);
4691 	if (!vma || vma->vm_start > hva || (vma->vm_flags & VM_IO))
4692 		goto up_out;
4693 
4694 	psize = vma_kernel_pagesize(vma);
4695 
4696 	mmap_read_unlock(kvm->mm);
4697 
4698 	/* We can handle 4k, 64k or 16M pages in the VRMA */
4699 	if (psize >= 0x1000000)
4700 		psize = 0x1000000;
4701 	else if (psize >= 0x10000)
4702 		psize = 0x10000;
4703 	else
4704 		psize = 0x1000;
4705 	porder = __ilog2(psize);
4706 
4707 	senc = slb_pgsize_encoding(psize);
4708 	kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
4709 		(VRMA_VSID << SLB_VSID_SHIFT_1T);
4710 	/* Create HPTEs in the hash page table for the VRMA */
4711 	kvmppc_map_vrma(vcpu, memslot, porder);
4712 
4713 	/* Update VRMASD field in the LPCR */
4714 	if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
4715 		/* the -4 is to account for senc values starting at 0x10 */
4716 		lpcr = senc << (LPCR_VRMASD_SH - 4);
4717 		kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD);
4718 	}
4719 
4720 	/* Order updates to kvm->arch.lpcr etc. vs. mmu_ready */
4721 	smp_wmb();
4722 	err = 0;
4723  out_srcu:
4724 	srcu_read_unlock(&kvm->srcu, srcu_idx);
4725  out:
4726 	return err;
4727 
4728  up_out:
4729 	mmap_read_unlock(kvm->mm);
4730 	goto out_srcu;
4731 }
4732 
4733 /*
4734  * Must be called with kvm->arch.mmu_setup_lock held and
4735  * mmu_ready = 0 and no vcpus running.
4736  */
4737 int kvmppc_switch_mmu_to_hpt(struct kvm *kvm)
4738 {
4739 	if (nesting_enabled(kvm))
4740 		kvmhv_release_all_nested(kvm);
4741 	kvmppc_rmap_reset(kvm);
4742 	kvm->arch.process_table = 0;
4743 	/* Mutual exclusion with kvm_unmap_hva_range etc. */
4744 	spin_lock(&kvm->mmu_lock);
4745 	kvm->arch.radix = 0;
4746 	spin_unlock(&kvm->mmu_lock);
4747 	kvmppc_free_radix(kvm);
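	/*
	 * HPT guests run with LPCR[VPM1] set and the radix-related bits
	 * (UPRT, GTSE, HR) clear; kvmppc_switch_mmu_to_radix() below does
	 * the opposite.
	 */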
4748 	kvmppc_update_lpcr(kvm, LPCR_VPM1,
4749 			   LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR);
4750 	return 0;
4751 }
4752 
4753 /*
4754  * Must be called with kvm->arch.mmu_setup_lock held and
4755  * mmu_ready = 0 and no vcpus running.
4756  */
4757 int kvmppc_switch_mmu_to_radix(struct kvm *kvm)
4758 {
4759 	int err;
4760 
4761 	err = kvmppc_init_vm_radix(kvm);
4762 	if (err)
4763 		return err;
4764 	kvmppc_rmap_reset(kvm);
4765 	/* Mutual exclusion with kvm_unmap_hva_range etc. */
4766 	spin_lock(&kvm->mmu_lock);
4767 	kvm->arch.radix = 1;
4768 	spin_unlock(&kvm->mmu_lock);
4769 	kvmppc_free_hpt(&kvm->arch.hpt);
4770 	kvmppc_update_lpcr(kvm, LPCR_UPRT | LPCR_GTSE | LPCR_HR,
4771 			   LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR);
4772 	return 0;
4773 }
4774 
4775 #ifdef CONFIG_KVM_XICS
4776 /*
4777  * Allocate a per-core structure for managing state about which cores are
4778  * running in the host versus the guest and for exchanging data between
4779  * real mode KVM and CPU running in the host.
4780  * This is only done for the first VM.
4781  * The allocated structure stays even if all VMs have stopped.
4782  * It is only freed when the kvm-hv module is unloaded.
4783  * It's OK for this routine to fail, we just don't support host
4784  * core operations like redirecting H_IPI wakeups.
4785  */
4786 void kvmppc_alloc_host_rm_ops(void)
4787 {
4788 	struct kvmppc_host_rm_ops *ops;
4789 	unsigned long l_ops;
4790 	int cpu, core;
4791 	int size;
4792 
4793 	/* Not the first time here? */
4794 	if (kvmppc_host_rm_ops_hv != NULL)
4795 		return;
4796 
4797 	ops = kzalloc(sizeof(struct kvmppc_host_rm_ops), GFP_KERNEL);
4798 	if (!ops)
4799 		return;
4800 
4801 	size = cpu_nr_cores() * sizeof(struct kvmppc_host_rm_core);
4802 	ops->rm_core = kzalloc(size, GFP_KERNEL);
4803 
4804 	if (!ops->rm_core) {
4805 		kfree(ops);
4806 		return;
4807 	}
4808 
4809 	cpus_read_lock();
4810 
4811 	for (cpu = 0; cpu < nr_cpu_ids; cpu += threads_per_core) {
4812 		if (!cpu_online(cpu))
4813 			continue;
4814 
4815 		core = cpu >> threads_shift;
4816 		ops->rm_core[core].rm_state.in_host = 1;
4817 	}
4818 
4819 	ops->vcpu_kick = kvmppc_fast_vcpu_kick_hv;
4820 
4821 	/*
4822 	 * Make the contents of the kvmppc_host_rm_ops structure visible
4823 	 * to other CPUs before we assign it to the global variable.
4824 	 * Do an atomic assignment (no locks used here), but if someone
4825 	 * beats us to it, just free our copy and return.
4826 	 */
4827 	smp_wmb();
4828 	l_ops = (unsigned long) ops;
4829 
4830 	if (cmpxchg64((unsigned long *)&kvmppc_host_rm_ops_hv, 0, l_ops)) {
4831 		cpus_read_unlock();
4832 		kfree(ops->rm_core);
4833 		kfree(ops);
4834 		return;
4835 	}
4836 
4837 	cpuhp_setup_state_nocalls_cpuslocked(CPUHP_KVM_PPC_BOOK3S_PREPARE,
4838 					     "ppc/kvm_book3s:prepare",
4839 					     kvmppc_set_host_core,
4840 					     kvmppc_clear_host_core);
4841 	cpus_read_unlock();
4842 }
4843 
4844 void kvmppc_free_host_rm_ops(void)
4845 {
4846 	if (kvmppc_host_rm_ops_hv) {
4847 		cpuhp_remove_state_nocalls(CPUHP_KVM_PPC_BOOK3S_PREPARE);
4848 		kfree(kvmppc_host_rm_ops_hv->rm_core);
4849 		kfree(kvmppc_host_rm_ops_hv);
4850 		kvmppc_host_rm_ops_hv = NULL;
4851 	}
4852 }
4853 #endif
4854 
4855 static int kvmppc_core_init_vm_hv(struct kvm *kvm)
4856 {
4857 	unsigned long lpcr, lpid;
4858 	char buf[32];
4859 	int ret;
4860 
4861 	mutex_init(&kvm->arch.uvmem_lock);
4862 	INIT_LIST_HEAD(&kvm->arch.uvmem_pfns);
4863 	mutex_init(&kvm->arch.mmu_setup_lock);
4864 
4865 	/* Allocate the guest's logical partition ID */
4866 
4867 	lpid = kvmppc_alloc_lpid();
4868 	if ((long)lpid < 0)
4869 		return -ENOMEM;
4870 	kvm->arch.lpid = lpid;
4871 
4872 	kvmppc_alloc_host_rm_ops();
4873 
4874 	kvmhv_vm_nested_init(kvm);
4875 
4876 	/*
4877 	 * Since we don't flush the TLB when tearing down a VM,
4878 	 * and this lpid might have previously been used,
4879 	 * make sure we flush on each core before running the new VM.
4880 	 * On POWER9, the tlbie in mmu_partition_table_set_entry()
4881 	 * does this flush for us.
4882 	 */
4883 	if (!cpu_has_feature(CPU_FTR_ARCH_300))
4884 		cpumask_setall(&kvm->arch.need_tlb_flush);
4885 
4886 	/* Start out with the default set of hcalls enabled */
4887 	memcpy(kvm->arch.enabled_hcalls, default_enabled_hcalls,
4888 	       sizeof(kvm->arch.enabled_hcalls));
4889 
4890 	if (!cpu_has_feature(CPU_FTR_ARCH_300))
4891 		kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);
4892 
4893 	/* Init LPCR for virtual RMA mode */
4894 	if (cpu_has_feature(CPU_FTR_HVMODE)) {
4895 		kvm->arch.host_lpid = mfspr(SPRN_LPID);
4896 		kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR);
4897 		lpcr &= LPCR_PECE | LPCR_LPES;
4898 	} else {
4899 		lpcr = 0;
4900 	}
4901 	lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE |
4902 		LPCR_VPM0 | LPCR_VPM1;
4903 	kvm->arch.vrma_slb_v = SLB_VSID_B_1T |
4904 		(VRMA_VSID << SLB_VSID_SHIFT_1T);
4905 	/* On POWER8 turn on online bit to enable PURR/SPURR */
4906 	if (cpu_has_feature(CPU_FTR_ARCH_207S))
4907 		lpcr |= LPCR_ONL;
4908 	/*
4909 	 * On POWER9, VPM0 bit is reserved (VPM0=1 behaviour is assumed)
4910 	 * Set HVICE bit to enable hypervisor virtualization interrupts.
4911 	 * Set HEIC to prevent OS interrupts to go to hypervisor (should
4912 	 * be unnecessary but better safe than sorry in case we re-enable
4913 	 * EE in HV mode with this LPCR still set)
4914 	 */
4915 	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
4916 		lpcr &= ~LPCR_VPM0;
4917 		lpcr |= LPCR_HVICE | LPCR_HEIC;
4918 
4919 		/*
4920 		 * If xive is enabled, we route 0x500 interrupts directly
4921 		 * to the guest.
4922 		 */
4923 		if (xics_on_xive())
4924 			lpcr |= LPCR_LPES;
4925 	}
4926 
4927 	/*
4928 	 * If the host uses radix, the guest starts out as radix.
4929 	 */
4930 	if (radix_enabled()) {
4931 		kvm->arch.radix = 1;
4932 		kvm->arch.mmu_ready = 1;
4933 		lpcr &= ~LPCR_VPM1;
4934 		lpcr |= LPCR_UPRT | LPCR_GTSE | LPCR_HR;
4935 		ret = kvmppc_init_vm_radix(kvm);
4936 		if (ret) {
4937 			kvmppc_free_lpid(kvm->arch.lpid);
4938 			return ret;
4939 		}
4940 		kvmppc_setup_partition_table(kvm);
4941 	}
4942 
4943 	kvm->arch.lpcr = lpcr;
4944 
4945 	/* Initialization for future HPT resizes */
4946 	kvm->arch.resize_hpt = NULL;
4947 
4948 	/*
4949 	 * Work out how many sets the TLB has, for the use of
4950 	 * the TLB invalidation loop in book3s_hv_rmhandlers.S.
4951 	 */
4952 	if (radix_enabled())
4953 		kvm->arch.tlb_sets = POWER9_TLB_SETS_RADIX;	/* 128 */
4954 	else if (cpu_has_feature(CPU_FTR_ARCH_300))
4955 		kvm->arch.tlb_sets = POWER9_TLB_SETS_HASH;	/* 256 */
4956 	else if (cpu_has_feature(CPU_FTR_ARCH_207S))
4957 		kvm->arch.tlb_sets = POWER8_TLB_SETS;		/* 512 */
4958 	else
4959 		kvm->arch.tlb_sets = POWER7_TLB_SETS;		/* 128 */
4960 
4961 	/*
4962 	 * Track that we now have a HV mode VM active. This blocks secondary
4963 	 * CPU threads from coming online.
4964 	 * On POWER9, we only need to do this if the "indep_threads_mode"
4965 	 * module parameter has been set to N.
4966 	 */
4967 	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
4968 		if (!indep_threads_mode && !cpu_has_feature(CPU_FTR_HVMODE)) {
4969 			pr_warn("KVM: Ignoring indep_threads_mode=N in nested hypervisor\n");
4970 			kvm->arch.threads_indep = true;
4971 		} else {
4972 			kvm->arch.threads_indep = indep_threads_mode;
4973 		}
4974 	}
4975 	if (!kvm->arch.threads_indep)
4976 		kvm_hv_vm_activated();
4977 
4978 	/*
4979 	 * Initialize smt_mode depending on processor.
4980 	 * POWER8 and earlier have to use "strict" threading, where
4981 	 * all vCPUs in a vcore have to run on the same (sub)core,
4982 	 * whereas on POWER9 the threads can each run a different
4983 	 * guest.
4984 	 */
4985 	if (!cpu_has_feature(CPU_FTR_ARCH_300))
4986 		kvm->arch.smt_mode = threads_per_subcore;
4987 	else
4988 		kvm->arch.smt_mode = 1;
4989 	kvm->arch.emul_smt_mode = 1;
4990 
4991 	/*
4992 	 * Create a debugfs directory for the VM
4993 	 */
4994 	snprintf(buf, sizeof(buf), "vm%d", current->pid);
4995 	kvm->arch.debugfs_dir = debugfs_create_dir(buf, kvm_debugfs_dir);
4996 	kvmppc_mmu_debugfs_init(kvm);
4997 	if (radix_enabled())
4998 		kvmhv_radix_debugfs_init(kvm);
4999 
5000 	return 0;
5001 }
5002 
5003 static void kvmppc_free_vcores(struct kvm *kvm)
5004 {
5005 	long int i;
5006 
5007 	for (i = 0; i < KVM_MAX_VCORES; ++i)
5008 		kfree(kvm->arch.vcores[i]);
5009 	kvm->arch.online_vcores = 0;
5010 }
5011 
5012 static void kvmppc_core_destroy_vm_hv(struct kvm *kvm)
5013 {
5014 	debugfs_remove_recursive(kvm->arch.debugfs_dir);
5015 
5016 	if (!kvm->arch.threads_indep)
5017 		kvm_hv_vm_deactivated();
5018 
5019 	kvmppc_free_vcores(kvm);
5020 
5021 
5022 	if (kvm_is_radix(kvm))
5023 		kvmppc_free_radix(kvm);
5024 	else
5025 		kvmppc_free_hpt(&kvm->arch.hpt);
5026 
5027 	/* Perform global invalidation and return lpid to the pool */
5028 	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
5029 		if (nesting_enabled(kvm))
5030 			kvmhv_release_all_nested(kvm);
5031 		kvm->arch.process_table = 0;
5032 		if (kvm->arch.secure_guest)
5033 			uv_svm_terminate(kvm->arch.lpid);
5034 		kvmhv_set_ptbl_entry(kvm->arch.lpid, 0, 0);
5035 	}
5036 
5037 	kvmppc_free_lpid(kvm->arch.lpid);
5038 
5039 	kvmppc_free_pimap(kvm);
5040 }
5041 
5042 /* We don't need to emulate any privileged instructions or dcbz */
5043 static int kvmppc_core_emulate_op_hv(struct kvm_vcpu *vcpu,
5044 				     unsigned int inst, int *advance)
5045 {
5046 	return EMULATE_FAIL;
5047 }
5048 
5049 static int kvmppc_core_emulate_mtspr_hv(struct kvm_vcpu *vcpu, int sprn,
5050 					ulong spr_val)
5051 {
5052 	return EMULATE_FAIL;
5053 }
5054 
5055 static int kvmppc_core_emulate_mfspr_hv(struct kvm_vcpu *vcpu, int sprn,
5056 					ulong *spr_val)
5057 {
5058 	return EMULATE_FAIL;
5059 }
5060 
5061 static int kvmppc_core_check_processor_compat_hv(void)
5062 {
5063 	if (cpu_has_feature(CPU_FTR_HVMODE) &&
5064 	    cpu_has_feature(CPU_FTR_ARCH_206))
5065 		return 0;
5066 
5067 	/* POWER9 in radix mode is capable of being a nested hypervisor. */
5068 	if (cpu_has_feature(CPU_FTR_ARCH_300) && radix_enabled())
5069 		return 0;
5070 
5071 	return -EIO;
5072 }
5073 
5074 #ifdef CONFIG_KVM_XICS
5075 
5076 void kvmppc_free_pimap(struct kvm *kvm)
5077 {
5078 	kfree(kvm->arch.pimap);
5079 }
5080 
5081 static struct kvmppc_passthru_irqmap *kvmppc_alloc_pimap(void)
5082 {
5083 	return kzalloc(sizeof(struct kvmppc_passthru_irqmap), GFP_KERNEL);
5084 }
5085 
5086 static int kvmppc_set_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
5087 {
5088 	struct irq_desc *desc;
5089 	struct kvmppc_irq_map *irq_map;
5090 	struct kvmppc_passthru_irqmap *pimap;
5091 	struct irq_chip *chip;
5092 	int i, rc = 0;
5093 
5094 	if (!kvm_irq_bypass)
5095 		return 1;
5096 
5097 	desc = irq_to_desc(host_irq);
5098 	if (!desc)
5099 		return -EIO;
5100 
5101 	mutex_lock(&kvm->lock);
5102 
5103 	pimap = kvm->arch.pimap;
5104 	if (pimap == NULL) {
5105 		/* First call, allocate structure to hold IRQ map */
5106 		pimap = kvmppc_alloc_pimap();
5107 		if (pimap == NULL) {
5108 			mutex_unlock(&kvm->lock);
5109 			return -ENOMEM;
5110 		}
5111 		kvm->arch.pimap = pimap;
5112 	}
5113 
5114 	/*
5115 	 * For now, we only support interrupts for which the EOI operation
5116 	 * is an OPAL call followed by a write to XIRR, since that's
5117 	 * what our real-mode EOI code does, or a XIVE interrupt
5118 	 */
5119 	chip = irq_data_get_irq_chip(&desc->irq_data);
5120 	if (!chip || !(is_pnv_opal_msi(chip) || is_xive_irq(chip))) {
5121 		pr_warn("kvmppc_set_passthru_irq_hv: Could not assign IRQ map for (%d,%d)\n",
5122 			host_irq, guest_gsi);
5123 		mutex_unlock(&kvm->lock);
5124 		return -ENOENT;
5125 	}
5126 
5127 	/*
5128 	 * See if we already have an entry for this guest IRQ number.
5129 	 * If it's mapped to a hardware IRQ number, that's an error,
5130 	 * otherwise re-use this entry.
5131 	 */
5132 	for (i = 0; i < pimap->n_mapped; i++) {
5133 		if (guest_gsi == pimap->mapped[i].v_hwirq) {
5134 			if (pimap->mapped[i].r_hwirq) {
5135 				mutex_unlock(&kvm->lock);
5136 				return -EINVAL;
5137 			}
5138 			break;
5139 		}
5140 	}
5141 
5142 	if (i == KVMPPC_PIRQ_MAPPED) {
5143 		mutex_unlock(&kvm->lock);
5144 		return -EAGAIN;		/* table is full */
5145 	}
5146 
5147 	irq_map = &pimap->mapped[i];
5148 
5149 	irq_map->v_hwirq = guest_gsi;
5150 	irq_map->desc = desc;
5151 
5152 	/*
5153 	 * Order the above two stores before the next to serialize with
5154 	 * the KVM real mode handler.
5155 	 */
5156 	smp_wmb();
5157 	irq_map->r_hwirq = desc->irq_data.hwirq;
5158 
5159 	if (i == pimap->n_mapped)
5160 		pimap->n_mapped++;
5161 
5162 	if (xics_on_xive())
5163 		rc = kvmppc_xive_set_mapped(kvm, guest_gsi, desc);
5164 	else
5165 		kvmppc_xics_set_mapped(kvm, guest_gsi, desc->irq_data.hwirq);
5166 	if (rc)
5167 		irq_map->r_hwirq = 0;
5168 
5169 	mutex_unlock(&kvm->lock);
5170 
5171 	return 0;
5172 }
5173 
5174 static int kvmppc_clr_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
5175 {
5176 	struct irq_desc *desc;
5177 	struct kvmppc_passthru_irqmap *pimap;
5178 	int i, rc = 0;
5179 
5180 	if (!kvm_irq_bypass)
5181 		return 0;
5182 
5183 	desc = irq_to_desc(host_irq);
5184 	if (!desc)
5185 		return -EIO;
5186 
5187 	mutex_lock(&kvm->lock);
5188 	if (!kvm->arch.pimap)
5189 		goto unlock;
5190 
5191 	pimap = kvm->arch.pimap;
5192 
5193 	for (i = 0; i < pimap->n_mapped; i++) {
5194 		if (guest_gsi == pimap->mapped[i].v_hwirq)
5195 			break;
5196 	}
5197 
5198 	if (i == pimap->n_mapped) {
5199 		mutex_unlock(&kvm->lock);
5200 		return -ENODEV;
5201 	}
5202 
5203 	if (xics_on_xive())
5204 		rc = kvmppc_xive_clr_mapped(kvm, guest_gsi, pimap->mapped[i].desc);
5205 	else
5206 		kvmppc_xics_clr_mapped(kvm, guest_gsi, pimap->mapped[i].r_hwirq);
5207 
5208 	/* invalidate the entry (what to do on error from the above?) */
5209 	pimap->mapped[i].r_hwirq = 0;
5210 
5211 	/*
5212 	 * We don't free this structure even when the count goes to
5213 	 * zero. The structure is freed when we destroy the VM.
5214 	 */
5215  unlock:
5216 	mutex_unlock(&kvm->lock);
5217 	return rc;
5218 }
5219 
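/*
 * irqbypass callbacks: called when an interrupt producer (e.g. a VFIO
 * device) is paired with or unpaired from an irqfd consumer, to set up
 * or tear down the passthrough mapping for that interrupt.
 */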
5220 static int kvmppc_irq_bypass_add_producer_hv(struct irq_bypass_consumer *cons,
5221 					     struct irq_bypass_producer *prod)
5222 {
5223 	int ret = 0;
5224 	struct kvm_kernel_irqfd *irqfd =
5225 		container_of(cons, struct kvm_kernel_irqfd, consumer);
5226 
5227 	irqfd->producer = prod;
5228 
5229 	ret = kvmppc_set_passthru_irq(irqfd->kvm, prod->irq, irqfd->gsi);
5230 	if (ret)
5231 		pr_info("kvmppc_set_passthru_irq (irq %d, gsi %d) fails: %d\n",
5232 			prod->irq, irqfd->gsi, ret);
5233 
5234 	return ret;
5235 }
5236 
5237 static void kvmppc_irq_bypass_del_producer_hv(struct irq_bypass_consumer *cons,
5238 					      struct irq_bypass_producer *prod)
5239 {
5240 	int ret;
5241 	struct kvm_kernel_irqfd *irqfd =
5242 		container_of(cons, struct kvm_kernel_irqfd, consumer);
5243 
5244 	irqfd->producer = NULL;
5245 
5246 	/*
5247 	 * When the producer of a consumer is unregistered, we change back
5248 	 * to the default external interrupt handling mode - KVM real mode
5249 	 * will switch back to the host.
5250 	 */
5251 	ret = kvmppc_clr_passthru_irq(irqfd->kvm, prod->irq, irqfd->gsi);
5252 	if (ret)
5253 		pr_warn("kvmppc_clr_passthru_irq (irq %d, gsi %d) fails: %d\n",
5254 			prod->irq, irqfd->gsi, ret);
5255 }
5256 #endif
5257 
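/*
 * HV-specific VM ioctls: HPT allocation/reset, the HTAB file descriptor,
 * and HPT resizing (prepare/commit).
 */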
5258 static long kvm_arch_vm_ioctl_hv(struct file *filp,
5259 				 unsigned int ioctl, unsigned long arg)
5260 {
5261 	struct kvm *kvm __maybe_unused = filp->private_data;
5262 	void __user *argp = (void __user *)arg;
5263 	long r;
5264 
5265 	switch (ioctl) {
5266 
5267 	case KVM_PPC_ALLOCATE_HTAB: {
5268 		u32 htab_order;
5269 
5270 		/* If we're a nested hypervisor, we currently only support radix */
5271 		if (kvmhv_on_pseries()) {
5272 			r = -EOPNOTSUPP;
5273 			break;
5274 		}
5275 
5276 		r = -EFAULT;
5277 		if (get_user(htab_order, (u32 __user *)argp))
5278 			break;
5279 		r = kvmppc_alloc_reset_hpt(kvm, htab_order);
5280 		if (r)
5281 			break;
5282 		r = 0;
5283 		break;
5284 	}
5285 
5286 	case KVM_PPC_GET_HTAB_FD: {
5287 		struct kvm_get_htab_fd ghf;
5288 
5289 		r = -EFAULT;
5290 		if (copy_from_user(&ghf, argp, sizeof(ghf)))
5291 			break;
5292 		r = kvm_vm_ioctl_get_htab_fd(kvm, &ghf);
5293 		break;
5294 	}
5295 
5296 	case KVM_PPC_RESIZE_HPT_PREPARE: {
5297 		struct kvm_ppc_resize_hpt rhpt;
5298 
5299 		r = -EFAULT;
5300 		if (copy_from_user(&rhpt, argp, sizeof(rhpt)))
5301 			break;
5302 
5303 		r = kvm_vm_ioctl_resize_hpt_prepare(kvm, &rhpt);
5304 		break;
5305 	}
5306 
5307 	case KVM_PPC_RESIZE_HPT_COMMIT: {
5308 		struct kvm_ppc_resize_hpt rhpt;
5309 
5310 		r = -EFAULT;
5311 		if (copy_from_user(&rhpt, argp, sizeof(rhpt)))
5312 			break;
5313 
5314 		r = kvm_vm_ioctl_resize_hpt_commit(kvm, &rhpt);
5315 		break;
5316 	}
5317 
5318 	default:
5319 		r = -ENOTTY;
5320 	}
5321 
5322 	return r;
5323 }
5324 
5325 /*
5326  * List of hcall numbers to enable by default.
5327  * For compatibility with old userspace, we enable by default
5328  * all hcalls that were implemented before the hcall-enabling
5329  * facility was added.  Note this list should not include H_RTAS.
5330  */
5331 static unsigned int default_hcall_list[] = {
5332 	H_REMOVE,
5333 	H_ENTER,
5334 	H_READ,
5335 	H_PROTECT,
5336 	H_BULK_REMOVE,
5337 	H_GET_TCE,
5338 	H_PUT_TCE,
5339 	H_SET_DABR,
5340 	H_SET_XDABR,
5341 	H_CEDE,
5342 	H_PROD,
5343 	H_CONFER,
5344 	H_REGISTER_VPA,
5345 #ifdef CONFIG_KVM_XICS
5346 	H_EOI,
5347 	H_CPPR,
5348 	H_IPI,
5349 	H_IPOLL,
5350 	H_XIRR,
5351 	H_XIRR_X,
5352 #endif
5353 	0
5354 };
5355 
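/*
 * Set the bits in default_enabled_hcalls corresponding to the hcalls in
 * default_hcall_list.  Hcall numbers are multiples of 4, hence the
 * division by 4 when computing the bit number.
 */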
5356 static void init_default_hcalls(void)
5357 {
5358 	int i;
5359 	unsigned int hcall;
5360 
5361 	for (i = 0; default_hcall_list[i]; ++i) {
5362 		hcall = default_hcall_list[i];
5363 		WARN_ON(!kvmppc_hcall_impl_hv(hcall));
5364 		__set_bit(hcall / 4, default_enabled_hcalls);
5365 	}
5366 }
5367 
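/*
 * Handle the KVM_PPC_CONFIGURE_V3_MMU ioctl: switch the guest between
 * HPT and radix translation, record its process table, and update
 * LPCR[GTSE] according to the requested flags.
 */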
5368 static int kvmhv_configure_mmu(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg)
5369 {
5370 	unsigned long lpcr;
5371 	int radix;
5372 	int err;
5373 
5374 	/* If not on a POWER9, reject it */
5375 	if (!cpu_has_feature(CPU_FTR_ARCH_300))
5376 		return -ENODEV;
5377 
5378 	/* If any unknown flags set, reject it */
5379 	if (cfg->flags & ~(KVM_PPC_MMUV3_RADIX | KVM_PPC_MMUV3_GTSE))
5380 		return -EINVAL;
5381 
5382 	/* GR (guest radix) bit in process_table field must match */
5383 	radix = !!(cfg->flags & KVM_PPC_MMUV3_RADIX);
5384 	if (!!(cfg->process_table & PATB_GR) != radix)
5385 		return -EINVAL;
5386 
5387 	/* Process table size field must be reasonable, i.e. <= 24 */
5388 	if ((cfg->process_table & PRTS_MASK) > 24)
5389 		return -EINVAL;
5390 
5391 	/* We can change a guest to/from radix now, if the host is radix */
5392 	if (radix && !radix_enabled())
5393 		return -EINVAL;
5394 
5395 	/* If we're a nested hypervisor, we currently only support radix */
5396 	if (kvmhv_on_pseries() && !radix)
5397 		return -EINVAL;
5398 
5399 	mutex_lock(&kvm->arch.mmu_setup_lock);
5400 	if (radix != kvm_is_radix(kvm)) {
5401 		if (kvm->arch.mmu_ready) {
5402 			kvm->arch.mmu_ready = 0;
5403 			/* order mmu_ready vs. vcpus_running */
5404 			smp_mb();
5405 			if (atomic_read(&kvm->arch.vcpus_running)) {
5406 				kvm->arch.mmu_ready = 1;
5407 				err = -EBUSY;
5408 				goto out_unlock;
5409 			}
5410 		}
5411 		if (radix)
5412 			err = kvmppc_switch_mmu_to_radix(kvm);
5413 		else
5414 			err = kvmppc_switch_mmu_to_hpt(kvm);
5415 		if (err)
5416 			goto out_unlock;
5417 	}
5418 
5419 	kvm->arch.process_table = cfg->process_table;
5420 	kvmppc_setup_partition_table(kvm);
5421 
5422 	lpcr = (cfg->flags & KVM_PPC_MMUV3_GTSE) ? LPCR_GTSE : 0;
5423 	kvmppc_update_lpcr(kvm, lpcr, LPCR_GTSE);
5424 	err = 0;
5425 
5426  out_unlock:
5427 	mutex_unlock(&kvm->arch.mmu_setup_lock);
5428 	return err;
5429 }
5430 
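/*
 * Enable a guest to act as a nested hypervisor (KVM_CAP_PPC_NESTED_HV),
 * or just test whether that is possible (kvm == NULL).
 */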
5431 static int kvmhv_enable_nested(struct kvm *kvm)
5432 {
5433 	if (!nested)
5434 		return -EPERM;
5435 	if (!cpu_has_feature(CPU_FTR_ARCH_300) || no_mixing_hpt_and_radix)
5436 		return -ENODEV;
5437 
5438 	/* kvm == NULL means the caller is testing if the capability exists */
5439 	if (kvm)
5440 		kvm->arch.nested_enable = true;
5441 	return 0;
5442 }
5443 
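/*
 * Load from / store to guest memory at an effective address.  Only radix
 * guests are supported for now, using quadrant accesses.
 */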
5444 static int kvmhv_load_from_eaddr(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
5445 				 int size)
5446 {
5447 	int rc = -EINVAL;
5448 
5449 	if (kvmhv_vcpu_is_radix(vcpu)) {
5450 		rc = kvmhv_copy_from_guest_radix(vcpu, *eaddr, ptr, size);
5451 
5452 		if (rc > 0)
5453 			rc = -EINVAL;
5454 	}
5455 
5456 	/* For now quadrants are the only way to access nested guest memory */
5457 	if (rc && vcpu->arch.nested)
5458 		rc = -EAGAIN;
5459 
5460 	return rc;
5461 }
5462 
5463 static int kvmhv_store_to_eaddr(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
5464 				int size)
5465 {
5466 	int rc = -EINVAL;
5467 
5468 	if (kvmhv_vcpu_is_radix(vcpu)) {
5469 		rc = kvmhv_copy_to_guest_radix(vcpu, *eaddr, ptr, size);
5470 
5471 		if (rc > 0)
5472 			rc = -EINVAL;
5473 	}
5474 
5475 	/* For now quadrants are the only way to access nested guest memory */
5476 	if (rc && vcpu->arch.nested)
5477 		rc = -EAGAIN;
5478 
5479 	return rc;
5480 }
5481 
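/*
 * Unpin a VPA page and clear its tracking state so that it can be
 * registered afresh after the guest is reset.
 */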
5482 static void unpin_vpa_reset(struct kvm *kvm, struct kvmppc_vpa *vpa)
5483 {
5484 	unpin_vpa(kvm, vpa);
5485 	vpa->gpa = 0;
5486 	vpa->pinned_addr = NULL;
5487 	vpa->dirty = false;
5488 	vpa->update_pending = 0;
5489 }
5490 
5491 /*
5492  * Enable a guest to become a secure VM, or test whether
5493  * that could be enabled.
5494  * Called when the KVM_CAP_PPC_SECURE_GUEST capability is
5495  * tested (kvm == NULL) or enabled (kvm != NULL).
5496  */
5497 static int kvmhv_enable_svm(struct kvm *kvm)
5498 {
5499 	if (!kvmppc_uvmem_available())
5500 		return -EINVAL;
5501 	if (kvm)
5502 		kvm->arch.svm_enabled = 1;
5503 	return 0;
5504 }
5505 
5506 /*
5507  * IOCTL handler to turn off secure mode of a guest
5508  *
5509  * - Release all device pages
5510  * - Issue ucall to terminate the guest on the UV side
5511  * - Unpin the VPA pages.
5512  * - Reinit the partition scoped page tables
5513  */
5514 static int kvmhv_svm_off(struct kvm *kvm)
5515 {
5516 	struct kvm_vcpu *vcpu;
5517 	int mmu_was_ready;
5518 	int srcu_idx;
5519 	int ret = 0;
5520 	int i;
5521 
5522 	if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
5523 		return ret;
5524 
5525 	mutex_lock(&kvm->arch.mmu_setup_lock);
5526 	mmu_was_ready = kvm->arch.mmu_ready;
5527 	if (kvm->arch.mmu_ready) {
5528 		kvm->arch.mmu_ready = 0;
5529 		/* order mmu_ready vs. vcpus_running */
5530 		smp_mb();
5531 		if (atomic_read(&kvm->arch.vcpus_running)) {
5532 			kvm->arch.mmu_ready = 1;
5533 			ret = -EBUSY;
5534 			goto out;
5535 		}
5536 	}
5537 
5538 	srcu_idx = srcu_read_lock(&kvm->srcu);
5539 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
5540 		struct kvm_memory_slot *memslot;
5541 		struct kvm_memslots *slots = __kvm_memslots(kvm, i);
5542 
5543 		if (!slots)
5544 			continue;
5545 
5546 		kvm_for_each_memslot(memslot, slots) {
5547 			kvmppc_uvmem_drop_pages(memslot, kvm, true);
5548 			uv_unregister_mem_slot(kvm->arch.lpid, memslot->id);
5549 		}
5550 	}
5551 	srcu_read_unlock(&kvm->srcu, srcu_idx);
5552 
5553 	ret = uv_svm_terminate(kvm->arch.lpid);
5554 	if (ret != U_SUCCESS) {
5555 		ret = -EINVAL;
5556 		goto out;
5557 	}
5558 
5559 	/*
5560 	 * When secure guest is reset, all the guest pages are sent
5561 	 * to UV via UV_PAGE_IN before the non-boot vcpus get a
5562 	 * chance to run and unpin their VPA pages. Unpinning of all
5563 	 * VPA pages is done here explicitly so that VPA pages
5564 	 * can be migrated to the secure side.
5565 	 *
5566 	 * This is required for the secure SMP guest to reboot
5567 	 * correctly.
5568 	 */
5569 	kvm_for_each_vcpu(i, vcpu, kvm) {
5570 		spin_lock(&vcpu->arch.vpa_update_lock);
5571 		unpin_vpa_reset(kvm, &vcpu->arch.dtl);
5572 		unpin_vpa_reset(kvm, &vcpu->arch.slb_shadow);
5573 		unpin_vpa_reset(kvm, &vcpu->arch.vpa);
5574 		spin_unlock(&vcpu->arch.vpa_update_lock);
5575 	}
5576 
5577 	kvmppc_setup_partition_table(kvm);
5578 	kvm->arch.secure_guest = 0;
5579 	kvm->arch.mmu_ready = mmu_was_ready;
5580 out:
5581 	mutex_unlock(&kvm->arch.mmu_setup_lock);
5582 	return ret;
5583 }
5584 
5585 static struct kvmppc_ops kvm_ops_hv = {
5586 	.get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv,
5587 	.set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv,
5588 	.get_one_reg = kvmppc_get_one_reg_hv,
5589 	.set_one_reg = kvmppc_set_one_reg_hv,
5590 	.vcpu_load   = kvmppc_core_vcpu_load_hv,
5591 	.vcpu_put    = kvmppc_core_vcpu_put_hv,
5592 	.inject_interrupt = kvmppc_inject_interrupt_hv,
5593 	.set_msr     = kvmppc_set_msr_hv,
5594 	.vcpu_run    = kvmppc_vcpu_run_hv,
5595 	.vcpu_create = kvmppc_core_vcpu_create_hv,
5596 	.vcpu_free   = kvmppc_core_vcpu_free_hv,
5597 	.check_requests = kvmppc_core_check_requests_hv,
5598 	.get_dirty_log  = kvm_vm_ioctl_get_dirty_log_hv,
5599 	.flush_memslot  = kvmppc_core_flush_memslot_hv,
5600 	.prepare_memory_region = kvmppc_core_prepare_memory_region_hv,
5601 	.commit_memory_region  = kvmppc_core_commit_memory_region_hv,
5602 	.unmap_hva_range = kvm_unmap_hva_range_hv,
5603 	.age_hva  = kvm_age_hva_hv,
5604 	.test_age_hva = kvm_test_age_hva_hv,
5605 	.set_spte_hva = kvm_set_spte_hva_hv,
5606 	.free_memslot = kvmppc_core_free_memslot_hv,
5607 	.init_vm =  kvmppc_core_init_vm_hv,
5608 	.destroy_vm = kvmppc_core_destroy_vm_hv,
5609 	.get_smmu_info = kvm_vm_ioctl_get_smmu_info_hv,
5610 	.emulate_op = kvmppc_core_emulate_op_hv,
5611 	.emulate_mtspr = kvmppc_core_emulate_mtspr_hv,
5612 	.emulate_mfspr = kvmppc_core_emulate_mfspr_hv,
5613 	.fast_vcpu_kick = kvmppc_fast_vcpu_kick_hv,
5614 	.arch_vm_ioctl  = kvm_arch_vm_ioctl_hv,
5615 	.hcall_implemented = kvmppc_hcall_impl_hv,
5616 #ifdef CONFIG_KVM_XICS
5617 	.irq_bypass_add_producer = kvmppc_irq_bypass_add_producer_hv,
5618 	.irq_bypass_del_producer = kvmppc_irq_bypass_del_producer_hv,
5619 #endif
5620 	.configure_mmu = kvmhv_configure_mmu,
5621 	.get_rmmu_info = kvmhv_get_rmmu_info,
5622 	.set_smt_mode = kvmhv_set_smt_mode,
5623 	.enable_nested = kvmhv_enable_nested,
5624 	.load_from_eaddr = kvmhv_load_from_eaddr,
5625 	.store_to_eaddr = kvmhv_store_to_eaddr,
5626 	.enable_svm = kvmhv_enable_svm,
5627 	.svm_off = kvmhv_svm_off,
5628 };
5629 
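/*
 * Allocate one sibling_subcore_state structure per core and point the
 * PACA of every thread on that core at it.
 */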
5630 static int kvm_init_subcore_bitmap(void)
5631 {
5632 	int i, j;
5633 	int nr_cores = cpu_nr_cores();
5634 	struct sibling_subcore_state *sibling_subcore_state;
5635 
5636 	for (i = 0; i < nr_cores; i++) {
5637 		int first_cpu = i * threads_per_core;
5638 		int node = cpu_to_node(first_cpu);
5639 
5640 		/* Ignore if it is already allocated. */
5641 		if (paca_ptrs[first_cpu]->sibling_subcore_state)
5642 			continue;
5643 
5644 		sibling_subcore_state =
5645 			kzalloc_node(sizeof(struct sibling_subcore_state),
5646 							GFP_KERNEL, node);
5647 		if (!sibling_subcore_state)
5648 			return -ENOMEM;
5649 
5650 
5651 		for (j = 0; j < threads_per_core; j++) {
5652 			int cpu = first_cpu + j;
5653 
5654 			paca_ptrs[cpu]->sibling_subcore_state =
5655 						sibling_subcore_state;
5656 		}
5657 	}
5658 	return 0;
5659 }
5660 
5661 static int kvmppc_radix_possible(void)
5662 {
5663 	return cpu_has_feature(CPU_FTR_ARCH_300) && radix_enabled();
5664 }
5665 
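/*
 * Module initialization: check that the processor can run HV guests,
 * set up nested-guest and subcore state, make sure we can reach the
 * XICS controller, and register kvm_ops_hv with the generic PPC KVM code.
 */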
5666 static int kvmppc_book3s_init_hv(void)
5667 {
5668 	int r;
5669 
5670 	if (!tlbie_capable) {
5671 		pr_err("KVM-HV: Host does not support TLBIE\n");
5672 		return -ENODEV;
5673 	}
5674 
5675 	/*
5676 	 * FIXME!! Do we need to check on all cpus?
5677 	 */
5678 	r = kvmppc_core_check_processor_compat_hv();
5679 	if (r < 0)
5680 		return -ENODEV;
5681 
5682 	r = kvmhv_nested_init();
5683 	if (r)
5684 		return r;
5685 
5686 	r = kvm_init_subcore_bitmap();
5687 	if (r)
5688 		return r;
5689 
5690 	/*
5691 	 * We need a way of accessing the XICS interrupt controller,
5692 	 * either directly, via paca_ptrs[cpu]->kvm_hstate.xics_phys, or
5693 	 * indirectly, via OPAL.
5694 	 */
5695 #ifdef CONFIG_SMP
5696 	if (!xics_on_xive() && !kvmhv_on_pseries() &&
5697 	    !local_paca->kvm_hstate.xics_phys) {
5698 		struct device_node *np;
5699 
5700 		np = of_find_compatible_node(NULL, NULL, "ibm,opal-intc");
5701 		if (!np) {
5702 			pr_err("KVM-HV: Cannot determine method for accessing XICS\n");
5703 			return -ENODEV;
5704 		}
5705 		/* presence of intc confirmed - node can be dropped again */
5706 		of_node_put(np);
5707 	}
5708 #endif
5709 
5710 	kvm_ops_hv.owner = THIS_MODULE;
5711 	kvmppc_hv_ops = &kvm_ops_hv;
5712 
5713 	init_default_hcalls();
5714 
5715 	init_vcore_lists();
5716 
5717 	r = kvmppc_mmu_hv_init();
5718 	if (r)
5719 		return r;
5720 
5721 	if (kvmppc_radix_possible())
5722 		r = kvmppc_radix_init();
5723 
5724 	/*
5725 	 * POWER9 chips before version 2.02 can't have some threads in
5726 	 * HPT mode and some in radix mode on the same core.
5727 	 */
5728 	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
5729 		unsigned int pvr = mfspr(SPRN_PVR);
5730 		if ((pvr >> 16) == PVR_POWER9 &&
5731 		    (((pvr & 0xe000) == 0 && (pvr & 0xfff) < 0x202) ||
5732 		     ((pvr & 0xe000) == 0x2000 && (pvr & 0xfff) < 0x101)))
5733 			no_mixing_hpt_and_radix = true;
5734 	}
5735 
5736 	r = kvmppc_uvmem_init();
5737 	if (r < 0)
5738 		pr_err("KVM-HV: kvmppc_uvmem_init failed %d\n", r);
5739 
5740 	return r;
5741 }
5742 
5743 static void kvmppc_book3s_exit_hv(void)
5744 {
5745 	kvmppc_uvmem_free();
5746 	kvmppc_free_host_rm_ops();
5747 	if (kvmppc_radix_possible())
5748 		kvmppc_radix_exit();
5749 	kvmppc_hv_ops = NULL;
5750 	kvmhv_nested_exit();
5751 }
5752 
5753 module_init(kvmppc_book3s_init_hv);
5754 module_exit(kvmppc_book3s_exit_hv);
5755 MODULE_LICENSE("GPL");
5756 MODULE_ALIAS_MISCDEV(KVM_MINOR);
5757 MODULE_ALIAS("devname:kvm");
5758