// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright IBM Corp. 2007
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 *          Scott Wood <scottwood@freescale.com>
 *          Varun Sethi <varun.sethi@freescale.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>

#include <asm/cputable.h>
#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cacheflush.h>
#include <asm/dbell.h>
#include <asm/hw_irq.h>
#include <asm/irq.h>
#include <asm/time.h>

#include "timing.h"
#include "booke.h"

#define CREATE_TRACE_POINTS
#include "trace_booke.h"

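/* Host address of the booke KVM exception handlers, set up at module init. */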
unsigned long kvmppc_booke_handlers;

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "mmio", VCPU_STAT(mmio_exits) },
	{ "sig", VCPU_STAT(signal_exits) },
	{ "itlb_r", VCPU_STAT(itlb_real_miss_exits) },
	{ "itlb_v", VCPU_STAT(itlb_virt_miss_exits) },
	{ "dtlb_r", VCPU_STAT(dtlb_real_miss_exits) },
	{ "dtlb_v", VCPU_STAT(dtlb_virt_miss_exits) },
	{ "sysc", VCPU_STAT(syscall_exits) },
	{ "isi", VCPU_STAT(isi_exits) },
	{ "dsi", VCPU_STAT(dsi_exits) },
	{ "inst_emu", VCPU_STAT(emulated_inst_exits) },
	{ "dec", VCPU_STAT(dec_exits) },
	{ "ext_intr", VCPU_STAT(ext_intr_exits) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "doorbell", VCPU_STAT(dbell_exits) },
	{ "guest doorbell", VCPU_STAT(gdbell_exits) },
	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
	{ NULL }
};

/* TODO: use vcpu_printf() */
void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	printk("pc: %08lx msr: %08llx\n", vcpu->arch.regs.nip,
	       vcpu->arch.shared->msr);
	printk("lr: %08lx ctr: %08lx\n", vcpu->arch.regs.link,
	       vcpu->arch.regs.ctr);
	printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
	       vcpu->arch.shared->srr1);

	printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
		       kvmppc_get_gpr(vcpu, i),
		       kvmppc_get_gpr(vcpu, i+1),
		       kvmppc_get_gpr(vcpu, i+2),
		       kvmppc_get_gpr(vcpu, i+3));
	}
}

#ifdef CONFIG_SPE
void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	enable_kernel_spe();
	kvmppc_save_guest_spe(vcpu);
	disable_kernel_spe();
	vcpu->arch.shadow_msr &= ~MSR_SPE;
	preempt_enable();
}

static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	enable_kernel_spe();
	kvmppc_load_guest_spe(vcpu);
	disable_kernel_spe();
	vcpu->arch.shadow_msr |= MSR_SPE;
	preempt_enable();
}

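/*
 * Keep the hardware (shadow) MSR[SPE] in step with the guest's view of
 * MSR[SPE], loading or saving the guest SPE state as the bit is toggled.
 */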
static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.shared->msr & MSR_SPE) {
		if (!(vcpu->arch.shadow_msr & MSR_SPE))
			kvmppc_vcpu_enable_spe(vcpu);
	} else if (vcpu->arch.shadow_msr & MSR_SPE) {
		kvmppc_vcpu_disable_spe(vcpu);
	}
}
#else
static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
}
#endif

/*
 * Load up the guest vcpu FP state if it's needed.
 * Also set MSR_FP in the thread so that the host knows we're holding the
 * FPU and can save the guest vcpu FP state if another thread needs the FPU.
 * This simulates an FP unavailable fault.
 *
 * Must be called with preemption disabled.
 */
static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_FPU
	if (!(current->thread.regs->msr & MSR_FP)) {
		enable_kernel_fp();
		load_fp_state(&vcpu->arch.fp);
		disable_kernel_fp();
		current->thread.fp_save_area = &vcpu->arch.fp;
		current->thread.regs->msr |= MSR_FP;
	}
#endif
}

/*
 * Save the guest vcpu FP state into the thread.
 * Must be called with preemption disabled.
 */
static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_FPU
	if (current->thread.regs->msr & MSR_FP)
		giveup_fpu(current);
	current->thread.fp_save_area = NULL;
#endif
}

static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_FPU) && !defined(CONFIG_KVM_BOOKE_HV)
	/* We always treat the FP bit as enabled from the host
	   perspective, so only need to adjust the shadow MSR */
	vcpu->arch.shadow_msr &= ~MSR_FP;
	vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_FP;
#endif
}

/*
 * Simulate an AltiVec unavailable fault to load the guest state
 * from the thread into the AltiVec unit.
 * Must be called with preemption disabled.
 */
static inline void kvmppc_load_guest_altivec(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
		if (!(current->thread.regs->msr & MSR_VEC)) {
			enable_kernel_altivec();
			load_vr_state(&vcpu->arch.vr);
			disable_kernel_altivec();
			current->thread.vr_save_area = &vcpu->arch.vr;
			current->thread.regs->msr |= MSR_VEC;
		}
	}
#endif
}

/*
 * Save the guest vcpu AltiVec state into the thread.
 * Must be called with preemption disabled.
 */
static inline void kvmppc_save_guest_altivec(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
		if (current->thread.regs->msr & MSR_VEC)
			giveup_altivec(current);
		current->thread.vr_save_area = NULL;
	}
#endif
}

static void kvmppc_vcpu_sync_debug(struct kvm_vcpu *vcpu)
{
	/* Synchronize guest's desire to get debug interrupts into shadow MSR */
#ifndef CONFIG_KVM_BOOKE_HV
	vcpu->arch.shadow_msr &= ~MSR_DE;
	vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_DE;
#endif

	/* Force enable debug interrupts when user space wants to debug */
	if (vcpu->guest_debug) {
#ifdef CONFIG_KVM_BOOKE_HV
		/*
		 * Since there is no shadow MSR, sync MSR_DE into the guest
		 * visible MSR.
		 */
		vcpu->arch.shared->msr |= MSR_DE;
#else
		vcpu->arch.shadow_msr |= MSR_DE;
		vcpu->arch.shared->msr &= ~MSR_DE;
#endif
	}
}

/*
 * Helper function for "full" MSR writes. No need to call this if only
 * EE/CE/ME/DE/RI are changing.
 */
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
{
	u32 old_msr = vcpu->arch.shared->msr;

#ifdef CONFIG_KVM_BOOKE_HV
	new_msr |= MSR_GS;
#endif

	vcpu->arch.shared->msr = new_msr;

	kvmppc_mmu_msr_notify(vcpu, old_msr);
	kvmppc_vcpu_sync_spe(vcpu);
	kvmppc_vcpu_sync_fpu(vcpu);
	kvmppc_vcpu_sync_debug(vcpu);
}

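/*
 * Mark an interrupt of the given priority as pending; the actual delivery
 * happens later in kvmppc_booke_irqprio_deliver().
 */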
static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
				       unsigned int priority)
{
	trace_kvm_booke_queue_irqprio(vcpu, priority);
	set_bit(priority, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
				 ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
}

void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
				    ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
}

void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
}

void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
}

static void kvmppc_core_queue_alignment(struct kvm_vcpu *vcpu, ulong dear_flags,
					ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALIGNMENT);
}

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}

void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
}

#ifdef CONFIG_ALTIVEC
void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_UNAVAIL);
}
#endif

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
}

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				struct kvm_interrupt *irq)
{
	unsigned int prio = BOOKE_IRQPRIO_EXTERNAL;

	if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
		prio = BOOKE_IRQPRIO_EXTERNAL_LEVEL;

	kvmppc_booke_queue_irqprio(vcpu, prio);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
	clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
}

static void kvmppc_core_queue_watchdog(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_WATCHDOG);
}

static void kvmppc_core_dequeue_watchdog(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_WATCHDOG, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DEBUG);
}

void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_DEBUG, &vcpu->arch.pending_exceptions);
}

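/*
 * Helpers to stash the interrupted PC and MSR in the save/restore register
 * pair used by each interrupt class (SRR, CSRR, DSRR, MCSRR).
 */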
static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	kvmppc_set_srr0(vcpu, srr0);
	kvmppc_set_srr1(vcpu, srr1);
}

static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	vcpu->arch.csrr0 = srr0;
	vcpu->arch.csrr1 = srr1;
}

static void set_guest_dsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) {
		vcpu->arch.dsrr0 = srr0;
		vcpu->arch.dsrr1 = srr1;
	} else {
		set_guest_csrr(vcpu, srr0, srr1);
	}
}

static void set_guest_mcsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	vcpu->arch.mcsrr0 = srr0;
	vcpu->arch.mcsrr1 = srr1;
}

/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
					unsigned int priority)
{
	int allowed = 0;
	ulong msr_mask = 0;
	bool update_esr = false, update_dear = false, update_epr = false;
	ulong crit_raw = vcpu->arch.shared->critical;
	ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
	bool crit;
	bool keep_irq = false;
	enum int_class int_class;
	ulong new_msr = vcpu->arch.shared->msr;

	/* Truncate crit indicators in 32 bit mode */
	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);

	if (priority == BOOKE_IRQPRIO_EXTERNAL_LEVEL) {
		priority = BOOKE_IRQPRIO_EXTERNAL;
		keep_irq = true;
	}

	if ((priority == BOOKE_IRQPRIO_EXTERNAL) && vcpu->arch.epr_flags)
		update_epr = true;

	switch (priority) {
	case BOOKE_IRQPRIO_DTLB_MISS:
	case BOOKE_IRQPRIO_DATA_STORAGE:
	case BOOKE_IRQPRIO_ALIGNMENT:
		update_dear = true;
		/* fall through */
	case BOOKE_IRQPRIO_INST_STORAGE:
	case BOOKE_IRQPRIO_PROGRAM:
		update_esr = true;
		/* fall through */
	case BOOKE_IRQPRIO_ITLB_MISS:
	case BOOKE_IRQPRIO_SYSCALL:
	case BOOKE_IRQPRIO_FP_UNAVAIL:
#ifdef CONFIG_SPE_POSSIBLE
	case BOOKE_IRQPRIO_SPE_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_FP_DATA:
	case BOOKE_IRQPRIO_SPE_FP_ROUND:
#endif
#ifdef CONFIG_ALTIVEC
	case BOOKE_IRQPRIO_ALTIVEC_UNAVAIL:
	case BOOKE_IRQPRIO_ALTIVEC_ASSIST:
#endif
	case BOOKE_IRQPRIO_AP_UNAVAIL:
		allowed = 1;
		msr_mask = MSR_CE | MSR_ME | MSR_DE;
		int_class = INT_CLASS_NONCRIT;
		break;
	case BOOKE_IRQPRIO_WATCHDOG:
	case BOOKE_IRQPRIO_CRITICAL:
	case BOOKE_IRQPRIO_DBELL_CRIT:
		allowed = vcpu->arch.shared->msr & MSR_CE;
		allowed = allowed && !crit;
		msr_mask = MSR_ME;
		int_class = INT_CLASS_CRIT;
		break;
	case BOOKE_IRQPRIO_MACHINE_CHECK:
		allowed = vcpu->arch.shared->msr & MSR_ME;
		allowed = allowed && !crit;
		int_class = INT_CLASS_MC;
		break;
	case BOOKE_IRQPRIO_DECREMENTER:
	case BOOKE_IRQPRIO_FIT:
		keep_irq = true;
		/* fall through */
	case BOOKE_IRQPRIO_EXTERNAL:
	case BOOKE_IRQPRIO_DBELL:
		allowed = vcpu->arch.shared->msr & MSR_EE;
		allowed = allowed && !crit;
		msr_mask = MSR_CE | MSR_ME | MSR_DE;
		int_class = INT_CLASS_NONCRIT;
		break;
	case BOOKE_IRQPRIO_DEBUG:
		allowed = vcpu->arch.shared->msr & MSR_DE;
		allowed = allowed && !crit;
		msr_mask = MSR_ME;
		if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
			int_class = INT_CLASS_DBG;
		else
			int_class = INT_CLASS_CRIT;

		break;
	}

	if (allowed) {
		switch (int_class) {
		case INT_CLASS_NONCRIT:
			set_guest_srr(vcpu, vcpu->arch.regs.nip,
				      vcpu->arch.shared->msr);
			break;
		case INT_CLASS_CRIT:
			set_guest_csrr(vcpu, vcpu->arch.regs.nip,
				       vcpu->arch.shared->msr);
			break;
		case INT_CLASS_DBG:
			set_guest_dsrr(vcpu, vcpu->arch.regs.nip,
				       vcpu->arch.shared->msr);
			break;
		case INT_CLASS_MC:
			set_guest_mcsrr(vcpu, vcpu->arch.regs.nip,
					vcpu->arch.shared->msr);
			break;
		}

		vcpu->arch.regs.nip = vcpu->arch.ivpr |
				      vcpu->arch.ivor[priority];
		if (update_esr)
			kvmppc_set_esr(vcpu, vcpu->arch.queued_esr);
		if (update_dear)
			kvmppc_set_dar(vcpu, vcpu->arch.queued_dear);
		if (update_epr) {
			if (vcpu->arch.epr_flags & KVMPPC_EPR_USER)
				kvm_make_request(KVM_REQ_EPR_EXIT, vcpu);
			else if (vcpu->arch.epr_flags & KVMPPC_EPR_KERNEL) {
				BUG_ON(vcpu->arch.irq_type != KVMPPC_IRQ_MPIC);
				kvmppc_mpic_set_epr(vcpu);
			}
		}

		new_msr &= msr_mask;
#if defined(CONFIG_64BIT)
		if (vcpu->arch.epcr & SPRN_EPCR_ICM)
			new_msr |= MSR_CM;
#endif
		kvmppc_set_msr(vcpu, new_msr);

		if (!keep_irq)
			clear_bit(priority, &vcpu->arch.pending_exceptions);
	}

#ifdef CONFIG_KVM_BOOKE_HV
	/*
	 * If an interrupt is pending but masked, raise a guest doorbell
	 * so that we are notified when the guest enables the relevant
	 * MSR bit.
	 */
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_NONCRIT);
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_CE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_CRIT);
	if (vcpu->arch.pending_exceptions & BOOKE_IRQPRIO_MACHINE_CHECK)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_MC);
#endif

	return allowed;
}

/*
 * Return the number of jiffies until the next timeout. If the timeout is
 * longer than NEXT_TIMER_MAX_DELTA, then return NEXT_TIMER_MAX_DELTA
 * because the larger value can break the timer APIs.
 */
static unsigned long watchdog_next_timeout(struct kvm_vcpu *vcpu)
{
	u64 tb, wdt_tb, wdt_ticks = 0;
	u64 nr_jiffies = 0;
	u32 period = TCR_GET_WP(vcpu->arch.tcr);

	wdt_tb = 1ULL << (63 - period);
	tb = get_tb();
	/*
	 * The watchdog timeout happens when the TB bit corresponding
	 * to the watchdog period toggles from 0 to 1.
	 */
	if (tb & wdt_tb)
		wdt_ticks = wdt_tb;

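	/* Add the ticks until the low-order TB bits wrap, i.e. until the
	 * watchdog bit next toggles. */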
	wdt_ticks += wdt_tb - (tb & (wdt_tb - 1));

	/* Convert timebase ticks to jiffies */
	nr_jiffies = wdt_ticks;

	if (do_div(nr_jiffies, tb_ticks_per_jiffy))
		nr_jiffies++;

	return min_t(unsigned long long, nr_jiffies, NEXT_TIMER_MAX_DELTA);
}

static void arm_next_watchdog(struct kvm_vcpu *vcpu)
{
	unsigned long nr_jiffies;
	unsigned long flags;

	/*
	 * If TSR_ENW and TSR_WIS are not set then no need to exit to
	 * userspace, so clear the KVM_REQ_WATCHDOG request.
	 */
	if ((vcpu->arch.tsr & (TSR_ENW | TSR_WIS)) != (TSR_ENW | TSR_WIS))
		kvm_clear_request(KVM_REQ_WATCHDOG, vcpu);

	spin_lock_irqsave(&vcpu->arch.wdt_lock, flags);
	nr_jiffies = watchdog_next_timeout(vcpu);
	/*
	 * If the watchdog timeout in jiffies is >= NEXT_TIMER_MAX_DELTA,
	 * do not arm the watchdog timer, as this can break the timer APIs.
	 */
	if (nr_jiffies < NEXT_TIMER_MAX_DELTA)
		mod_timer(&vcpu->arch.wdt_timer, jiffies + nr_jiffies);
	else
		del_timer(&vcpu->arch.wdt_timer);
	spin_unlock_irqrestore(&vcpu->arch.wdt_lock, flags);
}

void kvmppc_watchdog_func(struct timer_list *t)
{
	struct kvm_vcpu *vcpu = from_timer(vcpu, t, arch.wdt_timer);
	u32 tsr, new_tsr;
	int final;

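	/*
	 * Advance the watchdog state machine in TSR atomically: the first
	 * expiry sets ENW, the second sets WIS, and with both already set
	 * this expiry is final.
	 */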
	do {
		new_tsr = tsr = vcpu->arch.tsr;
		final = 0;

		/* Time out event */
		if (tsr & TSR_ENW) {
			if (tsr & TSR_WIS)
				final = 1;
			else
				new_tsr = tsr | TSR_WIS;
		} else {
			new_tsr = tsr | TSR_ENW;
		}
	} while (cmpxchg(&vcpu->arch.tsr, tsr, new_tsr) != tsr);

	if (new_tsr & TSR_WIS) {
		smp_wmb();
		kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
		kvm_vcpu_kick(vcpu);
	}

	/*
	 * If this is the final watchdog expiry and some action is required,
	 * then exit to userspace.
	 */
	if (final && (vcpu->arch.tcr & TCR_WRC_MASK) &&
	    vcpu->arch.watchdog_enabled) {
		smp_wmb();
		kvm_make_request(KVM_REQ_WATCHDOG, vcpu);
		kvm_vcpu_kick(vcpu);
	}

	/*
	 * Stop running the watchdog timer after final expiration to
	 * prevent the host from being flooded with timers if the
	 * guest sets a short period.
	 * Timers will resume when TSR/TCR is updated next time.
	 */
	if (!final)
		arm_next_watchdog(vcpu);
}

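/*
 * Raise or clear the pending decrementer and watchdog interrupts according
 * to the enable bits in TCR and the status bits in TSR.
 */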
static void update_timer_ints(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS))
		kvmppc_core_queue_dec(vcpu);
	else
		kvmppc_core_dequeue_dec(vcpu);

	if ((vcpu->arch.tcr & TCR_WIE) && (vcpu->arch.tsr & TSR_WIS))
		kvmppc_core_queue_watchdog(vcpu);
	else
		kvmppc_core_dequeue_watchdog(vcpu);
}

static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned int priority;

	priority = __ffs(*pending);
	while (priority < BOOKE_IRQPRIO_MAX) {
		if (kvmppc_booke_irqprio_deliver(vcpu, priority))
			break;

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}

	/* Tell the guest about our interrupt status */
	vcpu->arch.shared->int_pending = !!*pending;
}

/* Check pending exceptions and deliver one, if possible. */
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r = 0;
	WARN_ON_ONCE(!irqs_disabled());

	kvmppc_core_check_exceptions(vcpu);

	if (kvm_request_pending(vcpu)) {
		/* Exception delivery raised request; start over */
		return 1;
	}

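	/* The guest set MSR[WE] (wait state enable): block the vcpu until
	 * an interrupt wakes it up. */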
	if (vcpu->arch.shared->msr & MSR_WE) {
		local_irq_enable();
		kvm_vcpu_block(vcpu);
		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
		hard_irq_disable();

		kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
		r = 1;
	}

	return r;
}

int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	int r = 1; /* Indicate we want to get back into the guest */

	if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu))
		update_timer_ints(vcpu);
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		kvmppc_core_flush_tlb(vcpu);
#endif

	if (kvm_check_request(KVM_REQ_WATCHDOG, vcpu)) {
		vcpu->run->exit_reason = KVM_EXIT_WATCHDOG;
		r = 0;
	}

	if (kvm_check_request(KVM_REQ_EPR_EXIT, vcpu)) {
		vcpu->run->epr.epr = 0;
		vcpu->arch.epr_needed = true;
		vcpu->run->exit_reason = KVM_EXIT_EPR;
		r = 0;
	}

	return r;
}

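/*
 * Main entry point for running a booke vcpu: load guest FP/AltiVec and
 * debug state, enter the guest via __kvmppc_vcpu_run(), then restore
 * host state on the way out.
 */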
int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret, s;
	struct debug_reg debug;

	if (!vcpu->arch.sane) {
		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return -EINVAL;
	}

	s = kvmppc_prepare_to_enter(vcpu);
	if (s <= 0) {
		ret = s;
		goto out;
	}
	/* interrupts now hard-disabled */

#ifdef CONFIG_PPC_FPU
	/* Save userspace FPU state in stack */
	enable_kernel_fp();

	/*
	 * Since we can't trap on MSR_FP in GS-mode, we consider the guest
	 * as always using the FPU.
	 */
	kvmppc_load_guest_fp(vcpu);
#endif

#ifdef CONFIG_ALTIVEC
	/* Save userspace AltiVec state in stack */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		enable_kernel_altivec();
	/*
	 * Since we can't trap on MSR_VEC in GS-mode, we consider the guest
	 * as always using AltiVec.
	 */
	kvmppc_load_guest_altivec(vcpu);
#endif

	/* Switch to guest debug context */
	debug = vcpu->arch.dbg_reg;
	switch_booke_debug_regs(&debug);
	debug = current->thread.debug;
	current->thread.debug = vcpu->arch.dbg_reg;

	vcpu->arch.pgdir = current->mm->pgd;
	kvmppc_fix_ee_before_entry();

	ret = __kvmppc_vcpu_run(kvm_run, vcpu);

	/* No need for guest_exit. It's done in handle_exit.
	   We also get here with interrupts enabled. */

	/* Switch back to user space debug context */
	switch_booke_debug_regs(&debug);
	current->thread.debug = debug;

#ifdef CONFIG_PPC_FPU
	kvmppc_save_guest_fp(vcpu);
#endif

#ifdef CONFIG_ALTIVEC
	kvmppc_save_guest_altivec(vcpu);
#endif

out:
	vcpu->mode = OUTSIDE_GUEST_MODE;
	return ret;
}

static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;

	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* don't overwrite subtypes, just account kvm_stats */
		kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
		/* Future optimization: only reload non-volatiles if
		 * they were actually modified by emulation. */
		return RESUME_GUEST_NV;

	case EMULATE_AGAIN:
		return RESUME_GUEST;

	case EMULATE_FAIL:
		printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
		       __func__, vcpu->arch.regs.nip, vcpu->arch.last_inst);
		/* For debugging, encode the failing instruction and
		 * report it to userspace. */
		run->hw.hardware_exit_reason = ~0ULL << 32;
		run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
		kvmppc_core_queue_program(vcpu, ESR_PIL);
		return RESUME_HOST;

	case EMULATE_EXIT_USER:
		return RESUME_HOST;

	default:
		BUG();
	}
}

static int kvmppc_handle_debug(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct debug_reg *dbg_reg = &(vcpu->arch.dbg_reg);
	u32 dbsr = vcpu->arch.dbsr;

	if (vcpu->guest_debug == 0) {
		/*
		 * Debug resources belong to the guest.
		 * Imprecise debug events are not injected.
		 */
		if (dbsr & DBSR_IDE) {
			dbsr &= ~DBSR_IDE;
			if (!dbsr)
				return RESUME_GUEST;
		}

		if (dbsr && (vcpu->arch.shared->msr & MSR_DE) &&
		    (vcpu->arch.dbg_reg.dbcr0 & DBCR0_IDM))
			kvmppc_core_queue_debug(vcpu);

		/* Inject a program interrupt if trap debug is not allowed */
		if ((dbsr & DBSR_TIE) && !(vcpu->arch.shared->msr & MSR_DE))
			kvmppc_core_queue_program(vcpu, ESR_PTR);

		return RESUME_GUEST;
	}

	/*
	 * Debug resources are owned by userspace.
	 * Clear the guest dbsr (vcpu->arch.dbsr).
	 */
	vcpu->arch.dbsr = 0;
	run->debug.arch.status = 0;
	run->debug.arch.address = vcpu->arch.regs.nip;

	if (dbsr & (DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4)) {
		run->debug.arch.status |= KVMPPC_DEBUG_BREAKPOINT;
	} else {
		if (dbsr & (DBSR_DAC1W | DBSR_DAC2W))
			run->debug.arch.status |= KVMPPC_DEBUG_WATCH_WRITE;
		else if (dbsr & (DBSR_DAC1R | DBSR_DAC2R))
			run->debug.arch.status |= KVMPPC_DEBUG_WATCH_READ;
		if (dbsr & (DBSR_DAC1R | DBSR_DAC1W))
			run->debug.arch.address = dbg_reg->dac1;
		else if (dbsr & (DBSR_DAC2R | DBSR_DAC2W))
			run->debug.arch.address = dbg_reg->dac2;
	}

	return RESUME_HOST;
}

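/*
 * Build a minimal pt_regs from the current host context so the host
 * exception handlers invoked below see plausible register state.
 */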
static void kvmppc_fill_pt_regs(struct pt_regs *regs)
{
	ulong r1, ip, msr, lr;

	asm("mr %0, 1" : "=r"(r1));
	asm("mflr %0" : "=r"(lr));
	asm("mfmsr %0" : "=r"(msr));
	asm("bl 1f; 1: mflr %0" : "=r"(ip));

	memset(regs, 0, sizeof(*regs));
	regs->gpr[1] = r1;
	regs->nip = ip;
	regs->msr = msr;
	regs->link = lr;
}

/*
 * For interrupts that need to be handled by host interrupt handlers,
 * the corresponding host handler is called from here in a similar way
 * (but not exactly) as it would be called from the low level handlers
 * (such as those in arch/powerpc/kernel/head_fsl_booke.S).
 */
static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
				     unsigned int exit_nr)
{
	struct pt_regs regs;

	switch (exit_nr) {
	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_fill_pt_regs(&regs);
		do_IRQ(&regs);
		break;
	case BOOKE_INTERRUPT_DECREMENTER:
		kvmppc_fill_pt_regs(&regs);
		timer_interrupt(&regs);
		break;
#if defined(CONFIG_PPC_DOORBELL)
	case BOOKE_INTERRUPT_DOORBELL:
		kvmppc_fill_pt_regs(&regs);
		doorbell_exception(&regs);
		break;
#endif
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		/* FIXME */
		break;
	case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
		kvmppc_fill_pt_regs(&regs);
		performance_monitor_exception(&regs);
		break;
	case BOOKE_INTERRUPT_WATCHDOG:
		kvmppc_fill_pt_regs(&regs);
#ifdef CONFIG_BOOKE_WDT
		WatchdogException(&regs);
#else
		unknown_exception(&regs);
#endif
		break;
	case BOOKE_INTERRUPT_CRITICAL:
		kvmppc_fill_pt_regs(&regs);
		unknown_exception(&regs);
		break;
	case BOOKE_INTERRUPT_DEBUG:
		/* Save DBSR before preemption is enabled */
		vcpu->arch.dbsr = mfspr(SPRN_DBSR);
		kvmppc_clear_dbsr();
		break;
	}
}

static int kvmppc_resume_inst_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
				   enum emulation_result emulated, u32 last_inst)
{
	switch (emulated) {
	case EMULATE_AGAIN:
		return RESUME_GUEST;

	case EMULATE_FAIL:
		pr_debug("%s: load instruction from guest address %lx failed\n",
			 __func__, vcpu->arch.regs.nip);
		/* For debugging, encode the failing instruction and
		 * report it to userspace. */
		run->hw.hardware_exit_reason = ~0ULL << 32;
		run->hw.hardware_exit_reason |= last_inst;
		kvmppc_core_queue_program(vcpu, ESR_PIL);
		return RESUME_HOST;

	default:
		BUG();
	}
}

/**
 * kvmppc_handle_exit
 *
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int exit_nr)
{
	int r = RESUME_HOST;
	int s;
	int idx;
	u32 last_inst = KVM_INST_FETCH_FAILED;
	enum emulation_result emulated = EMULATE_DONE;

	/* update before a new last_exit_type is rewritten */
	kvmppc_update_timing_stats(vcpu);

	/* restart interrupts if they were meant for the host */
	kvmppc_restart_interrupt(vcpu, exit_nr);

	/*
	 * get last instruction before being preempted
	 * TODO: for e6500 check also BOOKE_INTERRUPT_LRAT_ERROR & ESR_DATA
	 */
	switch (exit_nr) {
	case BOOKE_INTERRUPT_DATA_STORAGE:
	case BOOKE_INTERRUPT_DTLB_MISS:
	case BOOKE_INTERRUPT_HV_PRIV:
		emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		break;
	case BOOKE_INTERRUPT_PROGRAM:
		/* SW breakpoints arrive as illegal instructions on HV */
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
			emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		break;
	default:
		break;
	}

	trace_kvm_exit(exit_nr, vcpu);
	guest_exit_irqoff();

	local_irq_enable();

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	if (emulated != EMULATE_DONE) {
		r = kvmppc_resume_inst_load(run, vcpu, emulated, last_inst);
		goto out;
	}

	switch (exit_nr) {
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
		kvmppc_dump_vcpu(vcpu);
		/* For debugging, send invalid exit reason to user space */
		run->hw.hardware_exit_reason = ~1ULL << 32;
		run->hw.hardware_exit_reason |= mfspr(SPRN_MCSR);
		r = RESUME_HOST;
		break;

	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DECREMENTER:
		kvmppc_account_exit(vcpu, DEC_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_WATCHDOG:
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DOORBELL:
		kvmppc_account_exit(vcpu, DBELL_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_GUEST_DBELL_CRIT:
		kvmppc_account_exit(vcpu, GDBELL_EXITS);

		/*
		 * We are here because there is a pending guest interrupt
		 * which could not be delivered as MSR_CE or MSR_ME was not
		 * set. Once we break from here we will retry delivery.
		 */
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_GUEST_DBELL:
		kvmppc_account_exit(vcpu, GDBELL_EXITS);

		/*
		 * We are here because there is a pending guest interrupt
		 * which could not be delivered as MSR_EE was not set. Once
		 * we break from here we will retry delivery.
		 */
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_HV_PRIV:
		r = emulation_exit(run, vcpu);
		break;

	case BOOKE_INTERRUPT_PROGRAM:
		if ((vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) &&
		    (last_inst == KVMPPC_INST_SW_BREAKPOINT)) {
			/*
			 * We are here because of an SW breakpoint instruction,
			 * so let's return to the host to handle it.
			 */
			r = kvmppc_handle_debug(run, vcpu);
			run->exit_reason = KVM_EXIT_DEBUG;
			kvmppc_account_exit(vcpu, DEBUG_EXITS);
			break;
		}

		if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) {
			/*
			 * Program traps generated by user-level software must
			 * be handled by the guest kernel.
			 *
			 * In GS mode, hypervisor privileged instructions trap
			 * on BOOKE_INTERRUPT_HV_PRIV, not here, so these are
			 * actual program interrupts, handled by the guest.
			 */
			kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
			r = RESUME_GUEST;
			kvmppc_account_exit(vcpu, USR_PR_INST);
			break;
		}

		r = emulation_exit(run, vcpu);
		break;

	case BOOKE_INTERRUPT_FP_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
		kvmppc_account_exit(vcpu, FP_UNAVAIL);
		r = RESUME_GUEST;
		break;

#ifdef CONFIG_SPE
	case BOOKE_INTERRUPT_SPE_UNAVAIL: {
		if (vcpu->arch.shared->msr & MSR_SPE)
			kvmppc_vcpu_enable_spe(vcpu);
		else
			kvmppc_booke_queue_irqprio(vcpu,
						   BOOKE_IRQPRIO_SPE_UNAVAIL);
		r = RESUME_GUEST;
		break;
	}

	case BOOKE_INTERRUPT_SPE_FP_DATA:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
		r = RESUME_GUEST;
		break;
#elif defined(CONFIG_SPE_POSSIBLE)
	case BOOKE_INTERRUPT_SPE_UNAVAIL:
		/*
		 * Guest wants SPE, but host kernel doesn't support it. Send
		 * an "unimplemented operation" program check to the guest.
		 */
		kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
		r = RESUME_GUEST;
		break;

	/*
	 * These really should never happen without CONFIG_SPE,
	 * as we should never enable the real MSR[SPE] in the guest.
	 */
	case BOOKE_INTERRUPT_SPE_FP_DATA:
	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
		       __func__, exit_nr, vcpu->arch.regs.nip);
		run->hw.hardware_exit_reason = exit_nr;
		r = RESUME_HOST;
		break;
#endif /* CONFIG_SPE_POSSIBLE */

	/*
	 * On cores with Vector category, KVM is loaded only if CONFIG_ALTIVEC,
	 * see kvmppc_core_check_processor_compat().
	 */
#ifdef CONFIG_ALTIVEC
	case BOOKE_INTERRUPT_ALTIVEC_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_UNAVAIL);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_ALTIVEC_ASSIST:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_ASSIST);
		r = RESUME_GUEST;
		break;
#endif

	case BOOKE_INTERRUPT_DATA_STORAGE:
		kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
					       vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, DSI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_INST_STORAGE:
		kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, ISI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_ALIGNMENT:
		kvmppc_core_queue_alignment(vcpu, vcpu->arch.fault_dear,
					    vcpu->arch.fault_esr);
		r = RESUME_GUEST;
		break;

#ifdef CONFIG_KVM_BOOKE_HV
	case BOOKE_INTERRUPT_HV_SYSCALL:
		if (!(vcpu->arch.shared->msr & MSR_PR)) {
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
		} else {
			/*
			 * hcall from guest userspace -- send privileged
			 * instruction program check.
			 */
			kvmppc_core_queue_program(vcpu, ESR_PPR);
		}

		r = RESUME_GUEST;
		break;
#else
	case BOOKE_INTERRUPT_SYSCALL:
		if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
			/* KVM PV hypercalls */
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
			r = RESUME_GUEST;
		} else {
			/* Guest syscalls */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
		}
		kvmppc_account_exit(vcpu, SYSCALL_EXITS);
		r = RESUME_GUEST;
		break;
#endif

	case BOOKE_INTERRUPT_DTLB_MISS: {
		unsigned long eaddr = vcpu->arch.fault_dear;
		int gtlb_index;
		gpa_t gpaddr;
		gfn_t gfn;

#ifdef CONFIG_KVM_E500V2
		if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
			kvmppc_map_magic(vcpu);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;

			break;
		}
#endif

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_core_queue_dtlb_miss(vcpu,
						    vcpu->arch.fault_dear,
						    vcpu->arch.fault_esr);
			kvmppc_mmu_dtlb_miss(vcpu);
			kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
			r = RESUME_GUEST;
			break;
		}

		idx = srcu_read_lock(&vcpu->kvm->srcu);

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't, and it is RAM. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;
		} else {
			/* Guest has mapped and accessed a page which is not
			 * actually RAM. */
			vcpu->arch.paddr_accessed = gpaddr;
			vcpu->arch.vaddr_accessed = eaddr;
			r = kvmppc_emulate_mmio(run, vcpu);
			kvmppc_account_exit(vcpu, MMIO_EXITS);
		}

		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	}

	case BOOKE_INTERRUPT_ITLB_MISS: {
		unsigned long eaddr = vcpu->arch.regs.nip;
		gpa_t gpaddr;
		gfn_t gfn;
		int gtlb_index;

		r = RESUME_GUEST;

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
			kvmppc_mmu_itlb_miss(vcpu);
			kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
			break;
		}

		kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);

		idx = srcu_read_lock(&vcpu->kvm->srcu);

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
		} else {
			/* Guest mapped and leaped at non-RAM! */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
		}

		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	}

	case BOOKE_INTERRUPT_DEBUG: {
		r = kvmppc_handle_debug(run, vcpu);
		if (r == RESUME_HOST)
			run->exit_reason = KVM_EXIT_DEBUG;
		kvmppc_account_exit(vcpu, DEBUG_EXITS);
		break;
	}

	default:
		printk(KERN_EMERG "exit_nr %d\n", exit_nr);
		BUG();
	}

out:
	/*
	 * To avoid clobbering exit_reason, only check for signals if we
	 * aren't already exiting to userspace for some other reason.
	 */
	if (!(r & RESUME_HOST)) {
		s = kvmppc_prepare_to_enter(vcpu);
		if (s <= 0)
			r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
		else {
			/* interrupts now hard-disabled */
			kvmppc_fix_ee_before_entry();
			kvmppc_load_guest_fp(vcpu);
			kvmppc_load_guest_altivec(vcpu);
		}
	}

	return r;
}

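/*
 * Replace TSR wholesale; re-arm the watchdog if its status bits changed
 * and re-evaluate the pending timer interrupts.
 */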
static void kvmppc_set_tsr(struct kvm_vcpu *vcpu, u32 new_tsr)
{
	u32 old_tsr = vcpu->arch.tsr;

	vcpu->arch.tsr = new_tsr;

	if ((old_tsr ^ vcpu->arch.tsr) & (TSR_ENW | TSR_WIS))
		arm_next_watchdog(vcpu);

	update_timer_ints(vcpu);
}

/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int i;
	int r;

	vcpu->arch.regs.nip = 0;
	vcpu->arch.shared->pir = vcpu->vcpu_id;
	kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
	kvmppc_set_msr(vcpu, 0);

#ifndef CONFIG_KVM_BOOKE_HV
	vcpu->arch.shadow_msr = MSR_USER | MSR_IS | MSR_DS;
	vcpu->arch.shadow_pid = 1;
	vcpu->arch.shared->msr = 0;
#endif

	/* Eye-catching numbers so we know if the guest takes an interrupt
	 * before it's programmed its own IVPR/IVORs. */
	vcpu->arch.ivpr = 0x55550000;
	for (i = 0; i < BOOKE_IRQPRIO_MAX; i++)
		vcpu->arch.ivor[i] = 0x7700 | i * 4;

	kvmppc_init_timing_stats(vcpu);

	r = kvmppc_core_vcpu_setup(vcpu);
	kvmppc_sanity_check(vcpu);
	return r;
}

int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
{
	/* setup watchdog timer once */
	spin_lock_init(&vcpu->arch.wdt_lock);
	timer_setup(&vcpu->arch.wdt_timer, kvmppc_watchdog_func, 0);

	/*
	 * Clear DBSR.MRR to avoid a guest debug interrupt, as this is of
	 * host interest only.
	 */
	mtspr(SPRN_DBSR, DBSR_MRR);
	return 0;
}

void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	del_timer_sync(&vcpu->arch.wdt_timer);
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu_load(vcpu);

	regs->pc = vcpu->arch.regs.nip;
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = vcpu->arch.regs.ctr;
	regs->lr = vcpu->arch.regs.link;
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = vcpu->arch.shared->msr;
	regs->srr0 = kvmppc_get_srr0(vcpu);
	regs->srr1 = kvmppc_get_srr1(vcpu);
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = kvmppc_get_sprg0(vcpu);
	regs->sprg1 = kvmppc_get_sprg1(vcpu);
	regs->sprg2 = kvmppc_get_sprg2(vcpu);
	regs->sprg3 = kvmppc_get_sprg3(vcpu);
	regs->sprg4 = kvmppc_get_sprg4(vcpu);
	regs->sprg5 = kvmppc_get_sprg5(vcpu);
	regs->sprg6 = kvmppc_get_sprg6(vcpu);
	regs->sprg7 = kvmppc_get_sprg7(vcpu);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu_load(vcpu);

	vcpu->arch.regs.nip = regs->pc;
	kvmppc_set_cr(vcpu, regs->cr);
	vcpu->arch.regs.ctr = regs->ctr;
	vcpu->arch.regs.link = regs->lr;
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	kvmppc_set_srr0(vcpu, regs->srr0);
	kvmppc_set_srr1(vcpu, regs->srr1);
	kvmppc_set_pid(vcpu, regs->pid);
	kvmppc_set_sprg0(vcpu, regs->sprg0);
	kvmppc_set_sprg1(vcpu, regs->sprg1);
	kvmppc_set_sprg2(vcpu, regs->sprg2);
	kvmppc_set_sprg3(vcpu, regs->sprg3);
	kvmppc_set_sprg4(vcpu, regs->sprg4);
	kvmppc_set_sprg5(vcpu, regs->sprg5);
	kvmppc_set_sprg6(vcpu, regs->sprg6);
	kvmppc_set_sprg7(vcpu, regs->sprg7);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	vcpu_put(vcpu);
	return 0;
}

static void get_sregs_base(struct kvm_vcpu *vcpu,
			   struct kvm_sregs *sregs)
{
	u64 tb = get_tb();

	sregs->u.e.features |= KVM_SREGS_E_BASE;

	sregs->u.e.csrr0 = vcpu->arch.csrr0;
	sregs->u.e.csrr1 = vcpu->arch.csrr1;
	sregs->u.e.mcsr = vcpu->arch.mcsr;
	sregs->u.e.esr = kvmppc_get_esr(vcpu);
	sregs->u.e.dear = kvmppc_get_dar(vcpu);
	sregs->u.e.tsr = vcpu->arch.tsr;
	sregs->u.e.tcr = vcpu->arch.tcr;
	sregs->u.e.dec = kvmppc_get_dec(vcpu, tb);
	sregs->u.e.tb = tb;
	sregs->u.e.vrsave = vcpu->arch.vrsave;
}

static int set_sregs_base(struct kvm_vcpu *vcpu,
			  struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_BASE))
		return 0;

	vcpu->arch.csrr0 = sregs->u.e.csrr0;
	vcpu->arch.csrr1 = sregs->u.e.csrr1;
	vcpu->arch.mcsr = sregs->u.e.mcsr;
	kvmppc_set_esr(vcpu, sregs->u.e.esr);
	kvmppc_set_dar(vcpu, sregs->u.e.dear);
	vcpu->arch.vrsave = sregs->u.e.vrsave;
	kvmppc_set_tcr(vcpu, sregs->u.e.tcr);

	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_DEC) {
		vcpu->arch.dec = sregs->u.e.dec;
		kvmppc_emulate_dec(vcpu);
	}

	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR)
		kvmppc_set_tsr(vcpu, sregs->u.e.tsr);

	return 0;
}

static void get_sregs_arch206(struct kvm_vcpu *vcpu,
			      struct kvm_sregs *sregs)
{
	sregs->u.e.features |= KVM_SREGS_E_ARCH206;

	sregs->u.e.pir = vcpu->vcpu_id;
	sregs->u.e.mcsrr0 = vcpu->arch.mcsrr0;
	sregs->u.e.mcsrr1 = vcpu->arch.mcsrr1;
	sregs->u.e.decar = vcpu->arch.decar;
	sregs->u.e.ivpr = vcpu->arch.ivpr;
}

static int set_sregs_arch206(struct kvm_vcpu *vcpu,
			     struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_ARCH206))
		return 0;

	if (sregs->u.e.pir != vcpu->vcpu_id)
		return -EINVAL;

	vcpu->arch.mcsrr0 = sregs->u.e.mcsrr0;
	vcpu->arch.mcsrr1 = sregs->u.e.mcsrr1;
	vcpu->arch.decar = sregs->u.e.decar;
	vcpu->arch.ivpr = sregs->u.e.ivpr;

	return 0;
}

int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	sregs->u.e.features |= KVM_SREGS_E_IVOR;

	sregs->u.e.ivor_low[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
	sregs->u.e.ivor_low[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
	sregs->u.e.ivor_low[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
	sregs->u.e.ivor_low[3] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
	sregs->u.e.ivor_low[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
	sregs->u.e.ivor_low[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
	sregs->u.e.ivor_low[6] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
	sregs->u.e.ivor_low[7] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
	sregs->u.e.ivor_low[8] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
	sregs->u.e.ivor_low[9] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
	sregs->u.e.ivor_low[10] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
	sregs->u.e.ivor_low[11] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
	sregs->u.e.ivor_low[12] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
	sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
	sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
	sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
	return 0;
}

int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
		return 0;

	vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = sregs->u.e.ivor_low[0];
	vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = sregs->u.e.ivor_low[1];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = sregs->u.e.ivor_low[2];
	vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = sregs->u.e.ivor_low[3];
	vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = sregs->u.e.ivor_low[4];
	vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = sregs->u.e.ivor_low[5];
	vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = sregs->u.e.ivor_low[6];
	vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = sregs->u.e.ivor_low[7];
	vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = sregs->u.e.ivor_low[8];
	vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = sregs->u.e.ivor_low[9];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = sregs->u.e.ivor_low[10];
	vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = sregs->u.e.ivor_low[11];
	vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = sregs->u.e.ivor_low[12];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = sregs->u.e.ivor_low[13];
	vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = sregs->u.e.ivor_low[14];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = sregs->u.e.ivor_low[15];

	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int ret;

	vcpu_load(vcpu);

	sregs->pvr = vcpu->arch.pvr;

	get_sregs_base(vcpu, sregs);
	get_sregs_arch206(vcpu, sregs);
	ret = vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);

	vcpu_put(vcpu);
	return ret;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int ret = -EINVAL;

	vcpu_load(vcpu);
	if (vcpu->arch.pvr != sregs->pvr)
		goto out;

	ret = set_sregs_base(vcpu, sregs);
	if (ret < 0)
		goto out;

	ret = set_sregs_arch206(vcpu, sregs);
	if (ret < 0)
		goto out;

	ret = vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);

out:
	vcpu_put(vcpu);
	return ret;
}

int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
		       union kvmppc_one_reg *val)
{
	int r = 0;

	switch (id) {
	case KVM_REG_PPC_IAC1:
		*val = get_reg_val(id, vcpu->arch.dbg_reg.iac1);
		break;
	case KVM_REG_PPC_IAC2:
		*val = get_reg_val(id, vcpu->arch.dbg_reg.iac2);
		break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	case KVM_REG_PPC_IAC3:
		*val = get_reg_val(id, vcpu->arch.dbg_reg.iac3);
		break;
	case KVM_REG_PPC_IAC4:
		*val = get_reg_val(id, vcpu->arch.dbg_reg.iac4);
		break;
#endif
	case KVM_REG_PPC_DAC1:
		*val = get_reg_val(id, vcpu->arch.dbg_reg.dac1);
		break;
	case KVM_REG_PPC_DAC2:
		*val = get_reg_val(id, vcpu->arch.dbg_reg.dac2);
		break;
	case KVM_REG_PPC_EPR: {
		u32 epr = kvmppc_get_epr(vcpu);
		*val = get_reg_val(id, epr);
		break;
	}
#if defined(CONFIG_64BIT)
	case KVM_REG_PPC_EPCR:
		*val = get_reg_val(id, vcpu->arch.epcr);
		break;
#endif
	case KVM_REG_PPC_TCR:
		*val = get_reg_val(id, vcpu->arch.tcr);
		break;
	case KVM_REG_PPC_TSR:
		*val = get_reg_val(id, vcpu->arch.tsr);
		break;
	case KVM_REG_PPC_DEBUG_INST:
		*val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT);
		break;
	case KVM_REG_PPC_VRSAVE:
		*val = get_reg_val(id, vcpu->arch.vrsave);
		break;
	default:
		r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val);
		break;
	}

	return r;
}

int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
		       union kvmppc_one_reg *val)
{
	int r = 0;

	switch (id) {
	case KVM_REG_PPC_IAC1:
		vcpu->arch.dbg_reg.iac1 = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_IAC2:
		vcpu->arch.dbg_reg.iac2 = set_reg_val(id, *val);
		break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	case KVM_REG_PPC_IAC3:
		vcpu->arch.dbg_reg.iac3 = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_IAC4:
		vcpu->arch.dbg_reg.iac4 = set_reg_val(id, *val);
		break;
#endif
	case KVM_REG_PPC_DAC1:
		vcpu->arch.dbg_reg.dac1 = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_DAC2:
		vcpu->arch.dbg_reg.dac2 = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_EPR: {
		u32 new_epr = set_reg_val(id, *val);
		kvmppc_set_epr(vcpu, new_epr);
		break;
	}
#if defined(CONFIG_64BIT)
	case KVM_REG_PPC_EPCR: {
		u32 new_epcr = set_reg_val(id, *val);
		kvmppc_set_epcr(vcpu, new_epcr);
		break;
	}
#endif
	case KVM_REG_PPC_OR_TSR: {
		u32 tsr_bits = set_reg_val(id, *val);
		kvmppc_set_tsr_bits(vcpu, tsr_bits);
		break;
	}
	case KVM_REG_PPC_CLEAR_TSR: {
		u32 tsr_bits = set_reg_val(id, *val);
		kvmppc_clr_tsr_bits(vcpu, tsr_bits);
		break;
	}
	case KVM_REG_PPC_TSR: {
		u32 tsr = set_reg_val(id, *val);
		kvmppc_set_tsr(vcpu, tsr);
		break;
	}
	case KVM_REG_PPC_TCR: {
		u32 tcr = set_reg_val(id, *val);
		kvmppc_set_tcr(vcpu, tcr);
		break;
	}
	case KVM_REG_PPC_VRSAVE:
		vcpu->arch.vrsave = set_reg_val(id, *val);
		break;
	default:
		r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val);
		break;
	}

	return r;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	int r;

	vcpu_load(vcpu);
	r = kvmppc_core_vcpu_translate(vcpu, tr);
	vcpu_put(vcpu);
	return r;
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return -ENOTSUPP;
}

void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			      struct kvm_memory_slot *dont)
{
}

int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			       unsigned long npages)
{
	return 0;
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				      struct kvm_memory_slot *memslot,
				      const struct kvm_userspace_memory_region *mem)
{
	return 0;
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				      const struct kvm_userspace_memory_region *mem,
				      const struct kvm_memory_slot *old,
				      const struct kvm_memory_slot *new,
				      enum kvm_mr_change change)
{
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
}
1833
void kvmppc_set_epcr(struct kvm_vcpu *vcpu, u32 new_epcr)
{
#if defined(CONFIG_64BIT)
	vcpu->arch.epcr = new_epcr;
#ifdef CONFIG_KVM_BOOKE_HV
	vcpu->arch.shadow_epcr &= ~SPRN_EPCR_GICM;
	if (vcpu->arch.epcr & SPRN_EPCR_ICM)
		vcpu->arch.shadow_epcr |= SPRN_EPCR_GICM;
#endif
#endif
}

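/*
 * TCR controls the guest decrementer and watchdog (periods, enables,
 * auto-reload). A new value can move the next watchdog expiry and
 * change which timer interrupts are deliverable, so rearm the watchdog
 * and re-evaluate pending timer interrupts.
 */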
void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr)
{
	vcpu->arch.tcr = new_tcr;
	arm_next_watchdog(vcpu);
	update_timer_ints(vcpu);
}

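/*
 * Set status bits in the guest TSR. The write barrier orders the TSR
 * update before KVM_REQ_PENDING_TIMER becomes visible, so a vcpu thread
 * that sees the request also sees the new TSR bits; the kick then
 * forces a prompt re-check.
 */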
void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
	set_bits(tsr_bits, &vcpu->arch.tsr);
	smp_wmb();
	kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
	kvm_vcpu_kick(vcpu);
}

void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
	clear_bits(tsr_bits, &vcpu->arch.tsr);

	/*
	 * We may have stopped the watchdog due to
	 * being stuck on final expiration.
	 */
	if (tsr_bits & (TSR_ENW | TSR_WIS))
		arm_next_watchdog(vcpu);

	update_timer_ints(vcpu);
}

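/*
 * Decrementer expiry. With TCR[ARE] (auto-reload enable) set, the BookE
 * decrementer reloads from DECAR and keeps counting, so re-seed the
 * emulated DEC before posting TSR[DIS].
 */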
void kvmppc_decrementer_func(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.tcr & TCR_ARE) {
		vcpu->arch.dec = vcpu->arch.decar;
		kvmppc_emulate_dec(vcpu);
	}

	kvmppc_set_tsr_bits(vcpu, TSR_DIS);
}

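/*
 * Program one hardware instruction breakpoint into the shadow debug
 * registers: IACn holds the compare address, DBCR0[IACn] enables that
 * comparator, and DBCR0[IDM] turns on internal debug mode so the event
 * is actually delivered.
 */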
static int kvmppc_booke_add_breakpoint(struct debug_reg *dbg_reg,
				       uint64_t addr, int index)
{
	switch (index) {
	case 0:
		dbg_reg->dbcr0 |= DBCR0_IAC1;
		dbg_reg->iac1 = addr;
		break;
	case 1:
		dbg_reg->dbcr0 |= DBCR0_IAC2;
		dbg_reg->iac2 = addr;
		break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	case 2:
		dbg_reg->dbcr0 |= DBCR0_IAC3;
		dbg_reg->iac3 = addr;
		break;
	case 3:
		dbg_reg->dbcr0 |= DBCR0_IAC4;
		dbg_reg->iac4 = addr;
		break;
#endif
	default:
		return -EINVAL;
	}

	dbg_reg->dbcr0 |= DBCR0_IDM;
	return 0;
}

static int kvmppc_booke_add_watchpoint(struct debug_reg *dbg_reg, uint64_t addr,
				       int type, int index)
{
	switch (index) {
	case 0:
		if (type & KVMPPC_DEBUG_WATCH_READ)
			dbg_reg->dbcr0 |= DBCR0_DAC1R;
		if (type & KVMPPC_DEBUG_WATCH_WRITE)
			dbg_reg->dbcr0 |= DBCR0_DAC1W;
		dbg_reg->dac1 = addr;
		break;
	case 1:
		if (type & KVMPPC_DEBUG_WATCH_READ)
			dbg_reg->dbcr0 |= DBCR0_DAC2R;
		if (type & KVMPPC_DEBUG_WATCH_WRITE)
			dbg_reg->dbcr0 |= DBCR0_DAC2W;
		dbg_reg->dac2 = addr;
		break;
	default:
		return -EINVAL;
	}

	dbg_reg->dbcr0 |= DBCR0_IDM;
	return 0;
}
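
/*
 * On BookE-HV, the MSRP register makes individual guest MSR bits
 * hypervisor-privileged: with MSRP[UCLEP]/[DEP]/[PMMP] set, guest
 * writes to MSR[UCLE]/[DE]/[PMM] trap instead of taking effect. This
 * is how the host keeps MSR[DE] to itself while debugging a guest.
 */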
void kvm_guest_protect_msr(struct kvm_vcpu *vcpu, ulong prot_bitmap, bool set)
{
	/* XXX: Add similar MSR protection for BookE-PR */
#ifdef CONFIG_KVM_BOOKE_HV
	BUG_ON(prot_bitmap & ~(MSRP_UCLEP | MSRP_DEP | MSRP_PMMP));
	if (set) {
		if (prot_bitmap & MSR_UCLE)
			vcpu->arch.shadow_msrp |= MSRP_UCLEP;
		if (prot_bitmap & MSR_DE)
			vcpu->arch.shadow_msrp |= MSRP_DEP;
		if (prot_bitmap & MSR_PMM)
			vcpu->arch.shadow_msrp |= MSRP_PMMP;
	} else {
		if (prot_bitmap & MSR_UCLE)
			vcpu->arch.shadow_msrp &= ~MSRP_UCLEP;
		if (prot_bitmap & MSR_DE)
			vcpu->arch.shadow_msrp &= ~MSRP_DEP;
		if (prot_bitmap & MSR_PMM)
			vcpu->arch.shadow_msrp &= ~MSRP_PMMP;
	}
#endif
}

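/*
 * Translate a guest effective address into a guest physical address by
 * consulting the guest-visible TLB. On e500v2, the magic (shared) page
 * is reachable by the guest kernel without a TLB entry, so it gets a
 * fast path here.
 */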
int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
		 enum xlate_readwrite xlrw, struct kvmppc_pte *pte)
{
	int gtlb_index;
	gpa_t gpaddr;

#ifdef CONFIG_KVM_E500V2
	if (!(vcpu->arch.shared->msr & MSR_PR) &&
	    (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
		pte->eaddr = eaddr;
		pte->raddr = (vcpu->arch.magic_page_pa & PAGE_MASK) |
			     (eaddr & ~PAGE_MASK);
		pte->vpage = eaddr >> PAGE_SHIFT;
		pte->may_read = true;
		pte->may_write = true;
		pte->may_execute = true;

		return 0;
	}
#endif

	/* Check the guest TLB. */
	switch (xlid) {
	case XLATE_INST:
		gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
		break;
	case XLATE_DATA:
		gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
		break;
	default:
		BUG();
	}

	/* Do we have a TLB entry at all? */
	if (gtlb_index < 0)
		return -ENOENT;

	gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);

	pte->eaddr = eaddr;
	pte->raddr = (gpaddr & PAGE_MASK) | (eaddr & ~PAGE_MASK);
	pte->vpage = eaddr >> PAGE_SHIFT;

	/* XXX read permissions from the guest TLB */
	pte->may_read = true;
	pte->may_write = true;
	pte->may_execute = true;

	return 0;
}

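/*
 * KVM_SET_GUEST_DEBUG: enable or disable host-directed debugging of the
 * guest. Disabling clears DBCR0 and releases MSR[DE] protection;
 * enabling protects MSR[DE], optionally requests single-stepping via
 * DBCR0[IC], and programs any hardware breakpoints/watchpoints into the
 * shadow debug registers.
 */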
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	struct debug_reg *dbg_reg;
	int n, b = 0, w = 0;
	int ret = 0;

	vcpu_load(vcpu);

	if (!(dbg->control & KVM_GUESTDBG_ENABLE)) {
		vcpu->arch.dbg_reg.dbcr0 = 0;
		vcpu->guest_debug = 0;
		kvm_guest_protect_msr(vcpu, MSR_DE, false);
		goto out;
	}

	kvm_guest_protect_msr(vcpu, MSR_DE, true);
	vcpu->guest_debug = dbg->control;
	vcpu->arch.dbg_reg.dbcr0 = 0;

	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		vcpu->arch.dbg_reg.dbcr0 |= DBCR0_IDM | DBCR0_IC;

	/* Code below handles only HW breakpoints */
	dbg_reg = &(vcpu->arch.dbg_reg);

#ifdef CONFIG_KVM_BOOKE_HV
	/*
	 * On BookE-HV (e500mc) the guest is always executed with MSR.GS=1;
	 * DBCR1 and DBCR2 are set to trigger debug events when MSR.PR is 0.
	 */
	dbg_reg->dbcr1 = 0;
	dbg_reg->dbcr2 = 0;
#else
	/*
	 * On BookE-PR (e500v2) the guest is always executed with MSR.PR=1,
	 * so we set DBCR1 and DBCR2 to trigger debug events only when
	 * MSR.PR is set.
	 */
	dbg_reg->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US | DBCR1_IAC3US |
			 DBCR1_IAC4US;
	dbg_reg->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#endif

	if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
		goto out;

	ret = -EINVAL;
	for (n = 0; n < (KVMPPC_BOOKE_IAC_NUM + KVMPPC_BOOKE_DAC_NUM); n++) {
		uint64_t addr = dbg->arch.bp[n].addr;
		uint32_t type = dbg->arch.bp[n].type;

		if (type == KVMPPC_DEBUG_NONE)
			continue;

		if (type & ~(KVMPPC_DEBUG_WATCH_READ |
			     KVMPPC_DEBUG_WATCH_WRITE |
			     KVMPPC_DEBUG_BREAKPOINT))
			goto out;

		if (type & KVMPPC_DEBUG_BREAKPOINT) {
			/* Setting H/W breakpoint */
			if (kvmppc_booke_add_breakpoint(dbg_reg, addr, b++))
				goto out;
		} else {
			/* Setting H/W watchpoint */
			if (kvmppc_booke_add_watchpoint(dbg_reg, addr,
							type, w++))
				goto out;
		}
	}

	ret = 0;
out:
	vcpu_put(vcpu);
	return ret;
}

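/*
 * Common BookE load/put hooks. The vcpu pointer is stashed in
 * thread_struct so the low-level exception code can locate the current
 * vcpu; on put, any pending debug event is cleared from DBSR (see
 * kvmppc_clear_dbsr()).
 */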
void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->cpu = smp_processor_id();
	current->thread.kvm_vcpu = vcpu;
}

void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
{
	current->thread.kvm_vcpu = NULL;
	vcpu->cpu = -1;

	/* Clear pending debug event in DBSR */
	kvmppc_clear_dbsr();
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
}

int kvmppc_core_init_vm(struct kvm *kvm)
{
	return kvm->arch.kvm_ops->init_vm(kvm);
}

struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	return kvm->arch.kvm_ops->vcpu_create(kvm, id);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
	kvm->arch.kvm_ops->destroy_vm(kvm);
}

void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
}

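/*
 * Module init for the non-HV case: allocate a 64KB block for our guest
 * exception handlers and copy each handler to the same IVORn offset the
 * host uses, so only IVPR has to be swapped on guest/host transitions.
 */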
int __init kvmppc_booke_init(void)
{
#ifndef CONFIG_KVM_BOOKE_HV
	unsigned long ivor[16];
	unsigned long *handler = kvmppc_booke_handler_addr;
	unsigned long max_ivor = 0;
	unsigned long handler_len;
	int i;

	/* We install our own exception handlers by hijacking IVPR. IVPR must
	 * be 64KB aligned (only its upper 16 bits are implemented), so we
	 * need a 64KB allocation. */
	kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
						 VCPU_SIZE_ORDER);
	if (!kvmppc_booke_handlers)
		return -ENOMEM;

	/* XXX make sure our handlers are smaller than Linux's */

	/* Copy our interrupt handlers to match host IVORs. That way we don't
	 * have to swap the IVORs on every guest/host transition. */
	ivor[0] = mfspr(SPRN_IVOR0);
	ivor[1] = mfspr(SPRN_IVOR1);
	ivor[2] = mfspr(SPRN_IVOR2);
	ivor[3] = mfspr(SPRN_IVOR3);
	ivor[4] = mfspr(SPRN_IVOR4);
	ivor[5] = mfspr(SPRN_IVOR5);
	ivor[6] = mfspr(SPRN_IVOR6);
	ivor[7] = mfspr(SPRN_IVOR7);
	ivor[8] = mfspr(SPRN_IVOR8);
	ivor[9] = mfspr(SPRN_IVOR9);
	ivor[10] = mfspr(SPRN_IVOR10);
	ivor[11] = mfspr(SPRN_IVOR11);
	ivor[12] = mfspr(SPRN_IVOR12);
	ivor[13] = mfspr(SPRN_IVOR13);
	ivor[14] = mfspr(SPRN_IVOR14);
	ivor[15] = mfspr(SPRN_IVOR15);

	for (i = 0; i < 16; i++) {
		/* Remember which handler sits at the highest IVOR offset;
		 * the icache flush below must reach past the end of it. */
		if (ivor[i] > ivor[max_ivor])
			max_ivor = i;

		handler_len = handler[i + 1] - handler[i];
		memcpy((void *)kvmppc_booke_handlers + ivor[i],
		       (void *)handler[i], handler_len);
	}

	handler_len = handler[max_ivor + 1] - handler[max_ivor];
	flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers +
			   ivor[max_ivor] + handler_len);
#endif /* !BOOKE_HV */
	return 0;
}

void __exit kvmppc_booke_exit(void)
{
	free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
	kvm_exit();
}