1 /*
2 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
3 *
4 * Authors:
5 * Alexander Graf <agraf@suse.de>
6 * Kevin Wolf <mail@kevin-wolf.de>
7 *
8 * Description:
9 * This file is derived from arch/powerpc/kvm/44x.c,
10 * by Hollis Blanchard <hollisb@us.ibm.com>.
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License, version 2, as
14 * published by the Free Software Foundation.
15 */
16
17 #include <linux/kvm_host.h>
18 #include <linux/err.h>
19 #include <linux/export.h>
20 #include <linux/slab.h>
21 #include <linux/module.h>
22 #include <linux/miscdevice.h>
23 #include <linux/gfp.h>
24 #include <linux/sched.h>
25 #include <linux/vmalloc.h>
26 #include <linux/highmem.h>
27
28 #include <asm/reg.h>
29 #include <asm/cputable.h>
30 #include <asm/cacheflush.h>
31 #include <linux/uaccess.h>
32 #include <asm/io.h>
33 #include <asm/kvm_ppc.h>
34 #include <asm/kvm_book3s.h>
35 #include <asm/mmu_context.h>
36 #include <asm/page.h>
37 #include <asm/xive.h>
38
39 #include "book3s.h"
40 #include "trace.h"
41
42 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
43
44 /* #define EXIT_DEBUG */
45
46 struct kvm_stats_debugfs_item debugfs_entries[] = {
47 { "exits", VCPU_STAT(sum_exits) },
48 { "mmio", VCPU_STAT(mmio_exits) },
49 { "sig", VCPU_STAT(signal_exits) },
50 { "sysc", VCPU_STAT(syscall_exits) },
51 { "inst_emu", VCPU_STAT(emulated_inst_exits) },
52 { "dec", VCPU_STAT(dec_exits) },
53 { "ext_intr", VCPU_STAT(ext_intr_exits) },
54 { "queue_intr", VCPU_STAT(queue_intr) },
55 { "halt_poll_success_ns", VCPU_STAT(halt_poll_success_ns) },
56 { "halt_poll_fail_ns", VCPU_STAT(halt_poll_fail_ns) },
57 { "halt_wait_ns", VCPU_STAT(halt_wait_ns) },
58 { "halt_successful_poll", VCPU_STAT(halt_successful_poll), },
59 { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), },
60 { "halt_successful_wait", VCPU_STAT(halt_successful_wait) },
61 { "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
62 { "halt_wakeup", VCPU_STAT(halt_wakeup) },
63 { "pf_storage", VCPU_STAT(pf_storage) },
64 { "sp_storage", VCPU_STAT(sp_storage) },
65 { "pf_instruc", VCPU_STAT(pf_instruc) },
66 { "sp_instruc", VCPU_STAT(sp_instruc) },
67 { "ld", VCPU_STAT(ld) },
68 { "ld_slow", VCPU_STAT(ld_slow) },
69 { "st", VCPU_STAT(st) },
70 { "st_slow", VCPU_STAT(st_slow) },
71 { "pthru_all", VCPU_STAT(pthru_all) },
72 { "pthru_host", VCPU_STAT(pthru_host) },
73 { "pthru_bad_aff", VCPU_STAT(pthru_bad_aff) },
74 { NULL }
75 };
76
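/*
 * Undo the split real-mode PC fixup: if the split hack is active, strip the
 * hack offset from the PC (when present) and clear the hflag.
 */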
77 void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu)
78 {
79 if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) {
80 ulong pc = kvmppc_get_pc(vcpu);
81 if ((pc & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
82 kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK);
83 vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK;
84 }
85 }
86 EXPORT_SYMBOL_GPL(kvmppc_unfixup_split_real);
87
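/* PR guests take interrupts relative to the guest-visible HIOR; HV uses 0. */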
88 static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
89 {
90 if (!is_kvmppc_hv_enabled(vcpu->kvm))
91 return to_book3s(vcpu)->hior;
92 return 0;
93 }
94
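/*
 * For PR guests, mirror whether any exception is still pending into the
 * shared-page int_pending flag; HV guests do not need this.
 */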
95 static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
96 unsigned long pending_now, unsigned long old_pending)
97 {
98 if (is_kvmppc_hv_enabled(vcpu->kvm))
99 return;
100 if (pending_now)
101 kvmppc_set_int_pending(vcpu, 1);
102 else if (old_pending)
103 kvmppc_set_int_pending(vcpu, 0);
104 }
105
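/*
 * A PR guest is in a critical section when its critical indicator equals r1
 * while running in supervisor mode; maskable interrupts are held off there.
 */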
106 static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
107 {
108 ulong crit_raw;
109 ulong crit_r1;
110 bool crit;
111
112 if (is_kvmppc_hv_enabled(vcpu->kvm))
113 return false;
114
115 crit_raw = kvmppc_get_critical(vcpu);
116 crit_r1 = kvmppc_get_gpr(vcpu, 1);
117
118 /* Truncate crit indicators in 32 bit mode */
119 if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
120 crit_raw &= 0xffffffff;
121 crit_r1 &= 0xffffffff;
122 }
123
124 /* Critical section when crit == r1 */
125 crit = (crit_raw == crit_r1);
126 /* ... and we're in supervisor mode */
127 crit = crit && !(kvmppc_get_msr(vcpu) & MSR_PR);
128
129 return crit;
130 }
131
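/*
 * Deliver an interrupt to the guest: save PC/MSR in SRR0/SRR1 (folding the
 * supplied flags into SRR1), branch to the vector (offset by HIOR for PR
 * guests) and let the MMU callback compute the new MSR.
 */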
132 void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
133 {
134 kvmppc_unfixup_split_real(vcpu);
135 kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu));
136 kvmppc_set_srr1(vcpu, (kvmppc_get_msr(vcpu) & ~0x783f0000ul) | flags);
137 kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec);
138 vcpu->arch.mmu.reset_msr(vcpu);
139 }
140
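/* Map an interrupt vector to its Book3S delivery priority. */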
141 static int kvmppc_book3s_vec2irqprio(unsigned int vec)
142 {
143 unsigned int prio;
144
145 switch (vec) {
146 case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET; break;
147 case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK; break;
148 case 0x300: prio = BOOK3S_IRQPRIO_DATA_STORAGE; break;
149 case 0x380: prio = BOOK3S_IRQPRIO_DATA_SEGMENT; break;
150 case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE; break;
151 case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT; break;
152 case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL; break;
153 case 0x501: prio = BOOK3S_IRQPRIO_EXTERNAL_LEVEL; break;
154 case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT; break;
155 case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM; break;
156 case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL; break;
157 case 0x900: prio = BOOK3S_IRQPRIO_DECREMENTER; break;
158 case 0xc00: prio = BOOK3S_IRQPRIO_SYSCALL; break;
159 case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG; break;
160 case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC; break;
161 case 0xf40: prio = BOOK3S_IRQPRIO_VSX; break;
162 case 0xf60: prio = BOOK3S_IRQPRIO_FAC_UNAVAIL; break;
163 default: prio = BOOK3S_IRQPRIO_MAX; break;
164 }
165
166 return prio;
167 }
168
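/*
 * Drop a queued exception and refresh the guest-visible interrupt-pending
 * state.
 */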
169 void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
170 unsigned int vec)
171 {
172 unsigned long old_pending = vcpu->arch.pending_exceptions;
173
174 clear_bit(kvmppc_book3s_vec2irqprio(vec),
175 &vcpu->arch.pending_exceptions);
176
177 kvmppc_update_int_pending(vcpu, vcpu->arch.pending_exceptions,
178 old_pending);
179 }
180
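/* Mark an exception as pending for delivery on the next guest entry. */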
181 void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
182 {
183 vcpu->stat.queue_intr++;
184
185 set_bit(kvmppc_book3s_vec2irqprio(vec),
186 &vcpu->arch.pending_exceptions);
187 #ifdef EXIT_DEBUG
188 printk(KERN_INFO "Queueing interrupt %x\n", vec);
189 #endif
190 }
191 EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio);
192
193 void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
194 {
195 /* might as well deliver this straight away */
196 kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags);
197 }
198 EXPORT_SYMBOL_GPL(kvmppc_core_queue_program);
199
200 void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu)
201 {
202 /* might as well deliver this straight away */
203 kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, 0);
204 }
205
206 void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu)
207 {
208 /* might as well deliver this straight away */
209 kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_ALTIVEC, 0);
210 }
211
212 void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu)
213 {
214 /* might as well deliver this straight away */
215 kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_VSX, 0);
216 }
217
218 void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
219 {
220 kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
221 }
222 EXPORT_SYMBOL_GPL(kvmppc_core_queue_dec);
223
224 int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
225 {
226 return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
227 }
228 EXPORT_SYMBOL_GPL(kvmppc_core_pending_dec);
229
230 void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
231 {
232 kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
233 }
234 EXPORT_SYMBOL_GPL(kvmppc_core_dequeue_dec);
235
236 void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
237 struct kvm_interrupt *irq)
238 {
239 unsigned int vec = BOOK3S_INTERRUPT_EXTERNAL;
240
241 if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
242 vec = BOOK3S_INTERRUPT_EXTERNAL_LEVEL;
243
244 kvmppc_book3s_queue_irqprio(vcpu, vec);
245 }
246
247 void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
248 {
249 kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
250 kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
251 }
252
253 void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong dar,
254 ulong flags)
255 {
256 kvmppc_set_dar(vcpu, dar);
257 kvmppc_set_dsisr(vcpu, flags);
258 kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE, 0);
259 }
260 EXPORT_SYMBOL_GPL(kvmppc_core_queue_data_storage);
261
262 void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong flags)
263 {
264 kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_INST_STORAGE, flags);
265 }
266 EXPORT_SYMBOL_GPL(kvmppc_core_queue_inst_storage);
267
268 static int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu,
269 unsigned int priority)
270 {
271 int deliver = 1;
272 int vec = 0;
273 bool crit = kvmppc_critical_section(vcpu);
274
275 switch (priority) {
276 case BOOK3S_IRQPRIO_DECREMENTER:
277 deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
278 vec = BOOK3S_INTERRUPT_DECREMENTER;
279 break;
280 case BOOK3S_IRQPRIO_EXTERNAL:
281 case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
282 deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
283 vec = BOOK3S_INTERRUPT_EXTERNAL;
284 break;
285 case BOOK3S_IRQPRIO_SYSTEM_RESET:
286 vec = BOOK3S_INTERRUPT_SYSTEM_RESET;
287 break;
288 case BOOK3S_IRQPRIO_MACHINE_CHECK:
289 vec = BOOK3S_INTERRUPT_MACHINE_CHECK;
290 break;
291 case BOOK3S_IRQPRIO_DATA_STORAGE:
292 vec = BOOK3S_INTERRUPT_DATA_STORAGE;
293 break;
294 case BOOK3S_IRQPRIO_INST_STORAGE:
295 vec = BOOK3S_INTERRUPT_INST_STORAGE;
296 break;
297 case BOOK3S_IRQPRIO_DATA_SEGMENT:
298 vec = BOOK3S_INTERRUPT_DATA_SEGMENT;
299 break;
300 case BOOK3S_IRQPRIO_INST_SEGMENT:
301 vec = BOOK3S_INTERRUPT_INST_SEGMENT;
302 break;
303 case BOOK3S_IRQPRIO_ALIGNMENT:
304 vec = BOOK3S_INTERRUPT_ALIGNMENT;
305 break;
306 case BOOK3S_IRQPRIO_PROGRAM:
307 vec = BOOK3S_INTERRUPT_PROGRAM;
308 break;
309 case BOOK3S_IRQPRIO_VSX:
310 vec = BOOK3S_INTERRUPT_VSX;
311 break;
312 case BOOK3S_IRQPRIO_ALTIVEC:
313 vec = BOOK3S_INTERRUPT_ALTIVEC;
314 break;
315 case BOOK3S_IRQPRIO_FP_UNAVAIL:
316 vec = BOOK3S_INTERRUPT_FP_UNAVAIL;
317 break;
318 case BOOK3S_IRQPRIO_SYSCALL:
319 vec = BOOK3S_INTERRUPT_SYSCALL;
320 break;
321 case BOOK3S_IRQPRIO_DEBUG:
322 vec = BOOK3S_INTERRUPT_TRACE;
323 break;
324 case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR:
325 vec = BOOK3S_INTERRUPT_PERFMON;
326 break;
327 case BOOK3S_IRQPRIO_FAC_UNAVAIL:
328 vec = BOOK3S_INTERRUPT_FAC_UNAVAIL;
329 break;
330 default:
331 deliver = 0;
332 printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority);
333 break;
334 }
335
336 #if 0
337 printk(KERN_INFO "Deliver interrupt 0x%x? %x\n", vec, deliver);
338 #endif
339
340 if (deliver)
341 kvmppc_inject_interrupt(vcpu, vec, 0);
342
343 return deliver;
344 }
345
346 /*
347 * This function determines if an irqprio should be cleared once issued.
348 */
349 static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority)
350 {
351 switch (priority) {
352 case BOOK3S_IRQPRIO_DECREMENTER:
353 /* DEC interrupts get cleared by mtdec */
354 return false;
355 case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
356 /* External interrupts get cleared by userspace */
357 return false;
358 }
359
360 return true;
361 }
362
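/*
 * Walk the pending exceptions in priority order, deliver what can be taken
 * now, and update the guest-visible interrupt-pending state.
 */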
363 int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
364 {
365 unsigned long *pending = &vcpu->arch.pending_exceptions;
366 unsigned long old_pending = vcpu->arch.pending_exceptions;
367 unsigned int priority;
368
369 #ifdef EXIT_DEBUG
370 if (vcpu->arch.pending_exceptions)
371 printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions);
372 #endif
373 priority = __ffs(*pending);
374 while (priority < BOOK3S_IRQPRIO_MAX) {
375 if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
376 clear_irqprio(vcpu, priority)) {
377 clear_bit(priority, &vcpu->arch.pending_exceptions);
378 break;
379 }
380
381 priority = find_next_bit(pending,
382 BITS_PER_BYTE * sizeof(*pending),
383 priority + 1);
384 }
385
386 /* Tell the guest about our interrupt status */
387 kvmppc_update_int_pending(vcpu, *pending, old_pending);
388
389 return 0;
390 }
391 EXPORT_SYMBOL_GPL(kvmppc_core_prepare_to_enter);
392
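/*
 * Translate a guest physical address to a host pfn, special-casing the magic
 * page, which is backed by the host page holding vcpu->arch.shared.
 */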
393 kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing,
394 bool *writable)
395 {
396 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM;
397 gfn_t gfn = gpa >> PAGE_SHIFT;
398
399 if (!(kvmppc_get_msr(vcpu) & MSR_SF))
400 mp_pa = (uint32_t)mp_pa;
401
402 /* Magic page override */
403 gpa &= ~0xFFFULL;
404 if (unlikely(mp_pa) && unlikely((gpa & KVM_PAM) == mp_pa)) {
405 ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
406 kvm_pfn_t pfn;
407
408 pfn = (kvm_pfn_t)virt_to_phys((void*)shared_page) >> PAGE_SHIFT;
409 get_page(pfn_to_page(pfn));
410 if (writable)
411 *writable = true;
412 return pfn;
413 }
414
415 return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable);
416 }
417 EXPORT_SYMBOL_GPL(kvmppc_gpa_to_pfn);
418
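/*
 * Translate an effective address either through the guest MMU (when the
 * relevant MSR relocation bit is set) or with 1:1 real-mode rules.
 */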
419 int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
420 enum xlate_readwrite xlrw, struct kvmppc_pte *pte)
421 {
422 bool data = (xlid == XLATE_DATA);
423 bool iswrite = (xlrw == XLATE_WRITE);
424 int relocated = (kvmppc_get_msr(vcpu) & (data ? MSR_DR : MSR_IR));
425 int r;
426
427 if (relocated) {
428 r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite);
429 } else {
430 pte->eaddr = eaddr;
431 pte->raddr = eaddr & KVM_PAM;
432 pte->vpage = VSID_REAL | eaddr >> 12;
433 pte->may_read = true;
434 pte->may_write = true;
435 pte->may_execute = true;
436 r = 0;
437
438 if ((kvmppc_get_msr(vcpu) & (MSR_IR | MSR_DR)) == MSR_DR &&
439 !data) {
440 if ((vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
441 ((eaddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
442 pte->raddr &= ~SPLIT_HACK_MASK;
443 }
444 }
445
446 return r;
447 }
448
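/*
 * Fetch the instruction at the current PC (or the preceding "sc" for system
 * call exits) from guest memory.
 */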
449 int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
450 enum instruction_fetch_type type, u32 *inst)
451 {
452 ulong pc = kvmppc_get_pc(vcpu);
453 int r;
454
455 if (type == INST_SC)
456 pc -= 4;
457
458 r = kvmppc_ld(vcpu, &pc, sizeof(u32), inst, false);
459 if (r == EMULATE_DONE)
460 return r;
461 else
462 return EMULATE_AGAIN;
463 }
464 EXPORT_SYMBOL_GPL(kvmppc_load_last_inst);
465
466 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
467 {
468 return 0;
469 }
470
471 int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
472 {
473 return 0;
474 }
475
476 void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
477 {
478 }
479
480 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
481 struct kvm_sregs *sregs)
482 {
483 int ret;
484
485 vcpu_load(vcpu);
486 ret = vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
487 vcpu_put(vcpu);
488
489 return ret;
490 }
491
492 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
493 struct kvm_sregs *sregs)
494 {
495 int ret;
496
497 vcpu_load(vcpu);
498 ret = vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
499 vcpu_put(vcpu);
500
501 return ret;
502 }
503
504 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
505 {
506 int i;
507
508 regs->pc = kvmppc_get_pc(vcpu);
509 regs->cr = kvmppc_get_cr(vcpu);
510 regs->ctr = kvmppc_get_ctr(vcpu);
511 regs->lr = kvmppc_get_lr(vcpu);
512 regs->xer = kvmppc_get_xer(vcpu);
513 regs->msr = kvmppc_get_msr(vcpu);
514 regs->srr0 = kvmppc_get_srr0(vcpu);
515 regs->srr1 = kvmppc_get_srr1(vcpu);
516 regs->pid = vcpu->arch.pid;
517 regs->sprg0 = kvmppc_get_sprg0(vcpu);
518 regs->sprg1 = kvmppc_get_sprg1(vcpu);
519 regs->sprg2 = kvmppc_get_sprg2(vcpu);
520 regs->sprg3 = kvmppc_get_sprg3(vcpu);
521 regs->sprg4 = kvmppc_get_sprg4(vcpu);
522 regs->sprg5 = kvmppc_get_sprg5(vcpu);
523 regs->sprg6 = kvmppc_get_sprg6(vcpu);
524 regs->sprg7 = kvmppc_get_sprg7(vcpu);
525
526 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
527 regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
528
529 return 0;
530 }
531
532 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
533 {
534 int i;
535
536 kvmppc_set_pc(vcpu, regs->pc);
537 kvmppc_set_cr(vcpu, regs->cr);
538 kvmppc_set_ctr(vcpu, regs->ctr);
539 kvmppc_set_lr(vcpu, regs->lr);
540 kvmppc_set_xer(vcpu, regs->xer);
541 kvmppc_set_msr(vcpu, regs->msr);
542 kvmppc_set_srr0(vcpu, regs->srr0);
543 kvmppc_set_srr1(vcpu, regs->srr1);
544 kvmppc_set_sprg0(vcpu, regs->sprg0);
545 kvmppc_set_sprg1(vcpu, regs->sprg1);
546 kvmppc_set_sprg2(vcpu, regs->sprg2);
547 kvmppc_set_sprg3(vcpu, regs->sprg3);
548 kvmppc_set_sprg4(vcpu, regs->sprg4);
549 kvmppc_set_sprg5(vcpu, regs->sprg5);
550 kvmppc_set_sprg6(vcpu, regs->sprg6);
551 kvmppc_set_sprg7(vcpu, regs->sprg7);
552
553 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
554 kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
555
556 return 0;
557 }
558
559 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
560 {
561 return -ENOTSUPP;
562 }
563
564 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
565 {
566 return -ENOTSUPP;
567 }
568
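/*
 * ONE_REG accessors: try the HV/PR backend first and fall back to the generic
 * Book3S registers when it returns -EINVAL.
 */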
569 int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
570 union kvmppc_one_reg *val)
571 {
572 int r = 0;
573 long int i;
574
575 r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val);
576 if (r == -EINVAL) {
577 r = 0;
578 switch (id) {
579 case KVM_REG_PPC_DAR:
580 *val = get_reg_val(id, kvmppc_get_dar(vcpu));
581 break;
582 case KVM_REG_PPC_DSISR:
583 *val = get_reg_val(id, kvmppc_get_dsisr(vcpu));
584 break;
585 case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
586 i = id - KVM_REG_PPC_FPR0;
587 *val = get_reg_val(id, VCPU_FPR(vcpu, i));
588 break;
589 case KVM_REG_PPC_FPSCR:
590 *val = get_reg_val(id, vcpu->arch.fp.fpscr);
591 break;
592 #ifdef CONFIG_VSX
593 case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
594 if (cpu_has_feature(CPU_FTR_VSX)) {
595 i = id - KVM_REG_PPC_VSR0;
596 val->vsxval[0] = vcpu->arch.fp.fpr[i][0];
597 val->vsxval[1] = vcpu->arch.fp.fpr[i][1];
598 } else {
599 r = -ENXIO;
600 }
601 break;
602 #endif /* CONFIG_VSX */
603 case KVM_REG_PPC_DEBUG_INST:
604 *val = get_reg_val(id, INS_TW);
605 break;
606 #ifdef CONFIG_KVM_XICS
607 case KVM_REG_PPC_ICP_STATE:
608 if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) {
609 r = -ENXIO;
610 break;
611 }
612 if (xive_enabled())
613 *val = get_reg_val(id, kvmppc_xive_get_icp(vcpu));
614 else
615 *val = get_reg_val(id, kvmppc_xics_get_icp(vcpu));
616 break;
617 #endif /* CONFIG_KVM_XICS */
618 case KVM_REG_PPC_FSCR:
619 *val = get_reg_val(id, vcpu->arch.fscr);
620 break;
621 case KVM_REG_PPC_TAR:
622 *val = get_reg_val(id, vcpu->arch.tar);
623 break;
624 case KVM_REG_PPC_EBBHR:
625 *val = get_reg_val(id, vcpu->arch.ebbhr);
626 break;
627 case KVM_REG_PPC_EBBRR:
628 *val = get_reg_val(id, vcpu->arch.ebbrr);
629 break;
630 case KVM_REG_PPC_BESCR:
631 *val = get_reg_val(id, vcpu->arch.bescr);
632 break;
633 case KVM_REG_PPC_IC:
634 *val = get_reg_val(id, vcpu->arch.ic);
635 break;
636 default:
637 r = -EINVAL;
638 break;
639 }
640 }
641
642 return r;
643 }
644
645 int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
646 union kvmppc_one_reg *val)
647 {
648 int r = 0;
649 long int i;
650
651 r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val);
652 if (r == -EINVAL) {
653 r = 0;
654 switch (id) {
655 case KVM_REG_PPC_DAR:
656 kvmppc_set_dar(vcpu, set_reg_val(id, *val));
657 break;
658 case KVM_REG_PPC_DSISR:
659 kvmppc_set_dsisr(vcpu, set_reg_val(id, *val));
660 break;
661 case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
662 i = id - KVM_REG_PPC_FPR0;
663 VCPU_FPR(vcpu, i) = set_reg_val(id, *val);
664 break;
665 case KVM_REG_PPC_FPSCR:
666 vcpu->arch.fp.fpscr = set_reg_val(id, *val);
667 break;
668 #ifdef CONFIG_VSX
669 case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
670 if (cpu_has_feature(CPU_FTR_VSX)) {
671 i = id - KVM_REG_PPC_VSR0;
672 vcpu->arch.fp.fpr[i][0] = val->vsxval[0];
673 vcpu->arch.fp.fpr[i][1] = val->vsxval[1];
674 } else {
675 r = -ENXIO;
676 }
677 break;
678 #endif /* CONFIG_VSX */
679 #ifdef CONFIG_KVM_XICS
680 case KVM_REG_PPC_ICP_STATE:
681 if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) {
682 r = -ENXIO;
683 break;
684 }
685 if (xive_enabled())
686 r = kvmppc_xive_set_icp(vcpu, set_reg_val(id, *val));
687 else
688 r = kvmppc_xics_set_icp(vcpu, set_reg_val(id, *val));
689 break;
690 #endif /* CONFIG_KVM_XICS */
691 case KVM_REG_PPC_FSCR:
692 vcpu->arch.fscr = set_reg_val(id, *val);
693 break;
694 case KVM_REG_PPC_TAR:
695 vcpu->arch.tar = set_reg_val(id, *val);
696 break;
697 case KVM_REG_PPC_EBBHR:
698 vcpu->arch.ebbhr = set_reg_val(id, *val);
699 break;
700 case KVM_REG_PPC_EBBRR:
701 vcpu->arch.ebbrr = set_reg_val(id, *val);
702 break;
703 case KVM_REG_PPC_BESCR:
704 vcpu->arch.bescr = set_reg_val(id, *val);
705 break;
706 case KVM_REG_PPC_IC:
707 vcpu->arch.ic = set_reg_val(id, *val);
708 break;
709 default:
710 r = -EINVAL;
711 break;
712 }
713 }
714
715 return r;
716 }
717
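/* Most of the wrappers below simply dispatch to the HV or PR backend via kvm_ops. */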
718 void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
719 {
720 vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
721 }
722
723 void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
724 {
725 vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
726 }
727
728 void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
729 {
730 vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr);
731 }
732 EXPORT_SYMBOL_GPL(kvmppc_set_msr);
733
734 int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
735 {
736 return vcpu->kvm->arch.kvm_ops->vcpu_run(kvm_run, vcpu);
737 }
738
739 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
740 struct kvm_translation *tr)
741 {
742 return 0;
743 }
744
745 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
746 struct kvm_guest_debug *dbg)
747 {
748 vcpu_load(vcpu);
749 vcpu->guest_debug = dbg->control;
750 vcpu_put(vcpu);
751 return 0;
752 }
753
754 void kvmppc_decrementer_func(struct kvm_vcpu *vcpu)
755 {
756 kvmppc_core_queue_dec(vcpu);
757 kvm_vcpu_kick(vcpu);
758 }
759
760 struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
761 {
762 return kvm->arch.kvm_ops->vcpu_create(kvm, id);
763 }
764
765 void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
766 {
767 vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
768 }
769
770 int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
771 {
772 return vcpu->kvm->arch.kvm_ops->check_requests(vcpu);
773 }
774
775 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
776 {
777 return kvm->arch.kvm_ops->get_dirty_log(kvm, log);
778 }
779
780 void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
781 struct kvm_memory_slot *dont)
782 {
783 kvm->arch.kvm_ops->free_memslot(free, dont);
784 }
785
786 int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
787 unsigned long npages)
788 {
789 return kvm->arch.kvm_ops->create_memslot(slot, npages);
790 }
791
792 void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
793 {
794 kvm->arch.kvm_ops->flush_memslot(kvm, memslot);
795 }
796
797 int kvmppc_core_prepare_memory_region(struct kvm *kvm,
798 struct kvm_memory_slot *memslot,
799 const struct kvm_userspace_memory_region *mem)
800 {
801 return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem);
802 }
803
804 void kvmppc_core_commit_memory_region(struct kvm *kvm,
805 const struct kvm_userspace_memory_region *mem,
806 const struct kvm_memory_slot *old,
807 const struct kvm_memory_slot *new)
808 {
809 kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new);
810 }
811
812 int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
813 {
814 return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
815 }
816
817 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
818 {
819 return kvm->arch.kvm_ops->age_hva(kvm, start, end);
820 }
821
822 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
823 {
824 return kvm->arch.kvm_ops->test_age_hva(kvm, hva);
825 }
826
827 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
828 {
829 kvm->arch.kvm_ops->set_spte_hva(kvm, hva, pte);
830 }
831
832 void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
833 {
834 vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
835 }
836
837 int kvmppc_core_init_vm(struct kvm *kvm)
838 {
839
840 #ifdef CONFIG_PPC64
841 INIT_LIST_HEAD_RCU(&kvm->arch.spapr_tce_tables);
842 INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
843 #endif
844
845 return kvm->arch.kvm_ops->init_vm(kvm);
846 }
847
848 void kvmppc_core_destroy_vm(struct kvm *kvm)
849 {
850 kvm->arch.kvm_ops->destroy_vm(kvm);
851
852 #ifdef CONFIG_PPC64
853 kvmppc_rtas_tokens_free(kvm);
854 WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
855 #endif
856 }
857
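/*
 * H_LOGICAL_CI_LOAD: read 1, 2, 4 or 8 bytes from a cache-inhibited logical
 * address via the MMIO bus and return the value, converted from big-endian,
 * in r4.
 */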
858 int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu)
859 {
860 unsigned long size = kvmppc_get_gpr(vcpu, 4);
861 unsigned long addr = kvmppc_get_gpr(vcpu, 5);
862 u64 buf;
863 int srcu_idx;
864 int ret;
865
866 if (!is_power_of_2(size) || (size > sizeof(buf)))
867 return H_TOO_HARD;
868
869 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
870 ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, size, &buf);
871 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
872 if (ret != 0)
873 return H_TOO_HARD;
874
875 switch (size) {
876 case 1:
877 kvmppc_set_gpr(vcpu, 4, *(u8 *)&buf);
878 break;
879
880 case 2:
881 kvmppc_set_gpr(vcpu, 4, be16_to_cpu(*(__be16 *)&buf));
882 break;
883
884 case 4:
885 kvmppc_set_gpr(vcpu, 4, be32_to_cpu(*(__be32 *)&buf));
886 break;
887
888 case 8:
889 kvmppc_set_gpr(vcpu, 4, be64_to_cpu(*(__be64 *)&buf));
890 break;
891
892 default:
893 BUG();
894 }
895
896 return H_SUCCESS;
897 }
898 EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_load);
899
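/*
 * H_LOGICAL_CI_STORE: write the value in r6 (1, 2, 4 or 8 bytes, stored
 * big-endian) to a cache-inhibited logical address via the MMIO bus.
 */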
900 int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu)
901 {
902 unsigned long size = kvmppc_get_gpr(vcpu, 4);
903 unsigned long addr = kvmppc_get_gpr(vcpu, 5);
904 unsigned long val = kvmppc_get_gpr(vcpu, 6);
905 u64 buf;
906 int srcu_idx;
907 int ret;
908
909 switch (size) {
910 case 1:
911 *(u8 *)&buf = val;
912 break;
913
914 case 2:
915 *(__be16 *)&buf = cpu_to_be16(val);
916 break;
917
918 case 4:
919 *(__be32 *)&buf = cpu_to_be32(val);
920 break;
921
922 case 8:
923 *(__be64 *)&buf = cpu_to_be64(val);
924 break;
925
926 default:
927 return H_TOO_HARD;
928 }
929
930 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
931 ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, size, &buf);
932 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
933 if (ret != 0)
934 return H_TOO_HARD;
935
936 return H_SUCCESS;
937 }
938 EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_store);
939
940 int kvmppc_core_check_processor_compat(void)
941 {
942 /*
943 * We always return 0 for book3s. We check
944 * for compatibility while loading the HV
945 * or PR module.
946 */
947 return 0;
948 }
949
950 int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hcall)
951 {
952 return kvm->arch.kvm_ops->hcall_implemented(hcall);
953 }
954
955 #ifdef CONFIG_KVM_XICS
956 int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
957 bool line_status)
958 {
959 if (xive_enabled())
960 return kvmppc_xive_set_irq(kvm, irq_source_id, irq, level,
961 line_status);
962 else
963 return kvmppc_xics_set_irq(kvm, irq_source_id, irq, level,
964 line_status);
965 }
966
967 int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *irq_entry,
968 struct kvm *kvm, int irq_source_id,
969 int level, bool line_status)
970 {
971 return kvm_set_irq(kvm, irq_source_id, irq_entry->gsi,
972 level, line_status);
973 }
974 static int kvmppc_book3s_set_irq(struct kvm_kernel_irq_routing_entry *e,
975 struct kvm *kvm, int irq_source_id, int level,
976 bool line_status)
977 {
978 return kvm_set_irq(kvm, irq_source_id, e->gsi, level, line_status);
979 }
980
981 int kvm_irq_map_gsi(struct kvm *kvm,
982 struct kvm_kernel_irq_routing_entry *entries, int gsi)
983 {
984 entries->gsi = gsi;
985 entries->type = KVM_IRQ_ROUTING_IRQCHIP;
986 entries->set = kvmppc_book3s_set_irq;
987 entries->irqchip.irqchip = 0;
988 entries->irqchip.pin = gsi;
989 return 1;
990 }
991
992 int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
993 {
994 return pin;
995 }
996
997 #endif /* CONFIG_KVM_XICS */
998
999 static int kvmppc_book3s_init(void)
1000 {
1001 int r;
1002
1003 r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
1004 if (r)
1005 return r;
1006 #ifdef CONFIG_KVM_BOOK3S_32_HANDLER
1007 r = kvmppc_book3s_init_pr();
1008 #endif
1009
1010 #ifdef CONFIG_KVM_XICS
1011 #ifdef CONFIG_KVM_XIVE
1012 if (xive_enabled()) {
1013 kvmppc_xive_init_module();
1014 kvm_register_device_ops(&kvm_xive_ops, KVM_DEV_TYPE_XICS);
1015 } else
1016 #endif
1017 kvm_register_device_ops(&kvm_xics_ops, KVM_DEV_TYPE_XICS);
1018 #endif
1019 return r;
1020 }
1021
1022 static void kvmppc_book3s_exit(void)
1023 {
1024 #ifdef CONFIG_KVM_XICS
1025 if (xive_enabled())
1026 kvmppc_xive_exit_module();
1027 #endif
1028 #ifdef CONFIG_KVM_BOOK3S_32_HANDLER
1029 kvmppc_book3s_exit_pr();
1030 #endif
1031 kvm_exit();
1032 }
1033
1034 module_init(kvmppc_book3s_init);
1035 module_exit(kvmppc_book3s_exit);
1036
1037 /* On 32-bit this is our one and only kernel module */
1038 #ifdef CONFIG_KVM_BOOK3S_32_HANDLER
1039 MODULE_ALIAS_MISCDEV(KVM_MINOR);
1040 MODULE_ALIAS("devname:kvm");
1041 #endif
1042