/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/code-patching-asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/export.h>
#include <asm/tm.h>
#include <asm/opal.h>
#include <asm/xive-regs.h>
#include <asm/thread_info.h>
#include <asm/asm-compat.h>
#include <asm/feature-fixups.h>
#include <asm/cpuidle.h>
#include <asm/ultravisor-api.h>

/* Sign-extend HDEC if not on POWER9 */
#define EXTEND_HDEC(reg)			\
BEGIN_FTR_SECTION;				\
	extsw	reg, reg;			\
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)

/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE	1
#define NAPPING_NOVCPU	2
#define NAPPING_UNSPLIT	3

/* Stack frame offsets for kvmppc_hv_entry */
#define SFS			208
#define STACK_SLOT_TRAP		(SFS-4)
#define STACK_SLOT_SHORT_PATH	(SFS-8)
#define STACK_SLOT_TID		(SFS-16)
#define STACK_SLOT_PSSCR	(SFS-24)
#define STACK_SLOT_PID		(SFS-32)
#define STACK_SLOT_IAMR		(SFS-40)
#define STACK_SLOT_CIABR	(SFS-48)
#define STACK_SLOT_DAWR		(SFS-56)
#define STACK_SLOT_DAWRX	(SFS-64)
#define STACK_SLOT_HFSCR	(SFS-72)
#define STACK_SLOT_AMR		(SFS-80)
#define STACK_SLOT_UAMOR	(SFS-88)
/* the following is used by the P9 short path */
#define STACK_SLOT_NVGPRS	(SFS-152)	/* 18 gprs */

/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * Input Registers:
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -112(r1)
	mfmsr	r10
	std	r10, HSTATE_HOST_MSR(r13)
	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
	li	r0,MSR_RI
	andc	r0,r10,r0
	li	r6,MSR_IR | MSR_DR
	andc	r6,r10,r6
	mtmsrd	r0,1		/* clear RI in MSR */
	mtsrr0	r5
	mtsrr1	r6
	RFI_TO_KERNEL

kvmppc_call_hv_entry:
BEGIN_FTR_SECTION
	/* On P9, do LPCR setting, if necessary */
	ld	r3, HSTATE_SPLIT_MODE(r13)
	cmpdi	r3, 0
	beq	46f
	lwz	r4, KVM_SPLIT_DO_SET(r3)
	cmpwi	r4, 0
	beq	46f
	bl	kvmhv_p9_set_lpcr
	nop
46:
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_hv_entry

	/* Back from guest - restore host state and return to caller */

BEGIN_FTR_SECTION
	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
	li	r6,7
	mtspr	SPRN_DABR,r5
	mtspr	SPRN_DABRX,r6
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

	/* Restore SPRG3 */
	ld	r3,PACA_SPRG_VDSO(r13)
	mtspr	SPRN_SPRG_VDSO_WRITE,r3

	/* Reload the host's PMU registers */
	bl	kvmhv_load_host_pmu

	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)
	mftb	r4
	subf	r4, r4, r3
	mtspr	SPRN_DEC, r4
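	/*
	 * (HSTATE_DECEXP is taken to hold the host decrementer expiry in
	 * timebase ticks, so the subtraction above yields "ticks until
	 * expiry"; a negative result just makes the DEC fire at once.)
	 */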
	/* hwthread_req may have got set by cede or no vcpu, so clear it */
	li	r0, 0
	stb	r0, HSTATE_HWTHREAD_REQ(r13)

	/*
	 * For external interrupts we need to call the Linux
	 * handler to process the interrupt. We do that by jumping
	 * to absolute address 0x500 for external interrupts.
	 * The [h]rfid at the end of the handler will return to
	 * the book3s_hv_interrupts.S code. For other interrupts
	 * we do the rfid to get back to the book3s_hv_interrupts.S
	 * code here.
	 */
	ld	r8, 112+PPC_LR_STKOFF(r1)
	addi	r1, r1, 112
	ld	r7, HSTATE_HOST_MSR(r13)

	/* Return the trap number on this thread as the return value */
	mr	r3, r12

	/*
	 * If we came back from the guest via a relocation-on interrupt,
	 * we will be in virtual mode at this point, which makes it a
	 * little easier to get back to the caller.
	 */
	mfmsr	r0
	andi.	r0, r0, MSR_IR		/* in real mode? */
	bne	.Lvirt_return

	/* RFI into the highmem handler */
	mfmsr	r6
	li	r0, MSR_RI
	andc	r6, r6, r0
	mtmsrd	r6, 1		/* Clear RI in MSR */
	mtsrr0	r8
	mtsrr1	r7
	RFI_TO_KERNEL

	/* Virtual-mode return */
.Lvirt_return:
	mtlr	r8
	blr

kvmppc_primary_no_guest:
	/* We handle this much like a ceded vcpu */
	/* put the HDEC into the DEC, since HDEC interrupts don't wake us */
	/* HDEC may be larger than DEC for arch >= v3.00, but since the */
	/* HDEC value came from DEC in the first place, it will fit */
	mfspr	r3, SPRN_HDEC
	mtspr	SPRN_DEC, r3
	/*
	 * Make sure the primary has finished the MMU switch.
	 * We should never get here on a secondary thread, but
	 * check it for robustness' sake.
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)
65:	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	beq	65b
	/* Set LPCR. */
	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	isync
	/* set our bit in napping_threads */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
1:	lwarx	r3, 0, r6
	or	r3, r3, r0
	stwcx.	r3, 0, r6
	bne	1b
	/* order napping_threads update vs testing entry_exit_map */
	isync
	li	r12, 0
	lwz	r7, VCORE_ENTRY_EXIT(r5)
	cmpwi	r7, 0x100
	bge	kvm_novcpu_exit	/* another thread already exiting */
	li	r3, NAPPING_NOVCPU
	stb	r3, HSTATE_NAPPING(r13)

	li	r3, 0		/* Don't wake on privileged (OS) doorbell */
	b	kvm_do_nap

/*
 * kvm_novcpu_wakeup
 *	Entered from kvm_start_guest if kvm_hstate.napping is set
 *	to NAPPING_NOVCPU
 *		r2 = kernel TOC
 *		r13 = paca
 */
kvm_novcpu_wakeup:
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	li	r0, 0
	stb	r0, HSTATE_NAPPING(r13)

	/* check the wake reason */
	bl	kvmppc_check_wake_reason

	/*
	 * Restore volatile registers since we could have called
	 * a C routine in kvmppc_check_wake_reason.
	 *	r5 = VCORE
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)

	/* see if any other thread is already exiting */
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	bge	kvm_novcpu_exit

	/* clear our bit in napping_threads */
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
4:	lwarx	r7, 0, r6
	andc	r7, r7, r0
	stwcx.	r7, 0, r6
	bne	4b

	/* See if the wake reason means we need to exit */
	cmpdi	r3, 0
	bge	kvm_novcpu_exit

	/* See if our timeslice has expired (HDEC is negative) */
	mfspr	r0, SPRN_HDEC
	EXTEND_HDEC(r0)
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
	cmpdi	r0, 0
	blt	kvm_novcpu_exit
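	/*
	 * (EXTEND_HDEC matters because HDEC is only 32 bits wide before
	 * ISA v3.00; sign-extending lets the 64-bit compare above see an
	 * expired (negative) HDEC on POWER8 as well.)
	 */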
	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
#endif
	b	kvmppc_got_guest

kvm_novcpu_exit:
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	beq	13f
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
13:	mr	r3, r12
	stw	r12, STACK_SLOT_TRAP(r1)
	bl	kvmhv_commence_exit
	nop
	b	kvmhv_switch_to_host

/*
 * We come in here when woken from Linux offline idle code.
 * Relocation is off
 * r3 contains the SRR1 wakeup value, SRR1 is trashed.
 */
_GLOBAL(idle_kvm_start_guest)
	ld	r4,PACAEMERGSP(r13)
	mfcr	r5
	mflr	r0
	std	r1,0(r4)
	std	r5,8(r4)
	std	r0,16(r4)
	subi	r1,r4,STACK_FRAME_OVERHEAD
	SAVE_NVGPRS(r1)

	/*
	 * Could avoid this and pass it through in r3. For now,
	 * code expects it to be in SRR1.
	 */
	mtspr	SPRN_SRR1,r3

	li	r0,0
	stb	r0,PACA_FTRACE_ENABLED(r13)

	li	r0,KVM_HWTHREAD_IN_KVM
	stb	r0,HSTATE_HWTHREAD_STATE(r13)

	/* kvm cede / napping does not come through here */
	lbz	r0,HSTATE_NAPPING(r13)
	twnei	r0,0

	b	1f

kvm_unsplit_wakeup:
	li	r0, 0
	stb	r0, HSTATE_NAPPING(r13)

1:

	/*
	 * We weren't napping due to cede, so this must be a secondary
	 * thread being woken up to run a guest, or being woken up due
	 * to a stray IPI.  (Or due to some machine check or hypervisor
	 * maintenance interrupt while the core is in KVM.)
	 */

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason
	/*
	 * kvmppc_check_wake_reason could invoke a C routine, but we
	 * have no volatile registers to restore when we return.
	 */
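	/*
	 * (kvmppc_check_wake_reason returns r3 < 0 only for a guest wakeup
	 * IPI; r3 >= 0 (nothing to do, or a host event) sends us back to
	 * kvm_no_guest.  It may also leave an interrupt vector in r12.)
	 */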

	cmpdi	r3, 0
	bge	kvm_no_guest

	/* get vcore pointer, NULL if we have nothing to run */
	ld	r5,HSTATE_KVM_VCORE(r13)
	cmpdi	r5,0
	/* if we have no vcore to run, go back to sleep */
	beq	kvm_no_guest

kvm_secondary_got_guest:

	/* Set HSTATE_DSCR(r13) to something sensible */
	ld	r6, PACA_DSCR_DEFAULT(r13)
	std	r6, HSTATE_DSCR(r13)

	/* On thread 0 of a subcore, set HDEC to max */
	lbz	r4, HSTATE_PTID(r13)
	cmpwi	r4, 0
	bne	63f
	LOAD_REG_ADDR(r6, decrementer_max)
	ld	r6, 0(r6)
	mtspr	SPRN_HDEC, r6
	/* and set per-LPAR registers, if doing dynamic micro-threading */
	ld	r6, HSTATE_SPLIT_MODE(r13)
	cmpdi	r6, 0
	beq	63f
BEGIN_FTR_SECTION
	ld	r0, KVM_SPLIT_RPR(r6)
	mtspr	SPRN_RPR, r0
	ld	r0, KVM_SPLIT_PMMAR(r6)
	mtspr	SPRN_PMMAR, r0
	ld	r0, KVM_SPLIT_LDBAR(r6)
	mtspr	SPRN_LDBAR, r0
	isync
FTR_SECTION_ELSE
	/* On P9 we use the split_info for coordinating LPCR changes */
	lwz	r4, KVM_SPLIT_DO_SET(r6)
	cmpwi	r4, 0
	beq	1f
	mr	r3, r6
	bl	kvmhv_p9_set_lpcr
	nop
1:
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
63:
	/* Order load of vcpu after load of vcore */
	lwsync
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_hv_entry

	/* Back from the guest, go back to nap */
	/* Clear our vcpu and vcore pointers so we don't come back in early */
	li	r0, 0
	std	r0, HSTATE_KVM_VCPU(r13)
	/*
	 * Once we clear HSTATE_KVM_VCORE(r13), the code in
	 * kvmppc_run_core() is going to assume that all our vcpu
	 * state is visible in memory.  This lwsync makes sure
	 * that that is true.
	 */
	lwsync
	std	r0, HSTATE_KVM_VCORE(r13)

	/*
	 * All secondaries exiting guest will fall through this path.
	 * Before proceeding, just check for HMI interrupt and
	 * invoke opal hmi handler. By now we are sure that the
	 * primary thread on this core/subcore has already made partition
	 * switch/TB resync and we are good to call opal hmi handler.
	 */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bne	kvm_no_guest

	li	r3,0			/* NULL argument */
	bl	hmi_exception_realmode
/*
 * At this point we have finished executing in the guest.
 * We need to wait for hwthread_req to become zero, since
 * we may not turn on the MMU while hwthread_req is non-zero.
 * While waiting we also need to check if we get given a vcpu to run.
 */
kvm_no_guest:
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r3, 0
	bne	53f
	HMT_MEDIUM
	li	r0, KVM_HWTHREAD_IN_KERNEL
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	/* need to recheck hwthread_req after a barrier, to avoid race */
	sync
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r3, 0
	bne	54f

	/*
	 * Jump to idle_return_gpr_loss, which returns to the
	 * idle_kvm_start_guest caller.
	 */
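	/*
	 * (The rlwimi below inserts LPCR_PECE0 over the PECE0|PECE1 field:
	 * PECE0 is set so external interrupts can still wake the thread,
	 * while PECE1 is cleared so the decrementer cannot.)
	 */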
	li	r3, LPCR_PECE0
	mfspr	r4, SPRN_LPCR
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
	mtspr	SPRN_LPCR, r4
	/* set up r3 for return */
	mfspr	r3,SPRN_SRR1
	REST_NVGPRS(r1)
	addi	r1, r1, STACK_FRAME_OVERHEAD
	ld	r0, 16(r1)
	ld	r5, 8(r1)
	ld	r1, 0(r1)
	mtlr	r0
	mtcr	r5
	blr

53:	HMT_LOW
	ld	r5, HSTATE_KVM_VCORE(r13)
	cmpdi	r5, 0
	bne	60f
	ld	r3, HSTATE_SPLIT_MODE(r13)
	cmpdi	r3, 0
	beq	kvm_no_guest
	lwz	r0, KVM_SPLIT_DO_SET(r3)
	cmpwi	r0, 0
	bne	kvmhv_do_set
	lwz	r0, KVM_SPLIT_DO_RESTORE(r3)
	cmpwi	r0, 0
	bne	kvmhv_do_restore
	lbz	r0, KVM_SPLIT_DO_NAP(r3)
	cmpwi	r0, 0
	beq	kvm_no_guest
	HMT_MEDIUM
	b	kvm_unsplit_nap
60:	HMT_MEDIUM
	b	kvm_secondary_got_guest

54:	li	r0, KVM_HWTHREAD_IN_KVM
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	b	kvm_no_guest

kvmhv_do_set:
	/* Set LPCR, LPIDR etc. on P9 */
	HMT_MEDIUM
	bl	kvmhv_p9_set_lpcr
	nop
	b	kvm_no_guest

kvmhv_do_restore:
	HMT_MEDIUM
	bl	kvmhv_p9_restore_lpcr
	nop
	b	kvm_no_guest

/*
 * Here the primary thread is trying to return the core to
 * whole-core mode, so we need to nap.
 */
kvm_unsplit_nap:
	/*
	 * When secondaries are napping in kvm_unsplit_nap() with
	 * hwthread_req = 1, an HMI gets ignored even though the subcores
	 * have already exited the guest.  The HMI then keeps waking the
	 * secondaries from nap in a loop, and they always go back to nap
	 * since no vcore is assigned to them.  This makes it impossible
	 * for the primary thread to get hold of the secondary threads,
	 * resulting in a soft lockup in the KVM path.
	 *
	 * Let us check if HMI is pending and handle it before we go to nap.
	 */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bne	55f
	li	r3, 0			/* NULL argument */
	bl	hmi_exception_realmode
55:
	/*
	 * Ensure that secondary doesn't nap when it has
	 * its vcore pointer set.
	 */
	sync		/* matches smp_mb() before setting split_info.do_nap */
	ld	r0, HSTATE_KVM_VCORE(r13)
	cmpdi	r0, 0
	bne	kvm_no_guest
	/* clear any pending message */
BEGIN_FTR_SECTION
	lis	r6, (PPC_DBELL_SERVER << (63-36))@h
	PPC_MSGCLR(6)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/* Set kvm_split_mode.napped[tid] = 1 */
	ld	r3, HSTATE_SPLIT_MODE(r13)
	li	r0, 1
	lbz	r4, HSTATE_TID(r13)
	addi	r4, r4, KVM_SPLIT_NAPPED
	stbx	r0, r3, r4
	/* Check the do_nap flag again after setting napped[] */
	sync
	lbz	r0, KVM_SPLIT_DO_NAP(r3)
	cmpwi	r0, 0
	beq	57f
	li	r3, NAPPING_UNSPLIT
	stb	r3, HSTATE_NAPPING(r13)
	li	r3, (LPCR_PECEDH | LPCR_PECE0) >> 4
	mfspr	r5, SPRN_LPCR
	rlwimi	r5, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1)
	b	kvm_nap_sequence
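	/*
	 * (The li/rlwimi pair above picks the nap wake conditions: PECEDH
	 * (hypervisor doorbell) and PECE0 (external) are set, and PECEDP
	 * and PECE1 are cleared by the same 4-bit-shifted mask.)
	 */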
57:	li	r0, 0
	stbx	r0, r3, r4
	b	kvm_no_guest

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_hv_entry
kvmppc_hv_entry:

	/* Required state:
	 *
	 * R4 = vcpu pointer (or NULL)
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * R2 = TOC
	 * all other volatile GPRS = free
	 * Does not preserve non-volatile GPRs or CR fields
	 */
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -SFS(r1)

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	li	r6, KVM_GUEST_MODE_HOST_HV
	stb	r6, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Store initial timestamp */
	cmpdi	r4, 0
	beq	1f
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
1:
#endif

	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r9, VCORE_KVM(r5)	/* pointer to struct kvm */

	/*
	 * POWER7/POWER8 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Set bit in entry map iff exit map is zero. */
	li	r7, 1
	lbz	r6, HSTATE_PTID(r13)
	sld	r7, r7, r6
	addi	r8, r5, VCORE_ENTRY_EXIT
21:	lwarx	r3, 0, r8
	cmpwi	r3, 0x100		/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */
	or	r3, r3, r7
	stwcx.	r3, 0, r8
	bne	21b

	/* Primary thread switches to guest partition. */
	cmpwi	r6,0
	bne	10f

	lwz	r7,KVM_LPID(r9)
BEGIN_FTR_SECTION
	ld	r6,KVM_SDR1(r9)
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r0
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	mtspr	SPRN_LPID,r7
	isync
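	/*
	 * (On HPT hosts the switch goes via LPID_RSVD: LPIDR is parked on
	 * the reserved LPID while SDR1 is updated, so the MMU never sees a
	 * mismatched LPID/hash-table pair, then the real guest LPID is
	 * installed.)
	 */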

	/* See if we need to flush the TLB. */
	mr	r3, r9			/* kvm pointer */
	lhz	r4, PACAPACAINDEX(r13)	/* physical cpu number */
	li	r5, 0			/* nested vcpu pointer */
	bl	kvmppc_check_need_tlb_flush
	nop
	ld	r5, HSTATE_KVM_VCORE(r13)

	/* Add timebase offset onto timebase */
22:	ld	r8,VCORE_TB_OFFSET(r5)
	cmpdi	r8,0
	beq	37f
	std	r8, VCORE_TB_OFFSET_APPL(r5)
	mftb	r6		/* current host timebase */
	add	r8,r8,r6
	mtspr	SPRN_TBU40,r8	/* update upper 40 bits */
	mftb	r7		/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	37f
	addis	r8,r8,0x100	/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8
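	/*
	 * (mtspr TBU40 writes only the upper 40 bits of the timebase.  The
	 * second mftb plus the clrldi/cmpld check detects a carry out of
	 * the low 24 bits between reading the TB and writing TBU40, and
	 * bumps the upper bits by one if it happened.)
	 */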

	/* Load guest PCR value to select appropriate compat mode */
37:	ld	r7, VCORE_PCR(r5)
	LOAD_REG_IMMEDIATE(r6, PCR_MASK)
	cmpld	r7, r6
	beq	38f
	or	r7, r7, r6
	mtspr	SPRN_PCR, r7
38:

BEGIN_FTR_SECTION
	/* DPDES and VTB are shared between threads */
	ld	r8, VCORE_DPDES(r5)
	ld	r7, VCORE_VTB(r5)
	mtspr	SPRN_DPDES, r8
	mtspr	SPRN_VTB, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Mark the subcore state as inside guest */
	bl	kvmppc_subcore_enter_guest
	nop
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r4, HSTATE_KVM_VCPU(r13)
	li	r0,1
	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */

	/* Do we have a guest vcpu to run? */
10:	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest
kvmppc_got_guest:
	/* Increment yield count if they have a VPA */
	ld	r3, VCPU_VPA(r4)
	cmpdi	r3, 0
	beq	25f
	li	r6, LPPACA_YIELDCOUNT
	LWZX_BE	r5, r3, r6
	addi	r5, r5, 1
	STWX_BE	r5, r3, r6
	li	r6, 1
	stb	r6, VCPU_VPA_DIRTY(r4)
25:

	/* Save purr/spurr */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)
	ld	r7,VCPU_PURR(r4)
	ld	r8,VCPU_SPURR(r4)
	mtspr	SPRN_PURR,r7
	mtspr	SPRN_SPURR,r8

	/* Save host values of some registers */
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_TIDR
	mfspr	r6, SPRN_PSSCR
	mfspr	r7, SPRN_PID
	std	r5, STACK_SLOT_TID(r1)
	std	r6, STACK_SLOT_PSSCR(r1)
	std	r7, STACK_SLOT_PID(r1)
	mfspr	r5, SPRN_HFSCR
	std	r5, STACK_SLOT_HFSCR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_CIABR
	mfspr	r6, SPRN_DAWR0
	mfspr	r7, SPRN_DAWRX0
	mfspr	r8, SPRN_IAMR
	std	r5, STACK_SLOT_CIABR(r1)
	std	r6, STACK_SLOT_DAWR(r1)
	std	r7, STACK_SLOT_DAWRX(r1)
	std	r8, STACK_SLOT_IAMR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	mfspr	r5, SPRN_AMR
	std	r5, STACK_SLOT_AMR(r1)
	mfspr	r6, SPRN_UAMOR
	std	r6, STACK_SLOT_UAMOR(r1)

BEGIN_FTR_SECTION
	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
	lwz	r5,VCPU_DABRX(r4)
	ld	r6,VCPU_DABR(r4)
	mtspr	SPRN_DABRX,r5
	mtspr	SPRN_DABR,r6
	isync
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Branch around the call if both CPU_FTR_TM and
 * CPU_FTR_P9_TM_HV_ASSIST are off.
 */
BEGIN_FTR_SECTION
	b	91f
END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
	/*
	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
	 */
	mr	r3, r4
	ld	r4, VCPU_MSR(r3)
	li	r5, 0			/* don't preserve non-vol regs */
	bl	kvmppc_restore_tm_hv
	nop
	ld	r4, HSTATE_KVM_VCPU(r13)
91:
#endif

	/* Load guest PMU registers; r4 = vcpu pointer here */
	mr	r3, r4
	bl	kvmhv_load_guest_pmu

	/* Load up FP, VMX and VSX registers */
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_load_fp

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* Switch DSCR to guest value */
	ld	r5, VCPU_DSCR(r4)
	mtspr	SPRN_DSCR, r5

BEGIN_FTR_SECTION
	/* Skip next section on POWER7 */
	b	8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Load up POWER8-specific registers */
	ld	r5, VCPU_IAMR(r4)
	lwz	r6, VCPU_PSPB(r4)
	ld	r7, VCPU_FSCR(r4)
	mtspr	SPRN_IAMR, r5
	mtspr	SPRN_PSPB, r6
	mtspr	SPRN_FSCR, r7
	/*
	 * Handle broken DAWR case by not writing it. This means we
	 * can still store the DAWR register for migration.
	 */
	LOAD_REG_ADDR(r5, dawr_force_enable)
	lbz	r5, 0(r5)
	cmpdi	r5, 0
	beq	1f
	ld	r5, VCPU_DAWR(r4)
	ld	r6, VCPU_DAWRX(r4)
	mtspr	SPRN_DAWR0, r5
	mtspr	SPRN_DAWRX0, r6
1:
	ld	r7, VCPU_CIABR(r4)
	ld	r8, VCPU_TAR(r4)
	mtspr	SPRN_CIABR, r7
	mtspr	SPRN_TAR, r8
	ld	r5, VCPU_IC(r4)
	ld	r8, VCPU_EBBHR(r4)
	mtspr	SPRN_IC, r5
	mtspr	SPRN_EBBHR, r8
	ld	r5, VCPU_EBBRR(r4)
	ld	r6, VCPU_BESCR(r4)
	lwz	r7, VCPU_GUEST_PID(r4)
	ld	r8, VCPU_WORT(r4)
	mtspr	SPRN_EBBRR, r5
	mtspr	SPRN_BESCR, r6
	mtspr	SPRN_PID, r7
	mtspr	SPRN_WORT, r8
BEGIN_FTR_SECTION
	/* POWER8-only registers */
	ld	r5, VCPU_TCSCR(r4)
	ld	r6, VCPU_ACOP(r4)
	ld	r7, VCPU_CSIGR(r4)
	ld	r8, VCPU_TACR(r4)
	mtspr	SPRN_TCSCR, r5
	mtspr	SPRN_ACOP, r6
	mtspr	SPRN_CSIGR, r7
	mtspr	SPRN_TACR, r8
	nop
FTR_SECTION_ELSE
	/* POWER9-only registers */
	ld	r5, VCPU_TID(r4)
	ld	r6, VCPU_PSSCR(r4)
	lbz	r8, HSTATE_FAKE_SUSPEND(r13)
	oris	r6, r6, PSSCR_EC@h	/* This makes the stop instruction trap to HV */
	rldimi	r6, r8, PSSCR_FAKE_SUSPEND_LG, 63 - PSSCR_FAKE_SUSPEND_LG
	ld	r7, VCPU_HFSCR(r4)
	mtspr	SPRN_TIDR, r5
	mtspr	SPRN_PSSCR, r6
	mtspr	SPRN_HFSCR, r7
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
8:

	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)
	mtspr	SPRN_SPRG0, r5
	mtspr	SPRN_SPRG1, r6
	mtspr	SPRN_SPRG2, r7
	mtspr	SPRN_SPRG3, r8

	/* Load up DAR and DSISR */
	ld	r5, VCPU_DAR(r4)
	lwz	r6, VCPU_DSISR(r4)
	mtspr	SPRN_DAR, r5
	mtspr	SPRN_DSISR, r6

	/* Restore AMR and UAMOR, set AMOR to all 1s */
	ld	r5,VCPU_AMR(r4)
	ld	r6,VCPU_UAMOR(r4)
	li	r7,-1
	mtspr	SPRN_AMR,r5
	mtspr	SPRN_UAMOR,r6
	mtspr	SPRN_AMOR,r7

	/* Restore state of CTRL run bit; assume 1 on entry */
	lwz	r5,VCPU_CTRL(r4)
	andi.	r5,r5,1
	bne	4f
	mfspr	r6,SPRN_CTRLF
	clrrdi	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
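	/* (CTRL[RUN] is the low bit here: if the vcpu last saved CTRL with
	 * RUN = 0, the clrrdi/mtspr above clears just that bit.) */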
	/* Secondary threads wait for primary to have done partition switch */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r6, HSTATE_PTID(r13)
	cmpwi	r6, 0
	beq	21f
	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	bne	21f
	HMT_LOW
20:	lwz	r3, VCORE_ENTRY_EXIT(r5)
	cmpwi	r3, 0x100
	bge	no_switch_exit
	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	beq	20b
	HMT_MEDIUM
21:
	/* Set LPCR. */
	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	isync

	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld	r8,VCPU_DEC_EXPIRES(r4)
	/* r8 is a host timebase value here, convert to guest TB */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r6,VCORE_TB_OFFSET_APPL(r5)
	add	r8,r8,r6
	mftb	r7
	subf	r3,r7,r8
	mtspr	SPRN_DEC,r3

	/* Check if HDEC expires soon */
	mfspr	r3, SPRN_HDEC
	EXTEND_HDEC(r3)
	cmpdi	r3, 512		/* 1 microsecond */
	blt	hdec_soon

	/* For hash guest, clear out and reload the SLB */
	ld	r6, VCPU_KVM(r4)
	lbz	r0, KVM_RADIX(r6)
	cmpwi	r0, 0
	bne	9f
	li	r6, 0
	slbmte	r6, r6
	slbia
	ptesync

	/* Load up guest SLB entries (N.B. slb_max will be 0 for radix) */
	lwz	r5,VCPU_SLB_MAX(r4)
	cmpwi	r5,0
	beq	9f
	mtctr	r5
	addi	r6,r4,VCPU_SLB
1:	ld	r8,VCPU_SLB_E(r6)
	ld	r9,VCPU_SLB_V(r6)
	slbmte	r9,r8
	addi	r6,r6,VCPU_SLB_SIZE
	bdnz	1b
9:

#ifdef CONFIG_KVM_XICS
	/* We are entering the guest on that thread, push VCPU to XIVE */
	ld	r11, VCPU_XIVE_SAVED_STATE(r4)
	li	r9, TM_QW1_OS
	lwz	r8, VCPU_XIVE_CAM_WORD(r4)
	cmpwi	r8, 0
	beq	no_xive
	li	r7, TM_QW1_OS + TM_WORD2
	mfmsr	r0
	andi.	r0, r0, MSR_DR		/* in real mode? */
	beq	2f
	ld	r10, HSTATE_XIVE_TIMA_VIRT(r13)
	cmpldi	cr1, r10, 0
	beq	cr1, no_xive
	eieio
	stdx	r11,r9,r10
	stwx	r8,r7,r10
	b	3f
2:	ld	r10, HSTATE_XIVE_TIMA_PHYS(r13)
	cmpldi	cr1, r10, 0
	beq	cr1, no_xive
	eieio
	stdcix	r11,r9,r10
	stwcix	r8,r7,r10
3:	li	r9, 1
	stb	r9, VCPU_XIVE_PUSHED(r4)
	eieio

	/*
	 * We clear the irq_pending flag. There is a small chance of a
	 * race vs. the escalation interrupt happening on another
	 * processor setting it again, but the only consequence is to
	 * cause a spurious wakeup on the next H_CEDE, which is not an
	 * issue.
	 */
	li	r0,0
	stb	r0, VCPU_IRQ_PENDING(r4)

	/*
	 * In single escalation mode, if the escalation interrupt is
	 * on, we mask it.
	 */
	lbz	r0, VCPU_XIVE_ESC_ON(r4)
	cmpwi	cr1, r0,0
	beq	cr1, 1f
	li	r9, XIVE_ESB_SET_PQ_01
	beq	4f			/* in real mode? */
	ld	r10, VCPU_XIVE_ESC_VADDR(r4)
	ldx	r0, r10, r9
	b	5f
4:	ld	r10, VCPU_XIVE_ESC_RADDR(r4)
	ldcix	r0, r10, r9
5:	sync
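	/*
	 * (A load from the ESB page at offset XIVE_ESB_SET_PQ_01 is a
	 * special MMIO op: it sets the PQ bits to 01 (masked) and returns
	 * their previous value, which is what gets tested below.)
	 */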
	/* We have a possible subtle race here: The escalation interrupt might
	 * have fired and be on its way to the host queue while we mask it,
	 * and if we unmask it early enough (re-cede right away), there is
	 * a theoretical possibility that it fires again, thus landing in the
	 * target queue more than once which is a big no-no.
	 *
	 * Fortunately, solving this is rather easy. If the above load setting
	 * PQ to 01 returns a previous value where P is set, then we know the
	 * escalation interrupt is somewhere on its way to the host. In that
	 * case we simply don't clear the xive_esc_on flag below. It will be
	 * eventually cleared by the handler for the escalation interrupt.
	 *
	 * Then, when doing a cede, we check that flag again before re-enabling
	 * the escalation interrupt, and if set, we abort the cede.
	 */
	andi.	r0, r0, XIVE_ESB_VAL_P
	bne-	1f

	/* Now P is 0, we can clear the flag */
	li	r0, 0
	stb	r0, VCPU_XIVE_ESC_ON(r4)
1:
no_xive:
#endif /* CONFIG_KVM_XICS */

	li	r0, 0
	stw	r0, STACK_SLOT_SHORT_PATH(r1)

deliver_guest_interrupt:	/* r4 = vcpu, r13 = paca */
	/* Check if we can deliver an external or decrementer interrupt now */
	ld	r0, VCPU_PENDING_EXC(r4)
BEGIN_FTR_SECTION
	/* On POWER9, also check for emulated doorbell interrupt */
	lbz	r3, VCPU_DBELL_REQ(r4)
	or	r0, r0, r3
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	cmpdi	r0, 0
	beq	71f
	mr	r3, r4
	bl	kvmppc_guest_entry_inject_int
	ld	r4, HSTATE_KVM_VCPU(r13)
71:
	ld	r6, VCPU_SRR0(r4)
	ld	r7, VCPU_SRR1(r4)
	mtspr	SPRN_SRR0, r6
	mtspr	SPRN_SRR1, r7

fast_guest_entry_c:
	ld	r10, VCPU_PC(r4)
	ld	r11, VCPU_MSR(r4)
	/* r11 = vcpu->arch.msr & ~MSR_HV */
	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
	ori	r11, r11, MSR_ME
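	/*
	 * (The rldicl/rotldi pair is a branchless way to clear MSR_HV: the
	 * first rotate brings the HV bit to the top where the rldicl mask
	 * clears it, and the second rotate restores the original bit
	 * positions - MSR_HV sits too high for a simple andi./andis.)
	 */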
	ld	r6, VCPU_CTR(r4)
	ld	r7, VCPU_XER(r4)
	mtctr	r6
	mtxer	r7

/*
 * Required state:
 * R4 = vcpu
 * R10: value for HSRR0
 * R11: value for HSRR1
 * R13 = PACA
 */
fast_guest_return:
	li	r0,0
	stb	r0,VCPU_CEDED(r4)	/* cancel cede */
	mtspr	SPRN_HSRR0,r10
	mtspr	SPRN_HSRR1,r11

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Accumulate timing */
	addi	r3, r4, VCPU_TB_GUEST
	bl	kvmhv_accumulate_time
#endif

	/* Enter guest */

BEGIN_FTR_SECTION
	ld	r5, VCPU_CFAR(r4)
	mtspr	SPRN_CFAR, r5
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r0, VCPU_PPR(r4)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	ld	r5, VCPU_LR(r4)
	mtlr	r5

	ld	r1, VCPU_GPR(R1)(r4)
	ld	r5, VCPU_GPR(R5)(r4)
	ld	r8, VCPU_GPR(R8)(r4)
	ld	r9, VCPU_GPR(R9)(r4)
	ld	r10, VCPU_GPR(R10)(r4)
	ld	r11, VCPU_GPR(R11)(r4)
	ld	r12, VCPU_GPR(R12)(r4)
	ld	r13, VCPU_GPR(R13)(r4)

BEGIN_FTR_SECTION
	mtspr	SPRN_PPR, r0
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

/* Move canary into DSISR to check for later */
BEGIN_FTR_SECTION
	li	r0, 0x7fff
	mtspr	SPRN_HDSISR, r0
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	ld	r6, VCPU_KVM(r4)
	lbz	r7, KVM_SECURE_GUEST(r6)
	cmpdi	r7, 0
	ld	r6, VCPU_GPR(R6)(r4)
	ld	r7, VCPU_GPR(R7)(r4)
	bne	ret_to_ultra

	ld	r0, VCPU_CR(r4)
	mtcr	r0

	ld	r0, VCPU_GPR(R0)(r4)
	ld	r2, VCPU_GPR(R2)(r4)
	ld	r3, VCPU_GPR(R3)(r4)
	ld	r4, VCPU_GPR(R4)(r4)
	HRFI_TO_GUEST
	b	.
/*
 * Use UV_RETURN ultracall to return control back to the Ultravisor after
 * processing a hypercall or interrupt that was forwarded (a.k.a. reflected)
 * to the Hypervisor.
 *
 * All registers have already been loaded, except:
 *   R0 = hcall result
 *   R2 = SRR1, so UV can detect a synthesized interrupt (if any)
 *   R3 = UV_RETURN
 */
ret_to_ultra:
	ld	r0, VCPU_CR(r4)
	mtcr	r0

	ld	r0, VCPU_GPR(R3)(r4)
	mfspr	r2, SPRN_SRR1
	li	r3, 0
	ori	r3, r3, UV_RETURN
	ld	r4, VCPU_GPR(R4)(r4)
	sc	2

/*
 * Enter the guest on a P9 or later system where we have exactly
 * one vcpu per vcore and we don't need to go to real mode
 * (which implies that host and guest are both using radix MMU mode).
 * r3 = vcpu pointer
 * Most SPRs and all the VSRs have been loaded already.
 */
_GLOBAL(__kvmhv_vcpu_entry_p9)
EXPORT_SYMBOL_GPL(__kvmhv_vcpu_entry_p9)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -SFS(r1)

	li	r0, 1
	stw	r0, STACK_SLOT_SHORT_PATH(r1)

	std	r3, HSTATE_KVM_VCPU(r13)
	mfcr	r4
	stw	r4, SFS+8(r1)

	std	r1, HSTATE_HOST_R1(r13)

	reg = 14
	.rept	18
	std	reg, STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1)
	reg = reg + 1
	.endr

	reg = 14
	.rept	18
	ld	reg, __VCPU_GPR(reg)(r3)
	reg = reg + 1
	.endr

	mfmsr	r10
	std	r10, HSTATE_HOST_MSR(r13)

	mr	r4, r3
	b	fast_guest_entry_c
guest_exit_short_path:

	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	reg = 14
	.rept	18
	std	reg, __VCPU_GPR(reg)(r9)
	reg = reg + 1
	.endr

	reg = 14
	.rept	18
	ld	reg, STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1)
	reg = reg + 1
	.endr

	lwz	r4, SFS+8(r1)
	mtcr	r4

	mr	r3, r12			/* trap number */

	addi	r1, r1, SFS
	ld	r0, PPC_LR_STKOFF(r1)
	mtlr	r0

	/* If we are in real mode, do a rfid to get back to the caller */
	mfmsr	r4
	andi.	r5, r4, MSR_IR
	bnelr
	rldicl	r5, r4, 64 - MSR_TS_S_LG, 62	/* extract TS field */
	mtspr	SPRN_SRR0, r0
	ld	r10, HSTATE_HOST_MSR(r13)
	rldimi	r10, r5, MSR_TS_S_LG, 63 - MSR_TS_T_LG
	mtspr	SPRN_SRR1, r10
	RFI_TO_KERNEL
	b	.

secondary_too_late:
	li	r12, 0
	stw	r12, STACK_SLOT_TRAP(r1)
	cmpdi	r4, 0
	beq	11f
	stw	r12, VCPU_TRAP(r4)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
11:	b	kvmhv_switch_to_host

no_switch_exit:
	HMT_MEDIUM
	li	r12, 0
	b	12f
hdec_soon:
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
12:	stw	r12, VCPU_TRAP(r4)
	mr	r9, r4
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
	b	guest_bypass

/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
 */
	.globl	kvmppc_interrupt_hv
kvmppc_interrupt_hv:
	/*
	 * Register contents:
	 * R12		= (guest CR << 32) | interrupt vector
	 * R13		= PACA
	 * guest R12 saved in shadow VCPU SCRATCH0
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	std	r9, HSTATE_SCRATCH2(r13)
	lbz	r9, HSTATE_IN_GUEST(r13)
	cmpwi	r9, KVM_GUEST_MODE_HOST_HV
	beq	kvmppc_bad_host_intr
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	cmpwi	r9, KVM_GUEST_MODE_GUEST
	ld	r9, HSTATE_SCRATCH2(r13)
	beq	kvmppc_interrupt_pr
#endif
	/* We're now back in the host but in guest MMU context */
	li	r9, KVM_GUEST_MODE_HOST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Save registers */

	std	r0, VCPU_GPR(R0)(r9)
	std	r1, VCPU_GPR(R1)(r9)
	std	r2, VCPU_GPR(R2)(r9)
	std	r3, VCPU_GPR(R3)(r9)
	std	r4, VCPU_GPR(R4)(r9)
	std	r5, VCPU_GPR(R5)(r9)
	std	r6, VCPU_GPR(R6)(r9)
	std	r7, VCPU_GPR(R7)(r9)
	std	r8, VCPU_GPR(R8)(r9)
	ld	r0, HSTATE_SCRATCH2(r13)
	std	r0, VCPU_GPR(R9)(r9)
	std	r10, VCPU_GPR(R10)(r9)
	std	r11, VCPU_GPR(R11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	std	r3, VCPU_GPR(R12)(r9)
	/* CR is in the high half of r12 */
	srdi	r4, r12, 32
	std	r4, VCPU_CR(r9)
BEGIN_FTR_SECTION
	ld	r3, HSTATE_CFAR(r13)
	std	r3, VCPU_CFAR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r4, HSTATE_PPR(r13)
	std	r4, VCPU_PPR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	mfspr	r10, SPRN_SRR0
	mfspr	r11, SPRN_SRR1
	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
	/* trap is in the low half of r12, clear CR from the high half */
	clrldi	r12, r12, 32
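	/*
	 * (Vectors delivered via HSRR0/1 are expected to arrive here with
	 * bit 1 set in the trap number - i.e. "vector + 0x2" - which is
	 * what the andi. below tests; clrrdi strips the flag again.)
	 */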
	andi.	r0, r12, 2		/* need to read HSRR0/1? */
	beq	1f
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
	clrrdi	r12, r12, 2
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)

	GET_SCRATCH0(r3)
	mflr	r4
	std	r3, VCPU_GPR(R13)(r9)
	std	r4, VCPU_LR(r9)

	stw	r12,VCPU_TRAP(r9)

	/*
	 * Now that we have saved away SRR0/1 and HSRR0/1,
	 * interrupts are recoverable in principle, so set MSR_RI.
	 * This becomes important for relocation-on interrupts from
	 * the guest, which we can get in radix mode on POWER9.
	 */
	li	r0, MSR_RI
	mtmsrd	r0, 1

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r9, VCPU_TB_RMINTR
	mr	r4, r9
	bl	kvmhv_accumulate_time
	ld	r5, VCPU_GPR(R5)(r9)
	ld	r6, VCPU_GPR(R6)(r9)
	ld	r7, VCPU_GPR(R7)(r9)
	ld	r8, VCPU_GPR(R8)(r9)
#endif

	/* Save HEIR (HV emulation assist reg) in emul_inst
	   if this is an HEI (HV emulation interrupt, e40) */
	li	r3,KVM_INST_FETCH_FAILED
	stw	r3,VCPU_LAST_INST(r9)
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
	bne	11f
	mfspr	r3,SPRN_HEIR
11:	stw	r3,VCPU_HEIR(r9)

	/* these are volatile across C function calls */
	mfctr	r3
	mfxer	r4
	std	r3, VCPU_CTR(r9)
	std	r4, VCPU_XER(r9)

	/* Save more register state */
	mfdar	r3
	mfdsisr	r4
	std	r3, VCPU_DAR(r9)
	stw	r4, VCPU_DSISR(r9)

	/* If this is a page table miss then see if it's theirs or ours */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	kvmppc_hdsi
	std	r3, VCPU_FAULT_DAR(r9)
	stw	r4, VCPU_FAULT_DSISR(r9)
	cmpwi	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	beq	kvmppc_hisi

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* For softpatch interrupt, go off and do TM instruction emulation */
	cmpwi	r12, BOOK3S_INTERRUPT_HV_SOFTPATCH
	beq	kvmppc_tm_emul
#endif

	/* See if this is a leftover HDEC interrupt */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	bne	2f
	mfspr	r3,SPRN_HDEC
	EXTEND_HDEC(r3)
	cmpdi	r3,0
	mr	r4,r9
	bge	fast_guest_return
2:
	/* See if this is an hcall we can handle in real mode */
	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
	beq	hcall_try_real_mode

	/* Hypervisor doorbell - exit only if host IPI flag set */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DOORBELL
	bne	3f
BEGIN_FTR_SECTION
	PPC_MSGSYNC
	lwsync
	/* always exit if we're running a nested guest */
	ld	r0, VCPU_NESTED(r9)
	cmpdi	r0, 0
	bne	guest_exit_cont
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	beq	maybe_reenter_guest
	b	guest_exit_cont
3:
	/* If it's a hypervisor facility unavailable interrupt, save HFSCR */
	cmpwi	r12, BOOK3S_INTERRUPT_H_FAC_UNAVAIL
	bne	14f
	mfspr	r3, SPRN_HFSCR
	std	r3, VCPU_HFSCR(r9)
	b	guest_exit_cont
14:
	/* External interrupt ? */
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	beq	kvmppc_guest_external
	/* See if it is a machine check */
	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	machine_check_realmode
	/* Or a hypervisor maintenance interrupt */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	beq	hmi_realmode

guest_exit_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r9, VCPU_TB_RMEXIT
	mr	r4, r9
	bl	kvmhv_accumulate_time
#endif
#ifdef CONFIG_KVM_XICS
	/* We are exiting, pull the VP from the XIVE */
	lbz	r0, VCPU_XIVE_PUSHED(r9)
	cmpwi	cr0, r0, 0
	beq	1f
	li	r7, TM_SPC_PULL_OS_CTX
	li	r6, TM_QW1_OS
	mfmsr	r0
	andi.	r0, r0, MSR_DR		/* in real mode? */
	beq	2f
	ld	r10, HSTATE_XIVE_TIMA_VIRT(r13)
	cmpldi	cr0, r10, 0
	beq	1f
	/* First load to pull the context, we ignore the value */
	eieio
	lwzx	r11, r7, r10
	/* Second load to recover the context state (Words 0 and 1) */
	ldx	r11, r6, r10
	b	3f
2:	ld	r10, HSTATE_XIVE_TIMA_PHYS(r13)
	cmpldi	cr0, r10, 0
	beq	1f
	/* First load to pull the context, we ignore the value */
	eieio
	lwzcix	r11, r7, r10
	/* Second load to recover the context state (Words 0 and 1) */
	ldcix	r11, r6, r10
3:	std	r11, VCPU_XIVE_SAVED_STATE(r9)
	/* Fixup some of the state for the next load */
	li	r10, 0
	li	r0, 0xff
	stb	r10, VCPU_XIVE_PUSHED(r9)
	stb	r10, (VCPU_XIVE_SAVED_STATE+3)(r9)
	stb	r0, (VCPU_XIVE_SAVED_STATE+4)(r9)
	eieio
1:
#endif /* CONFIG_KVM_XICS */

	/*
	 * Possibly flush the link stack here, before we do a blr in
	 * guest_exit_short_path.
	 */
1:	nop
	patch_site 1b patch__call_kvm_flush_link_stack

	/* If we came in through the P9 short path, go back out to C now */
	lwz	r0, STACK_SLOT_SHORT_PATH(r1)
	cmpwi	r0, 0
	bne	guest_exit_short_path

	/* For hash guest, read the guest SLB and save it away */
	ld	r5, VCPU_KVM(r9)
	lbz	r0, KVM_RADIX(r5)
	li	r5, 0
	cmpwi	r0, 0
	bne	3f			/* for radix, save 0 entries */
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	mtctr	r0
	li	r6,0
	addi	r7,r9,VCPU_SLB
1:	slbmfee	r8,r6
	andis.	r0,r8,SLB_ESID_V@h
	beq	2f
	add	r8,r8,r6		/* put index in */
	slbmfev	r3,r6
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE
	addi	r5,r5,1
2:	addi	r6,r6,1
	bdnz	1b
	/* Finally clear out the SLB */
	li	r0,0
	slbmte	r0,r0
	slbia
	ptesync
3:	stw	r5,VCPU_SLB_MAX(r9)

	/* load host SLB entries */
BEGIN_MMU_FTR_SECTION
	b	0f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	LDX_BE	r5, r8, r3
	addi	r3, r3, 8
	LDX_BE	r6, r8, r3
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr
0:

guest_bypass:
	stw	r12, STACK_SLOT_TRAP(r1)

	/* Save DEC */
	/* Do this before kvmhv_commence_exit so we know TB is guest TB */
	ld	r3, HSTATE_KVM_VCORE(r13)
	mfspr	r5,SPRN_DEC
	mftb	r6
	/* On P9, if the guest has large decr enabled, don't sign extend */
BEGIN_FTR_SECTION
	ld	r4, VCORE_LPCR(r3)
	andis.	r4, r4, LPCR_LD@h
	bne	16f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	extsw	r5,r5
16:	add	r5,r5,r6
	/* r5 is a guest timebase value here, convert to host TB */
	ld	r4,VCORE_TB_OFFSET_APPL(r3)
	subf	r5,r4,r5
	std	r5,VCPU_DEC_EXPIRES(r9)
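	/*
	 * (VCPU_DEC_EXPIRES is kept in host timebase units: guest DEC plus
	 * guest TB gives the expiry in guest TB, and subtracting the
	 * applied offset converts it back, mirroring the add done on the
	 * way in.)
	 */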
	/* Increment exit count, poke other threads to exit */
	mr	r3, r12
	bl	kvmhv_commence_exit
	nop
	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Stop others sending VCPU interrupts to this physical CPU */
	li	r0, -1
	stw	r0, VCPU_CPU(r9)
	stw	r0, VCPU_THREAD_CPU(r9)

	/* Save guest CTRL register, set runlatch to 1 */
	mfspr	r6,SPRN_CTRLF
	stw	r6,VCPU_CTRL(r9)
	andi.	r0,r6,1
	bne	4f
	ori	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/*
	 * Save the guest PURR/SPURR
	 */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	ld	r7,VCPU_PURR(r9)
	ld	r8,VCPU_SPURR(r9)
	std	r5,VCPU_PURR(r9)
	std	r6,VCPU_SPURR(r9)
	subf	r5,r7,r5
	subf	r6,r8,r6

	/*
	 * Restore host PURR/SPURR and add guest times
	 * so that the time in the guest gets accounted.
	 */
	ld	r3,HSTATE_PURR(r13)
	ld	r4,HSTATE_SPURR(r13)
	add	r3,r3,r5
	add	r4,r4,r6
	mtspr	SPRN_PURR,r3
	mtspr	SPRN_SPURR,r4

BEGIN_FTR_SECTION
	b	8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Save POWER8-specific registers */
	mfspr	r5, SPRN_IAMR
	mfspr	r6, SPRN_PSPB
	mfspr	r7, SPRN_FSCR
	std	r5, VCPU_IAMR(r9)
	stw	r6, VCPU_PSPB(r9)
	std	r7, VCPU_FSCR(r9)
	mfspr	r5, SPRN_IC
	mfspr	r7, SPRN_TAR
	std	r5, VCPU_IC(r9)
	std	r7, VCPU_TAR(r9)
	mfspr	r8, SPRN_EBBHR
	std	r8, VCPU_EBBHR(r9)
	mfspr	r5, SPRN_EBBRR
	mfspr	r6, SPRN_BESCR
	mfspr	r7, SPRN_PID
	mfspr	r8, SPRN_WORT
	std	r5, VCPU_EBBRR(r9)
	std	r6, VCPU_BESCR(r9)
	stw	r7, VCPU_GUEST_PID(r9)
	std	r8, VCPU_WORT(r9)
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_TCSCR
	mfspr	r6, SPRN_ACOP
	mfspr	r7, SPRN_CSIGR
	mfspr	r8, SPRN_TACR
	std	r5, VCPU_TCSCR(r9)
	std	r6, VCPU_ACOP(r9)
	std	r7, VCPU_CSIGR(r9)
	std	r8, VCPU_TACR(r9)
FTR_SECTION_ELSE
	mfspr	r5, SPRN_TIDR
	mfspr	r6, SPRN_PSSCR
	std	r5, VCPU_TID(r9)
	rldicl	r6, r6, 4, 50		/* r6 &= PSSCR_GUEST_VIS */
	rotldi	r6, r6, 60
	std	r6, VCPU_PSSCR(r9)
	/* Restore host HFSCR value */
	ld	r7, STACK_SLOT_HFSCR(r1)
	mtspr	SPRN_HFSCR, r7
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
	/*
	 * Restore various registers to 0, where non-zero values
	 * set by the guest could disrupt the host.
	 */
	li	r0, 0
	mtspr	SPRN_PSPB, r0
	mtspr	SPRN_WORT, r0
BEGIN_FTR_SECTION
	mtspr	SPRN_TCSCR, r0
	/* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
	li	r0, 1
	sldi	r0, r0, 31
	mtspr	SPRN_MMCRS, r0
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)

	/* Save and restore AMR, IAMR and UAMOR before turning on the MMU */
	ld	r8, STACK_SLOT_IAMR(r1)
	mtspr	SPRN_IAMR, r8

8:	/* Power7 jumps back in here */
	mfspr	r5,SPRN_AMR
	mfspr	r6,SPRN_UAMOR
	std	r5,VCPU_AMR(r9)
	std	r6,VCPU_UAMOR(r9)
	ld	r5,STACK_SLOT_AMR(r1)
	ld	r6,STACK_SLOT_UAMOR(r1)
	mtspr	SPRN_AMR, r5
	mtspr	SPRN_UAMOR, r6

	/* Switch DSCR back to host value */
	mfspr	r8, SPRN_DSCR
	ld	r7, HSTATE_DSCR(r13)
	std	r8, VCPU_DSCR(r9)
	mtspr	SPRN_DSCR, r7

	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r9)
	std	r15, VCPU_GPR(R15)(r9)
	std	r16, VCPU_GPR(R16)(r9)
	std	r17, VCPU_GPR(R17)(r9)
	std	r18, VCPU_GPR(R18)(r9)
	std	r19, VCPU_GPR(R19)(r9)
	std	r20, VCPU_GPR(R20)(r9)
	std	r21, VCPU_GPR(R21)(r9)
	std	r22, VCPU_GPR(R22)(r9)
	std	r23, VCPU_GPR(R23)(r9)
	std	r24, VCPU_GPR(R24)(r9)
	std	r25, VCPU_GPR(R25)(r9)
	std	r26, VCPU_GPR(R26)(r9)
	std	r27, VCPU_GPR(R27)(r9)
	std	r28, VCPU_GPR(R28)(r9)
	std	r29, VCPU_GPR(R29)(r9)
	std	r30, VCPU_GPR(R30)(r9)
	std	r31, VCPU_GPR(R31)(r9)

	/* Save SPRGs */
	mfspr	r3, SPRN_SPRG0
	mfspr	r4, SPRN_SPRG1
	mfspr	r5, SPRN_SPRG2
	mfspr	r6, SPRN_SPRG3
	std	r3, VCPU_SPRG0(r9)
	std	r4, VCPU_SPRG1(r9)
	std	r5, VCPU_SPRG2(r9)
	std	r6, VCPU_SPRG3(r9)

	/* save FP state */
	mr	r3, r9
	bl	kvmppc_save_fp

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Branch around the call if both CPU_FTR_TM and
 * CPU_FTR_P9_TM_HV_ASSIST are off.
 */
BEGIN_FTR_SECTION
	b	91f
END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
	/*
	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
	 */
	mr	r3, r9
	ld	r4, VCPU_MSR(r3)
	li	r5, 0			/* don't preserve non-vol regs */
	bl	kvmppc_save_tm_hv
	nop
	ld	r9, HSTATE_KVM_VCPU(r13)
91:
#endif

	/* Increment yield count if they have a VPA */
	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
	cmpdi	r8, 0
	beq	25f
	li	r4, LPPACA_YIELDCOUNT
	LWZX_BE	r3, r8, r4
	addi	r3, r3, 1
	STWX_BE	r3, r8, r4
	li	r3, 1
	stb	r3, VCPU_VPA_DIRTY(r9)
25:
	/* Save PMU registers if requested */
	/* r8 and cr0.eq are live here */
	mr	r3, r9
	li	r4, 1
	beq	21f			/* if no VPA, save PMU stuff anyway */
	lbz	r4, LPPACA_PMCINUSE(r8)
21:	bl	kvmhv_save_guest_pmu
	ld	r9, HSTATE_KVM_VCPU(r13)
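	/* (r4 is the "PMU in use" argument for kvmhv_save_guest_pmu: forced
	 * to 1 when the vcpu has no VPA, otherwise read from the lppaca.) */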

	/* Restore host values of some registers */
BEGIN_FTR_SECTION
	ld	r5, STACK_SLOT_CIABR(r1)
	ld	r6, STACK_SLOT_DAWR(r1)
	ld	r7, STACK_SLOT_DAWRX(r1)
	mtspr	SPRN_CIABR, r5
	/*
	 * If the DAWR doesn't work, it's ok to write these here as
	 * this value should always be zero
	 */
	mtspr	SPRN_DAWR0, r6
	mtspr	SPRN_DAWRX0, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
BEGIN_FTR_SECTION
	ld	r5, STACK_SLOT_TID(r1)
	ld	r6, STACK_SLOT_PSSCR(r1)
	ld	r7, STACK_SLOT_PID(r1)
	mtspr	SPRN_TIDR, r5
	mtspr	SPRN_PSSCR, r6
	mtspr	SPRN_PID, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

#ifdef CONFIG_PPC_RADIX_MMU
	/*
	 * Are we running hash or radix ?
	 */
	ld	r5, VCPU_KVM(r9)
	lbz	r0, KVM_RADIX(r5)
	cmpwi	cr2, r0, 0
	beq	cr2, 2f

	/*
	 * Radix: do eieio; tlbsync; ptesync sequence in case we
	 * interrupted the guest between a tlbie and a ptesync.
	 */
	eieio
	tlbsync
	ptesync

BEGIN_FTR_SECTION
	/* Radix: Handle the case where the guest used an illegal PID */
	LOAD_REG_ADDR(r4, mmu_base_pid)
	lwz	r3, VCPU_GUEST_PID(r9)
	lwz	r5, 0(r4)
	cmpw	cr0,r3,r5
	blt	2f

	/*
	 * Illegal PID, the HW might have prefetched and cached in the TLB
	 * some translations for the LPID 0 / guest PID combination which
	 * Linux doesn't know about, so we need to flush that PID out of
	 * the TLB. First we need to set LPIDR to 0 so tlbiel applies to
	 * the right context.
	 */
	li	r0,0
	mtspr	SPRN_LPID,r0
	isync

	/* Then do a congruence class local flush */
	ld	r6,VCPU_KVM(r9)
	lwz	r0,KVM_TLB_SETS(r6)
	mtctr	r0
	li	r7,0x400		/* IS field = 0b01 */
	ptesync
	sldi	r0,r3,32		/* RS has PID */
1:	PPC_TLBIEL(7,0,2,1,1)		/* RIC=2, PRS=1, R=1 */
	addi	r7,r7,0x1000
	bdnz	1b
	ptesync
END_FTR_SECTION_IFSET(CPU_FTR_P9_RADIX_PREFETCH_BUG)

2:
#endif /* CONFIG_PPC_RADIX_MMU */

	/*
	 * cp_abort is required if the processor supports local copy-paste
	 * to clear the copy buffer that was under control of the guest.
	 */
BEGIN_FTR_SECTION
	PPC_CP_ABORT
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_31)

	/*
	 * POWER7/POWER8 guest -> host partition switch code.
	 * We don't have to lock against tlbies but we do
	 * have to coordinate the hardware threads.
	 * Here STACK_SLOT_TRAP(r1) contains the trap number.
	 */
kvmhv_switch_to_host:
	/* Secondary threads wait for primary to do partition switch */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r3,HSTATE_PTID(r13)
	cmpwi	r3,0
	beq	15f
	HMT_LOW
13:	lbz	r3,VCORE_IN_GUEST(r5)
	cmpwi	r3,0
	bne	13b
	HMT_MEDIUM
	b	16f

	/* Primary thread waits for all the secondaries to exit guest */
15:	lwz	r3,VCORE_ENTRY_EXIT(r5)
	rlwinm	r0,r3,32-8,0xff
	clrldi	r3,r3,56
	cmpw	r3,r0
	bne	15b
	isync
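	/*
	 * (entry_exit_map packs the entry count in the low byte and the
	 * exit count in the byte above it; the rlwinm/clrldi above extract
	 * the two counts so the primary spins until they match.)
	 */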
	/* Did we actually switch to the guest at all? */
	lbz	r6, VCORE_IN_GUEST(r5)
	cmpwi	r6, 0
	beq	19f

	/* Primary thread switches back to host partition */
	lwz	r7,KVM_HOST_LPID(r4)
BEGIN_FTR_SECTION
	ld	r6,KVM_HOST_SDR1(r4)
	li	r8,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r8
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to host page table */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	mtspr	SPRN_LPID,r7
	isync

BEGIN_FTR_SECTION
	/* DPDES and VTB are shared between threads */
	mfspr	r7, SPRN_DPDES
	mfspr	r8, SPRN_VTB
	std	r7, VCORE_DPDES(r5)
	std	r8, VCORE_VTB(r5)
	/* clear DPDES so we don't get guest doorbells in the host */
	li	r8, 0
	mtspr	SPRN_DPDES, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Subtract timebase offset from timebase */
	ld	r8, VCORE_TB_OFFSET_APPL(r5)
	cmpdi	r8,0
	beq	17f
	li	r0, 0
	std	r0, VCORE_TB_OFFSET_APPL(r5)
	mftb	r6			/* current guest timebase */
	subf	r8,r8,r6
	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
	mftb	r7			/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	17f
	addis	r8,r8,0x100		/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8

17:
	/*
	 * If this is an HMI, we called kvmppc_realmode_hmi_handler
	 * above, which may or may not have already called
	 * kvmppc_subcore_exit_guest. Fortunately, all that
	 * kvmppc_subcore_exit_guest does is clear a flag, so calling
	 * it again here is benign even if kvmppc_realmode_hmi_handler
	 * has already called it.
	 */
	bl	kvmppc_subcore_exit_guest
	nop
30:	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */

	/* Reset PCR */
	ld	r0, VCORE_PCR(r5)
	LOAD_REG_IMMEDIATE(r6, PCR_MASK)
	cmpld	r0, r6
	beq	18f
	mtspr	SPRN_PCR, r6
18:
	/* Signal secondary CPUs to continue */
	li	r0, 0
	stb	r0,VCORE_IN_GUEST(r5)
19:	lis	r8,0x7fff		/* MAX_INT@h */
	mtspr	SPRN_HDEC,r8

16:
BEGIN_FTR_SECTION
	/* On POWER9 with HPT-on-radix we need to wait for all other threads */
	ld	r3, HSTATE_SPLIT_MODE(r13)
	cmpdi	r3, 0
	beq	47f
	lwz	r8, KVM_SPLIT_DO_RESTORE(r3)
	cmpwi	r8, 0
	beq	47f
	bl	kvmhv_p9_restore_lpcr
	nop
	b	48f
47:
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	ld	r8,KVM_HOST_LPCR(r4)
	mtspr	SPRN_LPCR,r8
	isync
48:
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Finish timing, if we have a vcpu */
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	li	r3, 0
	beq	2f
	bl	kvmhv_accumulate_time
2:
#endif
	/* Unset guest mode */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	lwz	r12, STACK_SLOT_TRAP(r1)	/* return trap # in r12 */
	ld	r0, SFS+PPC_LR_STKOFF(r1)
	addi	r1, r1, SFS
	mtlr	r0
	blr

.balign 32
.global kvm_flush_link_stack
kvm_flush_link_stack:
	/* Save LR into r0 */
	mflr	r0

	/* Flush the link stack. On Power8 it's up to 32 entries in size. */
	.rept 32
	bl	.+4
	.endr

	/* And on Power9 it's up to 64. */
BEGIN_FTR_SECTION
	.rept 32
	bl	.+4
	.endr
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	/* Restore LR */
	mtlr	r0
	blr

kvmppc_guest_external:
	/* External interrupt, first check for host_ipi. If this is
	 * set, we know the host wants us out so let's do it now
	 */
	bl	kvmppc_read_intr

	/*
	 * Restore the active volatile registers after returning from
	 * a C function.
	 */
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_EXTERNAL

	/*
	 * kvmppc_read_intr return codes:
	 *
	 * Exit to host (r3 > 0)
	 *   1 An interrupt is pending that needs to be handled by the host
	 *     Exit guest and return to host by branching to guest_exit_cont
	 *
	 *   2 Passthrough that needs completion in the host
	 *     Exit guest and return to host by branching to guest_exit_cont
	 *     However, we also set r12 to BOOK3S_INTERRUPT_HV_RM_HARD
	 *     to indicate to the host to complete handling the interrupt
	 *
	 * Before returning to guest, we check if any CPU is heading out
	 * to the host and if so, we head out also.  If no CPUs are heading
	 * out, we check the return values <= 0 below.
	 *
	 * Return to guest (r3 <= 0)
	 *   0 No external interrupt is pending
	 *  -1 A guest wakeup IPI (which has now been cleared)
	 *     In either case, we return to guest to deliver any pending
	 *     guest interrupts.
	 *
	 *  -2 A PCI passthrough external interrupt was handled
	 *     (interrupt was delivered directly to guest)
	 *     Return to guest to deliver any pending guest interrupts.
	 */

	cmpdi	r3, 1
	ble	1f

	/* Return code = 2 */
	li	r12, BOOK3S_INTERRUPT_HV_RM_HARD
	stw	r12, VCPU_TRAP(r9)
	b	guest_exit_cont

1:	/* Return code <= 1 */
	cmpdi	r3, 0
	bgt	guest_exit_cont

	/* Return code <= 0 */
maybe_reenter_guest:
	ld	r5, HSTATE_KVM_VCORE(r13)
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	mr	r4, r9
	blt	deliver_guest_interrupt
	b	guest_exit_cont

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Softpatch interrupt for transactional memory emulation cases
 * on POWER9 DD2.2.  This is early in the guest exit path - we
 * haven't saved registers or done a treclaim yet.
 */
kvmppc_tm_emul:
	/* Save instruction image in HEIR */
	mfspr	r3, SPRN_HEIR
	stw	r3, VCPU_HEIR(r9)

	/*
	 * The cases we want to handle here are those where the guest
	 * is in real suspend mode and is trying to transition to
	 * transactional mode.
	 */
	lbz	r0, HSTATE_FAKE_SUSPEND(r13)
	cmpwi	r0, 0		/* keep exiting guest if in fake suspend */
	bne	guest_exit_cont
	rldicl	r3, r11, 64 - MSR_TS_S_LG, 62
	cmpwi	r3, 1		/* or if not in suspend state */
	bne	guest_exit_cont

	/* Call C code to do the emulation */
	mr	r3, r9
	bl	kvmhv_p9_tm_emulation_early
	nop
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_HV_SOFTPATCH
	cmpwi	r3, 0
	beq	guest_exit_cont		/* continue exiting if not handled */
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	b	fast_interrupt_c_return	/* go back to guest if handled */
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

/*
 * Check whether an HDSI is an HPTE not found fault or something else.
 * If it is an HPTE not found fault that is due to the guest accessing
 * a page that they have mapped but which we have paged out, then
 * we continue on with the guest exit path. In all other cases,
 * reflect the HDSI to the guest as a DSI.
 */
kvmppc_hdsi:
	ld	r3, VCPU_KVM(r9)
	lbz	r0, KVM_RADIX(r3)
	mfspr	r4, SPRN_HDAR
	mfspr	r6, SPRN_HDSISR
BEGIN_FTR_SECTION
	/* Look for DSISR canary. If we find it, retry instruction */
	cmpdi	r6, 0x7fff
	beq	6f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
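	/*
	 * (0x7fff is the canary written to HDSISR on guest entry; finding
	 * it here suggests HDSISR was not updated for this interrupt - an
	 * early-POWER9 quirk - so the safe response is to retry the
	 * instruction.)
	 */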
	cmpwi	r0, 0
	bne	.Lradix_hdsi		/* on radix, just save DAR/DSISR/ASDR */
	/* HPTE not found fault or protection fault? */
	andis.	r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
	beq	1f			/* if not, send it to the guest */
	andi.	r0, r11, MSR_DR		/* data relocation enabled? */
	beq	3f
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_ASDR		/* on POWER9, use ASDR to get VSID */
	b	4f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	clrrdi	r0, r4, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	li	r0, BOOK3S_INTERRUPT_DATA_SEGMENT
	bne	7f			/* if no SLB entry found */
4:	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)

	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 1			/* data fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	6f
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	guest_exit_cont
	cmpdi	r3, -2			/* MMIO emulation; need instr word */
	beq	2f

	/* Synthesize a DSI (or DSegI) for the guest */
	ld	r4, VCPU_FAULT_DAR(r9)
	mr	r6, r3
1:	li	r0, BOOK3S_INTERRUPT_DATA_STORAGE
	mtspr	SPRN_DSISR, r6
7:	mtspr	SPRN_DAR, r4
	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	mr	r10, r0
	bl	kvmppc_msr_interrupt
fast_interrupt_c_return:
6:	ld	r7, VCPU_CTR(r9)
	ld	r8, VCPU_XER(r9)
	mtctr	r7
	mtxer	r8
	mr	r4, r9
	b	fast_guest_return

3:	ld	r5, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r5)
	b	4b

	/* If this is for emulated MMIO, load the instruction word */
2:	li	r8, KVM_INST_FETCH_FAILED	/* In case lwz faults */

	/* Set guest mode to 'jump over instruction' so if lwz faults
	 * we'll just continue at the next IP. */
	li	r0, KVM_GUEST_MODE_SKIP
	stb	r0, HSTATE_IN_GUEST(r13)

	/* Do the access with MSR:DR enabled */
	mfmsr	r3
	ori	r4, r3, MSR_DR		/* Enable paging for data */
	mtmsrd	r4
	lwz	r8, 0(r10)
	mtmsrd	r3

	/* Store the result */
	stw	r8, VCPU_LAST_INST(r9)

	/* Unset guest mode. */
	li	r0, KVM_GUEST_MODE_HOST_HV
	stb	r0, HSTATE_IN_GUEST(r13)
	b	guest_exit_cont

.Lradix_hdsi:
	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)
.Lradix_hisi:
	mfspr	r5, SPRN_ASDR
	std	r5, VCPU_FAULT_GPA(r9)
	b	guest_exit_cont

/*
 * Similarly for an HISI, reflect it to the guest as an ISI unless
 * it is an HPTE not found fault for a page that we have paged out.
 */
kvmppc_hisi:
	ld	r3, VCPU_KVM(r9)
	lbz	r0, KVM_RADIX(r3)
	cmpwi	r0, 0
	bne	.Lradix_hisi		/* for radix, just save ASDR */
	andis.	r0, r11, SRR1_ISI_NOPT@h
	beq	1f
	andi.	r0, r11, MSR_IR		/* instruction relocation enabled? */
 */
	beq	3f
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_ASDR	/* on POWER9, use ASDR to get VSID */
	b	4f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	clrrdi	r0, r10, 28
	PPC_SLBFEE_DOT(R5, R0)	/* if so, look up SLB */
	li	r0, BOOK3S_INTERRUPT_INST_SEGMENT
	bne	7f		/* if no SLB entry found */
4:
	/* Search the hash table. */
	mr	r3, r9		/* vcpu pointer */
	mr	r4, r10
	mr	r6, r11
	li	r7, 0		/* instruction fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	cmpdi	r3, 0		/* retry the instruction */
	beq	fast_interrupt_c_return
	cmpdi	r3, -1		/* handle in kernel mode */
	beq	guest_exit_cont

	/* Synthesize an ISI (or ISegI) for the guest */
	mr	r11, r3
1:	li	r0, BOOK3S_INTERRUPT_INST_STORAGE
7:	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	mr	r10, r0
	bl	kvmppc_msr_interrupt
	b	fast_interrupt_c_return

3:	ld	r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r6)
	b	4b

/*
 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 *
 * r5 - r8 contain hcall args,
 * r9 = vcpu, r10 = pc, r11 = msr, r12 = trap, r13 = paca
 */
hcall_try_real_mode:
	ld	r3,VCPU_GPR(R3)(r9)
	andi.	r0,r11,MSR_PR
	/* sc 1 from userspace - reflect to guest syscall */
	bne	sc_1_fast_return
	/* sc 1 from nested guest - give it to L1 to handle */
	ld	r0, VCPU_NESTED(r9)
	cmpdi	r0, 0
	bne	guest_exit_cont
	clrrdi	r3,r3,2
	cmpldi	r3,hcall_real_table_end - hcall_real_table
	bge	guest_exit_cont
	/* See if this hcall is enabled for in-kernel handling */
	ld	r4, VCPU_KVM(r9)
	srdi	r0, r3, 8	/* r0 = (r3 / 4) >> 6 */
	sldi	r0, r0, 3	/* index into kvm->arch.enabled_hcalls[] */
	add	r4, r4, r0
	ld	r0, KVM_ENABLED_HCALLS(r4)
	rlwinm	r4, r3, 32-2, 0x3f	/* r4 = (r3 / 4) & 0x3f */
	/* e.g. H_CEDE (0xe0): hcall index 0xe0/4 = 56 -> word 0, bit 56 */
	srd	r0, r0, r4
	andi.	r0, r0, 1
	beq	guest_exit_cont
	/* Get pointer to handler, if any, and call it */
	LOAD_REG_ADDR(r4, hcall_real_table)
	lwax	r3,r3,r4
	cmpwi	r3,0
	beq	guest_exit_cont
	add	r12,r3,r4
	mtctr	r12
	mr	r3,r9		/* get vcpu pointer */
	ld	r4,VCPU_GPR(R4)(r9)
	bctrl
	cmpdi	r3,H_TOO_HARD
	beq	hcall_real_fallback
	ld	r4,HSTATE_KVM_VCPU(r13)
	std	r3,VCPU_GPR(R3)(r4)
	ld	r10,VCPU_PC(r4)
	ld	r11,VCPU_MSR(r4)
	b	fast_guest_return

sc_1_fast_return:
	mtspr	SPRN_SRR0,r10
	mtspr	SPRN_SRR1,r11
	li	r10, BOOK3S_INTERRUPT_SYSCALL
	bl	kvmppc_msr_interrupt
	mr	r4,r9
	b	fast_guest_return

	/* We've attempted a real mode hcall, but the handler has punted it back
	 * to userspace.
We need to restore some clobbered volatiles 2309 * before resuming the pass-it-to-qemu path */ 2310hcall_real_fallback: 2311 li r12,BOOK3S_INTERRUPT_SYSCALL 2312 ld r9, HSTATE_KVM_VCPU(r13) 2313 2314 b guest_exit_cont 2315 2316 .globl hcall_real_table 2317hcall_real_table: 2318 .long 0 /* 0 - unused */ 2319 .long DOTSYM(kvmppc_h_remove) - hcall_real_table 2320 .long DOTSYM(kvmppc_h_enter) - hcall_real_table 2321 .long DOTSYM(kvmppc_h_read) - hcall_real_table 2322 .long DOTSYM(kvmppc_h_clear_mod) - hcall_real_table 2323 .long DOTSYM(kvmppc_h_clear_ref) - hcall_real_table 2324 .long DOTSYM(kvmppc_h_protect) - hcall_real_table 2325#ifdef CONFIG_SPAPR_TCE_IOMMU 2326 .long DOTSYM(kvmppc_h_get_tce) - hcall_real_table 2327 .long DOTSYM(kvmppc_rm_h_put_tce) - hcall_real_table 2328#else 2329 .long 0 /* 0x1c */ 2330 .long 0 /* 0x20 */ 2331#endif 2332 .long 0 /* 0x24 - H_SET_SPRG0 */ 2333 .long DOTSYM(kvmppc_h_set_dabr) - hcall_real_table 2334 .long DOTSYM(kvmppc_rm_h_page_init) - hcall_real_table 2335 .long 0 /* 0x30 */ 2336 .long 0 /* 0x34 */ 2337 .long 0 /* 0x38 */ 2338 .long 0 /* 0x3c */ 2339 .long 0 /* 0x40 */ 2340 .long 0 /* 0x44 */ 2341 .long 0 /* 0x48 */ 2342 .long 0 /* 0x4c */ 2343 .long 0 /* 0x50 */ 2344 .long 0 /* 0x54 */ 2345 .long 0 /* 0x58 */ 2346 .long 0 /* 0x5c */ 2347 .long 0 /* 0x60 */ 2348#ifdef CONFIG_KVM_XICS 2349 .long DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table 2350 .long DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table 2351 .long DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table 2352 .long DOTSYM(kvmppc_rm_h_ipoll) - hcall_real_table 2353 .long DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table 2354#else 2355 .long 0 /* 0x64 - H_EOI */ 2356 .long 0 /* 0x68 - H_CPPR */ 2357 .long 0 /* 0x6c - H_IPI */ 2358 .long 0 /* 0x70 - H_IPOLL */ 2359 .long 0 /* 0x74 - H_XIRR */ 2360#endif 2361 .long 0 /* 0x78 */ 2362 .long 0 /* 0x7c */ 2363 .long 0 /* 0x80 */ 2364 .long 0 /* 0x84 */ 2365 .long 0 /* 0x88 */ 2366 .long 0 /* 0x8c */ 2367 .long 0 /* 0x90 */ 2368 .long 0 /* 0x94 */ 2369 .long 0 /* 0x98 */ 2370 .long 0 /* 0x9c */ 2371 .long 0 /* 0xa0 */ 2372 .long 0 /* 0xa4 */ 2373 .long 0 /* 0xa8 */ 2374 .long 0 /* 0xac */ 2375 .long 0 /* 0xb0 */ 2376 .long 0 /* 0xb4 */ 2377 .long 0 /* 0xb8 */ 2378 .long 0 /* 0xbc */ 2379 .long 0 /* 0xc0 */ 2380 .long 0 /* 0xc4 */ 2381 .long 0 /* 0xc8 */ 2382 .long 0 /* 0xcc */ 2383 .long 0 /* 0xd0 */ 2384 .long 0 /* 0xd4 */ 2385 .long 0 /* 0xd8 */ 2386 .long 0 /* 0xdc */ 2387 .long DOTSYM(kvmppc_h_cede) - hcall_real_table 2388 .long DOTSYM(kvmppc_rm_h_confer) - hcall_real_table 2389 .long 0 /* 0xe8 */ 2390 .long 0 /* 0xec */ 2391 .long 0 /* 0xf0 */ 2392 .long 0 /* 0xf4 */ 2393 .long 0 /* 0xf8 */ 2394 .long 0 /* 0xfc */ 2395 .long 0 /* 0x100 */ 2396 .long 0 /* 0x104 */ 2397 .long 0 /* 0x108 */ 2398 .long 0 /* 0x10c */ 2399 .long 0 /* 0x110 */ 2400 .long 0 /* 0x114 */ 2401 .long 0 /* 0x118 */ 2402 .long 0 /* 0x11c */ 2403 .long 0 /* 0x120 */ 2404 .long DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table 2405 .long 0 /* 0x128 */ 2406 .long 0 /* 0x12c */ 2407 .long 0 /* 0x130 */ 2408 .long DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table 2409#ifdef CONFIG_SPAPR_TCE_IOMMU 2410 .long DOTSYM(kvmppc_rm_h_stuff_tce) - hcall_real_table 2411 .long DOTSYM(kvmppc_rm_h_put_tce_indirect) - hcall_real_table 2412#else 2413 .long 0 /* 0x138 */ 2414 .long 0 /* 0x13c */ 2415#endif 2416 .long 0 /* 0x140 */ 2417 .long 0 /* 0x144 */ 2418 .long 0 /* 0x148 */ 2419 .long 0 /* 0x14c */ 2420 .long 0 /* 0x150 */ 2421 .long 0 /* 0x154 */ 2422 .long 0 /* 0x158 */ 2423 .long 0 /* 0x15c */ 2424 .long 0 /* 0x160 */ 2425 
.long 0 /* 0x164 */ 2426 .long 0 /* 0x168 */ 2427 .long 0 /* 0x16c */ 2428 .long 0 /* 0x170 */ 2429 .long 0 /* 0x174 */ 2430 .long 0 /* 0x178 */ 2431 .long 0 /* 0x17c */ 2432 .long 0 /* 0x180 */ 2433 .long 0 /* 0x184 */ 2434 .long 0 /* 0x188 */ 2435 .long 0 /* 0x18c */ 2436 .long 0 /* 0x190 */ 2437 .long 0 /* 0x194 */ 2438 .long 0 /* 0x198 */ 2439 .long 0 /* 0x19c */ 2440 .long 0 /* 0x1a0 */ 2441 .long 0 /* 0x1a4 */ 2442 .long 0 /* 0x1a8 */ 2443 .long 0 /* 0x1ac */ 2444 .long 0 /* 0x1b0 */ 2445 .long 0 /* 0x1b4 */ 2446 .long 0 /* 0x1b8 */ 2447 .long 0 /* 0x1bc */ 2448 .long 0 /* 0x1c0 */ 2449 .long 0 /* 0x1c4 */ 2450 .long 0 /* 0x1c8 */ 2451 .long 0 /* 0x1cc */ 2452 .long 0 /* 0x1d0 */ 2453 .long 0 /* 0x1d4 */ 2454 .long 0 /* 0x1d8 */ 2455 .long 0 /* 0x1dc */ 2456 .long 0 /* 0x1e0 */ 2457 .long 0 /* 0x1e4 */ 2458 .long 0 /* 0x1e8 */ 2459 .long 0 /* 0x1ec */ 2460 .long 0 /* 0x1f0 */ 2461 .long 0 /* 0x1f4 */ 2462 .long 0 /* 0x1f8 */ 2463 .long 0 /* 0x1fc */ 2464 .long 0 /* 0x200 */ 2465 .long 0 /* 0x204 */ 2466 .long 0 /* 0x208 */ 2467 .long 0 /* 0x20c */ 2468 .long 0 /* 0x210 */ 2469 .long 0 /* 0x214 */ 2470 .long 0 /* 0x218 */ 2471 .long 0 /* 0x21c */ 2472 .long 0 /* 0x220 */ 2473 .long 0 /* 0x224 */ 2474 .long 0 /* 0x228 */ 2475 .long 0 /* 0x22c */ 2476 .long 0 /* 0x230 */ 2477 .long 0 /* 0x234 */ 2478 .long 0 /* 0x238 */ 2479 .long 0 /* 0x23c */ 2480 .long 0 /* 0x240 */ 2481 .long 0 /* 0x244 */ 2482 .long 0 /* 0x248 */ 2483 .long 0 /* 0x24c */ 2484 .long 0 /* 0x250 */ 2485 .long 0 /* 0x254 */ 2486 .long 0 /* 0x258 */ 2487 .long 0 /* 0x25c */ 2488 .long 0 /* 0x260 */ 2489 .long 0 /* 0x264 */ 2490 .long 0 /* 0x268 */ 2491 .long 0 /* 0x26c */ 2492 .long 0 /* 0x270 */ 2493 .long 0 /* 0x274 */ 2494 .long 0 /* 0x278 */ 2495 .long 0 /* 0x27c */ 2496 .long 0 /* 0x280 */ 2497 .long 0 /* 0x284 */ 2498 .long 0 /* 0x288 */ 2499 .long 0 /* 0x28c */ 2500 .long 0 /* 0x290 */ 2501 .long 0 /* 0x294 */ 2502 .long 0 /* 0x298 */ 2503 .long 0 /* 0x29c */ 2504 .long 0 /* 0x2a0 */ 2505 .long 0 /* 0x2a4 */ 2506 .long 0 /* 0x2a8 */ 2507 .long 0 /* 0x2ac */ 2508 .long 0 /* 0x2b0 */ 2509 .long 0 /* 0x2b4 */ 2510 .long 0 /* 0x2b8 */ 2511 .long 0 /* 0x2bc */ 2512 .long 0 /* 0x2c0 */ 2513 .long 0 /* 0x2c4 */ 2514 .long 0 /* 0x2c8 */ 2515 .long 0 /* 0x2cc */ 2516 .long 0 /* 0x2d0 */ 2517 .long 0 /* 0x2d4 */ 2518 .long 0 /* 0x2d8 */ 2519 .long 0 /* 0x2dc */ 2520 .long 0 /* 0x2e0 */ 2521 .long 0 /* 0x2e4 */ 2522 .long 0 /* 0x2e8 */ 2523 .long 0 /* 0x2ec */ 2524 .long 0 /* 0x2f0 */ 2525 .long 0 /* 0x2f4 */ 2526 .long 0 /* 0x2f8 */ 2527#ifdef CONFIG_KVM_XICS 2528 .long DOTSYM(kvmppc_rm_h_xirr_x) - hcall_real_table 2529#else 2530 .long 0 /* 0x2fc - H_XIRR_X*/ 2531#endif 2532 .long DOTSYM(kvmppc_h_random) - hcall_real_table 2533 .globl hcall_real_table_end 2534hcall_real_table_end: 2535 2536_GLOBAL(kvmppc_h_set_xdabr) 2537EXPORT_SYMBOL_GPL(kvmppc_h_set_xdabr) 2538 andi. r0, r5, DABRX_USER | DABRX_KERNEL 2539 beq 6f 2540 li r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI 2541 andc. 
r0, r5, r0
	beq	3f
6:	li	r3, H_PARAMETER
	blr

_GLOBAL(kvmppc_h_set_dabr)
EXPORT_SYMBOL_GPL(kvmppc_h_set_dabr)
	li	r5, DABRX_USER | DABRX_KERNEL
3:
BEGIN_FTR_SECTION
	b	2f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r4,VCPU_DABR(r3)
	stw	r5, VCPU_DABRX(r3)
	mtspr	SPRN_DABRX, r5
	/* Work around P7 bug where DABR can get corrupted on mtspr */
1:	mtspr	SPRN_DABR,r4
	mfspr	r5, SPRN_DABR
	cmpd	r4, r5
	bne	1b
	isync
	li	r3,0
	blr

2:
	LOAD_REG_ADDR(r11, dawr_force_enable)
	lbz	r11, 0(r11)
	cmpdi	r11, 0
	bne	3f
	li	r3, H_HARDWARE
	blr
3:
	/* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
	rlwimi	r5, r4, 5, DAWRX_DR | DAWRX_DW
	rlwimi	r5, r4, 2, DAWRX_WT
	clrrdi	r4, r4, 3
	std	r4, VCPU_DAWR(r3)
	std	r5, VCPU_DAWRX(r3)
	/*
	 * If we came in through the real mode hcall handler then it is
	 * necessary to write the registers since the return path won't.
	 * Otherwise it is sufficient to store them in the vcpu struct as
	 * they will be loaded next time the vcpu is run.
	 */
	mfmsr	r6
	andi.	r6, r6, MSR_DR		/* in real mode? */
	bne	4f
	mtspr	SPRN_DAWR0, r4
	mtspr	SPRN_DAWRX0, r5
4:	li	r3, 0
	blr

_GLOBAL(kvmppc_h_cede)		/* r3 = vcpu pointer, r11 = msr, r13 = paca */
	ori	r11,r11,MSR_EE
	std	r11,VCPU_MSR(r3)
	li	r0,1
	stb	r0,VCPU_CEDED(r3)
	sync			/* order setting ceded vs. testing prodded */
	lbz	r5,VCPU_PRODDED(r3)
	cmpwi	r5,0
	bne	kvm_cede_prodded
	li	r12,0		/* set trap to 0 to say hcall is handled */
	stw	r12,VCPU_TRAP(r3)
	li	r0,H_SUCCESS
	std	r0,VCPU_GPR(R3)(r3)

	/*
	 * Set our bit in the bitmask of napping threads unless all the
	 * other threads are already napping, in which case we send this
	 * up to the host.
	 */
	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r6,HSTATE_PTID(r13)
	lwz	r8,VCORE_ENTRY_EXIT(r5)
	clrldi	r8,r8,56
	li	r0,1
	sld	r0,r0,r6
	addi	r6,r5,VCORE_NAPPING_THREADS
31:	lwarx	r4,0,r6
	or	r4,r4,r0
	cmpw	r4,r8
	beq	kvm_cede_exit
	stwcx.	r4,0,r6
	bne	31b
	/* order napping_threads update vs testing entry_exit_map */
	isync
	li	r0,NAPPING_CEDE
	stb	r0,HSTATE_NAPPING(r13)
	lwz	r7,VCORE_ENTRY_EXIT(r5)
	cmpwi	r7,0x100
	bge	33f		/* another thread already exiting */

/*
 * Although not specifically required by the architecture, POWER7
 * preserves the following registers in nap mode, even if an SMT mode
 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
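 * GPRs and the floating point/vector state are not on that list,
 * which is why the non-volatile GPRs and the FP/VMX/VSX state are
 * saved explicitly below before we nap.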
 */
	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r3)
	std	r15, VCPU_GPR(R15)(r3)
	std	r16, VCPU_GPR(R16)(r3)
	std	r17, VCPU_GPR(R17)(r3)
	std	r18, VCPU_GPR(R18)(r3)
	std	r19, VCPU_GPR(R19)(r3)
	std	r20, VCPU_GPR(R20)(r3)
	std	r21, VCPU_GPR(R21)(r3)
	std	r22, VCPU_GPR(R22)(r3)
	std	r23, VCPU_GPR(R23)(r3)
	std	r24, VCPU_GPR(R24)(r3)
	std	r25, VCPU_GPR(R25)(r3)
	std	r26, VCPU_GPR(R26)(r3)
	std	r27, VCPU_GPR(R27)(r3)
	std	r28, VCPU_GPR(R28)(r3)
	std	r29, VCPU_GPR(R29)(r3)
	std	r30, VCPU_GPR(R30)(r3)
	std	r31, VCPU_GPR(R31)(r3)

	/* save FP state */
	bl	kvmppc_save_fp

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Branch around the call if both CPU_FTR_TM and
 * CPU_FTR_P9_TM_HV_ASSIST are off.
 */
BEGIN_FTR_SECTION
	b	91f
END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
	/*
	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
	 */
	ld	r3, HSTATE_KVM_VCPU(r13)
	ld	r4, VCPU_MSR(r3)
	li	r5, 0			/* don't preserve non-vol regs */
	bl	kvmppc_save_tm_hv
	nop
91:
#endif

	/*
	 * Set DEC to the smaller of DEC and HDEC, so that we wake
	 * no later than the end of our timeslice (HDEC interrupts
	 * don't wake us from nap).
	 */
	mfspr	r3, SPRN_DEC
	mfspr	r4, SPRN_HDEC
	mftb	r5
BEGIN_FTR_SECTION
	/* On P9 check whether the guest has large decrementer mode enabled */
	ld	r6, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_LPCR(r6)
	andis.	r6, r6, LPCR_LD@h
	bne	68f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	extsw	r3, r3
68:	EXTEND_HDEC(r4)
	cmpd	r3, r4
	ble	67f
	mtspr	SPRN_DEC, r4
67:
	/* save expiry time of guest decrementer */
	add	r3, r3, r5
	ld	r4, HSTATE_KVM_VCPU(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_TB_OFFSET_APPL(r5)
	subf	r3, r6, r3	/* convert to host TB value */
	std	r3, VCPU_DEC_EXPIRES(r4)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	addi	r3, r4, VCPU_TB_CEDE
	bl	kvmhv_accumulate_time
#endif

	lis	r3, LPCR_PECEDP@h	/* Do wake on privileged doorbell */

	/* Go back to host stack */
	ld	r1, HSTATE_HOST_R1(r13)

	/*
	 * Take a nap until a decrementer or external or doorbell interrupt
	 * occurs, with PECE1 and PECE0 set in LPCR.
	 * On POWER8, set PECEDH, and if we are ceding, also set PECEDP.
	 * Also clear the runlatch bit before napping.
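	 * (r3 still carries the LPCR_PECEDP bit loaded above; the rlwimi
	 * in the POWER8 feature section below merges it into the LPCR
	 * image in r5.)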
2726 */ 2727kvm_do_nap: 2728 mfspr r0, SPRN_CTRLF 2729 clrrdi r0, r0, 1 2730 mtspr SPRN_CTRLT, r0 2731 2732 li r0,1 2733 stb r0,HSTATE_HWTHREAD_REQ(r13) 2734 mfspr r5,SPRN_LPCR 2735 ori r5,r5,LPCR_PECE0 | LPCR_PECE1 2736BEGIN_FTR_SECTION 2737 ori r5, r5, LPCR_PECEDH 2738 rlwimi r5, r3, 0, LPCR_PECEDP 2739END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 2740 2741kvm_nap_sequence: /* desired LPCR value in r5 */ 2742BEGIN_FTR_SECTION 2743 /* 2744 * PSSCR bits: exit criterion = 1 (wakeup based on LPCR at sreset) 2745 * enable state loss = 1 (allow SMT mode switch) 2746 * requested level = 0 (just stop dispatching) 2747 */ 2748 lis r3, (PSSCR_EC | PSSCR_ESL)@h 2749 /* Set LPCR_PECE_HVEE bit to enable wakeup by HV interrupts */ 2750 li r4, LPCR_PECE_HVEE@higher 2751 sldi r4, r4, 32 2752 or r5, r5, r4 2753FTR_SECTION_ELSE 2754 li r3, PNV_THREAD_NAP 2755ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300) 2756 mtspr SPRN_LPCR,r5 2757 isync 2758 2759BEGIN_FTR_SECTION 2760 bl isa300_idle_stop_mayloss 2761FTR_SECTION_ELSE 2762 bl isa206_idle_insn_mayloss 2763ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300) 2764 2765 mfspr r0, SPRN_CTRLF 2766 ori r0, r0, 1 2767 mtspr SPRN_CTRLT, r0 2768 2769 mtspr SPRN_SRR1, r3 2770 2771 li r0, 0 2772 stb r0, PACA_FTRACE_ENABLED(r13) 2773 2774 li r0, KVM_HWTHREAD_IN_KVM 2775 stb r0, HSTATE_HWTHREAD_STATE(r13) 2776 2777 lbz r0, HSTATE_NAPPING(r13) 2778 cmpwi r0, NAPPING_CEDE 2779 beq kvm_end_cede 2780 cmpwi r0, NAPPING_NOVCPU 2781 beq kvm_novcpu_wakeup 2782 cmpwi r0, NAPPING_UNSPLIT 2783 beq kvm_unsplit_wakeup 2784 twi 31,0,0 /* Nap state must not be zero */ 2785 278633: mr r4, r3 2787 li r3, 0 2788 li r12, 0 2789 b 34f 2790 2791kvm_end_cede: 2792 /* Woken by external or decrementer interrupt */ 2793 2794 /* get vcpu pointer */ 2795 ld r4, HSTATE_KVM_VCPU(r13) 2796 2797#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING 2798 addi r3, r4, VCPU_TB_RMINTR 2799 bl kvmhv_accumulate_time 2800#endif 2801 2802#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 2803/* 2804 * Branch around the call if both CPU_FTR_TM and 2805 * CPU_FTR_P9_TM_HV_ASSIST are off. 
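 * (The feature-fixup machinery keeps the "b 91f" only when both
 * feature bits are clear, so the kvmppc_restore_tm_hv call below is
 * skipped entirely on such CPUs.)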
 */
BEGIN_FTR_SECTION
	b	91f
END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
	/*
	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
	 */
	mr	r3, r4
	ld	r4, VCPU_MSR(r3)
	li	r5, 0			/* don't preserve non-vol regs */
	bl	kvmppc_restore_tm_hv
	nop
	ld	r4, HSTATE_KVM_VCPU(r13)
91:
#endif

	/* load up FP state */
	bl	kvmppc_load_fp

	/* Restore guest decrementer */
	ld	r3, VCPU_DEC_EXPIRES(r4)
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_TB_OFFSET_APPL(r5)
	add	r3, r3, r6	/* convert host TB to guest TB value */
	mftb	r7
	subf	r3, r7, r3
	mtspr	SPRN_DEC, r3

	/* Load NV GPRS */
	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason

	/*
	 * Restore volatile registers since we could have called a
	 * C routine in kvmppc_check_wake_reason
	 * r4 = VCPU
	 * r3 tells us whether we need to return to host or not
	 * WARNING: it gets checked further down;
	 * do not modify r3 until this check is done.
	 */
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* clear our bit in vcore->napping_threads */
34:	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r7,HSTATE_PTID(r13)
	li	r0,1
	sld	r0,r0,r7
	addi	r6,r5,VCORE_NAPPING_THREADS
32:	lwarx	r7,0,r6
	andc	r7,r7,r0
	stwcx.	r7,0,r6
	bne	32b
	li	r0,0
	stb	r0,HSTATE_NAPPING(r13)

	/* See if the wake reason saved in r3 means we need to exit */
	stw	r12, VCPU_TRAP(r4)
	mr	r9, r4
	cmpdi	r3, 0
	bgt	guest_exit_cont
	b	maybe_reenter_guest

	/* handle a cede from a vcpu that has already been prodded */
kvm_cede_prodded:
	li	r0,0
	stb	r0,VCPU_PRODDED(r3)
	sync			/* order testing prodded vs. clearing ceded */
	stb	r0,VCPU_CEDED(r3)
	li	r3,H_SUCCESS
	blr

	/* we've ceded but we want to give control to the host */
kvm_cede_exit:
	ld	r9, HSTATE_KVM_VCPU(r13)
#ifdef CONFIG_KVM_XICS
	/* are we using XIVE with single escalation? */
	ld	r10, VCPU_XIVE_ESC_VADDR(r9)
	cmpdi	r10, 0
	beq	3f
	li	r6, XIVE_ESB_SET_PQ_00
	/*
	 * If we still have a pending escalation, abort the cede,
	 * and we must set PQ to 10 rather than 00 so that we don't
	 * potentially end up with two entries for the escalation
	 * interrupt in the XIVE interrupt queue. In that case
	 * we also don't want to set xive_esc_on to 1 here in
	 * case we race with xive_esc_irq().
	 */
	lbz	r5, VCPU_XIVE_ESC_ON(r9)
	cmpwi	r5, 0
	beq	4f
	li	r0, 0
	stb	r0, VCPU_CEDED(r9)
	/*
	 * The escalation interrupts are special as we don't EOI them.
	 * There is no need to use the load-after-store ordering offset
	 * to set PQ to 10 as we won't use StoreEOI.
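	 * (The PQ update itself is performed by the ESB special load
	 * below: ldx on the virtual mapping if the MMU is on, otherwise
	 * ldcix on the real address from VCPU_XIVE_ESC_RADDR.)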
2922 */ 2923 li r6, XIVE_ESB_SET_PQ_10 2924 b 5f 29254: li r0, 1 2926 stb r0, VCPU_XIVE_ESC_ON(r9) 2927 /* make sure store to xive_esc_on is seen before xive_esc_irq runs */ 2928 sync 29295: /* Enable XIVE escalation */ 2930 mfmsr r0 2931 andi. r0, r0, MSR_DR /* in real mode? */ 2932 beq 1f 2933 ldx r0, r10, r6 2934 b 2f 29351: ld r10, VCPU_XIVE_ESC_RADDR(r9) 2936 ldcix r0, r10, r6 29372: sync 2938#endif /* CONFIG_KVM_XICS */ 29393: b guest_exit_cont 2940 2941 /* Try to do machine check recovery in real mode */ 2942machine_check_realmode: 2943 mr r3, r9 /* get vcpu pointer */ 2944 bl kvmppc_realmode_machine_check 2945 nop 2946 /* all machine checks go to virtual mode for further handling */ 2947 ld r9, HSTATE_KVM_VCPU(r13) 2948 li r12, BOOK3S_INTERRUPT_MACHINE_CHECK 2949 b guest_exit_cont 2950 2951/* 2952 * Call C code to handle a HMI in real mode. 2953 * Only the primary thread does the call, secondary threads are handled 2954 * by calling hmi_exception_realmode() after kvmppc_hv_entry returns. 2955 * r9 points to the vcpu on entry 2956 */ 2957hmi_realmode: 2958 lbz r0, HSTATE_PTID(r13) 2959 cmpwi r0, 0 2960 bne guest_exit_cont 2961 bl kvmppc_realmode_hmi_handler 2962 ld r9, HSTATE_KVM_VCPU(r13) 2963 li r12, BOOK3S_INTERRUPT_HMI 2964 b guest_exit_cont 2965 2966/* 2967 * Check the reason we woke from nap, and take appropriate action. 2968 * Returns (in r3): 2969 * 0 if nothing needs to be done 2970 * 1 if something happened that needs to be handled by the host 2971 * -1 if there was a guest wakeup (IPI or msgsnd) 2972 * -2 if we handled a PCI passthrough interrupt (returned by 2973 * kvmppc_read_intr only) 2974 * 2975 * Also sets r12 to the interrupt vector for any interrupt that needs 2976 * to be handled now by the host (0x500 for external interrupt), or zero. 2977 * Modifies all volatile registers (since it may call a C function). 2978 * This routine calls kvmppc_read_intr, a C function, if an external 2979 * interrupt is pending. 2980 */ 2981kvmppc_check_wake_reason: 2982 mfspr r6, SPRN_SRR1 2983BEGIN_FTR_SECTION 2984 rlwinm r6, r6, 45-31, 0xf /* extract wake reason field (P8) */ 2985FTR_SECTION_ELSE 2986 rlwinm r6, r6, 45-31, 0xe /* P7 wake reason field is 3 bits */ 2987ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S) 2988 cmpwi r6, 8 /* was it an external interrupt? */ 2989 beq 7f /* if so, see what it was */ 2990 li r3, 0 2991 li r12, 0 2992 cmpwi r6, 6 /* was it the decrementer? */ 2993 beq 0f 2994BEGIN_FTR_SECTION 2995 cmpwi r6, 5 /* privileged doorbell? */ 2996 beq 0f 2997 cmpwi r6, 3 /* hypervisor doorbell? */ 2998 beq 3f 2999END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 3000 cmpwi r6, 0xa /* Hypervisor maintenance ? */ 3001 beq 4f 3002 li r3, 1 /* anything else, return 1 */ 30030: blr 3004 3005 /* hypervisor doorbell */ 30063: li r12, BOOK3S_INTERRUPT_H_DOORBELL 3007 3008 /* 3009 * Clear the doorbell as we will invoke the handler 3010 * explicitly in the guest exit path. 
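	 * (The msgclr with the PPC_DBELL_SERVER payload below does the
	 * clearing; r12 already holds BOOK3S_INTERRUPT_H_DOORBELL so the
	 * exit path knows which handler to invoke.)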
3011 */ 3012 lis r6, (PPC_DBELL_SERVER << (63-36))@h 3013 PPC_MSGCLR(6) 3014 /* see if it's a host IPI */ 3015 li r3, 1 3016BEGIN_FTR_SECTION 3017 PPC_MSGSYNC 3018 lwsync 3019END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) 3020 lbz r0, HSTATE_HOST_IPI(r13) 3021 cmpwi r0, 0 3022 bnelr 3023 /* if not, return -1 */ 3024 li r3, -1 3025 blr 3026 3027 /* Woken up due to Hypervisor maintenance interrupt */ 30284: li r12, BOOK3S_INTERRUPT_HMI 3029 li r3, 1 3030 blr 3031 3032 /* external interrupt - create a stack frame so we can call C */ 30337: mflr r0 3034 std r0, PPC_LR_STKOFF(r1) 3035 stdu r1, -PPC_MIN_STKFRM(r1) 3036 bl kvmppc_read_intr 3037 nop 3038 li r12, BOOK3S_INTERRUPT_EXTERNAL 3039 cmpdi r3, 1 3040 ble 1f 3041 3042 /* 3043 * Return code of 2 means PCI passthrough interrupt, but 3044 * we need to return back to host to complete handling the 3045 * interrupt. Trap reason is expected in r12 by guest 3046 * exit code. 3047 */ 3048 li r12, BOOK3S_INTERRUPT_HV_RM_HARD 30491: 3050 ld r0, PPC_MIN_STKFRM+PPC_LR_STKOFF(r1) 3051 addi r1, r1, PPC_MIN_STKFRM 3052 mtlr r0 3053 blr 3054 3055/* 3056 * Save away FP, VMX and VSX registers. 3057 * r3 = vcpu pointer 3058 * N.B. r30 and r31 are volatile across this function, 3059 * thus it is not callable from C. 3060 */ 3061kvmppc_save_fp: 3062 mflr r30 3063 mr r31,r3 3064 mfmsr r5 3065 ori r8,r5,MSR_FP 3066#ifdef CONFIG_ALTIVEC 3067BEGIN_FTR_SECTION 3068 oris r8,r8,MSR_VEC@h 3069END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 3070#endif 3071#ifdef CONFIG_VSX 3072BEGIN_FTR_SECTION 3073 oris r8,r8,MSR_VSX@h 3074END_FTR_SECTION_IFSET(CPU_FTR_VSX) 3075#endif 3076 mtmsrd r8 3077 addi r3,r3,VCPU_FPRS 3078 bl store_fp_state 3079#ifdef CONFIG_ALTIVEC 3080BEGIN_FTR_SECTION 3081 addi r3,r31,VCPU_VRS 3082 bl store_vr_state 3083END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 3084#endif 3085 mfspr r6,SPRN_VRSAVE 3086 stw r6,VCPU_VRSAVE(r31) 3087 mtlr r30 3088 blr 3089 3090/* 3091 * Load up FP, VMX and VSX registers 3092 * r4 = vcpu pointer 3093 * N.B. r30 and r31 are volatile across this function, 3094 * thus it is not callable from C. 3095 */ 3096kvmppc_load_fp: 3097 mflr r30 3098 mr r31,r4 3099 mfmsr r9 3100 ori r8,r9,MSR_FP 3101#ifdef CONFIG_ALTIVEC 3102BEGIN_FTR_SECTION 3103 oris r8,r8,MSR_VEC@h 3104END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 3105#endif 3106#ifdef CONFIG_VSX 3107BEGIN_FTR_SECTION 3108 oris r8,r8,MSR_VSX@h 3109END_FTR_SECTION_IFSET(CPU_FTR_VSX) 3110#endif 3111 mtmsrd r8 3112 addi r3,r4,VCPU_FPRS 3113 bl load_fp_state 3114#ifdef CONFIG_ALTIVEC 3115BEGIN_FTR_SECTION 3116 addi r3,r31,VCPU_VRS 3117 bl load_vr_state 3118END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 3119#endif 3120 lwz r7,VCPU_VRSAVE(r31) 3121 mtspr SPRN_VRSAVE,r7 3122 mtlr r30 3123 mr r4,r31 3124 blr 3125 3126#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 3127/* 3128 * Save transactional state and TM-related registers. 3129 * Called with r3 pointing to the vcpu struct and r4 containing 3130 * the guest MSR value. 3131 * r5 is non-zero iff non-volatile register state needs to be maintained. 3132 * If r5 == 0, this can modify all checkpointed registers, but 3133 * restores r1 and r2 before exit. 3134 */ 3135_GLOBAL_TOC(kvmppc_save_tm_hv) 3136EXPORT_SYMBOL_GPL(kvmppc_save_tm_hv) 3137 /* See if we need to handle fake suspend mode */ 3138BEGIN_FTR_SECTION 3139 b __kvmppc_save_tm 3140END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST) 3141 3142 lbz r0, HSTATE_FAKE_SUSPEND(r13) /* Were we fake suspended? 
*/ 3143 cmpwi r0, 0 3144 beq __kvmppc_save_tm 3145 3146 /* The following code handles the fake_suspend = 1 case */ 3147 mflr r0 3148 std r0, PPC_LR_STKOFF(r1) 3149 stdu r1, -PPC_MIN_STKFRM(r1) 3150 3151 /* Turn on TM. */ 3152 mfmsr r8 3153 li r0, 1 3154 rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG 3155 mtmsrd r8 3156 3157 rldicl. r8, r8, 64 - MSR_TS_S_LG, 62 /* Did we actually hrfid? */ 3158 beq 4f 3159BEGIN_FTR_SECTION 3160 bl pnv_power9_force_smt4_catch 3161END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG) 3162 nop 3163 3164 /* We have to treclaim here because that's the only way to do S->N */ 3165 li r3, TM_CAUSE_KVM_RESCHED 3166 TRECLAIM(R3) 3167 3168 /* 3169 * We were in fake suspend, so we are not going to save the 3170 * register state as the guest checkpointed state (since 3171 * we already have it), therefore we can now use any volatile GPR. 3172 * In fact treclaim in fake suspend state doesn't modify 3173 * any registers. 3174 */ 3175 3176BEGIN_FTR_SECTION 3177 bl pnv_power9_force_smt4_release 3178END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG) 3179 nop 3180 31814: 3182 mfspr r3, SPRN_PSSCR 3183 /* PSSCR_FAKE_SUSPEND is a write-only bit, but clear it anyway */ 3184 li r0, PSSCR_FAKE_SUSPEND 3185 andc r3, r3, r0 3186 mtspr SPRN_PSSCR, r3 3187 3188 /* Don't save TEXASR, use value from last exit in real suspend state */ 3189 ld r9, HSTATE_KVM_VCPU(r13) 3190 mfspr r5, SPRN_TFHAR 3191 mfspr r6, SPRN_TFIAR 3192 std r5, VCPU_TFHAR(r9) 3193 std r6, VCPU_TFIAR(r9) 3194 3195 addi r1, r1, PPC_MIN_STKFRM 3196 ld r0, PPC_LR_STKOFF(r1) 3197 mtlr r0 3198 blr 3199 3200/* 3201 * Restore transactional state and TM-related registers. 3202 * Called with r3 pointing to the vcpu struct 3203 * and r4 containing the guest MSR value. 3204 * r5 is non-zero iff non-volatile register state needs to be maintained. 3205 * This potentially modifies all checkpointed registers. 3206 * It restores r1 and r2 from the PACA. 3207 */ 3208_GLOBAL_TOC(kvmppc_restore_tm_hv) 3209EXPORT_SYMBOL_GPL(kvmppc_restore_tm_hv) 3210 /* 3211 * If we are doing TM emulation for the guest on a POWER9 DD2, 3212 * then we don't actually do a trechkpt -- we either set up 3213 * fake-suspend mode, or emulate a TM rollback. 3214 */ 3215BEGIN_FTR_SECTION 3216 b __kvmppc_restore_tm 3217END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST) 3218 mflr r0 3219 std r0, PPC_LR_STKOFF(r1) 3220 3221 li r0, 0 3222 stb r0, HSTATE_FAKE_SUSPEND(r13) 3223 3224 /* Turn on TM so we can restore TM SPRs */ 3225 mfmsr r5 3226 li r0, 1 3227 rldimi r5, r0, MSR_TM_LG, 63-MSR_TM_LG 3228 mtmsrd r5 3229 3230 /* 3231 * The user may change these outside of a transaction, so they must 3232 * always be context switched. 3233 */ 3234 ld r5, VCPU_TFHAR(r3) 3235 ld r6, VCPU_TFIAR(r3) 3236 ld r7, VCPU_TEXASR(r3) 3237 mtspr SPRN_TFHAR, r5 3238 mtspr SPRN_TFIAR, r6 3239 mtspr SPRN_TEXASR, r7 3240 3241 rldicl. 
r5, r4, 64 - MSR_TS_S_LG, 62 3242 beqlr /* TM not active in guest */ 3243 3244 /* Make sure the failure summary is set */ 3245 oris r7, r7, (TEXASR_FS)@h 3246 mtspr SPRN_TEXASR, r7 3247 3248 cmpwi r5, 1 /* check for suspended state */ 3249 bgt 10f 3250 stb r5, HSTATE_FAKE_SUSPEND(r13) 3251 b 9f /* and return */ 325210: stdu r1, -PPC_MIN_STKFRM(r1) 3253 /* guest is in transactional state, so simulate rollback */ 3254 bl kvmhv_emulate_tm_rollback 3255 nop 3256 addi r1, r1, PPC_MIN_STKFRM 32579: ld r0, PPC_LR_STKOFF(r1) 3258 mtlr r0 3259 blr 3260#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ 3261 3262/* 3263 * We come here if we get any exception or interrupt while we are 3264 * executing host real mode code while in guest MMU context. 3265 * r12 is (CR << 32) | vector 3266 * r13 points to our PACA 3267 * r12 is saved in HSTATE_SCRATCH0(r13) 3268 * r9 is saved in HSTATE_SCRATCH2(r13) 3269 * r13 is saved in HSPRG1 3270 * cfar is saved in HSTATE_CFAR(r13) 3271 * ppr is saved in HSTATE_PPR(r13) 3272 */ 3273kvmppc_bad_host_intr: 3274 /* 3275 * Switch to the emergency stack, but start half-way down in 3276 * case we were already on it. 3277 */ 3278 mr r9, r1 3279 std r1, PACAR1(r13) 3280 ld r1, PACAEMERGSP(r13) 3281 subi r1, r1, THREAD_SIZE/2 + INT_FRAME_SIZE 3282 std r9, 0(r1) 3283 std r0, GPR0(r1) 3284 std r9, GPR1(r1) 3285 std r2, GPR2(r1) 3286 SAVE_4GPRS(3, r1) 3287 SAVE_2GPRS(7, r1) 3288 srdi r0, r12, 32 3289 clrldi r12, r12, 32 3290 std r0, _CCR(r1) 3291 std r12, _TRAP(r1) 3292 andi. r0, r12, 2 3293 beq 1f 3294 mfspr r3, SPRN_HSRR0 3295 mfspr r4, SPRN_HSRR1 3296 mfspr r5, SPRN_HDAR 3297 mfspr r6, SPRN_HDSISR 3298 b 2f 32991: mfspr r3, SPRN_SRR0 3300 mfspr r4, SPRN_SRR1 3301 mfspr r5, SPRN_DAR 3302 mfspr r6, SPRN_DSISR 33032: std r3, _NIP(r1) 3304 std r4, _MSR(r1) 3305 std r5, _DAR(r1) 3306 std r6, _DSISR(r1) 3307 ld r9, HSTATE_SCRATCH2(r13) 3308 ld r12, HSTATE_SCRATCH0(r13) 3309 GET_SCRATCH0(r0) 3310 SAVE_4GPRS(9, r1) 3311 std r0, GPR13(r1) 3312 SAVE_NVGPRS(r1) 3313 ld r5, HSTATE_CFAR(r13) 3314 std r5, ORIG_GPR3(r1) 3315 mflr r3 3316 mfctr r4 3317 mfxer r5 3318 lbz r6, PACAIRQSOFTMASK(r13) 3319 std r3, _LINK(r1) 3320 std r4, _CTR(r1) 3321 std r5, _XER(r1) 3322 std r6, SOFTE(r1) 3323 ld r2, PACATOC(r13) 3324 LOAD_REG_IMMEDIATE(3, 0x7265677368657265) 3325 std r3, STACK_FRAME_OVERHEAD-16(r1) 3326 3327 /* 3328 * On POWER9 do a minimal restore of the MMU and call C code, 3329 * which will print a message and panic. 3330 * XXX On POWER7 and POWER8, we just spin here since we don't 3331 * know what the other threads are doing (and we don't want to 3332 * coordinate with them) - but at least we now have register state 3333 * in memory that we might be able to look at from another CPU. 3334 */ 3335BEGIN_FTR_SECTION 3336 b . 3337END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) 3338 ld r9, HSTATE_KVM_VCPU(r13) 3339 ld r10, VCPU_KVM(r9) 3340 3341 li r0, 0 3342 mtspr SPRN_AMR, r0 3343 mtspr SPRN_IAMR, r0 3344 mtspr SPRN_CIABR, r0 3345 mtspr SPRN_DAWRX0, r0 3346 3347BEGIN_MMU_FTR_SECTION 3348 b 4f 3349END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX) 3350 3351 slbmte r0, r0 3352 slbia 3353 ptesync 3354 ld r8, PACA_SLBSHADOWPTR(r13) 3355 .rept SLB_NUM_BOLTED 3356 li r3, SLBSHADOW_SAVEAREA 3357 LDX_BE r5, r8, r3 3358 addi r3, r3, 8 3359 LDX_BE r6, r8, r3 3360 andis. 
r7, r5, SLB_ESID_V@h 3361 beq 3f 3362 slbmte r6, r5 33633: addi r8, r8, 16 3364 .endr 3365 33664: lwz r7, KVM_HOST_LPID(r10) 3367 mtspr SPRN_LPID, r7 3368 mtspr SPRN_PID, r0 3369 ld r8, KVM_HOST_LPCR(r10) 3370 mtspr SPRN_LPCR, r8 3371 isync 3372 li r0, KVM_GUEST_MODE_NONE 3373 stb r0, HSTATE_IN_GUEST(r13) 3374 3375 /* 3376 * Turn on the MMU and jump to C code 3377 */ 3378 bcl 20, 31, .+4 33795: mflr r3 3380 addi r3, r3, 9f - 5b 3381 li r4, -1 3382 rldimi r3, r4, 62, 0 /* ensure 0xc000000000000000 bits are set */ 3383 ld r4, PACAKMSR(r13) 3384 mtspr SPRN_SRR0, r3 3385 mtspr SPRN_SRR1, r4 3386 RFI_TO_KERNEL 33879: addi r3, r1, STACK_FRAME_OVERHEAD 3388 bl kvmppc_bad_interrupt 3389 b 9b 3390 3391/* 3392 * This mimics the MSR transition on IRQ delivery. The new guest MSR is taken 3393 * from VCPU_INTR_MSR and is modified based on the required TM state changes. 3394 * r11 has the guest MSR value (in/out) 3395 * r9 has a vcpu pointer (in) 3396 * r0 is used as a scratch register 3397 */ 3398kvmppc_msr_interrupt: 3399 rldicl r0, r11, 64 - MSR_TS_S_LG, 62 3400 cmpwi r0, 2 /* Check if we are in transactional state.. */ 3401 ld r11, VCPU_INTR_MSR(r9) 3402 bne 1f 3403 /* ... if transactional, change to suspended */ 3404 li r0, 1 34051: rldimi r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG 3406 blr 3407 3408/* 3409 * Load up guest PMU state. R3 points to the vcpu struct. 3410 */ 3411_GLOBAL(kvmhv_load_guest_pmu) 3412EXPORT_SYMBOL_GPL(kvmhv_load_guest_pmu) 3413 mr r4, r3 3414 mflr r0 3415 li r3, 1 3416 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ 3417 mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */ 3418 isync 3419BEGIN_FTR_SECTION 3420 ld r3, VCPU_MMCR(r4) 3421 andi. r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO 3422 cmpwi r5, MMCR0_PMAO 3423 beql kvmppc_fix_pmao 3424END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG) 3425 lwz r3, VCPU_PMC(r4) /* always load up guest PMU registers */ 3426 lwz r5, VCPU_PMC + 4(r4) /* to prevent information leak */ 3427 lwz r6, VCPU_PMC + 8(r4) 3428 lwz r7, VCPU_PMC + 12(r4) 3429 lwz r8, VCPU_PMC + 16(r4) 3430 lwz r9, VCPU_PMC + 20(r4) 3431 mtspr SPRN_PMC1, r3 3432 mtspr SPRN_PMC2, r5 3433 mtspr SPRN_PMC3, r6 3434 mtspr SPRN_PMC4, r7 3435 mtspr SPRN_PMC5, r8 3436 mtspr SPRN_PMC6, r9 3437 ld r3, VCPU_MMCR(r4) 3438 ld r5, VCPU_MMCR + 8(r4) 3439 ld r6, VCPU_MMCRA(r4) 3440 ld r7, VCPU_SIAR(r4) 3441 ld r8, VCPU_SDAR(r4) 3442 mtspr SPRN_MMCR1, r5 3443 mtspr SPRN_MMCRA, r6 3444 mtspr SPRN_SIAR, r7 3445 mtspr SPRN_SDAR, r8 3446BEGIN_FTR_SECTION 3447 ld r5, VCPU_MMCR + 24(r4) 3448 ld r6, VCPU_SIER + 8(r4) 3449 ld r7, VCPU_SIER + 16(r4) 3450 mtspr SPRN_MMCR3, r5 3451 mtspr SPRN_SIER2, r6 3452 mtspr SPRN_SIER3, r7 3453END_FTR_SECTION_IFSET(CPU_FTR_ARCH_31) 3454BEGIN_FTR_SECTION 3455 ld r5, VCPU_MMCR + 16(r4) 3456 ld r6, VCPU_SIER(r4) 3457 mtspr SPRN_MMCR2, r5 3458 mtspr SPRN_SIER, r6 3459BEGIN_FTR_SECTION_NESTED(96) 3460 lwz r7, VCPU_PMC + 24(r4) 3461 lwz r8, VCPU_PMC + 28(r4) 3462 ld r9, VCPU_MMCRS(r4) 3463 mtspr SPRN_SPMC1, r7 3464 mtspr SPRN_SPMC2, r8 3465 mtspr SPRN_MMCRS, r9 3466END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96) 3467END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 3468 mtspr SPRN_MMCR0, r3 3469 isync 3470 mtlr r0 3471 blr 3472 3473/* 3474 * Reload host PMU state saved in the PACA by kvmhv_save_host_pmu. 3475 */ 3476_GLOBAL(kvmhv_load_host_pmu) 3477EXPORT_SYMBOL_GPL(kvmhv_load_host_pmu) 3478 mflr r0 3479 lbz r4, PACA_PMCINUSE(r13) /* is the host using the PMU? */ 3480 cmpwi r4, 0 3481 beq 23f /* skip if not */ 3482BEGIN_FTR_SECTION 3483 ld r3, HSTATE_MMCR0(r13) 3484 andi. 
r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r4, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, HSTATE_PMC1(r13)
	lwz	r4, HSTATE_PMC2(r13)
	lwz	r5, HSTATE_PMC3(r13)
	lwz	r6, HSTATE_PMC4(r13)
	lwz	r8, HSTATE_PMC5(r13)
	lwz	r9, HSTATE_PMC6(r13)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r4
	mtspr	SPRN_PMC3, r5
	mtspr	SPRN_PMC4, r6
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, HSTATE_MMCR0(r13)
	ld	r4, HSTATE_MMCR1(r13)
	ld	r5, HSTATE_MMCRA(r13)
	ld	r6, HSTATE_SIAR(r13)
	ld	r7, HSTATE_SDAR(r13)
	mtspr	SPRN_MMCR1, r4
	mtspr	SPRN_MMCRA, r5
	mtspr	SPRN_SIAR, r6
	mtspr	SPRN_SDAR, r7
BEGIN_FTR_SECTION
	ld	r8, HSTATE_MMCR2(r13)
	ld	r9, HSTATE_SIER(r13)
	mtspr	SPRN_MMCR2, r8
	mtspr	SPRN_SIER, r9
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
BEGIN_FTR_SECTION
	ld	r5, HSTATE_MMCR3(r13)
	ld	r6, HSTATE_SIER2(r13)
	ld	r7, HSTATE_SIER3(r13)
	mtspr	SPRN_MMCR3, r5
	mtspr	SPRN_SIER2, r6
	mtspr	SPRN_SIER3, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_31)
	mtspr	SPRN_MMCR0, r3
	isync
	mtlr	r0
23:	blr

/*
 * Save guest PMU state into the vcpu struct.
 * r3 = vcpu, r4 = full save flag (PMU in use flag set in VPA)
 */
_GLOBAL(kvmhv_save_guest_pmu)
EXPORT_SYMBOL_GPL(kvmhv_save_guest_pmu)
	mr	r9, r3
	mr	r8, r4
BEGIN_FTR_SECTION
	/*
	 * POWER8 seems to have a hardware bug where setting
	 * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
	 * when some counters are already negative doesn't cause
	 * a performance monitor alert (and hence interrupt).
	 * The effect of this is that when saving the PMU state,
	 * if there is no PMU alert pending when we read MMCR0
	 * before freezing the counters, but one becomes pending
	 * before we read the counters, we lose it.
	 * To work around this, we need a way to freeze the counters
	 * before reading MMCR0. Normally, freezing the counters
	 * is done by writing MMCR0 (to set MMCR0[FC]) which
	 * unavoidably writes MMCR0[PMAO] as well. On POWER8,
	 * we can also freeze the counters using MMCR2, by writing
	 * 1s to all the counter freeze condition bits (there are
	 * 9 bits each for 6 counters).
	 */
	li	r3, -1			/* set all freeze bits */
	clrrdi	r3, r3, 10
	mfspr	r10, SPRN_MMCR2
	mtspr	SPRN_MMCR2, r3
	isync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	mfspr	r6, SPRN_MMCRA
	/* Clear MMCRA in order to disable SDAR updates */
	li	r7, 0
	mtspr	SPRN_MMCRA, r7
	isync
	cmpwi	r8, 0			/* did they ask for PMU stuff to be saved?
*/ 3570 bne 21f 3571 std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */ 3572 b 22f 357321: mfspr r5, SPRN_MMCR1 3574 mfspr r7, SPRN_SIAR 3575 mfspr r8, SPRN_SDAR 3576 std r4, VCPU_MMCR(r9) 3577 std r5, VCPU_MMCR + 8(r9) 3578 std r6, VCPU_MMCRA(r9) 3579BEGIN_FTR_SECTION 3580 std r10, VCPU_MMCR + 16(r9) 3581END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 3582BEGIN_FTR_SECTION 3583 mfspr r5, SPRN_MMCR3 3584 mfspr r6, SPRN_SIER2 3585 mfspr r7, SPRN_SIER3 3586 std r5, VCPU_MMCR + 24(r9) 3587 std r6, VCPU_SIER + 8(r9) 3588 std r7, VCPU_SIER + 16(r9) 3589END_FTR_SECTION_IFSET(CPU_FTR_ARCH_31) 3590 std r7, VCPU_SIAR(r9) 3591 std r8, VCPU_SDAR(r9) 3592 mfspr r3, SPRN_PMC1 3593 mfspr r4, SPRN_PMC2 3594 mfspr r5, SPRN_PMC3 3595 mfspr r6, SPRN_PMC4 3596 mfspr r7, SPRN_PMC5 3597 mfspr r8, SPRN_PMC6 3598 stw r3, VCPU_PMC(r9) 3599 stw r4, VCPU_PMC + 4(r9) 3600 stw r5, VCPU_PMC + 8(r9) 3601 stw r6, VCPU_PMC + 12(r9) 3602 stw r7, VCPU_PMC + 16(r9) 3603 stw r8, VCPU_PMC + 20(r9) 3604BEGIN_FTR_SECTION 3605 mfspr r5, SPRN_SIER 3606 std r5, VCPU_SIER(r9) 3607BEGIN_FTR_SECTION_NESTED(96) 3608 mfspr r6, SPRN_SPMC1 3609 mfspr r7, SPRN_SPMC2 3610 mfspr r8, SPRN_MMCRS 3611 stw r6, VCPU_PMC + 24(r9) 3612 stw r7, VCPU_PMC + 28(r9) 3613 std r8, VCPU_MMCRS(r9) 3614 lis r4, 0x8000 3615 mtspr SPRN_MMCRS, r4 3616END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96) 3617END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 361822: blr 3619 3620/* 3621 * This works around a hardware bug on POWER8E processors, where 3622 * writing a 1 to the MMCR0[PMAO] bit doesn't generate a 3623 * performance monitor interrupt. Instead, when we need to have 3624 * an interrupt pending, we have to arrange for a counter to overflow. 3625 */ 3626kvmppc_fix_pmao: 3627 li r3, 0 3628 mtspr SPRN_MMCR2, r3 3629 lis r3, (MMCR0_PMXE | MMCR0_FCECE)@h 3630 ori r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN 3631 mtspr SPRN_MMCR0, r3 3632 lis r3, 0x7fff 3633 ori r3, r3, 0xffff 3634 mtspr SPRN_PMC6, r3 3635 isync 3636 blr 3637 3638#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING 3639/* 3640 * Start timing an activity 3641 * r3 = pointer to time accumulation struct, r4 = vcpu 3642 */ 3643kvmhv_start_timing: 3644 ld r5, HSTATE_KVM_VCORE(r13) 3645 ld r6, VCORE_TB_OFFSET_APPL(r5) 3646 mftb r5 3647 subf r5, r6, r5 /* subtract current timebase offset */ 3648 std r3, VCPU_CUR_ACTIVITY(r4) 3649 std r5, VCPU_ACTIVITY_START(r4) 3650 blr 3651 3652/* 3653 * Accumulate time to one activity and start another. 3654 * r3 = pointer to new time accumulation struct, r4 = vcpu 3655 */ 3656kvmhv_accumulate_time: 3657 ld r5, HSTATE_KVM_VCORE(r13) 3658 ld r8, VCORE_TB_OFFSET_APPL(r5) 3659 ld r5, VCPU_CUR_ACTIVITY(r4) 3660 ld r6, VCPU_ACTIVITY_START(r4) 3661 std r3, VCPU_CUR_ACTIVITY(r4) 3662 mftb r7 3663 subf r7, r8, r7 /* subtract current timebase offset */ 3664 std r7, VCPU_ACTIVITY_START(r4) 3665 cmpdi r5, 0 3666 beqlr 3667 subf r3, r6, r7 3668 ld r8, TAS_SEQCOUNT(r5) 3669 cmpdi r8, 0 3670 addi r8, r8, 1 3671 std r8, TAS_SEQCOUNT(r5) 3672 lwsync 3673 ld r7, TAS_TOTAL(r5) 3674 add r7, r7, r3 3675 std r7, TAS_TOTAL(r5) 3676 ld r6, TAS_MIN(r5) 3677 ld r7, TAS_MAX(r5) 3678 beq 3f 3679 cmpd r3, r6 3680 bge 1f 36813: std r3, TAS_MIN(r5) 36821: cmpd r3, r7 3683 ble 2f 3684 std r3, TAS_MAX(r5) 36852: lwsync 3686 addi r8, r8, 1 3687 std r8, TAS_SEQCOUNT(r5) 3688 blr 3689#endif 3690
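
/*
 * Note on kvmhv_accumulate_time above: TAS_SEQCOUNT is incremented to
 * an odd value before the TAS_TOTAL/TAS_MIN/TAS_MAX update and back
 * to an even value afterwards, with lwsync barriers in between, in
 * the usual seqlock style, so that a reader can detect and retry a
 * torn snapshot.
 */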