/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  PowerPC version
 *  Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *  Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *  Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the low-level support and setup for the
 *  PowerPC platform, including trap and interrupt dispatch.
 *  (The PPC 8xx embedded CPUs use head_8xx.S instead.)
 */

#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/bug.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/export.h>
#include <asm/feature-fixups.h>

#include "head_32.h"

#define LOAD_BAT(n, reg, RA, RB)	\
	/* see the comment for clear_bats() -- Cort */ \
	li	RA,0;			\
	mtspr	SPRN_IBAT##n##U,RA;	\
	mtspr	SPRN_DBAT##n##U,RA;	\
	lwz	RA,(n*16)+0(reg);	\
	lwz	RB,(n*16)+4(reg);	\
	mtspr	SPRN_IBAT##n##U,RA;	\
	mtspr	SPRN_IBAT##n##L,RB;	\
	lwz	RA,(n*16)+8(reg);	\
	lwz	RB,(n*16)+12(reg);	\
	mtspr	SPRN_DBAT##n##U,RA;	\
	mtspr	SPRN_DBAT##n##L,RB
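
/*
 * Worked example (follows directly from the macro above): each BATS[]
 * entry is four words, so LOAD_BAT(1, r3, r4, r5) first writes 0 to
 * IBAT1U/DBAT1U so the mapping is never half-valid, then loads words
 * at 16..28(r3) and programs IBAT1U/IBAT1L and DBAT1U/DBAT1L from them.
 */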
	__HEAD
	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
	.stabs	"head_book3s_32.S",N_SO,0,0,0f
0:
_ENTRY(_stext);

/*
 * _start is defined this way because the XCOFF loader in the OpenFirmware
 * on the powermac expects the entry point to be a procedure descriptor.
 */
_ENTRY(_start);
	/*
	 * These are here for legacy reasons, the kernel used to
	 * need to look like a coff function entry for the pmac
	 * but we're always started by some kind of bootloader now.
	 * -- Cort
	 */
	nop	/* used by __secondary_hold on prep (mtx) and chrp smp */
	nop	/* used by __secondary_hold on prep (mtx) and chrp smp */
	nop

/* PMAC
 * Enter here with the kernel text, data and bss loaded starting at
 * 0, running with virtual == physical mapping.
 * r5 points to the prom entry point (the client interface handler
 * address).  Address translation is turned on, with the prom
 * managing the hash table.  Interrupts are disabled.  The stack
 * pointer (r1) points to just below the end of the half-meg region
 * from 0x380000 - 0x400000, which is mapped in already.
 *
 * If we are booted from MacOS via BootX, we enter with the kernel
 * image loaded somewhere, and the following values in registers:
 *  r3: 'BooX' (0x426f6f58)
 *  r4: virtual address of boot_infos_t
 *  r5: 0
 *
 * PREP
 * This is jumped to on prep systems right after the kernel is relocated
 * to its proper place in memory by the boot loader.  The expected layout
 * of the regs is:
 *  r3: ptr to residual data
 *  r4: initrd_start or if no initrd then 0
 *  r5: initrd_end - unused if r4 is 0
 *  r6: Start of command line string
 *  r7: End of command line string
 *
 * This just gets a minimal mmu environment setup so we can call
 * start_here() to do the real work.
 * -- Cort
 */

	.globl	__start
__start:
/*
 * We have to do any OF calls before we map ourselves to KERNELBASE,
 * because OF may have I/O devices mapped into that area
 * (particularly on CHRP).
 */
	cmpwi	0,r5,0
	beq	1f

#ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE
	/* find out where we are now */
	bcl	20,31,$+4
0:	mflr	r8			/* r8 = runtime addr here */
	addis	r8,r8,(_stext - 0b)@ha
	addi	r8,r8,(_stext - 0b)@l	/* current runtime base addr */
	bl	prom_init
#endif /* CONFIG_PPC_OF_BOOT_TRAMPOLINE */

	/* We never return.  We also hit that trap if trying to boot
	 * from OF while CONFIG_PPC_OF_BOOT_TRAMPOLINE isn't selected */
	trap

/*
 * Check for BootX signature when supporting PowerMac and branch to
 * appropriate trampoline if it's present
 */
#ifdef CONFIG_PPC_PMAC
1:	lis	r31,0x426f
	ori	r31,r31,0x6f58
	cmpw	0,r3,r31
	bne	1f
	bl	bootx_init
	trap
#endif /* CONFIG_PPC_PMAC */

1:	mr	r31,r3			/* save device tree ptr */
	li	r24,0			/* cpu # */

/*
 * early_init() does the early machine identification and does
 * the necessary low-level setup and clears the BSS
 *  -- Cort <cort@fsmlabs.com>
 */
	bl	early_init

/* Switch MMU off, clear BATs and flush TLB. At this point, r3 contains
 * the physical address we are running at, returned by early_init()
 */
	bl	mmu_off
__after_mmu_off:
	bl	clear_bats
	bl	flush_tlbs

	bl	initial_bats
	bl	load_segment_registers
BEGIN_MMU_FTR_SECTION
	bl	reloc_offset
	bl	early_hash_table
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
#if defined(CONFIG_BOOTX_TEXT)
	bl	setup_disp_bat
#endif
#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
	bl	setup_cpm_bat
#endif
#ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
	bl	setup_usbgecko_bat
#endif

/*
 * Call setup_cpu for CPU 0 and initialize 6xx Idle
 */
	bl	reloc_offset
	li	r24,0			/* cpu# */
	bl	call_setup_cpu		/* Call setup_cpu for this CPU */
	bl	reloc_offset
	bl	init_idle_6xx


/*
 * We need to run with _start at physical address 0.
 * On CHRP, we are loaded at 0x10000 since OF on CHRP uses
 * the exception vectors at 0 (and therefore this copy
 * overwrites OF's exception vectors with our own).
 * The MMU is off at this point.
 */
	bl	reloc_offset
	mr	r26,r3
	addis	r4,r3,KERNELBASE@h	/* current address of _start */
	lis	r5,PHYSICAL_START@h
	cmplw	0,r4,r5			/* already running at PHYSICAL_START? */
	bne	relocate_kernel
/*
 * we now have the 1st 16M of ram mapped with the bats.
 * prep needs the mmu to be turned on here, but pmac already has it on.
 * this shouldn't bother the pmac since it just gets turned on again
 * as we jump to our code at KERNELBASE. -- Cort
 * Actually no, pmac doesn't have it on any more. BootX enters with MMU
 * off, and in other cases, we now turn it off before changing BATs above.
 */
turn_on_mmu:
	mfmsr	r0
	ori	r0,r0,MSR_DR|MSR_IR|MSR_RI
	mtspr	SPRN_SRR1,r0
	lis	r0,start_here@h
	ori	r0,r0,start_here@l
	mtspr	SPRN_SRR0,r0
	RFI				/* enables MMU */
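
/*
 * The mtspr SRR0/SRR1 + RFI sequence above is the standard 6xx idiom
 * for changing MSR and PC together: RFI reloads the MSR from SRR1 and
 * resumes execution at SRR0, so translation (MSR_IR/MSR_DR) comes on
 * exactly as we land at start_here.  The same pattern recurs below in
 * update_bats, mmu_off and the secondary-CPU startup.
 */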
/*
 * We need __secondary_hold as a place to hold the other cpus on
 * an SMP machine, even when we are running a UP kernel.
 */
	. = 0xc0			/* for prep bootloader */
	li	r3,1			/* MTX only has 1 cpu */
	.globl	__secondary_hold
__secondary_hold:
	/* tell the master we're here */
	stw	r3,__secondary_hold_acknowledge@l(0)
#ifdef CONFIG_SMP
100:	lwz	r4,0(0)
	/* wait until we're told to start */
	cmpw	0,r4,r3
	bne	100b
	/* our cpu # was at addr 0 - go */
	mr	r24,r3			/* cpu # */
	b	__secondary_start
#else
	b	.
#endif /* CONFIG_SMP */

	.globl	__secondary_hold_spinloop
__secondary_hold_spinloop:
	.long	0
	.globl	__secondary_hold_acknowledge
__secondary_hold_acknowledge:
	.long	-1

/* System reset */
/* core99 pmac starts the secondary here by changing the vector, and
   putting it back to what it was (unknown_exception) when done.  */
	EXCEPTION(0x100, Reset, unknown_exception, EXC_XFER_STD)

/* Machine check */
/*
 * On CHRP, this is complicated by the fact that we could get a
 * machine check inside RTAS, and we have no guarantee that certain
 * critical registers will have the values we expect.  The set of
 * registers that might have bad values includes all the GPRs
 * and all the BATs.  We indicate that we are in RTAS by putting
 * a non-zero value, the address of the exception frame to use,
 * in thread.rtas_sp.  The machine check handler checks thread.rtas_sp
 * and uses its value if it is non-zero.
 * (Other exception handlers assume that r1 is a valid kernel stack
 * pointer when we take an exception from supervisor mode.)
 *  -- paulus.
 */
	. = 0x200
	DO_KVM	0x200
MachineCheck:
	EXCEPTION_PROLOG_0
#ifdef CONFIG_PPC_CHRP
	mfspr	r11, SPRN_SPRG_THREAD
	lwz	r11, RTAS_SP(r11)
	cmpwi	cr1, r11, 0
	bne	cr1, 7f
#endif /* CONFIG_PPC_CHRP */
	EXCEPTION_PROLOG_1 for_rtas=1
7:	EXCEPTION_PROLOG_2
	addi	r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_CHRP
#ifdef CONFIG_VMAP_STACK
	mfspr	r4, SPRN_SPRG_THREAD
	tovirt(r4, r4)
	lwz	r4, RTAS_SP(r4)
	cmpwi	cr1, r4, 0
#endif
	beq	cr1, machine_check_tramp
	twi	31, 0, 0
#else
	b	machine_check_tramp
#endif

/* Data access exception. */
	. = 0x300
	DO_KVM	0x300
DataAccess:
#ifdef CONFIG_VMAP_STACK
	mtspr	SPRN_SPRG_SCRATCH0,r10
	mfspr	r10, SPRN_SPRG_THREAD
BEGIN_MMU_FTR_SECTION
	stw	r11, THR11(r10)
	mfspr	r10, SPRN_DSISR
	mfcr	r11
#ifdef CONFIG_PPC_KUAP
	andis.	r10, r10, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH | DSISR_PROTFAULT)@h
#else
	andis.	r10, r10, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
#endif
	mfspr	r10, SPRN_SPRG_THREAD
	beq	hash_page_dsi
.Lhash_page_dsi_cont:
	mtcr	r11
	lwz	r11, THR11(r10)
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
	mtspr	SPRN_SPRG_SCRATCH1,r11
	mfspr	r11, SPRN_DAR
	stw	r11, DAR(r10)
	mfspr	r11, SPRN_DSISR
	stw	r11, DSISR(r10)
	mfspr	r11, SPRN_SRR0
	stw	r11, SRR0(r10)
	mfspr	r11, SPRN_SRR1		/* check whether user or kernel */
	stw	r11, SRR1(r10)
	mfcr	r10
	andi.	r11, r11, MSR_PR

	EXCEPTION_PROLOG_1
	b	handle_page_fault_tramp_1
#else	/* CONFIG_VMAP_STACK */
	EXCEPTION_PROLOG handle_dar_dsisr=1
	get_and_save_dar_dsisr_on_stack	r4, r5, r11
BEGIN_MMU_FTR_SECTION
#ifdef CONFIG_PPC_KUAP
	andis.	r0, r5, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH | DSISR_PROTFAULT)@h
#else
	andis.	r0, r5, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
#endif
	bne	handle_page_fault_tramp_2	/* if not, try to put a PTE */
	rlwinm	r3, r5, 32 - 15, 21, 21		/* DSISR_STORE -> _PAGE_RW */
	bl	hash_page
	b	handle_page_fault_tramp_1
FTR_SECTION_ELSE
	b	handle_page_fault_tramp_2
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
#endif	/* CONFIG_VMAP_STACK */
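
/*
 * A note on the DSISR_STORE -> _PAGE_RW trick above: DSISR_STORE is
 * 0x02000000 and _PAGE_RW is 0x400, so rotating the DSISR left by 17
 * (written "32 - 15") and keeping only bit 21 turns "this fault was a
 * store" directly into the _PAGE_RW access flag that hash_page expects
 * in r3.
 */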

/* Instruction access exception. */
	. = 0x400
	DO_KVM	0x400
InstructionAccess:
#ifdef CONFIG_VMAP_STACK
	mtspr	SPRN_SPRG_SCRATCH0,r10
	mtspr	SPRN_SPRG_SCRATCH1,r11
	mfspr	r10, SPRN_SPRG_THREAD
	mfspr	r11, SPRN_SRR0
	stw	r11, SRR0(r10)
	mfspr	r11, SPRN_SRR1		/* check whether user or kernel */
	stw	r11, SRR1(r10)
	mfcr	r10
BEGIN_MMU_FTR_SECTION
	andis.	r11, r11, SRR1_ISI_NOPT@h	/* no pte found? */
	bne	hash_page_isi
.Lhash_page_isi_cont:
	mfspr	r11, SPRN_SRR1		/* check whether user or kernel */
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
	andi.	r11, r11, MSR_PR

	EXCEPTION_PROLOG_1
	EXCEPTION_PROLOG_2
#else	/* CONFIG_VMAP_STACK */
	EXCEPTION_PROLOG
	andis.	r0,r9,SRR1_ISI_NOPT@h	/* no pte found? */
	beq	1f			/* if so, try to put a PTE */
	li	r3,0			/* into the hash table */
	mr	r4,r12			/* SRR0 is fault address */
BEGIN_MMU_FTR_SECTION
	bl	hash_page
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
#endif	/* CONFIG_VMAP_STACK */
1:	mr	r4,r12
	andis.	r5,r9,DSISR_SRR1_MATCH_32S@h	/* Filter relevant SRR1 bits */
	stw	r4, _DAR(r11)
	EXC_XFER_LITE(0x400, handle_page_fault)

/* External interrupt */
	EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)

/* Alignment exception */
	. = 0x600
	DO_KVM	0x600
Alignment:
	EXCEPTION_PROLOG handle_dar_dsisr=1
	save_dar_dsisr_on_stack	r4, r5, r11
	addi	r3,r1,STACK_FRAME_OVERHEAD
	b	alignment_exception_tramp

/* Program check exception */
	EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)

/* Floating-point unavailable */
	. = 0x800
	DO_KVM	0x800
FPUnavailable:
BEGIN_FTR_SECTION
/*
 * Certain Freescale cores don't have an FPU and treat fp instructions
 * as a FP Unavailable exception.  Redirect to illegal/emulation handling.
 */
	b	ProgramCheck
END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE)
	EXCEPTION_PROLOG
	beq	1f
	bl	load_up_fpu		/* if from user, just load it up */
	b	fast_exception_return
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_LITE(0x800, kernel_fp_unavailable_exception)

/* Decrementer */
	EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)

	EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_STD)

/* System call */
	. = 0xc00
	DO_KVM	0xc00
SystemCall:
	SYSCALL_ENTRY	0xc00

	EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
	EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_STD)

/*
 * The Altivec unavailable trap is at 0x0f20.  Foo.
 * We effectively remap it to 0x3000.
 * We include an altivec unavailable exception vector even if
 * not configured for Altivec, so that you can't panic a
 * non-altivec kernel running on a machine with altivec just
 * by executing an altivec instruction.
 */
	. = 0xf00
	DO_KVM	0xf00
	b	PerformanceMonitor

	. = 0xf20
	DO_KVM	0xf20
	b	AltiVecUnavailable
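
/*
 * The 0xf00 and 0xf20 slots are only 0x20 bytes apart, which leaves no
 * room for a full exception prolog, hence the single branches above to
 * PerformanceMonitor and AltiVecUnavailable, which live with the other
 * trampolines past the fixed vectors (after the ". = 0x3000" below).
 */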

/*
 * Handle TLB miss for instruction on 603/603e.
 * Note: we get an alternate set of r0 - r3 to use automatically.
 */
	. = 0x1000
InstructionTLBMiss:
/*
 * r0:	scratch
 * r1:	linux style pte ( later becomes ppc hardware pte )
 * r2:	ptr to linux-style pte
 * r3:	scratch
 */
	/* Get PTE (linux-style) and check access */
	mfspr	r3,SPRN_IMISS
#if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC)
	lis	r1, TASK_SIZE@h		/* check if kernel address */
	cmplw	0,r1,r3
#endif
	mfspr	r2, SPRN_SPRG_PGDIR
	li	r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
#if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC)
	bgt-	112f
	lis	r2, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
	addi	r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l	/* kernel page table */
#endif
112:	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
	lwz	r2,0(r2)		/* get pmd entry */
	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
	beq-	InstructionAddressInvalid	/* return if no mapping */
	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
	lwz	r0,0(r2)		/* get linux-style pte */
	andc.	r1,r1,r0		/* check access & ~permission */
	bne-	InstructionAddressInvalid	/* return if access not permitted */
	/* Convert linux-style PTE to low word of PPC-style PTE */
	rlwimi	r0,r0,32-2,31,31	/* _PAGE_USER -> PP lsb */
	ori	r1, r1, 0xe06		/* clear out reserved bits */
	andc	r1, r0, r1		/* PP = user? 1 : 0 */
BEGIN_FTR_SECTION
	rlwinm	r1,r1,0,~_PAGE_COHERENT	/* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
	mtspr	SPRN_RPA,r1
	tlbli	r3
	mfspr	r3,SPRN_SRR1		/* Need to restore CR0 */
	mtcrf	0x80,r3
	rfi
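
/*
 * A worked example of the two-level walk above, for 4k pages: the
 * effective address splits as EA[0:9] (pgd index), EA[10:19] (pte
 * index) and EA[20:31] (page offset).  "rlwimi r2,r3,12,20,29" drops
 * the top ten bits of the miss address, times 4, into the low bits of
 * the page-aligned pgd pointer; the second rlwimi does the same with
 * the next ten bits and the pte-page address to reach the pte itself.
 */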
InstructionAddressInvalid:
	mfspr	r3,SPRN_SRR1
	rlwinm	r1,r3,9,6,6	/* Get load/store bit */
	addis	r1,r1,0x2000
	mtspr	SPRN_DSISR,r1	/* (shouldn't be needed) */
	andi.	r2,r3,0xFFFF	/* Clear upper bits of SRR1 */
	or	r2,r2,r1
	mtspr	SPRN_SRR1,r2
	mfspr	r1,SPRN_IMISS	/* Get failing address */
	rlwinm.	r2,r2,0,31,31	/* Check for little endian access */
	rlwimi	r2,r2,1,30,30	/* change 1 -> 3 */
	xor	r1,r1,r2
	mtspr	SPRN_DAR,r1	/* Set fault address */
	mfmsr	r0		/* Restore "normal" registers */
	xoris	r0,r0,MSR_TGPR>>16
	mtcrf	0x80,r3		/* Restore CR0 */
	mtmsr	r0
	b	InstructionAccess

/*
 * Handle TLB miss for DATA Load operation on 603/603e
 */
	. = 0x1100
DataLoadTLBMiss:
/*
 * r0:	scratch
 * r1:	linux style pte ( later becomes ppc hardware pte )
 * r2:	ptr to linux-style pte
 * r3:	scratch
 */
	/* Get PTE (linux-style) and check access */
	mfspr	r3,SPRN_DMISS
	lis	r1, TASK_SIZE@h		/* check if kernel address */
	cmplw	0,r1,r3
	mfspr	r2, SPRN_SPRG_PGDIR
	li	r1, _PAGE_PRESENT | _PAGE_ACCESSED
	bgt-	112f
	lis	r2, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
	addi	r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l	/* kernel page table */
112:	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
	lwz	r2,0(r2)		/* get pmd entry */
	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
	beq-	DataAddressInvalid	/* return if no mapping */
	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
	lwz	r0,0(r2)		/* get linux-style pte */
	andc.	r1,r1,r0		/* check access & ~permission */
	bne-	DataAddressInvalid	/* return if access not permitted */
	/*
	 * NOTE! We are assuming this is not an SMP system, otherwise
	 * we would need to update the pte atomically with lwarx/stwcx.
	 */
	/* Convert linux-style PTE to low word of PPC-style PTE */
	rlwinm	r1,r0,32-9,30,30	/* _PAGE_RW -> PP msb */
	rlwimi	r0,r0,32-1,30,30	/* _PAGE_USER -> PP msb */
	rlwimi	r0,r0,32-1,31,31	/* _PAGE_USER -> PP lsb */
	ori	r1,r1,0xe04		/* clear out reserved bits */
	andc	r1,r0,r1		/* PP = user? rw? 1: 3: 0 */
BEGIN_FTR_SECTION
	rlwinm	r1,r1,0,~_PAGE_COHERENT	/* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
	mtspr	SPRN_RPA,r1
	mfspr	r2,SPRN_SRR1		/* Need to restore CR0 */
	mtcrf	0x80,r2
BEGIN_MMU_FTR_SECTION
	li	r0,1
	mfspr	r1,SPRN_SPRG_603_LRU
	rlwinm	r2,r3,20,27,31		/* Get Address bits 15:19 */
	slw	r0,r0,r2
	xor	r1,r0,r1
	srw	r0,r1,r2
	mtspr	SPRN_SPRG_603_LRU,r1
	mfspr	r2,SPRN_SRR1
	rlwimi	r2,r0,31-14,14,14
	mtspr	SPRN_SRR1,r2
END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
	tlbld	r3
	rfi
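
/*
 * The MMU_FTR_NEED_DTLB_SW_LRU block above (repeated in the store miss
 * handler below) implements a one-bit-per-set software LRU for the
 * 603's 2-way DTLB: address bits 15:19 pick the set, the matching bit
 * in SPRG_603_LRU is flipped on each miss, and the flipped value is
 * copied into the SRR1 way-select bit consumed by tlbld, so the two
 * ways of a set are replaced alternately.
 */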
DataAddressInvalid:
	mfspr	r3,SPRN_SRR1
	rlwinm	r1,r3,9,6,6	/* Get load/store bit */
	addis	r1,r1,0x2000
	mtspr	SPRN_DSISR,r1
	andi.	r2,r3,0xFFFF	/* Clear upper bits of SRR1 */
	mtspr	SPRN_SRR1,r2
	mfspr	r1,SPRN_DMISS	/* Get failing address */
	rlwinm.	r2,r2,0,31,31	/* Check for little endian access */
	beq	20f		/* Jump if big endian */
	xori	r1,r1,3
20:	mtspr	SPRN_DAR,r1	/* Set fault address */
	mfmsr	r0		/* Restore "normal" registers */
	xoris	r0,r0,MSR_TGPR>>16
	mtcrf	0x80,r3		/* Restore CR0 */
	mtmsr	r0
	b	DataAccess

/*
 * Handle TLB miss for DATA Store on 603/603e
 */
	. = 0x1200
DataStoreTLBMiss:
/*
 * r0:	scratch
 * r1:	linux style pte ( later becomes ppc hardware pte )
 * r2:	ptr to linux-style pte
 * r3:	scratch
 */
	/* Get PTE (linux-style) and check access */
	mfspr	r3,SPRN_DMISS
	lis	r1, TASK_SIZE@h		/* check if kernel address */
	cmplw	0,r1,r3
	mfspr	r2, SPRN_SPRG_PGDIR
	li	r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED
	bgt-	112f
	lis	r2, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
	addi	r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l	/* kernel page table */
112:	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
	lwz	r2,0(r2)		/* get pmd entry */
	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
	beq-	DataAddressInvalid	/* return if no mapping */
	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
	lwz	r0,0(r2)		/* get linux-style pte */
	andc.	r1,r1,r0		/* check access & ~permission */
	bne-	DataAddressInvalid	/* return if access not permitted */
	/*
	 * NOTE! We are assuming this is not an SMP system, otherwise
	 * we would need to update the pte atomically with lwarx/stwcx.
	 */
	/* Convert linux-style PTE to low word of PPC-style PTE */
	rlwimi	r0,r0,32-2,31,31	/* _PAGE_USER -> PP lsb */
	li	r1,0xe06		/* clear out reserved bits & PP msb */
	andc	r1,r0,r1		/* PP = user? 1: 0 */
BEGIN_FTR_SECTION
	rlwinm	r1,r1,0,~_PAGE_COHERENT	/* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
	mtspr	SPRN_RPA,r1
	mfspr	r2,SPRN_SRR1		/* Need to restore CR0 */
	mtcrf	0x80,r2
BEGIN_MMU_FTR_SECTION
	li	r0,1
	mfspr	r1,SPRN_SPRG_603_LRU
	rlwinm	r2,r3,20,27,31		/* Get Address bits 15:19 */
	slw	r0,r0,r2
	xor	r1,r0,r1
	srw	r0,r1,r2
	mtspr	SPRN_SPRG_603_LRU,r1
	mfspr	r2,SPRN_SRR1
	rlwimi	r2,r0,31-14,14,14
	mtspr	SPRN_SRR1,r2
END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
	tlbld	r3
	rfi

#ifndef CONFIG_ALTIVEC
#define altivec_assist_exception	unknown_exception
#endif

#ifndef CONFIG_TAU_INT
#define TAUException	unknown_exception
#endif

	EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception, EXC_XFER_STD)
	EXCEPTION(0x1400, SMI, SMIException, EXC_XFER_STD)
	EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1600, Trap_16, altivec_assist_exception, EXC_XFER_STD)
	EXCEPTION(0x1700, Trap_17, TAUException, EXC_XFER_STD)
	EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1c00, Trap_1c, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2000, RunMode, RunModeException, EXC_XFER_STD)
	EXCEPTION(0x2100, Trap_21, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2200, Trap_22, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2300, Trap_23, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2400, Trap_24, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2500, Trap_25, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2600, Trap_26, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2700, Trap_27, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2800, Trap_28, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2900, Trap_29, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2a00, Trap_2a, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2b00, Trap_2b, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2c00, Trap_2c, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2d00, Trap_2d, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2e00, Trap_2e, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2f00, Trap_2f, unknown_exception, EXC_XFER_STD)

	. = 0x3000

machine_check_tramp:
	EXC_XFER_STD(0x200, machine_check_exception)

alignment_exception_tramp:
	EXC_XFER_STD(0x600, alignment_exception)

handle_page_fault_tramp_1:
#ifdef CONFIG_VMAP_STACK
	EXCEPTION_PROLOG_2 handle_dar_dsisr=1
#endif
	lwz	r4, _DAR(r11)
	lwz	r5, _DSISR(r11)
	/* fall through */
handle_page_fault_tramp_2:
	EXC_XFER_LITE(0x300, handle_page_fault)

#ifdef CONFIG_VMAP_STACK
.macro save_regs_thread		thread
	stw	r0, THR0(\thread)
	stw	r3, THR3(\thread)
	stw	r4, THR4(\thread)
	stw	r5, THR5(\thread)
	stw	r6, THR6(\thread)
	stw	r8, THR8(\thread)
	stw	r9, THR9(\thread)
	mflr	r0
	stw	r0, THLR(\thread)
	mfctr	r0
	stw	r0, THCTR(\thread)
.endm

.macro restore_regs_thread	thread
	lwz	r0, THLR(\thread)
	mtlr	r0
	lwz	r0, THCTR(\thread)
	mtctr	r0
	lwz	r0, THR0(\thread)
	lwz	r3, THR3(\thread)
	lwz	r4, THR4(\thread)
	lwz	r5, THR5(\thread)
	lwz	r6, THR6(\thread)
	lwz	r8, THR8(\thread)
	lwz	r9, THR9(\thread)
.endm

hash_page_dsi:
	save_regs_thread	r10
	mfdsisr	r3
	mfdar	r4
	mfsrr0	r5
	mfsrr1	r9
	rlwinm	r3, r3, 32 - 15, _PAGE_RW	/* DSISR_STORE -> _PAGE_RW */
	bl	hash_page
	mfspr	r10, SPRN_SPRG_THREAD
	restore_regs_thread r10
	b	.Lhash_page_dsi_cont

hash_page_isi:
	mr	r11, r10
	mfspr	r10, SPRN_SPRG_THREAD
	save_regs_thread	r10
	li	r3, 0
	lwz	r4, SRR0(r10)
	lwz	r9, SRR1(r10)
	bl	hash_page
	mfspr	r10, SPRN_SPRG_THREAD
	restore_regs_thread r10
	mr	r10, r11
	b	.Lhash_page_isi_cont

	.globl fast_hash_page_return
fast_hash_page_return:
	andis.	r10, r9, SRR1_ISI_NOPT@h	/* Set on ISI, cleared on DSI */
	mfspr	r10, SPRN_SPRG_THREAD
	restore_regs_thread r10
	bne	1f

	/* DSI */
	mtcr	r11
	lwz	r11, THR11(r10)
	mfspr	r10, SPRN_SPRG_SCRATCH0
	RFI

1:	/* ISI */
	mtcr	r11
	mfspr	r11, SPRN_SPRG_SCRATCH1
	mfspr	r10, SPRN_SPRG_SCRATCH0
	RFI

stack_overflow:
	vmap_stack_overflow_exception
#endif

AltiVecUnavailable:
	EXCEPTION_PROLOG
#ifdef CONFIG_ALTIVEC
	beq	1f
	bl	load_up_altivec		/* if from user, just load it up */
	b	fast_exception_return
#endif /* CONFIG_ALTIVEC */
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_LITE(0xf20, altivec_unavailable_exception)

PerformanceMonitor:
	EXCEPTION_PROLOG
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_STD(0xf00, performance_monitor_exception)


/*
 * This code is jumped to from the startup code to copy
 * the kernel image to physical address PHYSICAL_START.
 */
relocate_kernel:
	addis	r9,r26,klimit@ha	/* fetch klimit */
	lwz	r25,klimit@l(r9)
	addis	r25,r25,-KERNELBASE@h
	lis	r3,PHYSICAL_START@h	/* Destination base address */
	li	r6,0			/* Destination offset */
	li	r5,0x4000		/* # bytes of memory to copy */
	bl	copy_and_flush		/* copy the first 0x4000 bytes */
	addi	r0,r3,4f@l		/* jump to the address of 4f */
	mtctr	r0			/* in copy and do the rest. */
	bctr				/* jump to the copy */
4:	mr	r5,r25
	bl	copy_and_flush		/* copy the rest */
	b	turn_on_mmu
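
/*
 * relocate_kernel copies the first 0x4000 bytes, enough to cover this
 * early boot code including copy_and_flush, to the destination first,
 * then branches into that copy (the "4f" in the destination image)
 * before copying the rest, so the running copy loop is never
 * overwritten beneath itself while the image slides to PHYSICAL_START.
 */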

/*
 * Copy routine used to copy the kernel to start at physical address 0
 * and flush and invalidate the caches as needed.
 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
 */
_ENTRY(copy_and_flush)
	addi	r5,r5,-4
	addi	r6,r6,-4
4:	li	r0,L1_CACHE_BYTES/4
	mtctr	r0
3:	addi	r6,r6,4			/* copy a cache line */
	lwzx	r0,r6,r4
	stwx	r0,r6,r3
	bdnz	3b
	dcbst	r6,r3			/* write it to memory */
	sync
	icbi	r6,r3			/* flush the icache line */
	cmplw	0,r6,r5
	blt	4b
	sync				/* additional sync needed on g4 */
	isync
	addi	r5,r5,4
	addi	r6,r6,4
	blr

#ifdef CONFIG_SMP
	.globl __secondary_start_mpc86xx
__secondary_start_mpc86xx:
	mfspr	r3, SPRN_PIR
	stw	r3, __secondary_hold_acknowledge@l(0)
	mr	r24, r3			/* cpu # */
	b	__secondary_start

	.globl	__secondary_start_pmac_0
__secondary_start_pmac_0:
	/* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
	li	r24,0
	b	1f
	li	r24,1
	b	1f
	li	r24,2
	b	1f
	li	r24,3
1:
	/* on powersurge, we come in here with IR=0 and DR=1, and DBAT 0
	   set to map the 0xf0000000 - 0xffffffff region */
	mfmsr	r0
	rlwinm	r0,r0,0,28,26		/* clear DR (0x10) */
	mtmsr	r0
	isync

	.globl	__secondary_start
__secondary_start:
	/* Copy some CPU settings from CPU 0 */
	bl	__restore_cpu_setup

	lis	r3,-KERNELBASE@h
	mr	r4,r24
	bl	call_setup_cpu		/* Call setup_cpu for this CPU */
	lis	r3,-KERNELBASE@h
	bl	init_idle_6xx

	/* get current's stack and current */
	lis	r2,secondary_current@ha
	tophys(r2,r2)
	lwz	r2,secondary_current@l(r2)
	tophys(r1,r2)
	lwz	r1,TASK_STACK(r1)

	/* stack */
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	li	r0,0
	tophys(r3,r1)
	stw	r0,0(r3)

	/* load up the MMU */
	bl	load_segment_registers
	bl	load_up_mmu

	/* ptr to phys current thread */
	tophys(r4,r2)
	addi	r4,r4,THREAD	/* phys address of our thread_struct */
	mtspr	SPRN_SPRG_THREAD,r4
	lis	r4, (swapper_pg_dir - PAGE_OFFSET)@h
	ori	r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
	mtspr	SPRN_SPRG_PGDIR, r4

	/* enable MMU and jump to start_secondary */
	li	r4,MSR_KERNEL
	lis	r3,start_secondary@h
	ori	r3,r3,start_secondary@l
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	RFI
#endif /* CONFIG_SMP */

#ifdef CONFIG_KVM_BOOK3S_HANDLER
#include "../kvm/book3s_rmhandlers.S"
#endif

/*
 * Load stuff into the MMU.  Intended to be called with
 * IR=0 and DR=0.
 */
early_hash_table:
	sync			/* Force all PTE updates to finish */
	isync
	tlbia			/* Clear all TLB entries */
	sync			/* wait for tlbia/tlbie to finish */
	TLBSYNC			/* ... on all CPUs */
	/* Load the SDR1 register (hash table base & size) */
	lis	r6, early_hash - PAGE_OFFSET@h
	ori	r6, r6, 3	/* 256kB table */
	mtspr	SPRN_SDR1, r6
	lis	r6, early_hash@h
	addis	r3, r3, Hash@ha
	stw	r6, Hash@l(r3)
	blr
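
/*
 * SDR1 packs the hash table base (HTABORG, upper bits) and the size
 * mask (HTABMASK, low bits) into one register; the mask of 3 written
 * above selects four 64kB units, i.e. the 256kB early hash table the
 * comment refers to.
 */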

load_up_mmu:
	sync			/* Force all PTE updates to finish */
	isync
	tlbia			/* Clear all TLB entries */
	sync			/* wait for tlbia/tlbie to finish */
	TLBSYNC			/* ... on all CPUs */
	/* Load the SDR1 register (hash table base & size) */
	lis	r6,_SDR1@ha
	tophys(r6,r6)
	lwz	r6,_SDR1@l(r6)
	mtspr	SPRN_SDR1,r6

/* Load the BAT registers with the values set up by MMU_init. */
	lis	r3,BATS@ha
	addi	r3,r3,BATS@l
	tophys(r3,r3)
	LOAD_BAT(0,r3,r4,r5)
	LOAD_BAT(1,r3,r4,r5)
	LOAD_BAT(2,r3,r4,r5)
	LOAD_BAT(3,r3,r4,r5)
BEGIN_MMU_FTR_SECTION
	LOAD_BAT(4,r3,r4,r5)
	LOAD_BAT(5,r3,r4,r5)
	LOAD_BAT(6,r3,r4,r5)
	LOAD_BAT(7,r3,r4,r5)
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
	blr

_GLOBAL(load_segment_registers)
	li	r0, NUM_USER_SEGMENTS	/* load up user segment register values */
	mtctr	r0			/* for context 0 */
	li	r3, 0			/* Kp = 0, Ks = 0, VSID = 0 */
#ifdef CONFIG_PPC_KUEP
	oris	r3, r3, SR_NX@h		/* Set Nx */
#endif
#ifdef CONFIG_PPC_KUAP
	oris	r3, r3, SR_KS@h		/* Set Ks */
#endif
	li	r4, 0
3:	mtsrin	r3, r4
	addi	r3, r3, 0x111		/* increment VSID */
	addis	r4, r4, 0x1000		/* address of next segment */
	bdnz	3b
	li	r0, 16 - NUM_USER_SEGMENTS	/* load up kernel segment registers */
	mtctr	r0			/* for context 0 */
	rlwinm	r3, r3, 0, ~SR_NX	/* Nx = 0 */
	rlwinm	r3, r3, 0, ~SR_KS	/* Ks = 0 */
	oris	r3, r3, SR_KP@h		/* Kp = 1 */
3:	mtsrin	r3, r4
	addi	r3, r3, 0x111		/* increment VSID */
	addis	r4, r4, 0x1000		/* address of next segment */
	bdnz	3b
	blr
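
/*
 * Each of the 16 segment registers covers 256MB of effective address
 * space, and mtsrin picks the register from the top 4 bits of r4,
 * which is why r4 steps by 0x10000000 (addis r4,r4,0x1000) per
 * iteration while the VSID in r3 steps by 0x111 to spread successive
 * segments across the hash table.
 */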

/*
 * This is where the main kernel code starts.
 */
start_here:
	/* ptr to current */
	lis	r2,init_task@h
	ori	r2,r2,init_task@l
	/* Set up for using our exception vectors */
	/* ptr to phys current thread */
	tophys(r4,r2)
	addi	r4,r4,THREAD	/* init task's THREAD */
	mtspr	SPRN_SPRG_THREAD,r4
	lis	r4, (swapper_pg_dir - PAGE_OFFSET)@h
	ori	r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
	mtspr	SPRN_SPRG_PGDIR, r4

	/* stack */
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	li	r0,0
	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
/*
 * Do early platform-specific initialization,
 * and set up the MMU.
 */
#ifdef CONFIG_KASAN
	bl	kasan_early_init
#endif
	li	r3,0
	mr	r4,r31
	bl	machine_init
	bl	__save_cpu_setup
	bl	MMU_init
	bl	MMU_init_hw_patch

/*
 * Go back to running unmapped so we can load up new values
 * for SDR1 (hash table pointer) and the segment registers
 * and change to using our exception vectors.
 */
	lis	r4,2f@h
	ori	r4,r4,2f@l
	tophys(r4,r4)
	li	r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)

	.align	4
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	RFI
/* Load up the kernel context */
2:	bl	load_up_mmu

#ifdef CONFIG_BDI_SWITCH
	/* Add helper information for the Abatron bdiGDB debugger.
	 * We do this here because we know the mmu is disabled, and
	 * will be enabled for real in just a few instructions.
	 */
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	stw	r5, 0xf0(0)	/* This must match your Abatron config */
	lis	r6, swapper_pg_dir@h
	ori	r6, r6, swapper_pg_dir@l
	tophys(r5, r5)
	stw	r6, 0(r5)
#endif /* CONFIG_BDI_SWITCH */

/* Now turn on the MMU for real! */
	li	r4,MSR_KERNEL
	lis	r3,start_kernel@h
	ori	r3,r3,start_kernel@l
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	RFI

/*
 * void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next);
 *
 * Set up the segment registers for a new context.
 */
_ENTRY(switch_mmu_context)
	lwz	r3,MMCONTEXTID(r4)
	cmpwi	cr0,r3,0
	blt-	4f
	mulli	r3,r3,897	/* multiply context by skew factor */
	rlwinm	r3,r3,4,8,27	/* VSID = (context & 0xfffff) << 4 */
#ifdef CONFIG_PPC_KUEP
	oris	r3, r3, SR_NX@h	/* Set Nx */
#endif
#ifdef CONFIG_PPC_KUAP
	oris	r3, r3, SR_KS@h	/* Set Ks */
#endif
	li	r0,NUM_USER_SEGMENTS
	mtctr	r0

	lwz	r4, MM_PGD(r4)
#ifdef CONFIG_BDI_SWITCH
	/* Context switch the PTE pointer for the Abatron BDI2000.
	 * The PGDIR is passed as second argument.
	 */
	lis	r5, abatron_pteptrs@ha
	stw	r4, abatron_pteptrs@l + 0x4(r5)
#endif
	tophys(r4, r4)
	mtspr	SPRN_SPRG_PGDIR, r4
	li	r4,0
	isync
3:
	mtsrin	r3,r4
	addi	r3,r3,0x111	/* next VSID */
	rlwinm	r3,r3,0,8,3	/* clear out any overflow from VSID field */
	addis	r4,r4,0x1000	/* address of next segment */
	bdnz	3b
	sync
	isync
	blr
4:	trap
	EMIT_BUG_ENTRY 4b,__FILE__,__LINE__,0
	blr
EXPORT_SYMBOL(switch_mmu_context)
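
/*
 * Worked example for the VSID computation above: context 1 gets the
 * base VSID (1 * 897) << 4 = 0x3810, and the loop then assigns 0x3810,
 * 0x3921, 0x3a32, ... to segments 0, 1, 2, ...  The multiply by the
 * 897 skew factor and the per-segment 0x111 step scatter contexts and
 * segments across the 24-bit VSID space so they hash evenly.
 */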
1:	addic.	r10, r10, -0x1000
	tlbie	r10
	bgt	1b
	sync
	blr

mmu_off:
	addi	r4, r3, __after_mmu_off - _start
	mfmsr	r3
	andi.	r0,r3,MSR_DR|MSR_IR	/* MMU enabled? */
	beqlr
	andc	r3,r3,r0

	.align	4
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	sync
	RFI

/* We use one BAT to map up to 256M of RAM at PAGE_OFFSET */
initial_bats:
	lis	r11,PAGE_OFFSET@h
	tophys(r8,r11)
#ifdef CONFIG_SMP
	ori	r8,r8,0x12		/* R/W access, M=1 */
#else
	ori	r8,r8,2			/* R/W access */
#endif /* CONFIG_SMP */
	ori	r11,r11,BL_256M<<2|0x2	/* set up BAT registers for 604 */

	mtspr	SPRN_DBAT0L,r8		/* N.B. 6xx have valid */
	mtspr	SPRN_DBAT0U,r11		/* bit in upper BAT register */
	mtspr	SPRN_IBAT0L,r8
	mtspr	SPRN_IBAT0U,r11
	isync
	blr

#ifdef CONFIG_BOOTX_TEXT
setup_disp_bat:
	/*
	 * setup the display bat prepared for us in prom.c
	 */
	mflr	r8
	bl	reloc_offset
	mtlr	r8
	addis	r8,r3,disp_BAT@ha
	addi	r8,r8,disp_BAT@l
	cmpwi	cr0,r8,0
	beqlr
	lwz	r11,0(r8)
	lwz	r8,4(r8)
	mtspr	SPRN_DBAT3L,r8
	mtspr	SPRN_DBAT3U,r11
	blr
#endif /* CONFIG_BOOTX_TEXT */

#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
setup_cpm_bat:
	lis	r8, 0xf000
	ori	r8, r8, 0x002a
	mtspr	SPRN_DBAT1L, r8

	lis	r11, 0xf000
	ori	r11, r11, (BL_1M << 2) | 2
	mtspr	SPRN_DBAT1U, r11

	blr
#endif

#ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
setup_usbgecko_bat:
	/* prepare a BAT for early io */
#if defined(CONFIG_GAMECUBE)
	lis	r8, 0x0c00
#elif defined(CONFIG_WII)
	lis	r8, 0x0d00
#else
#error Invalid platform for USB Gecko based early debugging.
#endif
	/*
	 * The virtual address used must match the virtual address
	 * associated to the fixmap entry FIX_EARLY_DEBUG_BASE.
	 */
	lis	r11, 0xfffe	/* top 128K */
	ori	r8, r8, 0x002a	/* uncached, guarded, rw */
	ori	r11, r11, 0x2	/* 128K, Vs=1, Vp=0 */
	mtspr	SPRN_DBAT1L, r8
	mtspr	SPRN_DBAT1U, r11
	blr
#endif

#ifdef CONFIG_8260
/* Jump into the system reset for the rom.
 * We first disable the MMU, and then jump to the ROM reset address.
 *
 * r3 is the board info structure, r4 is the location for starting.
 * I use this for building a small kernel that can load other kernels,
 * rather than trying to write or rely on a rom monitor that can tftp load.
 */
	.globl	m8260_gorom
m8260_gorom:
	mfmsr	r0
	rlwinm	r0,r0,0,17,15	/* clear MSR_EE in r0 */
	sync
	mtmsr	r0
	sync
	mfspr	r11, SPRN_HID0
	lis	r10, 0
	ori	r10,r10,HID0_ICE|HID0_DCE
	andc	r11, r11, r10
	mtspr	SPRN_HID0, r11
	isync
	li	r5, MSR_ME|MSR_RI
	lis	r6,2f@h
	addis	r6,r6,-KERNELBASE@h
	ori	r6,r6,2f@l
	mtspr	SPRN_SRR0,r6
	mtspr	SPRN_SRR1,r5
	isync
	sync
	rfi
2:
	mtlr	r4
	blr
#endif


/*
 * We put a few things here that have to be page-aligned.
 * This stuff goes at the beginning of the data segment,
 * which is page-aligned.
 */
	.data
	.globl	sdata
sdata:
	.globl	empty_zero_page
empty_zero_page:
	.space	4096
EXPORT_SYMBOL(empty_zero_page)

	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	PGD_TABLE_SIZE

/* Room for two PTE pointers, usually the kernel and current user
 * pointers to their respective root page table.
 */
abatron_pteptrs:
	.space	8