Lines Matching full:l

30 l.movhi rd,hi(-KERNELBASE) ;\
31 l.add rd,rd,rs
34 l.movhi gpr,0x0
37 l.movhi gpr,hi(symbol) ;\
38 l.ori gpr,gpr,lo(symbol)
54 #define EMERGENCY_PRINT_STORE_GPR4 l.mtspr r0,r4,SPR_SHADOW_GPR(14)
55 #define EMERGENCY_PRINT_LOAD_GPR4 l.mfspr r4,r0,SPR_SHADOW_GPR(14)
57 #define EMERGENCY_PRINT_STORE_GPR5 l.mtspr r0,r5,SPR_SHADOW_GPR(15)
58 #define EMERGENCY_PRINT_LOAD_GPR5 l.mfspr r5,r0,SPR_SHADOW_GPR(15)
60 #define EMERGENCY_PRINT_STORE_GPR6 l.mtspr r0,r6,SPR_SHADOW_GPR(16)
61 #define EMERGENCY_PRINT_LOAD_GPR6 l.mfspr r6,r0,SPR_SHADOW_GPR(16)
63 #define EMERGENCY_PRINT_STORE_GPR7 l.mtspr r0,r7,SPR_SHADOW_GPR(7)
64 #define EMERGENCY_PRINT_LOAD_GPR7 l.mfspr r7,r0,SPR_SHADOW_GPR(7)
66 #define EMERGENCY_PRINT_STORE_GPR8 l.mtspr r0,r8,SPR_SHADOW_GPR(8)
67 #define EMERGENCY_PRINT_LOAD_GPR8 l.mfspr r8,r0,SPR_SHADOW_GPR(8)
69 #define EMERGENCY_PRINT_STORE_GPR9 l.mtspr r0,r9,SPR_SHADOW_GPR(9)
70 #define EMERGENCY_PRINT_LOAD_GPR9 l.mfspr r9,r0,SPR_SHADOW_GPR(9)
73 #define EMERGENCY_PRINT_STORE_GPR4 l.sw 0x20(r0),r4
74 #define EMERGENCY_PRINT_LOAD_GPR4 l.lwz r4,0x20(r0)
76 #define EMERGENCY_PRINT_STORE_GPR5 l.sw 0x24(r0),r5
77 #define EMERGENCY_PRINT_LOAD_GPR5 l.lwz r5,0x24(r0)
79 #define EMERGENCY_PRINT_STORE_GPR6 l.sw 0x28(r0),r6
80 #define EMERGENCY_PRINT_LOAD_GPR6 l.lwz r6,0x28(r0)
82 #define EMERGENCY_PRINT_STORE_GPR7 l.sw 0x2c(r0),r7
83 #define EMERGENCY_PRINT_LOAD_GPR7 l.lwz r7,0x2c(r0)
85 #define EMERGENCY_PRINT_STORE_GPR8 l.sw 0x30(r0),r8
86 #define EMERGENCY_PRINT_LOAD_GPR8 l.lwz r8,0x30(r0)
88 #define EMERGENCY_PRINT_STORE_GPR9 l.sw 0x34(r0),r9
89 #define EMERGENCY_PRINT_LOAD_GPR9 l.lwz r9,0x34(r0)
97 #define EXCEPTION_STORE_GPR2 l.mtspr r0,r2,SPR_SHADOW_GPR(2)
98 #define EXCEPTION_LOAD_GPR2 l.mfspr r2,r0,SPR_SHADOW_GPR(2)
100 #define EXCEPTION_STORE_GPR3 l.mtspr r0,r3,SPR_SHADOW_GPR(3)
101 #define EXCEPTION_LOAD_GPR3 l.mfspr r3,r0,SPR_SHADOW_GPR(3)
103 #define EXCEPTION_STORE_GPR4 l.mtspr r0,r4,SPR_SHADOW_GPR(4)
104 #define EXCEPTION_LOAD_GPR4 l.mfspr r4,r0,SPR_SHADOW_GPR(4)
106 #define EXCEPTION_STORE_GPR5 l.mtspr r0,r5,SPR_SHADOW_GPR(5)
107 #define EXCEPTION_LOAD_GPR5 l.mfspr r5,r0,SPR_SHADOW_GPR(5)
109 #define EXCEPTION_STORE_GPR6 l.mtspr r0,r6,SPR_SHADOW_GPR(6)
110 #define EXCEPTION_LOAD_GPR6 l.mfspr r6,r0,SPR_SHADOW_GPR(6)
113 #define EXCEPTION_STORE_GPR2 l.sw 0x64(r0),r2
114 #define EXCEPTION_LOAD_GPR2 l.lwz r2,0x64(r0)
116 #define EXCEPTION_STORE_GPR3 l.sw 0x68(r0),r3
117 #define EXCEPTION_LOAD_GPR3 l.lwz r3,0x68(r0)
119 #define EXCEPTION_STORE_GPR4 l.sw 0x6c(r0),r4
120 #define EXCEPTION_LOAD_GPR4 l.lwz r4,0x6c(r0)
122 #define EXCEPTION_STORE_GPR5 l.sw 0x70(r0),r5
123 #define EXCEPTION_LOAD_GPR5 l.lwz r5,0x70(r0)
125 #define EXCEPTION_STORE_GPR6 l.sw 0x74(r0),r6
126 #define EXCEPTION_LOAD_GPR6 l.lwz r6,0x74(r0)
135 #define EXCEPTION_T_STORE_GPR30 l.mtspr r0,r30,SPR_SHADOW_GPR(30)
136 #define EXCEPTION_T_LOAD_GPR30(reg) l.mfspr reg,r0,SPR_SHADOW_GPR(30)
138 #define EXCEPTION_T_STORE_GPR10 l.mtspr r0,r10,SPR_SHADOW_GPR(10)
139 #define EXCEPTION_T_LOAD_GPR10(reg) l.mfspr reg,r0,SPR_SHADOW_GPR(10)
141 #define EXCEPTION_T_STORE_SP l.mtspr r0,r1,SPR_SHADOW_GPR(1)
142 #define EXCEPTION_T_LOAD_SP(reg) l.mfspr reg,r0,SPR_SHADOW_GPR(1)
145 #define EXCEPTION_T_STORE_GPR30 l.sw 0x78(r0),r30
146 #define EXCEPTION_T_LOAD_GPR30(reg) l.lwz reg,0x78(r0)
148 #define EXCEPTION_T_STORE_GPR10 l.sw 0x7c(r0),r10
149 #define EXCEPTION_T_LOAD_GPR10(reg) l.lwz reg,0x7c(r0)
151 #define EXCEPTION_T_STORE_SP l.sw 0x80(r0),r1
152 #define EXCEPTION_T_LOAD_SP(reg) l.lwz reg,0x80(r0)
160 l.mfspr t1,r0,SPR_COREID ;\
161 l.slli t1,t1,2 ;\
162 l.add reg,reg,t1 ;\
164 l.lwz reg,0(t1)
169 l.lwz reg,0(t1)
177 l.mfspr r10,r0,SPR_COREID ;\
178 l.slli r10,r10,2 ;\
179 l.add r30,r30,r10 ;\
181 l.lwz r10,0(r30)
187 l.lwz r10,0(r30)
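
The two macro bodies above (file lines 160-187) turn the core ID into a word offset: SPR_COREID is read, shifted left by 2 to scale it to 4-byte entries, and added to a table base before the load. A minimal C sketch of that indexing; the table name per_cpu_word is a hypothetical stand-in for the symbol the macros are given:

#include <stdint.h>

extern uint32_t per_cpu_word[];          /* hypothetical per-CPU word table */

static uint32_t load_per_cpu_word(uint32_t coreid)
{
        /* l.slli t1,t1,2 ; l.add reg,reg,t1 ; l.lwz reg,0(t1) */
        return per_cpu_word[coreid];     /* base + (coreid << 2), then 32-bit load */
}
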
223 l.mfspr r30,r0,SPR_ESR_BASE ;\
224 l.andi r30,r30,SPR_SR_SM ;\
225 l.sfeqi r30,0 ;\
227 l.bnf 2f /* kernel_mode */ ;\
232 l.lwz r1,(TI_KSP)(r30) ;\
238 l.addi r1,r1,-(INT_FRAME_SIZE) ;\
241 l.sw PT_GPR12(r30),r12 ;\
243 l.mfspr r12,r0,SPR_EPCR_BASE ;\
244 l.sw PT_PC(r30),r12 ;\
245 l.mfspr r12,r0,SPR_ESR_BASE ;\
246 l.sw PT_SR(r30),r12 ;\
249 l.sw PT_GPR30(r30),r12 ;\
252 l.sw PT_GPR10(r30),r12 ;\
255 l.sw PT_SP(r30),r12 ;\
257 l.sw PT_GPR4(r30),r4 ;\
258 l.mfspr r4,r0,SPR_EEAR_BASE ;\
263 l.mfspr r30,r0,SPR_SR ;\
264 l.andi r30,r30,SPR_SR_DSX ;\
265 l.ori r30,r30,(EXCEPTION_SR) ;\
266 l.mtspr r0,r30,SPR_ESR_BASE ;\
269 l.mtspr r0,r30,SPR_EPCR_BASE ;\
270 l.rfe
278 * l.ori r3,r0,0x1 ;\
279 * l.mtspr r0,r3,SPR_SR ;\
280 * l.movhi r3,hi(0xf0000100) ;\
281 * l.ori r3,r3,lo(0xf0000100) ;\
282 * l.jr r3 ;\
283 * l.nop 1
298 l.addi r1,r3,0x0 ;\
299 l.addi r10,r9,0x0 ;\
301 l.jal _emergency_print ;\
302 l.ori r3,r0,lo(_string_unhandled_exception) ;\
303 l.mfspr r3,r0,SPR_NPC ;\
304 l.jal _emergency_print_nr ;\
305 l.andi r3,r3,0x1f00 ;\
307 l.jal _emergency_print ;\
308 l.ori r3,r0,lo(_string_epc_prefix) ;\
309 l.jal _emergency_print_nr ;\
310 l.mfspr r3,r0,SPR_EPCR_BASE ;\
311 l.jal _emergency_print ;\
312 l.ori r3,r0,lo(_string_nl) ;\
314 l.addi r3,r1,0x0 ;\
315 l.addi r9,r10,0x0 ;\
322 l.addi r1,r1,-(INT_FRAME_SIZE) ;\
325 l.sw PT_GPR12(r30),r12 ;\
326 l.mfspr r12,r0,SPR_EPCR_BASE ;\
327 l.sw PT_PC(r30),r12 ;\
328 l.mfspr r12,r0,SPR_ESR_BASE ;\
329 l.sw PT_SR(r30),r12 ;\
332 l.sw PT_GPR30(r30),r12 ;\
335 l.sw PT_GPR10(r30),r12 ;\
338 l.sw PT_SP(r30),r12 ;\
339 l.sw PT_GPR13(r30),r13 ;\
342 l.sw PT_GPR4(r30),r4 ;\
343 l.mfspr r4,r0,SPR_EEAR_BASE ;\
347 l.ori r30,r0,(EXCEPTION_SR) ;\
348 l.mtspr r0,r30,SPR_ESR_BASE ;\
351 l.mtspr r0,r30,SPR_EPCR_BASE ;\
352 l.rfe
363 l.jr r13
364 l.nop
375 // l.mtspr r0,r0,SPR_TTMR
384 // l.mtspr r0,r0,SPR_TTMR
407 l.j boot_dtlb_miss_handler
408 l.nop
412 l.j boot_itlb_miss_handler
413 l.nop
515 l.or r25,r0,r3 /* pointer to fdt */
521 l.ori r3,r0,0x1
522 l.mtspr r0,r3,SPR_SR
556 l.mfspr r26,r0,SPR_COREID
557 l.sfeq r26,r0
558 l.bnf secondary_wait
559 l.nop
568 l.sw TI_KSP(r31), r1
570 l.ori r4,r0,0x0
585 l.sw (0)(r28),r0
586 l.sfltu r28,r30
587 l.bf 1b
588 l.addi r28,r28,4
591 l.jal _ic_enable
592 l.nop
595 l.jal _dc_enable
596 l.nop
599 l.jal _flush_tlb
600 l.nop
609 l.mfspr r30,r0,SPR_SR
610 l.movhi r28,hi(SPR_SR_DME | SPR_SR_IME)
611 l.ori r28,r28,lo(SPR_SR_DME | SPR_SR_IME)
612 l.or r30,r30,r28
613 l.mtspr r0,r30,SPR_SR
614 l.nop
615 l.nop
616 l.nop
617 l.nop
618 l.nop
619 l.nop
620 l.nop
621 l.nop
622 l.nop
623 l.nop
624 l.nop
625 l.nop
626 l.nop
627 l.nop
628 l.nop
629 l.nop
632 l.nop 5
635 l.lwz r3,0(r25) /* load magic from fdt into r3 */
636 l.movhi r4,hi(OF_DT_HEADER)
637 l.ori r4,r4,lo(OF_DT_HEADER)
638 l.sfeq r3,r4
639 l.bf _fdt_found
640 l.nop
642 l.or r25,r0,r0
645 l.or r3,r0,r25
647 l.jalr r24
648 l.nop
689 l.jr r30
690 l.nop
694 * I N V A L I D A T E   T L B   e n t r i e s
698 l.addi r7,r0,128 /* Maximum number of sets */
700 l.mtspr r5,r0,0x0
701 l.mtspr r6,r0,0x0
703 l.addi r5,r5,1
704 l.addi r6,r6,1
705 l.sfeq r7,r0
706 l.bnf 1b
707 l.addi r7,r7,-1
709 l.jr r9
710 l.nop
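
File lines 698-710 above are the TLB-flush loop: r5 and r6 hold the DTLB and ITLB match-register SPR addresses, and each of up to 128 sets is invalidated by writing zero to its match register. A C rendering of the same loop, where mtspr() is a placeholder for the l.mtspr instruction and the two bases are passed in rather than hard-coded:

#include <stdint.h>

#define TLB_SETS 128u                              /* "Maximum number of sets" */

/* Placeholder for l.mtspr; real code writes the SPR directly. */
static void mtspr(uint32_t spr, uint32_t value) { (void)spr; (void)value; }

static void flush_tlb_sketch(uint32_t dtlbmr_base, uint32_t itlbmr_base)
{
        for (uint32_t set = 0; set < TLB_SETS; set++) {
                mtspr(dtlbmr_base + set, 0);       /* l.mtspr r5,r0,0x0 */
                mtspr(itlbmr_base + set, 0);       /* l.mtspr r6,r0,0x0 */
        }
}
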
716 l.mfspr r25,r0,SPR_UPR
717 l.andi r25,r25,SPR_UPR_PMP
718 l.sfeq r25,r0
719 l.bf secondary_check_release
720 l.nop
725 l.mtspr r0,r25,SPR_EVBAR
728 l.mfspr r25,r0,SPR_SR
729 l.ori r25,r25,SPR_SR_IEE
730 l.mtspr r0,r25,SPR_SR
733 l.mfspr r25,r0,SPR_PICMR
734 l.ori r25,r25,0xffff
735 l.mtspr r0,r25,SPR_PICMR
738 l.mfspr r25,r0,SPR_PMR
740 l.or r25,r25,r3
741 l.mtspr r0,r25,SPR_PMR
744 l.mtspr r0,r0,SPR_EVBAR
751 l.mfspr r25,r0,SPR_COREID
754 l.lwz r3,0(r4)
755 l.sfeq r25,r3
756 l.bnf secondary_wait
757 l.nop
766 l.lwz r10,0(r30)
767 l.addi r1,r10,THREAD_SIZE
769 l.sw TI_KSP(r30),r1
771 l.jal _ic_enable
772 l.nop
774 l.jal _dc_enable
775 l.nop
777 l.jal _flush_tlb
778 l.nop
783 l.mfspr r30,r0,SPR_SR
784 l.movhi r28,hi(SPR_SR_DME | SPR_SR_IME)
785 l.ori r28,r28,lo(SPR_SR_DME | SPR_SR_IME)
786 l.or r30,r30,r28
791 * Then EPCR is set to secondary_start and then a l.rfe is issued to
794 l.mtspr r0,r30,SPR_ESR_BASE
796 l.mtspr r0,r30,SPR_EPCR_BASE
797 l.rfe
801 l.jr r30
802 l.nop
815 l.mfspr r24,r0,SPR_UPR
816 l.andi r26,r24,SPR_UPR_ICP
817 l.sfeq r26,r0
818 l.bf 9f
819 l.nop
822 l.mfspr r6,r0,SPR_SR
823 l.addi r5,r0,-1
824 l.xori r5,r5,SPR_SR_ICE
825 l.and r5,r6,r5
826 l.mtspr r0,r5,SPR_SR
833 l.mfspr r24,r0,SPR_ICCFGR
834 l.andi r26,r24,SPR_ICCFGR_CBS
835 l.srli r28,r26,7
836 l.ori r30,r0,16
837 l.sll r14,r30,r28
843 l.andi r26,r24,SPR_ICCFGR_NCS
844 l.srli r28,r26,3
845 l.ori r30,r0,1
846 l.sll r16,r30,r28
849 l.addi r6,r0,0
850 l.sll r5,r14,r28
851 // l.mul r5,r14,r16
852 // l.trap 1
853 // l.addi r5,r0,IC_SIZE
855 l.mtspr r0,r6,SPR_ICBIR
856 l.sfne r6,r5
857 l.bf 1b
858 l.add r6,r6,r14
859 // l.addi r6,r6,IC_LINE
862 l.mfspr r6,r0,SPR_SR
863 l.ori r6,r6,SPR_SR_ICE
864 l.mtspr r0,r6,SPR_SR
865 l.nop
866 l.nop
867 l.nop
868 l.nop
869 l.nop
870 l.nop
871 l.nop
872 l.nop
873 l.nop
874 l.nop
876 l.jr r9
877 l.nop
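
The _ic_enable fragments above (file lines 815-877) first disable the cache, then derive its geometry from SPR_ICCFGR: the CBS bit selects a 16- or 32-byte block, the NCS field gives log2 of the number of sets, and their product is the range stepped through the block-invalidate register. A C sketch of that size computation and invalidation loop; mfspr_iccfgr/mtspr_icbir are placeholders for the SPR instructions, and the field masks are those implied by the shifts above:

#include <stdint.h>

static uint32_t mfspr_iccfgr(void) { return 0; }          /* placeholder for l.mfspr */
static void mtspr_icbir(uint32_t addr) { (void)addr; }    /* placeholder for l.mtspr */

static void ic_invalidate_sketch(void)
{
        uint32_t iccfgr     = mfspr_iccfgr();
        uint32_t block_size = 16u << ((iccfgr & 0x80) >> 7);   /* SPR_ICCFGR_CBS, l.srli ..,7 */
        uint32_t ncs        = (iccfgr & 0x78) >> 3;            /* SPR_ICCFGR_NCS, l.srli ..,3 */
        uint32_t cache_size = block_size << ncs;               /* l.sll r5,r14,r28 */

        /* Step the block-invalidate register over the whole cache. */
        for (uint32_t addr = 0; addr != cache_size; addr += block_size)
                mtspr_icbir(addr);
}
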
881 l.mfspr r24,r0,SPR_UPR
882 l.andi r26,r24,SPR_UPR_DCP
883 l.sfeq r26,r0
884 l.bf 9f
885 l.nop
888 l.mfspr r6,r0,SPR_SR
889 l.addi r5,r0,-1
890 l.xori r5,r5,SPR_SR_DCE
891 l.and r5,r6,r5
892 l.mtspr r0,r5,SPR_SR
899 l.mfspr r24,r0,SPR_DCCFGR
900 l.andi r26,r24,SPR_DCCFGR_CBS
901 l.srli r28,r26,7
902 l.ori r30,r0,16
903 l.sll r14,r30,r28
909 l.andi r26,r24,SPR_DCCFGR_NCS
910 l.srli r28,r26,3
911 l.ori r30,r0,1
912 l.sll r16,r30,r28
915 l.addi r6,r0,0
916 l.sll r5,r14,r28
918 l.mtspr r0,r6,SPR_DCBIR
919 l.sfne r6,r5
920 l.bf 1b
921 l.add r6,r6,r14
924 l.mfspr r6,r0,SPR_SR
925 l.ori r6,r6,SPR_SR_DCE
926 l.mtspr r0,r6,SPR_SR
928 l.jr r9
929 l.nop
970 l.mfspr r6,r0,SPR_ESR_BASE //
971 l.andi r6,r6,SPR_SR_SM // are we in kernel mode ?
972 l.sfeqi r6,0 // r6 == 0x1 --> SM
973 l.bf exit_with_no_dtranslation //
974 l.nop
987 l.mfspr r4,r0,SPR_EEAR_BASE // get the offending EA
992 l.srli r3,r4,0xd // r3 <- r4 / 8192 (sets are relative to page size (8Kb) NOT VPN…
994 l.mfspr r6, r0, SPR_DMMUCFGR
995 l.andi r6, r6, SPR_DMMUCFGR_NTS
996 l.srli r6, r6, SPR_DMMUCFGR_NTS_OFF
997 l.ori r5, r0, 0x1
998 l.sll r5, r5, r6 // r5 = number DMMU sets
999 l.addi r6, r5, -1 // r6 = nsets mask
1000 l.and r2, r3, r6 // r2 <- r3 % NSETS_MASK
1002 l.or r6,r6,r4 // r6 <- r4
1003 l.ori r6,r6,~(VPN_MASK) // r6 <- VPN :VPN .xfff - clear up lo(r6) to 0x**** *fff
1004 l.movhi r5,hi(DTLB_MR_MASK) // r5 <- ffff:0000.x000
1005 l.ori r5,r5,lo(DTLB_MR_MASK) // r5 <- ffff:1111.x001 - apply DTLB_MR_MASK
1006 l.and r5,r5,r6 // r5 <- VPN :VPN .x001 - we have DTLBMR entry
1007 l.mtspr r2,r5,SPR_DTLBMR_BASE(0) // set DTLBMR
1011 l.sfgeu r6,r4 // flag if r6 >= r4 (if 0xbfffffff >= EA)
1012 l.bf 1f // goto out
1013 l.and r3,r4,r4 // delay slot :: r3 <- r4 (if flag==1)
1017 l.ori r3,r3,~(PPN_MASK) // r3 <- PPN :PPN .xfff - clear up lo(r6) to 0x**** *fff
1018 l.movhi r5,hi(DTLB_TR_MASK) // r5 <- ffff:0000.x000
1019 l.ori r5,r5,lo(DTLB_TR_MASK) // r5 <- ffff:1111.x330 - apply DTLB_TR_MASK
1020 l.and r5,r5,r3 // r5 <- PPN :PPN .x330 - we have DTLBTR entry
1021 l.mtspr r2,r5,SPR_DTLBTR_BASE(0) // set DTLBTR
1029 l.rfe // SR <- ESR, PC <- EPC
1035 l.j _dispatch_bus_fault
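
In the boot-time DTLB miss handler above (file lines 970-1035), the set index is the virtual page number taken modulo the number of DTLB sets, which is read from the NTS field of SPR_DMMUCFGR (file lines 994-1000). A short C version of that calculation; mfspr_dmmucfgr is a placeholder for the SPR read, and the NTS mask/offset are the usual spr_defs.h values:

#include <stdint.h>

#define PAGE_SHIFT 13                                     /* 8 KiB pages, the >> 0xd above */

static uint32_t mfspr_dmmucfgr(void) { return 0; }        /* placeholder for l.mfspr */

static uint32_t dtlb_set_for(uint32_t ea)
{
        uint32_t nts   = (mfspr_dmmucfgr() & 0x1c) >> 2;  /* SPR_DMMUCFGR_NTS field  */
        uint32_t nsets = 1u << nts;                       /* number of DTLB sets     */

        return (ea >> PAGE_SHIFT) & (nsets - 1);          /* set = VPN mod nsets     */
}
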
1066 l.mfspr r6,r0,SPR_ESR_BASE //
1067 l.andi r6,r6,SPR_SR_SM // are we in kernel mode ?
1068 l.sfeqi r6,0 // r6 == 0x1 --> SM
1069 l.bf exit_with_no_itranslation
1070 l.nop
1074 l.mfspr r4,r0,SPR_EEAR_BASE // get the offending EA
1079 l.srli r3,r4,0xd // r3 <- r4 / 8192 (sets are relative to page size (8Kb) NOT VP…
1081 l.mfspr r6, r0, SPR_IMMUCFGR
1082 l.andi r6, r6, SPR_IMMUCFGR_NTS
1083 l.srli r6, r6, SPR_IMMUCFGR_NTS_OFF
1084 l.ori r5, r0, 0x1
1085 l.sll r5, r5, r6 // r5 = number IMMU sets from IMMUCFGR
1086 l.addi r6, r5, -1 // r6 = nsets mask
1087 l.and r2, r3, r6 // r2 <- r3 % NSETS_MASK
1089 l.or r6,r6,r4 // r6 <- r4
1090 l.ori r6,r6,~(VPN_MASK) // r6 <- VPN :VPN .xfff - clear up lo(r6) to 0x**** *fff
1091 l.movhi r5,hi(ITLB_MR_MASK) // r5 <- ffff:0000.x000
1092 l.ori r5,r5,lo(ITLB_MR_MASK) // r5 <- ffff:1111.x001 - apply ITLB_MR_MASK
1093 l.and r5,r5,r6 // r5 <- VPN :VPN .x001 - we have ITLBMR entry
1094 l.mtspr r2,r5,SPR_ITLBMR_BASE(0) // set ITLBMR
1104 l.sfgeu r6,r4 // flag if r6 >= r4 (if 0xbfffffff >= EA)
1105 l.bf 1f // goto out
1106 l.and r3,r4,r4 // delay slot :: r3 <- r4 (if flag==1)
1110 l.ori r3,r3,~(PPN_MASK) // r3 <- PPN :PPN .xfff - clear up lo(r6) to 0x**** *fff
1111 l.movhi r5,hi(ITLB_TR_MASK) // r5 <- ffff:0000.x000
1112 l.ori r5,r5,lo(ITLB_TR_MASK) // r5 <- ffff:1111.x050 - apply ITLB_TR_MASK
1113 l.and r5,r5,r3 // r5 <- PPN :PPN .x050 - we have ITLBTR entry
1114 l.mtspr r2,r5,SPR_ITLBTR_BASE(0) // set ITLBTR
1122 l.rfe // SR <- ESR, PC <- EPC
1127 l.j _dispatch_bus_fault
1128 l.nop
1153 l.mfspr r2,r0,SPR_EEAR_BASE
1158 l.srli r4,r2,0x18 // >> PAGE_SHIFT + (PAGE_SHIFT - 2)
1159 l.slli r4,r4,0x2 // to get address << 2
1160 l.add r3,r4,r3 // r4 is pgd_index(daddr)
1166 l.lwz r3,0x0(r4) // get *pmd value
1167 l.sfne r3,r0
1168 l.bnf d_pmd_none
1169 l.addi r3,r0,0xffffe000 // PAGE_MASK
1175 l.lwz r4,0x0(r4) // get **pmd value
1176 l.and r4,r4,r3 // & PAGE_MASK
1177 l.srli r2,r2,0xd // >> PAGE_SHIFT, r2 == EEAR
1178 l.andi r3,r2,0x7ff // (1UL << PAGE_SHIFT - 2) - 1
1179 l.slli r3,r3,0x2 // to get address << 2
1180 l.add r3,r3,r4
1181 l.lwz r3,0x0(r3) // this is pte at last
1185 l.andi r4,r3,0x1
1186 l.sfne r4,r0 // is pte present
1187 l.bnf d_pte_not_present
1188 l.addi r4,r0,0xffffe3fa // PAGE_MASK | DTLB_UP_CONVERT_MASK
1192 l.and r4,r3,r4 // apply the mask
1194 l.mfspr r2, r0, SPR_DMMUCFGR
1195 l.andi r2, r2, SPR_DMMUCFGR_NTS
1196 l.srli r2, r2, SPR_DMMUCFGR_NTS_OFF
1197 l.ori r3, r0, 0x1
1198 l.sll r3, r3, r2 // r3 = number DMMU sets DMMUCFGR
1199 l.addi r2, r3, -1 // r2 = nsets mask
1200 l.mfspr r3, r0, SPR_EEAR_BASE
1201 l.srli r3, r3, 0xd // >> PAGE_SHIFT
1202 l.and r2, r3, r2 // calc offset: & (NUM_TLB_ENTRIES-1)
1204 l.mtspr r2,r4,SPR_DTLBTR_BASE(0)
1208 l.slli r3, r3, 0xd /* << PAGE_SHIFT => EA & PAGE_MASK */
1209 l.ori r4,r3,0x1 // set hardware valid bit: DTLB_MR entry
1210 l.mtspr r2,r4,SPR_DTLBMR_BASE(0)
1215 l.rfe
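
The dtlb_miss_handler fragments above (file lines 1153-1215) are a two-level page-table walk: the top 8 bits of the faulting address index the pgd, the next 11 bits index the pte page, and a present pte is masked and loaded into DTLBTR while (EA & PAGE_MASK) | 1 goes into DTLBMR. A C sketch of the walk itself; walk_for_dtlb and its pgd argument are illustrative only (the real handler fetches current_pgd for this core):

#include <stdint.h>

#define PAGE_SHIFT   13
#define PAGE_MASK    0xffffe000u                     /* l.addi r3,r0,0xffffe000 */
#define PTRS_PER_PTE (1u << (PAGE_SHIFT - 2))        /* 0x800 entries, mask 0x7ff */

static uint32_t walk_for_dtlb(const uint32_t *pgd, uint32_t ea)
{
        uint32_t pmd = pgd[ea >> (PAGE_SHIFT + (PAGE_SHIFT - 2))];   /* >> 0x18 */
        if (pmd == 0)
                return 0;                            /* d_pmd_none: take the page fault */

        const uint32_t *pte_page = (const uint32_t *)(uintptr_t)(pmd & PAGE_MASK);
        uint32_t pte = pte_page[(ea >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)];

        if (!(pte & 0x1))
                return 0;                            /* d_pte_not_present */

        /* The handler then writes pte & (PAGE_MASK | DTLB_UP_CONVERT_MASK) to
         * DTLBTR and (ea & PAGE_MASK) | 0x1 to DTLBMR for the selected set. */
        return pte;
}
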
1231 l.mfspr r2,r0,SPR_EEAR_BASE
1238 l.srli r4,r2,0x18 // >> PAGE_SHIFT + (PAGE_SHIFT - 2)
1239 l.slli r4,r4,0x2 // to get address << 2
1240 l.add r3,r4,r3 // r4 is pgd_index(daddr)
1246 l.lwz r3,0x0(r4) // get *pmd value
1247 l.sfne r3,r0
1248 l.bnf i_pmd_none
1249 l.addi r3,r0,0xffffe000 // PAGE_MASK
1256 l.lwz r4,0x0(r4) // get **pmd value
1257 l.and r4,r4,r3 // & PAGE_MASK
1258 l.srli r2,r2,0xd // >> PAGE_SHIFT, r2 == EEAR
1259 l.andi r3,r2,0x7ff // (1UL << PAGE_SHIFT - 2) - 1
1260 l.slli r3,r3,0x2 // to get address << 2
1261 l.add r3,r3,r4
1262 l.lwz r3,0x0(r3) // this is pte at last
1267 l.andi r4,r3,0x1
1268 l.sfne r4,r0 // is pte present
1269 l.bnf i_pte_not_present
1270 l.addi r4,r0,0xffffe03a // PAGE_MASK | ITLB_UP_CONVERT_MASK
1274 l.and r4,r3,r4 // apply the mask
1275 l.andi r3,r3,0x7c0 // _PAGE_EXEC | _PAGE_SRE | _PAGE_SWE | _PAGE_URE | _PAGE_UWE
1276 l.sfeq r3,r0
1277 l.bf itlb_tr_fill //_workaround
1279 l.mfspr r2, r0, SPR_IMMUCFGR
1280 l.andi r2, r2, SPR_IMMUCFGR_NTS
1281 l.srli r2, r2, SPR_IMMUCFGR_NTS_OFF
1282 l.ori r3, r0, 0x1
1283 l.sll r3, r3, r2 // r3 = number IMMU sets IMMUCFGR
1284 l.addi r2, r3, -1 // r2 = nsets mask
1285 l.mfspr r3, r0, SPR_EEAR_BASE
1286 l.srli r3, r3, 0xd // >> PAGE_SHIFT
1287 l.and r2, r3, r2 // calc offset: & (NUM_TLB_ENTRIES-1)
1297 l.ori r4,r4,0xc0 // | (SPR_ITLBTR_UXE | SPR_ITLBTR_SXE)
1299 l.mtspr r2,r4,SPR_ITLBTR_BASE(0)
1303 l.slli r3, r3, 0xd /* << PAGE_SHIFT => EA & PAGE_MASK */
1304 l.ori r4,r3,0x1 // set hardware valid bit: ITLB_MR entry
1305 l.mtspr r2,r4,SPR_ITLBMR_BASE(0)
1310 l.rfe
1346 l.sw TRAMP_SLOT_0(r3),r4
1347 l.sw TRAMP_SLOT_1(r3),r4
1348 l.sw TRAMP_SLOT_4(r3),r4
1349 l.sw TRAMP_SLOT_5(r3),r4
1352 l.lwz r4,0x0(r6) // load op @ EEA + 0x0 (fc address)
1353 l.sw TRAMP_SLOT_3(r3),r4 // store it to _immu_trampoline_data
1354 l.lwz r4,-0x4(r6) // load op @ EEA - 0x4 (f8 address)
1355 l.sw TRAMP_SLOT_2(r3),r4 // store it to _immu_trampoline_data
1357 l.srli r5,r4,26 // check opcode for write access
1358 l.sfeqi r5,0 // l.j
1359 l.bf 0f
1360 l.sfeqi r5,0x11 // l.jr
1361 l.bf 1f
1362 l.sfeqi r5,1 // l.jal
1363 l.bf 2f
1364 l.sfeqi r5,0x12 // l.jalr
1365 l.bf 3f
1366 l.sfeqi r5,3 // l.bnf
1367 l.bf 4f
1368 l.sfeqi r5,4 // l.bf
1369 l.bf 5f
1371 l.nop
1372 l.j 99b // should never happen
1373 l.nop 1
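
The compare chain above (file lines 1357-1373) classifies the faulting instruction by its top six opcode bits before the trampoline rewrites it. The same dispatch written as a C switch for clarity; classify_jump is a hypothetical helper, not a symbol from the source:

#include <stdint.h>

static int classify_jump(uint32_t insn)
{
        switch (insn >> 26) {            /* l.srli r5,r4,26 */
        case 0x00: return 0;             /* l.j    */
        case 0x11: return 1;             /* l.jr   */
        case 0x01: return 2;             /* l.jal  */
        case 0x12: return 3;             /* l.jalr */
        case 0x03: return 4;             /* l.bnf  */
        case 0x04: return 5;             /* l.bf   */
        default:   return -1;            /* 99b: should never happen */
        }
}
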
1382 2: // l.jal
1384 /* 19 20 aa aa l.movhi r9,0xaaaa
1385 * a9 29 bb bb l.ori r9,0xbbbb
1390 l.addi r6,r2,0x4 // this is 0xaaaabbbb
1392 // l.movhi r9,0xaaaa
1393 l.ori r5,r0,0x1920 // 0x1920 == l.movhi r9
1394 l.sh (TRAMP_SLOT_0+0x0)(r3),r5
1395 l.srli r5,r6,16
1396 l.sh (TRAMP_SLOT_0+0x2)(r3),r5
1398 // l.ori r9,0xbbbb
1399 l.ori r5,r0,0xa929 // 0xa929 == l.ori r9
1400 l.sh (TRAMP_SLOT_1+0x0)(r3),r5
1401 l.andi r5,r6,0xffff
1402 l.sh (TRAMP_SLOT_1+0x2)(r3),r5
1407 0: // l.j
1408 l.slli r6,r4,6 // original offset shifted left 6 - 2
1409 // l.srli r6,r6,6 // original offset shifted right 2
1411 l.slli r4,r2,4 // old jump position: EEA shifted left 4
1412 // l.srli r4,r4,6 // old jump position: shifted right 2
1414 l.addi r5,r3,0xc // new jump position (physical)
1415 l.slli r5,r5,4 // new jump position: shifted left 4
1420 l.sub r5,r4,r5 // old_jump - new_jump
1421 l.add r5,r6,r5 // orig_off + (old_jump - new_jump)
1422 l.srli r5,r5,6 // new offset shifted right 2
1425 // l.j has opcode 0x0...
1426 l.sw TRAMP_SLOT_2(r3),r5 // write it back
1428 l.j trampoline_out
1429 l.nop
1433 3: // l.jalr
1435 /* 19 20 aa aa l.movhi r9,0xaaaa
1436 * a9 29 bb bb l.ori r9,0xbbbb
1441 l.addi r6,r2,0x4 // this is 0xaaaabbbb
1443 // l.movhi r9,0xaaaa
1444 l.ori r5,r0,0x1920 // 0x1920 == l.movhi r9
1445 l.sh (TRAMP_SLOT_0+0x0)(r3),r5
1446 l.srli r5,r6,16
1447 l.sh (TRAMP_SLOT_0+0x2)(r3),r5
1449 // l.ori r9,0xbbbb
1450 l.ori r5,r0,0xa929 // 0xa929 == l.ori r9
1451 l.sh (TRAMP_SLOT_1+0x0)(r3),r5
1452 l.andi r5,r6,0xffff
1453 l.sh (TRAMP_SLOT_1+0x2)(r3),r5
1455 l.lhz r5,(TRAMP_SLOT_2+0x0)(r3) // load hi part of jump instruction
1456 l.andi r5,r5,0x3ff // clear out opcode part
1457 l.ori r5,r5,0x4400 // opcode changed from l.jalr -> l.jr
1458 l.sh (TRAMP_SLOT_2+0x0)(r3),r5 // write it back
1462 1: // l.jr
1463 l.j trampoline_out
1464 l.nop
1468 4: // l.bnf
1469 5: // l.bf
1470 l.slli r6,r4,6 // original offset shifted left 6 - 2
1471 // l.srli r6,r6,6 // original offset shifted right 2
1473 l.slli r4,r2,4 // old jump position: EEA shifted left 4
1474 // l.srli r4,r4,6 // old jump position: shifted right 2
1476 l.addi r5,r3,0xc // new jump position (physical)
1477 l.slli r5,r5,4 // new jump position: shifted left 4
1482 l.add r6,r6,r4 // (orig_off + old_jump)
1483 l.sub r6,r6,r5 // (orig_off + old_jump) - new_jump
1484 l.srli r6,r6,6 // new offset shifted right 2
1487 l.lwz r4,(TRAMP_SLOT_2+0x0)(r3) // load jump instruction
1488 l.srli r4,r4,16
1489 l.andi r4,r4,0xfc00 // get opcode part
1490 l.slli r4,r4,16
1491 l.or r6,r4,r6 // l.b(n)f new offset
1492 l.sw TRAMP_SLOT_2(r3),r6 // write it back
1494 /* we need to add l.j to EEA + 0x8 */
1496 l.addi r4,r4,(0x8 - 0x8) // jump target = r2 + 0x8 (compensate for 0x8)
1498 l.sub r4,r4,r5 // jump offset = target - new_position + 0x8
1500 l.slli r4,r4,4 // the amount of info in the immediate of the jump
1501 l.srli r4,r4,6 // jump instruction with offset
1502 l.sw TRAMP_SLOT_4(r3),r4 // write it to 4th slot
1509 l.mtspr r0,r5,SPR_EPCR_BASE
1520 l.mfspr r21,r0,SPR_ICCFGR
1521 l.andi r21,r21,SPR_ICCFGR_CBS
1522 l.srli r21,r21,7
1523 l.ori r23,r0,16
1524 l.sll r14,r23,r21
1526 l.mtspr r0,r5,SPR_ICBIR
1527 l.add r5,r5,r14
1528 l.mtspr r0,r5,SPR_ICBIR
1530 l.jr r9
1531 l.nop
1550 l.lbz r7,0(r3)
1551 l.sfeq r7,r0
1552 l.bf 9f
1553 l.nop
1556 l.movhi r4,hi(UART_BASE_ADD)
1558 l.addi r6,r0,0x20
1559 1: l.lbz r5,5(r4)
1560 l.andi r5,r5,0x20
1561 l.sfeq r5,r6
1562 l.bnf 1b
1563 l.nop
1565 l.sb 0(r4),r7
1567 l.addi r6,r0,0x60
1568 1: l.lbz r5,5(r4)
1569 l.andi r5,r5,0x60
1570 l.sfeq r5,r6
1571 l.bnf 1b
1572 l.nop
1575 l.j 2b
1576 l.addi r3,r3,0x1
1583 l.jr r9
1584 l.nop
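
_emergency_print above (file lines 1550-1584) walks a NUL-terminated string and, for each byte, polls the UART line status register at offset 5 until the transmitter is ready (0x20), writes the byte at offset 0, then waits for 0x60 before moving on. A polled-output sketch in C; UART_BASE_ADD is platform-specific in the source and given a placeholder value here:

#include <stdint.h>

#define UART_BASE_ADD 0x90000000u      /* placeholder; the real base is board-specific */

static volatile uint8_t *const uart = (volatile uint8_t *)UART_BASE_ADD;

static void emergency_putc_sketch(uint8_t c)
{
        while ((uart[5] & 0x20) != 0x20)       /* 1: l.lbz r5,5(r4); wait for THRE   */
                ;
        uart[0] = c;                           /* l.sb 0(r4),r7                      */
        while ((uart[5] & 0x60) != 0x60)       /* wait until the byte has drained    */
                ;
}
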
1593 l.addi r8,r0,32 // shift register
1596 l.addi r8,r8,-0x4
1597 l.srl r7,r3,r8
1598 l.andi r7,r7,0xf
1601 l.sfeqi r8,0x4
1602 l.bf 2f
1603 l.nop
1605 l.sfeq r7,r0
1606 l.bf 1b
1607 l.nop
1610 l.srl r7,r3,r8
1612 l.andi r7,r7,0xf
1613 l.sflts r8,r0
1614 l.bf 9f
1616 l.sfgtui r7,0x9
1617 l.bnf 8f
1618 l.nop
1619 l.addi r7,r7,0x27
1622 l.addi r7,r7,0x30
1624 l.movhi r4,hi(UART_BASE_ADD)
1626 l.addi r6,r0,0x20
1627 1: l.lbz r5,5(r4)
1628 l.andi r5,r5,0x20
1629 l.sfeq r5,r6
1630 l.bnf 1b
1631 l.nop
1633 l.sb 0(r4),r7
1635 l.addi r6,r0,0x60
1636 1: l.lbz r5,5(r4)
1637 l.andi r5,r5,0x60
1638 l.sfeq r5,r6
1639 l.bnf 1b
1640 l.nop
1643 l.j 2b
1644 l.addi r8,r8,-0x4
1652 l.jr r9
1653 l.nop
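
_emergency_print_nr above (file lines 1593-1653) prints a register in hex: a shift count starts at 32 and drops by 4 per digit, leading zero nibbles are skipped (the last nibble is always printed), and each remaining nibble becomes '0'-'9' or 'a'-'f' via the 0x27/0x30 adds. The same logic in C, reusing the putc sketch above:

static void emergency_print_nr_sketch(uint32_t value)
{
        int started = 0;

        for (int shift = 28; shift >= 0; shift -= 4) {
                uint32_t nibble = (value >> shift) & 0xf;

                if (!started && nibble == 0 && shift != 0)
                        continue;                        /* skip leading zeros      */
                started = 1;

                /* l.addi r7,r7,0x27 for a-f, then l.addi r7,r7,0x30 for all digits */
                emergency_putc_sketch(nibble > 9 ? 'a' + (nibble - 10) : '0' + nibble);
        }
}
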
1678 l.movhi r3,hi(UART_BASE_ADD)
1680 l.addi r4,r0,0x7
1681 l.sb 0x2(r3),r4
1683 l.addi r4,r0,0x0
1684 l.sb 0x1(r3),r4
1686 l.addi r4,r0,0x3
1687 l.sb 0x3(r3),r4
1689 l.lbz r5,3(r3)
1690 l.ori r4,r5,0x80
1691 l.sb 0x3(r3),r4
1692 l.addi r4,r0,((UART_DIVISOR>>8) & 0x000000ff)
1693 l.sb UART_DLM(r3),r4
1694 l.addi r4,r0,((UART_DIVISOR) & 0x000000ff)
1695 l.sb UART_DLL(r3),r4
1696 l.sb 0x3(r3),r5
1698 l.jr r9
1699 l.nop
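
_emergency_print_init above (file lines 1678-1699) programs the UART: FIFOs are enabled and cleared (FCR = 0x07), interrupts are masked (IER = 0), the line is set to 8 data bits with no parity (LCR = 0x03), DLAB is set so the divisor latch can be written, and the saved LCR is restored. A C sketch of the same sequence, reusing the uart pointer above; UART_DIVISOR remains a platform-specific placeholder:

#define UART_DIVISOR 0x0001u           /* placeholder; derived from UART clock and baud rate */

static void emergency_print_init_sketch(void)
{
        uart[2] = 0x07;                        /* FCR: enable and reset FIFOs      */
        uart[1] = 0x00;                        /* IER: interrupts off              */
        uart[3] = 0x03;                        /* LCR: 8 data bits, no parity      */

        uint8_t lcr = uart[3];                 /* l.lbz r5,3(r3)                   */
        uart[3] = lcr | 0x80;                  /* set DLAB                         */
        uart[1] = (UART_DIVISOR >> 8) & 0xff;  /* UART_DLM                         */
        uart[0] = UART_DIVISOR & 0xff;         /* UART_DLL                         */
        uart[3] = lcr;                         /* restore LCR, clearing DLAB       */
}
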
1707 l.ori r3,r0,SPR_SR_SM
1708 l.mtspr r0,r3,SPR_ESR_BASE
1709 l.rfe