1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * MIPS idle loop and WAIT instruction support.
4 *
5 * Copyright (C) xxxx the Anonymous
6 * Copyright (C) 1994 - 2006 Ralf Baechle
7 * Copyright (C) 2003, 2004 Maciej W. Rozycki
8 * Copyright (C) 2001, 2004, 2011, 2012 MIPS Technologies, Inc.
9 */
10 #include <linux/cpu.h>
11 #include <linux/export.h>
12 #include <linux/init.h>
13 #include <linux/irqflags.h>
14 #include <linux/printk.h>
15 #include <linux/sched.h>
16 #include <asm/cpu.h>
17 #include <asm/cpu-info.h>
18 #include <asm/cpu-type.h>
19 #include <asm/idle.h>
20 #include <asm/mipsregs.h>
21
22 /*
23 * Not all of the MIPS CPUs have the "wait" instruction available. Moreover,
24 * the implementation of the "wait" feature differs between CPU families. This
25 * points to the function that implements CPU specific wait.
26 * The wait instruction stops the pipeline and reduces the power consumption of
27 * the CPU very much.
28 */
29 void (*cpu_wait)(void);
30 EXPORT_SYMBOL(cpu_wait);
31
r3081_wait(void)32 static void __cpuidle r3081_wait(void)
33 {
34 unsigned long cfg = read_c0_conf();
35 write_c0_conf(cfg | R30XX_CONF_HALT);
36 raw_local_irq_enable();
37 }
38
r39xx_wait(void)39 static void __cpuidle r39xx_wait(void)
40 {
41 if (!need_resched())
42 write_c0_conf(read_c0_conf() | TX39_CONF_HALT);
43 raw_local_irq_enable();
44 }
45
r4k_wait(void)46 void __cpuidle r4k_wait(void)
47 {
48 raw_local_irq_enable();
49 __r4k_wait();
50 }
51
52 /*
53 * This variant is preferable as it allows testing need_resched and going to
54 * sleep depending on the outcome atomically. Unfortunately the "It is
55 * implementation-dependent whether the pipeline restarts when a non-enabled
56 * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes
57 * using this version a gamble.
58 */
r4k_wait_irqoff(void)59 void __cpuidle r4k_wait_irqoff(void)
60 {
61 if (!need_resched())
62 __asm__(
63 " .set push \n"
64 " .set arch=r4000 \n"
65 " wait \n"
66 " .set pop \n");
67 raw_local_irq_enable();
68 }
69
70 /*
71 * The RM7000 variant has to handle erratum 38. The workaround is to not
72 * have any pending stores when the WAIT instruction is executed.
73 */
rm7k_wait_irqoff(void)74 static void __cpuidle rm7k_wait_irqoff(void)
75 {
76 if (!need_resched())
77 __asm__(
78 " .set push \n"
79 " .set arch=r4000 \n"
80 " .set noat \n"
81 " mfc0 $1, $12 \n"
82 " sync \n"
83 " mtc0 $1, $12 # stalls until W stage \n"
84 " wait \n"
85 " mtc0 $1, $12 # stalls until W stage \n"
86 " .set pop \n");
87 raw_local_irq_enable();
88 }
89
90 /*
91 * Au1 'wait' is only useful when the 32kHz counter is used as timer,
92 * since coreclock (and the cp0 counter) stops upon executing it. Only an
93 * interrupt can wake it, so they must be enabled before entering idle modes.
94 */
au1k_wait(void)95 static void __cpuidle au1k_wait(void)
96 {
97 unsigned long c0status = read_c0_status() | 1; /* irqs on */
98
99 __asm__(
100 " .set push \n"
101 " .set arch=r4000 \n"
102 " cache 0x14, 0(%0) \n"
103 " cache 0x14, 32(%0) \n"
104 " sync \n"
105 " mtc0 %1, $12 \n" /* wr c0status */
106 " wait \n"
107 " nop \n"
108 " nop \n"
109 " nop \n"
110 " nop \n"
111 " .set pop \n"
112 : : "r" (au1k_wait), "r" (c0status));
113 }
114
115 static int __initdata nowait;
116
wait_disable(char * s)117 static int __init wait_disable(char *s)
118 {
119 nowait = 1;
120
121 return 1;
122 }
123
124 __setup("nowait", wait_disable);
125
check_wait(void)126 void __init check_wait(void)
127 {
128 struct cpuinfo_mips *c = ¤t_cpu_data;
129
130 if (nowait) {
131 printk("Wait instruction disabled.\n");
132 return;
133 }
134
135 /*
136 * MIPSr6 specifies that masked interrupts should unblock an executing
137 * wait instruction, and thus that it is safe for us to use
138 * r4k_wait_irqoff. Yippee!
139 */
140 if (cpu_has_mips_r6) {
141 cpu_wait = r4k_wait_irqoff;
142 return;
143 }
144
145 switch (current_cpu_type()) {
146 case CPU_R3081:
147 case CPU_R3081E:
148 cpu_wait = r3081_wait;
149 break;
150 case CPU_TX3927:
151 cpu_wait = r39xx_wait;
152 break;
153 case CPU_R4200:
154 case CPU_R4600:
155 case CPU_R4640:
156 case CPU_R4650:
157 case CPU_R4700:
158 case CPU_R5000:
159 case CPU_R5500:
160 case CPU_NEVADA:
161 case CPU_4KC:
162 case CPU_4KEC:
163 case CPU_4KSC:
164 case CPU_5KC:
165 case CPU_5KE:
166 case CPU_25KF:
167 case CPU_PR4450:
168 case CPU_BMIPS3300:
169 case CPU_BMIPS4350:
170 case CPU_BMIPS4380:
171 case CPU_CAVIUM_OCTEON:
172 case CPU_CAVIUM_OCTEON_PLUS:
173 case CPU_CAVIUM_OCTEON2:
174 case CPU_CAVIUM_OCTEON3:
175 case CPU_XBURST:
176 case CPU_LOONGSON32:
177 case CPU_XLR:
178 case CPU_XLP:
179 cpu_wait = r4k_wait;
180 break;
181 case CPU_LOONGSON64:
182 if ((c->processor_id & (PRID_IMP_MASK | PRID_REV_MASK)) >=
183 (PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_0) ||
184 (c->processor_id & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64R)
185 cpu_wait = r4k_wait;
186 break;
187
188 case CPU_BMIPS5000:
189 cpu_wait = r4k_wait_irqoff;
190 break;
191 case CPU_RM7000:
192 cpu_wait = rm7k_wait_irqoff;
193 break;
194
195 case CPU_PROAPTIV:
196 case CPU_P5600:
197 /*
198 * Incoming Fast Debug Channel (FDC) data during a wait
199 * instruction causes the wait never to resume, even if an
200 * interrupt is received. Avoid using wait at all if FDC data is
201 * likely to be received.
202 */
203 if (IS_ENABLED(CONFIG_MIPS_EJTAG_FDC_TTY))
204 break;
205 fallthrough;
206 case CPU_M14KC:
207 case CPU_M14KEC:
208 case CPU_24K:
209 case CPU_34K:
210 case CPU_1004K:
211 case CPU_1074K:
212 case CPU_INTERAPTIV:
213 case CPU_M5150:
214 case CPU_QEMU_GENERIC:
215 cpu_wait = r4k_wait;
216 if (read_c0_config7() & MIPS_CONF7_WII)
217 cpu_wait = r4k_wait_irqoff;
218 break;
219
220 case CPU_74K:
221 cpu_wait = r4k_wait;
222 if ((c->processor_id & 0xff) >= PRID_REV_ENCODE_332(2, 1, 0))
223 cpu_wait = r4k_wait_irqoff;
224 break;
225
226 case CPU_TX49XX:
227 cpu_wait = r4k_wait_irqoff;
228 break;
229 case CPU_ALCHEMY:
230 cpu_wait = au1k_wait;
231 break;
232 case CPU_20KC:
233 /*
234 * WAIT on Rev1.0 has E1, E2, E3 and E16.
235 * WAIT on Rev2.0 and Rev3.0 has E16.
236 * Rev3.1 WAIT is nop, why bother
237 */
238 if ((c->processor_id & 0xff) <= 0x64)
239 break;
240
241 /*
242 * Another rev is incremeting c0_count at a reduced clock
243 * rate while in WAIT mode. So we basically have the choice
244 * between using the cp0 timer as clocksource or avoiding
245 * the WAIT instruction. Until more details are known,
246 * disable the use of WAIT for 20Kc entirely.
247 cpu_wait = r4k_wait;
248 */
249 break;
250 default:
251 break;
252 }
253 }
254
arch_cpu_idle(void)255 void arch_cpu_idle(void)
256 {
257 if (cpu_wait)
258 cpu_wait();
259 else
260 raw_local_irq_enable();
261 }
262
263 #ifdef CONFIG_CPU_IDLE
264
/*
 * cpuidle driver state-enter callback: delegate to the regular arch idle
 * path and report the entered state index back to the cpuidle core.
 */
int mips_cpuidle_wait_enter(struct cpuidle_device *dev,
			    struct cpuidle_driver *drv, int index)
{
	arch_cpu_idle();
	return index;
}
271
272 #endif
273