/*
 * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
 * reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the NetLogic
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _ASM_NLM_MIPS_EXTS_H
#define _ASM_NLM_MIPS_EXTS_H

/*
 * XLR and XLP interrupt request and interrupt mask registers
 */
/*
 * NOTE: Do not save/restore flags around write_c0_eimr().
 * On non-R2 platforms, the saved flags contain the part of EIMR that is
 * shadowed in the STATUS register; restoring the flags would overwrite
 * the lower 8 bits of EIMR.
 *
 * Call with interrupts disabled.
 */
#define write_c0_eimr(val)						\
do {									\
	if (sizeof(unsigned long) == 4) {				\
		__asm__ __volatile__(					\
			".set\tmips64\n\t"				\
			"dsll\t%L0, %L0, 32\n\t"			\
			"dsrl\t%L0, %L0, 32\n\t"			\
			"dsll\t%M0, %M0, 32\n\t"			\
			"or\t%L0, %L0, %M0\n\t"				\
			"dmtc0\t%L0, $9, 7\n\t"				\
			".set\tmips0"					\
			: : "r" (val));					\
	} else								\
		__write_64bit_c0_register($9, 7, (val));		\
} while (0)
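
/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * a caller that follows the rule above by disabling interrupts
 * explicitly instead of using local_irq_save()/local_irq_restore().
 * Assumes the caller runs with interrupts enabled.
 */
static inline void nlm_write_eimr_safe_example(uint64_t mask)
{
	local_irq_disable();		/* not local_irq_save() */
	write_c0_eimr(mask);
	local_irq_enable();		/* not local_irq_restore() */
}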

/*
 * Handling the 64-bit EIMR and EIRR registers in 32-bit mode with the
 * standard functions would be very inefficient, so the functions below
 * provide optimized versions of the common operations on these
 * registers.
 *
 * Call with interrupts disabled.
 */
/* Acknowledge an interrupt: clear the pending bit for 'irq' in EIRR */
static inline void ack_c0_eirr(int irq)
{
	__asm__ __volatile__(
		".set	push\n\t"
		".set	mips64\n\t"
		".set	noat\n\t"
		"li	$1, 1\n\t"
		"dsllv	$1, $1, %0\n\t"
		"dmtc0	$1, $9, 6\n\t"
		".set	pop"
		: : "r" (irq));
}

/* Unmask an interrupt: set the bit for 'irq' in EIMR */
static inline void set_c0_eimr(int irq)
{
	__asm__ __volatile__(
		".set	push\n\t"
		".set	mips64\n\t"
		".set	noat\n\t"
		"li	$1, 1\n\t"
		"dsllv	%0, $1, %0\n\t"
		"dmfc0	$1, $9, 7\n\t"
		"or	$1, %0\n\t"
		"dmtc0	$1, $9, 7\n\t"
		".set	pop"
		: "+r" (irq));
}

/* Mask an interrupt: clear the bit for 'irq' in EIMR (or + xor clears it) */
static inline void clear_c0_eimr(int irq)
{
	__asm__ __volatile__(
		".set	push\n\t"
		".set	mips64\n\t"
		".set	noat\n\t"
		"li	$1, 1\n\t"
		"dsllv	%0, $1, %0\n\t"
		"dmfc0	$1, $9, 7\n\t"
		"or	$1, %0\n\t"
		"xor	$1, %0\n\t"
		"dmtc0	$1, $9, 7\n\t"
		".set	pop"
		: "+r" (irq));
}
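
/*
 * Usage sketch (hypothetical helper, assumed flow): how an interrupt
 * controller's unmask/ack/mask callbacks might use the helpers above.
 * Caller must have interrupts disabled.
 */
static inline void nlm_irq_flow_example(int irq)
{
	set_c0_eimr(irq);	/* unmask the source */
	ack_c0_eirr(irq);	/* acknowledge a pending request */
	clear_c0_eimr(irq);	/* mask the source again */
}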

/*
 * Read c0 EIRR and c0 EIMR and AND the two values; the result is the
 * set of interrupts which are raised and not masked.
 */
static inline uint64_t read_c0_eirr_and_eimr(void)
{
	uint64_t val;

#ifdef CONFIG_64BIT
	val = __read_64bit_c0_register($9, 6) & __read_64bit_c0_register($9, 7);
#else
	__asm__ __volatile__(
		".set	push\n\t"
		".set	mips64\n\t"
		".set	noat\n\t"
		"dmfc0	%M0, $9, 6\n\t"		/* EIRR */
		"dmfc0	%L0, $9, 7\n\t"		/* EIMR */
		"and	%M0, %L0\n\t"
		"dsll	%L0, %M0, 32\n\t"	/* split the 64-bit result */
		"dsra	%M0, %M0, 32\n\t"	/* across the %M0/%L0 pair */
		"dsra	%L0, %L0, 32\n\t"
		".set	pop"
		: "=r" (val));
#endif
	return val;
}
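
/*
 * Dispatch sketch (hypothetical helper): pick the highest-numbered
 * pending unmasked interrupt, or -1 if none.  __builtin_clzll is a GCC
 * builtin; a kernel dispatcher might use fls64() instead.
 */
static inline int nlm_highest_pending_example(void)
{
	uint64_t pending = read_c0_eirr_and_eimr();

	if (pending == 0)
		return -1;
	return 63 - __builtin_clzll(pending);
}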

/* The hardware CPU number is in the low 10 bits of EBASE ($15, select 1) */
static inline int hard_smp_processor_id(void)
{
	return __read_32bit_c0_register($15, 1) & 0x3ff;
}

/* XLP9XX and XLP5XX keep the node id in different EBASE bits */
static inline int nlm_nodeid(void)
{
	uint32_t prid = read_c0_prid() & PRID_IMP_MASK;

	if ((prid == PRID_IMP_NETLOGIC_XLP9XX) ||
			(prid == PRID_IMP_NETLOGIC_XLP5XX))
		return (__read_32bit_c0_register($15, 1) >> 7) & 0x7;
	else
		return (__read_32bit_c0_register($15, 1) >> 5) & 0x3;
}

static inline unsigned int nlm_core_id(void)
{
	uint32_t prid = read_c0_prid() & PRID_IMP_MASK;

	if ((prid == PRID_IMP_NETLOGIC_XLP9XX) ||
			(prid == PRID_IMP_NETLOGIC_XLP5XX))
		return (read_c0_ebase() & 0x7c) >> 2;
	else
		return (read_c0_ebase() & 0x1c) >> 2;
}

static inline unsigned int nlm_thread_id(void)
{
	return read_c0_ebase() & 0x3;
}
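
/*
 * Putting the decoding together (illustrative): a flat hardware thread
 * number.  The limits of 8 cores per node and 4 threads per core are
 * assumptions for this sketch, not constants defined by this header.
 */
static inline unsigned int nlm_flat_hwtid_example(void)
{
	return (nlm_nodeid() * 8 + nlm_core_id()) * 4 + nlm_thread_id();
}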

/*
 * Accessors for 64-bit COP2 registers.  In 32-bit mode the value is
 * carried in the %M0/%L0 register pair of an (unsigned long long)
 * operand; interrupts are disabled so an interrupt handler cannot
 * clobber the upper halves of the 64-bit registers in between.
 */
#define __read_64bit_c2_split(source, sel)				\
({									\
	unsigned long long __val;					\
	unsigned long __flags;						\
									\
	local_irq_save(__flags);					\
	if (sel == 0)							\
		__asm__ __volatile__(					\
			".set\tmips64\n\t"				\
			"dmfc2\t%M0, " #source "\n\t"			\
			"dsll\t%L0, %M0, 32\n\t"			\
			"dsra\t%M0, %M0, 32\n\t"			\
			"dsra\t%L0, %L0, 32\n\t"			\
			".set\tmips0\n\t"				\
			: "=r" (__val));				\
	else								\
		__asm__ __volatile__(					\
			".set\tmips64\n\t"				\
			"dmfc2\t%M0, " #source ", " #sel "\n\t"		\
			"dsll\t%L0, %M0, 32\n\t"			\
			"dsra\t%M0, %M0, 32\n\t"			\
			"dsra\t%L0, %L0, 32\n\t"			\
			".set\tmips0\n\t"				\
			: "=r" (__val));				\
	local_irq_restore(__flags);					\
									\
	__val;								\
})

#define __write_64bit_c2_split(source, sel, val)			\
do {									\
	unsigned long __flags;						\
									\
	local_irq_save(__flags);					\
	if (sel == 0)							\
		__asm__ __volatile__(					\
			".set\tmips64\n\t"				\
			"dsll\t%L0, %L0, 32\n\t"			\
			"dsrl\t%L0, %L0, 32\n\t"			\
			"dsll\t%M0, %M0, 32\n\t"			\
			"or\t%L0, %L0, %M0\n\t"				\
			"dmtc2\t%L0, " #source "\n\t"			\
			".set\tmips0\n\t"				\
			: : "r" (val));					\
	else								\
		__asm__ __volatile__(					\
			".set\tmips64\n\t"				\
			"dsll\t%L0, %L0, 32\n\t"			\
			"dsrl\t%L0, %L0, 32\n\t"			\
			"dsll\t%M0, %M0, 32\n\t"			\
			"or\t%L0, %L0, %M0\n\t"				\
			"dmtc2\t%L0, " #source ", " #sel "\n\t"		\
			".set\tmips0\n\t"				\
			: : "r" (val));					\
	local_irq_restore(__flags);					\
} while (0)

#define __read_32bit_c2_register(source, sel)				\
({ uint32_t __res;							\
	if (sel == 0)							\
		__asm__ __volatile__(					\
			".set\tmips32\n\t"				\
			"mfc2\t%0, " #source "\n\t"			\
			".set\tmips0\n\t"				\
			: "=r" (__res));				\
	else								\
		__asm__ __volatile__(					\
			".set\tmips32\n\t"				\
			"mfc2\t%0, " #source ", " #sel "\n\t"		\
			".set\tmips0\n\t"				\
			: "=r" (__res));				\
	__res;								\
})

#define __read_64bit_c2_register(source, sel)				\
({ unsigned long long __res;						\
	if (sizeof(unsigned long) == 4)					\
		__res = __read_64bit_c2_split(source, sel);		\
	else if (sel == 0)						\
		__asm__ __volatile__(					\
			".set\tmips64\n\t"				\
			"dmfc2\t%0, " #source "\n\t"			\
			".set\tmips0\n\t"				\
			: "=r" (__res));				\
	else								\
		__asm__ __volatile__(					\
			".set\tmips64\n\t"				\
			"dmfc2\t%0, " #source ", " #sel "\n\t"		\
			".set\tmips0\n\t"				\
			: "=r" (__res));				\
	__res;								\
})

#define __write_64bit_c2_register(reg, sel, value)			\
do {									\
	if (sizeof(unsigned long) == 4)					\
		__write_64bit_c2_split(reg, sel, value);		\
	else if (sel == 0)						\
		__asm__ __volatile__(					\
			".set\tmips64\n\t"				\
			"dmtc2\t%z0, " #reg "\n\t"			\
			".set\tmips0\n\t"				\
			: : "Jr" (value));				\
	else								\
		__asm__ __volatile__(					\
			".set\tmips64\n\t"				\
			"dmtc2\t%z0, " #reg ", " #sel "\n\t"		\
			".set\tmips0\n\t"				\
			: : "Jr" (value));				\
} while (0)

#define __write_32bit_c2_register(reg, sel, value)			\
do {									\
	if (sel == 0)							\
		__asm__ __volatile__(					\
			".set\tmips32\n\t"				\
			"mtc2\t%z0, " #reg "\n\t"			\
			".set\tmips0\n\t"				\
			: : "Jr" (value));				\
	else								\
		__asm__ __volatile__(					\
			".set\tmips32\n\t"				\
			"mtc2\t%z0, " #reg ", " #sel "\n\t"		\
			".set\tmips0\n\t"				\
			: : "Jr" (value));				\
} while (0)
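
/*
 * Usage sketch for the COP2 accessors (illustrative; the register
 * number and select below are placeholders, not actual XLR fast
 * messaging network register assignments):
 */
static inline uint64_t nlm_c2_rmw_example(void)
{
	uint64_t old;

	old = __read_64bit_c2_register($0, 0);		/* COP2 reg 0, sel 0 */
	__write_64bit_c2_register($0, 0, old | 0x1);	/* set bit 0 */
	return old;
}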

#endif /* _ASM_NLM_MIPS_EXTS_H */