1 /*
2 * xtensa/core-macros.h -- C specific definitions
3 * that depend on CORE configuration
4 */
5
6 /*
7 * Copyright (c) 2012 Tensilica Inc.
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining
10 * a copy of this software and associated documentation files (the
11 * "Software"), to deal in the Software without restriction, including
12 * without limitation the rights to use, copy, modify, merge, publish,
13 * distribute, sublicense, and/or sell copies of the Software, and to
14 * permit persons to whom the Software is furnished to do so, subject to
15 * the following conditions:
16 *
17 * The above copyright notice and this permission notice shall be included
18 * in all copies or substantial portions of the Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
21 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
23 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
24 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27 */
28
29 #ifndef XTENSA_CACHE_H
30 #define XTENSA_CACHE_H
31
32 #include <xtensa/config/core.h>
33
34 /* Only define things for C code. */
35 #if !defined(_ASMLANGUAGE) && !defined(_NOCLANGUAGE) && !defined(__ASSEMBLER__)
36
37
38
39 /*************************** CACHE ***************************/
40
41 /* All the macros are in the lower case now and some of them
42 * share the name with the existing functions from hal.h.
43 * Including this header file will define XTHAL_USE_CACHE_MACROS
44 * which directs hal.h not to use the functions.
45 *
46
47 *
48 * Single-cache-line operations in C-callable inline assembly.
49 * Essentially macro versions (uppercase) of:
50 *
51 * xthal_icache_line_invalidate(void *addr);
52 * xthal_icache_line_lock(void *addr);
53 * xthal_icache_line_unlock(void *addr);
54 * xthal_icache_sync(void);
55 *
56 * NOTE: unlike the above functions, the following macros do NOT
57 * execute the xthal_icache_sync() as part of each line operation.
58 * This sync must be called explicitly by the caller. This is to
59 * allow better optimization when operating on more than one line.
60 *
61 * xthal_dcache_line_invalidate(void *addr);
62 * xthal_dcache_line_writeback(void *addr);
63 * xthal_dcache_line_writeback_inv(void *addr);
64 * xthal_dcache_line_lock(void *addr);
65 * xthal_dcache_line_unlock(void *addr);
66 * xthal_dcache_sync(void);
67 * xthal_dcache_line_prefetch_for_write(void *addr);
68 * xthal_dcache_line_prefetch_for_read(void *addr);
69 *
70 * All are made memory-barriers, given that's how they're typically used
71 * (ops operate on a whole line, so clobbers all memory not just *addr).
72 *
 * NOTE: All the block cache ops and line prefetches are implemented
74 * using intrinsics so they are better optimized regarding memory barriers etc.
75 *
76 * All block downgrade functions exist in two forms: with and without
77 * the 'max' parameter: This parameter allows compiler to optimize
78 * the functions whenever the parameter is smaller than the cache size.
79 *
80 * xthal_dcache_block_invalidate(void *addr, unsigned size);
81 * xthal_dcache_block_writeback(void *addr, unsigned size);
82 * xthal_dcache_block_writeback_inv(void *addr, unsigned size);
83 * xthal_dcache_block_invalidate_max(void *addr, unsigned size, unsigned max);
84 * xthal_dcache_block_writeback_max(void *addr, unsigned size, unsigned max);
85 * xthal_dcache_block_writeback_inv_max(void *addr, unsigned size, unsigned max);
86 *
87 * xthal_dcache_block_prefetch_for_read(void *addr, unsigned size);
88 * xthal_dcache_block_prefetch_for_write(void *addr, unsigned size);
89 * xthal_dcache_block_prefetch_modify(void *addr, unsigned size);
90 * xthal_dcache_block_prefetch_read_write(void *addr, unsigned size);
91 * xthal_dcache_block_prefetch_for_read_grp(void *addr, unsigned size);
92 * xthal_dcache_block_prefetch_for_write_grp(void *addr, unsigned size);
93 * xthal_dcache_block_prefetch_modify_grp(void *addr, unsigned size);
94 * xthal_dcache_block_prefetch_read_write_grp(void *addr, unsigned size)
95 *
96 * xthal_dcache_block_wait();
97 * xthal_dcache_block_required_wait();
98 * xthal_dcache_block_abort();
99 * xthal_dcache_block_prefetch_end();
100 * xthal_dcache_block_newgrp();
101 */
102
103 /*** INSTRUCTION CACHE ***/
104
/* Defining this tells hal.h that the cache operations are provided as
   macros here, so it must not declare/use the function versions. */
#define XTHAL_USE_CACHE_MACROS

#if XCHAL_ICACHE_SIZE > 0
/* Invalidate the single icache line containing 'addr' (ihi instruction).
   Unlike xthal_icache_line_invalidate() in the HAL library, this does NOT
   perform the trailing isync; the caller must invoke xthal_icache_sync()
   after the last line operation.  Declared as a full memory barrier since
   the op affects a whole cache line, not just *addr. */
# define xthal_icache_line_invalidate(addr)	do { void *__a = (void*)(addr); \
		__asm__ __volatile__("ihi %0, 0" :: "a"(__a) : "memory"); \
		} while(0)
#else
/* No icache configured: line invalidate is a no-op. */
# define xthal_icache_line_invalidate(addr)	do {/*nothing*/} while(0)
#endif
114
#if XCHAL_ICACHE_SIZE > 0 && XCHAL_ICACHE_LINE_LOCKABLE
/* Prefetch-and-lock the icache line containing 'addr' (ipfl).
   No implicit xthal_icache_sync(); caller syncs after the last line op. */
# define xthal_icache_line_lock(addr)	do { void *__a = (void*)(addr); \
		__asm__ __volatile__("ipfl %0, 0" :: "a"(__a) : "memory"); \
		} while(0)
/* Unlock the icache line containing 'addr' (ihu). */
# define xthal_icache_line_unlock(addr)	do { void *__a = (void*)(addr); \
		__asm__ __volatile__("ihu %0, 0" :: "a"(__a) : "memory"); \
		} while(0)
#else
/* Icache absent or lines not lockable: lock/unlock are no-ops. */
# define xthal_icache_line_lock(addr)	do {/*nothing*/} while(0)
# define xthal_icache_line_unlock(addr)	do {/*nothing*/} while(0)
#endif
126
/*
 * Even if a config doesn't have caches, an isync is still needed
 * when instructions in any memory are modified, whether by a loader
 * or self-modifying code.  Therefore, this macro always produces
 * an isync, whether or not an icache is present.
 */
#define xthal_icache_sync() \
		__asm__ __volatile__("isync":::"memory")
135
136
137 /*** DATA CACHE ***/
138
#if XCHAL_DCACHE_SIZE > 0

/* TIE intrinsics (XT_DPFR/XT_DPFW/XT_D*_B etc.) for dcache prefetch and
   block operations. */
# include <xtensa/tie/xt_datacache.h>

/* Invalidate the dcache line containing 'addr' (dhi) — dirty data is lost.
   As with the icache macros, no implicit xthal_dcache_sync(); the caller
   syncs once after the last line operation. */
# define xthal_dcache_line_invalidate(addr)	do { void *__a = (void*)(addr); \
		__asm__ __volatile__("dhi %0, 0" :: "a"(__a) : "memory"); \
		} while(0)
/* Write back (but keep) the dcache line containing 'addr' (dhwb). */
# define xthal_dcache_line_writeback(addr)	do { void *__a = (void*)(addr); \
		__asm__ __volatile__("dhwb %0, 0" :: "a"(__a) : "memory"); \
		} while(0)
/* Write back and invalidate the dcache line containing 'addr' (dhwbi). */
# define xthal_dcache_line_writeback_inv(addr)	do { void *__a = (void*)(addr); \
		__asm__ __volatile__("dhwbi %0, 0" :: "a"(__a) : "memory"); \
		} while(0)
/* Dcache sync: currently only a compiler barrier — the hardware "dsync"
   was deemed unnecessary here (note the commented-out mnemonic). */
# define xthal_dcache_sync() \
		__asm__ __volatile__("" /*"dsync"?*/:::"memory")
/* Prefetch the line containing 'addr' for reading (DPFR intrinsic). */
# define xthal_dcache_line_prefetch_for_read(addr) do { \
		XT_DPFR((const int*)addr, 0); \
		} while(0)
#else
/* No dcache configured: all line ops degenerate to no-ops, except sync
   which stays a compiler barrier. */
# define xthal_dcache_line_invalidate(addr)	do {/*nothing*/} while(0)
# define xthal_dcache_line_writeback(addr)	do {/*nothing*/} while(0)
# define xthal_dcache_line_writeback_inv(addr)	do {/*nothing*/} while(0)
# define xthal_dcache_sync()			__asm__ __volatile__("":::"memory")
# define xthal_dcache_line_prefetch_for_read(addr) do {/*nothing*/} while(0)
#endif
164
#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_LINE_LOCKABLE
/* Prefetch-and-lock the dcache line containing 'addr' (dpfl). */
# define xthal_dcache_line_lock(addr)	do { void *__a = (void*)(addr); \
		__asm__ __volatile__("dpfl %0, 0" :: "a"(__a) : "memory"); \
		} while(0)
/* Unlock the dcache line containing 'addr' (dhu). */
# define xthal_dcache_line_unlock(addr)	do { void *__a = (void*)(addr); \
		__asm__ __volatile__("dhu %0, 0" :: "a"(__a) : "memory"); \
		} while(0)
#else
/* Dcache absent or lines not lockable: lock/unlock are no-ops. */
# define xthal_dcache_line_lock(addr)	do {/*nothing*/} while(0)
# define xthal_dcache_line_unlock(addr)	do {/*nothing*/} while(0)
#endif
176
#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_IS_WRITEBACK

/* Prefetch the line containing 'addr' for writing (DPFW intrinsic).
   Only meaningful on write-back caches; otherwise a no-op below. */
# define xthal_dcache_line_prefetch_for_write(addr) do { \
		XT_DPFW((const int*)addr, 0); \
		} while(0)
#else
# define xthal_dcache_line_prefetch_for_write(addr) do {/*nothing*/} while(0)
#endif
185
186
187 /***** Block Operations *****/
188
#if XCHAL_DCACHE_SIZE > 0 && XCHAL_HAVE_CACHE_BLOCKOPS

/* upgrades */

/*
 * Apply a block "upgrade" (prefetch) intrinsic 'type' to 'size' bytes
 * starting at 'addr'.  NOTE: as with all these helper macros, the
 * arguments may be evaluated more than once — pass only side-effect-free
 * expressions.
 */
# define _XTHAL_DCACHE_BLOCK_UPGRADE(addr, size, type)		\
	{ \
	type((const int*)addr, size); \
	}

/*downgrades */

/*
 * Apply a block "downgrade" intrinsic 'type' (invalidate, writeback, or
 * writeback-invalidate) to 'size' bytes starting at 'addr', splitting
 * the request into chunks of at most XCHAL_DCACHE_SIZE bytes each.
 * This macro declares local variables, so it must be expanded inside a
 * block; every call site below wraps it in do { ... } while(0).
 */
# define _XTHAL_DCACHE_BLOCK_DOWNGRADE(addr, size, type) \
	unsigned _s = size; \
	unsigned _a = (unsigned) addr; \
	do { \
		unsigned __s = (_s > XCHAL_DCACHE_SIZE) ? \
				XCHAL_DCACHE_SIZE : _s; \
		type((const int*)_a, __s); \
		_s -= __s; \
		_a += __s; \
	} while(_s > 0);

/*
 * Same as _XTHAL_DCACHE_BLOCK_DOWNGRADE, but takes a 'max' bound on
 * 'size': when 'max' fits in the dcache the chunking loop is not needed
 * and the compiler can emit a single block operation.
 */
# define _XTHAL_DCACHE_BLOCK_DOWNGRADE_MAX(addr, size, type, max) \
	if (max <= XCHAL_DCACHE_SIZE) { \
		unsigned _s = size; \
		unsigned _a = (unsigned) addr; \
		type((const int*)_a, _s); \
	} \
	else { \
		_XTHAL_DCACHE_BLOCK_DOWNGRADE(addr, size, type); \
	}
220
/* Block downgrades: invalidate / writeback / writeback+invalidate 'size'
   bytes starting at 'addr', chunked to the dcache size. */
# define xthal_dcache_block_invalidate(addr, size)	do { \
		_XTHAL_DCACHE_BLOCK_DOWNGRADE(addr, size, XT_DHI_B); \
		} while(0)
# define xthal_dcache_block_writeback(addr, size)	do { \
		_XTHAL_DCACHE_BLOCK_DOWNGRADE(addr, size, XT_DHWB_B); \
		} while(0)
# define xthal_dcache_block_writeback_inv(addr, size)	do { \
		_XTHAL_DCACHE_BLOCK_DOWNGRADE(addr, size, XT_DHWBI_B); \
		} while(0)

/* '_max' variants: 'max' is a caller-supplied upper bound on 'size' that
   lets the compiler skip the chunking loop when max <= dcache size. */
# define xthal_dcache_block_invalidate_max(addr, size, max)	do { \
		_XTHAL_DCACHE_BLOCK_DOWNGRADE_MAX(addr, size, XT_DHI_B, max); \
		} while(0)
# define xthal_dcache_block_writeback_max(addr, size, max)	do { \
		_XTHAL_DCACHE_BLOCK_DOWNGRADE_MAX(addr, size, XT_DHWB_B, max); \
		} while(0)
# define xthal_dcache_block_writeback_inv_max(addr, size, max)	do { \
		_XTHAL_DCACHE_BLOCK_DOWNGRADE_MAX(addr, size, XT_DHWBI_B, max); \
		} while(0)

/* upgrades that are performed even with write-thru caches */

/* Block prefetches for read/write; '_grp' variants use the group-fetch
   form of the intrinsic (XT_*_BF). */
# define xthal_dcache_block_prefetch_read_write(addr, size)	do { \
		_XTHAL_DCACHE_BLOCK_UPGRADE(addr, size, XT_DPFW_B); \
		} while(0)
# define xthal_dcache_block_prefetch_read_write_grp(addr, size)	do { \
		_XTHAL_DCACHE_BLOCK_UPGRADE(addr, size, XT_DPFW_BF); \
		} while(0)
# define xthal_dcache_block_prefetch_for_read(addr, size)	do { \
		_XTHAL_DCACHE_BLOCK_UPGRADE(addr, size, XT_DPFR_B); \
		} while(0)
# define xthal_dcache_block_prefetch_for_read_grp(addr, size)	do { \
		_XTHAL_DCACHE_BLOCK_UPGRADE(addr, size, XT_DPFR_BF); \
		} while(0)
255
/* abort all or end optional block cache operations */
/* Abort ALL pending block cache operations (XT_PFEND_A). */
# define xthal_dcache_block_abort()	do { \
		XT_PFEND_A(); \
		} while(0)
/* End only the OPTIONAL pending block cache operations (XT_PFEND_O). */
# define xthal_dcache_block_end()	do { \
		XT_PFEND_O(); \
		} while(0)

/* wait for all/required block cache operations to finish */
# define xthal_dcache_block_wait()	do { \
		XT_PFWAIT_A(); \
		} while(0)
# define xthal_dcache_block_required_wait()	do { \
		XT_PFWAIT_R(); \
		} while(0)
/* Start a new group */
# define xthal_dcache_block_newgrp()	do { \
		XT_PFNXT_F(); \
		} while(0)
#else
/* No dcache or no block-op support: everything degenerates to no-ops. */
# define xthal_dcache_block_invalidate(addr, size)	do {/*nothing*/} while(0)
# define xthal_dcache_block_writeback(addr, size)	do {/*nothing*/} while(0)
# define xthal_dcache_block_writeback_inv(addr, size)	do {/*nothing*/} while(0)
# define xthal_dcache_block_invalidate_max(addr, size, max)	do {/*nothing*/} while(0)
# define xthal_dcache_block_writeback_max(addr, size, max)	do {/*nothing*/} while(0)
# define xthal_dcache_block_writeback_inv_max(addr, size, max)	do {/*nothing*/} while(0)
# define xthal_dcache_block_prefetch_read_write(addr, size)	do {/*nothing*/} while(0)
# define xthal_dcache_block_prefetch_read_write_grp(addr, size)	do {/*nothing*/} while(0)
# define xthal_dcache_block_prefetch_for_read(addr, size)	do {/*nothing*/} while(0)
# define xthal_dcache_block_prefetch_for_read_grp(addr, size)	do {/*nothing*/} while(0)
# define xthal_dcache_block_end()	do {/*nothing*/} while(0)
# define xthal_dcache_block_abort()	do {/*nothing*/} while(0)
# define xthal_dcache_block_wait()	do {/*nothing*/} while(0)
# define xthal_dcache_block_required_wait()	do {/*nothing*/} while(0)
# define xthal_dcache_block_newgrp()	do {/*nothing*/} while(0)
#endif
292
#if XCHAL_DCACHE_SIZE > 0 && XCHAL_HAVE_CACHE_BLOCKOPS && XCHAL_DCACHE_IS_WRITEBACK

/* Block prefetch for write / modify — only meaningful with a write-back
   dcache; '_grp' variants use the group-fetch intrinsic form. */
# define xthal_dcache_block_prefetch_for_write(addr, size)	do { \
		_XTHAL_DCACHE_BLOCK_UPGRADE(addr, size, XT_DPFW_B); \
		} while(0)
# define xthal_dcache_block_prefetch_modify(addr, size)		do { \
		_XTHAL_DCACHE_BLOCK_UPGRADE(addr, size, XT_DPFM_B); \
		} while(0)
# define xthal_dcache_block_prefetch_for_write_grp(addr, size)	do { \
		_XTHAL_DCACHE_BLOCK_UPGRADE(addr, size, XT_DPFW_BF); \
		} while(0)
# define xthal_dcache_block_prefetch_modify_grp(addr, size)	do { \
		_XTHAL_DCACHE_BLOCK_UPGRADE(addr, size, XT_DPFM_BF); \
		} while(0)
#else
/* Write-thru (or no) dcache: write-prefetches are no-ops. */
# define xthal_dcache_block_prefetch_for_write(addr, size)	do {/*nothing*/} while(0)
# define xthal_dcache_block_prefetch_modify(addr, size)		do {/*nothing*/} while(0)
# define xthal_dcache_block_prefetch_for_write_grp(addr, size)	do {/*nothing*/} while(0)
# define xthal_dcache_block_prefetch_modify_grp(addr, size)	do {/*nothing*/} while(0)
#endif
313
314 /*************************** INTERRUPTS ***************************/
315
316 /*
317 * Macro versions of:
318 * unsigned xthal_get_intenable( void );
319 * void xthal_set_intenable( unsigned );
320 * unsigned xthal_get_interrupt( void );
321 * void xthal_set_intset( unsigned );
322 * void xthal_set_intclear( unsigned );
323 * unsigned xthal_get_ccount(void);
324 * void xthal_set_ccompare(int, unsigned);
325 * unsigned xthal_get_ccompare(int);
326 *
327 * NOTE: for {set,get}_ccompare, the first argument MUST be a decimal constant.
328 */
329
#if XCHAL_HAVE_INTERRUPTS
/* Read the INTENABLE special register. */
# define XTHAL_GET_INTENABLE()	({ int __intenable; \
		__asm__("rsr.intenable %0" : "=a"(__intenable)); \
		__intenable; })
/* Write the INTENABLE special register. */
# define XTHAL_SET_INTENABLE(v)	do { int __intenable = (int)(v); \
		__asm__ __volatile__("wsr.intenable %0" :: "a"(__intenable):"memory"); \
		} while(0)
/* Read the INTERRUPT special register (pending-interrupt bits). */
# define XTHAL_GET_INTERRUPT()	({ int __interrupt; \
		__asm__ __volatile__("rsr.interrupt %0" : "=a"(__interrupt)); \
		__interrupt; })
#ifdef __clang__
// TODO: LLVM-195. Currently clang does not support INTSET alias for INTERRUPT special reg
# define XTHAL_SET_INTSET(v)	do { int __interrupt = (int)(v); \
		__asm__ __volatile__("wsr.interrupt %0" :: "a"(__interrupt):"memory"); \
		} while(0)
#else
/* Set (software-trigger) interrupt bits via INTSET. */
# define XTHAL_SET_INTSET(v)	do { int __interrupt = (int)(v); \
		__asm__ __volatile__("wsr.intset %0" :: "a"(__interrupt):"memory"); \
		} while(0)
#endif
/* Clear pending interrupt bits via INTCLEAR. */
# define XTHAL_SET_INTCLEAR(v)	do { int __interrupt = (int)(v); \
		__asm__ __volatile__("wsr.intclear %0" :: "a"(__interrupt):"memory"); \
		} while(0)
/* Read / write the cycle counter CCOUNT. */
# define XTHAL_GET_CCOUNT()	({ int __ccount; \
		__asm__ __volatile__("rsr.ccount %0" : "=a"(__ccount)); \
		__ccount; })
# define XTHAL_SET_CCOUNT(v)	do { int __ccount = (int)(v); \
		__asm__ __volatile__("wsr.ccount %0" :: "a"(__ccount):"memory"); \
		} while(0)
/* CCOMPARE access: 'n' MUST be a decimal literal (it is pasted into the
   register mnemonic with #n), hence the two-level macro expansion. */
# define _XTHAL_GET_CCOMPARE(n)	({ int __ccompare; \
		__asm__("rsr.ccompare" #n " %0" : "=a"(__ccompare)); \
		__ccompare; })
# define XTHAL_GET_CCOMPARE(n)	_XTHAL_GET_CCOMPARE(n)
/* The esync ensures the ccompare write takes effect before proceeding. */
# define _XTHAL_SET_CCOMPARE(n,v)	do { int __ccompare = (int)(v); \
		__asm__ __volatile__("wsr.ccompare" #n " %0 ; esync" :: "a"(__ccompare):"memory"); \
		} while(0)
# define XTHAL_SET_CCOMPARE(n,v)	_XTHAL_SET_CCOMPARE(n,v)
#else
/* No interrupt option configured: reads return 0, writes are no-ops. */
# define XTHAL_GET_INTENABLE()	0
# define XTHAL_SET_INTENABLE(v)	do {/*nothing*/} while(0)
# define XTHAL_GET_INTERRUPT()	0
# define XTHAL_SET_INTSET(v)	do {/*nothing*/} while(0)
# define XTHAL_SET_INTCLEAR(v)	do {/*nothing*/} while(0)
# define XTHAL_GET_CCOUNT()	0
# define XTHAL_SET_CCOUNT(v)	do {/*nothing*/} while(0)
# define XTHAL_GET_CCOMPARE(n)	0
# define XTHAL_SET_CCOMPARE(n,v) do {/*nothing*/} while(0)
#endif
378
/* New functions added to accommodate XEA3 and allow deprecation of older
   functions. For this release they just map to the older ones. */
381
/* Enables the specified interrupt.
   intnum: interrupt number (bit position in INTENABLE).
   Uses 1U so that intnum == 31 does not left-shift a signed int into the
   sign bit, which is undefined behavior in C. */
static inline void xthal_interrupt_enable(unsigned intnum)
{
    xthal_int_enable(1U << intnum);
}
387
/* Disables the specified interrupt.
   intnum: interrupt number (bit position in INTENABLE).
   Uses 1U so that intnum == 31 does not left-shift a signed int into the
   sign bit, which is undefined behavior in C. */
static inline void xthal_interrupt_disable(unsigned intnum)
{
    xthal_int_disable(1U << intnum);
}
393
/* Triggers (software-sets) the specified interrupt.
   intnum: interrupt number (bit position in INTSET).
   Uses 1U so that intnum == 31 does not left-shift a signed int into the
   sign bit, which is undefined behavior in C. */
static inline void xthal_interrupt_trigger(unsigned intnum)
{
    xthal_set_intset(1U << intnum);
}
399
/* Clears the specified pending interrupt.
   intnum: interrupt number (bit position in INTCLEAR).
   Uses 1U so that intnum == 31 does not left-shift a signed int into the
   sign bit, which is undefined behavior in C. */
static inline void xthal_interrupt_clear(unsigned intnum)
{
    xthal_set_intclear(1U << intnum);
}
405
406
407 /*************************** MISC ***************************/
408
409 /*
410 * Macro or inline versions of:
411 * void xthal_clear_regcached_code( void );
412 * unsigned xthal_get_prid( void );
413 * unsigned xthal_compare_and_set( int *addr, int testval, int setval );
414 */
415
#if XCHAL_HAVE_LOOPS
/* Zero LCOUNT to invalidate any internally cached zero-overhead-loop
   state after code is modified; no-op when the loop option is absent. */
# define XTHAL_CLEAR_REGCACHED_CODE() \
		__asm__ __volatile__("wsr.lcount %0" :: "a"(0) : "memory")
#else
# define XTHAL_CLEAR_REGCACHED_CODE() do {/*nothing*/} while(0)
#endif
422
#if XCHAL_HAVE_PRID
/* Read the processor ID special register; 0 when PRID is not configured. */
# define XTHAL_GET_PRID()	({ int __prid; \
		__asm__("rsr.prid %0" : "=a"(__prid)); \
		__prid; })
#else
# define XTHAL_GET_PRID()	0
#endif
430
431
/*
 * Atomic compare-and-set: if *addr == testval, store setval into *addr.
 * Returns the value read from *addr (equal to testval iff the store
 * happened).  Three implementations, chosen at compile time:
 *   1. S32C1I instruction (hardware conditional store), when available.
 *   2. Interrupt-disable critical section (rsil 15) around load/compare/
 *      store, then restore PS.  NOTE: not safe against other cores, only
 *      against interrupts on this core.
 *   3. Plain non-atomic load/compare/store fallback when neither option
 *      exists (single-threaded, no-interrupt configs).
 */
static inline unsigned XTHAL_COMPARE_AND_SET( int *addr, int testval, int setval )
{
    int result;

#if XCHAL_HAVE_S32C1I && XCHAL_HW_MIN_VERSION_MAJOR >= 2200
    __asm__ __volatile__ (
	"   wsr.scompare1 %2 \n"
	"   s32c1i %0, %3, 0 \n"
	    : "=a"(result) : "0" (setval), "a" (testval), "a" (addr)
	    : "memory");
#elif XCHAL_HAVE_INTERRUPTS
    int tmp = 0;  // clang complains on uninitialized var
    __asm__ __volatile__ (
	"   rsil %4, 15 \n"		// %4 == saved ps
	"   l32i %0, %3, 0 \n"		// %0 == value to test, return val
	"   bne %2, %0, 9f \n"		// test
	"   s32i %1, %3, 0 \n"		// write the new value
	"9: wsr.ps %4 ; rsync \n"	// restore the PS
	    : "=a"(result)
	    : "0" (setval), "a" (testval), "a" (addr), "a" (tmp)
	    : "memory");
#else
    __asm__ __volatile__ (
	"   l32i  %0, %3, 0 \n"		// %0 == value to test, return val
	"   bne   %2, %0, 9f \n"	// test
	"   s32i  %1, %3, 0 \n"		// write the new value
	"9: \n"
	    : "=a"(result) : "0" (setval), "a" (testval), "a" (addr)
	    : "memory");
#endif
    return result;
}
464
465 #if XCHAL_HAVE_EXTERN_REGS
466
XTHAL_RER(unsigned int reg)467 static inline unsigned XTHAL_RER (unsigned int reg)
468 {
469 unsigned result;
470
471 __asm__ __volatile__ (
472 " rer %0, %1"
473 : "=a" (result) : "a" (reg) : "memory");
474
475 return result;
476 }
477
/* Write 'value' to an external register via the WER instruction. */
static inline void XTHAL_WER (unsigned reg, unsigned value)
{
    __asm__ __volatile__ (
	"   wer     %0, %1"
	    : : "a" (value), "a" (reg) : "memory");
}
484
485 #endif /* XCHAL_HAVE_EXTERN_REGS */
486
/*
 * Sets a single entry at 'index' within the MPU
 *
 * The caller must ensure that the resulting MPU map is ordered.
 *
 * NOTE(review): the "j 1f / .align 8 / memw / wptlb" sequence appears to
 * align the memw+wptlb pair so the entry update cannot disturb the fetch
 * of the instructions performing it — confirm against the Xtensa MPU
 * documentation before relying on this detail.
 */
static inline void xthal_mpu_set_entry (xthal_MPU_entry entry)
{
#if XCHAL_HAVE_MPU
    __asm__ __volatile__("j 1f\n\t.align 8\n\t1: memw\n\twptlb %0, %1\n\t" : : "a" (entry.at), "a"(entry.as));
#endif
}
498
/* Same as xthal_mpu_set_entry except that this function must not be used to change the MPU entry
 * for the currently executing instruction ... use xthal_mpu_set_entry instead.
 * (Bare wptlb, without the alignment/memw preamble of the safe variant.) */
static inline void xthal_mpu_set_entry_ (xthal_MPU_entry entry)
{
#if XCHAL_HAVE_MPU
    __asm__ __volatile__("wptlb %0, %1\n\t" : : "a" (entry.at), "a"(entry.as));
#endif
}
507
508
509
510 #endif /* C code */
511
512 #endif /*XTENSA_CACHE_H*/
513