1 /*
2  * xtos-internal.h  --  internal definitions for single-threaded run-time
3  *
4  * Copyright (c) 2003-2010 Tensilica Inc.
5  * Copyright (c) 2019 Intel Corporation. All rights reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining
8  * a copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sublicense, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice shall be included
16  * in all copies or substantial portions of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21  * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
22  * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25  */
26 
27 #ifndef XTOS_INTERNAL_H
28 #define XTOS_INTERNAL_H
29 
30 
31 #if CONFIG_MULTICORE
32 #include <sof/lib/cpu.h>
33 #endif
34 #include <sof/lib/memory.h>
35 #include <xtensa/config/core.h>
36 #include <xtensa/xtruntime.h>
37 #include <xtensa/xtruntime-frames.h>
38 #include <xtensa/xtensa-versions.h>
39 #ifndef XTOS_PARAMS_H	/* this to allow indirect inclusion of this header from the outside */
40 #include "xtos-params.h"
41 #endif
42 
/*  Relative ordering of subpriorities within an interrupt level (or vector):  */
#define XTOS_SPO_ZERO_LO	0	/* lower (eg. zero) numbered interrupts are lower  priority than higher numbered interrupts */
#define XTOS_SPO_ZERO_HI	1	/* lower (eg. zero) numbered interrupts are higher priority than higher numbered interrupts */


/*  Sanity check some parameters from xtos-params.h:  */
#if XTOS_LOCKLEVEL < XCHAL_EXCM_LEVEL || XTOS_LOCKLEVEL > 15
# error Invalid XTOS_LOCKLEVEL value, must be >= EXCM_LEVEL and <= 15, please fix xtos-params.h
#endif

/*  Mask of interrupts locked out at XTOS_LOCKLEVEL:  */
#define XTOS_LOCKOUT_MASK	XCHAL_INTLEVEL_ANDBELOW_MASK(XTOS_LOCKLEVEL)
/*  Mask of interrupts that can still be enabled at XTOS_LOCKLEVEL
 *  (the 32-bit complement of XTOS_LOCKOUT_MASK):  */
#define XTOS_UNLOCKABLE_MASK	(0xFFFFFFFF-XTOS_LOCKOUT_MASK)

/*  Don't set this:  */
#define XTOS_HIGHINT_TRAMP	0	/* mapping high-pri ints to low-pri not auto-supported */
#define XTOS_VIRTUAL_INTERRUPT	XTOS_HIGHINT_TRAMP	/* partially-virtualized INTERRUPT register not currently supported */
#if XTOS_HIGHINT_TRAMP
# error Automatically-generated high-level interrupt trampolines are not presently supported.
#endif
64 
65 /*
66  *  If single interrupt at level-one, sub-prioritization is irrelevant:
67  */
68 #if defined(XCHAL_INTLEVEL1_NUM)
69 # undef XTOS_SUBPRI
70 # define XTOS_SUBPRI 0			/* override - only one interrupt */
71 #endif
72 
73 /*
74  *  In XEA1, the INTENABLE special register must be virtualized to provide
75  *  standard XTOS functionality.
76  *  In XEA2, this is only needed for software interrupt prioritization.
77  */
78 #if XTOS_SUBPRI || XCHAL_HAVE_XEA1
79 #define XTOS_VIRTUAL_INTENABLE	1
80 #else
81 #define XTOS_VIRTUAL_INTENABLE	0
82 #endif
83 
84 /*
85  *  If single interrupt per priority, then fairness is irrelevant:
86  */
87 #if (XTOS_SUBPRI && !XTOS_SUBPRI_GROUPS) || defined(XCHAL_INTLEVEL1_NUM)
88 # undef XTOS_INT_FAIRNESS
89 # define XTOS_INT_FAIRNESS	0
90 #endif
91 
92 /*  Identify special case interrupt handling code in int-lowpri-dispatcher.S:  */
93 #define XTOS_INT_SPECIALCASE	(XTOS_SUBPRI_ORDER == XTOS_SPO_ZERO_HI && XTOS_INT_FAIRNESS == 0 && XTOS_SUBPRI_GROUPS == 0)
94 
95 /*
96  *  Determine whether to extend the interrupt entry array:
97  */
98 #define XIE_EXTEND		(XTOS_VIRTUAL_INTENABLE && !XTOS_INT_SPECIALCASE)
99 
/*  If we have the NSAU instruction, ordering of interrupts is reversed in xtos_interrupt_table[]:  */
#if XCHAL_HAVE_NSA
# define MAPINT(n)	((XCHAL_NUM_INTERRUPTS-1)-(n))
# ifdef _ASMLANGUAGE
	//  mapint: register form of MAPINT(): \an = (XCHAL_NUM_INTERRUPTS-1) - \an
	.macro	mapint an
	neg 	\an, \an
	addi	\an, \an, XCHAL_NUM_INTERRUPTS-1
	.endm
# endif
#else /* no NSA */
# define MAPINT(n)	(n)
# ifdef _ASMLANGUAGE
	//  mapint: identity mapping when NSA is absent (expands to nothing)
	.macro	mapint an
	.endm
# endif
#endif

/*  Byte offset of the task-context pointer within the per-core structure
 *  addressed via THREADPTR (see xtos_task_ctx_percore below); must match
 *  the structure layout generated elsewhere -- TODO(review): confirm.  */
#define XTOS_TASK_CONTEXT_OFFSET	48
118 
119 #if defined(_ASMLANGUAGE) || defined(__ASSEMBLER__)
120 /***********   Useful macros   ***********/
121 
122 /*
123  *  A useful looping macro:
124  *  'iterate' invokes 'what' (an instruction, pseudo-op or other macro)
125  *  multiple times, passing it a numbered parameter from 'from' to 'to'
126  *  inclusively.  Does not invoke 'what' at all if from > to.
127  *  Maximum difference between 'from' and 'to' is 99 minus nesting depth
128  *  (GNU 'as' doesn't allow nesting deeper than 100).
129  */
130 	.macro	iterate		from, to, what
131 	.ifeq	((\to-\from) & ~0xFFF)
132 	\what	\from
133 	iterate	"(\from+1)", \to, \what
134 	.endif
135 	.endm	// iterate
136 
137 
138 
	//  rsilft
	//
	//  Execute RSIL \ar, \tolevel if \tolevel is different than \fromlevel.
	//  This way the RSIL is avoided if we know at assembly time that
	//  it will not change the level.  Typically, this means the \ar register
	//  is ignored, ie. RSIL is used only to change PS.INTLEVEL.
	//
	//  Note: the .if below is evaluated at assembly time, so both levels
	//  must be assembly-time constants; no code is emitted when they match
	//  (or when the core has no interrupt option at all).
	.macro	rsilft	ar, fromlevel, tolevel
#if XCHAL_HAVE_INTERRUPTS
	.if \fromlevel - \tolevel
	rsil	\ar, \tolevel
	.endif
#endif
	.endm
153 
154 
	//  Save LOOP and MAC16 registers, if configured, to the exception stack
	//  frame pointed to by address register \esf, using \aa and \ab as temporaries.
	//
	//  This macro essentially saves optional registers that the compiler uses by
	//  default when present.
	//  Note that the acclo/acchi subset of MAC16 may be used even if other
	//  multipliers are present (e.g. mul16, mul32).
	//
	//  Only two temp registers required for this code to be optimal (no interlocks) in both
	//  T10xx (Athens) and Xtensa LX microarchitectures (both 5 and 7 stage pipes):
	//
	.macro	save_loops_mac16	esf, aa, ab
#if XCHAL_HAVE_LOOPS
	//  Zero-overhead loop state: LCOUNT, LBEG, LEND.
	rsr.lcount	\aa
	rsr.lbeg	\ab
	s32i	\aa, \esf, UEXC_lcount
	rsr.lend	\aa
	s32i	\ab, \esf, UEXC_lbeg
	s32i	\aa, \esf, UEXC_lend
#endif
#if XCHAL_HAVE_MAC16
	//  MAC16 accumulator (always saved when MAC16 is configured).
	rsr.acclo	\aa
	rsr.acchi	\ab
	s32i	\aa, \esf, UEXC_acclo
	s32i	\ab, \esf, UEXC_acchi
# if XTOS_SAVE_ALL_MAC16
	//  Optionally also save the four MR operand registers m0..m3.
	rsr.m0	\aa
	rsr.m1	\ab
	s32i	\aa, \esf, UEXC_mr + 0
	s32i	\ab, \esf, UEXC_mr + 4
	rsr.m2	\aa
	rsr.m3	\ab
	s32i	\aa, \esf, UEXC_mr + 8
	s32i	\ab, \esf, UEXC_mr + 12
# endif
#endif
	.endm
192 
	//  Restore LOOP and MAC16 registers, if configured, from the exception stack
	//  frame pointed to by address register \esf, using \aa, \ab and \ac as temporaries.
	//
	//  Three temp registers are required for this code to be optimal (no interlocks) in
	//  Xtensa LX microarchitectures with 7-stage pipe; otherwise only two
	//  registers would be needed.
	//
	//  Inverse of save_loops_mac16 above; loads are scheduled ahead of the
	//  wsr's to avoid load-use interlocks.
	.macro	restore_loops_mac16	esf, aa, ab, ac
#if XCHAL_HAVE_LOOPS
	//  Restore zero-overhead loop state: LCOUNT, LBEG, LEND.
	l32i	\aa, \esf, UEXC_lcount
	l32i	\ab, \esf, UEXC_lbeg
	l32i	\ac, \esf, UEXC_lend
	wsr.lcount	\aa
	wsr.lbeg	\ab
	wsr.lend	\ac
#endif
#if XCHAL_HAVE_MAC16
	l32i	\aa, \esf, UEXC_acclo
	l32i	\ab, \esf, UEXC_acchi
# if XTOS_SAVE_ALL_MAC16
	//  Restore accumulator plus the four MR operand registers m0..m3.
	l32i	\ac, \esf, UEXC_mr + 0
	wsr.acclo	\aa
	wsr.acchi	\ab
	wsr.m0	\ac
	l32i	\aa, \esf, UEXC_mr + 4
	l32i	\ab, \esf, UEXC_mr + 8
	l32i	\ac, \esf, UEXC_mr + 12
	wsr.m1	\aa
	wsr.m2	\ab
	wsr.m3	\ac
# else
	wsr.acclo	\aa
	wsr.acchi	\ab
# endif
#endif
	.endm
229 
230 
/*  Offsets from _xtos_intstruct structure:  */
	//  '.struct 0' switches to an absolute section so the labels below
	//  define byte offsets (not data); '.text' switches back afterwards.
	.struct 0
#if XTOS_VIRTUAL_INTENABLE
XTOS_ENABLED_OFS:	.space	4	/* _xtos_enabled variable */
XTOS_VPRI_ENABLED_OFS:	.space	4	/* _xtos_vpri_enabled variable */
#endif
#if XTOS_VIRTUAL_INTERRUPT
XTOS_PENDING_OFS:	.space	4	/* _xtos_pending variable */
#endif
	.text
241 
242 
#if XTOS_VIRTUAL_INTENABLE
	// Update INTENABLE register, computing it as follows:
	//	INTENABLE = _xtos_enabled & _xtos_vpri_enabled
	// 			[ & ~_xtos_pending ]
	//
	// Entry:
	//	register ax = &_xtos_intstruct
	//	register ay, az undefined (temporaries)
	//	PS.INTLEVEL set to XTOS_LOCKLEVEL or higher (eg. via xtos_lock)
	//	window overflows prevented (PS.WOE=0, PS.EXCM=1, or overflows
	//		already done for registers ax, ay, az)
	//
	// Exit:
	//	registers ax, ay, az clobbered
	//	PS unchanged
	//	caller needs to SYNC (?) for INTENABLE changes to take effect
	//
	// Note: in other software prioritization schemes/implementations,
	// the term <_xtos_vpri_enabled> in the above expression is often
	// replaced with another expression that computes the set of
	// interrupts allowed to be enabled at the current software virtualized
	// interrupt priority.
	//
	// For example, a simple alternative implementation of software
	// prioritization for XTOS might have been the following:
	//	INTENABLE = _xtos_enabled & (vpri_enabled | UNLOCKABLE_MASK)
	// which removes the need for the interrupt dispatcher to 'or' the
	// UNLOCKABLE_MASK bits into _xtos_vpri_enabled, and lets other code
	// disable all lockout level interrupts by just clearing _xtos_vpri_enabled
	// rather than setting it to UNLOCKABLE_MASK.
	// Other implementations sometimes use a table, eg:
	//	INTENABLE = _xtos_enabled & enable_table[current_vpri]
	// The HAL (used by some 3rd party OSes) uses essentially a table-driven
	// version, with other tables enabling run-time changing of priorities.
	//
	.macro	xtos_update_intenable	ax, ay, az
	//movi	\ax, _xtos_intstruct	// (left for reference; caller supplies \ax)
	l32i	\ay, \ax, XTOS_VPRI_ENABLED_OFS		// ay = _xtos_vpri_enabled
	l32i	\az, \ax, XTOS_ENABLED_OFS		// az = _xtos_enabled
	//interlock
	and	\az, \az, \ay		// az = _xtos_enabled & _xtos_vpri_enabled
# if XTOS_VIRTUAL_INTERRUPT
	l32i	\ay, \ax, XTOS_PENDING_OFS		// ay = _xtos_pending
	movi	\ax, -1
	xor	\ay, \ay, \ax		// ay = ~_xtos_pending
	and	\az, \az, \ay		// az &= ~_xtos_pending
# endif
	wsr.intenable	\az
	.endm
#endif /* VIRTUAL_INTENABLE */
293 
	//  xtos_lock: raise PS.INTLEVEL to XTOS_LOCKLEVEL, leaving the previous
	//  PS value in \ax for a later matching xtos_unlock.
	.macro	xtos_lock	ax
	rsil    \ax, XTOS_LOCKLEVEL	// lockout
	.endm

	//  xtos_unlock: restore PS from \ax (the value saved by xtos_lock);
	//  rsync ensures the PS write takes effect before continuing.
	.macro	xtos_unlock	ax
	wsr.ps	\ax			// unlock
	rsync
	.endm
302 
/*  Offsets to XtosIntHandlerEntry structure fields (see below):
 *  must stay in sync with the C XtosIntHandlerEntry layout.  */
# define XIE_HANDLER	0
# define XIE_ARG	4
# define XIE_SIZE	8
# if XIE_EXTEND
/*  Extended mask words stored after the handler entry array:  */
#  define XIE_VPRIMASK	(XIE_SIZE*XCHAL_NUM_INTERRUPTS+0)	/* if VIRTUAL_INTENABLE [SUBPRI||XEA1] && !SPECIALCASE */
#  define XIE_LEVELMASK	(XIE_SIZE*XCHAL_NUM_INTERRUPTS+4)	/* [fairness preloop]  if FAIRNESS && SUBPRI [&& SUBPRI_GROUPS] */
# endif

/*  To simplify code: selects argument a when NSA is configured, b otherwise.  */
# if XCHAL_HAVE_NSA
#  define IFNSA(a,b)	a
# else
#  define IFNSA(a,b)	b
# endif
318 
	// get_prid ax
	// Extracts core id.
	// Reads the low 8 bits of the PRID special register; on cores without
	// PRID the build is assumed single-core and the primary core id is used.
	.macro	get_prid ax
#if XCHAL_HAVE_PRID
	rsr.prid	\ax
	extui		\ax, \ax, 0, 8
#else
	movi		\ax, PLATFORM_PRIMARY_CORE_ID
#endif
	.endm
329 
330 #if CONFIG_MULTICORE
331 	// xtos_stack_addr_percore ax, ay, stack_primary, stack_secondary, stack_size
332 	// Retrieves address of end of stack buffer for certain core to register ax.
333 	.macro	xtos_stack_addr_percore ax, ay, stack_primary_addr, mem_blk_secondary_addr, stack_size
334 	get_prid	\ax
335 	bnei		\ax, PLATFORM_PRIMARY_CORE_ID, core_s
336 	movi		\ax, \stack_primary_addr
337 	j		exit
338 core_s:
339 	addi		\ax, \ax, -1
340 	movi		\ay, _core_s_size
341 	mull		\ax, \ax, \ay
342 	movi		\ay, (HEAP_SYSTEM_S_SIZE + HEAP_SYS_RUNTIME_S_SIZE)
343 	add		\ax, \ax, \ay
344 	movi		\ay, \mem_blk_secondary_addr
345 	add		\ax, \ax, \ay
346 	j		exit
347 exit:
348 	movi		\ay, \stack_size
349 	add		\ax, \ax, \ay
350 	.endm
351 
352 	// xtos_stack_addr_percore_add ax, stack_name, offset
353 	// Pointer to dedicated interrupt stack + offset.
354 	.macro	xtos_stack_addr_percore_add ax, stack_name, offset
355 	get_prid	\ax
356 	beqz		\ax, core_0
357 	beqi		\ax, 1, core_1
358 	beqi		\ax, 2, core_2
359 	beqi		\ax, 3, core_3
360 	j		exit
361 core_0:
362 	movi		\ax, \stack_name\()0 + (\offset)
363 	j		exit
364 core_1:
365 	movi		\ax, \stack_name\()1 + (\offset)
366 	j		exit
367 core_2:
368 	movi		\ax, \stack_name\()2 + (\offset)
369 	j		exit
370 core_3:
371 	movi		\ax, \stack_name\()3 + (\offset)
372 	j		exit
373 exit:
374 	.endm
375 
	// xtos_addr_percore_add ax, symbol, offset
	// Pointer to structure per core + offset.
	// \offset must be a constant suitable for addi (small immediate).
	.macro	xtos_addr_percore_add ax, symbol, offset
	xtos_addr_percore	\ax, \symbol
	addi			\ax, \ax, \offset
	.endm

	// xtos_addr_percore_sub ax, symbol, offset
	// Pointer to structure per core - offset.
	.macro	xtos_addr_percore_sub ax, symbol, offset
	xtos_addr_percore	\ax, \symbol
	addi			\ax, \ax, -\offset
	.endm
389 #endif /* CONFIG_MULTICORE */
390 
	// xtos_addr_percore ax, structure_name
	// Pointer to structure per core.
	// The per-core base pointer lives in THREADPTR when available;
	// otherwise it is fetched through a PC-relative literal holding
	// SOF_VIRTUAL_THREAD_BASE (the jump skips over the inline literal).
	.macro	xtos_addr_percore ax, structure_name
#if XCHAL_HAVE_THREADPTR
	rur.threadptr	\ax
#else
	j 1f
	.align 4
	.literal_position
2:
	.word SOF_VIRTUAL_THREAD_BASE
1:
	.align 4
	l32r	\ax, 2b
	l32i		\ax, \ax, 0
#endif
	// Load the requested structure pointer from the per-core table.
	l32i		\ax, \ax, XTOS_PTR_TO_\structure_name
	.endm
409 
	// xtos_store_percore ax, ay, structure_name
	// Stores register value under the selected structure per core.
	//   \ax - value to store (preserved), \ay - scratch (clobbered)
	// Mirrors xtos_addr_percore's THREADPTR / literal-fallback addressing.
	.macro	xtos_store_percore ax, ay, structure_name
#if XCHAL_HAVE_THREADPTR
	rur.threadptr	\ay
#else
	j 1f
	.align 4
	.literal_position
2:
	.word SOF_VIRTUAL_THREAD_BASE
1:
	.align 4
	l32r	\ay, 2b
	l32i	\ay, \ay, 0
#endif
	s32i		\ax, \ay, XTOS_PTR_TO_\structure_name
	.endm
428 
429 	// xtos_int_stack_addr_percore ax, int_level, stack_name
430 	// Pointer to dedicated interrupt stack.
431 	.macro	xtos_int_stack_addr_percore ax, int_level, stack_name
432 #if XCHAL_HAVE_THREADPTR
433 	rur.threadptr	\ax
434 #else
435 	j 1f
436 	.align 4
437 	.literal_position
438 2:
439 	.word SOF_VIRTUAL_THREAD_BASE
440 1:
441 	.align 4
442 	l32r	\ax, 2b
443 	l32i		\ax, \ax, 0
444 #endif
445 	l32i		\ax, \ax, XTOS_PTR_TO_\stack_name\()_&int_level
446 	.endm
447 
	// xtos_task_ctx_percore ax
	// Pointer to structure per core.
	// Loads the current core's task context pointer (at
	// XTOS_TASK_CONTEXT_OFFSET within the per-core structure) into \ax,
	// using THREADPTR when available or the literal fallback otherwise.
	.macro	xtos_task_ctx_percore ax
#if XCHAL_HAVE_THREADPTR
	rur.threadptr	\ax
#else
	j 1f
	.align 4
	.literal_position
2:
	.word SOF_VIRTUAL_THREAD_BASE
1:
	.align 4
	l32r	\ax, 2b
	l32i		\ax, \ax, 0
#endif
	l32i		\ax, \ax, XTOS_TASK_CONTEXT_OFFSET
	.endm
466 
	// xtos_task_ctx_store_percore ax, ay
	// Changes task context to point to the selected address.
	//   \ax - new task context pointer (preserved), \ay - scratch (clobbered)
	// Inverse of xtos_task_ctx_percore: stores \ax at
	// XTOS_TASK_CONTEXT_OFFSET within the per-core structure.
	.macro	xtos_task_ctx_store_percore ax, ay
#if XCHAL_HAVE_THREADPTR
	rur.threadptr	\ay
#else
	j 1f
	.align 4
	.literal_position
2:
	.word SOF_VIRTUAL_THREAD_BASE
1:
	.align 4
	l32r	\ay, 2b
	l32i	\ay, \ay, 0
#endif
	s32i		\ax, \ay, XTOS_TASK_CONTEXT_OFFSET
	.endm
485 
	// Executes optional callback on wake up.
	// Expands to nothing unless CONFIG_WAKEUP_HOOK is set; call12 clobbers
	// the usual windowed-call registers, so only use where that is safe.
	.macro	xtos_on_wakeup
#if CONFIG_WAKEUP_HOOK
	call12 arch_interrupt_on_wakeup
#endif
	.endm
492 
493 #else /* !_ASMLANGUAGE && !__ASSEMBLER__ */
494 
495 /*
496  *  Interrupt handler table entry.
497  *  Unregistered entries have 'handler' point to xtos_unhandled_interrupt().
498  */
499 typedef struct XtosIntHandlerEntry {
500     _xtos_handler	handler;
501     union {
502         void *		varg;
503         int		narg;
504     } u;
505 } XtosIntHandlerEntry;
506 # if XIE_EXTEND
507 typedef struct XtosIntMaskEntry {
508     unsigned		vpri_mask;	/* mask of interrupts enabled when this interrupt is taken */
509     unsigned		level_mask;	/* mask of interrupts at this interrupt's level */
510 } XtosIntMaskEntry;
511 # endif
512 
#if CONFIG_MULTICORE
/* Virtualized interrupt-enable state; mirrors the assembly-side
 * XTOS_ENABLED_OFS / XTOS_VPRI_ENABLED_OFS offsets. */
struct XtosIntStruct
{
	unsigned xtos_enabled;
	unsigned vpri_enabled;
};

// XtosIntInterruptTable holds array of interrupt handler descriptors.
struct XtosIntInterruptTable
{
	struct XtosIntHandlerEntry array[XCHAL_NUM_INTERRUPTS];
};

// XtosInterruptStructure describes layout of xtos interrupt structures per core
// generated for certain platform in file interrupt-table.S.
struct XtosInterruptStructure
{
	struct XtosIntStruct xtos_enabled;
	struct XtosIntInterruptTable xtos_interrupt_table;
	struct XtosIntMaskEntry xtos_interrupt_mask_table[XCHAL_NUM_INTERRUPTS];
	/* zero-length array (GNU extension) used only to pad the structure
	 * size up to a data-cache-line boundary */
	__attribute__((aligned(XCHAL_DCACHE_LINESIZE))) int al[0];
};
#endif
536 
537 extern void xtos_unhandled_interrupt();
538 
539 #endif /* !_ASMLANGUAGE && !__ASSEMBLER__ */
540 
541 /*
542  *  Notes...
543  *
544  *  XEA1 and interrupt-SUBPRIoritization both imply virtualization of INTENABLE.
 *  Synchronous trampolines imply partial virtualization of the INTERRUPT
546  *  register, which in turn also implies virtualization of INTENABLE register.
547  *  High-level interrupts manipulating the set of enabled interrupts implies
 *  at least a high XTOS_LOCKLEVEL, although not necessarily INTENABLE virtualization.
549  *
550  *  With INTENABLE register virtualization, at all times the INTENABLE
551  *  register reflects the expression:
552  *	(set of interrupts enabled) & (set of interrupts enabled by current
553  *					virtual priority)
554  *
555  *  Unrelated (DBREAK semantics):
556  *
557  *	A[31-6] = DBA[3-6]
558  *	---------------------
559  *	A[5-0] & DBC[5-C] & szmask
560  *
561  *	= DBA[5-0] & szmask
562  *			^___  ???
563  */
564 
565 
/*  Report whether the XSR instruction is available (conservative):
 *  assumed present on XEA2 cores, or when there are no exceptions at all.  */
#define HAVE_XSR	(XCHAL_HAVE_XEA2 || !XCHAL_HAVE_EXCEPTIONS)
/*
 *  This is more accurate, but not a reliable test in software releases prior to 6.0
 *  (where the targeted hardware parameter was not explicit in the XPG):
 *
 *#define HAVE_XSR	(XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_T1040_0)
 */
574 
575 
576 
/* Macros for supporting hi-level and medium-level interrupt handling. */

#if XCHAL_NUM_INTLEVELS > 6
#error Template files (*-template.S) limit support to interrupt levels <= 6
#endif

#if  defined(__XTENSA_WINDOWED_ABI__) && XCHAL_HAVE_CALL4AND12 == 0
#error CALL8-only is not supported!
#endif

/*  A level is "high" when it is above EXCM_LEVEL, exists on this core,
 *  and is not reserved for the debug option.  */
#define INTERRUPT_IS_HI(level)  \
	( XCHAL_HAVE_INTERRUPTS && \
	 (XCHAL_EXCM_LEVEL < level) && \
	 (XCHAL_NUM_INTLEVELS >= level) && \
	 (XCHAL_HAVE_DEBUG ? XCHAL_DEBUGLEVEL != level : 1))

/*  A level is "medium" when it can be masked by PS.EXCM (<= EXCM_LEVEL).  */
#define INTERRUPT_IS_MED(level) \
	(XCHAL_HAVE_INTERRUPTS && (XCHAL_EXCM_LEVEL >= level))


/*  Token-pasting helpers (two-level so macro arguments expand first):  */
#define _JOIN(x,y)	x ## y
#define JOIN(x,y)	_JOIN(x,y)

#define _JOIN3(a,b,c)	a ## b ## c
#define JOIN3(a,b,c)	_JOIN3(a,b,c)

/*  Per-level name builders keyed on _INTERRUPT_LEVEL (set by including file):  */
#define LABEL(x,y)		JOIN3(x,_INTERRUPT_LEVEL,y)
#define EXCSAVE_LEVEL		JOIN(EXCSAVE_,_INTERRUPT_LEVEL)
#define INTLEVEL_VSIZE		JOIN3(XSHAL_INTLEVEL,_INTERRUPT_LEVEL,_VECTOR_SIZE)

/*  For asm macros; works for positive a,b smaller than 1000:
 *  GREATERTHAN is nonzero iff a > b (negative difference sets high bits);
 *  EQUAL is nonzero iff a == b (single set bits overlap only when equal).  */
#define GREATERTHAN(a, b)	(((b) - (a)) & ~0xFFF)
#define EQUAL(a, b)		((1 << (a)) & (1 << (b)))
610 
#if CONFIG_MULTICORE
/*  Assembly-visible sizes matching struct XtosIntStruct above:  */
// sizeof(xtos_enabled)
#define XTOS_ENABLED_SIZE_PER_CORE	(4)
// sizeof(vpri_enabled)
#define XTOS_VPRI_ENABLED_SIZE_PER_CORE	(4)
// sizeof(XtosIntStruct)
#define XTOS_INTSTRUCT_SIZE_PER_CORE	(XTOS_ENABLED_SIZE_PER_CORE + \
					XTOS_VPRI_ENABLED_SIZE_PER_CORE)
#endif
620 
621 #endif /* XTOS_INTERNAL_H */
622 
623