//
// int_asm.S - assembly language interrupt utility routines
//
// $Id: //depot/rel/Foxhill/dot.8/Xtensa/OS/hal/int_asm.S#1 $

// Copyright (c) 2003-2010 Tensilica Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

#include <xtensa/coreasm.h>


#if XCHAL_HAVE_INTERRUPTS
/*  Offsets of XtHalVPriState structure members (Xthal_vpri_state variable):  */
#define XTHAL_VPRI_VPRI_OFS		0x00
#define XTHAL_VPRI_LOCKLEVEL_OFS	0x01
#define XTHAL_VPRI_LOCKVPRI_OFS		0x02
#define XTHAL_VPRI_PAD0_OFS		0x03
#define XTHAL_VPRI_ENABLED_OFS		0x04
#define XTHAL_VPRI_LOCKMASK_OFS		0x08
#define XTHAL_VPRI_PAD1_OFS		0x0C
#define XTHAL_VPRI_ENABLEMAP_OFS	0x10
#define XTHAL_VPRI_RESOLVEMAP_OFS	(0x10+0x40*(XCHAL_NUM_INTLEVELS+1))
#define XTHAL_VPRI_END_OFS		(0x10+0x40*(XCHAL_NUM_INTLEVELS*2+1))
#endif /* XCHAL_HAVE_INTERRUPTS */
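
//  For reference, a C layout consistent with the offsets above might look as
//  follows.  This is an illustrative sketch only; the authoritative declaration
//  of XtHalVPriState lives in <xtensa/hal.h>, and the array bounds shown here
//  are inferred from the 0x40-byte-per-level spacing of the offsets:
//
//	typedef struct XtHalVPriState {
//	    unsigned char vpri;        /* 0x00  current virtual interrupt priority */
//	    unsigned char locklevel;   /* 0x01 */
//	    unsigned char lockvpri;    /* 0x02 */
//	    unsigned char pad0;        /* 0x03 */
//	    unsigned      enabled;     /* 0x04  global mask of enabled interrupts */
//	    unsigned      lockmask;    /* 0x08 */
//	    unsigned      pad1;        /* 0x0C */
//	    unsigned      enablemap[XCHAL_NUM_INTLEVELS+1][16];   /* 0x10 */
//	    unsigned      resolvemap[XCHAL_NUM_INTLEVELS][16];    /* XTHAL_VPRI_RESOLVEMAP_OFS */
//	} XtHalVPriState;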



//----------------------------------------------------------------------
// Access INTENABLE register from C
//----------------------------------------------------------------------

// unsigned xthal_get_intenable(void)
//
DECLFUNC(xthal_get_intenable)
	abi_entry
# if XCHAL_HAVE_INTERRUPTS
	rsr.intenable	a2
# else
	movi	a2, 0	// if no INTENABLE (no interrupts), tell caller nothing is enabled
# endif
	abi_return
	endfunc



// void xthal_set_intenable(unsigned)
//
DECLFUNC(xthal_set_intenable)
	abi_entry
# if XCHAL_HAVE_INTERRUPTS
	wsr.intenable	a2
# endif
	abi_return
	endfunc
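
//  Illustrative C usage of the two routines above (sketch only; the interrupt
//  number 3 is an arbitrary example, and on systems using the virtual priority
//  scheme below INTENABLE is normally managed through that scheme instead):
//
//	unsigned ints = xthal_get_intenable();
//	xthal_set_intenable(ints | (1 << 3));	/* additionally enable interrupt 3 */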


//----------------------------------------------------------------------
// Access INTERRUPT, INTSET, INTCLEAR registers from C
//----------------------------------------------------------------------



// unsigned xthal_get_interrupt(void)
//
DECLFUNC (xthal_get_interrupt)
	abi_entry
# if XCHAL_HAVE_INTERRUPTS
	rsr.interrupt	a2
# else
	movi	a2, 0	// if no INTERRUPT (no interrupts), tell caller nothing is pending
# endif
	abi_return
	endfunc



// unsigned xthal_get_intread(void)
//
// Same as xthal_get_interrupt(): reads the INTERRUPT register.
//
DECLFUNC (xthal_get_intread)
	abi_entry
# if XCHAL_HAVE_INTERRUPTS
	rsr.interrupt	a2
# else
	movi	a2, 0	// if no INTERRUPT (no interrupts), tell caller nothing is pending
# endif
	abi_return
	endfunc



// void xthal_set_intset(unsigned)
//
DECLFUNC(xthal_set_intset)
	abi_entry
# if XCHAL_HAVE_INTERRUPTS
	wsr.intset	a2
# endif
	abi_return
	endfunc


// void xthal_set_intclear(unsigned)
//
DECLFUNC(xthal_set_intclear)
	abi_entry
# if XCHAL_HAVE_INTERRUPTS
	wsr.intclear	a2
# endif
	abi_return
	endfunc
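
//  Illustrative C usage for a software interrupt (sketch only; assumes the
//  interrupt number 'swint' is configured as a software interrupt):
//
//	xthal_set_intset(1 << swint);	/* post (trigger) the software interrupt */
//	/* ... */
//	xthal_set_intclear(1 << swint);	/* clear it again */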



//----------------------------------------------------------------------
// Virtual PS.INTLEVEL support:
// allows running C code at virtual PS.INTLEVEL > 0
// using INTENABLE to simulate the masking that PS.INTLEVEL would do.
//----------------------------------------------------------------------
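
//  Typical C usage pattern for this scheme (illustrative sketch only):
//
//	unsigned oldvpri = xthal_set_vpri_lock();	/* mask all level-1 interrupts */
//	/* ... short critical section ... */
//	xthal_set_vpri(oldvpri);			/* restore previous virtual priority */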



// unsigned xthal_get_vpri(void);

DECLFUNC(xthal_get_vpri)
	abi_entry
# if XCHAL_HAVE_INTERRUPTS
	movi	a2, Xthal_vpri_state
	l8ui	a2, a2, XTHAL_VPRI_VPRI_OFS
# else
	movi	a2, 0	// no interrupts, report we're always at level 0
# endif
	abi_return
	endfunc


// unsigned xthal_set_vpri_nw(unsigned)
//
//  Must be called at PS.INTLEVEL <= 1.
//  Doesn't touch the stack (doesn't reference a1 at all).
//  Normally, PS should be restored with a6 after return from this call
//  (it isn't restored automatically because some exception handlers
//   want to keep ints locked for a while).
//
//  On entry:
//	a2 = new virtual interrupt priority (0x00 .. 0x1F)
//	a3-a6 = undefined
//	PS.INTLEVEL <= 1
//  On exit:
//	a2 = previous virtual interrupt priority (0x0F .. 0x1F, or 0 if no interrupts)
//	a3-a5 = clobbered
//	a6 = PS as it was on entry
//	PS.INTLEVEL = 1
//	PS.WOE = 0 (cleared only when interrupts are configured; it is unclear whether this is really needed)
//	INTENABLE = updated according to new vpri

_SYM(xthal_set_vpri_nw)

# if XCHAL_HAVE_INTERRUPTS
	/*  Make sure a2 is in the range 0x0F .. 0x1F:  */
	movi	a3, 0x1F	// highest legal virtual interrupt priority
	sub	a4, a2, a3	// (a4 = newlevel - maxlevel)
	movgez	a2, a3, a4	// newlevel = maxlevel if (newlevel - maxlevel) >= 0
	movi	a3, 15		// lowest legal virtual interrupt priority
	sub	a4, a2, a3	// (a4 = newlevel - 15)
	movltz	a2, a3, a4	// newlevel = 15 if newlevel < 15

xthal_set_vpri_nw_common:
	movi	a4, Xthal_vpri_state	// address of vpri state structure

	/*
	 *  Lock out interrupts for exclusive access to the virtual priority
	 *  structure while we examine and modify it.
	 *  Note that we use no register beyond a6, so we cannot cause any
	 *  window spills; we could therefore leave WOE enabled (if it was),
	 *  but we clear it because that may be what the caller wants,
	 *  and it is cleaner.
	 */
	//  Get PS and mask off INTLEVEL:
	rsil	a6, 1		// save a6 = PS, set PS.INTLEVEL = 1

	//  Clear PS.WOE (it is unclear whether this is really needed):
	movi	a3, ~0x00040000	// mask to...
	rsr.ps	a5		// get PS again, into a5
//a2,a3,a4,a5,a6
	and	a5, a5, a3	// ... clear a5.WOE
	wsr.ps	a5		// clear PS.WOE
	rsync

//a2,a4,a6
	/*  Get mask of interrupts to be turned off at requested level:  */
	l32i	a5, a4, XTHAL_VPRI_ENABLED_OFS		// get the global mask
	addx4	a3, a2, a4	// a3 = a4 + a2*4  (index into enablemap[] array)
//a2,a3,a4,a5,a6
	l32i	a3, a3, XTHAL_VPRI_ENABLEMAP_OFS	// get the per-level mask
	and	a3, a5, a3	// new INTENABLE value according to new intlevel
	wsr.intenable	a3	// set it!
//a2,a4,a6

	l8ui	a5, a4, XTHAL_VPRI_VPRI_OFS	// previous virtual priority
	s8i	a2, a4, XTHAL_VPRI_VPRI_OFS	// new virtual priority

	//  Let the caller restore PS:
	//wsr.ps	a6			// restore PS.INTLEVEL
	//rsync

	mov	a2, a5		// return previous virtual priority

# else /* ! XCHAL_HAVE_INTERRUPTS */
xthal_set_vpri_nw_common:
#  if XCHAL_HAVE_EXCEPTIONS
	rsr.ps	a6	// return PS for caller to restore
#  else
	movi	a6, 0
#  endif
	movi	a2, 0	// no interrupts, report we're always at virtual priority 0
# endif /* XCHAL_HAVE_INTERRUPTS */
	ret
	endfunc



// unsigned xthal_set_vpri_intlevel_nw(unsigned);
//
//  Same as xthal_set_vpri_nw() except that it accepts
//  an interrupt level rather than a virtual interrupt priority.
//  This just converts intlevel to vpri and jumps into xthal_set_vpri_nw
//  (at xthal_set_vpri_nw_common).

_SYM(xthal_set_vpri_intlevel_nw)
# if XCHAL_HAVE_INTERRUPTS
	movi	a3, 0x10
	movnez	a2, a3, a2	// a2 = (a2 ? 0x10 : 0)
	addi	a2, a2, 0x0F	// a2 += 0x0F
# endif
	j	xthal_set_vpri_nw_common	// set vpri to a2
	endfunc





// unsigned  xthal_set_vpri (unsigned newvpri);
//
//  Normal windowed call (PS.INTLEVEL=0 and PS.WOE=1 on entry and exit).
//  (PS.UM = 0 or 1)
//
//  Returns previous virtual interrupt priority
//  (0x0F .. 0x1F, or 0 if no interrupts).
//
//  On entry:
//	a2 = new virtual interrupt priority (0x00 .. 0x1F)
//  On exit:
//	a2 = previous vpri
//	INTENABLE = updated according to new vpri

DECLFUNC(xthal_set_vpri)
	abi_entry
# if XCHAL_HAVE_INTERRUPTS
	/*  Make sure a2 is in the range 0x0F .. 0x1F:  */
	movi	a3, 0x1F	// highest legal virtual interrupt priority
	sub	a4, a2, a3	// (a4 = newlevel - maxlevel)
	movgez	a2, a3, a4	// newlevel = maxlevel if (newlevel - maxlevel) >= 0
	movi	a3, 15		// lowest legal virtual interrupt priority
	sub	a4, a2, a3	// (a4 = newlevel - 15)
	movltz	a2, a3, a4	// newlevel = 15 if newlevel < 15

xthal_set_vpri_common1:
	movi	a4, Xthal_vpri_state	// address of vpri state structure

	/*
	 *  Lock out interrupts for exclusive access to the virtual priority
	 *  structure while we examine and modify it.
	 *  Note that we use no register beyond a7, so we cannot cause any
	 *  window spills, and we can leave WOE enabled.
	 */
	//  Get PS and mask off INTLEVEL:
	rsil	a6, 1		// save a6 = PS, set PS.INTLEVEL = 1

	l8ui	a7, a4, XTHAL_VPRI_VPRI_OFS	// previous virtual priority (vpri)

	/*  Get mask of interrupts to be turned off at requested level:  */
	l32i	a5, a4, XTHAL_VPRI_ENABLED_OFS		// get the global mask
	addx4	a3, a2, a4	// a3 = a4 + a2*4  (index into enablemap[] array)
	l32i	a3, a3, XTHAL_VPRI_ENABLEMAP_OFS	// get the per-level mask
	s8i	a2, a4, XTHAL_VPRI_VPRI_OFS	// new virtual priority (in load-slot)
	and	a3, a5, a3	// new INTENABLE value according to new intlevel
	wsr.intenable	a3	// set it!

	wsr.ps	a6		// restore PS.INTLEVEL
	rsync

	mov	a2, a7		// return previous vpri

# else /* ! XCHAL_HAVE_INTERRUPTS */
	movi	a2, 0	// no interrupts, report we're always at virtual priority 0
# endif /* XCHAL_HAVE_INTERRUPTS */
	abi_return
	endfunc
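
//  Illustrative C usage (sketch only): raise the virtual priority so that
//  level-1 interrupts at soft priority 7 and below stay masked, then restore.
//  The 0x10+softpri encoding follows the vpri range documented above.
//
//	unsigned oldvpri = xthal_set_vpri(0x10 + 7);
//	/* ... */
//	xthal_set_vpri(oldvpri);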



// unsigned  xthal_set_vpri_intlevel (unsigned intlevel);
//
//  Equivalent to xthal_set_vpri(XTHAL_VPRI(intlevel,0xF)).
//  This just converts intlevel to vpri and jumps inside xthal_set_vpri.

DECLFUNC(xthal_set_vpri_intlevel)
	abi_entry
# if XCHAL_HAVE_INTERRUPTS
	movi	a3, 0x10
	movnez	a2, a3, a2	// a2 = (a2 ? 0x10 : 0)
	addi	a2, a2, 0x0F	// a2 += 0x0F
	j	xthal_set_vpri_common1	// set vpri to a2
# else
	movi	a2, 0	// no interrupts, report we're always at virtual priority 0
	abi_return
# endif
	endfunc



// unsigned  xthal_set_vpri_lock (void);
//
//  Equivalent to xthal_set_vpri(0x1F);
//  Returns previous virtual interrupt priority.
//
DECLFUNC(xthal_set_vpri_lock)
	abi_entry
# if XCHAL_HAVE_INTERRUPTS
	movi	a2, 0x1F		// highest vpri: lock out all level-1 interrupts
	j	xthal_set_vpri_common1
# else
	movi	a2, 0	// no interrupts, report we're always at virtual priority 0
	abi_return
# endif
	endfunc




// unsigned xthal_get_intpending_nw(void)
//
//  Of the pending level-1 interrupts, returns
//  the bitmask of interrupts at the highest software priority,
//  and the index of the first of these.
//  It also disables interrupts of that software priority and lower
//  via INTENABLE.
//
//	On entry:
//		a0 = return PC
//		a1 = sp
//		a2-a6 = (available) (undefined)
//		PS.INTLEVEL = 1
//		PS.WOE = 0
//	On exit:
//		a0 = return PC
//		a1 = sp (NOTE: stack is untouched, a1 is never referenced)
//		a2 = index of first highest-soft-pri pending l1 interrupt (0..31), or -1 if none
//		a3 = bitmask of highest-soft-pri pending l1 interrupts (0 if none) (may be deprecated)
//		a4 = (clobbered)
//		a5 = new vpri (not typically used by caller? so might get deprecated...?)
//		a6 = old vpri (eg. to be saved as part of interrupt context's state)
//		INTENABLE = updated according to new vpri
//		INTERRUPT bit cleared for interrupt returned in a2 (if any), if software or edge-triggered or write-error
//		all others = preserved

_SYM(xthal_get_intpending_nw)
# if XCHAL_HAVE_INTERRUPTS
	// Give us one more register to play with
	//wsr.excsave1	a4

	// Figure out which interrupt to process

	/*
	Perform a binary search to find a mask of the interrupts that are
	ready at the highest virtual priority level.
	Xthal_vpri_resolvemap is a binary tree implemented within an array,
	sorted by priority: each node contains the set of interrupts in
	the range of priorities corresponding to the right half of its branch.
	The mask of enabled & pending interrupts is compared with each node to
	determine in which subbranch (left or right) the highest priority one is
	present.  After 4 such masks and comparisons (for 16 priorities), we have
	determined the priority of the highest priority enabled&pending interrupt.

	Table entries for intlevel 'i' are bitmasks defined as follows (map=Xthal_vpri_resolvemap[i-1]):
	    map[8+(x=0)]          = ints at pri x + 8..15 (8-15)
	    map[4+(x=0,8)]        = ints at pri x + 4..7  (4-7,12-15)
	    map[2+(x=0,4,8,12)]   = ints at pri x + 2..3  (2-3,6-7,10-11,14-15)
	    map[1+(x=0,2..12,14)] = ints at pri x + 1     (1,3,5,7,9,11,13,15)
	    map[0]                = 0  (unused; for alignment)
	*/
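
	//  In C terms, the binary search below does roughly the following
	//  (illustrative sketch only), where 'map' is the level-1 resolvemap
	//  (the first 16-entry block at XTHAL_VPRI_RESOLVEMAP_OFS) and
	//  'pending' is the mask of enabled pending interrupts computed below:
	//
	//	unsigned softpri = 0;
	//	if (map[softpri + 8] & pending) softpri += 8;
	//	if (map[softpri + 4] & pending) softpri += 4;
	//	if (map[softpri + 2] & pending) softpri += 2;
	//	if (map[softpri + 1] & pending) softpri += 1;
	//	/* softpri (0..15) is now the highest soft priority with an enabled pending interrupt */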

	rsr.interrupt	a4	// a4 = mask of interrupts pending, including those disabled
	rsr.intenable	a2	// a2 = mask of interrupts enabled
	movi	a3, Xthal_vpri_state
	and	a4, a2, a4	// a4 = mask of enabled interrupts pending
	beqz	a4, gipfail	// if none (can happen for spurious level-triggered interrupts,
				//  or possibly other causes), we're done

	mov	a5, a3
	l32i	a2, a5, XTHAL_VPRI_RESOLVEMAP_OFS+8*4
	bnone	a2, a4, 1f
	addi	a5, a5, 8*4
1:	l32i	a2, a5, XTHAL_VPRI_RESOLVEMAP_OFS+4*4
	bnone	a2, a4, 1f
	addi	a5, a5, 4*4
1:	l32i	a2, a5, XTHAL_VPRI_RESOLVEMAP_OFS+2*4
	bnone	a2, a4, 1f
	addi	a5, a5, 2*4
1:	l32i	a2, a5, XTHAL_VPRI_RESOLVEMAP_OFS+1*4
	bnone	a2, a4, 1f
	addi	a5, a5, 1*4
1:

#  if 0
	//  Alternative branchless sequence (disabled; 'a?' denotes an unassigned scratch register):
	//  a5 = address of map ...
	l32i	a2, a5, XTHAL_VPRI_RESOLVEMAP_OFS+8*4
	addi	a?, a5, 8*4
	and	a2, a2, a4
	movnez	a5, a?, a2
	l32i	a2, a5, XTHAL_VPRI_RESOLVEMAP_OFS+4*4
	addi	a?, a5, 4*4
	and	a2, a2, a4
	movnez	a5, a?, a2
	l32i	a2, a5, XTHAL_VPRI_RESOLVEMAP_OFS+2*4
	addi	a?, a5, 2*4
	and	a2, a2, a4
	movnez	a5, a?, a2
	l32i	a2, a5, XTHAL_VPRI_RESOLVEMAP_OFS+1*4
	addi	a?, a5, 1*4
	and	a2, a2, a4
	movnez	a5, a?, a2
#  endif

	//  Here:
	//	a3 = Xthal_vpri_state
	//	a5 = Xthal_vpri_state + softpri*4
	//	a4 = mask of enabled interrupts pending
	//	a2,a6 = available

	//  Lock interrupts during virtual priority data structure transaction:
	//rsil	a6, 1			// set PS.INTLEVEL = 1 (a6 ignored)
	//	a2,a6 = available

	//  The highest priority interrupt(s) in a4 is at softpri = (a5-a3) / 4.
	//  So interrupts in enablemap[1][softpri] are not in a4 (they are higher priority).
	//  The set of interrupts at softpri are:
	//	enablemap[1][softpri-1] - enablemap[1][softpri]
	//  So and'ing a4 with enablemap[1][softpri - 1] will give us
	//  the set of interrupts pending at the highest soft priority.
	//
	l32i	a2, a5, XTHAL_VPRI_ENABLEMAP_OFS + 16*4 - 4	// get enablemap[1][softpri-1]
	and	a4, a2, a4		// only keep interrupts of highest pri (softpri)

	//  a4 now has mask of pending interrupts at highest ready level (new vpri)

	//  Update INTENABLE for this new virtual priority
	l32i	a2, a5, XTHAL_VPRI_ENABLEMAP_OFS + 16*4	// get vpri-specific mask = enablemap[1][softpri]
	l32i	a6, a3, XTHAL_VPRI_ENABLED_OFS		// get global mask
	sub	a5, a5, a3		// a5 = softpri * 4 (for below; here for efficiency)
	and	a2, a2, a6				// and together
	wsr.intenable	a2		// disable interrupts at or below new vpri
	//	a2,a6 = available

	//  Update new virtual priority:
	l8ui	a6, a3, XTHAL_VPRI_VPRI_OFS		// get old vpri (returned)
	srli	a5, a5, 2		// a5 = softpri  (0..15)
	addi	a5, a5, 0x10		// a5 = 0x10 + softpri = new virtual priority
	s8i	a5, a3, XTHAL_VPRI_VPRI_OFS		// store new vpri (returned)

	//  Undo the temporary lock (if we were at PS.INTLEVEL > 1):
	//rsil	a2, 1

	mov	a3, a4		// save for the caller (in case it wants it?)

	//  Choose one of the set of highest-vpri pending interrupts to process.
	//  For speed (and simplicity), use this simple two-instruction sequence
	//  to select the least significant bit set in a4.  This implies that
	//  interrupts with a lower interrupt number take precedence over those
	//  with a higher interrupt number (!!).
	//
	neg	a2, a4		// keep only the least-significant bit that is set...
	and	a4, a2, a4	// ... in a4
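	//  (Equivalent C idiom for the two instructions above, for reference:
	//   a4 &= (unsigned)-a4, which isolates the lowest set bit of a4.)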

	//  Software, edge-triggered, and write-error interrupts are cleared by writing to the
	//  INTCLEAR pseudo-reg (to clear relevant bits of the INTERRUPT register).
	//  To simplify interrupt handlers (so they avoid tracking which type of
	//  interrupt they handle and act accordingly), clear such interrupts here.
	//  To avoid race conditions, the clearing must occur *after* we undertake
	//  to process the interrupt, and *before* actually handling the interrupt.
	//  Interrupt handlers may additionally clear the interrupt themselves
	//  at appropriate points if needed to avoid unnecessary interrupts.
	//
#define CLEARABLE_INTLEVEL1_MASK	(XCHAL_INTLEVEL1_MASK & XCHAL_INTCLEARABLE_MASK)
#  if CLEARABLE_INTLEVEL1_MASK != 0
	//movi	a2, CLEARABLE_INTLEVEL1_MASK
	//and	a2, a2, a4
	//wsr.intclear	a2
	wsr.intclear	a4	// no effect if a4 is not a software, edge-triggered, or write-error interrupt
#  endif

	//  Convert the single-bit interrupt mask to an interrupt number.
	//  (ie. compute log2 using either the NSAU instruction or a binary search)

	find_ms_setbit	a2, a4, a2, 0	// set a2 to the bit index of the single bit set in a4 (0..31)
				// NOTE: assumes a4 != 0 (otherwise a2 is undefined)

	//	a2 has vector number (0..31)

	//rsr.excsave1	a4
	ret

gipfail:
	l8ui	a6, a3, XTHAL_VPRI_VPRI_OFS		// get old vpri
	mov	a5, a6					// is also new vpri (unchanged)
# else /* XCHAL_HAVE_INTERRUPTS */
	//  No interrupts configured!
	movi	a5, 0		// return zero new vpri
	movi	a6, 0		// return zero old vpri
# endif /* XCHAL_HAVE_INTERRUPTS */
	movi	a2, -1		// return bogus vector number (eg. can be quickly tested for negative)
	movi	a3, 0		// return zero bitmask of interrupts pending
	ret
	endfunc

// -----------------------------------------------------------------



// void xthal_vpri_lock(void)
//
// Used internally by the Core HAL to block interrupts of priority higher
// than or equal to Xthal_vpri_locklevel during virtual interrupt operations.
//
DECLFUNC(xthal_vpri_lock)
	abi_entry
# if XCHAL_HAVE_INTERRUPTS
	rsil	a6, 1				// save a6 = PS, set PS.INTLEVEL = 1

	//     if( Xthal_vpri_level < Xthal_vpri_locklevel )
	//
	movi	a2, Xthal_vpri_state		// a2 := address of global var. Xthal_vpri_state
	//interlock
	l8ui	a3, a2, XTHAL_VPRI_VPRI_OFS	// a3 := Xthal_vpri_level == Xthal_vpri_state.vpri
	l8ui	a5, a2, XTHAL_VPRI_LOCKLEVEL_OFS  // a5 := Xthal_vpri_locklevel
	l32i	a4, a2, XTHAL_VPRI_ENABLED_OFS	// a4 := Xthal_vpri_enabled
	bgeu	a3, a5, xthal_vpri_lock_done

	//  xthal_set_intenable( Xthal_vpri_enablemap[0][Xthal_vpri_locklevel] & Xthal_vpri_enabled );
	//
	addx4	a3, a5, a2			// a3 := a2 + a5*4  (index into enablemap[] array)
	l32i	a3, a3, XTHAL_VPRI_ENABLEMAP_OFS // a3 := Xthal_vpri_enablemap[0][Xthal_vpri_locklevel]
	//interlock
	and	a2, a4, a3
	wsr.intenable	a2

xthal_vpri_lock_done:
	wsr.ps	a6				// restore PS.INTLEVEL
	rsync
# endif
	abi_return
	endfunc



// void xthal_vpri_unlock(void)
//
// Enable interrupts according to the current virtual interrupt priority.
// This effectively "unlocks" interrupts disabled by xthal_vpri_lock()
// (assuming the virtual interrupt priority hasn't changed).
//
DECLFUNC(xthal_vpri_unlock)
	abi_entry
# if XCHAL_HAVE_INTERRUPTS
	//
	//  This should be free of race-conditions.
	//
	//  xthal_set_intenable( Xthal_vpri_enablemap[0][Xthal_vpri_level] & Xthal_vpri_enabled );
	//
	movi	a2, Xthal_vpri_state		 // a2 := address of global var. Xthal_vpri_state
	//interlock
	l8ui	a3, a2, XTHAL_VPRI_VPRI_OFS      // a3 := Xthal_vpri_level == Xthal_vpri_state.vpri
	l32i	a4, a2, XTHAL_VPRI_ENABLED_OFS	 // a4 := Xthal_vpri_enabled
	addx4	a3, a3, a2                       // a3 := a2 + a3*4  (index into enablemap[] array)
	l32i	a3, a3, XTHAL_VPRI_ENABLEMAP_OFS // a3 := Xthal_vpri_enablemap[0][Xthal_vpri_level]
	//interlock
	and	a2, a4, a3
	wsr.intenable	a2
# endif
	abi_return
	endfunc

