1//
2// int_asm.S - assembly language interrupt utility routines
3//
4// $Id: //depot/rel/Foxhill/dot.8/Xtensa/OS/hal/int_asm.S#1 $
5
6// Copyright (c) 2003-2010 Tensilica Inc.
7//
8// Permission is hereby granted, free of charge, to any person obtaining
9// a copy of this software and associated documentation files (the
10// "Software"), to deal in the Software without restriction, including
11// without limitation the rights to use, copy, modify, merge, publish,
12// distribute, sublicense, and/or sell copies of the Software, and to
13// permit persons to whom the Software is furnished to do so, subject to
14// the following conditions:
15//
16// The above copyright notice and this permission notice shall be included
17// in all copies or substantial portions of the Software.
18//
19// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
20// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
22// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
23// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27#include <xtensa/coreasm.h>
28
29
30#if XCHAL_HAVE_INTERRUPTS
31/*  Offsets of XtHalVPriState structure members (Xthal_vpri_state variable):  */
32#define XTHAL_VPRI_VPRI_OFS		0x00
33#define XTHAL_VPRI_LOCKLEVEL_OFS	0x01
34#define XTHAL_VPRI_LOCKVPRI_OFS		0x02
35#define XTHAL_VPRI_PAD0_OFS		0x03
36#define XTHAL_VPRI_ENABLED_OFS		0x04
37#define XTHAL_VPRI_LOCKMASK_OFS		0x08
38#define XTHAL_VPRI_PAD1_OFS		0x0C
39#define XTHAL_VPRI_ENABLEMAP_OFS	0x10
40#define XTHAL_VPRI_RESOLVEMAP_OFS	(0x10+0x40*(XCHAL_NUM_INTLEVELS+1))
41#define XTHAL_VPRI_END_OFS		(0x10+0x40*(XCHAL_NUM_INTLEVELS*2+1))
42#endif /* XCHAL_HAVE_INTERRUPTS */
43
44
45#if defined(__SPLIT__get_intenable) || \
46    defined(__SPLIT__get_intenable_nw)
47
48//----------------------------------------------------------------------
49// Access INTENABLE register from C
50//----------------------------------------------------------------------
51
// unsigned xthal_get_intenable(void)
//
//  Return the current value of the INTENABLE special register
//  (bitmask of interrupts currently enabled).
//  Returns 0 if the core is configured without interrupts
//  (no INTENABLE register exists in that case).
//
DECLFUNC(xthal_get_intenable)
	abi_entry
# if XCHAL_HAVE_INTERRUPTS
	rsr.intenable	a2
# else
	movi	a2, 0	// if no INTENABLE (no interrupts), tell caller nothing is enabled
# endif
	abi_return
	endfunc
63
64#endif
65
66#if defined(__SPLIT__set_intenable) || \
67    defined(__SPLIT__set_intenable_nw)
68
// void xthal_set_intenable(unsigned)
//
//  Write a2 to the INTENABLE special register
//  (bitmask of interrupts to enable).
//  No-op if the core is configured without interrupts.
//
DECLFUNC(xthal_set_intenable)
	abi_entry
# if XCHAL_HAVE_INTERRUPTS
	wsr.intenable	a2
# endif
	abi_return
	endfunc
78
79
80//----------------------------------------------------------------------
81// Access INTERRUPT, INTSET, INTCLEAR register from C
82//----------------------------------------------------------------------
83
84#endif
85
86#if defined(__SPLIT__get_interrupt) || \
87    defined (__SPLIT__get_interrupt_nw)
88
// unsigned xthal_get_interrupt(void)
//
//  Return the current value of the INTERRUPT special register
//  (bitmask of pending interrupts, whether enabled or not).
//  Returns 0 if the core is configured without interrupts.
//
DECLFUNC (xthal_get_interrupt)
	abi_entry
# if XCHAL_HAVE_INTERRUPTS
	rsr.interrupt	a2
# else
	movi	a2, 0	// if no INTERRUPT (no interrupts), tell caller nothing is pending
# endif
	abi_return
	endfunc
100
101#endif
102
103#if defined(__SPLIT__get_intread) || \
104    defined(__SPLIT__get_intread_nw)
105
// unsigned xthal_get_intread(void)
//
//  Identical in behavior to xthal_get_interrupt() above
//  (older name kept for compatibility): returns the INTERRUPT
//  register, or 0 if the core has no interrupt option.
//
DECLFUNC (xthal_get_intread)
	abi_entry
# if XCHAL_HAVE_INTERRUPTS
	rsr.interrupt	a2
# else
	movi	a2, 0	// if no INTERRUPT (no interrupts), tell caller nothing is pending
# endif
	abi_return
	endfunc
115
116#endif
117
118#if defined(__SPLIT__set_intset) || \
119    defined(__SPLIT__set_intset_nw)
120
// void xthal_set_intset(unsigned)
//
//  Write a2 to the INTSET pseudo-register, setting (posting) the
//  corresponding bits of the INTERRUPT register.
//  No-op if the core is configured without interrupts.
//
DECLFUNC(xthal_set_intset)
	abi_entry
# if XCHAL_HAVE_INTERRUPTS
	wsr.intset	a2
# endif
	abi_return
	endfunc
130
131#endif
132
133#if defined(__SPLIT__set_intclear) || \
134    defined(__SPLIT__set_intclear_nw)
135
// void xthal_set_intclear(unsigned)
//
//  Write a2 to the INTCLEAR pseudo-register, clearing the
//  corresponding bits of the INTERRUPT register (only effective
//  for interrupts that are clearable this way — software,
//  edge-triggered, write-error; see comments in
//  xthal_get_intpending_nw below).
//  No-op if the core is configured without interrupts.
//
DECLFUNC(xthal_set_intclear)
	abi_entry
# if XCHAL_HAVE_INTERRUPTS
	wsr.intclear	a2
# endif
	abi_return
	endfunc
145
146
147
148//----------------------------------------------------------------------
149// Virtual PS.INTLEVEL support:
150// allows running C code at virtual PS.INTLEVEL > 0
151// using INTENABLE to simulate the masking that PS.INTLEVEL would do.
152//----------------------------------------------------------------------
153
154#endif
155
156#if defined(__SPLIT__get_vpri) ||\
157    defined(__SPLIT__get_vpri_nw)
158
// unsigned xthal_get_vpri(void);
//
//  Return the current virtual interrupt priority, read from the
//  vpri byte of the Xthal_vpri_state structure.
//  Returns 0 if the core is configured without interrupts.
//
DECLFUNC(xthal_get_vpri)
	abi_entry
# if XCHAL_HAVE_INTERRUPTS
	movi	a2, Xthal_vpri_state
	l8ui	a2, a2, XTHAL_VPRI_VPRI_OFS	// a2 = Xthal_vpri_state.vpri
# else
	movi	a2, 0	// no interrupts, report we're always at level 0
# endif
	abi_return
	endfunc
171
172#endif
173
174#if defined(__SPLIT__set_vpri_nw)
175
// unsigned xthal_set_vpri_nw(unsigned)
//
//  Set the virtual interrupt priority, updating INTENABLE accordingly.
//  Non-windowed (call0-style) entry point.
//
//  Must be called at PS.INTLEVEL <= 1.
//  Doesn't touch the stack (doesn't reference a1 at all).
//  Normally, PS should be restored with a6 after return from this call
//  (it isn't restored automatically because some exception handlers
//   want to keep ints locked for a while).
//
//  On entry:
//	a2 = new virtual interrupt priority (0x00 .. 0x1F)
//	a3-a6 = undefined
//	PS.INTLEVEL <= 1
//  On exit:
//	a2 = previous virtual interrupt priority (0x0F .. 0x1F, or 0 if no interrupts)
//	a3-a5 = clobbered
//	a6 = PS as it was on entry
//	PS.INTLEVEL = 1
//	!!!!!!!!! PS.WOE = 0 (but not if there are no interrupts; is this really needed???)
//	INTENABLE = updated according to new vpri

_SYM(xthal_set_vpri_nw)

# if XCHAL_HAVE_INTERRUPTS
	/*  Clamp a2 (requested vpri) into the legal range 0x0F .. 0x1F:  */
	movi	a3, 0x1F	// highest legal virtual interrupt priority
	sub	a4, a2, a3	// (a4 = newlevel - maxlevel)
	movgez	a2, a3, a4	// newlevel = maxlevel if (newlevel - maxlevel) >= 0
	movi	a3, 15		// lowest legal virtual interrupt priority
	sub	a4, a2, a3	// (a4 = newlevel - 15)
	movltz	a2, a3, a4	// newlevel = 15 if newlevel < 15

xthal_set_vpri_nw_common:	// entered from xthal_set_vpri_intlevel_nw with a2 pre-converted
	movi	a4, Xthal_vpri_state	// address of vpri state structure

	/*
	 *  Lockout interrupts for exclusive access to virtual priority structure
	 *  while we examine and modify it.
	 *  Note that we accessed a4 and don't access any further than a6,
	 *  so we won't cause any spills, so we could leave WOE enabled (if it is),
	 *  but we clear it because that might be what the caller wants,
	 *  and is cleaner.
	 */
	//  Get PS and mask off INTLEVEL:
	rsil	a6, 1		// save a6 = PS (returned to caller), set PS.INTLEVEL = 1

	//  Clear PS.WOE.  (Can we get rid of this?!!!!!):
	movi	a3, ~0x00040000	// mask to...
	rsr.ps	a5		// re-read current PS into a5 (a6 already holds entry PS)
	and	a5, a5, a3	// ... clear a5.WOE
	wsr.ps	a5		// clear PS.WOE
	rsync
	// live: a2 = new vpri, a4 = &Xthal_vpri_state, a6 = saved PS

	/*  Get mask of interrupts to be turned off at requested level:  */
	l32i	a5, a4, XTHAL_VPRI_ENABLED_OFS		// get the global mask
	addx4	a3, a2, a4	// a3 = a4 + a2*4  (index into enablemap[] array)
	l32i	a3, a3, XTHAL_VPRI_ENABLEMAP_OFS	// get the per-level mask
	and	a3, a5, a3	// new INTENABLE value according to new intlevel
	wsr.intenable	a3	// set it!

	l8ui	a5, a4, XTHAL_VPRI_VPRI_OFS	// previous virtual priority
	s8i	a2, a4, XTHAL_VPRI_VPRI_OFS	// new virtual priority

	//  Let the caller restore PS:
	//wsr.ps	a6			// restore PS.INTLEVEL
	//rsync

	mov	a2, a5		// return previous virtual intlevel

# else /* ! XCHAL_HAVE_INTERRUPTS */
xthal_set_vpri_nw_common:
#  if XCHAL_HAVE_EXCEPTIONS
	rsr.ps	a6	// return PS for caller to restore
#  else
	movi	a6, 0	// no PS register configured; return 0 in its place
#  endif
	movi	a2, 0	// no interrupts, report we're always at virtual priority 0
# endif /* XCHAL_HAVE_INTERRUPTS */
	ret
	endfunc
259
260
261
// unsigned xthal_set_vpri_intlevel_nw(unsigned);
//
//  Same as xthal_set_vpri_nw() except that it accepts
//  an interrupt level rather than a virtual interrupt priority.
//  This just converts intlevel to vpri (0 -> 0x0F, nonzero -> 0x1F)
//  and jumps to the common tail of xthal_set_vpri_nw.

_SYM(xthal_set_vpri_intlevel_nw)
# if XCHAL_HAVE_INTERRUPTS
	movi	a3, 0x10
	movnez	a2, a3, a2	// a2 = (a2 ? 0x10 : 0)
	addi	a2, a2, 0x0F	// a2 += 0x0F  (so a2 = 0x0F or 0x1F)
# endif
	j	xthal_set_vpri_nw_common	// set vpri to a2
	endfunc
276
277
278
279#endif
280
281#if defined(__SPLIT__set_vpri)
282
// unsigned  xthal_set_vpri (unsigned newvpri);
//
//  Set the virtual interrupt priority, updating INTENABLE accordingly.
//  Normal windowed call (PS.INTLEVEL=0 and PS.WOE=1 on entry and exit).
//  (PS.UM = 0 or 1)
//
//  Returns previous virtual interrupt priority
//  (0x0F .. 0x1F, or 0 if no interrupts).
//
//  On entry:
//	a2 = new virtual interrupt priority (0x00 .. 0x1F)
//  On exit:
//	a2 = previous vpri
//	INTENABLE = updated according to new vpri

DECLFUNC(xthal_set_vpri)
	abi_entry
# if XCHAL_HAVE_INTERRUPTS
	/*  Clamp a2 (requested vpri) into the legal range 0x0F .. 0x1F:  */
	movi	a3, 0x1F	// highest legal virtual interrupt priority
	sub	a4, a2, a3	// (a4 = newlevel - maxlevel)
	movgez	a2, a3, a4	// newlevel = maxlevel if (newlevel - maxlevel) >= 0
	movi	a3, 15		// lowest legal virtual interrupt priority
	sub	a4, a2, a3	// (a4 = newlevel - 15)
	movltz	a2, a3, a4	// newlevel = 15 if newlevel < 15

xthal_set_vpri_common1:		// entered from xthal_set_vpri_intlevel / xthal_set_vpri_lock
	movi	a4, Xthal_vpri_state	// address of vpri state structure

	/*
	 *  Lockout interrupts for exclusive access to virtual priority structure
	 *  while we examine and modify it.
	 *  Note that we accessed a4 and don't access any further than a6,
	 *  so we won't cause any spills, so we can leave WOE enabled.
	 */
	//  Get PS and mask off INTLEVEL:
	rsil	a6, 1		// save a6 = PS, set PS.INTLEVEL = 1

	l8ui	a7, a4, XTHAL_VPRI_VPRI_OFS	// previous virtual priority (vpri)

	/*  Get mask of interrupts to be turned off at requested level:  */
	l32i	a5, a4, XTHAL_VPRI_ENABLED_OFS		// get the global mask
	addx4	a3, a2, a4	// a3 = a4 + a2*4  (index into enablemap[] array)
	l32i	a3, a3, XTHAL_VPRI_ENABLEMAP_OFS	// get the per-level mask
	s8i	a2, a4, XTHAL_VPRI_VPRI_OFS	// new virtual priority (in load-slot)
	and	a3, a5, a3	// new INTENABLE value according to new intlevel
	wsr.intenable	a3	// set it!

	wsr.ps	a6		// restore PS.INTLEVEL
	rsync

	mov	a2, a7		// return previous vpri

# else /* ! XCHAL_HAVE_INTERRUPTS */
	movi	a2, 0	// no interrupts, report we're always at virtual priority 0
# endif /* XCHAL_HAVE_INTERRUPTS */
	abi_return
	endfunc
340
341
342
// unsigned  xthal_set_vpri_intlevel (unsigned intlevel);
//
//  Equivalent to xthal_set_vpri(XTHAL_VPRI(intlevel,0xF)).
//  This just converts intlevel to vpri (0 -> 0x0F, nonzero -> 0x1F)
//  and jumps inside xthal_set_vpri.

DECLFUNC(xthal_set_vpri_intlevel)
	abi_entry
# if XCHAL_HAVE_INTERRUPTS
	movi	a3, 0x10
	movnez	a2, a3, a2	// a2 = (a2 ? 0x10 : 0)
	addi	a2, a2, 0x0F	// a2 += 0x0F  (so a2 = 0x0F or 0x1F)
	j	xthal_set_vpri_common1	// set vpri to a2
# else
	movi	a2, 0	// no interrupts, report we're always at virtual priority 0
	abi_return
# endif
	endfunc
360
361
362
// unsigned  xthal_set_vpri_lock (void);
//
//  Equivalent to xthal_set_vpri(0x1F);
//  Returns previous virtual interrupt priority.
//
DECLFUNC(xthal_set_vpri_lock)
	abi_entry
# if XCHAL_HAVE_INTERRUPTS
	movi	a2, 0x1F		// lock at the maximum virtual priority (level 1, softpri 15)
	j	xthal_set_vpri_common1
# else
	movi	a2, 0	// no interrupts, report we're always at virtual priority 0
	abi_return
# endif
	endfunc
378
379
380#endif
381
382#if defined(__SPLIT__get_intpending_nw)
383
// unsigned xthal_get_intpending_nw(void)
//
//  Of the pending level-1 interrupts, returns
//  the bitmask of interrupts at the highest software priority,
//  and the index of the first of these.
//  It also disables interrupts of that software priority and lower
//  via INTENABLE.
//  Non-windowed (call0-style) entry point; never touches the stack.
//
//	On entry:
//		a0 = return PC
//		a1 = sp
//		a2-a6 = (available) (undefined)
//		PS.INTLEVEL = 1
//		PS.WOE = 0
//	On exit:
//		a0 = return PC
//		a1 = sp (NOTE: stack is untouched, a1 is never referenced)
//		a2 = index of first highest-soft-pri pending l1 interrupt (0..31), or -1 if none
//		a3 = bitmask of highest-soft-pri pending l1 interrupts (0 if none) (may be deprecated)
//		a4 = (clobbered)
//		a5 = new vpri (not typically used by caller? so might get deprecated...?)
//		a6 = old vpri (eg. to be saved as part of interrupt context's state)
//		INTENABLE = updated according to new vpri
//		INTERRUPT bit cleared for interrupt returned in a2 (if any), if software or edge-triggered or write-error
//		all others = preserved

_SYM(xthal_get_intpending_nw)
# if XCHAL_HAVE_INTERRUPTS
	// Give us one more register to play with
	//wsr.excsave1	a4

	// Figure out which interrupt to process

	/*
	Perform a binary search to find a mask of the interrupts that are
	ready at the highest virtual priority level.
	Xthal_vpri_resolvemap is a binary tree implemented within an array,
	sorted by priority: each node contains the set of interrupts in
	the range of priorities corresponding to the right half of its branch.
	The mask of enabled & pending interrupts is compared with each node to
	determine in which subbranch (left or right) the highest priority one is
	present.  After 4 such masks and comparisons (for 16 priorities), we have
	determined the priority of the highest priority enabled&pending interrupt.

	Table entries for intlevel 'i' are bitmasks defined as follows (map=Xthal_vpri_resolvemap[i-1]):
	    map[8+(x=0)]          = ints at pri x + 8..15 (8-15)
	    map[4+(x=0,8)]        = ints at pri x + 4..7  (4-7,12-15)
	    map[2+(x=0,4,8,12)]   = ints at pri x + 2..3  (2-3,6-7,10-11,14-15)
	    map[1+(x=0,2..12,14)] = ints at pri x + 1     (1,3,5,7,9,11,13,15)
	    map[0]                = 0  (unused; for alignment)
	*/

	rsr.interrupt	a4	// a4 = mask of interrupts pending, including those disabled
	rsr.intenable	a2	// a2 = mask of interrupts enabled
	movi	a3, Xthal_vpri_state
	and	a4, a2, a4	// a4 = mask of enabled interrupts pending
	beqz	a4, gipfail	// if none (can happen for spurious level-triggered interrupts,
				//  or ???), we're done

	//  4-step binary search: after each step a5 advances by the step's
	//  stride iff the pending set intersects the upper half of that range.
	mov	a5, a3
	l32i	a2, a5, XTHAL_VPRI_RESOLVEMAP_OFS+8*4
	bnone	a2, a4, 1f
	addi	a5, a5, 8*4
1:	l32i	a2, a5, XTHAL_VPRI_RESOLVEMAP_OFS+4*4
	bnone	a2, a4, 1f
	addi	a5, a5, 4*4
1:	l32i	a2, a5, XTHAL_VPRI_RESOLVEMAP_OFS+2*4
	bnone	a2, a4, 1f
	addi	a5, a5, 2*4
1:	l32i	a2, a5, XTHAL_VPRI_RESOLVEMAP_OFS+1*4
	bnone	a2, a4, 1f
	addi	a5, a5, 1*4
1:

#  if 0
	// (alternative branch-free search using movnez; never enabled)
	a5 = address of map ...
	l32i	a2, a5, XTHAL_VPRI_RESOLVEMAP_OFS+8*4
	addi	a?, a5, 8*4
	and	a2, a2, a4
	movnez	a5, a?, a2
	l32i	a2, a5, XTHAL_VPRI_RESOLVEMAP_OFS+4*4
	addi	a?, a5, 4*4
	and	a2, a2, a4
	movnez	a5, a?, a2
	l32i	a2, a5, XTHAL_VPRI_RESOLVEMAP_OFS+2*4
	addi	a?, a5, 2*4
	and	a2, a2, a4
	movnez	a5, a?, a2
	l32i	a2, a5, XTHAL_VPRI_RESOLVEMAP_OFS+1*4
	addi	a?, a5, 1*4
	and	a2, a2, a4
	movnez	a5, a?, a2
#  endif

	//  Here:
	//	a3 = Xthal_vpri_state
	//	a5 = Xthal_vpri_state + softpri*4
	//	a4 = mask of enabled interrupts pending
	//	a2,a6 = available

	//  Lock interrupts during virtual priority data structure transaction:
	//rsil	a6, 1			// set PS.INTLEVEL = 1 (a6 ignored)
	//	a2,a6 = available

	//  The highest priority interrupt(s) in a4 is at softpri = (a5-a3) / 4.
	//  So interrupts in enablemap[1][softpri] are not in a4 (they are higher priority).
	//  The set of interrupts at softpri are:
	//	enablemap[1][softpri-1] - enablemap[1][softpri]
	//  So and'ing a4 with enablemap[1][softpri - 1] will give us
	//  the set of interrupts pending at the highest soft priority.
	//
	l32i	a2, a5, XTHAL_VPRI_ENABLEMAP_OFS + 16*4 - 4	// get enablemap[1][softpri-1]
	and	a4, a2, a4		// only keep interrupts of highest pri (softpri)

	//  a4 now has mask of pending interrupts at highest ready level (new vpri)

	//  Update INTENABLE for this new virtual priority
	l32i	a2, a5, XTHAL_VPRI_ENABLEMAP_OFS + 16*4	// get vpri-specific mask = enablemap[1][softpri]
	l32i	a6, a3, XTHAL_VPRI_ENABLED_OFS		// get global mask
	sub	a5, a5, a3		// a5 = softpri * 4 (for below; here for efficiency)
	and	a2, a2, a6				// and together
	wsr.intenable	a2		// disable interrupts at or below new vpri
	//	a2,a6 = available

	//  Update new virtual priority:
	l8ui	a6, a3, XTHAL_VPRI_VPRI_OFS		// get old vpri (returned)
	srli	a5, a5, 2		// a5 = softpri  (0..15)
	addi	a5, a5, 0x10		// a5 = 0x10 + softpri = new virtual priority
	s8i	a5, a3, XTHAL_VPRI_VPRI_OFS		// store new vpri (returned)

	//  Undo the temporary lock (if was at PS.INTLEVEL > 1):
	//rsil	a2, 1

	mov	a3, a4		// save for the caller (in case it wants it?)

	//  Choose one of the set of highest-vpri pending interrupts to process.
	//  For speed (and simplicity), use this simple two-instruction sequence
	//  to select the least significant bit set in a4.  This implies that
	//  interrupts with a lower interrupt number take precedence over those
	//  with a higher interrupt number (!!).
	//
	neg	a2, a4		// keep only the least-significant bit that is set...
	and	a4, a2, a4	// ... in a4  (a4 now has exactly one bit set)

	//  Software, edge-triggered, and write-error interrupts are cleared by writing to the
	//  INTCLEAR pseudo-reg (to clear relevant bits of the INTERRUPT register).
	//  To simplify interrupt handlers (so they avoid tracking which type of
	//  interrupt they handle and act accordingly), clear such interrupts here.
	//  To avoid race conditions, the clearing must occur *after* we undertake
	//  to process the interrupt, and *before* actually handling the interrupt.
	//  Interrupt handlers may additionally clear the interrupt themselves
	//  at appropriate points if needed to avoid unnecessary interrupts.
	//
#define CLEARABLE_INTLEVEL1_MASK	(XCHAL_INTLEVEL1_MASK & XCHAL_INTCLEARABLE_MASK)
#  if CLEARABLE_INTLEVEL1_MASK != 0
	//movi	a2, CLEARABLE_INTLEVEL1_MASK
	//and	a2, a2, a4
	//wsr.intclear	a2
	wsr.intclear	a4	// no effect if a4 not a software or edge-triggered or write-error interrupt
#  endif

	//  Convert the single-bit interrupt mask to an interrupt number.
	//  (ie. compute log2 using either the NSAU instruction or a binary search)

	find_ms_setbit	a2, a4, a2, 0	// a2 = bit index of the single bit set in a4 (0..31)
				//  (ms == ls bit here since a4 was reduced to one bit above)
				// NOTE: assumes a4 != 0 (otherwise a2 is undefined[?])

	//	a2 has vector number (0..31)

	//rsr.excsave1	a4
	ret

gipfail:	// no enabled interrupt is pending: report -1 / empty mask, vpri unchanged
	l8ui	a6, a3, XTHAL_VPRI_VPRI_OFS		// get old vpri
	mov	a5, a6					// is also new vpri (unchanged)
# else /* XCHAL_HAVE_INTERRUPTS */
	//  No interrupts configured!
	movi	a5, 0		// return zero new vpri
	movi	a6, 0		// return zero old vpri
# endif /* XCHAL_HAVE_INTERRUPTS */
	movi	a2, -1		// return bogus vector number (eg. can be quickly tested for negative)
	movi	a3, 0		// return zero bitmask of interrupts pending
	ret
	endfunc
568
569// -----------------------------------------------------------------
570
571#endif
572
573#if defined(__SPLIT__vpri_lock) || \
574    defined(__SPLIT__vpri_lock_nw)
575
// void xthal_vpri_lock()
//
// Used internally by the Core HAL to block interrupts of higher or equal
// priority than Xthal_vpri_locklevel during virtual interrupt operations.
// If the current vpri is already >= locklevel, INTENABLE is left alone.
// Runs briefly at PS.INTLEVEL=1 to access the vpri structure atomically.
//
DECLFUNC(xthal_vpri_lock)
	abi_entry
# if XCHAL_HAVE_INTERRUPTS
	rsil	a6, 1				// save a6 = PS, set PS.INTLEVEL = 1

	//     if( Xthal_vpri_level < Xthal_vpri_locklevel )
	//
	movi	a2, Xthal_vpri_state		// a2 := address of global var. Xthal_vpri_state
	//interlock
	l8ui	a3, a2, XTHAL_VPRI_VPRI_OFS	// a3 := Xthal_vpri_level == Xthal_vpri_state.vpri
	l8ui	a5, a2, XTHAL_VPRI_LOCKLEVEL_OFS  // a5 := Xthal_vpri_locklevel
	l32i	a4, a2, XTHAL_VPRI_ENABLED_OFS	// a4 := Xthal_vpri_enabled
	bgeu	a3, a5, xthal_vpri_lock_done	// already at/above locklevel: nothing to mask

	//  xthal_set_intenable( Xthal_vpri_enablemap[0][Xthal_vpri_locklevel] & Xthal_vpri_enabled );
	//
	addx4	a3, a5, a2			// a3 := a2 + a5*4  (index into enablemap[] array)
	l32i	a3, a3, XTHAL_VPRI_ENABLEMAP_OFS // a3 := Xthal_vpri_enablemap[0][Xthal_vpri_locklevel]
	//interlock
	and	a2, a4, a3
  	wsr.intenable	a2

xthal_vpri_lock_done:
	wsr.ps	a6				// restore PS.INTLEVEL
	rsync
# endif
	abi_return
	endfunc
609
610#endif
611
612#if defined(__SPLIT__vpri_unlock) || \
613    defined(__SPLIT__vpri_unlock_nw)
614
// void xthal_vpri_unlock(void)
//
// Enable interrupts according to the current virtual interrupt priority.
// This effectively "unlocks" interrupts disabled by xthal_vpri_lock()
// (assuming the virtual interrupt priority hasn't changed).
// No INTLEVEL raise is needed here: a single INTENABLE write computed
// from the current vpri is race-free (see note below).
//
DECLFUNC(xthal_vpri_unlock)
	abi_entry
# if XCHAL_HAVE_INTERRUPTS
        //
	//  This should be free of race-conditions.
	//
        //  xthal_set_intenable( Xthal_vpri_enablemap[0][Xthal_vpri_level] & Xthal_vpri_enabled );
	//
	movi	a2, Xthal_vpri_state		 // a2 := address of global var. Xthal_vpri_state
	//interlock
	l8ui	a3, a2, XTHAL_VPRI_VPRI_OFS      // a3 := Xthal_vpri_level == Xthal_vpri_state.vpri
	l32i	a4, a2, XTHAL_VPRI_ENABLED_OFS	 // a4 := Xthal_vpri_enabled
	addx4	a3, a3, a2                       // a3 := a2 + a3*4  (index into enablemap[] array)
	l32i	a3, a3, XTHAL_VPRI_ENABLEMAP_OFS // a3 := Xthal_vpri_enablemap[0][Xthal_vpri_level]
	//interlock
	and	a2, a4, a3
  	wsr.intenable	a2
# endif
	abi_return
	endfunc
641
642#endif /*SPLIT*/
643
644