/* swap_macros.h - helper macros for context switch */

/*
 * Copyright (c) 2014 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_ARCH_ARC_INCLUDE_SWAP_MACROS_H_
#define ZEPHYR_ARCH_ARC_INCLUDE_SWAP_MACROS_H_

#include <zephyr/kernel_structs.h>
#include <offsets_short.h>
#include <zephyr/toolchain.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/arch/arc/tool-compat.h>
#include <zephyr/arch/arc/asm-compat/assembler.h>
#include <zephyr/kernel.h>
#include "../core/dsp/swap_dsp_macros.h"

#ifdef _ASMLANGUAGE
/* save the callee-saved regs of the current thread (thread struct pointer in r2) */
.macro _save_callee_saved_regs

	SUBR sp, sp, ___callee_saved_stack_t_SIZEOF

	/* save regs on stack */
	STR r13, sp, ___callee_saved_stack_t_r13_OFFSET
	STR r14, sp, ___callee_saved_stack_t_r14_OFFSET
	STR r15, sp, ___callee_saved_stack_t_r15_OFFSET
	STR r16, sp, ___callee_saved_stack_t_r16_OFFSET
	STR r17, sp, ___callee_saved_stack_t_r17_OFFSET
	STR r18, sp, ___callee_saved_stack_t_r18_OFFSET
	STR r19, sp, ___callee_saved_stack_t_r19_OFFSET
	STR r20, sp, ___callee_saved_stack_t_r20_OFFSET
	STR r21, sp, ___callee_saved_stack_t_r21_OFFSET
	STR r22, sp, ___callee_saved_stack_t_r22_OFFSET
	STR r23, sp, ___callee_saved_stack_t_r23_OFFSET
	STR r24, sp, ___callee_saved_stack_t_r24_OFFSET
	STR r25, sp, ___callee_saved_stack_t_r25_OFFSET
	STR r26, sp, ___callee_saved_stack_t_r26_OFFSET
	STR fp,  sp, ___callee_saved_stack_t_fp_OFFSET

#ifdef CONFIG_USERSPACE
#ifdef CONFIG_ARC_HAS_SECURE
#ifdef CONFIG_ARC_SECURE_FIRMWARE
	lr r13, [_ARC_V2_SEC_U_SP]
	st_s r13, [sp, ___callee_saved_stack_t_user_sp_OFFSET]
	lr r13, [_ARC_V2_SEC_K_SP]
	st_s r13, [sp, ___callee_saved_stack_t_kernel_sp_OFFSET]
#else
	lr r13, [_ARC_V2_USER_SP]
	st_s r13, [sp, ___callee_saved_stack_t_user_sp_OFFSET]
	lr r13, [_ARC_V2_KERNEL_SP]
	st_s r13, [sp, ___callee_saved_stack_t_kernel_sp_OFFSET]
#endif /* CONFIG_ARC_SECURE_FIRMWARE */
#else
	lr r13, [_ARC_V2_USER_SP]
	st_s r13, [sp, ___callee_saved_stack_t_user_sp_OFFSET]
#endif
#endif
	STR r30, sp, ___callee_saved_stack_t_r30_OFFSET

#ifdef CONFIG_ARC_HAS_ACCL_REGS
	STR r58, sp, ___callee_saved_stack_t_r58_OFFSET
#ifndef CONFIG_64BIT
	STR r59, sp, ___callee_saved_stack_t_r59_OFFSET
#endif /* !CONFIG_64BIT */
#endif

#ifdef CONFIG_FPU_SHARING
	ld_s r13, [r2, ___thread_base_t_user_options_OFFSET]
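	/* skip the FPU context save unless the thread was created with the
	 * K_FP_REGS option (tested via its bit index, K_FP_IDX)
	 */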
	bbit0 r13, K_FP_IDX, fpu_skip_save
	lr r13, [_ARC_V2_FPU_STATUS]
	st_s r13, [sp, ___callee_saved_stack_t_fpu_status_OFFSET]
	lr r13, [_ARC_V2_FPU_CTRL]
	st_s r13, [sp, ___callee_saved_stack_t_fpu_ctrl_OFFSET]

#ifdef CONFIG_FP_FPU_DA
	lr r13, [_ARC_V2_FPU_DPFP1L]
	st_s r13, [sp, ___callee_saved_stack_t_dpfp1l_OFFSET]
	lr r13, [_ARC_V2_FPU_DPFP1H]
	st_s r13, [sp, ___callee_saved_stack_t_dpfp1h_OFFSET]
	lr r13, [_ARC_V2_FPU_DPFP2L]
	st_s r13, [sp, ___callee_saved_stack_t_dpfp2l_OFFSET]
	lr r13, [_ARC_V2_FPU_DPFP2H]
	st_s r13, [sp, ___callee_saved_stack_t_dpfp2h_OFFSET]
#endif
#endif
fpu_skip_save:
	_save_dsp_regs
	/* save stack pointer in struct k_thread */
	STR sp, r2, _thread_offset_to_sp
.endm

/* load the callee-saved regs of the thread whose struct pointer is in r2 */
.macro _load_callee_saved_regs
	/* restore stack pointer from struct k_thread */
	LDR sp, r2, _thread_offset_to_sp

#ifdef CONFIG_ARC_HAS_ACCL_REGS
	LDR r58, sp, ___callee_saved_stack_t_r58_OFFSET
#ifndef CONFIG_64BIT
	LDR r59, sp, ___callee_saved_stack_t_r59_OFFSET
#endif /* !CONFIG_64BIT */
#endif

#ifdef CONFIG_FPU_SHARING
	ld_s r13, [r2, ___thread_base_t_user_options_OFFSET]
	bbit0 r13, K_FP_IDX, fpu_skip_load

	ld_s r13, [sp, ___callee_saved_stack_t_fpu_status_OFFSET]
	sr r13, [_ARC_V2_FPU_STATUS]
	ld_s r13, [sp, ___callee_saved_stack_t_fpu_ctrl_OFFSET]
	sr r13, [_ARC_V2_FPU_CTRL]

#ifdef CONFIG_FP_FPU_DA
	ld_s r13, [sp, ___callee_saved_stack_t_dpfp1l_OFFSET]
	sr r13, [_ARC_V2_FPU_DPFP1L]
	ld_s r13, [sp, ___callee_saved_stack_t_dpfp1h_OFFSET]
	sr r13, [_ARC_V2_FPU_DPFP1H]
	ld_s r13, [sp, ___callee_saved_stack_t_dpfp2l_OFFSET]
	sr r13, [_ARC_V2_FPU_DPFP2L]
	ld_s r13, [sp, ___callee_saved_stack_t_dpfp2h_OFFSET]
	sr r13, [_ARC_V2_FPU_DPFP2H]
#endif
#endif
fpu_skip_load:
	_load_dsp_regs
#ifdef CONFIG_USERSPACE
#ifdef CONFIG_ARC_HAS_SECURE
#ifdef CONFIG_ARC_SECURE_FIRMWARE
	ld_s r13, [sp, ___callee_saved_stack_t_user_sp_OFFSET]
	sr r13, [_ARC_V2_SEC_U_SP]
	ld_s r13, [sp, ___callee_saved_stack_t_kernel_sp_OFFSET]
	sr r13, [_ARC_V2_SEC_K_SP]
#else
	ld_s r13, [sp, ___callee_saved_stack_t_user_sp_OFFSET]
	sr r13, [_ARC_V2_USER_SP]
	ld_s r13, [sp, ___callee_saved_stack_t_kernel_sp_OFFSET]
	sr r13, [_ARC_V2_KERNEL_SP]
#endif /* CONFIG_ARC_SECURE_FIRMWARE */
#else
	ld_s r13, [sp, ___callee_saved_stack_t_user_sp_OFFSET]
	sr r13, [_ARC_V2_USER_SP]
#endif
#endif

	LDR r13, sp, ___callee_saved_stack_t_r13_OFFSET
	LDR r14, sp, ___callee_saved_stack_t_r14_OFFSET
	LDR r15, sp, ___callee_saved_stack_t_r15_OFFSET
	LDR r16, sp, ___callee_saved_stack_t_r16_OFFSET
	LDR r17, sp, ___callee_saved_stack_t_r17_OFFSET
	LDR r18, sp, ___callee_saved_stack_t_r18_OFFSET
	LDR r19, sp, ___callee_saved_stack_t_r19_OFFSET
	LDR r20, sp, ___callee_saved_stack_t_r20_OFFSET
	LDR r21, sp, ___callee_saved_stack_t_r21_OFFSET
	LDR r22, sp, ___callee_saved_stack_t_r22_OFFSET
	LDR r23, sp, ___callee_saved_stack_t_r23_OFFSET
	LDR r24, sp, ___callee_saved_stack_t_r24_OFFSET
	LDR r25, sp, ___callee_saved_stack_t_r25_OFFSET
	LDR r26, sp, ___callee_saved_stack_t_r26_OFFSET
	LDR fp,  sp, ___callee_saved_stack_t_fp_OFFSET
	LDR r30, sp, ___callee_saved_stack_t_r30_OFFSET

	ADDR sp, sp, ___callee_saved_stack_t_SIZEOF

.endm

/* discard callee regs */
.macro _discard_callee_saved_regs
	ADDR sp, sp, ___callee_saved_stack_t_SIZEOF
.endm

/*
 * Must be called with interrupts locked or in P0.
 * Upon exit, sp will be pointing to the stack frame.
 */
.macro _create_irq_stack_frame

	SUBR sp, sp, ___isf_t_SIZEOF

	STR blink, sp, ___isf_t_blink_OFFSET

	/* store these right away so we can use them if needed */

	STR r13, sp, ___isf_t_r13_OFFSET
	STR r12, sp, ___isf_t_r12_OFFSET
	STR r11, sp, ___isf_t_r11_OFFSET
	STR r10, sp, ___isf_t_r10_OFFSET
	STR r9,  sp, ___isf_t_r9_OFFSET
	STR r8,  sp, ___isf_t_r8_OFFSET
	STR r7,  sp, ___isf_t_r7_OFFSET
	STR r6,  sp, ___isf_t_r6_OFFSET
	STR r5,  sp, ___isf_t_r5_OFFSET
	STR r4,  sp, ___isf_t_r4_OFFSET
	STR r3,  sp, ___isf_t_r3_OFFSET
	STR r2,  sp, ___isf_t_r2_OFFSET
	STR r1,  sp, ___isf_t_r1_OFFSET
	STR r0,  sp, ___isf_t_r0_OFFSET

#ifdef CONFIG_ARC_HAS_ZOL
	MOVR r0, lp_count
	STR r0, sp, ___isf_t_lp_count_OFFSET
	LRR r1, [_ARC_V2_LP_START]
	LRR r0, [_ARC_V2_LP_END]
	STR r1, sp, ___isf_t_lp_start_OFFSET
	STR r0, sp, ___isf_t_lp_end_OFFSET
#endif /* CONFIG_ARC_HAS_ZOL */

#ifdef CONFIG_CODE_DENSITY
	lr r1, [_ARC_V2_JLI_BASE]
	lr r0, [_ARC_V2_LDI_BASE]
	lr r2, [_ARC_V2_EI_BASE]
	st_s r1, [sp, ___isf_t_jli_base_OFFSET]
	st_s r0, [sp, ___isf_t_ldi_base_OFFSET]
	st_s r2, [sp, ___isf_t_ei_base_OFFSET]
#endif

.endm

/*
 * Must be called with interrupts locked or in P0.
 * sp must be pointing to the stack frame.
 */
.macro _pop_irq_stack_frame

	LDR blink, sp, ___isf_t_blink_OFFSET

#ifdef CONFIG_CODE_DENSITY
	ld_s r1, [sp, ___isf_t_jli_base_OFFSET]
	ld_s r0, [sp, ___isf_t_ldi_base_OFFSET]
	ld_s r2, [sp, ___isf_t_ei_base_OFFSET]
	sr r1, [_ARC_V2_JLI_BASE]
	sr r0, [_ARC_V2_LDI_BASE]
	sr r2, [_ARC_V2_EI_BASE]
#endif

#ifdef CONFIG_ARC_HAS_ZOL
	LDR r0, sp, ___isf_t_lp_count_OFFSET
	MOVR lp_count, r0
	LDR r1, sp, ___isf_t_lp_start_OFFSET
	LDR r0, sp, ___isf_t_lp_end_OFFSET
	SRR r1, [_ARC_V2_LP_START]
	SRR r0, [_ARC_V2_LP_END]
#endif /* CONFIG_ARC_HAS_ZOL */

	LDR r13, sp, ___isf_t_r13_OFFSET
	LDR r12, sp, ___isf_t_r12_OFFSET
	LDR r11, sp, ___isf_t_r11_OFFSET
	LDR r10, sp, ___isf_t_r10_OFFSET
	LDR r9,  sp, ___isf_t_r9_OFFSET
	LDR r8,  sp, ___isf_t_r8_OFFSET
	LDR r7,  sp, ___isf_t_r7_OFFSET
	LDR r6,  sp, ___isf_t_r6_OFFSET
	LDR r5,  sp, ___isf_t_r5_OFFSET
	LDR r4,  sp, ___isf_t_r4_OFFSET
	LDR r3,  sp, ___isf_t_r3_OFFSET
	LDR r2,  sp, ___isf_t_r2_OFFSET
	LDR r1,  sp, ___isf_t_r1_OFFSET
	LDR r0,  sp, ___isf_t_r0_OFFSET

	/*
	 * All gprs have been reloaded, the only one that is still usable is
	 * ilink.
	 *
	 * The pc and status32 values will still be on the stack. We cannot
	 * pop them yet because the callers of _pop_irq_stack_frame must reload
	 * status32 differently depending on the execution context they are
	 * running in (arch_switch(), firq or exception).
	 */
	ADDR sp, sp, ___isf_t_SIZEOF

.endm

/*
 * To use this macro, r2 must hold the thread struct pointer, i.e.
 * _kernel.current. r3 is used as a scratch register.
 */
.macro _load_stack_check_regs
#if defined(CONFIG_ARC_SECURE_FIRMWARE)
	ld r3, [r2, _thread_offset_to_k_stack_base]
	sr r3, [_ARC_V2_S_KSTACK_BASE]
	ld r3, [r2, _thread_offset_to_k_stack_top]
	sr r3, [_ARC_V2_S_KSTACK_TOP]
#ifdef CONFIG_USERSPACE
	ld r3, [r2, _thread_offset_to_u_stack_base]
	sr r3, [_ARC_V2_S_USTACK_BASE]
	ld r3, [r2, _thread_offset_to_u_stack_top]
	sr r3, [_ARC_V2_S_USTACK_TOP]
#endif
#else /* !CONFIG_ARC_SECURE_FIRMWARE */
	ld r3, [r2, _thread_offset_to_k_stack_base]
	sr r3, [_ARC_V2_KSTACK_BASE]
	ld r3, [r2, _thread_offset_to_k_stack_top]
	sr r3, [_ARC_V2_KSTACK_TOP]
#ifdef CONFIG_USERSPACE
	ld r3, [r2, _thread_offset_to_u_stack_base]
	sr r3, [_ARC_V2_USTACK_BASE]
	ld r3, [r2, _thread_offset_to_u_stack_top]
	sr r3, [_ARC_V2_USTACK_TOP]
#endif
#endif /* CONFIG_ARC_SECURE_FIRMWARE */
.endm

/* increase the interrupt nest counter, then check whether the counter == 1,
 * i.e. whether we just entered the outermost interrupt;
 * the result is left in the Z (EQ) flag of STATUS32
 * two scratch regs are needed
 */
.macro _check_and_inc_int_nest_counter, reg1, reg2
#ifdef CONFIG_SMP
	/* get pointer to _cpu_t of this CPU */
	_get_cpu_id MACRO_ARG(reg1)
	ASLR MACRO_ARG(reg1), MACRO_ARG(reg1), ARC_REGSHIFT
	LDR MACRO_ARG(reg1), MACRO_ARG(reg1), _curr_cpu
	/* _cpu_t.nested is 32 bit regardless of platform bitness */
	ld MACRO_ARG(reg2), [MACRO_ARG(reg1), ___cpu_t_nested_OFFSET]
#else
	MOVR MACRO_ARG(reg1), _kernel
	/* z_kernel.nested is 32 bit regardless of platform bitness */
	ld MACRO_ARG(reg2), [MACRO_ARG(reg1), _kernel_offset_to_nested]
#endif
	add MACRO_ARG(reg2), MACRO_ARG(reg2), 1
#ifdef CONFIG_SMP
	st MACRO_ARG(reg2), [MACRO_ARG(reg1), ___cpu_t_nested_OFFSET]
#else
	st MACRO_ARG(reg2), [MACRO_ARG(reg1), _kernel_offset_to_nested]
#endif
	cmp MACRO_ARG(reg2), 1
.endm
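
/* a hypothetical call site sketch (register choice is illustrative):
 * Z is set only for the outermost interrupt, so a caller can branch to a
 * nested-interrupt path with something like:
 *
 *	_check_and_inc_int_nest_counter r0, r1
 *	bne nest_handling	; counter != 1 -> nested interrupt
 */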

/* decrease the interrupt nest counter;
 * while the counter > 0 the interrupt stack is in use,
 * otherwise it is not
 */
.macro _dec_int_nest_counter, reg1, reg2
#ifdef CONFIG_SMP
	/* get pointer to _cpu_t of this CPU */
	_get_cpu_id MACRO_ARG(reg1)
	ASLR MACRO_ARG(reg1), MACRO_ARG(reg1), ARC_REGSHIFT
	LDR MACRO_ARG(reg1), MACRO_ARG(reg1), _curr_cpu
	/* _cpu_t.nested is 32 bit regardless of platform bitness */
	ld MACRO_ARG(reg2), [MACRO_ARG(reg1), ___cpu_t_nested_OFFSET]
#else
	MOVR MACRO_ARG(reg1), _kernel
	/* z_kernel.nested is 32 bit regardless of platform bitness */
	ld MACRO_ARG(reg2), [MACRO_ARG(reg1), _kernel_offset_to_nested]
#endif
	sub MACRO_ARG(reg2), MACRO_ARG(reg2), 1
#ifdef CONFIG_SMP
	st MACRO_ARG(reg2), [MACRO_ARG(reg1), ___cpu_t_nested_OFFSET]
#else
	st MACRO_ARG(reg2), [MACRO_ARG(reg1), _kernel_offset_to_nested]
#endif
.endm

/* If multiple bits in IRQ_ACT are set, i.e. the last set bit != the first
 * set bit, we are in a nested interrupt. The result is left in the Z (EQ)
 * flag of STATUS32 (Z set means not nested);
 * two scratch regs are needed
 */
.macro _check_nest_int_by_irq_act, reg1, reg2
	lr MACRO_ARG(reg1), [_ARC_V2_AUX_IRQ_ACT]
#ifdef CONFIG_ARC_SECURE_FIRMWARE
	and MACRO_ARG(reg1), MACRO_ARG(reg1), ((1 << ARC_N_IRQ_START_LEVEL) - 1)
#else
	and MACRO_ARG(reg1), MACRO_ARG(reg1), 0xffff
#endif
	ffs MACRO_ARG(reg2), MACRO_ARG(reg1)
	fls MACRO_ARG(reg1), MACRO_ARG(reg1)
	cmp MACRO_ARG(reg1), MACRO_ARG(reg2)
.endm
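
/* an illustrative (hypothetical) call site, mirroring the nest counter check:
 *
 *	_check_nest_int_by_irq_act r0, r1
 *	bne nest_handling	; first set bit != last set bit -> nested
 */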


/* macro to get the ID of the current CPU
 * the result is placed in the 'reg' GPR
 */
.macro _get_cpu_id, reg
	LRR MACRO_ARG(reg), [_ARC_V2_IDENTITY]
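	/* extract the core number field from IDENTITY; the 0xe8 operand
	 * encodes the bit-field position and size for xbfu (assumed here to
	 * select the 8-bit ARCNUM field, IDENTITY[15:8])
	 */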
	xbfu MACRO_ARG(reg), MACRO_ARG(reg), 0xe8
.endm

/* macro to get the interrupt stack of the current CPU
 * the result is placed in the 'irq_sp' GPR
 */
.macro _get_curr_cpu_irq_stack, irq_sp
#ifdef CONFIG_SMP
	/* get pointer to _cpu_t of this CPU */
	_get_cpu_id MACRO_ARG(irq_sp)
	ASLR MACRO_ARG(irq_sp), MACRO_ARG(irq_sp), ARC_REGSHIFT
	LDR MACRO_ARG(irq_sp), MACRO_ARG(irq_sp), _curr_cpu
	/* get pointer to irq_stack itself */
	LDR MACRO_ARG(irq_sp), MACRO_ARG(irq_sp), ___cpu_t_irq_stack_OFFSET
#else
	MOVR MACRO_ARG(irq_sp), _kernel
	LDR MACRO_ARG(irq_sp), MACRO_ARG(irq_sp), _kernel_offset_to_irq_stack
#endif
.endm

/* macro to push an aux register onto the stack, using a GPR as intermediary */
.macro PUSHAX, reg, aux
	LRR MACRO_ARG(reg), [MACRO_ARG(aux)]
	PUSHR MACRO_ARG(reg)
.endm

/* macro to pop an aux register from the stack, using a GPR as intermediary */
.macro POPAX, reg, aux
	POPR MACRO_ARG(reg)
	SRR MACRO_ARG(reg), [MACRO_ARG(aux)]
.endm
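
/* usage sketch (illustrative): preserve an aux register around a code
 * region, with r13 as the intermediate GPR:
 *
 *	PUSHAX r13, _ARC_V2_LP_START
 *	...
 *	POPAX r13, _ARC_V2_LP_START
 */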


/* macro to store the old thread's callee-saved regs */
.macro _store_old_thread_callee_regs

	_save_callee_saved_regs
	/* Save the old thread into its switch handle, which is required by
	 * z_sched_switch_spin.
	 * NOTE: we shouldn't save anything related to the old thread context
	 * after this point!
	 * TODO: we should add an SMP write-after-write data memory barrier here,
	 * as we want all previous writes completed before setting the
	 * switch_handle, which is polled by other cores in z_sched_switch_spin
	 * in case of SMP. Though it's not likely that this issue will show up
	 * in the real world, as there is some gap between a remote core reading
	 * switch_handle and reading the rest of the data we've stored before.
	 */
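	/* a minimal sketch of that barrier, assuming an ARC HS core where the
	 * DMB instruction is available (operand semantics would need to be
	 * verified for the target core before enabling):
	 *
	 *	dmb 2	; order prior stores before the switch_handle store
	 */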
	STR r2, r2, ___thread_t_switch_handle_OFFSET
.endm

/* macro to store the old thread's callee-saved regs in interrupt context */
.macro _irq_store_old_thread_callee_regs
#if defined(CONFIG_USERSPACE)
/*
 * When USERSPACE is enabled, according to the ARCv2 ISA, SP is switched
 * automatically when an interrupt is taken in user mode, and this is
 * recorded in bit 31 (U bit) of IRQ_ACT. When the interrupt exits, SP is
 * switched back according to the U bit.
 *
 * We need to remember the user/kernel status of the interrupted thread;
 * it will be restored when the thread is switched back in.
 *
 */
	lr r1, [_ARC_V2_AUX_IRQ_ACT]
	and r3, r1, 0x80000000
	push_s r3

	bclr r1, r1, 31
	sr r1, [_ARC_V2_AUX_IRQ_ACT]
#endif
	_store_old_thread_callee_regs
.endm

/* macro to load the new thread's callee-saved regs */
.macro _load_new_thread_callee_regs
#ifdef CONFIG_ARC_STACK_CHECKING
	_load_stack_check_regs
#endif
	/*
	 * _load_callee_saved_regs expects the incoming thread in r2;
	 * it also restores the stack pointer.
	 */
	_load_callee_saved_regs

#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
	push_s r2
	bl configure_mpu_thread
	pop_s r2
#endif
	/* _thread_arch.relinquish_cause is 32 bit regardless of platform bitness */
	ld r3, [r2, _thread_offset_to_relinquish_cause]
.endm


/* when the switch to the thread was caused by cooperative scheduling, some status regs need to be set */
.macro _set_misc_regs_irq_switch_from_coop
#ifdef CONFIG_ARC_SECURE_FIRMWARE
	/* must return to secure mode, so set IRM bit to 1 */
	lr r0, [_ARC_V2_SEC_STAT]
	bset r0, r0, _ARC_V2_SEC_STAT_IRM_BIT
	sflag r0
#endif
.endm

/* when the switch to the thread was caused by an irq, some status regs need to be set */
.macro _set_misc_regs_irq_switch_from_irq
#if defined(CONFIG_USERSPACE)
/*
 * need to restore the user/kernel status of the interrupted thread
 */
	pop_s r3
	lr r2, [_ARC_V2_AUX_IRQ_ACT]
	or r2, r2, r3
	sr r2, [_ARC_V2_AUX_IRQ_ACT]
#endif

#ifdef CONFIG_ARC_SECURE_FIRMWARE
	/* the SEC_STAT.IRM bit needs to be restored here */
	pop_s r3
	sflag r3
#endif
.endm

/* macro to get next switch handle in assembly */
.macro _get_next_switch_handle
	PUSHR r2
	MOVR r0, sp
	bl z_arch_get_next_switch_handle
	POPR  r2
.endm

/* macro to disable stack checking in assembly; a GPR is needed
 * to do this
 */
.macro _disable_stack_checking, reg
#ifdef CONFIG_ARC_STACK_CHECKING
#ifdef CONFIG_ARC_SECURE_FIRMWARE
	lr MACRO_ARG(reg), [_ARC_V2_SEC_STAT]
	bclr MACRO_ARG(reg), MACRO_ARG(reg), _ARC_V2_SEC_STAT_SSC_BIT
	sflag MACRO_ARG(reg)

#else
	lr MACRO_ARG(reg), [_ARC_V2_STATUS32]
	bclr MACRO_ARG(reg), MACRO_ARG(reg), _ARC_V2_STATUS32_SC_BIT
	kflag MACRO_ARG(reg)
#endif
#endif
.endm

/* macro to enable stack checking in assembly; a GPR is needed
 * to do this
 */
.macro _enable_stack_checking, reg
#ifdef CONFIG_ARC_STACK_CHECKING
#ifdef CONFIG_ARC_SECURE_FIRMWARE
	lr MACRO_ARG(reg), [_ARC_V2_SEC_STAT]
	bset MACRO_ARG(reg), MACRO_ARG(reg), _ARC_V2_SEC_STAT_SSC_BIT
	sflag MACRO_ARG(reg)
#else
	lr MACRO_ARG(reg), [_ARC_V2_STATUS32]
	bset MACRO_ARG(reg), MACRO_ARG(reg), _ARC_V2_STATUS32_SC_BIT
	kflag MACRO_ARG(reg)
#endif
#endif
.endm


#define __arc_u9_max		(255)
#define __arc_u9_min		(-256)
#define __arc_ldst32_as_shift	2

/*
 * When accessing a bloated struct member, the offset can exceed the 9-bit
 * signed operand of the st instruction, so the _st32_huge_offset macro can
 * be used instead
 */
.macro _st32_huge_offset, d, s, offset, temp
	.if MACRO_ARG(offset) <= __arc_u9_max && MACRO_ARG(offset) >= __arc_u9_min
		st MACRO_ARG(d), [MACRO_ARG(s), MACRO_ARG(offset)]
	/* Technically we can optimize with .as both big positive and negative offsets here, but
	 * as we use only positive offsets in hand-written assembly code we keep only
	 * positive offset case here for simplicity.
	 */
	.elseif !(MACRO_ARG(offset) % (1 << __arc_ldst32_as_shift)) &&                             \
		MACRO_ARG(offset) <= (__arc_u9_max << __arc_ldst32_as_shift) &&                    \
		MACRO_ARG(offset) >= 0
		st.as MACRO_ARG(d), [MACRO_ARG(s), MACRO_ARG(offset) >> __arc_ldst32_as_shift]
	.else
		ADDR MACRO_ARG(temp), MACRO_ARG(s), MACRO_ARG(offset)
		st MACRO_ARG(d), [MACRO_ARG(temp)]
	.endif
.endm
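
/* illustrative (hypothetical) expansions of the three cases above:
 *
 *	_st32_huge_offset r0, r1, 100, r4  ->  st    r0, [r1, 100]
 *	_st32_huge_offset r0, r1, 600, r4  ->  st.as r0, [r1, 150]  ; 600 >> 2
 *	_st32_huge_offset r0, r1, 601, r4  ->  ADDR  r4, r1, 601
 *	                                       st    r0, [r4]
 *
 * (601 neither fits in s9 nor is 32-bit aligned, so the scratch reg is used)
 */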

#endif /* _ASMLANGUAGE */

#endif /* ZEPHYR_ARCH_ARC_INCLUDE_SWAP_MACROS_H_ */