/*
 * Copyright (c) 2016 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <offsets_short.h>
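
/* offsets_short.h provides the generated _kernel_offset_to_* and
 * _thread_offset_to_* constants used below; they are derived at build
 * time from the kernel's C struct layouts.
 */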

/* exports */
GTEXT(arch_swap)
GTEXT(z_thread_entry_wrapper)

/* imports */
GTEXT(_k_neg_eagain)

/* unsigned int arch_swap(unsigned int key)
 *
 * Always called with interrupts locked
 */
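/* In rough C-like pseudocode, the swap below does the following. This is
 * a sketch only: the helper and field names are illustrative, not the
 * actual kernel API.
 *
 *	unsigned int arch_swap(unsigned int key)
 *	{
 *		save_callee_saved_regs(_kernel.current);
 *		_kernel.current->key = key;
 *		_kernel.current->retval = -EAGAIN;
 *		_kernel.current = _kernel.ready_q.cache;
 *		restore_callee_saved_regs(_kernel.current);
 *		irq_unlock(_kernel.current->key);
 *		return _kernel.current->retval;
 *	}
 */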
SECTION_FUNC(exception.other, arch_swap)

#if defined(CONFIG_INSTRUMENT_THREAD_SWITCHING)
	/* r4 holds the 'key' argument and is caller-saved, so preserve it
	 * (along with ra and fp) across the call.
	 */
	addi sp, sp, -12
	stw ra, 8(sp)
	stw fp, 4(sp)
	stw r4, 0(sp)

	call z_thread_mark_switched_out

	ldw r4, 0(sp)
	ldw fp, 4(sp)
	ldw ra, 8(sp)
	addi sp, sp, 12
#endif

	/* Get a reference to _kernel in r10 */
	movhi r10, %hi(_kernel)
	ori   r10, r10, %lo(_kernel)

	/* Get the pointer to kernel->current */
	ldw  r11, _kernel_offset_to_current(r10)

	/* Store all the callee-saved registers. We got here either via an
	 * exception or via a cooperative invocation of arch_swap() from C
	 * code, so in both cases the caller-saved registers have already
	 * been saved, by the exception asm or by the calling C code.
	 */
	stw r16, _thread_offset_to_r16(r11)
	stw r17, _thread_offset_to_r17(r11)
	stw r18, _thread_offset_to_r18(r11)
	stw r19, _thread_offset_to_r19(r11)
	stw r20, _thread_offset_to_r20(r11)
	stw r21, _thread_offset_to_r21(r11)
	stw r22, _thread_offset_to_r22(r11)
	stw r23, _thread_offset_to_r23(r11)
	stw r28, _thread_offset_to_r28(r11)
	stw ra,  _thread_offset_to_ra(r11)
	stw sp,  _thread_offset_to_sp(r11)

	/* r4 has the 'key' argument which is the result of irq_lock()
	 * before this was called
	 */
	stw r4, _thread_offset_to_key(r11)

	/* Populate default return value */
	movhi r5, %hi(_k_neg_eagain)
	ori   r5, r5, %lo(_k_neg_eagain)
	ldw   r4, (r5)
	stw   r4, _thread_offset_to_retval(r11)
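
	/* _k_neg_eagain is a word-sized constant defined on the C side,
	 * presumably along the lines of:
	 *
	 *	const int _k_neg_eagain = -EAGAIN;
	 *
	 * Loading it from memory avoids hardcoding the errno value here.
	 */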

	/* Get the cached thread to run; at this point r2 points to the
	 * next thread to be swapped in
	 */
	ldw   r2, _kernel_offset_to_ready_q_cache(r10)

	/* the thread to be swapped in is now the current thread */
	stw   r2, _kernel_offset_to_current(r10)

	/* Restore callee-saved registers and switch to the incoming
	 * thread's stack
	 */
	ldw r16, _thread_offset_to_r16(r2)
	ldw r17, _thread_offset_to_r17(r2)
	ldw r18, _thread_offset_to_r18(r2)
	ldw r19, _thread_offset_to_r19(r2)
	ldw r20, _thread_offset_to_r20(r2)
	ldw r21, _thread_offset_to_r21(r2)
	ldw r22, _thread_offset_to_r22(r2)
	ldw r23, _thread_offset_to_r23(r2)
	ldw r28, _thread_offset_to_r28(r2)
	ldw ra,  _thread_offset_to_ra(r2)
	ldw sp,  _thread_offset_to_sp(r2)

	/* We need to irq_unlock() with the key that the incoming thread
	 * supplied as the argument to its own call to arch_swap(). Fetch it
	 * from the saved context.
	 */
	ldw r3, _thread_offset_to_key(r2)

	/*
	 * Load the return value into r2 (the return value register). It is
	 * -EAGAIN unless someone previously called
	 * arch_thread_return_value_set(). Do this before we potentially
	 * unlock interrupts.
	 */
	ldw r2, _thread_offset_to_retval(r2)

	/* Now do the irq_unlock using the key in r3 */
#if (ALT_CPU_NUM_OF_SHADOW_REG_SETS > 0) || \
		(defined ALT_CPU_EIC_PRESENT) || \
		(defined ALT_CPU_MMU_PRESENT) || \
		(defined ALT_CPU_MPU_PRESENT)
	andi r3, r3, NIOS2_STATUS_PIE_MSK
	beq r3, zero, no_unlock
	rdctl r3, status
	ori r3, r3, NIOS2_STATUS_PIE_MSK
	wrctl status, r3

no_unlock:
#else
	wrctl status, r3
#endif
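
	/* On CPUs with extra live status fields (shadow register sets, EIC,
	 * MMU, MPU), the saved key can't simply be written back to status,
	 * so the path above is roughly, in C-like terms (a sketch):
	 *
	 *	if (key & NIOS2_STATUS_PIE_MSK) {
	 *		wrctl(status, rdctl(status) | NIOS2_STATUS_PIE_MSK);
	 *	}
	 *
	 * i.e. interrupts are re-enabled only if they were enabled when the
	 * incoming thread originally took its lock. On simpler CPUs, PIE is
	 * the only relevant bit, so writing the key back directly suffices.
	 */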

#if defined(CONFIG_INSTRUMENT_THREAD_SWITCHING)
	/* Here we also need to preserve r2 and r3, which hold the return
	 * values, across the call.
	 */
	addi sp, sp, -20
	stw ra, 16(sp)
	stw fp, 12(sp)
	stw r4, 8(sp)
	stw r3, 4(sp)
	stw r2, 0(sp)

	call z_thread_mark_switched_in

	ldw r2, 0(sp)
	ldw r3, 4(sp)
	ldw r4, 8(sp)
	ldw fp, 12(sp)
	ldw ra, 16(sp)
	addi sp, sp, 20
#endif
	ret
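
/* Illustrative caller-side usage (a sketch, not the exact kernel code):
 *
 *	unsigned int key = irq_lock();
 *	... scheduler selects the next thread into _kernel.ready_q.cache ...
 *	ret = arch_swap(key);
 *
 * Control returns from arch_swap() only after some other thread swaps
 * this one back in.
 */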


/* void z_thread_entry_wrapper(void)
 */
SECTION_FUNC(TEXT, z_thread_entry_wrapper)
	/* This all corresponds to struct init_stack_frame defined in
	 * thread.c. We need to take this stuff off the stack and put
	 * it in the appropriate registers
	 */
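
	/* The frame is set up at thread creation time by the code in
	 * thread.c; conceptually it looks something like this sketch, with
	 * each field forwarded to the register noted:
	 *
	 *	struct init_stack_frame {
	 *		k_thread_entry_t entry_point;   passed in r4
	 *		void *arg1;                     passed in r5
	 *		void *arg2;                     passed in r6
	 *		void *arg3;                     passed in r7
	 *	};
	 *
	 * See thread.c for the authoritative definition.
	 */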

	/* Can't return from here, just put NULL in ra */
	movi ra, 0

	/* Calling convention has first 4 arguments in registers r4-r7. */
	ldw  r4, 0(sp)
	ldw  r5, 4(sp)
	ldw  r6, 8(sp)
	ldw  r7, 12(sp)

	/* pop all the stuff that we just loaded into registers */
	addi sp, sp, 16

	call z_thread_entry
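
	/* z_thread_entry() is declared FUNC_NORETURN; execution never comes
	 * back here, which is why ra was zeroed above.
	 */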