/*
 * Copyright (c) 2019 Carlo Caione <ccaione@baylibre.com>
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/*
 * Thread context switching for ARM64 Cortex-A (AArch64)
 *
 * This module implements the routines necessary for thread context switching
 * on ARM64 Cortex-A (AArch64)
 */

#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <offsets_short.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/syscall.h>
#include "macro_priv.inc"

_ASM_FILE_PROLOGUE

/*
 * Routine to handle context switches
 *
 * This function is directly called either by _isr_wrapper() in case of
 * preemption, or arch_switch() in case of cooperative switching.
 *
 * void z_arm64_context_switch(struct k_thread *new, struct k_thread *old);
 */

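/*
 * For reference, the cooperative path reaches this routine through
 * arch_switch(). A minimal sketch of that caller is shown below (illustrative
 * only, not the exact Zephyr implementation): the "old" thread is recovered
 * from the location of its switch_handle field.
 *
 *	static inline void arch_switch(void *switch_to, void **switched_from)
 *	{
 *		struct k_thread *new = switch_to;
 *		struct k_thread *old = CONTAINER_OF(switched_from,
 *						    struct k_thread,
 *						    switch_handle);
 *
 *		z_arm64_context_switch(new, old);
 *	}
 */
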
GTEXT(z_arm64_context_switch)
SECTION_FUNC(TEXT, z_arm64_context_switch)

#ifndef CONFIG_ARM64_SAFE_EXCEPTION_STACK
	/* Save the current SP_EL0 */
	mrs	x4, sp_el0
#endif

	stp	x19, x20, [x1, #_thread_offset_to_callee_saved_x19_x20]
	stp	x21, x22, [x1, #_thread_offset_to_callee_saved_x21_x22]
	stp	x23, x24, [x1, #_thread_offset_to_callee_saved_x23_x24]
	stp	x25, x26, [x1, #_thread_offset_to_callee_saved_x25_x26]
	stp	x27, x28, [x1, #_thread_offset_to_callee_saved_x27_x28]
#ifndef CONFIG_ARM64_SAFE_EXCEPTION_STACK
	stp	x29, x4,  [x1, #_thread_offset_to_callee_saved_x29_sp_el0]
#else
	str	x29,      [x1, #_thread_offset_to_callee_saved_x29_sp_el0]
#endif

	/* Save the current SP_ELx and return address */
	mov	x4, sp
	stp	x4, lr, [x1, #_thread_offset_to_callee_saved_sp_elx_lr]

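	/*
	 * The paired offsets above assume a callee-saved area laid out
	 * roughly as follows (illustrative sketch; the authoritative struct
	 * is the one the _thread_offset_to_callee_saved_* symbols are
	 * generated from):
	 *
	 *	struct _callee_saved {
	 *		uint64_t x19, x20, x21, x22, x23, x24;
	 *		uint64_t x25, x26, x27, x28;
	 *		uint64_t x29, sp_el0;
	 *		uint64_t sp_elx, lr;
	 *	};
	 */
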
	/* save current thread's exception depth */
	mrs	x4, tpidrro_el0
	lsr	x2, x4, #TPIDRROEL0_EXC_SHIFT
	strb	w2, [x1, #_thread_offset_to_exception_depth]

	/* retrieve next thread's exception depth */
	ldrb	w2, [x0, #_thread_offset_to_exception_depth]
	bic	x4, x4, #TPIDRROEL0_EXC_DEPTH
	orr	x4, x4, x2, lsl #TPIDRROEL0_EXC_SHIFT
	msr	tpidrro_el0, x4

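	/*
	 * In C-like pseudocode the two blocks above amount to roughly the
	 * following (illustrative only; the exception_depth field name is
	 * assumed from the offset symbol):
	 *
	 *	old->arch.exception_depth =
	 *		tpidrro_el0 >> TPIDRROEL0_EXC_SHIFT;
	 *	tpidrro_el0 = (tpidrro_el0 & ~TPIDRROEL0_EXC_DEPTH) |
	 *		      ((uint64_t)new->arch.exception_depth <<
	 *		       TPIDRROEL0_EXC_SHIFT);
	 */
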
#ifdef CONFIG_FPU_SHARING
	/*
	 * Do this after tpidrro_el0 is updated with the new exception
	 * depth value, and before old->switch_handle is updated (making
	 * it available to be grabbed by another CPU) as we still use its
	 * stack.
	 */
	stp	x0, x1, [sp, #-16]!
	bl	z_arm64_fpu_thread_context_switch
	ldp	x0, x1, [sp], #16
#endif

	/* Save the old thread into its switch handle, as required by
	 * z_sched_switch_spin()
	 */
	str	x1, [x1, #___thread_t_switch_handle_OFFSET]

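	/*
	 * On SMP, z_sched_switch_spin() running on another CPU is expected
	 * to busy-wait on the store above, roughly as follows (illustrative
	 * sketch only):
	 *
	 *	while (old->switch_handle == NULL) {
	 *		arch_spin_relax();
	 *	}
	 */
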
#ifdef CONFIG_THREAD_LOCAL_STORAGE
	/* Grab the TLS pointer */
	ldr	x2, [x0, #_thread_offset_to_tls]

	/* Store in the "Thread ID" register.
	 * This register is used as a base pointer to all
	 * thread variables with offsets added by toolchain.
	 */
	msr	tpidr_el0, x2
#endif

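	/*
	 * To illustrate the comment above: a C thread-local access such as
	 *
	 *	__thread int counter;
	 *	counter++;
	 *
	 * is compiled (local-exec TLS model) into code of roughly this
	 * shape, with tpidr_el0 as the base (illustrative only):
	 *
	 *	mrs	x0, tpidr_el0
	 *	add	x0, x0, #:tprel_hi12:counter, lsl #12
	 *	add	x0, x0, #:tprel_lo12_nc:counter
	 *	ldr	w1, [x0]
	 *	add	w1, w1, #1
	 *	str	w1, [x0]
	 */
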
	ldp	x19, x20, [x0, #_thread_offset_to_callee_saved_x19_x20]
	ldp	x21, x22, [x0, #_thread_offset_to_callee_saved_x21_x22]
	ldp	x23, x24, [x0, #_thread_offset_to_callee_saved_x23_x24]
	ldp	x25, x26, [x0, #_thread_offset_to_callee_saved_x25_x26]
	ldp	x27, x28, [x0, #_thread_offset_to_callee_saved_x27_x28]
#ifndef CONFIG_ARM64_SAFE_EXCEPTION_STACK
	ldp	x29, x4,  [x0, #_thread_offset_to_callee_saved_x29_sp_el0]

	/* Restore SP_EL0 */
	msr	sp_el0, x4
#else
	ldr	x29,      [x0, #_thread_offset_to_callee_saved_x29_sp_el0]
#endif

	/* Restore SP_ELx and return address */
	ldp	x4, lr, [x0, #_thread_offset_to_callee_saved_sp_elx_lr]
	mov	sp, x4

#if defined(CONFIG_ARM64_SAFE_EXCEPTION_STACK)
	/* arch_curr_cpu()->arch.current_stack_limit = thread->arch.stack_limit */
	get_cpu	x4
	ldr	x2, [x0, #_thread_offset_to_stack_limit]
	str	x2, [x4, #_cpu_offset_to_current_stack_limit]
#endif

#if defined(CONFIG_USERSPACE) || defined(CONFIG_ARM64_STACK_PROTECTION)
	str	lr, [sp, #-16]!
	bl	z_arm64_swap_mem_domains
	ldr	lr, [sp], #16
#endif

#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
	str	lr, [sp, #-16]!
	bl	z_thread_mark_switched_in
	ldr	lr, [sp], #16
#endif

	/* Return to arch_switch() or _isr_wrapper() */
	ret

/*
 * Synchronous exceptions handler
 *
 * The service call (SVC) is used on the following occasions:
 * - Kernel oopses / runtime exceptions (_SVC_CALL_RUNTIME_EXCEPT)
 * - System calls from user mode (_SVC_CALL_SYSTEM_CALL, CONFIG_USERSPACE)
 * - IRQ offloading (_SVC_CALL_IRQ_OFFLOAD, CONFIG_IRQ_OFFLOAD)
 */

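/*
 * An SVC that lands in this handler is issued from C roughly as below
 * (illustrative sketch only, not the exact Zephyr implementation); the
 * immediate is recovered further down from the low bits of ESR_EL1.ISS:
 *
 *	__asm__ volatile("svc %[id]"
 *			 :
 *			 : [id] "i" (_SVC_CALL_RUNTIME_EXCEPT)
 *			 : "memory");
 */
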
GTEXT(z_arm64_sync_exc)
SECTION_FUNC(TEXT, z_arm64_sync_exc)

	mrs	x0, esr_el1
	lsr	x1, x0, #26	/* x1 = ESR_EL1.EC (exception class, bits [31:26]) */

#ifdef CONFIG_FPU_SHARING
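	/*
	 * EC 0x07 is a trapped access to SIMD/FP registers. With
	 * CONFIG_FPU_SHARING, FPU access is trapped so that the FPU context
	 * can be saved/restored on demand by z_arm64_fpu_trap() rather than
	 * on every context switch.
	 */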
	cmp	x1, #0x07 /* Access to SIMD or floating-point */
	bne	1f
	mov	x0, sp
	bl	z_arm64_fpu_trap
	b	z_arm64_exit_exc_fpu_done
1:
#endif

	cmp	x1, #0x15 /* 0x15 = SVC */
	bne	inv

	/* Demux the SVC call */
	and	x1, x0, #0xff	/* SVC immediate (low bits of ESR_EL1.ISS) */

	cmp	x1, #_SVC_CALL_RUNTIME_EXCEPT
	beq	oops

#ifdef CONFIG_USERSPACE
	cmp	x1, #_SVC_CALL_SYSTEM_CALL
	beq	z_arm64_do_syscall
#endif

#ifdef CONFIG_IRQ_OFFLOAD
	cmp	x1, #_SVC_CALL_IRQ_OFFLOAD
	beq	offload
	b	inv
offload:
	/*
	 * Retrieve provided routine and argument from the stack.
	 * Routine pointer is in saved x0, argument in saved x1
	 * so we load them with x1/x0 (reversed).
	 */
	ldp	x1, x0, [sp, ___esf_t_x0_x1_OFFSET]

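	/*
	 * The issuing side (arch_irq_offload()) is expected to have the
	 * routine in x0 and its argument in x1 when it executes the SVC,
	 * roughly as below (illustrative sketch only; routine/parameter are
	 * the caller's arguments):
	 *
	 *	register void *x0 __asm__("x0") = (void *)routine;
	 *	register const void *x1 __asm__("x1") = parameter;
	 *
	 *	__asm__ volatile("svc %[id]"
	 *			 : "+r" (x0)
	 *			 : [id] "i" (_SVC_CALL_IRQ_OFFLOAD), "r" (x1)
	 *			 : "memory");
	 */
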
	/* ++_current_cpu->nested to be checked by arch_is_in_isr() */
	get_cpu	x2
	ldr	w3, [x2, #___cpu_t_nested_OFFSET]
	add	w4, w3, #1
	str	w4, [x2, #___cpu_t_nested_OFFSET]

	/* If not nested: switch to IRQ stack and save current sp on it. */
	cbnz	w3, 1f
	ldr	x3, [x2, #___cpu_t_irq_stack_OFFSET]
	mov	x4, sp
	mov	sp, x3
	str	x4, [sp, #-16]!
#if defined(CONFIG_ARM64_SAFE_EXCEPTION_STACK)
	/* update the stack limit with IRQ stack limit */
	sub	x3, x3, #CONFIG_ISR_STACK_SIZE
	str	x3, [x2, #_cpu_offset_to_current_stack_limit]
#endif
1:
	/* Execute provided routine (argument is in x0 already). */
	blr	x1

	/* Exit through regular IRQ exit path */
	b	z_arm64_irq_done
#endif
	b	inv

oops:
	mov	x0, sp
	b	z_arm64_do_kernel_oops

inv:
	mov	x0, #0 /* K_ERR_CPU_EXCEPTION */
	mov	x1, sp
	bl	z_arm64_fatal_error

	/* Return here only in case of recoverable error */
	b	z_arm64_exit_exc