/*
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright © 2019 Keith Packard
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials provided
 *    with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <picolibc.h>
#include "../../crt0.h"

#if __ARM_ARCH_PROFILE == 'M'

/*
 * Cortex-M includes an NVIC and starts with SP initialized, so start
 * is a C function
 */

extern const void *__interrupt_vector[];

#define CPACR	((volatile uint32_t *) (0xE000ED88))
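
/*
 * CPACR is the M-profile Coprocessor Access Control Register at
 * 0xE000ED88. Bits [21:20] control CP10 and bits [23:22] control
 * CP11 (the floating-point/MVE coprocessors), so writing 0xf << 20
 * below sets both fields to 0b11, granting full access.
 */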

#ifdef __clang__
/*
 * clang does not treat the ".equ" reference emitted in _start below
 * as a use of __interrupt_vector, so keep a C-level reference to make
 * sure the vector table gets linked in.
 */
const void *__interrupt_reference = __interrupt_vector;
#endif

void
_start(void)
{
	/* Generate a reference to __interrupt_vector so we get one loaded */
	__asm__(".equ __my_interrupt_vector, __interrupt_vector");
	/* Access to the coprocessors has to be enabled in CPACR if either
	 * the FPU or MVE is used. This is described in the "Arm v8-M
	 * Architecture Reference Manual". */
#if defined __ARM_FP || defined __ARM_FEATURE_MVE
	/* Enable FPU */
	*CPACR |= 0xf << 20;
	/*
	 * Wait for the write enabling the FPU to reach memory before
	 * executing the instruction accessing the status register
	 */
	__asm__("dsb");
	__asm__("isb");

	/* Clear FPU status register. 0x40000 initializes FPSCR.LTPSIZE to
	 * a valid value for Armv8.1-M low overhead loops. */
#if __ARM_ARCH >= 8 && __ARM_ARCH_PROFILE == 'M'
#define INIT_FPSCR 0x40000
#else
#define INIT_FPSCR 0x0
#endif
	__asm__("vmsr fpscr, %0" : : "r" (INIT_FPSCR));
#endif

#if defined(__ARM_FEATURE_PAC_DEFAULT) || defined(__ARM_FEATURE_BTI_DEFAULT)
	uint32_t control;
	__asm__("mrs %0, CONTROL" : "=r" (control));
#ifdef __ARM_FEATURE_PAC_DEFAULT
	control |= (3 << 6);	/* CONTROL.UPAC_EN | CONTROL.PAC_EN */
#endif
#ifdef __ARM_FEATURE_BTI_DEFAULT
	control |= (3 << 4);	/* CONTROL.UBTI_EN | CONTROL.BTI_EN */
#endif
	__asm__("msr CONTROL, %0" : : "r" (control));
#endif
	__start();
}

#else /*  __ARM_ARCH_PROFILE == 'M' */

#ifdef _PICOCRT_ENABLE_MMU

#if __ARM_ARCH >= 7 && __ARM_ARCH_PROFILE != 'R'

/*
 * We need 4096 1MB section mappings to cover the whole 4GB address
 * space: the usual Normal memory, which runs from 0x00000000 to
 * 0x7fffffff, and the usual Device space, which runs from 0x80000000
 * to 0xffffffff.
 */
#define MMU_NORMAL_COUNT 2048
#define MMU_DEVICE_COUNT 2048
extern uint32_t __identity_page_table[MMU_NORMAL_COUNT + MMU_DEVICE_COUNT];

/* Bits within a short-form section PTE (1MB mapping) */
#define MMU_NS_BIT      19
#define MMU_NG_BIT      17
#define MMU_S_BIT       16
#define MMU_AP2_BIT     15
#define MMU_TEX_BIT     12
#define MMU_AP0_BIT     10
#define MMU_XN_BIT      4
#define MMU_BC_BIT      2
#define MMU_PXN_BIT     0

#define MMU_TYPE_1MB    (0x2 << 0)
#define MMU_RW          (0x3 << MMU_AP0_BIT)

/* Memory attributes when TEX[2] == 0 */
#define MMU_STRONGLY_ORDERED    ((0 << MMU_TEX_BIT) | (0 << MMU_BC_BIT))
#define MMU_SHAREABLE_DEVICE    ((0 << MMU_TEX_BIT) | (1 << MMU_BC_BIT))
#define MMU_WT_NOWA             ((0 << MMU_TEX_BIT) | (2 << MMU_BC_BIT))
#define MMU_WB_NOWA             ((0 << MMU_TEX_BIT) | (3 << MMU_BC_BIT))
#define MMU_NON_CACHEABLE       ((1 << MMU_TEX_BIT) | (0 << MMU_BC_BIT))
#define MMU_WB_WA               ((1 << MMU_TEX_BIT) | (3 << MMU_BC_BIT))
#define MMU_NONSHAREABLE_DEVICE ((2 << MMU_TEX_BIT) | (0 << MMU_BC_BIT))

/*
 * Memory attributes when TEX[2] == 1. In this mode
 * TEX[1:0] define the outer cache attributes and
 * C, B define the inner cache attributes
 */
#define MMU_MEM_ATTR(_O, _I)    (((4 | (_O)) << MMU_TEX_BIT) | ((_I) << MMU_BC_BIT))
#define MMU_MEM_ATTR_NC         0
#define MMU_MEM_ATTR_WB_WA      1
#define MMU_MEM_ATTR_WT_NOWA    2
#define MMU_MEM_ATTR_WB_NOWA    3

#define MMU_SHAREABLE           (1 << MMU_S_BIT)
#define MMU_NORMAL_MEMORY       (MMU_MEM_ATTR(MMU_MEM_ATTR_WB_WA, MMU_MEM_ATTR_WB_WA) | MMU_SHAREABLE)
#define MMU_DEVICE_MEMORY       (MMU_SHAREABLE_DEVICE)
#define MMU_NORMAL_FLAGS        (MMU_TYPE_1MB | MMU_RW | MMU_NORMAL_MEMORY)
#define MMU_DEVICE_FLAGS        (MMU_TYPE_1MB | MMU_RW | MMU_DEVICE_MEMORY)
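
/*
 * Worked example (for reference only, nothing uses these values):
 * MMU_NORMAL_FLAGS evaluates to 0x15c06 and MMU_DEVICE_FLAGS to
 * 0xc06, so the generated table starts with Normal entries such as
 *
 *     0x00015c06   section 0, VA 0x00000000, Normal memory
 *     0x00115c06   section 1, VA 0x00100000, Normal memory
 *
 * and continues at index 2048 with Device entries such as
 *
 *     0x80000c06   VA 0x80000000, Device memory
 */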

/*
 * The first-level translation table must be 16KB aligned to be used
 * as a TTBR0 value, hence the .balign below.
 */
__asm__(
    ".section .rodata\n"
    ".global __identity_page_table\n"
    ".balign 16384\n"
    "__identity_page_table:\n"
    ".set _i, 0\n"
    ".rept " __XSTRING(MMU_NORMAL_COUNT) "\n"
    "  .4byte (_i << 20) |" __XSTRING(MMU_NORMAL_FLAGS) "\n"
    "  .set _i, _i + 1\n"
    ".endr\n"
    ".set _i, 0\n"
    ".rept " __XSTRING(MMU_DEVICE_COUNT) "\n"
    "  .4byte (1 << 31) | (_i << 20) |" __XSTRING(MMU_DEVICE_FLAGS) "\n"
    "  .set _i, _i + 1\n"
    ".endr\n"
    ".size __identity_page_table, " __XSTRING((MMU_NORMAL_COUNT + MMU_DEVICE_COUNT) * 4) "\n"
);
#endif

#endif /* _PICOCRT_ENABLE_MMU */

/*
 * Set up all of the banked (shadow) stack pointers. With the Thumb 1
 * ISA we need to do this in ARM mode, hence the separate
 * target("arm") function.
 */

extern char __stack[];

#define MODE_USR        (0x10)
#define MODE_FIQ        (0x11)
#define MODE_IRQ        (0x12)
#define MODE_SVC        (0x13)
#define MODE_MON        (0x16)
#define MODE_ABT        (0x17)
#define MODE_HYP        (0x1a)
#define MODE_UND        (0x1b)
#define MODE_SYS        (0x1f)
#define I_BIT           (1 << 7)
#define F_BIT           (1 << 6)

#define SET_SP(mode) \
    __asm__("mov r0, %0\nmsr cpsr_c, r0" :: "r" (mode | I_BIT | F_BIT): "r0");   \
    __asm__("mov sp, %0" : : "r" (__stack))
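
/*
 * SET_SP switches the processor to the given mode, with IRQ and FIQ
 * masked, by writing the CPSR control field, then initializes that
 * mode's banked SP. Note that every mode's SP is pointed at the same
 * __stack top here.
 */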

#define SET_SPS()                               \
        SET_SP(MODE_IRQ);                       \
        SET_SP(MODE_ABT);                       \
        SET_SP(MODE_UND);                       \
        SET_SP(MODE_FIQ);                       \
        SET_SP(MODE_SVC);                       \
        SET_SP(MODE_SYS);

#if __ARM_ARCH_ISA_THUMB == 1
static __noinline __attribute__((target("arm"))) void
_set_stacks(void)
{
        SET_SPS();
}
#endif

/*
 * Regular ARM has an 8-entry exception vector and starts without SP
 * initialized, so start is a naked function which sets up the stack
 * and then branches here.
 */

static void __attribute__((used)) __section(".init")
_cstart(void)
{
#if __ARM_ARCH_ISA_THUMB == 1
        _set_stacks();
#endif

#if __thumb2__ && __ARM_ARCH_PROFILE != 'A'
	/* Make exceptions run in Thumb mode (SCTLR.TE) */
	uint32_t sctlr;
	__asm__("mrc p15, 0, %0, c1, c0, 0" : "=r" (sctlr));
	sctlr |= (1 << 30);
	__asm__("mcr p15, 0, %0, c1, c0, 0" : : "r" (sctlr));
#endif
#if defined __ARM_FP || defined __ARM_FEATURE_MVE
#if __ARM_ARCH > 6
	/* Set CPACR for access to CP10 and 11 */
	__asm__("mcr p15, 0, %0, c1, c0, 2" : : "r" (0xf << 20));
#endif
	/* Enable FPU by setting FPEXC.EN */
	__asm__("vmsr fpexc, %0" : : "r" (0x40000000));
#endif

#ifdef _PICOCRT_ENABLE_MMU

#if __ARM_ARCH >= 7 && __ARM_ARCH_PROFILE != 'R'

#define SCTLR_MMU (1 << 0)
#define SCTLR_DATA_L2 (1 << 2)
#define SCTLR_BRANCH_PRED (1 << 11)
#define SCTLR_ICACHE (1 << 12)
#define SCTLR_TRE       (1 << 28)

        uint32_t        mmfr0;
        __asm__("mrc p15, 0, %0, c0, c1, 4" : "=r" (mmfr0));

        /* Check to see if the processor supports VMSAv7 or better */
        if ((mmfr0 & 0xf) >= 3)
        {
                /* We have to set up an identity map and enable the MMU for the caches.
                 * Additionally, all page table entries use Domain 0, so set up DACR
                 * so that Domain 0 has permission checks enabled rather than "deny all".
                 */

                /* Set DACR Domain 0 to "client" (permissions checked) */
                __asm__("mcr p15, 0, %0, c3, c0, 0\n" :: "r" (1));

                /*
                 * Write TTBR0
                 *
                 * No DSB needed since the tables are statically initialized
                 * and the dcache is off. We OR __identity_page_table with
                 * 0x3 to set the cacheable flag bits.
                 */
                __asm__("mcr p15, 0, %0, c2, c0, 0\n"
                        :: "r" ((uintptr_t)__identity_page_table | 0x3));

                /* Note: we assume the Data+L2 caches have been invalidated by reset. */
                __asm__("mcr p15, 0, %0, c7, c5, 0\n" :: "r" (0)); /* ICIALLU: invalidate instruction cache */
                __asm__("mcr p15, 0, %0, c8, c7, 0\n" :: "r" (0)); /* TLBIALL: invalidate TLB */
                __asm__("mcr p15, 0, %0, c7, c5, 6\n" :: "r" (0)); /* BPIALL: invalidate branch predictor */
                __asm__("isb\n");

                /* Enable caches, branch prediction and the MMU. Disable TRE */
                uint32_t sctlr;
                __asm__("mrc p15, 0, %0, c1, c0, 0" : "=r" (sctlr));
                sctlr |= SCTLR_ICACHE | SCTLR_BRANCH_PRED | SCTLR_DATA_L2 | SCTLR_MMU;
                sctlr &= ~SCTLR_TRE;
                __asm__("mcr p15, 0, %0, c1, c0, 0\n" :: "r" (sctlr));
                __asm__("isb\n");
        }
#endif

#endif /* _PICOCRT_ENABLE_MMU */

	__start();
}

void __attribute__((naked)) __section(".init") __attribute__((used))
_start(void)
{
	/* Generate a reference to __vector_table so we get one loaded */
	__asm__(".equ __my_vector_table, __vector_table");

#if __ARM_ARCH_ISA_THUMB == 1
        __asm__("mov sp, %0" : : "r" (__stack));
#else
        SET_SPS();
#endif
	/* Branch to C code */
	__asm__("b _cstart");
}

#endif

#ifdef CRT0_SEMIHOST

/*
 * Trap faults, print a message and exit when running under semihost
 */

#include <semihost.h>
#include <unistd.h>
#include <stdio.h>

#define _REASON(r) #r
#define REASON(r) _REASON(r)
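
/*
 * REASON() expands its argument and then stringizes it, so the
 * numeric reason codes below can be pasted into inline assembly
 * immediates: REASON(REASON_HARDFAULT), for example, becomes "0".
 */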

/* Write a register value as exactly eight hex digits, most
 * significant nibble first, using only fputs/putchar. */
static void arm_fault_write_reg(const char *prefix, unsigned reg)
{
    fputs(prefix, stdout);

    for (unsigned i = 0; i < 8; i++) {
        unsigned digitval = 0xF & (reg >> (28 - 4*i));
        char digitchr = '0' + digitval + (digitval >= 10 ? 'a'-'0'-10 : 0);
        putchar(digitchr);
    }

    putchar('\n');
}

#if __ARM_ARCH_PROFILE == 'M'

#define GET_SP  struct fault *sp; __asm__ ("mov %0, sp" : "=r" (sp))

struct fault {
    unsigned int        r0;
    unsigned int        r1;
    unsigned int        r2;
    unsigned int        r3;
    unsigned int        r12;
    unsigned int        lr;
    unsigned int        pc;
    unsigned int        xpsr;
};
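
/*
 * This layout mirrors the exception frame that M-profile hardware
 * pushes on exception entry (r0-r3, r12, lr, return address, xPSR),
 * so a fault handler can read the pre-fault register state directly
 * through SP.
 */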

static const char *const reasons[] = {
    "hardfault\n",
    "memmanage\n",
    "busfault\n",
    "usagefault\n"
};

#define REASON_HARDFAULT        0
#define REASON_MEMMANAGE        1
#define REASON_BUSFAULT         2
#define REASON_USAGE            3

static void __attribute__((used))
arm_fault(struct fault *f, int reason)
{
    fputs("ARM fault: ", stdout);
    fputs(reasons[reason], stdout);
    arm_fault_write_reg("\tR0:   0x", f->r0);
    arm_fault_write_reg("\tR1:   0x", f->r1);
    arm_fault_write_reg("\tR2:   0x", f->r2);
    arm_fault_write_reg("\tR3:   0x", f->r3);
    arm_fault_write_reg("\tR12:  0x", f->r12);
    arm_fault_write_reg("\tLR:   0x", f->lr);
    arm_fault_write_reg("\tPC:   0x", f->pc);
    arm_fault_write_reg("\tXPSR: 0x", f->xpsr);
    _exit(1);
}
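/*
 * Each handler below is naked so that no compiler-generated prologue
 * runs before SP is captured: on entry SP still points at the
 * hardware-stacked exception frame (assuming the main stack was
 * active), which is passed to arm_fault() as the struct fault
 * pointer along with the reason code in r1.
 */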
void __attribute__((naked))
arm_hardfault_isr(void)
{
    __asm__("mov r0, sp");
    __asm__("movs r1, #" REASON(REASON_HARDFAULT));
    __asm__("bl  arm_fault");
}

void __attribute__((naked))
arm_memmange_isr(void)
{
    __asm__("mov r0, sp");
    __asm__("movs r1, #" REASON(REASON_MEMMANAGE));
    __asm__("bl  arm_fault");
}

void __attribute__((naked))
arm_busfault_isr(void)
{
    __asm__("mov r0, sp");
    __asm__("movs r1, #" REASON(REASON_BUSFAULT));
    __asm__("bl  arm_fault");
}

void __attribute__((naked))
arm_usagefault_isr(void)
{
    __asm__("mov r0, sp");
    __asm__("movs r1, #" REASON(REASON_USAGE));
    __asm__("bl  arm_fault");
}

#else /* __ARM_ARCH_PROFILE == 'M' */

struct fault {
    unsigned int        r[7];
    unsigned int        pc;
};
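
/*
 * This layout matches the frame built by VECTOR_COMMON below, which
 * pushes lr and then r0-r6: the seven GPRs sit at the final SP,
 * followed by the saved lr, reported here as PC.
 */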

static const char *const reasons[] = {
    "undef\n",
    "svc\n",
    "prefetch_abort\n",
    "data_abort\n"
};

#define REASON_UNDEF            0
#define REASON_SVC              1
#define REASON_PREFETCH_ABORT   2
#define REASON_DATA_ABORT       3

static void __attribute__((used))
arm_fault(struct fault *f, int reason)
{
    int r;
    fputs("ARM fault: ", stdout);
    fputs(reasons[reason], stdout);
    char prefix[] = "\tR#:   0x";
    for (r = 0; r <= 6; r++) {
        prefix[2] = '0' + r;    /* overwrite # with register number */
        arm_fault_write_reg(prefix, f->r[r]);
    }
    arm_fault_write_reg("\tPC:   0x", f->pc);
    _exit(1);
}

#define VECTOR_COMMON \
    __asm__("push {lr}");                               \
    __asm__("push {r0-r6}");                            \
    __asm__("mov r0, sp")

void __attribute__((naked)) __section(".init")
arm_undef_vector(void)
{
    VECTOR_COMMON;
    __asm__("movs r1, #" REASON(REASON_UNDEF));
    __asm__("bl  arm_fault");
}

void __attribute__((naked)) __section(".init")
arm_prefetch_abort_vector(void)
{
    VECTOR_COMMON;
    __asm__("movs r1, #" REASON(REASON_PREFETCH_ABORT));
    __asm__("bl  arm_fault");
}

void __attribute__((naked)) __section(".init")
arm_data_abort_vector(void)
{
    VECTOR_COMMON;
    __asm__("movs r1, #" REASON(REASON_DATA_ABORT));
    __asm__("bl  arm_fault");
}

#endif /* else __ARM_ARCH_PROFILE == 'M' */

#endif /* CRT0_SEMIHOST */