/* Copyright(c) 2021 Intel Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0
 */


#include <stddef.h>
#include <stdint.h>

#include <zephyr/devicetree.h>
#include <zephyr/kernel.h>
#include <zephyr/init.h>
#include <soc_util.h>
#include <zephyr/cache.h>
#include <adsp_shim.h>
#include <adsp_memory.h>
#include <cpu_init.h>
#include "manifest.h"

#ifdef CONFIG_SOC_SERIES_INTEL_ADSP_ACE
#include <adsp_boot.h>
#endif /* CONFIG_SOC_SERIES_INTEL_ADSP_ACE */

/* Important note about linkage:
 *
 * The C code here, starting from boot_core0(), is running entirely in
 * IMR memory.  The SRAM banks are not initialized yet and the Zephyr
 * code is not yet copied there.  No use of that memory is legal until
 * after parse_manifest() returns.  This means that all symbols in
 * this file must be flagged "__imr" or "__imrdata" (or be guaranteed
 * to inline via ALWAYS_INLINE; normal gcc "inline" is only a hint)!
 *
 * There's a similar note with Xtensa register windows: the Zephyr
 * exception handlers for window overflow are not present in IMR.
 * While on existing systems we start running with a VECBASE pointing
 * to ROM handlers (which seem to work), it seems unsafe to rely on
 * that.  An overflow cannot be hit until at least four levels of
 * nested function calls, so this is mostly theoretical.  Nonetheless,
 * care should be taken here to make sure the function call tree
 * remains shallow until SRAM initialization is finished.
 */

/* Various cAVS platform dependencies needed by the bootloader code.
 * These probably want to migrate to devicetree.
 */


#define HOST_PAGE_SIZE		4096
#define MANIFEST_SEGMENT_COUNT	3

/* FIXME: Use Kconfig or some other means */
#if !defined(CONFIG_SOC_SERIES_INTEL_ADSP_ACE)
#define RESET_MEMORY_HOLE
#endif

/* Initial/true entry point.  Does nothing but jump to
 * z_boot_asm_entry (which cannot live here, because it needs to
 * reference immediates that must be linked ahead of it)
 */
__asm__(".pushsection .boot_entry.text, \"ax\" \n\t"
	".global rom_entry             \n\t"
	"rom_entry:                    \n\t"
	"  j z_boot_asm_entry          \n\t"
	".popsection                   \n\t");

/* Entry stub.  Sets up register windows and stack such that we can
 * enter C code successfully, and calls boot_core0()
 */
#define STRINGIFY_MACRO(x) Z_STRINGIFY(x)
#define IMRSTACK STRINGIFY_MACRO(IMR_BOOT_LDR_MANIFEST_BASE)
__asm__(".section .imr.z_boot_asm_entry, \"x\" \n\t"
	".align 4                   \n\t"
	"z_boot_asm_entry:          \n\t"
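	/* 0x4002f under the standard Xtensa PS layout: PS.WOE set so the
	 * register-window call4 below works, PS.UM set, and PS.INTLEVEL
	 * at its maximum so maskable interrupts stay blocked.
	 */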
	"  movi  a0, 0x4002f        \n\t"
	"  wsr   a0, PS             \n\t"
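	/* Start from a clean window state: current frame at WINDOWBASE 0,
	 * with only that frame marked valid in WINDOWSTART.
	 */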
	"  movi  a0, 0              \n\t"
	"  wsr   a0, WINDOWBASE     \n\t"
	"  movi  a0, 1              \n\t"
	"  wsr   a0, WINDOWSTART    \n\t"
	"  rsync                    \n\t"
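	/* Temporary stack pointer in IMR: the stack grows down from the
	 * manifest base, since the real Zephyr stacks live in SRAM, which
	 * is not usable yet (see the linkage note above).
	 */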
	"  movi  a1, " IMRSTACK    "\n\t"
	"  call4 boot_core0   \n\t");

static __imr void parse_module(struct sof_man_fw_header *hdr,
			       struct sof_man_module *mod)
{
	int i;
	uint32_t bias;

	/* each module has 3 segments */
	for (i = 0; i < MANIFEST_SEGMENT_COUNT; i++) {

		switch (mod->segment[i].flags.r.type) {
		case SOF_MAN_SEGMENT_TEXT:
		case SOF_MAN_SEGMENT_DATA:
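			/* Skip segments not flagged for loading, or whose
			 * target address lies outside L2 SRAM.
			 */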
			if (mod->segment[i].flags.r.load == 0 ||
			    mod->segment[i].v_base_addr >= L2_SRAM_BASE + L2_SRAM_SIZE ||
			    mod->segment[i].v_base_addr < L2_SRAM_BASE) {
				continue;
			}

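			/* Rebase the segment's file offset against the header
			 * address; presumably SOF_MAN_ELF_TEXT_OFFSET is the
			 * descriptor's own offset within the image layout that
			 * file_offset is measured from.
			 */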
			bias = mod->segment[i].file_offset -
				SOF_MAN_ELF_TEXT_OFFSET;

			/* copy from IMR to SRAM */
			bmemcpy((void *)mod->segment[i].v_base_addr,
				(uint8_t *)hdr + bias,
				mod->segment[i].flags.r.length *
				HOST_PAGE_SIZE);
			break;
		case SOF_MAN_SEGMENT_BSS:
			/* already bbzero'd by sram init */
			break;
		default:
			/* ignore */
			break;
		}
	}
}

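/* The first manifest entry describes the bootloader module that is already
 * running here out of IMR; parse_manifest() skips it below.
 */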
#define MAN_SKIP_ENTRIES 1

/* parse FW manifest and copy modules */
__imr void parse_manifest(void)
{
	struct sof_man_fw_desc *desc =
		(struct sof_man_fw_desc *)IMR_BOOT_LDR_MANIFEST_BASE;
	struct sof_man_fw_header *hdr = &desc->header;
	struct sof_man_module *mod;
	int i;

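	/* The manifest was deposited in IMR by an earlier boot stage
	 * (presumably the ROM/host loader), so invalidate any stale
	 * cache lines before reading the header.
	 */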
	sys_cache_data_invd_range(hdr, sizeof(*hdr));

	/* copy modules to SRAM, skipping the bootloader module */
	for (i = MAN_SKIP_ENTRIES; i < hdr->num_module_entries; i++) {
		mod = desc->man_module + i;

		sys_cache_data_invd_range(mod, sizeof(*mod));
		parse_module(hdr, mod);
	}
}

extern void hp_sram_init(uint32_t memory_size);
extern void lp_sram_init(void);

__imr void boot_core0(void)
{
#if defined(CONFIG_INTEL_ADSP_SIM_NO_SECONDARY_CORE_FLOW)
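	/* On simulated targets without the usual secondary-core startup
	 * flow, every core enters here; cores other than core 0 jump
	 * straight to the address programmed into their boot control
	 * register.
	 */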
	int prid;

	prid = arch_proc_id();
	if (prid != 0) {
		((void (*)(void))DSPCS.bootctl[prid].baddr)();
	}
#endif

	cpu_early_init();

#ifdef CONFIG_ADSP_DISABLE_L2CACHE_AT_BOOT
	ADSP_L2PCFG_REG = 0;
#endif

#ifdef RESET_MEMORY_HOLE
	/* reset memory hole */
	CAVS_SHIM.l2mecs = 0;
#endif

	hp_sram_init(L2_SRAM_SIZE);
	lp_sram_init();
	parse_manifest();
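	/* Write back everything just copied so the image is actually
	 * resident in SRAM before execution moves there.
	 */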
	sys_cache_data_flush_all();

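	/* The Zephyr image, including its exception vectors, is now in
	 * SRAM; lock VECBASE (see the register-window note at the top of
	 * this file).
	 */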
	xtensa_vecbase_lock();

	/* Zephyr! */
	extern FUNC_NORETURN void z_prep_c(void);
	z_prep_c();
}