/* Copyright 2023 The ChromiumOS Authors
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/devicetree.h>
#include <zephyr/sys/libc-hooks.h>
#include <string.h>
#include <kernel_internal.h>

extern char _mtk_adsp_sram_end[];

#define SRAM_START DT_REG_ADDR(DT_NODELABEL(sram0))
#define SRAM_SIZE  DT_REG_SIZE(DT_NODELABEL(sram0))
#define SRAM_END   (SRAM_START + SRAM_SIZE)

extern char _mtk_adsp_dram_end[];

#define DRAM_START DT_REG_ADDR(DT_NODELABEL(dram0))
#define DRAM_SIZE  DT_REG_SIZE(DT_NODELABEL(dram0))
#define DRAM_END   (DRAM_START + DRAM_SIZE)

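/* Early boot constants: INIT_STACK is the initial stack pointer, kept
 * as a string so it can be spliced into the boot asm below, and
 * LOG_BASE/LOG_LEN describe the shared-DRAM window used by
 * arch_printk_char_out() further down.
 */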
#ifdef CONFIG_SOC_MT8196
#define INIT_STACK "0x90400000"
#define LOG_BASE   0x90580000
#define LOG_LEN    0x80000
#else
#define INIT_STACK "0x60e00000"
#define LOG_BASE   0x60700000
#define LOG_LEN    0x100000
#endif

/* The MT8196 interrupt controller is very simple at runtime, needing
 * just an enable and a status register, like its predecessors.  But
 * it has routing control that resets to "nothing enabled", so it
 * needs a driver.
 *
 * There are 64 interrupt inputs to the controller, controlled by
 * pairs of words (the "intc64" type below).  Each interrupt is
 * associated with one[1] of 16 "groups", each of which directs to a
 * different Xtensa architectural interrupt.  So each Xtensa interrupt
 * can be configured to handle any subset of interrupt inputs.
 *
 * The mapping of groups to Xtensa interrupts is given below.  Note
 * particularly that the final two groups are NMIs directed to an
 * interrupt level higher than EXCM_LEVEL, so they cannot be safely
 * used for OS code (they'll interrupt spinlocks), but an app might
 * exploit them for e.g. debug or watchdog hooks.
 *
 * GroupNum  XtensaIRQ  XtensaLevel
 *     0-5        0-5       1  (L1 is shared w/exceptions, poor choice)
 *     6-7        7-8       1
 *    8-10       9-11       2
 *   11-13      16-18       3
 *   14,15      20,21       4  (Unmaskable! Do not use w/Zephyr code!)
 *
 * Naming of the inputs looks like this, though obviously only a small
 * fraction has been validated (or is even useful for an audio DSP):
 *
 *  0: CCU              20: USB1             40: WDT
 *  1: SCP              21: SCPVOW           41: CONNSYS1
 *  2: SPM              22: CCIF3_C0         42: CONNSYS3
 *  3: PCIE             23: CCIF3_C1         43: CONNSYS4
 *  4: INFRA_HANG       24: PWR_CTRL         44: CONNSYS2
 *  5: PERI_TIMEOUT     25: DMA_C0           45: IPIC
 *  6: MBOX_C0          26: DMA_C1           46: AXI_DMA2
 *  7: MBOX_C1          27: AXI_DMA0         47: AXI_DMA3
 *  8: TIMER0           28: AXI_DMA1         48: APSRC_DDREN
 *  9: TIMER1           29: AUDIO_C0         49: LAT_MON_EMI
 * 10: IPC_C0           30: AUDIO_C1         50: LAT_MON_INFRA
 * 11: IPC_C1           31: HIFI5_WDT_C0     51: DEVAPC_VIO
 * 12: IPC1_RSV         32: HIFI5_WDT_C1     52: AO_INFRA_HANG
 * 13: C2C_SW_C0        33: APU_MBOX_C0      53: BUS_TRA_EMI
 * 14: C2C_SW_C1        34: APU_MBOX_C1      54: BUS_TRA_INFRA
 * 15: UART             35: TIMER2           55: L2SRAM_VIO
 * 16: UART_BT          36: PWR_ON_C0_IRQ    56: L2SRAM_SETERR
 * 17: LATENCY_MON      37: PWR_ON_C1_IRQ    57: PCIERC_GRP2
 * 18: BUS_TRACKER      38: WAKEUP_SRC_C0    58: PCIERC_GRP3
 * 19: USB0             39: WAKEUP_SRC_C1    59: IRQ_MAX_CHANNEL
 *
 * [1] It is legal and works as expected for an interrupt to be part
 *     of more than one group (more than one interrupt fires to handle
 *     it), though I don't understand why an application would want to
 *     do that.
 */

struct intc64 { uint32_t lo, hi; };

struct intc_8196 {
	struct intc64 input;        /* Raw (?) input signal, normally high */
	struct intc64 status;       /* Latched input, inverted (active == 1) */
	struct intc64 enable;       /* Interrupt enable */
	struct intc64 polarity;     /* 1 == active low */
	struct intc64 wake_enable;
	struct intc64 _unused;
	struct intc64 stage1_enable;
	struct intc64 sw_trigger;
	struct intc64 groups[16];       /* set bit == "member of group" */
	struct intc64 group_status[16]; /* status, but masked by group */
};

#define INTC (*(volatile struct intc_8196 *)0x1a014000)

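/* Sets or clears a single input's bit in one of the 64-bit register
 * pairs above, picking the low or high word as appropriate.
 */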
static void set_group_bit(volatile struct intc64 *g, uint32_t bit, bool val)
{
	volatile uint32_t *p  = bit < 32 ? &g->lo : &g->hi;
	volatile uint32_t mask = BIT(bit & 0x1f);

	*p = val ? (*p | mask) : (*p & ~mask);
}

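/* Routes an interrupt input to exactly one group: its bit is set in
 * the selected group's register and cleared in all the others.
 */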
static void mt8196_intc_set_irq_group(uint32_t irq, uint32_t group)
{
	for (int i = 0; i < 16; i++) {
		set_group_bit(&INTC.groups[i], irq, i == group);
	}
}

void mt8196_intc_init(void)
{
	struct intc64 zero = { 0, 0 };

	INTC.enable = zero;
	INTC.polarity.lo = 0xffffffff;
	INTC.polarity.hi = 0xffffffff;
	INTC.wake_enable = zero;
	INTC.stage1_enable = zero;
	for (int i = 0; i < ARRAY_SIZE(INTC.groups); i++) {
		INTC.groups[i] = zero;
	}

	/* Now wire up known interrupts for existing drivers to their
	 * legacy settings
	 */
	mt8196_intc_set_irq_group(6, 2); /* mbox0 in group 2 */
	mt8196_intc_set_irq_group(7, 2); /* mbox1 in group 2 */
	mt8196_intc_set_irq_group(8, 1); /* ostimer in group 1 */
}
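
/* Illustrative sketch only (nothing in this file does this): a driver
 * that wanted, say, input 35 (TIMER2) delivered on a level-2 Xtensa
 * interrupt could route it through group 8 (Xtensa IRQ 9 per the
 * table above), unmask it at the controller, and enable the
 * architectural IRQ on the Zephyr side.  The input/group numbers are
 * examples, not configuration this SoC code performs:
 *
 *	mt8196_intc_set_irq_group(35, 8);
 *	set_group_bit(&INTC.enable, 35, true);
 *	irq_enable(9);
 */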

/* This is the true boot vector.  This device allows for direct
 * setting of the alternate reset vector, so we let it link wherever
 * it lands and extract its address in the loader.  This represents
 * the minimum amount of effort required to successfully call a C
 * function (and duplicates a few versions elsewhere in the tree:
 * really this should move to the arch layer).  The initial stack
 * really should be the end of _interrupt_stacks[0].
 */
__asm__(".align 4\n\t"
	".global mtk_adsp_boot_entry\n\t"
	"mtk_adsp_boot_entry:\n\t"
	"  movi  a0, 0x4002f\n\t" /* WOE|EXCM|INTLVL=15 */
	"  wsr   a0, PS\n\t"
	"  movi  a0, 0\n\t"
	"  wsr   a0, WINDOWBASE\n\t"
	"  movi  a0, 1\n\t"
	"  wsr   a0, WINDOWSTART\n\t"
	"  rsync\n\t"
	"  movi  a1, " INIT_STACK "\n\t"
	"  call4 c_boot\n\t");

/* Unfortunately the SOF kernel loader doesn't understand the boot
 * vector in the ELF/rimage file yet, so we still need a stub to get
 * actual audio firmware to load.  Leave a stub in place that jumps to
 * our "real" vector.  Note that this is frustratingly pessimal: the
 * kernel wants the entry point to be at the start of the SRAM region,
 * but (1) an Xtensa L32R can only load a literal from an address
 * LOWER than the instruction itself, which is impossible here, so we
 * have to jump forward across a spot where the literal is placed, and
 * (2) the vector table that gets displaced has a 1024 byte alignment
 * requirement, forcing us to waste ~1011 bytes needlessly.
 */
__asm__(".pushsection .sof_entry.text\n\t"
	"  j 2f\n"
	".align 4\n\t"
	"1:\n\t"
	"  .word mtk_adsp_boot_entry\n"
	"2:\n\t"
	"  l32r a0, 1b\n\t"
	"  jx a0\n\t"
	".popsection");

/* Initial MPU configuration, needed to enable caching */
static void enable_mpu(void)
{
	/* Note: we set the linked/in-use-by-Zephyr regions of both
	 * SRAM and DRAM cached for performance.  The remainder is
	 * left uncached, as it's likely to be shared with the host
	 * and/or DMA.  This seems like a good default choice pending
	 * proper MPU integration.
	 */
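	/* Each entry below is { segment start address, attribute word }.
	 * The loop that programs them ORs the enable bit into the
	 * address and the MPU entry index into the attribute word; the
	 * attribute constants select the Xtensa MPU access-rights and
	 * memory-type fields, with 0xf7f00 used here for cached memory,
	 * 0x06f00 for uncached, and 0x06000 for inaccessible regions
	 * (per the per-entry comments).
	 */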
	static const uint32_t mpu[][2] = {
		{ 0x00000000, 0x06000 },          /* inaccessible null region */
		{ 0x10000000, 0x06f00 },          /* MMIO registers */
		{ 0x1d000000, 0x06000 },          /* inaccessible */
		{ SRAM_START, 0xf7f00 },          /* cached SRAM */
		{ (uint32_t)&_mtk_adsp_sram_end, 0x06f00 }, /* uncached SRAM */
		{ SRAM_END,   0x06000 },          /* inaccessible */
		{ DRAM_START, 0xf7f00 },          /* cached DRAM */
		{ (uint32_t)&_mtk_adsp_dram_end, 0x06f00 }, /* uncached DRAM */
		{ DRAM_END,   0x06000 },          /* inaccessible top of mem */
	};

	/* Must write BACKWARDS FROM THE END to avoid introducing a
	 * non-monotonic segment at the current instruction fetch.  The
	 * exception triggers even if all the segments involved are
	 * disabled!
	 */
	int32_t nseg = ARRAY_SIZE(mpu);

	for (int32_t i = 31; i >= 32 - nseg; i--) {
		int32_t mpuidx = i - (32 - nseg);
		uint32_t addren = mpu[mpuidx][0] | 1;
		uint32_t segprot = (mpu[mpuidx][1]) | i;

		/* If an active pipelined instruction fetch is in the
		 * same segment, wptlb must be preceded by a memw in
		 * the same cache line.  Jumping to an aligned-by-8
		 * address ensures that the following two (3-byte)
		 * instructions are in the same 8 byte-aligned region.
		 */
		__asm__ volatile("  j 1f\n"
				 ".align 8\n"
				 "1:\n"
				 "  memw\n"
				 "  wptlb %1, %0"
				 :: "r"(addren), "r"(segprot));
	}
}
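
/* Debugging aid, sketched here rather than compiled: assuming the
 * standard Xtensa MPU read instructions are available, the entries
 * written above can be read back by index with rptlb0/rptlb1, which
 * helps when a region unexpectedly faults or fails to cache:
 *
 *	uint32_t as, at, idx = 31;
 *	__asm__ volatile("rptlb0 %0, %1" : "=r"(as) : "r"(idx));
 *	__asm__ volatile("rptlb1 %0, %1" : "=r"(at) : "r"(idx));
 *	// as: segment start address + enable bit; at: attribute word
 */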

/* Temporary console output, pending integration of a winstream
 * backend.  This simply appends a null-terminated string to an
 * otherwise unused 1M region of shared DRAM (it's a hole in the SOF
 * memory map before the DMA memory, so untouched by existing audio
 * firmware), making early debugging much easier: it can be read
 * directly out of /dev/mem (with e.g. dd | hexdump) and survives
 * device resets/panics/etc.  But it doesn't handle more than 1M of
 * output, there's no way to detect a reset of the stream, and it's
 * actually racy with device startup: if you read too early you'll see
 * the old run and not the new one.  And it's wasteful, even if this
 * device has a ton of usably-mapped DRAM.
 *
 * Also note that the storage for the buffer and length value gets
 * reset by the DRAM clear near the end of c_boot().  If you want to
 * use this for extremely early logging you'll need to stub out the
 * DRAM clear and also set buf[0] to 0 manually (as it isn't affected
 * by device reset).
 */
int arch_printk_char_out(int c)
{
	char volatile * const buf = (void *)LOG_BASE;
	const size_t max = LOG_LEN - 4;
	int volatile * const len = (int *)&buf[max];

	/* Stop one byte early so the trailing NUL never overwrites the
	 * length word stored at the end of the region.
	 */
	if (*len < max - 1) {
		buf[*len + 1] = 0;
		buf[(*len)++] = c;
	}
	return 0;
}
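
/* For example, with the MT8196 addresses above, the host can dump the
 * log (assuming /dev/mem access is permitted) with something like:
 *
 *   dd if=/dev/mem bs=4096 skip=$((0x90580000 / 4096)) \
 *      count=$((0x80000 / 4096)) status=none | hexdump -C
 */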
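
/* C entry point, called from the asm boot vector above with a
 * temporary stack and no BSS, vectors or caches set up yet.
 */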
void c_boot(void)
{
	extern char _bss_start, _bss_end, z_xtensa_vecbase; /* Linker-emitted */
	uint32_t memctl = 0xffffff00; /* enable all caches */

	/* Clear bss before doing anything else: device memory is
	 * persistent across resets (!) and we'd like our static
	 * variables to be actually zero.  Do this without using
	 * memset() out of pedantry (because we don't know which libc
	 * is in use or whether it requires statics).
	 */
	for (char *p = &_bss_start; p < &_bss_end; p++) {
		*p = 0;
	}

	/* Set up MPU memory regions, both for protection and to
	 * enable caching (the hardware default is "uncached rwx
	 * memory everywhere").
	 */
	enable_mpu();

	/* But the CPU core won't actually use the cache without MEMCTL... */
	__asm__ volatile("wsr %0, MEMCTL; rsync" :: "r"(memctl));

	/* Need the vector base set to receive exceptions and
	 * interrupts (including register window exceptions, meaning
	 * we can't make C function calls until this is done!)
	 */
	__asm__ volatile("wsr %0, VECBASE; rsync" :: "r"(&z_xtensa_vecbase));

#ifdef CONFIG_SOC_SERIES_MT8195
	mtk_adsp_cpu_freq_init();
#endif

	/* Likewise, memory power is external to the device, and the
	 * kernel SOF loader doesn't zero it, so zero our unlinked
	 * memory to prevent possible pollution from previous runs.
	 * This region is uncached, no need to flush.
	 */
	memset(_mtk_adsp_sram_end, 0, SRAM_END - (uint32_t)&_mtk_adsp_sram_end);
	memset(_mtk_adsp_dram_end, 0, DRAM_END - (uint32_t)&_mtk_adsp_dram_end);

	/* Clear pending interrupts.  Note that this hardware has a
	 * habit of starting with all its timer interrupts flagged.
	 * These have to be cleared by writing to the equivalent
	 * CCOMPAREn register.  Assumes XCHAL_NUM_TIMERS == 3...
	 */
	uint32_t val = 0;

	__asm__ volatile("wsr %0, CCOMPARE0" :: "r"(val));
	__asm__ volatile("wsr %0, CCOMPARE1" :: "r"(val));
	__asm__ volatile("wsr %0, CCOMPARE2" :: "r"(val));
	__ASSERT_NO_MSG(XCHAL_NUM_TIMERS == 3);
	val = 0xffffffff;
	__asm__ volatile("wsr %0, INTCLEAR" :: "r"(val));

	/* Default console; a driver can override this later */
	__stdout_hook_install(arch_printk_char_out);

#ifdef CONFIG_SOC_MT8196
	mt8196_intc_init();
#endif

	void z_prep_c(void);
	z_prep_c();
}