// SPDX-License-Identifier: BSD-3-Clause
//
// Copyright(c) 2018 Intel Corporation. All rights reserved.
//
// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
//         Keyon Jie <yang.jie@linux.intel.com>
//         Rander Wang <rander.wang@intel.com>
//         Janusz Jankowski <janusz.jankowski@linux.intel.com>

#include <cavs/version.h>
#if (CONFIG_CAVS_LPS)
#include <cavs/lps_wait.h>
#endif
#include <cavs/mem_window.h>
#include <sof/common.h>
#include <sof/compiler_info.h>
#include <sof/debug/debug.h>
#include <sof/drivers/dw-dma.h>
#include <sof/drivers/idc.h>
#include <sof/drivers/interrupt.h>
#include <sof/ipc/common.h>
#include <sof/drivers/timer.h>
#include <sof/fw-ready-metadata.h>
#include <sof/lib/agent.h>
#include <sof/lib/cache.h>
#include <sof/lib/clk.h>
#include <sof/lib/cpu.h>
#include <sof/lib/dai.h>
#include <sof/lib/dma.h>
#include <sof/lib/io.h>
#include <sof/lib/mailbox.h>
#include <sof/lib/memory.h>
#include <sof/lib/mm_heap.h>
#include <sof/lib/notifier.h>
#include <sof/lib/pm_runtime.h>
#include <sof/lib/wait.h>
#include <sof/platform.h>
#include <sof/schedule/edf_schedule.h>
#include <sof/schedule/ll_schedule.h>
#include <sof/schedule/ll_schedule_domain.h>
#include <sof/trace/dma-trace.h>
#include <sof/trace/trace.h>
#include <ipc/header.h>
#include <ipc/info.h>
#include <kernel/abi.h>
#include <kernel/ext_manifest.h>

#include <version.h>
#include <errno.h>
#include <stdint.h>

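/*
 * FW ready message: placed in the dedicated .fw_ready section and sent
 * to the host by platform_boot_complete() below (via the DSP mailbox
 * and IPC doorbell, or over SPI on CONFIG_DW_SPI platforms).
 */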
static const struct sof_ipc_fw_ready ready
	__section(".fw_ready") = {
	.hdr = {
		.cmd = SOF_IPC_FW_READY,
		.size = sizeof(struct sof_ipc_fw_ready),
	},
	.version = {
		.hdr.size = sizeof(struct sof_ipc_fw_version),
		.micro = SOF_MICRO,
		.minor = SOF_MINOR,
		.major = SOF_MAJOR,
/* opt-in; reproducible build by default */
#if BLD_COUNTERS
		.build = SOF_BUILD, /* See version-build-counter.cmake */
		.date = __DATE__,
		.time = __TIME__,
#else
		.build = -1,
		.date = "dtermin.\0",
		.time = "fwready.\0",
#endif
		.tag = SOF_TAG,
		.abi_version = SOF_ABI_VERSION,
		.src_hash = SOF_SRC_HASH,
	},
	.flags = DEBUG_SET_FW_READY_FLAGS,
};

#if CONFIG_MEM_WND
#define SRAM_WINDOW_HOST_OFFSET(x) (0x80000 + x * 0x20000)

#define NUM_WINDOWS 7

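/*
 * Host-visible SRAM window layout, emitted into the extended manifest
 * (.fw_metadata) and reported to the host as a SOF_IPC_EXT_WINDOW
 * extended header: SW regs, up/down mailboxes, debug, exception,
 * stream and trace regions.
 */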
const struct ext_man_windows xsram_window
		__aligned(EXT_MAN_ALIGN) __section(".fw_metadata") __unused = {
	.hdr = {
		.type = EXT_MAN_ELEM_WINDOW,
		.elem_size = ALIGN_UP_COMPILE(sizeof(struct ext_man_windows), EXT_MAN_ALIGN),
	},
	.window = {
		.ext_hdr	= {
			.hdr.cmd = SOF_IPC_FW_READY,
			.hdr.size = sizeof(struct sof_ipc_window),
			.type	= SOF_IPC_EXT_WINDOW,
		},
		.num_windows	= NUM_WINDOWS,
		.window	= {
			{
				.type	= SOF_IPC_REGION_REGS,
				.id	= 0,	/* map to host window 0 */
				.flags	= 0, // TODO: set later
				.size	= MAILBOX_SW_REG_SIZE,
				.offset	= 0,
			},
			{
				.type	= SOF_IPC_REGION_UPBOX,
				.id	= 0,	/* map to host window 0 */
				.flags	= 0, // TODO: set later
				.size	= MAILBOX_DSPBOX_SIZE,
				.offset	= MAILBOX_SW_REG_SIZE,
			},
			{
				.type	= SOF_IPC_REGION_DOWNBOX,
				.id	= 1,	/* map to host window 1 */
				.flags	= 0, // TODO: set later
				.size	= MAILBOX_HOSTBOX_SIZE,
				.offset	= 0,
			},
			{
				.type	= SOF_IPC_REGION_DEBUG,
				.id	= 2,	/* map to host window 2 */
				.flags	= 0, // TODO: set later
				.size	= MAILBOX_DEBUG_SIZE,
				.offset	= 0,
			},
			{
				.type	= SOF_IPC_REGION_EXCEPTION,
				.id	= 2,	/* map to host window 2 */
				.flags	= 0, // TODO: set later
				.size	= MAILBOX_EXCEPTION_SIZE,
				.offset	= MAILBOX_EXCEPTION_OFFSET,
			},
			{
				.type	= SOF_IPC_REGION_STREAM,
				.id	= 2,	/* map to host window 2 */
				.flags	= 0, // TODO: set later
				.size	= MAILBOX_STREAM_SIZE,
				.offset	= MAILBOX_STREAM_OFFSET,
			},
			{
				.type	= SOF_IPC_REGION_TRACE,
				.id	= 3,	/* map to host window 3 */
				.flags	= 0, // TODO: set later
				.size	= MAILBOX_TRACE_SIZE,
				.offset	= 0,
			},
		},
	},
};
#endif

#if CONFIG_CANNONLAKE || CONFIG_ICELAKE || CONFIG_TIGERLAKE
#if CONFIG_CAVS_LPRO_ONLY
#define CAVS_DEFAULT_RO		SHIM_CLKCTL_RLROSCC
#define CAVS_DEFAULT_RO_FOR_MEM	SHIM_CLKCTL_OCS_LP_RING
#else
#define CAVS_DEFAULT_RO		SHIM_CLKCTL_RHROSCC
#define CAVS_DEFAULT_RO_FOR_MEM	SHIM_CLKCTL_OCS_HP_RING
#endif
#endif

#if CONFIG_DW_GPIO

#include <sof/drivers/gpio.h>

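/*
 * Pin mux configuration for each DesignWare GPIO pin: mux_id selects
 * one of the IOMUX control registers (see iomux_data below) and
 * bit/mask/fn describe the field to program for that pin.
 */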
const struct gpio_pin_config gpio_data[] = {
	{	/* GPIO0 */
		.mux_id = 1,
		.mux_config = {.bit = 0, .mask = 3, .fn = 1},
	}, {	/* GPIO1 */
		.mux_id = 1,
		.mux_config = {.bit = 2, .mask = 3, .fn = 1},
	}, {	/* GPIO2 */
		.mux_id = 1,
		.mux_config = {.bit = 4, .mask = 3, .fn = 1},
	}, {	/* GPIO3 */
		.mux_id = 1,
		.mux_config = {.bit = 6, .mask = 3, .fn = 1},
	}, {	/* GPIO4 */
		.mux_id = 1,
		.mux_config = {.bit = 8, .mask = 3, .fn = 1},
	}, {	/* GPIO5 */
		.mux_id = 1,
		.mux_config = {.bit = 10, .mask = 3, .fn = 1},
	}, {	/* GPIO6 */
		.mux_id = 1,
		.mux_config = {.bit = 12, .mask = 3, .fn = 1},
	}, {	/* GPIO7 */
		.mux_id = 1,
		.mux_config = {.bit = 14, .mask = 3, .fn = 1},
	}, {	/* GPIO8 */
		.mux_id = 1,
		.mux_config = {.bit = 16, .mask = 1, .fn = 1},
	}, {	/* GPIO9 */
		.mux_id = 0,
		.mux_config = {.bit = 11, .mask = 1, .fn = 1},
	}, {	/* GPIO10 */
		.mux_id = 0,
		.mux_config = {.bit = 11, .mask = 1, .fn = 1},
	}, {	/* GPIO11 */
		.mux_id = 0,
		.mux_config = {.bit = 11, .mask = 1, .fn = 1},
	}, {	/* GPIO12 */
		.mux_id = 0,
		.mux_config = {.bit = 11, .mask = 1, .fn = 1},
	}, {	/* GPIO13 */
		.mux_id = 0,
		.mux_config = {.bit = 0, .mask = 1, .fn = 1},
	}, {	/* GPIO14 */
		.mux_id = 0,
		.mux_config = {.bit = 1, .mask = 1, .fn = 1},
	}, {	/* GPIO15 */
		.mux_id = 0,
		.mux_config = {.bit = 9, .mask = 1, .fn = 1},
	}, {	/* GPIO16 */
		.mux_id = 0,
		.mux_config = {.bit = 9, .mask = 1, .fn = 1},
	}, {	/* GPIO17 */
		.mux_id = 0,
		.mux_config = {.bit = 9, .mask = 1, .fn = 1},
	}, {	/* GPIO18 */
		.mux_id = 0,
		.mux_config = {.bit = 9, .mask = 1, .fn = 1},
	}, {	/* GPIO19 */
		.mux_id = 0,
		.mux_config = {.bit = 10, .mask = 1, .fn = 1},
	}, {	/* GPIO20 */
		.mux_id = 0,
		.mux_config = {.bit = 10, .mask = 1, .fn = 1},
	}, {	/* GPIO21 */
		.mux_id = 0,
		.mux_config = {.bit = 10, .mask = 1, .fn = 1},
	}, {	/* GPIO22 */
		.mux_id = 0,
		.mux_config = {.bit = 10, .mask = 1, .fn = 1},
	}, {	/* GPIO23 */
		.mux_id = 0,
		.mux_config = {.bit = 16, .mask = 1, .fn = 1},
	}, {	/* GPIO24 */
		.mux_id = 0,
		.mux_config = {.bit = 16, .mask = 1, .fn = 1},
	}, {	/* GPIO25 */
		.mux_id = 0,
		.mux_config = {.bit = 26, .mask = 1, .fn = 1},
	},
};

const int n_gpios = ARRAY_SIZE(gpio_data);

#if CONFIG_INTEL_IOMUX

#include <sof/drivers/iomux.h>

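/* the three IOMUX control registers in the external control register block */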
struct iomux iomux_data[] = {
	{.base = EXT_CTRL_BASE + 0x30,},
	{.base = EXT_CTRL_BASE + 0x34,},
	{.base = EXT_CTRL_BASE + 0x38,},
};

const int n_iomux = ARRAY_SIZE(iomux_data);

#endif

#endif

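/* platform (external) timer shared by all cores; the per-core timers are set up in platform_init() */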
SHARED_DATA struct timer timer = {
	.id = TIMER3, /* external timer */
	.irq = IRQ_EXT_TSTAMP0_LVL2,
	.irq_name = irq_name_level2,
};

SHARED_DATA struct timer arch_timers[CONFIG_CORE_COUNT];

#if CONFIG_DW_SPI

#include <sof/drivers/spi.h>

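/*
 * DesignWare SPI slave used to deliver the FW ready message to the
 * host instead of the mailbox/IPC doorbell (see the CONFIG_DW_SPI
 * variant of platform_boot_complete() below); the RX/TX FIFOs use the
 * SSI DMA handshakes.
 */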
static struct spi_platform_data spi = {
	.base		= DW_SPI_SLAVE_BASE,
	.type		= SOF_SPI_INTEL_SLAVE,
	.fifo[SPI_DIR_RX] = {
		.handshake	= DMA_HANDSHAKE_SSI_RX,
	},
	.fifo[SPI_DIR_TX] = {
		.handshake	= DMA_HANDSHAKE_SSI_TX,
	}
};

int platform_boot_complete(uint32_t boot_message)
{
	return spi_push(spi_get(SOF_SPI_INTEL_SLAVE), &ready, sizeof(ready));
}

#else

int platform_boot_complete(uint32_t boot_message)
{
	ipc_cmd_hdr header;
	uint32_t data;

#if CONFIG_TIGERLAKE && !CONFIG_CAVS_LPRO_ONLY
	/* TGL specific HW recommended flow */
	pm_runtime_get(PM_RUNTIME_DSP, PWRD_BY_HPRO | (CONFIG_CORE_COUNT - 1));
#endif

	mailbox_dspbox_write(0, &ready, sizeof(ready));

	/* get any IPC specific boot message and optional data */
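	/* here the optional data is host window 0's offset, in 4 KiB pages (hence >> 12) */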
	data = SRAM_WINDOW_HOST_OFFSET(0) >> 12;
	ipc_boot_complete_msg(&header, &data);

	/* tell host we are ready */
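	/* cAVS 1.5 uses the DIPCI/DIPCIE doorbell registers, cAVS 1.8+ uses DIPCIDR/DIPCIDD */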
#if CAVS_VERSION == CAVS_VERSION_1_5
	ipc_write(IPC_DIPCIE, data);
	ipc_write(IPC_DIPCI, IPC_DIPCI_BUSY | header);
#else
	ipc_write(IPC_DIPCIDD, data);
	ipc_write(IPC_DIPCIDR, IPC_DIPCIDR_BUSY | header);
#endif
	return 0;
}

#endif

#if CAVS_VERSION >= CAVS_VERSION_1_8
/* init HW */
static void platform_init_hw(void)
{
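	/*
	 * One-time DSP init register setup for cAVS 1.8+: GENO,
	 * DMIC/I2S IO (IOPO), ALH (ALHO) and the channel/control select
	 * flags of both LP GP-DMA instances, as named by the flags below.
	 */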
	io_reg_write(DSP_INIT_GENO,
		GENO_MDIVOSEL | GENO_DIOPTOSEL);

	io_reg_write(DSP_INIT_IOPO,
		IOPO_DMIC_FLAG | IOPO_I2S_FLAG);

	io_reg_write(DSP_INIT_ALHO,
		ALHO_ASO_FLAG | ALHO_CSO_FLAG);

	io_reg_write(DSP_INIT_LPGPDMA(0),
		LPGPDMA_CHOSEL_FLAG | LPGPDMA_CTLOSEL_FLAG);
	io_reg_write(DSP_INIT_LPGPDMA(1),
		LPGPDMA_CHOSEL_FLAG | LPGPDMA_CTLOSEL_FLAG);
}
#endif

/* Runs on the primary core only */
int platform_init(struct sof *sof)
{
#if CONFIG_DW_SPI
	struct spi *spi_dev;
#endif
	int ret;
	int i;

	sof->platform_timer = cache_to_uncache(&timer);
	sof->cpu_timers = (struct timer *)cache_to_uncache(&arch_timers);

	for (i = 0; i < CONFIG_CORE_COUNT; i++)
		sof->cpu_timers[i] = (struct timer) {
			.id = TIMER1, /* internal timer */
			.irq = IRQ_NUM_TIMER2,
		};

	/* Turn off memory for all unused cores */
	for (i = 0; i < CONFIG_CORE_COUNT; i++)
		if (i != PLATFORM_PRIMARY_CORE_ID)
			pm_runtime_put(CORE_MEMORY_POW, i);

	/* pm runtime already initialized, request the DSP to stay in D0
	 * until we are allowed to do full power gating (by the IPC req).
	 */
	pm_runtime_disable(PM_RUNTIME_DSP, 0);

#if CONFIG_CANNONLAKE || CONFIG_ICELAKE || CONFIG_SUECREEK || CONFIG_TIGERLAKE
	trace_point(TRACE_BOOT_PLATFORM_ENTRY);
	platform_init_hw();
#endif

	trace_point(TRACE_BOOT_PLATFORM_IRQ);
	platform_interrupt_init();

#if CONFIG_MEM_WND
	trace_point(TRACE_BOOT_PLATFORM_MBOX);
	platform_memory_windows_init(MEM_WND_INIT_CLEAR);
#endif

#ifndef __ZEPHYR__
	/* init timers, clocks and schedulers */
	trace_point(TRACE_BOOT_PLATFORM_TIMER);
	platform_timer_start(sof->platform_timer);
#endif

	trace_point(TRACE_BOOT_PLATFORM_CLOCK);
	platform_clock_init(sof);

	trace_point(TRACE_BOOT_PLATFORM_SCHED);
	scheduler_init_edf();

	/* init low latency timer domain and scheduler */
	sof->platform_timer_domain =
		timer_domain_init(sof->platform_timer, PLATFORM_DEFAULT_CLOCK);
	scheduler_init_ll(sof->platform_timer_domain);

	/* init the system agent */
	trace_point(TRACE_BOOT_PLATFORM_AGENT);
	sa_init(sof, CONFIG_SYSTICK_PERIOD);

	/* Set CPU to max frequency for booting (single shim_write below) */
	trace_point(TRACE_BOOT_PLATFORM_CPU_FREQ);
#if CONFIG_APOLLOLAKE
	/* initialize PM for boot */

	/* TODO: there are two clk freqs CRO & CRO/4
	 * Running on CRO all the time atm
	 */

	shim_write(SHIM_CLKCTL,
		   SHIM_CLKCTL_HDCS_PLL | /* HP domain clocked by PLL */
		   SHIM_CLKCTL_LDCS_PLL | /* LP domain clocked by PLL */
		   SHIM_CLKCTL_DPCS_DIV1(0) | /* Core 0 clk not divided */
		   SHIM_CLKCTL_DPCS_DIV1(1) | /* Core 1 clk not divided */
		   SHIM_CLKCTL_HPMPCS_DIV2 | /* HP mem clock div by 2 */
		   SHIM_CLKCTL_LPMPCS_DIV4 | /* LP mem clock div by 4 */
		   SHIM_CLKCTL_TCPAPLLS_DIS |
		   SHIM_CLKCTL_TCPLCG_DIS(0) | SHIM_CLKCTL_TCPLCG_DIS(1));

	shim_write(SHIM_LPSCTL, shim_read(SHIM_LPSCTL));

#elif CONFIG_CANNONLAKE || CONFIG_ICELAKE || CONFIG_TIGERLAKE

	/* initialize PM for boot */

	/* request configured ring oscillator and wait for status ready */
	shim_write(SHIM_CLKCTL, shim_read(SHIM_CLKCTL) | CAVS_DEFAULT_RO);
	while (!(shim_read(SHIM_CLKSTS) & CAVS_DEFAULT_RO))
		idelay(16);

	shim_write(SHIM_CLKCTL,
		   CAVS_DEFAULT_RO | /* Request configured RING Osc */
		   CAVS_DEFAULT_RO_FOR_MEM | /* Select configured
					     * RING Oscillator Clk for memory
					     */
		   SHIM_CLKCTL_HMCS_DIV2 | /* HP mem clock div by 2 */
		   SHIM_CLKCTL_LMCS_DIV4 | /* LP mem clock div by 4 */
		   SHIM_CLKCTL_TCPLCG_DIS_ALL); /* Allow Local Clk Gating */

	/* prevent LP GPDMA 0&1 clock gating */
	shim_write(SHIM_GPDMA_CLKCTL(0), SHIM_CLKCTL_LPGPDMAFDCGB);
	shim_write(SHIM_GPDMA_CLKCTL(1), SHIM_CLKCTL_LPGPDMAFDCGB);

	/* prevent DSP Common power gating */
	pm_runtime_get(PM_RUNTIME_DSP, PLATFORM_PRIMARY_CORE_ID);

#if CONFIG_DSP_RESIDENCY_COUNTERS
#if CONFIG_CAVS_LPRO_ONLY
	init_dsp_r_state(r1_r_state);
#else
	init_dsp_r_state(r0_r_state);
#endif
#endif

#elif CONFIG_SUECREEK
	/* TODO: need to merge as for APL */
	clock_set_freq(CLK_CPU(cpu_get_id()), CLK_MAX_CPU_HZ);

	/* prevent Core0 clock gating. */
	shim_write(SHIM_CLKCTL, shim_read(SHIM_CLKCTL) |
		SHIM_CLKCTL_TCPLCG(0));

	/* prevent LP GPDMA 0&1 clock gating */
	shim_write(SHIM_GPDMA_CLKCTL(0), SHIM_CLKCTL_LPGPDMAFDCGB);
	shim_write(SHIM_GPDMA_CLKCTL(1), SHIM_CLKCTL_LPGPDMAFDCGB);

	/* prevent DSP Common power gating */
	pm_runtime_get(PM_RUNTIME_DSP, PLATFORM_PRIMARY_CORE_ID);
#endif

	/* init DMACs */
	trace_point(TRACE_BOOT_PLATFORM_DMA);
	ret = dmac_init(sof);
	if (ret < 0)
		return ret;

	/* init low latency single channel DW-DMA domain and scheduler */
	sof->platform_dma_domain =
		dma_single_chan_domain_init
			(&sof->dma_info->dma_array[PLATFORM_DW_DMA_INDEX],
			 PLATFORM_NUM_DW_DMACS,
			 PLATFORM_DEFAULT_CLOCK);
	scheduler_init_ll(sof->platform_dma_domain);

	/* initialize the host IPC mechanisms */
	trace_point(TRACE_BOOT_PLATFORM_IPC);
	ipc_init(sof);

	/* initialize IDC mechanism */
	trace_point(TRACE_BOOT_PLATFORM_IDC);
	ret = idc_init();
	if (ret < 0)
		return ret;

	/* init DAIs */
	trace_point(TRACE_BOOT_PLATFORM_DAI);
	ret = dai_init(sof);
	if (ret < 0)
		return ret;

#if CONFIG_DW_SPI
	/* initialize the SPI slave */
	trace_point(TRACE_BOOT_PLATFORM_SPI);
	spi_init();
	ret = spi_install(&spi, 1);
	if (ret < 0)
		return ret;

	spi_dev = spi_get(SOF_SPI_INTEL_SLAVE);
	if (!spi_dev)
		return -ENODEV;

	/* initialize the SPI slave module */
	ret = spi_probe(spi_dev);
	if (ret < 0)
		return ret;
#elif CONFIG_TRACE
	/* Initialize DMA for Trace */
	trace_point(TRACE_BOOT_PLATFORM_DMA_TRACE);
	ret = dma_trace_init_complete(sof->dmat);
	if (ret < 0)
		return ret;
#endif

	/* show heap status */
	heap_trace_all(1);

	return 0;
}

#ifndef __ZEPHYR__
void platform_wait_for_interrupt(int level)
{
	platform_clock_on_waiti();

#if (CONFIG_CAVS_LPS)
	if (pm_runtime_is_active(PM_RUNTIME_DSP, PLATFORM_PRIMARY_CORE_ID))
		arch_wait_for_interrupt(level);
	else
		lps_wait_for_interrupt(level);
#else
	arch_wait_for_interrupt(level);
#endif
}
#endif

/* These structs and macros are from the ROM code header
 * on cAVS platforms; please keep them immutable.
 */

#define ADSP_IMR_MAGIC_VALUE		0x02468ACE
#define IMR_L1_CACHE_ADDRESS		0xB0000000

struct imr_header {
	uint32_t adsp_imr_magic;
	uint32_t structure_version;
	uint32_t structure_size;
	uint32_t imr_state;
	uint32_t imr_size;
	void *imr_restore_vector;
};

struct imr_state {
	struct imr_header header;
	uint8_t reserved[0x1000 - sizeof(struct imr_header)];
};

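/* IMR layout as expected by the ROM: a 4 KiB CSS area followed by the 4 KiB IMR state page */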
struct imr_layout {
	uint8_t     css_reserved[0x1000];
	struct imr_state    imr_state;
};

/* cAVS ROM structs and macros end */

static void imr_layout_update(void *vector)
{
	struct imr_layout *imr_layout = (struct imr_layout *)(IMR_L1_CACHE_ADDRESS);

	/* Update the IMR layout and write it back to uncached memory
	 * for the ROM code to use. On the next run the ROM reads this
	 * from IMR and decides (e.g. combined with checking whether the
	 * FW_PURGE IPC was received from the host) whether it can reuse
	 * the previous IMR FW directly. This is a one-way host->FW->ROM
	 * configuration, so no symmetric step is needed in
	 * platform_resume() to clear it.
	 */
	dcache_invalidate_region(imr_layout, sizeof(*imr_layout));
	imr_layout->imr_state.header.adsp_imr_magic = ADSP_IMR_MAGIC_VALUE;
	imr_layout->imr_state.header.imr_restore_vector = vector;
	dcache_writeback_region(imr_layout, sizeof(*imr_layout));
}

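/*
 * Called on context save: record the boot loader entry point as the
 * IMR restore vector so the ROM can reuse the FW image kept in IMR on
 * the next run (see imr_layout_update() above).
 */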
int platform_context_save(struct sof *sof)
{
	imr_layout_update((void *)IMR_BOOT_LDR_TEXT_ENTRY_BASE);

	return 0;
}