/*
 * Copyright (c) 2023 Intel Corporation
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include "aon_share.h"
#include "aon_defs.h"
#include "../ish_dma.h"

/**
 * Due to the very limited AON memory size (typically 8KB in total), we don't
 * want to define and allocate all 256 entries for the aontask's IDT, which
 * would need almost 2KB (256 * 8).  Instead, only the needed IDT entries are
 * defined: AON_IDT_ENTRY_VEC_FIRST ~ AON_IDT_ENTRY_VEC_LAST
 */
#define AON_IDT_ENTRY_VEC_FIRST		SEDI_VEC_RESET_PREP
#define AON_IDT_ENTRY_VEC_LAST		SEDI_VEC_PMU2IOAPIC

static void handle_reset(enum ish_pm_state pm_state);

/* ISR for PMU wakeup interrupt */
static void pmu_wakeup_isr(void)
{
	/**
	 * Indicate completion of servicing the interrupt to IOAPIC first,
	 * then indicate completion of servicing the interrupt to LAPIC
	 */
	write32(SEDI_IOAPIC_EOI, SEDI_VEC_PMU2IOAPIC);
	write32(LAPIC_EOI, 0x0);

	__asm__ volatile ("iret;");

	__builtin_unreachable();
}

/* ISR for reset prep interrupt */
static void reset_prep_isr(void)
{
	/* mask reset prep avail interrupt */
	write32(PMU_RST_PREP, PMU_RST_PREP_INT_MASK);

	/**
	 * Indicate completion of servicing the interrupt to IOAPIC first,
	 * then indicate completion of servicing the interrupt to LAPIC
	 */
	write32(SEDI_IOAPIC_EOI, SEDI_VEC_RESET_PREP);
	write32(LAPIC_EOI, 0x0);

	handle_reset(ISH_PM_STATE_RESET_PREP);

	__builtin_unreachable();
}

/**
 * Use a static data array for the aon IDT, and set up the IDT header for
 * the IDTR register.
 *
 * Since on x86 the IDT entry index (counting from 0) is also the interrupt
 * vector number, the IDT header's base still needs to point to entry 0,
 * and its limit must be counted from entry 0.
 */

static struct idt_entry aon_idt[AON_IDT_ENTRY_VEC_LAST -
				AON_IDT_ENTRY_VEC_FIRST + 1];

static struct idt_header aon_idt_hdr = {

	.limit = (sizeof(struct idt_entry) * (AON_IDT_ENTRY_VEC_LAST + 1)) - 1,
	.entries = (struct idt_entry *)((uint32_t)&aon_idt -
			(sizeof(struct idt_entry) * AON_IDT_ENTRY_VEC_FIRST))
};
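
/*
 * Worked example of the base-pointer trick above: the CPU locates the gate
 * for vector N at IDTR.base + N * 8.  Biasing '.entries' downwards by
 * AON_IDT_ENTRY_VEC_FIRST * 8 bytes makes the lookup for the first
 * supported vector land exactly on aon_idt[0], even though entries 0 ~
 * (AON_IDT_ENTRY_VEC_FIRST - 1) were never allocated:
 *
 *   base + AON_IDT_ENTRY_VEC_FIRST * 8
 *     = (&aon_idt - AON_IDT_ENTRY_VEC_FIRST * 8) + AON_IDT_ENTRY_VEC_FIRST * 8
 *     = &aon_idt
 */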

/* TSS segment for aon task */
struct tss_entry aon_tss = {
	.prev_task_link = 0,
	.reserved1 = 0,
	.esp0 = (uint8_t *)(CONFIG_AON_PERSISTENT_BASE - AON_SP_RESERVED),
	/* entry 1 in LDT for data segment */
	.ss0 = 0xc,
	.reserved2 = 0,
	.esp1 = 0,
	.ss1 = 0,
	.reserved3 = 0,
	.esp2 = 0,
	.ss2 = 0,
	.reserved4 = 0,
	.cr3 = 0,
	/* task execute entry point */
	.eip = (uint32_t)&ish_aon_main,
	.eflags = 0,
	.eax = 0,
	.ecx = 0,
	.edx = 0,
	.ebx = 0,
	/* set stack top pointer at the end of usable aon memory */
	.esp = CONFIG_AON_PERSISTENT_BASE - AON_SP_RESERVED,
	.ebp = CONFIG_AON_PERSISTENT_BASE - AON_SP_RESERVED,
	.esi = 0,
	.edi = 0,
	/* entry 1 in LDT for data segment */
	.es = 0xc,
	.reserved5 = 0,
	/* entry 0 in LDT for code segment */
	.cs = 0x4,
	.reserved6 = 0,
	/* entry 1 in LDT for data segment */
	.ss = 0xc,
	.reserved7 = 0,
	/* entry 1 in LDT for data segment */
	.ds = 0xc,
	.reserved8 = 0,
	/* entry 1 in LDT for data segment */
	.fs = 0xc,
	.reserved9 = 0,
	/* entry 1 in LDT for data segment */
	.gs = 0xc,
	.reserved10 = 0,
	.ldt_seg_selector = 0,
	.reserved11 = 0,
	.trap_debug = 0,

	/**
	 * The TSS limit is specified as 0x67.  To allow the task to access
	 * I/O ports using IN/OUT instructions, the 'iomap_base_addr' field
	 * must be greater than or equal to the TSS limit; see
	 * 'I/O port permissions' on
	 *	https://en.wikipedia.org/wiki/Task_state_segment
	 */
	.iomap_base_addr = GDT_DESC_TSS_LIMIT
};

/**
 * define code and data LDT segments for aontask
 * code : base = 0x0, limit = 0xFFFFFFFF, Present = 1, DPL = 0
 * data : base = 0x0, limit = 0xFFFFFFFF, Present = 1, DPL = 0
 */
static ldt_entry aon_ldt[2] = {

	/**
	 * entry 0 for code segment
	 * base: 0x0
	 * limit: 0xFFFFFFFF
	 * flag: 0x9B, Present = 1, DPL = 0, code segment
	 */
	{
		.dword_lo = GEN_GDT_DESC_LO(0x0, 0xFFFFFFFF,
				GDT_DESC_CODE_FLAGS),

		.dword_up = GEN_GDT_DESC_UP(0x0, 0xFFFFFFFF,
				GDT_DESC_CODE_FLAGS)
	},

	/**
	 * entry 1 for data segment
	 * base: 0x0
	 * limit: 0xFFFFFFFF
	 * flag: 0x93, Present = 1, DPL = 0, data segment
	 */
	{
		.dword_lo = GEN_GDT_DESC_LO(0x0, 0xFFFFFFFF,
				GDT_DESC_DATA_FLAGS),

		.dword_up = GEN_GDT_DESC_UP(0x0, 0xFFFFFFFF,
				GDT_DESC_DATA_FLAGS)
	}
};
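
/*
 * How the 0x4/0xc selector values used in aon_tss map onto these two LDT
 * entries: an x86 segment selector is (index << 3) | TI | RPL, where TI
 * (bit 2) selects the LDT and RPL is the requested privilege level.  So:
 *
 *   0x4 = (0 << 3) | 4 | 0  ->  LDT entry 0, the code segment
 *   0xc = (1 << 3) | 4 | 0  ->  LDT entry 1, the data segment
 */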

/* shared data structure between main FW and aon task */
struct ish_aon_share aon_share = {
	.magic_id = AON_MAGIC_ID,
	.error_count = 0,
	.last_error = AON_SUCCESS,
	.aon_tss = &aon_tss,
	.aon_ldt = &aon_ldt[0],
	.aon_ldt_size = sizeof(aon_ldt),
};

/* snowball structure */
#define SNOWBALL_BASE	(CONFIG_AON_PERSISTENT_BASE + 256)

struct snowball_struct *snowball = (void *)SNOWBALL_BASE;


/* In IMR DDR, ISH FW image has a manifest header */
#define ISH_FW_IMAGE_MANIFEST_HEADER_SIZE (0x1000)

/* simple count based delay */
static inline void delay(uint32_t count)
{
	while (count)
		count--;
}

static inline void enable_dma_bcg(void)
{
	write32(CCU_BCG_DMA, 1);
}

static inline void disable_dma_bcg(void)
{
	write32(CCU_BCG_DMA, 0);
}
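
/*
 * BCG here is Block Clock Gating: writing 1 to CCU_BCG_DMA gates the DMA
 * engine's clock (saving power but making the engine inaccessible), and
 * writing 0 ungates it.  The DMA copies below are therefore always
 * bracketed by disable_dma_bcg()/enable_dma_bcg().
 */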

static int store_main_fw(void)
{
	int ret;
	uint64_t imr_fw_addr;
	uint64_t imr_fw_rw_addr;

	imr_fw_addr = (((uint64_t)snowball->uma_base_hi << 32) +
		       snowball->uma_base_lo +
		       snowball->fw_offset +
		       ISH_FW_IMAGE_MANIFEST_HEADER_SIZE);

	imr_fw_rw_addr = (imr_fw_addr
			  + aon_share.main_fw_rw_addr
			  - CONFIG_RAM_BASE);

	/* disable BCG (Block Clock Gating) for DMA, DMA can be accessed now */
	disable_dma_bcg();

	/* store main FW's read and write data region to IMR/UMA DDR */
	ret = ish_dma_copy(
		PAGING_CHAN,
		imr_fw_rw_addr,
		aon_share.main_fw_rw_addr,
		aon_share.main_fw_rw_size,
		SRAM_TO_UMA);

	/* enable BCG for DMA, DMA can't be accessed now */
	enable_dma_bcg();

	if (ret != DMA_RC_OK) {

		aon_share.last_error = AON_ERROR_DMA_FAILED;
		aon_share.error_count++;

		return AON_ERROR_DMA_FAILED;
	}

	return AON_SUCCESS;
}
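
/*
 * Address-mapping sketch shared by store_main_fw() and restore_main_fw():
 * the FW image in IMR/UMA DDR (located via the snowball structure, past
 * the 4KB manifest header) mirrors the SRAM layout, so an SRAM address
 * translates to its DDR backing store by a constant offset:
 *
 *   ddr_addr = imr_fw_addr + (sram_addr - CONFIG_RAM_BASE)
 */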

static int restore_main_fw(void)
{
	int ret;
	uint64_t imr_fw_addr;
	uint64_t imr_fw_ro_addr;
	uint64_t imr_fw_rw_addr;

	imr_fw_addr = (((uint64_t)snowball->uma_base_hi << 32) +
		       snowball->uma_base_lo +
		       snowball->fw_offset +
		       ISH_FW_IMAGE_MANIFEST_HEADER_SIZE);

	imr_fw_ro_addr = (imr_fw_addr
			  + aon_share.main_fw_ro_addr
			  - CONFIG_RAM_BASE);

	imr_fw_rw_addr = (imr_fw_addr
			  + aon_share.main_fw_rw_addr
			  - CONFIG_RAM_BASE);

	/* disable BCG (Block Clock Gating) for DMA, DMA can be accessed now */
	disable_dma_bcg();

	/* restore main FW's read only code and data region from IMR/UMA DDR */
	ret = ish_dma_copy(
		PAGING_CHAN,
		aon_share.main_fw_ro_addr,
		imr_fw_ro_addr,
		aon_share.main_fw_ro_size,
		UMA_TO_SRAM);

	if (ret != DMA_RC_OK) {

		aon_share.last_error = AON_ERROR_DMA_FAILED;
		aon_share.error_count++;

		/* enable BCG for DMA, DMA can't be accessed now */
		enable_dma_bcg();

		return AON_ERROR_DMA_FAILED;
	}

	/* restore main FW's read and write data region from IMR/UMA DDR */
	ret = ish_dma_copy(
			PAGING_CHAN,
			aon_share.main_fw_rw_addr,
			imr_fw_rw_addr,
			aon_share.main_fw_rw_size,
			UMA_TO_SRAM
			);

	/* enable BCG for DMA, DMA can't be accessed now */
	enable_dma_bcg();

	if (ret != DMA_RC_OK) {

		aon_share.last_error = AON_ERROR_DMA_FAILED;
		aon_share.error_count++;

		return AON_ERROR_DMA_FAILED;
	}

	return AON_SUCCESS;
}

#define SRAM_POWER_OFF_BANKS	CONFIG_RAM_BANKS

/* SRAM needs time to enter retention mode */
#define CYCLES_PER_US                  100
#define SRAM_RETENTION_US_DELAY	       5
#define SRAM_RETENTION_CYCLES_DELAY    (SRAM_RETENTION_US_DELAY * CYCLES_PER_US)

#ifdef CONFIG_SOC_INTEL_ISH_5_6_0
#define SRAM_WARM_UP_COUNTER	(1000)
#define SRAM_CTRL_ERASE_SIZE_BIT	2
#define SRAM_CTRL_ERASE_BYTE_TO_QWORD	3
#define SRAM_BANK_ERASE_SIZE                                                   \
	((CONFIG_RAM_BANK_SIZE >> SRAM_CTRL_ERASE_BYTE_TO_QWORD)               \
	 << SRAM_CTRL_ERASE_SIZE_BIT)
#define SRAM_TILES		(CONFIG_RAM_BANKS * 2)

static uint32_t sram_toggle_tile(uint32_t tile_id, uint32_t enable)
{
	uint32_t pmu_sram_val = read32(PMU_SRAM_PG_EN);
	uint32_t pmu_toggle_bit = (1 << tile_id);
	uint32_t u = 0;

	if (enable && (pmu_sram_val & pmu_toggle_bit)) {
		pmu_sram_val &= ~pmu_toggle_bit;
		write32(PMU_SRAM_PG_EN, pmu_sram_val);
		while (!(pmu_toggle_bit & read32(PMU_SRAM_PWR_STATUS)))
			;
		for (u = 0; u < SRAM_WARM_UP_COUNTER; ++u)
			__asm__ volatile ("nop");
	} else if (!enable && (~pmu_sram_val & pmu_toggle_bit)) {
		pmu_sram_val |= pmu_toggle_bit;
		write32(PMU_SRAM_PG_EN, pmu_sram_val);
		while ((pmu_toggle_bit & read32(PMU_SRAM_PWR_STATUS)))
			;
		for (u = 0; u < SRAM_WARM_UP_COUNTER; ++u)
			__asm__ volatile ("nop");
	} else {
		enable = 0;
	}
	return enable;
}
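
/*
 * Note on sram_toggle_tile(): in PMU_SRAM_PG_EN a set bit means the tile
 * is power gated, so enabling a tile clears its bit and disabling sets it.
 * The function returns nonzero only when an enable actually changed the
 * tile's state; sram_toggle_bank() uses that to erase a bank only on a
 * real power-up.
 */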

static void sram_toggle_bank(unsigned int bank_number, unsigned int enable)
{
	uint32_t tile_id = bank_number << 1;

	if (enable) {
		if (sram_toggle_tile(tile_id, enable) &&
		    sram_toggle_tile((tile_id + 1), enable)) {
			write32(ISH_SRAM_CTRL_ERASE_ADDR,
				CONFIG_RAM_BASE +
					bank_number * CONFIG_RAM_BANK_SIZE);
			write32(ISH_SRAM_CTRL_ERASE_CTRL, (SRAM_BANK_ERASE_SIZE | 0x1));
			while (read32(ISH_SRAM_CTRL_ERASE_CTRL) & 0x1)
				;
		}
	} else {
		sram_toggle_tile(tile_id, enable);
		sram_toggle_tile((tile_id + 1), enable);
	}

	write32(ISH_SRAM_CTRL_INTR, read32(ISH_SRAM_CTRL_INTR));
}

static void sram_power(int on)
{
	int i;

	for (i = 0; i < SRAM_POWER_OFF_BANKS; i++) {
		sram_toggle_bank(i, on);
	}
}
#else

/**
 * check SRAM bank i power gated status in PMU_SRAM_PG_EN register
 * 1: power gated 0: not power gated
 */
#define BANK_PG_STATUS(i)	(read32(PMU_SRAM_PG_EN) & (0x1 << (i)))

/* enable power gate of a SRAM bank */
#define BANK_PG_ENABLE(i)	(write32(PMU_SRAM_PG_EN, (read32(PMU_SRAM_PG_EN) | (0x1 << (i)))))

/* disable power gate of a SRAM bank */
#define BANK_PG_DISABLE(i)                                                     \
	(write32(PMU_SRAM_PG_EN, (read32(PMU_SRAM_PG_EN) & (~(0x1 << (i))))))

/**
 * check SRAM bank i disabled status in ISH_SRAM_CTRL_CSFGR register
 * 1: disabled 0: enabled
 */
#define BANK_DISABLE_STATUS(i)	(read32(ISH_SRAM_CTRL_CSFGR) & (0x1 << ((i) + 4)))

/* enable a SRAM bank in ISH_SRAM_CTRL_CSFGR register */
#define BANK_ENABLE(i)                                                         \
	(write32(ISH_SRAM_CTRL_CSFGR,                                          \
		 (read32(ISH_SRAM_CTRL_CSFGR) & (~(0x1 << ((i) + 4))))))

/* disable a SRAM bank in ISH_SRAM_CTRL_CSFGR register */
#define BANK_DISABLE(i)                                                        \
	(write32(ISH_SRAM_CTRL_CSFGR,                                          \
		 (read32(ISH_SRAM_CTRL_CSFGR) | (0x1 << ((i) + 4)))))

/* SRAM needs time to warm up after power on */
#define SRAM_WARM_UP_DELAY_CNT		10

static void sram_power(int on)
{
	int i;
	uint32_t bank_size;
	uint32_t sram_addr;
	uint32_t erase_cfg;

	bank_size = CONFIG_RAM_BANK_SIZE;
	sram_addr = CONFIG_RAM_BASE;

	/**
	 * Set the erase size to one bank.  The erase control register takes
	 * the size in quadwords (8 bytes) starting at bit 2, as a 0-based
	 * count (i.e. writing 0 erases one quadword); bit 0 starts the
	 * erase.
	 */
	erase_cfg = ((bank_size >> 3) << 2) | 0x1;

	for (i = 0; i < SRAM_POWER_OFF_BANKS; i++) {

		if (on && (BANK_PG_STATUS(i))) {

			/* power on and enable a bank */
			BANK_PG_DISABLE(i);

			delay(SRAM_WARM_UP_DELAY_CNT);

			/* erase a bank */
			write32(ISH_SRAM_CTRL_ERASE_ADDR, sram_addr + (i * bank_size));
			write32(ISH_SRAM_CTRL_ERASE_CTRL, erase_cfg);

			/* wait for erase to complete */
			while (read32(ISH_SRAM_CTRL_ERASE_CTRL) & 0x1)
				continue;

		} else {
			/* disable and power off a bank */
			BANK_PG_ENABLE(i);
		}

		/**
		 * Clear the interrupt status register; SRAM interrupts are
		 * not allowed to be generated since bring-up already masked
		 * them all when booting ISH.
		 */
		write32(ISH_SRAM_CTRL_INTR, 0xFFFFFFFF);

	}
}
#endif

#define RTC_TICKS_IN_SECOND 32768

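/*
 * Read the 64-bit RTC counter from two 32-bit MMIO registers without
 * tearing: if the high word changes between the two reads, the low word
 * may have wrapped, so retry until two consecutive high-word reads match.
 */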
static uint64_t get_rtc(void)
{
	uint32_t lower;
	uint32_t upper;

	do {
		upper = read32(MISC_ISH_RTC_COUNTER1);
		lower = read32(MISC_ISH_RTC_COUNTER0);
	} while (upper != read32(MISC_ISH_RTC_COUNTER1));

	return ((uint64_t)upper << 32) | lower;
}

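/*
 * Decide whether the PMU/PMC currently allows power gating (ipapg).
 * Rough summary of the handshake below: sample the host reset state,
 * update the power-control-enable shadow, write back the rise/fall edge
 * bits of the software PG request and PMC wake registers, then re-read
 * them.  If the host side is in reset, poll the PMC wake indication (with
 * a one-second RTC timeout) before concluding that no wake is pending.
 */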
static int is_ipapg_allowed(void)
{
	uint32_t power_ctrl_enabled, sw_power_req, power_ctrl_wake;
	int system_power_state;

	system_power_state = ((read32(PMU_PMC_HOST_RST_CTL) & PMU_HOST_RST_B) == 0);

	write32(PMU_PMC_HOST_RST_CTL, read32(PMU_PMC_HOST_RST_CTL));

	power_ctrl_enabled = read32(PMU_D3_STATUS);
	sw_power_req = read32(PMU_SW_PG_REQ);
	power_ctrl_wake = read32(PMU_PMC_PG_WAKE);

	if (system_power_state)
		power_ctrl_enabled |= PMU_PCE_PG_ALLOWED;

	write32(PMU_INTERNAL_PCE, ((power_ctrl_enabled & PMU_PCE_SHADOW_MASK) |
			   PMU_PCE_CHANGE_DETECTED | PMU_PCE_CHANGE_MASK));

	write32(PMU_SW_PG_REQ, (sw_power_req | PMU_SW_PG_REQ_B_RISE |
			PMU_SW_PG_REQ_B_FALL));
	write32(PMU_PMC_PG_WAKE, (power_ctrl_wake | PMU_PMC_PG_WAKE_RISE |
			  PMU_PMC_PG_WAKE_FALL));
	write32(PMU_D3_STATUS, (read32(PMU_D3_STATUS) & (PMU_D0I3_ENABLE_MASK |
					   PMU_D3_BIT_SET | PMU_BME_BIT_SET)));

	power_ctrl_enabled = read32(PMU_D3_STATUS);
	sw_power_req = read32(PMU_SW_PG_REQ);
	power_ctrl_wake = read32(PMU_PMC_PG_WAKE);

	if (system_power_state) {
		uint64_t rtc_start = get_rtc();
		uint64_t rtc_end;

		while (power_ctrl_wake & PMU_PMC_PG_WAKE_VAL) {
			power_ctrl_wake = read32(PMU_PMC_PG_WAKE);
			rtc_end = get_rtc();
			if (rtc_end - rtc_start > RTC_TICKS_IN_SECOND)
				break;
		}
	}

	if (((power_ctrl_enabled & PMU_PCE_PG_ALLOWED) || system_power_state) &&
	    (((sw_power_req & PMU_SW_PG_REQ_B_VAL) == 0) ||
	     ((power_ctrl_enabled & PMU_PCE_PMCRE) == 0)) &&
	    ((power_ctrl_wake & PMU_PMC_PG_WAKE_VAL) == 0))
		return 1;
	else
		return 0;
}

#define NUMBER_IRQ_PINS 30
static uint32_t ioapic_rte[NUMBER_IRQ_PINS];

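/*
 * IOAPIC redirection-table entries do not survive power gating, so the
 * low dword of each pin's RTE is saved before entering ipapg() and
 * written back afterwards (each RTE occupies two index slots, hence the
 * 'pin * 2' stride).
 */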
static int do_ipapg(void)
{
	int ret;
	uint32_t rte_offset = SEDI_IOAPIC_IOREDTBL;

	for (int pin = 0; pin < NUMBER_IRQ_PINS; pin++) {
		write32(SEDI_IOAPIC_IDX, rte_offset + pin * 2);
		ioapic_rte[pin] = read32(SEDI_IOAPIC_WDW);
	}

	ret = ipapg();

	rte_offset = SEDI_IOAPIC_IOREDTBL;
	for (int pin = 0; pin < NUMBER_IRQ_PINS; pin++) {
		write32(SEDI_IOAPIC_IDX, rte_offset + pin * 2);
		write32(SEDI_IOAPIC_WDW, ioapic_rte[pin]);
	}

	return ret;
}

static inline void set_vnnred_aoncg(void)
{
	write32(PMU_VNNAON_RED, 1);
	write32(CCU_AONCG_EN, 1);
}

static inline void clear_vnnred_aoncg(void)
{
	write32(PMU_VNNAON_RED, 0);
	write32(CCU_AONCG_EN, 0);
}
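
/*
 * While waiting for wakeup, the low-power request is two-fold: ask the
 * PMU for the reduced VNN rail on the AON well (PMU_VNNAON_RED) and
 * enable AON clock gating (CCU_AONCG_EN).  Both are undone on the wakeup
 * path before touching SRAM again.
 */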

#ifdef CONFIG_SOC_INTEL_ISH_5_6_0
#define STRINGIFY(x)			#x
#define SLINE(num)			STRINGIFY(num)
#define RETENTION_EXIT_CYCLES_DELAY	5

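/*
 * The two helpers below walk the SRAM tiles one at a time: 'mask' selects
 * the current tile's bit and 'sum_mask' accumulates all bits visited so
 * far, so every write to PMU_SRAM_DEEPSLEEP carries the full set of tiles
 * processed up to that point while PMU_SRAM_PWR_STATUS is polled per tile.
 */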
static void sram_enter_sleep_mode(void)
{
	uint32_t val, sum_mask, mask;

	sum_mask = mask = 0x1;
	val = read32(PMU_SRAM_DEEPSLEEP);
	while (sum_mask <= CONFIG_RAM_BANK_TILE_MASK) {
		if (!(val & mask)) {
			write32(PMU_SRAM_DEEPSLEEP, val | sum_mask);
			while (read32(PMU_SRAM_PWR_STATUS) & mask)
				;
		}
		mask <<= 1;
		sum_mask += mask;
	}
}

static void sram_exit_sleep_mode(void)
{
	uint32_t val, sum_mask, mask;

	sum_mask = mask = 0x1;
	val = read32(PMU_SRAM_DEEPSLEEP);
	while (sum_mask <= CONFIG_RAM_BANK_TILE_MASK) {
		if ((val & mask)) {
			write32(PMU_SRAM_DEEPSLEEP, val & ~sum_mask);
			while (!(read32(PMU_SRAM_PWR_STATUS) & mask))
				;
			/* spin a few cycles to let the tile exit retention */
			__asm__ volatile (
					"movl $"SLINE(RETENTION_EXIT_CYCLES_DELAY)", %%ecx;"
					"loop .;\n\t"
					:
					:
					: "ecx"
					);
		}
		mask <<= 1;
		sum_mask += mask;
	}
}
#endif

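/*
 * D0i2 keeps main SRAM powered but drops it into retention (or deep sleep
 * on ISH 5.6.0), then either power gates via ipapg() or simply halts until
 * the PMU wakeup interrupt; SRAM contents survive, so nothing needs to be
 * copied back afterwards.
 */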
static void handle_d0i2(void)
{
	pg_exit_save_ctx();
	aon_share.pg_exit = 0;

#ifdef CONFIG_SOC_INTEL_ISH_5_6_0
	sram_enter_sleep_mode();
#else
	/* set main SRAM into retention mode */
	write32(PMU_LDO_CTRL, (PMU_LDO_ENABLE_BIT
		| PMU_LDO_RETENTION_BIT));
#endif
	/* delay some cycles before halt */
	delay(SRAM_RETENTION_CYCLES_DELAY);

	set_vnnred_aoncg();

	if (is_ipapg_allowed()) {
		uint32_t sram_cfg_reg;

		sram_cfg_reg = read32(ISH_SRAM_CTRL_CSFGR);

		aon_share.pg_exit = do_ipapg();

		if (aon_share.pg_exit)
			write32(ISH_SRAM_CTRL_CSFGR, sram_cfg_reg);
	} else {
		ish_mia_halt();
	}

	/* wakeup from PMU interrupt */

	clear_vnnred_aoncg();

#ifdef CONFIG_SOC_INTEL_ISH_5_6_0
	sram_exit_sleep_mode();
#else
	/* set main SRAM into normal mode */
	write32(PMU_LDO_CTRL, PMU_LDO_ENABLE_BIT);

	/**
	 * poll LDO_READY status to make sure SRAM LDO is on
	 * (exited retention mode)
	 */
	while (!(read32(PMU_LDO_CTRL) & PMU_LDO_READY_BIT))
		continue;
#endif

	if (read32(PMU_RST_PREP) & PMU_RST_PREP_AVAIL)
		handle_reset(ISH_PM_STATE_RESET_PREP);

	if (aon_share.pg_exit)
		ish_dma_set_msb(PAGING_CHAN, aon_share.uma_msb,
				aon_share.uma_msb);
}

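/*
 * D0i3 powers main SRAM off entirely, so the main FW image is first DMA'd
 * out to IMR/UMA DDR and restored on wakeup.  A failed store falls back to
 * the main FW immediately; a failed restore leaves nothing to return to,
 * so the only option is to reset.
 */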
static void handle_d0i3(void)
{
	int ret;

	pg_exit_save_ctx();
	aon_share.pg_exit = 0;

	/* store main FW's context to IMR DDR from main SRAM */
	ret = store_main_fw();

	/* if storing main FW failed, then switch back to main FW */
	if (ret != AON_SUCCESS)
		return;

	/* power off main SRAM */
	sram_power(0);

	set_vnnred_aoncg();

	if (is_ipapg_allowed()) {
		uint32_t sram_cfg_reg;

		sram_cfg_reg = read32(ISH_SRAM_CTRL_CSFGR);

		aon_share.pg_exit = do_ipapg();

		if (aon_share.pg_exit)
			write32(ISH_SRAM_CTRL_CSFGR, sram_cfg_reg);
	} else {
		ish_mia_halt();
	}

	/* wakeup from PMU interrupt */

	clear_vnnred_aoncg();

	if (read32(PMU_RST_PREP) & PMU_RST_PREP_AVAIL)
		handle_reset(ISH_PM_STATE_RESET_PREP);

	/* power on main SRAM */
	sram_power(1);

	if (aon_share.pg_exit)
		ish_dma_set_msb(PAGING_CHAN, aon_share.uma_msb,
				aon_share.uma_msb);

	/* restore main FW's context to main SRAM from IMR DDR */
	ret = restore_main_fw();

	if (ret != AON_SUCCESS) {
		/* we can't switch back to main FW now, reset ISH */
		handle_reset(ISH_PM_STATE_RESET);
	}
}

static void handle_d3(void)
{
	/* handle D3 */
	handle_reset(ISH_PM_STATE_RESET);
}

static inline void disable_csme_csrirq(void)
{
	write32(IPC_PIMR_CIM_SEC, 1);
}

static void handle_reset(enum ish_pm_state pm_state)
{
	(void)(pm_state);

	/* disable watchdog */
	write32(WDT_CONTROL, (read32(WDT_CONTROL) & (~WDT_CONTROL_ENABLE_BIT)));

	/* disable all gpio interrupts */
	write32(ISH_GPIO_GRER, 0);
	write32(ISH_GPIO_GFER, 0);
	write32(ISH_GPIO_GIMR, 0);

	/* disable CSME CSR irq */
	disable_csme_csrirq();

	/* power off main SRAM */
	sram_power(0);

	while (1) {
		/* clear ISH2HOST doorbell register */
		write32(IPC_ISH2HOST_DOORBELL_ADDR, 0);

		/* clear error register in MISC space */
		write32(MISC_ISH_ECC_ERR_SRESP, 1);

		/*
		 * Disable power gating of RF(Cache) and ROMs.
		 *
		 * Before switching to the aon task, RF and ROMs were already
		 * power gated, so we need to disable the power gating before
		 * resetting to ROM, to make sure the ROM code runs correctly.
		 */
		write32(PMU_RF_ROM_PWR_CTRL, 0);

		/* reset the ISH Minute-IA CPU core; execution goes to ISH ROM */
		ish_mia_reset();

		__builtin_unreachable();

		ish_mia_halt();
	}
}

static void handle_unknown_state(void)
{
	aon_share.last_error = AON_ERROR_NOT_SUPPORT_POWER_MODE;
	aon_share.error_count++;

	/* switch back to main FW */
}

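/*
 * Entry point of the aontask.  The main FW enters here via a hardware task
 * switch into aon_tss (its .eip points at this function), and the trailing
 * 'iret' task-switches back through the TSS's previous-task link.  Because
 * the TSS saves the execution state on that switch, the next entry resumes
 * right after the 'iret', i.e. at the top of the while loop.
 */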
void ish_aon_main(void)
{

	/* set PMU wakeup interrupt gate using LDT code segment selector (0x4) */
	aon_idt[AON_IDT_ENTRY_VEC_LAST -
		AON_IDT_ENTRY_VEC_FIRST].dword_lo =
		GEN_IDT_DESC_LO(&pmu_wakeup_isr, 0x4, IDT_DESC_FLAGS);

	aon_idt[AON_IDT_ENTRY_VEC_LAST -
		AON_IDT_ENTRY_VEC_FIRST].dword_up =
		GEN_IDT_DESC_UP(&pmu_wakeup_isr, 0x4, IDT_DESC_FLAGS);

	/*
	 * set reset prep interrupt gate using LDT code segment
	 * selector (0x4)
	 */
	aon_idt[0].dword_lo = GEN_IDT_DESC_LO(&reset_prep_isr,
		0x4, IDT_DESC_FLAGS);

	aon_idt[0].dword_up = GEN_IDT_DESC_UP(&reset_prep_isr,
		0x4, IDT_DESC_FLAGS);

	while (1) {

		/**
		 * Execution resumes from here on the second and subsequent
		 * switches to the aontask.
		 */

		/* save main FW's IDT and load aontask's IDT */
		__asm__ volatile (
				"sidtl %0;\n"
				"lidtl %1;\n"
				:
				: "m" (aon_share.main_fw_idt_hdr),
				  "m" (aon_idt_hdr)
				);

		aon_share.last_error = AON_SUCCESS;

		switch (aon_share.pm_state) {
		case ISH_PM_STATE_D0I2:
			handle_d0i2();
			break;
		case ISH_PM_STATE_D0I3:
			handle_d0i3();
			break;
		case ISH_PM_STATE_D3:
			handle_d3();
			break;
		case ISH_PM_STATE_RESET:
		case ISH_PM_STATE_RESET_PREP:
			handle_reset(aon_share.pm_state);
			break;
		default:
			handle_unknown_state();
			break;
		}

		/* check for D3 rising-edge status */
		if (read32(PMU_D3_STATUS) &
		    (PMU_D3_BIT_RISING_EDGE_STATUS | PMU_D3_BIT_SET)) {
			if (!(aon_share.host_in_suspend)) {
				aon_share.pm_state = ISH_PM_STATE_D3;
				handle_d3();
			}
		}

		/* restore main FW's IDT and switch back to main FW */
		__asm__ volatile(
				"lidtl %0;\n"
				:
				: "m" (aon_share.main_fw_idt_hdr)
				);

		if (aon_share.pg_exit) {
			/* clear the busy bit of main FW's TSS descriptor */
			mainfw_gdt.entries[tr / sizeof(struct gdt_entry)]
				.flags &= 0xfd;
			pg_exit_restore_ctx();
		}

		__asm__ volatile ("iret;");
	}
}