/*
 * Copyright (c) 2023-2024 Intel Corporation
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include "aon_share.h"
#include "aon_defs.h"
#include "../ish_dma.h"
/**
 * Due to the very limited AON memory size (typically 8KB in total), we do not
 * want to define and allocate the whole 256-entry IDT for the aontask, which
 * would need almost 2KB (256 * 8 bytes). Instead, only the needed IDT entries
 * are defined:
 * AON_IDT_ENTRY_VEC_FIRST ~ AON_IDT_ENTRY_VEC_LAST
 */
#define AON_IDT_ENTRY_VEC_FIRST		SEDI_VEC_RESET_PREP
#define AON_IDT_ENTRY_VEC_LAST		SEDI_VEC_PMU2IOAPIC

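/*
 * For illustration only (the actual SEDI_VEC_* values are platform defined):
 * if SEDI_VEC_RESET_PREP were 24 and SEDI_VEC_PMU2IOAPIC were 31, aon_idt
 * would hold just 8 entries (64 bytes) instead of 256 entries (2KB).
 */
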
static void handle_reset(enum ish_pm_state pm_state);

/* ISR for PMU wakeup interrupt */
static void pmu_wakeup_isr(void)
{
	/**
	 * Signal completion of interrupt servicing to the IOAPIC first,
	 * then to the LAPIC
	 */
	write32(SEDI_IOAPIC_EOI, SEDI_VEC_PMU2IOAPIC);
	write32(LAPIC_EOI, 0x0);

	__asm__ volatile ("iret;");

	__builtin_unreachable();
}

/* ISR for reset prep interrupt */
static void reset_prep_isr(void)
{
	/* mask the reset prep avail interrupt */
	write32(PMU_RST_PREP, PMU_RST_PREP_INT_MASK);

	/**
	 * Signal completion of interrupt servicing to the IOAPIC first,
	 * then to the LAPIC
	 */
	write32(SEDI_IOAPIC_EOI, SEDI_VEC_RESET_PREP);
	write32(LAPIC_EOI, 0x0);

	handle_reset(ISH_PM_STATE_RESET_PREP);

	__builtin_unreachable();
}

/**
 * Use a static data array for the aon IDT, and set the IDT header for the
 * IDTR register.
 *
 * Since on x86 the IDT entry index (counting from 0) is also the interrupt
 * vector number, the 'start' field of the IDT header still needs to point
 * to entry 0, and 'size' must count from entry 0.
 */

static struct idt_entry aon_idt[AON_IDT_ENTRY_VEC_LAST -
				AON_IDT_ENTRY_VEC_FIRST + 1];

static struct idt_header aon_idt_hdr = {

	.limit = (sizeof(struct idt_entry) * (AON_IDT_ENTRY_VEC_LAST + 1)) - 1,
	.entries = (struct idt_entry *)((uint32_t)&aon_idt -
			(sizeof(struct idt_entry) * AON_IDT_ENTRY_VEC_FIRST))
};
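
/*
 * Note: the .entries pointer above is intentionally biased backwards by
 * AON_IDT_ENTRY_VEC_FIRST entries, so the CPU's indexing of vector N lands
 * on aon_idt[N - AON_IDT_ENTRY_VEC_FIRST]. Only vectors in the range
 * [AON_IDT_ENTRY_VEC_FIRST, AON_IDT_ENTRY_VEC_LAST] are backed by real
 * memory; taking any other vector while this IDT is loaded is invalid.
 */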

/* TSS segment for aon task */
struct tss_entry aon_tss = {
	.prev_task_link = 0,
	.reserved1 = 0,
	.esp0 = (uint8_t *)(CONFIG_AON_PERSISTENT_BASE - AON_SP_RESERVED),
	/* entry 1 in LDT for data segment */
	.ss0 = 0xc,
	.reserved2 = 0,
	.esp1 = 0,
	.ss1 = 0,
	.reserved3 = 0,
	.esp2 = 0,
	.ss2 = 0,
	.reserved4 = 0,
	.cr3 = 0,
	/* task entry point */
	.eip = (uint32_t)&ish_aon_main,
	.eflags = 0,
	.eax = 0,
	.ecx = 0,
	.edx = 0,
	.ebx = 0,
	/* set stack top pointer to the end of usable aon memory */
	.esp = CONFIG_AON_PERSISTENT_BASE - AON_SP_RESERVED,
	.ebp = CONFIG_AON_PERSISTENT_BASE - AON_SP_RESERVED,
	.esi = 0,
	.edi = 0,
	/* entry 1 in LDT for data segment */
	.es = 0xc,
	.reserved5 = 0,
	/* entry 0 in LDT for code segment */
	.cs = 0x4,
	.reserved6 = 0,
	/* entry 1 in LDT for data segment */
	.ss = 0xc,
	.reserved7 = 0,
	/* entry 1 in LDT for data segment */
	.ds = 0xc,
	.reserved8 = 0,
	/* entry 1 in LDT for data segment */
	.fs = 0xc,
	.reserved9 = 0,
	/* entry 1 in LDT for data segment */
	.gs = 0xc,
	.reserved10 = 0,
	.ldt_seg_selector = 0,
	.reserved11 = 0,
	.trap_debug = 0,

	/**
	 * The TSS limit is specified as 0x67. For the task to be allowed to
	 * access I/O ports using IN/OUT instructions, the 'iomap_base_addr'
	 * field must be greater than or equal to the TSS limit.
	 * See 'I/O port permissions' on
	 *	https://en.wikipedia.org/wiki/Task_state_segment
	 */
	.iomap_base_addr = GDT_DESC_TSS_LIMIT
};

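/*
 * Segment selector encoding used above: bits [1:0] = RPL, bit 2 = table
 * indicator (1 = LDT), bits [15:3] = descriptor index. So 0x4 selects LDT
 * entry 0 (the aontask code segment) and 0xc selects LDT entry 1 (the
 * aontask data segment), both at RPL 0.
 */
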
/**
 * define code and data LDT segments for aontask
 * code : base = 0x0, limit = 0xFFFFFFFF, Present = 1, DPL = 0
 * data : base = 0x0, limit = 0xFFFFFFFF, Present = 1, DPL = 0
 */
static ldt_entry aon_ldt[2] = {

	/**
	 * entry 0 for code segment
	 * base: 0x0
	 * limit: 0xFFFFFFFF
	 * flag: 0x9B, Present = 1, DPL = 0, code segment
	 */
	{
		.dword_lo = GEN_GDT_DESC_LO(0x0, 0xFFFFFFFF,
				GDT_DESC_CODE_FLAGS),

		.dword_up = GEN_GDT_DESC_UP(0x0, 0xFFFFFFFF,
				GDT_DESC_CODE_FLAGS)
	},

	/**
	 * entry 1 for data segment
	 * base: 0x0
	 * limit: 0xFFFFFFFF
	 * flag: 0x93, Present = 1, DPL = 0, data segment
	 */
	{
		.dword_lo = GEN_GDT_DESC_LO(0x0, 0xFFFFFFFF,
				GDT_DESC_DATA_FLAGS),

		.dword_up = GEN_GDT_DESC_UP(0x0, 0xFFFFFFFF,
				GDT_DESC_DATA_FLAGS)
	}
};


/* shared data structure between main FW and aon task */
struct ish_aon_share aon_share = {
	.magic_id = AON_MAGIC_ID,
	.error_count = 0,
	.last_error = AON_SUCCESS,
	.aon_tss = &aon_tss,
	.aon_ldt = &aon_ldt[0],
	.aon_ldt_size = sizeof(aon_ldt),
};

/* snowball structure */
#define SNOWBALL_BASE	(CONFIG_AON_PERSISTENT_BASE + 256)

struct snowball_struct *snowball = (void *)SNOWBALL_BASE;


/* In IMR DDR, ISH FW image has a manifest header */
#define ISH_FW_IMAGE_MANIFEST_HEADER_SIZE (0x1000)

/* simple count-based delay */
static inline void delay(uint32_t count)
{
	while (count)
		count--;
}

static inline void enable_dma_bcg(void)
{
	write32(CCU_BCG_DMA, 1);
}

static inline void disable_dma_bcg(void)
{
	write32(CCU_BCG_DMA, 0);
}
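
/*
 * Note on the helpers above: CCU_BCG_DMA controls Block Clock Gating for the
 * DMA engine. Writing 1 gates the DMA clock (DMA cannot be accessed while
 * gated); writing 0 ungates it. The store/restore paths below bracket every
 * DMA transfer with disable_dma_bcg()/enable_dma_bcg() for this reason.
 */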

static int store_main_fw(void)
{
	int ret;
	uint64_t imr_fw_addr;
	uint64_t imr_fw_rw_addr;

	imr_fw_addr = (((uint64_t)snowball->uma_base_hi << 32) +
		       snowball->uma_base_lo +
		       snowball->fw_offset +
		       ISH_FW_IMAGE_MANIFEST_HEADER_SIZE);

	imr_fw_rw_addr = (imr_fw_addr
			  + aon_share.main_fw_rw_addr
			  - CONFIG_RAM_BASE);
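
	/*
	 * The arithmetic above mirrors the SRAM layout into the IMR copy:
	 * the RW region sits at the same offset from the FW image base in
	 * IMR DDR as it does from CONFIG_RAM_BASE in SRAM.
	 */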

	/* disable BCG (Block Clock Gating) for DMA, so DMA can be accessed */
	disable_dma_bcg();

	/* store main FW's read/write data region to IMR/UMA DDR */
	ret = ish_dma_copy(
		PAGING_CHAN,
		imr_fw_rw_addr,
		aon_share.main_fw_rw_addr,
		aon_share.main_fw_rw_size,
		SRAM_TO_UMA);

	/* enable BCG for DMA, DMA can't be accessed from now on */
	enable_dma_bcg();

	if (ret != DMA_RC_OK) {

		aon_share.last_error = AON_ERROR_DMA_FAILED;
		aon_share.error_count++;

		return AON_ERROR_DMA_FAILED;
	}

	return AON_SUCCESS;
}

static int restore_main_fw(void)
{
	int ret;
	uint64_t imr_fw_addr;
	uint64_t imr_fw_ro_addr;
	uint64_t imr_fw_rw_addr;

	imr_fw_addr = (((uint64_t)snowball->uma_base_hi << 32) +
		       snowball->uma_base_lo +
		       snowball->fw_offset +
		       ISH_FW_IMAGE_MANIFEST_HEADER_SIZE);

	imr_fw_ro_addr = (imr_fw_addr
			  + aon_share.main_fw_ro_addr
			  - CONFIG_RAM_BASE);

	imr_fw_rw_addr = (imr_fw_addr
			  + aon_share.main_fw_rw_addr
			  - CONFIG_RAM_BASE);

	/* disable BCG (Block Clock Gating) for DMA, so DMA can be accessed */
	disable_dma_bcg();

	/* restore main FW's read-only code and data region from IMR/UMA DDR */
	ret = ish_dma_copy(
		PAGING_CHAN,
		aon_share.main_fw_ro_addr,
		imr_fw_ro_addr,
		aon_share.main_fw_ro_size,
		UMA_TO_SRAM);

	if (ret != DMA_RC_OK) {

		aon_share.last_error = AON_ERROR_DMA_FAILED;
		aon_share.error_count++;

		/* enable BCG for DMA, DMA can't be accessed from now on */
		enable_dma_bcg();

		return AON_ERROR_DMA_FAILED;
	}

	/* restore main FW's read/write data region from IMR/UMA DDR */
	ret = ish_dma_copy(
			PAGING_CHAN,
			aon_share.main_fw_rw_addr,
			imr_fw_rw_addr,
			aon_share.main_fw_rw_size,
			UMA_TO_SRAM
			);

	/* enable BCG for DMA, DMA can't be accessed from now on */
	enable_dma_bcg();

	if (ret != DMA_RC_OK) {

		aon_share.last_error = AON_ERROR_DMA_FAILED;
		aon_share.error_count++;

		return AON_ERROR_DMA_FAILED;
	}

	return AON_SUCCESS;
}

#define SRAM_POWER_OFF_BANKS	CONFIG_RAM_BANKS

/* SRAM needs time to enter retention mode */
#define CYCLES_PER_US                  100
#ifdef CONFIG_SOC_INTEL_ISH_5_8_0
#define SRAM_RETENTION_US_DELAY        20
#else
#define SRAM_RETENTION_US_DELAY        5
#endif
#define SRAM_RETENTION_CYCLES_DELAY    (SRAM_RETENTION_US_DELAY * CYCLES_PER_US)

#ifdef CONFIG_SOC_INTEL_ISH_5_6_0
#define SRAM_WARM_UP_COUNTER	(1000)
#define SRAM_CTRL_ERASE_SIZE_BIT	2
#define SRAM_CTRL_ERASE_BYTE_TO_QWORD	3
#define SRAM_BANK_ERASE_SIZE                                                   \
	((CONFIG_RAM_BANK_SIZE >> SRAM_CTRL_ERASE_BYTE_TO_QWORD)               \
	 << SRAM_CTRL_ERASE_SIZE_BIT)
#define SRAM_TILES		(CONFIG_RAM_BANKS * 2)
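
/*
 * Toggle power for one SRAM tile. Returns non-zero only when an enable
 * request actually had to power the tile up; for disable requests and no-op
 * calls it returns 0. sram_toggle_bank() relies on this to erase a bank only
 * when its tiles were freshly powered on.
 */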

static uint32_t sram_toggle_tile(uint32_t tile_id, uint32_t enable)
{
	uint32_t pmu_sram_val = read32(PMU_SRAM_PG_EN);
	uint32_t pmu_toggle_bit = (1 << tile_id);
	uint32_t u = 0;

	if (enable && (pmu_sram_val & pmu_toggle_bit)) {
		pmu_sram_val &= ~pmu_toggle_bit;
		write32(PMU_SRAM_PG_EN, pmu_sram_val);
		while (!(pmu_toggle_bit & read32(PMU_SRAM_PWR_STATUS)))
			;
		for (u = 0; u < SRAM_WARM_UP_COUNTER; ++u)
			__asm__ volatile ("nop");
	} else if (!enable && (~pmu_sram_val & pmu_toggle_bit)) {
		pmu_sram_val |= pmu_toggle_bit;
		write32(PMU_SRAM_PG_EN, pmu_sram_val);
		while ((pmu_toggle_bit & read32(PMU_SRAM_PWR_STATUS)))
			;
		for (u = 0; u < SRAM_WARM_UP_COUNTER; ++u)
			__asm__ volatile ("nop");
	} else {
		enable = 0;
	}
	return enable;
}

static void sram_toggle_bank(unsigned int bank_number, unsigned int enable)
{
	uint32_t tile_id = bank_number << 1;

	if (enable) {
		if (sram_toggle_tile(tile_id, enable) &&
		    sram_toggle_tile((tile_id + 1), enable)) {
			write32(ISH_SRAM_CTRL_ERASE_ADDR,
				CONFIG_RAM_BASE +
					bank_number * CONFIG_RAM_BANK_SIZE);
			write32(ISH_SRAM_CTRL_ERASE_CTRL, (SRAM_BANK_ERASE_SIZE | 0x1));
			while (read32(ISH_SRAM_CTRL_ERASE_CTRL) & 0x1)
				;
		}
	} else {
		sram_toggle_tile(tile_id, enable);
		sram_toggle_tile((tile_id + 1), enable);
	}

	write32(ISH_SRAM_CTRL_INTR, read32(ISH_SRAM_CTRL_INTR));
}

static void sram_power(int on)
{
	int i;

	for (i = 0; i < SRAM_POWER_OFF_BANKS; i++) {
		sram_toggle_bank(i, on);
	}
}
#else

/**
 * check SRAM bank i power-gated status in the PMU_SRAM_PG_EN register
 * 1: power gated 0: not power gated
 */
#define BANK_PG_STATUS(i)	(read32(PMU_SRAM_PG_EN) & (0x1 << (i)))

/* enable power gating of a SRAM bank */
#define BANK_PG_ENABLE(i)	(write32(PMU_SRAM_PG_EN, (read32(PMU_SRAM_PG_EN) | (0x1 << (i)))))

/* disable power gating of a SRAM bank */
#define BANK_PG_DISABLE(i)                                                     \
	(write32(PMU_SRAM_PG_EN, (read32(PMU_SRAM_PG_EN) & (~(0x1 << (i))))))

/**
 * check SRAM bank i disabled status in the ISH_SRAM_CTRL_CSFGR register
 * 1: disabled 0: enabled
 */
#define BANK_DISABLE_STATUS(i)	(read32(ISH_SRAM_CTRL_CSFGR) & (0x1 << ((i) + 4)))

/* enable a SRAM bank in the ISH_SRAM_CTRL_CSFGR register */
#define BANK_ENABLE(i)                                                         \
	(write32(ISH_SRAM_CTRL_CSFGR,                                          \
		 (read32(ISH_SRAM_CTRL_CSFGR) & (~(0x1 << ((i) + 4))))))

/* disable a SRAM bank in the ISH_SRAM_CTRL_CSFGR register */
#define BANK_DISABLE(i)                                                        \
	(write32(ISH_SRAM_CTRL_CSFGR,                                          \
		 (read32(ISH_SRAM_CTRL_CSFGR) | (0x1 << ((i) + 4)))))

/* SRAM needs time to warm up after power on */
#define SRAM_WARM_UP_DELAY_CNT		10

static void sram_power(int on)
{
	int i;
	uint32_t bank_size;
	uint32_t sram_addr;
	uint32_t erase_cfg;

	bank_size = CONFIG_RAM_BANK_SIZE;
	sram_addr = CONFIG_RAM_BASE;

	/**
	 * Set the erase size to one bank. The erase control register uses
	 * QWORDs (8 bytes) as its size unit with a 0-based length, i.e. a
	 * value of 0 erases one QWORD; the length field starts at bit 2,
	 * and bit 0 is the erase start/busy bit.
	 */
	erase_cfg = ((bank_size >> 3) << 2) | 0x1;
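
	/*
	 * Worked example (hypothetical 64KB bank, i.e. bank_size = 0x10000):
	 * 0x10000 >> 3 = 0x2000 QWORDs; the length is shifted into the
	 * length field at bit 2 and OR'd with the start bit, giving
	 * erase_cfg = (0x2000 << 2) | 0x1 = 0x8001.
	 */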

	for (i = 0; i < SRAM_POWER_OFF_BANKS; i++) {

		if (on && (BANK_PG_STATUS(i))) {

			/* power on and enable a bank */
			BANK_PG_DISABLE(i);

			delay(SRAM_WARM_UP_DELAY_CNT);

			/* erase a bank */
			write32(ISH_SRAM_CTRL_ERASE_ADDR, sram_addr + (i * bank_size));
			write32(ISH_SRAM_CTRL_ERASE_CTRL, erase_cfg);

			/* wait for the erase to complete */
			while (read32(ISH_SRAM_CTRL_ERASE_CTRL) & 0x1)
				continue;

		} else {
			/* disable and power off a bank */
			BANK_PG_ENABLE(i);
		}

		/**
		 * Clear the interrupt status register so that no SRAM
		 * interrupts are generated. Bring-up already masked all
		 * SRAM interrupts when booting ISH.
		 */
		write32(ISH_SRAM_CTRL_INTR, 0xFFFFFFFF);

	}
}
#endif

#define RTC_TICKS_IN_SECOND 32768
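
/*
 * Read the 64-bit RTC counter from its two 32-bit halves. The upper half is
 * re-read after the lower half; if it changed, the lower half wrapped
 * between the two reads and the pair is sampled again, so the function never
 * returns a torn value.
 */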

static uint64_t get_rtc(void)
{
	uint32_t lower;
	uint32_t upper;

	do {
		upper = read32(MISC_ISH_RTC_COUNTER1);
		lower = read32(MISC_ISH_RTC_COUNTER0);
	} while (upper != read32(MISC_ISH_RTC_COUNTER1));

	return ((uint64_t)upper << 32) | lower;
}

static int is_ipapg_allowed(void)
{
	uint32_t power_ctrl_enabled, sw_power_req, power_ctrl_wake;
	int system_power_state;

	system_power_state = ((read32(PMU_PMC_HOST_RST_CTL) & PMU_HOST_RST_B) == 0);

	write32(PMU_PMC_HOST_RST_CTL, read32(PMU_PMC_HOST_RST_CTL));

	power_ctrl_enabled = read32(PMU_D3_STATUS);
	sw_power_req = read32(PMU_SW_PG_REQ);
	power_ctrl_wake = read32(PMU_PMC_PG_WAKE);

	if (system_power_state)
		power_ctrl_enabled |= PMU_PCE_PG_ALLOWED;

	write32(PMU_INTERNAL_PCE, ((power_ctrl_enabled & PMU_PCE_SHADOW_MASK) |
			   PMU_PCE_CHANGE_DETECTED | PMU_PCE_CHANGE_MASK));

	write32(PMU_SW_PG_REQ, (sw_power_req | PMU_SW_PG_REQ_B_RISE |
			PMU_SW_PG_REQ_B_FALL));
	write32(PMU_PMC_PG_WAKE, (power_ctrl_wake | PMU_PMC_PG_WAKE_RISE |
			  PMU_PMC_PG_WAKE_FALL));
	write32(PMU_D3_STATUS, (read32(PMU_D3_STATUS) & (PMU_D0I3_ENABLE_MASK |
					   PMU_D3_BIT_SET | PMU_BME_BIT_SET)));

	power_ctrl_enabled = read32(PMU_D3_STATUS);
	sw_power_req = read32(PMU_SW_PG_REQ);
	power_ctrl_wake = read32(PMU_PMC_PG_WAKE);

	if (system_power_state) {
		uint64_t rtc_start = get_rtc();
		uint64_t rtc_end;

		/* wait up to one second for the PMC wake request to clear */
		while (power_ctrl_wake & PMU_PMC_PG_WAKE_VAL) {
			power_ctrl_wake = read32(PMU_PMC_PG_WAKE);
			rtc_end = get_rtc();
			if (rtc_end - rtc_start > RTC_TICKS_IN_SECOND)
				break;
		}
	}

	if (((power_ctrl_enabled & PMU_PCE_PG_ALLOWED) || system_power_state) &&
	    (((sw_power_req & PMU_SW_PG_REQ_B_VAL) == 0) ||
	     ((power_ctrl_enabled & PMU_PCE_PMCRE) == 0)))
		return 1;
	else
		return 0;
}

#define NUMBER_IRQ_PINS 30
static uint32_t ioapic_rte[NUMBER_IRQ_PINS];

static int do_ipapg(void)
{
	int ret;
	uint32_t rte_offset = SEDI_IOAPIC_IOREDTBL;

	/* save the IOAPIC redirection table entries */
	for (int pin = 0; pin < NUMBER_IRQ_PINS; pin++) {
		write32(SEDI_IOAPIC_IDX, rte_offset + pin * 2);
		ioapic_rte[pin] = read32(SEDI_IOAPIC_WDW);
	}

	ret = ipapg();

	/* restore the IOAPIC redirection table entries */
	rte_offset = SEDI_IOAPIC_IOREDTBL;
	for (int pin = 0; pin < NUMBER_IRQ_PINS; pin++) {
		write32(SEDI_IOAPIC_IDX, rte_offset + pin * 2);
		write32(SEDI_IOAPIC_WDW, ioapic_rte[pin]);
	}

	return ret;
}
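
/*
 * The RTE save/restore around ipapg() above exists because the IOAPIC
 * redirection table presumably does not survive the power-gated state. Each
 * entry is accessed indirectly through the index/window register pair; an
 * RTE spans two 32-bit registers per pin (hence the pin * 2 stride), and
 * this code saves and restores the low dword of each.
 */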

static inline void set_vnnred_aoncg(void)
{
	write32(PMU_VNNAON_RED, 1);
	write32(CCU_AONCG_EN, 1);

	write32(CCU_TCG_EN, 1);
	write32(CCU_TCG_ENABLE, 0);
	write32(CCU_BCG_ENABLE, 0);
}

static inline void clear_vnnred_aoncg(void)
{
	write32(PMU_VNNAON_RED, 0);
	write32(CCU_AONCG_EN, 0);

	write32(CCU_TCG_EN, 0);
	write32(CCU_TCG_ENABLE, 1);
	write32(CCU_BCG_ENABLE, 1);
}

#if (defined(CONFIG_SOC_INTEL_ISH_5_6_0) || defined(CONFIG_SOC_INTEL_ISH_5_8_0))
#define STRINGIFY(x)			#x
#define SLINE(num)			STRINGIFY(num)
#define RETENTION_EXIT_CYCLES_DELAY	5
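
/*
 * SLINE() stringifies the numeric delay so it can be pasted into the inline
 * assembly immediate in sram_exit_sleep_mode() below
 * ("movl $"SLINE(RETENTION_EXIT_CYCLES_DELAY)", %%ecx").
 */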
#ifdef CONFIG_SOC_INTEL_ISH_5_8_0
#define SRAM_PG_BITS(i) (0x3 << (2 * i))
#define BANK_PWR_STATUS(i) (read32(PMU_SRAM_PWR_STATUS) & SRAM_PG_BITS(i))
#define BANK_DS_ENABLE(i) \
		(write32(PMU_SRAM_DEEPSLEEP, read32(PMU_SRAM_DEEPSLEEP) | (0x3 << (2 * i))))

#define BANK_DS_DISABLE(i) \
		(write32(PMU_SRAM_DEEPSLEEP, read32(PMU_SRAM_DEEPSLEEP) & ~(0x3 << (2 * i))))
#endif


static void sram_enter_sleep_mode(void)
{
#if defined(CONFIG_SOC_INTEL_ISH_5_6_0)
	uint32_t val, sum_mask, mask;

	sum_mask = mask = 0x1;
	val = read32(PMU_SRAM_DEEPSLEEP);
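	/*
	 * Walk the tiles one bit at a time. sum_mask accumulates all bits up
	 * to and including the current one, so each write requests deep
	 * sleep for the current tile (and re-requests it for all lower
	 * tiles), then polls until the current tile's power status bit
	 * drops.
	 */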
	while (sum_mask <= CONFIG_RAM_BANK_TILE_MASK) {
		if (!(val & mask)) {
			write32(PMU_SRAM_DEEPSLEEP, val | sum_mask);
			while (read32(PMU_SRAM_PWR_STATUS) & mask)
				;
		}
		mask <<= 1;
		sum_mask += mask;
	}
#else
	for (int i = 0; i < SRAM_POWER_OFF_BANKS; i++) {
		BANK_DS_ENABLE(i);
		while (BANK_PWR_STATUS(i))
			;
	}
#endif
}

static void sram_exit_sleep_mode(void)
{
#if defined(CONFIG_SOC_INTEL_ISH_5_6_0)
	uint32_t val, sum_mask, mask;

	sum_mask = mask = 0x1;
	val = read32(PMU_SRAM_DEEPSLEEP);
	while (sum_mask <= CONFIG_RAM_BANK_TILE_MASK) {
		if ((val & mask)) {
			write32(PMU_SRAM_DEEPSLEEP, val & ~sum_mask);
			while (!(read32(PMU_SRAM_PWR_STATUS) & mask))
				;
			/* spin for RETENTION_EXIT_CYCLES_DELAY iterations */
			__asm__ volatile (
					"movl $"SLINE(RETENTION_EXIT_CYCLES_DELAY)", %%ecx;"
					"loop .;\n\t"
					:
					:
					: "ecx"
					);
		}
		mask <<= 1;
		sum_mask += mask;
	}
#else
	for (int i = 0; i < SRAM_POWER_OFF_BANKS; i++) {
		if (!BANK_PWR_STATUS(i)) {
			BANK_DS_DISABLE(i);
			delay(RETENTION_EXIT_CYCLES_DELAY);
			while (!BANK_PWR_STATUS(i))
				;
		}
	}
#endif
}
#endif

static void handle_d0i2(void)
{
	/* save CPU context so it can be restored after a power-gating exit */
	pg_exit_save_ctx();
	aon_share.pg_exit = 0;

#if (defined(CONFIG_SOC_INTEL_ISH_5_6_0) || defined(CONFIG_SOC_INTEL_ISH_5_8_0))
	sram_enter_sleep_mode();
#else
	/* set main SRAM into retention mode */
	write32(PMU_LDO_CTRL, (PMU_LDO_ENABLE_BIT
		| PMU_LDO_RETENTION_BIT));
#endif
	/* delay some cycles before halt */
	delay(SRAM_RETENTION_CYCLES_DELAY);

	set_vnnred_aoncg();

	if (is_ipapg_allowed()) {
		uint32_t sram_cfg_reg;

		sram_cfg_reg = read32(ISH_SRAM_CTRL_CSFGR);

		aon_share.pg_exit = do_ipapg();

		if (aon_share.pg_exit)
			write32(ISH_SRAM_CTRL_CSFGR, sram_cfg_reg);
	} else {
		ish_mia_halt();
	}

	/* wakeup from PMU interrupt */

	clear_vnnred_aoncg();

#if (defined(CONFIG_SOC_INTEL_ISH_5_6_0) || defined(CONFIG_SOC_INTEL_ISH_5_8_0))
	sram_exit_sleep_mode();
#else
	/* set main SRAM into normal mode */
	write32(PMU_LDO_CTRL, PMU_LDO_ENABLE_BIT);

	/**
	 * poll LDO_READY status to make sure SRAM LDO is on
	 * (exited retention mode)
	 */
	while (!(read32(PMU_LDO_CTRL) & PMU_LDO_READY_BIT))
		continue;
#endif

	if (read32(PMU_RST_PREP) & PMU_RST_PREP_AVAIL)
		handle_reset(ISH_PM_STATE_RESET_PREP);

	if (aon_share.pg_exit)
		ish_dma_set_msb(PAGING_CHAN, aon_share.uma_msb,
				aon_share.uma_msb);
}
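
/*
 * D0i3 differs from D0i2 above in how main SRAM is treated: instead of
 * putting SRAM into retention/deep sleep, the main FW context is copied out
 * to IMR DDR, SRAM is powered off entirely, and the context is copied back
 * after wakeup.
 */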

static void handle_d0i3(void)
{
	int ret;

	pg_exit_save_ctx();
	aon_share.pg_exit = 0;

	/* store main FW's context from main SRAM to IMR DDR */
	ret = store_main_fw();

	/* if storing main FW failed, then switch back to main FW */
	if (ret != AON_SUCCESS)
		return;

	/* power off main SRAM */
	sram_power(0);

	set_vnnred_aoncg();

	if (is_ipapg_allowed()) {
		uint32_t sram_cfg_reg;

		sram_cfg_reg = read32(ISH_SRAM_CTRL_CSFGR);

		aon_share.pg_exit = do_ipapg();

		if (aon_share.pg_exit)
			write32(ISH_SRAM_CTRL_CSFGR, sram_cfg_reg);
	} else {
		ish_mia_halt();
	}

	/* wakeup from PMU interrupt */

	clear_vnnred_aoncg();

	if (read32(PMU_RST_PREP) & PMU_RST_PREP_AVAIL)
		handle_reset(ISH_PM_STATE_RESET_PREP);

	/* power on main SRAM */
	sram_power(1);

	if (aon_share.pg_exit)
		ish_dma_set_msb(PAGING_CHAN, aon_share.uma_msb,
				aon_share.uma_msb);

	/* restore main FW's context from IMR DDR to main SRAM */
	ret = restore_main_fw();

	if (ret != AON_SUCCESS) {
		/* we can't switch back to main FW now, reset ISH */
		handle_reset(ISH_PM_STATE_RESET);
	}
}

static void handle_d3(void)
{
	/* handle D3 */
	handle_reset(ISH_PM_STATE_RESET);
}

static inline void disable_csme_csrirq(void)
{
	write32(IPC_PIMR_CIM_SEC, 1);
}

static void handle_reset(enum ish_pm_state pm_state)
{
	(void)(pm_state);

	/* disable watchdog */
	write32(WDT_CONTROL, (read32(WDT_CONTROL) & (~WDT_CONTROL_ENABLE_BIT)));

	/* disable all gpio interrupts */
	write32(ISH_GPIO_GRER, 0);
	write32(ISH_GPIO_GFER, 0);
	write32(ISH_GPIO_GIMR, 0);

	/* disable CSME CSR irq */
	disable_csme_csrirq();

	while (1) {
		/* clear ISH2HOST doorbell register */
		write32(IPC_ISH2HOST_DOORBELL_ADDR, 0);

		/* clear error register in MISC space */
		write32(MISC_ISH_ECC_ERR_SRESP, 1);

		/*
		 * Disable power gating of RF (cache) and ROMs.
		 *
		 * Before the switch to the aontask, RF and ROMs were already
		 * power gated, so we need to disable their power gating
		 * before resetting to ROM, to make sure the ROM code runs
		 * correctly.
		 */
		write32(PMU_RF_ROM_PWR_CTRL, 0);

		/* reset the ISH minute-ia cpu core; execution restarts from ISH ROM */
		ish_mia_reset();

		__builtin_unreachable();

		ish_mia_halt();
	}

}

static void handle_unknown_state(void)
{
	aon_share.last_error = AON_ERROR_NOT_SUPPORT_POWER_MODE;
	aon_share.error_count++;

	/* switch back to main FW */
}

void ish_aon_main(void)
{

	/* set PMU wakeup interrupt gate using LDT code segment selector (0x4) */
	aon_idt[AON_IDT_ENTRY_VEC_LAST -
		AON_IDT_ENTRY_VEC_FIRST].dword_lo =
		GEN_IDT_DESC_LO(&pmu_wakeup_isr, 0x4, IDT_DESC_FLAGS);

	aon_idt[AON_IDT_ENTRY_VEC_LAST -
		AON_IDT_ENTRY_VEC_FIRST].dword_up =
		GEN_IDT_DESC_UP(&pmu_wakeup_isr, 0x4, IDT_DESC_FLAGS);

	/*
	 * set reset prep interrupt gate using LDT code segment
	 * selector (0x4)
	 */
	aon_idt[0].dword_lo = GEN_IDT_DESC_LO(&reset_prep_isr,
		0x4, IDT_DESC_FLAGS);

	aon_idt[0].dword_up = GEN_IDT_DESC_UP(&reset_prep_isr,
		0x4, IDT_DESC_FLAGS);
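
	/*
	 * Slot mapping recap: aon_idt[0] backs AON_IDT_ENTRY_VEC_FIRST
	 * (SEDI_VEC_RESET_PREP) and the last slot backs
	 * AON_IDT_ENTRY_VEC_LAST (SEDI_VEC_PMU2IOAPIC), matching the biased
	 * base pointer in aon_idt_hdr.
	 */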

	while (1) {

		/**
		 * Each subsequent switch to the aontask resumes execution
		 * from here.
		 */

		/* save main FW's IDT and load aontask's IDT */
		__asm__ volatile (
				"sidtl %0;\n"
				"lidtl %1;\n"
				:
				: "m" (aon_share.main_fw_idt_hdr),
				  "m" (aon_idt_hdr)
				);

		aon_share.last_error = AON_SUCCESS;

		switch (aon_share.pm_state) {
		case ISH_PM_STATE_D0I2:
			handle_d0i2();
			break;
		case ISH_PM_STATE_D0I3:
			handle_d0i3();
			break;
		case ISH_PM_STATE_D3:
			handle_d3();
			break;
		case ISH_PM_STATE_RESET:
		case ISH_PM_STATE_RESET_PREP:
			handle_reset(aon_share.pm_state);
			break;
		default:
			handle_unknown_state();
			break;
		}

		/* check for D3 rising-edge status */
		if (read32(PMU_D3_STATUS) &
		    (PMU_D3_BIT_RISING_EDGE_STATUS | PMU_D3_BIT_SET)) {
			if (!(aon_share.host_in_suspend)) {
				aon_share.pm_state = ISH_PM_STATE_D3;
				handle_d3();
			}
		}

		/* restore main FW's IDT and switch back to main FW */
		__asm__ volatile(
				"lidtl %0;\n"
				:
				: "m" (aon_share.main_fw_idt_hdr)
				);

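		/*
		 * Clearing bit 1 of the descriptor flags below clears the
		 * busy bit of the main FW's TSS descriptor in the GDT (an
		 * x86 TSS descriptor type toggles between 9, available, and
		 * 11, busy); presumably the task switch back would otherwise
		 * fault on a busy TSS after a power-gating exit.
		 */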
		if (aon_share.pg_exit) {
			mainfw_gdt.entries[tr / sizeof(struct gdt_entry)]
				.flags &= 0xfd;
			pg_exit_restore_ctx();
		}

		__asm__ volatile ("iret;");
	}
}