/*
 * Copyright (c) 2023 Intel Corporation
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include "aon_share.h"
#include "aon_defs.h"
#include "../ish_dma.h"

/**
 * Due to the very limited AON memory size (typically 8KB total), we don't
 * want to define and allocate a whole 256-entry IDT for the aontask, which
 * would need almost 2KB (256 * 8), so we define only the IDT entries that
 * are actually needed:
 * AON_IDT_ENTRY_VEC_FIRST ~ AON_IDT_ENTRY_VEC_LAST
 */
#define AON_IDT_ENTRY_VEC_FIRST SEDI_VEC_RESET_PREP
#define AON_IDT_ENTRY_VEC_LAST SEDI_VEC_PMU2IOAPIC

static void handle_reset(enum ish_pm_state pm_state);

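/**
 * Note on the bare "iret" in the ISRs below: these handlers are entered
 * directly through the aontask's IDT, so "iret" resumes the interrupted
 * context and execution never falls through to the end of the C function;
 * __builtin_unreachable() documents exactly that.
 */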
/* ISR for PMU wakeup interrupt */
static void pmu_wakeup_isr(void)
{
	/**
	 * Indicate completion of servicing the interrupt to IOAPIC first,
	 * then indicate completion of servicing the interrupt to LAPIC
	 */
	write32(SEDI_IOAPIC_EOI, SEDI_VEC_PMU2IOAPIC);
	write32(LAPIC_EOI, 0x0);

	__asm__ volatile ("iret;");

	__builtin_unreachable();
}

/* ISR for reset prep interrupt */
static void reset_prep_isr(void)
{
	/* mask reset prep avail interrupt */
	write32(PMU_RST_PREP, PMU_RST_PREP_INT_MASK);

	/**
	 * Indicate completion of servicing the interrupt to IOAPIC first,
	 * then indicate completion of servicing the interrupt to LAPIC
	 */
	write32(SEDI_IOAPIC_EOI, SEDI_VEC_RESET_PREP);
	write32(LAPIC_EOI, 0x0);

	handle_reset(ISH_PM_STATE_RESET_PREP);

	__builtin_unreachable();
}

/**
 * Use a static data array for the aon IDT, and set up an IDT header for the
 * IDTR register.
 *
 * Since on x86 the IDT entry index (counting from 0) is also the interrupt
 * vector number, the 'start' field of the IDT header still needs to point
 * to entry 0, and 'size' must count from entry 0.
 */

static struct idt_entry aon_idt[AON_IDT_ENTRY_VEC_LAST -
				AON_IDT_ENTRY_VEC_FIRST + 1];

static struct idt_header aon_idt_hdr = {

	.limit = (sizeof(struct idt_entry) * (AON_IDT_ENTRY_VEC_LAST + 1)) - 1,
	.entries = (struct idt_entry *)((uint32_t)&aon_idt -
			(sizeof(struct idt_entry) * AON_IDT_ENTRY_VEC_FIRST))
};
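
/**
 * A worked example of the offset trick above (the value 64 is illustrative,
 * standing in for AON_IDT_ENTRY_VEC_FIRST): the CPU locates the gate for
 * vector N at IDTR.base + N * 8, so publishing
 *   base = (uint32_t)&aon_idt - 64 * 8
 * makes vector 64 resolve to &aon_idt[0] and, in general, vector N resolve
 * to &aon_idt[N - AON_IDT_ENTRY_VEC_FIRST]. Only vectors in
 * [AON_IDT_ENTRY_VEC_FIRST, AON_IDT_ENTRY_VEC_LAST] may ever be taken while
 * the aontask runs; anything outside that window would index memory outside
 * the array.
 */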
73
74 /* TSS segment for aon task */
75 struct tss_entry aon_tss = {
76 .prev_task_link = 0,
77 .reserved1 = 0,
78 .esp0 = (uint8_t *)(CONFIG_AON_PERSISTENT_BASE - AON_SP_RESERVED),
79 /* entry 1 in LDT for data segment */
80 .ss0 = 0xc,
81 .reserved2 = 0,
82 .esp1 = 0,
83 .ss1 = 0,
84 .reserved3 = 0,
85 .esp2 = 0,
86 .ss2 = 0,
87 .reserved4 = 0,
88 .cr3 = 0,
89 /* task execute entry point */
90 .eip = (uint32_t)&ish_aon_main,
91 .eflags = 0,
92 .eax = 0,
93 .ecx = 0,
94 .edx = 0,
95 .ebx = 0,
96 /* set stack top pointer at the end of usable aon memory */
97 .esp = CONFIG_AON_PERSISTENT_BASE - AON_SP_RESERVED,
98 .ebp = CONFIG_AON_PERSISTENT_BASE - AON_SP_RESERVED,
99 .esi = 0,
100 .edi = 0,
101 /* entry 1 in LDT for data segment */
102 .es = 0xc,
103 .reserved5 = 0,
104 /* entry 0 in LDT for code segment */
105 .cs = 0x4,
106 .reserved6 = 0,
107 /* entry 1 in LDT for data segment */
108 .ss = 0xc,
109 .reserved7 = 0,
110 /* entry 1 in LDT for data segment */
111 .ds = 0xc,
112 .reserved8 = 0,
113 /* entry 1 in LDT for data segment */
114 .fs = 0xc,
115 .reserved9 = 0,
116 /* entry 1 in LDT for data segment */
117 .gs = 0xc,
118 .reserved10 = 0,
119 .ldt_seg_selector = 0,
120 .reserved11 = 0,
121 .trap_debug = 0,
122
123 /**
124 * TSS's limit specified as 0x67, to allow the task has permission to
125 * access I/O port using IN/OUT instructions,'iomap_base_addr' field
126 * must be greater than or equal to TSS' limit
127 * see 'I/O port permissions' on
128 * https://en.wikipedia.org/wiki/Task_state_segment
129 */
130 .iomap_base_addr = GDT_DESC_TSS_LIMIT
131 };

/**
 * Define code and data LDT segments for the aontask
 * code : base = 0x0, limit = 0xFFFFFFFF, Present = 1, DPL = 0
 * data : base = 0x0, limit = 0xFFFFFFFF, Present = 1, DPL = 0
 */
static ldt_entry aon_ldt[2] = {

	/**
	 * entry 0 for code segment
	 * base: 0x0
	 * limit: 0xFFFFFFFF
	 * flag: 0x9B, Present = 1, DPL = 0, code segment
	 */
	{
		.dword_lo = GEN_GDT_DESC_LO(0x0, 0xFFFFFFFF,
					    GDT_DESC_CODE_FLAGS),

		.dword_up = GEN_GDT_DESC_UP(0x0, 0xFFFFFFFF,
					    GDT_DESC_CODE_FLAGS)
	},

	/**
	 * entry 1 for data segment
	 * base: 0x0
	 * limit: 0xFFFFFFFF
	 * flag: 0x93, Present = 1, DPL = 0, data segment
	 */
	{
		.dword_lo = GEN_GDT_DESC_LO(0x0, 0xFFFFFFFF,
					    GDT_DESC_DATA_FLAGS),

		.dword_up = GEN_GDT_DESC_UP(0x0, 0xFFFFFFFF,
					    GDT_DESC_DATA_FLAGS)
	}
};
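
/**
 * Why the selectors used in aon_tss are 0x4 and 0xc: an x86 segment
 * selector encodes {index[15:3], TI[2], RPL[1:0]}. With the table-indicator
 * bit (TI = 0x4) set, selector 0x4 picks LDT entry 0 (the code segment
 * above) at RPL 0, and 0xc picks LDT entry 1 (the data segment) at RPL 0.
 */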


/* shared data structure between main FW and aon task */
struct ish_aon_share aon_share = {
	.magic_id = AON_MAGIC_ID,
	.error_count = 0,
	.last_error = AON_SUCCESS,
	.aon_tss = &aon_tss,
	.aon_ldt = &aon_ldt[0],
	.aon_ldt_size = sizeof(aon_ldt),
};

/* snowball structure */
#define SNOWBALL_BASE (CONFIG_AON_PERSISTENT_BASE + 256)

struct snowball_struct *snowball = (void *)SNOWBALL_BASE;


/* In IMR DDR, the ISH FW image has a manifest header */
#define ISH_FW_IMAGE_MANIFEST_HEADER_SIZE (0x1000)

/* simple count-based delay */
static inline void delay(uint32_t count)
{
	while (count)
		count--;
}

static inline void enable_dma_bcg(void)
{
	write32(CCU_BCG_DMA, 1);
}

static inline void disable_dma_bcg(void)
{
	write32(CCU_BCG_DMA, 0);
}

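/**
 * Address math used by store_main_fw()/restore_main_fw() below: the
 * DDR-side copy of main FW lives in the IMR/UMA region right after the
 * manifest header, laid out at the same offsets as in SRAM. So for any
 * SRAM address A, its DDR shadow is
 *   imr_fw_addr + (A - CONFIG_RAM_BASE)
 * where imr_fw_addr = (uma_base_hi:uma_base_lo) + fw_offset +
 * ISH_FW_IMAGE_MANIFEST_HEADER_SIZE.
 */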
static int store_main_fw(void)
{
	int ret;
	uint64_t imr_fw_addr;
	uint64_t imr_fw_rw_addr;

	imr_fw_addr = (((uint64_t)snowball->uma_base_hi << 32) +
		       snowball->uma_base_lo +
		       snowball->fw_offset +
		       ISH_FW_IMAGE_MANIFEST_HEADER_SIZE);

	imr_fw_rw_addr = (imr_fw_addr
			  + aon_share.main_fw_rw_addr
			  - CONFIG_RAM_BASE);

	/* disable BCG (Block Clock Gating) for DMA, DMA can be accessed now */
	disable_dma_bcg();

	/* store main FW's read and write data region to IMR/UMA DDR */
	ret = ish_dma_copy(
		PAGING_CHAN,
		imr_fw_rw_addr,
		aon_share.main_fw_rw_addr,
		aon_share.main_fw_rw_size,
		SRAM_TO_UMA);

	/* enable BCG for DMA, DMA can't be accessed now */
	enable_dma_bcg();

	if (ret != DMA_RC_OK) {

		aon_share.last_error = AON_ERROR_DMA_FAILED;
		aon_share.error_count++;

		return AON_ERROR_DMA_FAILED;
	}

	return AON_SUCCESS;
}

static int restore_main_fw(void)
{
	int ret;
	uint64_t imr_fw_addr;
	uint64_t imr_fw_ro_addr;
	uint64_t imr_fw_rw_addr;

	imr_fw_addr = (((uint64_t)snowball->uma_base_hi << 32) +
		       snowball->uma_base_lo +
		       snowball->fw_offset +
		       ISH_FW_IMAGE_MANIFEST_HEADER_SIZE);

	imr_fw_ro_addr = (imr_fw_addr
			  + aon_share.main_fw_ro_addr
			  - CONFIG_RAM_BASE);

	imr_fw_rw_addr = (imr_fw_addr
			  + aon_share.main_fw_rw_addr
			  - CONFIG_RAM_BASE);

	/* disable BCG (Block Clock Gating) for DMA, DMA can be accessed now */
	disable_dma_bcg();

	/* restore main FW's read-only code and data region from IMR/UMA DDR */
	ret = ish_dma_copy(
		PAGING_CHAN,
		aon_share.main_fw_ro_addr,
		imr_fw_ro_addr,
		aon_share.main_fw_ro_size,
		UMA_TO_SRAM);

	if (ret != DMA_RC_OK) {

		aon_share.last_error = AON_ERROR_DMA_FAILED;
		aon_share.error_count++;

		/* enable BCG for DMA, DMA can't be accessed now */
		enable_dma_bcg();

		return AON_ERROR_DMA_FAILED;
	}

	/* restore main FW's read and write data region from IMR/UMA DDR */
	ret = ish_dma_copy(
		PAGING_CHAN,
		aon_share.main_fw_rw_addr,
		imr_fw_rw_addr,
		aon_share.main_fw_rw_size,
		UMA_TO_SRAM
		);

	/* enable BCG for DMA, DMA can't be accessed now */
	enable_dma_bcg();

	if (ret != DMA_RC_OK) {

		aon_share.last_error = AON_ERROR_DMA_FAILED;
		aon_share.error_count++;

		return AON_ERROR_DMA_FAILED;
	}

	return AON_SUCCESS;
}

#define SRAM_POWER_OFF_BANKS CONFIG_RAM_BANKS

/* SRAM needs time to enter retention mode */
#define CYCLES_PER_US 100
#define SRAM_RETENTION_US_DELAY 5
#define SRAM_RETENTION_CYCLES_DELAY (SRAM_RETENTION_US_DELAY * CYCLES_PER_US)

#ifdef CONFIG_SOC_INTEL_ISH_5_6_0
#define SRAM_WARM_UP_COUNTER (1000)
#define SRAM_CTRL_ERASE_SIZE_BIT 2
#define SRAM_CTRL_ERASE_BYTE_TO_QWORD 3
#define SRAM_BANK_ERASE_SIZE \
	((CONFIG_RAM_BANK_SIZE >> SRAM_CTRL_ERASE_BYTE_TO_QWORD) \
	 << SRAM_CTRL_ERASE_SIZE_BIT)
#define SRAM_TILES (CONFIG_RAM_BANKS * 2)

/*
 * Power one SRAM tile on or off via PMU_SRAM_PG_EN (bit set = power gated).
 * Returns non-zero only when the tile actually transitioned from off to on.
 */
static uint32_t sram_toggle_tile(uint32_t tile_id, uint32_t enable)
{
	uint32_t pmu_sram_val = read32(PMU_SRAM_PG_EN);
	uint32_t pmu_toggle_bit = (1 << tile_id);
	uint32_t u = 0;

	if (enable && (pmu_sram_val & pmu_toggle_bit)) {
		/* clear the power-gate bit, then wait for power-up */
		pmu_sram_val &= ~pmu_toggle_bit;
		write32(PMU_SRAM_PG_EN, pmu_sram_val);
		while (!(pmu_toggle_bit & read32(PMU_SRAM_PWR_STATUS)))
			;
		for (u = 0; u < SRAM_WARM_UP_COUNTER; ++u)
			__asm__ volatile ("nop");
	} else if (!enable && (~pmu_sram_val & pmu_toggle_bit)) {
		/* set the power-gate bit, then wait for power-down */
		pmu_sram_val |= pmu_toggle_bit;
		write32(PMU_SRAM_PG_EN, pmu_sram_val);
		while ((pmu_toggle_bit & read32(PMU_SRAM_PWR_STATUS)))
			;
		for (u = 0; u < SRAM_WARM_UP_COUNTER; ++u)
			__asm__ volatile ("nop");
	} else {
		/* already in the requested state */
		enable = 0;
	}
	return enable;
}

static void sram_toggle_bank(unsigned int bank_number, unsigned int enable)
{
	uint32_t tile_id = bank_number << 1;

	if (enable) {
		/* erase the bank only if both tiles were just powered on */
		if (sram_toggle_tile(tile_id, enable) &&
		    sram_toggle_tile((tile_id + 1), enable)) {
			write32(ISH_SRAM_CTRL_ERASE_ADDR,
				CONFIG_RAM_BASE +
				bank_number * CONFIG_RAM_BANK_SIZE);
			write32(ISH_SRAM_CTRL_ERASE_CTRL, (SRAM_BANK_ERASE_SIZE | 0x1));
			while (read32(ISH_SRAM_CTRL_ERASE_CTRL) & 0x1)
				;
		}
	} else {
		sram_toggle_tile(tile_id, enable);
		sram_toggle_tile((tile_id + 1), enable);
	}

	/* clear any pending SRAM controller interrupt status */
	write32(ISH_SRAM_CTRL_INTR, read32(ISH_SRAM_CTRL_INTR));
}

static void sram_power(int on)
{
	int i;

	for (i = 0; i < SRAM_POWER_OFF_BANKS; i++) {
		sram_toggle_bank(i, on);
	}
}
#else

/**
 * check SRAM bank i power gated status in PMU_SRAM_PG_EN register
 * 1: power gated 0: not power gated
 */
#define BANK_PG_STATUS(i) (read32(PMU_SRAM_PG_EN) & (0x1 << (i)))

/* enable power gating of an SRAM bank */
#define BANK_PG_ENABLE(i) (write32(PMU_SRAM_PG_EN, (read32(PMU_SRAM_PG_EN) | (0x1 << (i)))))

/* disable power gating of an SRAM bank */
#define BANK_PG_DISABLE(i) \
	(write32(PMU_SRAM_PG_EN, (read32(PMU_SRAM_PG_EN) & (~(0x1 << (i))))))

/**
 * check SRAM bank i disabled status in ISH_SRAM_CTRL_CSFGR register
 * 1: disabled 0: enabled
 */
#define BANK_DISABLE_STATUS(i) (read32(ISH_SRAM_CTRL_CSFGR) & (0x1 << ((i) + 4)))

/* enable an SRAM bank in ISH_SRAM_CTRL_CSFGR register */
#define BANK_ENABLE(i) \
	(write32(ISH_SRAM_CTRL_CSFGR, \
		 (read32(ISH_SRAM_CTRL_CSFGR) & (~(0x1 << ((i) + 4))))))

/* disable an SRAM bank in ISH_SRAM_CTRL_CSFGR register */
#define BANK_DISABLE(i) \
	(write32(ISH_SRAM_CTRL_CSFGR, \
		 (read32(ISH_SRAM_CTRL_CSFGR) | (0x1 << ((i) + 4)))))

/* SRAM needs time to warm up after power on */
#define SRAM_WARM_UP_DELAY_CNT 10

static void sram_power(int on)
{
	int i;
	uint32_t bank_size;
	uint32_t sram_addr;
	uint32_t erase_cfg;

	bank_size = CONFIG_RAM_BANK_SIZE;
	sram_addr = CONFIG_RAM_BASE;

	/**
	 * Set the erase size to one bank. The erase-control register takes
	 * the length in 64-bit words (hence bank_size >> 3, the same
	 * byte-to-QWORD shift named SRAM_CTRL_ERASE_BYTE_TO_QWORD in the
	 * CONFIG_SOC_INTEL_ISH_5_6_0 path), 0-based, i.e. a value of 0
	 * erases one word; the length field starts at bit 2 and bit 0
	 * starts the erase.
	 */
	erase_cfg = ((bank_size >> 3) << 2) | 0x1;

	for (i = 0; i < SRAM_POWER_OFF_BANKS; i++) {

		if (on && (BANK_PG_STATUS(i))) {

			/* power on and enable a bank */
			BANK_PG_DISABLE(i);

			delay(SRAM_WARM_UP_DELAY_CNT);

			/* erase a bank */
			write32(ISH_SRAM_CTRL_ERASE_ADDR, sram_addr + (i * bank_size));
			write32(ISH_SRAM_CTRL_ERASE_CTRL, erase_cfg);

			/* wait for the erase to complete */
			while (read32(ISH_SRAM_CTRL_ERASE_CTRL) & 0x1)
				continue;

		} else {
			/* disable and power off a bank */
			BANK_PG_ENABLE(i);
		}

		/**
		 * Clear the interrupt status register so no SRAM interrupts
		 * are generated; bring-up already masked all SRAM interrupts
		 * when booting ISH.
		 */
		write32(ISH_SRAM_CTRL_INTR, 0xFFFFFFFF);

	}
}
#endif

#define RTC_TICKS_IN_SECOND 32768

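/**
 * Read the 64-bit RTC counter from two 32-bit MMIO registers. The high
 * word is read before and after the low word; if it changed, a carry from
 * the low word rippled between the two reads and the pair is re-sampled,
 * so the combined value is never torn.
 */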
static uint64_t get_rtc(void)
{
	uint32_t lower;
	uint32_t upper;

	do {
		upper = read32(MISC_ISH_RTC_COUNTER1);
		lower = read32(MISC_ISH_RTC_COUNTER0);
	} while (upper != read32(MISC_ISH_RTC_COUNTER1));

	return ((uint64_t)upper << 32) | lower;
}

static int is_ipapg_allowed(void)
{
	uint32_t power_ctrl_enabled, sw_power_req, power_ctrl_wake;
	int system_power_state;

	system_power_state = ((read32(PMU_PMC_HOST_RST_CTL) & PMU_HOST_RST_B) == 0);

	write32(PMU_PMC_HOST_RST_CTL, read32(PMU_PMC_HOST_RST_CTL));

	power_ctrl_enabled = read32(PMU_D3_STATUS);
	sw_power_req = read32(PMU_SW_PG_REQ);
	power_ctrl_wake = read32(PMU_PMC_PG_WAKE);

	if (system_power_state)
		power_ctrl_enabled |= PMU_PCE_PG_ALLOWED;

	write32(PMU_INTERNAL_PCE, ((power_ctrl_enabled & PMU_PCE_SHADOW_MASK) |
				   PMU_PCE_CHANGE_DETECTED | PMU_PCE_CHANGE_MASK));

	write32(PMU_SW_PG_REQ, (sw_power_req | PMU_SW_PG_REQ_B_RISE |
				PMU_SW_PG_REQ_B_FALL));
	write32(PMU_PMC_PG_WAKE, (power_ctrl_wake | PMU_PMC_PG_WAKE_RISE |
				  PMU_PMC_PG_WAKE_FALL));
	write32(PMU_D3_STATUS, (read32(PMU_D3_STATUS) & (PMU_D0I3_ENABLE_MASK |
							 PMU_D3_BIT_SET | PMU_BME_BIT_SET)));

	power_ctrl_enabled = read32(PMU_D3_STATUS);
	sw_power_req = read32(PMU_SW_PG_REQ);
	power_ctrl_wake = read32(PMU_PMC_PG_WAKE);

	if (system_power_state) {
		uint64_t rtc_start = get_rtc();
		uint64_t rtc_end;

		/* wait up to one second for the PMC wake request to drop */
		while (power_ctrl_wake & PMU_PMC_PG_WAKE_VAL) {
			power_ctrl_wake = read32(PMU_PMC_PG_WAKE);
			rtc_end = get_rtc();
			if (rtc_end - rtc_start > RTC_TICKS_IN_SECOND)
				break;
		}
	}

	if (((power_ctrl_enabled & PMU_PCE_PG_ALLOWED) || system_power_state) &&
	    (((sw_power_req & PMU_SW_PG_REQ_B_VAL) == 0) ||
	     ((power_ctrl_enabled & PMU_PCE_PMCRE) == 0)))
		return 1;
	else
		return 0;
}

#define NUMBER_IRQ_PINS 30
static uint32_t ioapic_rte[NUMBER_IRQ_PINS];

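/**
 * IOAPIC redirection-table entries are not retained across ipapg() power
 * gating, so do_ipapg() snapshots the low dword of each RTE (each 64-bit
 * entry occupies two index slots, hence the pin * 2 stride) before gating
 * and writes them back afterwards.
 */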
static int do_ipapg(void)
{
	int ret;
	uint32_t rte_offset = SEDI_IOAPIC_IOREDTBL;

	for (int pin = 0; pin < NUMBER_IRQ_PINS; pin++) {
		write32(SEDI_IOAPIC_IDX, rte_offset + pin * 2);
		ioapic_rte[pin] = read32(SEDI_IOAPIC_WDW);
	}

	ret = ipapg();

	rte_offset = SEDI_IOAPIC_IOREDTBL;
	for (int pin = 0; pin < NUMBER_IRQ_PINS; pin++) {
		write32(SEDI_IOAPIC_IDX, rte_offset + pin * 2);
		write32(SEDI_IOAPIC_WDW, ioapic_rte[pin]);
	}

	return ret;
}

static inline void set_vnnred_aoncg(void)
{
	write32(PMU_VNNAON_RED, 1);
	write32(CCU_AONCG_EN, 1);
}

static inline void clear_vnnred_aoncg(void)
{
	write32(PMU_VNNAON_RED, 0);
	write32(CCU_AONCG_EN, 0);
}

#ifdef CONFIG_SOC_INTEL_ISH_5_6_0
#define STRINGIFY(x) #x
#define SLINE(num) STRINGIFY(num)
#define RETENTION_EXIT_CYCLES_DELAY 5

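/**
 * The two helpers below walk the SRAM tiles one bit at a time: "mask"
 * selects the tile being toggled on this iteration and "sum_mask"
 * accumulates the tiles visited so far, so the loop ends once every tile
 * in CONFIG_RAM_BANK_TILE_MASK has been handled. After each write,
 * PMU_SRAM_PWR_STATUS is polled until the tile reports the requested
 * state.
 */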
static void sram_enter_sleep_mode(void)
{
	uint32_t val, sum_mask, mask;

	sum_mask = mask = 0x1;
	val = read32(PMU_SRAM_DEEPSLEEP);
	while (sum_mask <= CONFIG_RAM_BANK_TILE_MASK) {
		if (!(val & mask)) {
			write32(PMU_SRAM_DEEPSLEEP, val | sum_mask);
			while (read32(PMU_SRAM_PWR_STATUS) & mask)
				;
		}
		mask <<= 1;
		sum_mask += mask;
	}
}

static void sram_exit_sleep_mode(void)
{
	uint32_t val, sum_mask, mask;

	sum_mask = mask = 0x1;
	val = read32(PMU_SRAM_DEEPSLEEP);
	while (sum_mask <= CONFIG_RAM_BANK_TILE_MASK) {
		if ((val & mask)) {
			write32(PMU_SRAM_DEEPSLEEP, val & ~sum_mask);
			while (!(read32(PMU_SRAM_PWR_STATUS) & mask))
				;
			/* spin RETENTION_EXIT_CYCLES_DELAY iterations
			 * before touching the next tile
			 */
			__asm__ volatile (
				"movl $"SLINE(RETENTION_EXIT_CYCLES_DELAY)", %%ecx;"
				"loop .;\n\t"
				:
				:
				: "ecx"
			);
		}
		mask <<= 1;
		sum_mask += mask;
	}
}
#endif

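/**
 * D0i2 vs D0i3, as implemented below: D0i2 keeps main SRAM contents alive
 * (retention mode, or deep sleep on CONFIG_SOC_INTEL_ISH_5_6_0) and only
 * halts or power-gates the core, while D0i3 additionally powers main SRAM
 * off, which is why it must first DMA main FW's read/write data out to IMR
 * DDR and restore both code and data on the way back.
 */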
static void handle_d0i2(void)
{
	pg_exit_save_ctx();
	aon_share.pg_exit = 0;

#ifdef CONFIG_SOC_INTEL_ISH_5_6_0
	sram_enter_sleep_mode();
#else
	/* set main SRAM into retention mode */
	write32(PMU_LDO_CTRL, (PMU_LDO_ENABLE_BIT
			       | PMU_LDO_RETENTION_BIT));
#endif
	/* delay some cycles before halt */
	delay(SRAM_RETENTION_CYCLES_DELAY);

	set_vnnred_aoncg();

	if (is_ipapg_allowed()) {
		uint32_t sram_cfg_reg;

		sram_cfg_reg = read32(ISH_SRAM_CTRL_CSFGR);

		aon_share.pg_exit = do_ipapg();

		if (aon_share.pg_exit)
			write32(ISH_SRAM_CTRL_CSFGR, sram_cfg_reg);
	} else {
		ish_mia_halt();
	}

	/* wakeup from PMU interrupt */

	clear_vnnred_aoncg();

#ifdef CONFIG_SOC_INTEL_ISH_5_6_0
	sram_exit_sleep_mode();
#else
	/* set main SRAM into normal mode */
	write32(PMU_LDO_CTRL, PMU_LDO_ENABLE_BIT);

	/**
	 * poll LDO_READY status to make sure the SRAM LDO is on
	 * (exited retention mode)
	 */
	while (!(read32(PMU_LDO_CTRL) & PMU_LDO_READY_BIT))
		continue;
#endif

	if (read32(PMU_RST_PREP) & PMU_RST_PREP_AVAIL)
		handle_reset(ISH_PM_STATE_RESET_PREP);

	if (aon_share.pg_exit)
		ish_dma_set_msb(PAGING_CHAN, aon_share.uma_msb,
				aon_share.uma_msb);
}

static void handle_d0i3(void)
{
	int ret;

	pg_exit_save_ctx();
	aon_share.pg_exit = 0;

	/* store main FW's context to IMR DDR from main SRAM */
	ret = store_main_fw();

	/* if storing main FW failed, then switch back to main FW */
	if (ret != AON_SUCCESS)
		return;

	/* power off main SRAM */
	sram_power(0);

	set_vnnred_aoncg();

	if (is_ipapg_allowed()) {
		uint32_t sram_cfg_reg;

		sram_cfg_reg = read32(ISH_SRAM_CTRL_CSFGR);

		aon_share.pg_exit = do_ipapg();

		if (aon_share.pg_exit)
			write32(ISH_SRAM_CTRL_CSFGR, sram_cfg_reg);
	} else {
		ish_mia_halt();
	}

	/* wakeup from PMU interrupt */

	clear_vnnred_aoncg();

	if (read32(PMU_RST_PREP) & PMU_RST_PREP_AVAIL)
		handle_reset(ISH_PM_STATE_RESET_PREP);

	/* power on main SRAM */
	sram_power(1);

	if (aon_share.pg_exit)
		ish_dma_set_msb(PAGING_CHAN, aon_share.uma_msb,
				aon_share.uma_msb);

	/* restore main FW's context to main SRAM from IMR DDR */
	ret = restore_main_fw();

	if (ret != AON_SUCCESS) {
		/* we can't switch back to main FW now, reset ISH */
		handle_reset(ISH_PM_STATE_RESET);
	}
}

static void handle_d3(void)
{
	/* handle D3 */
	handle_reset(ISH_PM_STATE_RESET);
}

static inline void disable_csme_csrirq(void)
{
	write32(IPC_PIMR_CIM_SEC, 1);
}


static void handle_reset(enum ish_pm_state pm_state)
{
	(void)(pm_state);

	/* disable watchdog */
	write32(WDT_CONTROL, (read32(WDT_CONTROL) & (~WDT_CONTROL_ENABLE_BIT)));

	/* disable all gpio interrupts */
	write32(ISH_GPIO_GRER, 0);
	write32(ISH_GPIO_GFER, 0);
	write32(ISH_GPIO_GIMR, 0);

	/* disable CSME CSR irq */
	disable_csme_csrirq();

	/* power off main SRAM */
	sram_power(0);

	while (1) {
		/* clear ISH2HOST doorbell register */
		write32(IPC_ISH2HOST_DOORBELL_ADDR, 0);

		/* clear error register in MISC space */
		write32(MISC_ISH_ECC_ERR_SRESP, 1);

		/*
		 * Disable power gating of RF(Cache) and ROMs.
		 *
		 * Before switching to the aontask, RF and ROMs are already
		 * power gated, so we need to disable the power gating
		 * before resetting to ROM, to make sure ROM code runs
		 * correctly.
		 */
		write32(PMU_RF_ROM_PWR_CTRL, 0);

		/* reset ISH minute-ia cpu core, will go to ISH ROM */
		ish_mia_reset();

		__builtin_unreachable();

		ish_mia_halt();
	}

}

static void handle_unknown_state(void)
{
	aon_share.last_error = AON_ERROR_NOT_SUPPORT_POWER_MODE;
	aon_share.error_count++;

	/* switch back to main FW */
}

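/**
 * Entry point of the aontask. Main FW enters here through an x86 hardware
 * task switch into aon_tss; the trailing "iret" at the bottom of the loop
 * performs the nested-task return through the TSS back link (see
 * prev_task_link above), so the next entry resumes at the top of the
 * while (1) loop rather than at the function entry.
 */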
void ish_aon_main(void)
{

	/* set PMU wakeup interrupt gate using LDT code segment selector(0x4) */
	aon_idt[AON_IDT_ENTRY_VEC_LAST -
		AON_IDT_ENTRY_VEC_FIRST].dword_lo =
		GEN_IDT_DESC_LO(&pmu_wakeup_isr, 0x4, IDT_DESC_FLAGS);

	aon_idt[AON_IDT_ENTRY_VEC_LAST -
		AON_IDT_ENTRY_VEC_FIRST].dword_up =
		GEN_IDT_DESC_UP(&pmu_wakeup_isr, 0x4, IDT_DESC_FLAGS);

	/*
	 * set reset prep interrupt gate using LDT code segment
	 * selector(0x4)
	 */
	aon_idt[0].dword_lo = GEN_IDT_DESC_LO(&reset_prep_isr,
					      0x4, IDT_DESC_FLAGS);

	aon_idt[0].dword_up = GEN_IDT_DESC_UP(&reset_prep_isr,
					      0x4, IDT_DESC_FLAGS);

	while (1) {

		/**
		 * will start to run from here when switched to aontask for
		 * the second and subsequent times
		 */

		/* save main FW's IDT and load aontask's IDT */
		__asm__ volatile (
			"sidtl %0;\n"
			"lidtl %1;\n"
			:
			: "m" (aon_share.main_fw_idt_hdr),
			  "m" (aon_idt_hdr)
		);

		aon_share.last_error = AON_SUCCESS;

		switch (aon_share.pm_state) {
		case ISH_PM_STATE_D0I2:
			handle_d0i2();
			break;
		case ISH_PM_STATE_D0I3:
			handle_d0i3();
			break;
		case ISH_PM_STATE_D3:
			handle_d3();
			break;
		case ISH_PM_STATE_RESET:
		case ISH_PM_STATE_RESET_PREP:
			handle_reset(aon_share.pm_state);
			break;
		default:
			handle_unknown_state();
			break;
		}

		/* check for D3 rising status */
		if (read32(PMU_D3_STATUS) &
		    (PMU_D3_BIT_RISING_EDGE_STATUS | PMU_D3_BIT_SET)) {
			if (!(aon_share.host_in_suspend)) {
				aon_share.pm_state = ISH_PM_STATE_D3;
				handle_d3();
			}
		}

		/* restore main FW's IDT and switch back to main FW */
		__asm__ volatile(
			"lidtl %0;\n"
			:
			: "m" (aon_share.main_fw_idt_hdr)
		);

		if (aon_share.pg_exit) {
			/**
			 * after power gating exit, clear the busy flag in
			 * main FW's TSS descriptor before restoring its
			 * context, since a busy TSS cannot be loaded again
			 */
			mainfw_gdt.entries[tr / sizeof(struct gdt_entry)]
				.flags &= 0xfd;
			pg_exit_restore_ctx();
		}

		__asm__ volatile ("iret;");
	}
}