/*
 * Copyright (c) 2024 Intel Corporation
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include "aon/aon_share.h"
#include "ish_dma.h"
#include "ish_pm.h"
#include "sedi_driver_core.h"
#include "sedi_driver_uart.h"
#include <sedi_driver_rtc.h>
#include <zephyr/sys/printk.h>
#include <zephyr/irq.h>
#include <zephyr/drivers/interrupt_controller/ioapic.h>
#include <zephyr/arch/x86/ia32/segmentation.h>

/* defined in link script: soc/x86/intel_ish/scripts/ish_linker.ld */
extern uint32_t __text_region_start;
extern uint32_t __rodata_region_end;
extern uint32_t _image_ram_start;
extern uint32_t _image_ram_end;

/* Debug prints are enabled by default; undefine PM_DEBUG_PRINTS to disable them */
#define PM_DEBUG_PRINTS 1

#ifdef PM_DEBUG_PRINTS
#define PM_LOG(...) printk(__VA_ARGS__)
#else
#define PM_LOG(...)
#endif

/* UART divisor latch (DLL/DLH/DLF) and line control (LCR) register offsets */
#define DLL 0x0
#define DLH 0x4
#define LCR 0xC
#define DLF 0xC0

static sedi_uart_config_t uart0_cfg, uart1_cfg, uart2_cfg;

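/**
 * Save each UART port's configuration, then program a divisor of 1 through
 * the divisor latch (DLAB bit in LCR) so the ports sit idle while in a low
 * power state; uart_port_restore() undoes this on exit.
 */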
static void uart_to_idle(void)
{
        sedi_uart_get_config(SEDI_UART_0, &uart0_cfg);
        write32(SEDI_IREG_BASE(UART, 0) + LCR, 0x80);
        write32(SEDI_IREG_BASE(UART, 0) + DLL, 0x1);
        write32(SEDI_IREG_BASE(UART, 0) + DLH, 0x0);
        write32(SEDI_IREG_BASE(UART, 0) + LCR, 0);

        sedi_uart_get_config(SEDI_UART_1, &uart1_cfg);
        write32(SEDI_IREG_BASE(UART, 1) + LCR, 0x80);
        write32(SEDI_IREG_BASE(UART, 1) + DLL, 0x1);
        write32(SEDI_IREG_BASE(UART, 1) + DLH, 0x0);
        write32(SEDI_IREG_BASE(UART, 1) + LCR, 0);

        sedi_uart_get_config(SEDI_UART_2, &uart2_cfg);
        write32(SEDI_IREG_BASE(UART, 2) + LCR, 0x80);
        write32(SEDI_IREG_BASE(UART, 2) + DLL, 0x1);
        write32(SEDI_IREG_BASE(UART, 2) + DLH, 0x0);
        write32(SEDI_IREG_BASE(UART, 2) + LCR, 0);
}

static void uart_port_restore(void)
{
        sedi_uart_set_config(SEDI_UART_0, &uart0_cfg);
        sedi_uart_set_config(SEDI_UART_1, &uart1_cfg);
        sedi_uart_set_config(SEDI_UART_2, &uart2_cfg);
}

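/**
 * Restore hardware after returning from power gating: re-enable the local
 * APIC, clear the CCU reset history and turn trunk/branch clock gating back
 * off for the MIA core and peripherals.
 */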
static void pg_exit_restore_hw(void)
{
        write32(LAPIC_SPUR, LAPIC_ENABLE | LAPIC_SPUR_RESET);

        write32(CCU_RST_HST, read32(CCU_RST_HST));
        write32(CCU_TCG_ENABLE, 0);
        write32(CCU_BCG_ENABLE, 0);

        write32(CCU_BCG_MIA, 0);
        write32(CCU_BCG_DMA, 0);
        write32(CCU_BCG_I2C, 0);
        write32(CCU_BCG_SPI, 0);
        write32(CCU_BCG_UART, 0);
        write32(CCU_BCG_GPIO, 0);
}

/**
 * The ISH PMU does not support both-edge interrupt triggered gpio
 * configuration. If both edges are configured, the ISH can't stay in low
 * power mode because it will exit immediately.
 *
 * To keep the LPM functions intact and still support a both-edge
 * configuration, the alternative is:
 * Before entering LPM, scan all gpio pins which are configured to trigger
 * on both edges, and temporarily set each such pin to the single edge
 * expected to trigger next, that is, the opposite of its current value.
 * After exiting LPM, restore the both-edge trigger configuration.
 **/
static uint32_t convert_both_edge_gpio_to_single_edge(void)
{
        uint32_t both_edge_pins = 0;
        int i = 0;

        /*
         * scan the GPIO GFER, GRER and GIMR registers to find the pins with
         * the both-edge interrupt trigger mode enabled.
         */
        for (i = 0; i < 32; i++) {
                if (read32(ISH_GPIO_GIMR) & BIT(i) && read32(ISH_GPIO_GRER) & BIT(i) &&
                    (read32(ISH_GPIO_GFER) & BIT(i))) {
                        /* Record the pin so we can restore it later */
                        both_edge_pins |= BIT(i);

                        if (read32(ISH_GPIO_GPLR) & BIT(i)) {
                                /* pin is high, just keep falling edge mode */
                                write32(ISH_GPIO_GRER, read32(ISH_GPIO_GRER) & ~BIT(i));
                        } else {
                                /* pin is low, just keep rising edge mode */
                                write32(ISH_GPIO_GFER, read32(ISH_GPIO_GFER) & ~BIT(i));
                        }
                }
        }

        return both_edge_pins;
}

static void restore_both_edge_gpio_config(uint32_t both_edge_pin_map)
{
        write32(ISH_GPIO_GRER, read32(ISH_GPIO_GRER) | both_edge_pin_map);
        write32(ISH_GPIO_GFER, read32(ISH_GPIO_GFER) | both_edge_pin_map);
}

/* power management internal context data structure */
struct pm_context {
        /* aontask image valid flag */
        int aon_valid;
        /* points to the aon shared data in aontask */
        struct ish_aon_share *aon_share;
        /* TSS segment selectors for task switching */
        int aon_tss_selector[2];
} __attribute__((packed));

static struct pm_context pm_ctx = {
        .aon_valid = 0,
        /* aon shared data is located at the start of aon memory */
        .aon_share = (struct ish_aon_share *)CONFIG_AON_RAM_BASE,
};

/* D0ix statistics data, including each state's count and total stay time */
struct pm_stat {
        uint64_t count;
        uint64_t total_time_us;
};

struct pm_statistics {
        struct pm_stat d0i0;
        struct pm_stat d0i1;
        struct pm_stat d0i2;
        struct pm_stat d0i3;
        struct pm_stat pg;
};

static struct pm_statistics pm_stats;

/*
 * Log a new statistic
 *
 * t0: start time, in us
 * t1: end time, in us
 */
static void log_pm_stat(struct pm_stat *stat, uint64_t t0, uint64_t t1)
{
        stat->total_time_us += t1 - t0;
        stat->count++;
}

extern struct pseudo_descriptor _gdt;

/* TSS descriptor for saving main FW's cpu context during aontask switching */
static struct tss_entry main_tss;

/**
 * add a new entry in the GDT
 *
 * @param desc_lo lower DWORD of the entry descriptor
 * @param desc_up upper DWORD of the entry descriptor
 *
 * @return the descriptor selector index of the added entry
 */
static uint32_t add_gdt_entry(uint32_t desc_lo, uint32_t desc_up)
{
        int index;
        struct gdt_header *gdt_ptr = (struct gdt_header *)&_gdt;
        struct gdt_entry *__gdt = (struct gdt_entry *)(gdt_ptr->entries);

        /*
         * get the first empty entry of the GDT, which is defined in crt0.S;
         * each entry has a fixed size of 8 bytes
         */
        index = (gdt_ptr->limit + 1) >> 3;

        /* add the new entry descriptor to the GDT */
        __gdt[index].dword_lo = desc_lo;
        __gdt[index].dword_up = desc_up;

        /* update GDT's limit size */
        gdt_ptr->limit += sizeof(struct gdt_entry);

        return ((index + 1) << 3) - sizeof(struct gdt_entry);
}

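/**
 * Set up task switching to the aontask: validate the aontask image by its
 * magic ID, add GDT entries (a TSS for the main FW, plus the aontask's TSS
 * and LDT), load the main FW's TSS into the task register, and publish the
 * main FW's RO/RW region addresses in the aon shared data.
 */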
static void init_aon_task(void)
{
        uint32_t desc_lo, desc_up, main_tss_index;
        struct ish_aon_share *aon_share = pm_ctx.aon_share;
        struct tss_entry *aon_tss = aon_share->aon_tss;

        if (aon_share->magic_id != AON_MAGIC_ID) {
                pm_ctx.aon_valid = 0;
                return;
        }

        pm_ctx.aon_valid = 1;

        pm_ctx.aon_tss_selector[0] = 0;

        main_tss.iomap_base_addr = GDT_DESC_TSS_LIMIT;

        desc_lo = GEN_GDT_DESC_LO((uint32_t)&main_tss,
                                  GDT_DESC_TSS_LIMIT, GDT_DESC_TSS_FLAGS);
        desc_up = GEN_GDT_DESC_UP((uint32_t)&main_tss,
                                  GDT_DESC_TSS_LIMIT, GDT_DESC_TSS_FLAGS);
        main_tss_index = add_gdt_entry(desc_lo, desc_up);

        desc_lo = GEN_GDT_DESC_LO((uint32_t)aon_tss,
                                  GDT_DESC_TSS_LIMIT, GDT_DESC_TSS_FLAGS);
        desc_up = GEN_GDT_DESC_UP((uint32_t)aon_tss,
                                  GDT_DESC_TSS_LIMIT, GDT_DESC_TSS_FLAGS);
        pm_ctx.aon_tss_selector[1] = add_gdt_entry(desc_lo, desc_up);

        desc_lo = GEN_GDT_DESC_LO((uint32_t)aon_share->aon_ldt,
                                  aon_share->aon_ldt_size, GDT_DESC_LDT_FLAGS);
        desc_up = GEN_GDT_DESC_UP((uint32_t)aon_share->aon_ldt,
                                  aon_share->aon_ldt_size, GDT_DESC_LDT_FLAGS);
        aon_tss->ldt_seg_selector = add_gdt_entry(desc_lo, desc_up);

        /* reload the GDT and load the main FW's TSS selector into the task register */
        __asm__ volatile("lgdt _gdt;\n"
                         "push %%eax;\n"
                         "movw %0, %%ax;\n"
                         "ltr %%ax;\n"
                         "pop %%eax;\n"
                         :
                         : "r"((uint16_t)main_tss_index));

        aon_share->main_fw_ro_addr = (uint32_t)&__text_region_start;
        aon_share->main_fw_ro_size = (uint32_t)&__rodata_region_end -
                                     (uint32_t)&__text_region_start;

        aon_share->main_fw_rw_addr = (uint32_t)&_image_ram_start;
        aon_share->main_fw_rw_size = (uint32_t)&_image_ram_end -
                                     (uint32_t)&_image_ram_start;

        aon_share->uma_msb = read32(IPC_UMA_RANGE_LOWER_1);

        ish_dma_init();
}

static inline void check_aon_task_status(void)
{
        struct ish_aon_share *aon_share = pm_ctx.aon_share;

        if (aon_share->last_error != AON_SUCCESS) {
                PM_LOG("aontask has errors:\n");
                PM_LOG(" last error: %d\n", aon_share->last_error);
                PM_LOG(" error counts: %d\n", aon_share->error_count);
        }
}

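/*
 * Disable every IOAPIC IRQ recorded in the snapshot except the PMU wakeup
 * (PMU2IOAPIC) and RESET_PREP lines; pm_enable_irqs() restores the same
 * snapshot after exiting the low power state.
 */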
static void pm_disable_irqs(uint64_t current_ioapic_state)
{
        int i;

        for (i = 0; i < IOAPIC_NUM_RTES; i++) {
                if ((((uint64_t)1) << i & current_ioapic_state) && (i != SEDI_IRQ_PMU2IOAPIC) &&
                    (i != SEDI_IRQ_RESET_PREP))
                        sedi_core_irq_disable(i);
        }
}

static void pm_enable_irqs(uint64_t current_ioapic_state)
{
        int i;

        for (i = 0; i < IOAPIC_NUM_RTES; i++) {
                if ((((uint64_t)1) << i & current_ioapic_state) && (i != SEDI_IRQ_PMU2IOAPIC) &&
                    (i != SEDI_IRQ_RESET_PREP))
                        sedi_core_irq_enable(i);
        }
}

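/**
 * Hand control to the aontask: flush and disable the cache, then do a far
 * call through the aontask's TSS selector so the CPU performs a hardware
 * task switch, saving the main FW context in main_tss. Execution resumes
 * here once the aontask switches back.
 */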
static void switch_to_aontask(void)
{
        unsigned int eflags = sedi_core_irq_lock();

        __sync_synchronize();

        /* disable cache and flush cache */
        __asm__ volatile("movl %%cr0, %%eax;\n"
                         "orl $0x60000000, %%eax;\n"
                         "movl %%eax, %%cr0;\n"
                         "wbinvd;"
                         :
                         :
                         : "eax");

        /* switch to aontask through a far call with aontask's TSS selector */
        __asm__ volatile("lcall *%0;" ::"m"(*pm_ctx.aon_tss_selector) :);

        /* clear TS (Task Switched) flag and enable cache */
        __asm__ volatile("clts;\n"
                         "movl %%cr0, %%eax;\n"
                         "andl $0x9FFFFFFF, %%eax;\n"
                         "movl %%eax, %%cr0;"
                         :
                         :
                         : "eax");

        sedi_core_irq_unlock(eflags);
}

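/*
 * Perform the reset flow from inside the aontask: record the target PM
 * state, mask all IRQs except PMU wakeup and RESET_PREP, enable trunk clock
 * gating and RF/ROM power gating, then switch to the aontask. Does not
 * return.
 */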
static void handle_reset_in_aontask(enum ish_pm_state pm_state)
{
        uint64_t ioapic_state;

        pm_ctx.aon_share->pm_state = pm_state;

        ioapic_state = sedi_core_get_irq_map();
        pm_disable_irqs(ioapic_state);
        sedi_core_irq_enable(SEDI_IRQ_PMU2IOAPIC);
        sedi_core_irq_enable(SEDI_IRQ_RESET_PREP);

        /* enable Trunk Clock Gating (TCG) of ISH */
        write32(CCU_TCG_EN, 1);

        /* enable power gating of RF(Cache) and ROMs */
        write32(PMU_RF_ROM_PWR_CTRL, 1);

        switch_to_aontask();

        __builtin_unreachable();
}

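/* D0i0: lightest idle state, simply halt the CPU until any interrupt fires */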
static void enter_d0i0(void)
{
        uint64_t t0, t1;

        t0 = sedi_rtc_get_us();
        pm_ctx.aon_share->pm_state = ISH_PM_STATE_D0I0;

        /* halt the ISH CPU; it wakes up on any interrupt */
        ish_mia_halt();

        t1 = sedi_rtc_get_us();
        pm_ctx.aon_share->pm_state = ISH_PM_STATE_D0;
        log_pm_stat(&pm_stats.d0i0, t0, t1);
}

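/*
 * D0i1: halt the CPU with trunk clock gating enabled (or MIA branch clock
 * gating on ISH 5.6.0); the PMU wakeup interrupt brings the core back.
 */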
static void enter_d0i1(void)
{
#ifndef CONFIG_SOC_INTEL_ISH_5_6_0
        uint64_t ioapic_state;
#endif
        uint64_t t0, t1;
        uint32_t both_edge_gpio_pins;

#ifndef CONFIG_SOC_INTEL_ISH_5_6_0
        ioapic_state = sedi_core_get_irq_map();
        pm_disable_irqs(ioapic_state);
#endif
        sedi_core_irq_enable(SEDI_IRQ_PMU2IOAPIC);
        sedi_core_irq_enable(SEDI_IRQ_RESET_PREP);

        t0 = sedi_rtc_get_us();
        pm_ctx.aon_share->pm_state = ISH_PM_STATE_D0I1;

        both_edge_gpio_pins = convert_both_edge_gpio_to_single_edge();

#ifndef CONFIG_SOC_INTEL_ISH_5_6_0
        /* enable Trunk Clock Gating (TCG) of ISH */
        write32(CCU_TCG_EN, 1);
#else
        write32(CCU_BCG_MIA, read32(CCU_BCG_MIA) | CCU_BCG_BIT_MIA);
#endif

        /* halt the ISH CPU; it wakes up on the PMU wakeup interrupt */
        ish_mia_halt();

#ifndef CONFIG_SOC_INTEL_ISH_5_6_0
        /* disable Trunk Clock Gating (TCG) of ISH */
        write32(CCU_TCG_EN, 0);
#else
        write32(CCU_BCG_MIA, read32(CCU_BCG_MIA) & (~CCU_BCG_BIT_MIA));
#endif

        restore_both_edge_gpio_config(both_edge_gpio_pins);

        pm_ctx.aon_share->pm_state = ISH_PM_STATE_D0;
        t1 = sedi_rtc_get_us();
        log_pm_stat(&pm_stats.d0i1, t0, t1);

#ifndef CONFIG_SOC_INTEL_ISH_5_6_0
        /* restore interrupts */
        pm_enable_irqs(ioapic_state);
#endif
        sedi_core_irq_disable(SEDI_IRQ_PMU2IOAPIC);
}

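/*
 * D0i2: enable trunk clock gating and RF/ROM power gating, then hand off to
 * the aontask, which completes the entry/exit sequence and switches back on
 * a wake-up event.
 */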
static void enter_d0i2(void)
{
        uint64_t ioapic_state;
        uint64_t t0, t1;
        uint32_t both_edge_gpio_pins;

        ioapic_state = sedi_core_get_irq_map();
        pm_disable_irqs(ioapic_state);
        sedi_core_irq_enable(SEDI_IRQ_PMU2IOAPIC);
        sedi_core_irq_enable(SEDI_IRQ_RESET_PREP);

        t0 = sedi_rtc_get_us();
        pm_ctx.aon_share->pm_state = ISH_PM_STATE_D0I2;

        both_edge_gpio_pins = convert_both_edge_gpio_to_single_edge();

        /* enable Trunk Clock Gating (TCG) of ISH */
        write32(CCU_TCG_EN, 1);

        /* enable power gating of RF(Cache) and ROMs */
        write32(PMU_RF_ROM_PWR_CTRL, 1);

        switch_to_aontask();

        /* returned from aontask */

        if (pm_ctx.aon_share->pg_exit)
                pg_exit_restore_hw();

        /* disable power gating of RF(Cache) and ROMs */
        write32(PMU_RF_ROM_PWR_CTRL, 0);

        /* disable Trunk Clock Gating (TCG) of ISH */
        write32(CCU_TCG_EN, 0);

        t1 = sedi_rtc_get_us();
        pm_ctx.aon_share->pm_state = ISH_PM_STATE_D0;
        log_pm_stat(&pm_stats.d0i2, t0, t1);

        restore_both_edge_gpio_config(both_edge_gpio_pins);

        if (pm_ctx.aon_share->pg_exit)
                log_pm_stat(&pm_stats.pg, t0, t1);

        /* restore interrupts */
        pm_enable_irqs(ioapic_state);
        sedi_core_irq_disable(SEDI_IRQ_PMU2IOAPIC);
}

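/*
 * D0i3: deepest D0ix state. Like D0i2 it hands off to the aontask after
 * enabling trunk clock gating and RF/ROM power gating, and restores
 * hardware on the way back if power gating was exited (pg_exit).
 */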
static void enter_d0i3(void)
{
        uint64_t ioapic_state;
        uint64_t t0, t1;
        uint32_t both_edge_gpio_pins;

        ioapic_state = sedi_core_get_irq_map();
        pm_disable_irqs(ioapic_state);
        sedi_core_irq_enable(SEDI_IRQ_PMU2IOAPIC);
        sedi_core_irq_enable(SEDI_IRQ_RESET_PREP);

        t0 = sedi_rtc_get_us();
        pm_ctx.aon_share->pm_state = ISH_PM_STATE_D0I3;

        both_edge_gpio_pins = convert_both_edge_gpio_to_single_edge();

        /* enable Trunk Clock Gating (TCG) of ISH */
        write32(CCU_TCG_EN, 1);

        /* enable power gating of RF(Cache) and ROMs */
        write32(PMU_RF_ROM_PWR_CTRL, 1);

        switch_to_aontask();

        /* returned from aontask */

        if (pm_ctx.aon_share->pg_exit)
                pg_exit_restore_hw();

        /* disable power gating of RF(Cache) and ROMs */
        write32(PMU_RF_ROM_PWR_CTRL, 0);

        /* disable Trunk Clock Gating (TCG) of ISH */
        write32(CCU_TCG_EN, 0);

        restore_both_edge_gpio_config(both_edge_gpio_pins);

        t1 = sedi_rtc_get_us();
        pm_ctx.aon_share->pm_state = ISH_PM_STATE_D0;
        log_pm_stat(&pm_stats.d0i3, t0, t1);

        if (pm_ctx.aon_share->pg_exit)
                log_pm_stat(&pm_stats.pg, t0, t1);

        /* restore interrupts */
        pm_enable_irqs(ioapic_state);
        sedi_core_irq_disable(SEDI_IRQ_PMU2IOAPIC);
}

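/*
 * Common preparation before entering a D0ix state: write back PMU_VNN_REQ
 * (presumably clearing pending VNN requests) and idle the UART ports;
 * post_setting_d0ix() restores the UARTs afterwards.
 */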
static void pre_setting_d0ix(void)
{
        write32(PMU_VNN_REQ, read32(PMU_VNN_REQ));
        uart_to_idle();
}

static void post_setting_d0ix(void)
{
        uart_port_restore();
}

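/**
 * Request a low power state, e.g. sedi_pm_enter_power_state(ISH_PM_STATE_D0I2);
 * unknown states fall back to D0i0. D0i2 and D0i3 additionally report any
 * errors recorded by the aontask.
 */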
void sedi_pm_enter_power_state(int state)
{
        switch (state) {
        case ISH_PM_STATE_D0I1:
#ifndef CONFIG_SOC_INTEL_ISH_5_6_0
                pre_setting_d0ix();
#endif
                enter_d0i1();
#ifndef CONFIG_SOC_INTEL_ISH_5_6_0
                post_setting_d0ix();
#endif
                break;
        case ISH_PM_STATE_D0I2:
                pre_setting_d0ix();
                enter_d0i2();
                post_setting_d0ix();
                check_aon_task_status();
                break;
        case ISH_PM_STATE_D0I3:
                pre_setting_d0ix();
                enter_d0i3();
                post_setting_d0ix();
                check_aon_task_status();
                break;
        default:
                enter_d0i0();
                break;
        }
}

static void reset_bcg(void)
{
        write32(CCU_BCG_MIA, 0);
        write32(CCU_BCG_DMA, 0);
        write32(CCU_BCG_I2C, 0);
        write32(CCU_BCG_SPI, 0);
        write32(CCU_BCG_UART, 0);
        write32(CCU_BCG_GPIO, 0);
}

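/*
 * Handle a D3 entry request from the host: clear and re-check the D3 status,
 * acknowledge the interrupt, and reset into the D3 PM state unless the host
 * is in suspend.
 */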
static void handle_d3(uint32_t irq_vec)
{
        write32(PMU_D3_STATUS, read32(PMU_D3_STATUS));

        if (read32(PMU_D3_STATUS) & (PMU_D3_BIT_RISING_EDGE_STATUS | PMU_D3_BIT_SET)) {
                /*
                 * Indicate completion of servicing the interrupt to IOAPIC
                 * first, then indicate completion of servicing the interrupt
                 * to LAPIC
                 */
                write32(SEDI_IOAPIC_EOI, irq_vec);
                write32(LAPIC_EOI, 0x0);

                if (!(pm_ctx.aon_share->host_in_suspend))
                        ish_pm_reset(ISH_PM_STATE_D3);
        }
}

static void pcie_dev_isr(void)
{
        handle_d3(SEDI_VEC_PCIEDEV);
}

/**
 * The main FW only needs to handle the PMU wakeup interrupt for the D0i1
 * state; the aontask handles the PMU wakeup interrupt for the other low
 * power states.
 */
static void pmu_wakeup_isr(void)
{
        /* currently there is nothing to do here */
}

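/*
 * RESET_PREP interrupt handler: a reset has been requested, so mask further
 * reset prep interrupts, acknowledge this one and reset the minute-IA core.
 */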
static void reset_prep_isr(void)
{
        /* mask reset prep avail interrupt */
        write32(PMU_RST_PREP, PMU_RST_PREP_INT_MASK);

        /*
         * Indicate completion of servicing the interrupt to IOAPIC first,
         * then indicate completion of servicing the interrupt to LAPIC
         */
        write32(SEDI_IOAPIC_EOI, SEDI_VEC_RESET_PREP);
        write32(LAPIC_EOI, 0x0);

        ish_mia_reset();
        __builtin_unreachable();
}

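/**
 * One-time power management initialization: clear reset status, disable
 * clock gating, set up the aontask, program the PMU wake masks and
 * fabric/clock-gate counters, and hook up the RESET_PREP, PMU wakeup and
 * PCIe device interrupts.
 */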
void sedi_pm_init(void)
{
        /* clear reset bit */
        write32(ISH_RST_REG, 0);

        /* clear reset history register in CCU */
        write32(CCU_RST_HST, read32(CCU_RST_HST));

        /* disable TCG and disable BCG */
        write32(CCU_TCG_EN, 0);
        reset_bcg();

        init_aon_task();

        write32(PMU_GPIO_WAKE_MASK0, 0);
        write32(PMU_GPIO_WAKE_MASK1, 0);

        /* unmask all wake up events */
        write32(PMU_MASK_EVENT, ~PMU_MASK_EVENT_BIT_ALL);

        write32(PMU_ISH_FABRIC_CNT, (read32(PMU_ISH_FABRIC_CNT) & 0xffff0000) | FABRIC_IDLE_COUNT);
        write32(PMU_PGCB_CLKGATE_CTRL, TRUNK_CLKGATE_COUNT);

        IRQ_CONNECT(SEDI_IRQ_RESET_PREP, 5, reset_prep_isr, 0, IOAPIC_LEVEL);
        IRQ_CONNECT(SEDI_IRQ_PMU2IOAPIC, 5, pmu_wakeup_isr, 0, IOAPIC_LEVEL);
        IRQ_CONNECT(SEDI_IRQ_PCIEDEV, 5, pcie_dev_isr, 0, IOAPIC_LEVEL);

        /* unmask reset prep avail interrupt */
        write32(PMU_RST_PREP, 0);
        sedi_core_irq_enable(SEDI_IRQ_RESET_PREP);

        /* unmask D3 and BME interrupts */
        write32(PMU_D3_STATUS, read32(PMU_D3_STATUS) & (PMU_D3_BIT_SET | PMU_BME_BIT_SET));

        if ((!(read32(PMU_D3_STATUS) & PMU_D3_BIT_SET)) &&
            (read32(PMU_D3_STATUS) & PMU_BME_BIT_SET))
                write32(PMU_D3_STATUS, read32(PMU_D3_STATUS));

        sedi_core_irq_enable(SEDI_IRQ_PCIEDEV);
}

void ish_pm_reset(enum ish_pm_state pm_state)
{
        if (pm_ctx.aon_valid) {
                handle_reset_in_aontask(pm_state);
        } else {
                ish_mia_reset();
        }

        __builtin_unreachable();
}

void sedi_pm_reset(void)
{
        ish_mia_reset();
}

void sedi_pm_host_suspend(uint32_t suspend)
{
        pm_ctx.aon_share->host_in_suspend = suspend;
}

/*
 * helper to print one entry of the idle statistics
 */
static void print_stats(const char *name, const struct pm_stat *stat)
{
        if (stat->count)
                PM_LOG(" %s:\n"
                       " counts: %llu\n"
                       " time: %.6llu ms\n",
                       name, stat->count, (stat->total_time_us)/1000);
}

/**
 * Print low power idle statistics
 */
void command_idle_stats(void)
{
#ifdef PM_DEBUG_PRINTS
        struct ish_aon_share *aon_share = pm_ctx.aon_share;
#endif
        uint64_t tall;

        tall = sedi_rtc_get_us();

        PM_LOG("Aontask exists: %s\n", pm_ctx.aon_valid ? "Yes" : "No");
        PM_LOG("Total time on: %.6llu ms\n", tall/1000);
        PM_LOG("Idle sleep:\n");
        print_stats("D0i0", &pm_stats.d0i0);

        PM_LOG("Deep sleep:\n");
        print_stats("D0i1", &pm_stats.d0i1);
        print_stats("D0i2", &pm_stats.d0i2);
        print_stats("D0i3", &pm_stats.d0i3);
        print_stats("IPAPG", &pm_stats.pg);

        if (pm_ctx.aon_valid) {
                PM_LOG(" Aontask status:\n");
                PM_LOG(" last error: %u\n", aon_share->last_error);
                PM_LOG(" error counts: %u\n", aon_share->error_count);
        }
}