/*
 * Copyright (c) 2023 Intel Corporation
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include "aon/aon_share.h"
#include "ish_dma.h"
#include "ish_pm.h"
#include "sedi_driver_core.h"
#include "sedi_driver_uart.h"
#include <sedi_driver_rtc.h>
#include <zephyr/sys/printk.h>
#include <zephyr/irq.h>
#include <zephyr/drivers/interrupt_controller/ioapic.h>
#include <zephyr/arch/x86/ia32/segmentation.h>

/* defined in link script: soc/x86/intel_ish/scripts/ish_linker.ld */
extern uint32_t __text_region_start;
extern uint32_t __rodata_region_end;
extern uint32_t _image_ram_start;
extern uint32_t _image_ram_end;

/* Debug prints are enabled by default; remove this define to disable them */
#define PM_DEBUG_PRINTS 1

#ifdef PM_DEBUG_PRINTS
#define PM_LOG(...) printk(__VA_ARGS__)
#else
#define PM_LOG(...)
#endif

#define DLL	0x0
#define DLH	0x4
#define LCR	0xC
#define DLF	0xC0

static sedi_uart_config_t uart0_cfg, uart1_cfg, uart2_cfg;

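/*
 * Save the current configuration of UART0-2 and program each port's baud
 * divisor latch to a fixed value (DLL = 1, DLH = 0) through the DLAB bit in
 * LCR, parking the ports before a D0ix entry; uart_port_restore() undoes
 * this on exit.
 */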
static void uart_to_idle(void)
{
	sedi_uart_get_config(SEDI_UART_0, &uart0_cfg);
	write32(SEDI_IREG_BASE(UART, 0) + LCR, 0x80);
	write32(SEDI_IREG_BASE(UART, 0) + DLL, 0x1);
	write32(SEDI_IREG_BASE(UART, 0) + DLH, 0x0);
	write32(SEDI_IREG_BASE(UART, 0) + LCR, 0);

	sedi_uart_get_config(SEDI_UART_1, &uart1_cfg);
	write32(SEDI_IREG_BASE(UART, 1) + LCR, 0x80);
	write32(SEDI_IREG_BASE(UART, 1) + DLL, 0x1);
	write32(SEDI_IREG_BASE(UART, 1) + DLH, 0x0);
	write32(SEDI_IREG_BASE(UART, 1) + LCR, 0);

	sedi_uart_get_config(SEDI_UART_2, &uart2_cfg);
	write32(SEDI_IREG_BASE(UART, 2) + LCR, 0x80);
	write32(SEDI_IREG_BASE(UART, 2) + DLL, 0x1);
	write32(SEDI_IREG_BASE(UART, 2) + DLH, 0x0);
	write32(SEDI_IREG_BASE(UART, 2) + LCR, 0);
}

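/* Restore the UART0-2 configurations saved by uart_to_idle(). */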
static void uart_port_restore(void)
{
	sedi_uart_set_config(SEDI_UART_0, &uart0_cfg);
	sedi_uart_set_config(SEDI_UART_1, &uart1_cfg);
	sedi_uart_set_config(SEDI_UART_2, &uart2_cfg);
}

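/*
 * Called after aontask reports a power-gating exit: re-enable the local
 * APIC, clear the CCU reset history, and turn off trunk and branch clock
 * gating so all IP clocks run again.
 */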
static void pg_exit_restore_hw(void)
{
	write32(LAPIC_SPUR, LAPIC_ENABLE | LAPIC_SPUR_RESET);

	write32(CCU_RST_HST, read32(CCU_RST_HST));
	write32(CCU_TCG_ENABLE, 0);
	write32(CCU_BCG_ENABLE, 0);

	write32(CCU_BCG_MIA, 0);
	write32(CCU_BCG_DMA, 0);
	write32(CCU_BCG_I2C, 0);
	write32(CCU_BCG_SPI, 0);
	write32(CCU_BCG_UART, 0);
	write32(CCU_BCG_GPIO, 0);
}

/* power management internal context data structure */
struct pm_context {
	/* aontask image valid flag */
	int aon_valid;
	/* points to the aon shared data in aontask */
	struct ish_aon_share *aon_share;
	/* TSS segment selector for task switching */
	int aon_tss_selector[2];
} __attribute__((packed));

static struct pm_context pm_ctx = {
	.aon_valid = 0,
	/* aon shared data is located at the start of aon memory */
	.aon_share = (struct ish_aon_share *)CONFIG_AON_RAM_BASE,
};

/* D0ix statistics data, including each state's count and total stay time */
struct pm_stat {
	uint64_t count;
	uint64_t total_time_us;
};

struct pm_statistics {
	struct pm_stat d0i0;
	struct pm_stat d0i1;
	struct pm_stat d0i2;
	struct pm_stat d0i3;
	struct pm_stat pg;
};

static struct pm_statistics pm_stats;

/*
 * Log a new statistic
 *
 * t0: start time, in us
 * t1: end time, in us
 */
static void log_pm_stat(struct pm_stat *stat, uint64_t t0, uint64_t t1)
{
	stat->total_time_us += t1 - t0;
	stat->count++;
}

extern struct pseudo_descriptor _gdt;

/* TSS descriptor for saving main FW's cpu context during aontask switching */
static struct tss_entry main_tss;

/**
 * add a new entry in the GDT
 *
 * @param desc_lo	lower DWORD of the entry descriptor
 * @param desc_up	upper DWORD of the entry descriptor
 *
 * @return		the descriptor selector index of the added entry
 */
static uint32_t add_gdt_entry(uint32_t desc_lo, uint32_t desc_up)
{
	int index;
	struct gdt_header *gdt_ptr = (struct gdt_header *)&_gdt;
	struct gdt_entry *__gdt = (struct gdt_entry *)(gdt_ptr->entries);

	/*
	 * get the first empty entry of the GDT, which is defined in crt0.S;
	 * each entry has a fixed size of 8 bytes
	 */
	index = (gdt_ptr->limit + 1) >> 3;

	/* add the new entry descriptor to the GDT */
	__gdt[index].dword_lo = desc_lo;
	__gdt[index].dword_up = desc_up;

	/* update GDT's limit size */
	gdt_ptr->limit += sizeof(struct gdt_entry);

	return ((index + 1) << 3) - sizeof(struct gdt_entry);
}

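/*
 * Prepare the always-on (aon) task: verify the aontask image through its
 * magic ID, add GDT descriptors for the main FW TSS, the aontask TSS and
 * the aontask LDT, reload the GDT and task register, and publish the main
 * FW RO/RW memory layout plus the UMA range to the shared aon data for
 * aontask's use.
 */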
static void init_aon_task(void)
{
	uint32_t desc_lo, desc_up, main_tss_index;
	struct ish_aon_share *aon_share = pm_ctx.aon_share;
	struct tss_entry *aon_tss = aon_share->aon_tss;

	if (aon_share->magic_id != AON_MAGIC_ID) {
		pm_ctx.aon_valid = 0;
		return;
	}

	pm_ctx.aon_valid = 1;

	pm_ctx.aon_tss_selector[0] = 0;

	main_tss.iomap_base_addr = GDT_DESC_TSS_LIMIT;

	desc_lo = GEN_GDT_DESC_LO((uint32_t)&main_tss,
			GDT_DESC_TSS_LIMIT, GDT_DESC_TSS_FLAGS);
	desc_up = GEN_GDT_DESC_UP((uint32_t)&main_tss,
			GDT_DESC_TSS_LIMIT, GDT_DESC_TSS_FLAGS);
	main_tss_index = add_gdt_entry(desc_lo, desc_up);

	desc_lo = GEN_GDT_DESC_LO((uint32_t)aon_tss,
			GDT_DESC_TSS_LIMIT, GDT_DESC_TSS_FLAGS);
	desc_up = GEN_GDT_DESC_UP((uint32_t)aon_tss,
			GDT_DESC_TSS_LIMIT, GDT_DESC_TSS_FLAGS);
	pm_ctx.aon_tss_selector[1] = add_gdt_entry(desc_lo, desc_up);

	desc_lo = GEN_GDT_DESC_LO((uint32_t)aon_share->aon_ldt,
				  aon_share->aon_ldt_size, GDT_DESC_LDT_FLAGS);
	desc_up = GEN_GDT_DESC_UP((uint32_t)aon_share->aon_ldt,
				  aon_share->aon_ldt_size, GDT_DESC_LDT_FLAGS);
	aon_tss->ldt_seg_selector = add_gdt_entry(desc_lo, desc_up);

	__asm__ volatile("lgdt _gdt;\n"
			 "push %%eax;\n"
			 "movw %0, %%ax;\n"
			 "ltr %%ax;\n"
			 "pop %%eax;\n"
			 :
			 : "r"((uint16_t)main_tss_index));

	aon_share->main_fw_ro_addr = (uint32_t)&__text_region_start;
	aon_share->main_fw_ro_size = (uint32_t)&__rodata_region_end -
				     (uint32_t)&__text_region_start;

	aon_share->main_fw_rw_addr = (uint32_t)&_image_ram_start;
	aon_share->main_fw_rw_size = (uint32_t)&_image_ram_end -
				     (uint32_t)&_image_ram_start;

	aon_share->uma_msb = read32(IPC_UMA_RANGE_LOWER_1);

	ish_dma_init();
}

static inline void check_aon_task_status(void)
{
	struct ish_aon_share *aon_share = pm_ctx.aon_share;

	if (aon_share->last_error != AON_SUCCESS) {
		PM_LOG("aontask has errors:\n");
		PM_LOG("    last error:   %d\n", aon_share->last_error);
		PM_LOG("    error counts: %d\n", aon_share->error_count);
	}
}

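/*
 * Disable every IRQ set in the given IOAPIC routing map, except the PMU
 * wakeup (PMU2IOAPIC) and RESET_PREP interrupts; pm_enable_irqs() restores
 * the same set on exit from the low-power state.
 */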
static void pm_disable_irqs(uint64_t current_ioapic_state)
{
	int i;

	for (i = 0; i < IOAPIC_NUM_RTES; i++) {
		if ((((uint64_t)1) << i & current_ioapic_state) && (i != SEDI_IRQ_PMU2IOAPIC)
			&& (i != SEDI_IRQ_RESET_PREP))
			sedi_core_irq_disable(i);
	}
}

static void pm_enable_irqs(uint64_t current_ioapic_state)
{
	int i;

	for (i = 0; i < IOAPIC_NUM_RTES; i++) {
		if ((((uint64_t)1) << i & current_ioapic_state) && (i != SEDI_IRQ_PMU2IOAPIC)
			&& (i != SEDI_IRQ_RESET_PREP))
			sedi_core_irq_enable(i);
	}
}

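/*
 * Hand the CPU over to aontask via a hardware task switch: interrupts are
 * locked, the cache is disabled (CR0.CD/NW) and flushed with wbinvd, and a
 * far call through the aontask TSS selector performs the switch.  Execution
 * resumes here when aontask switches back, after which the Task Switched
 * flag is cleared and the cache is re-enabled.
 */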
static void switch_to_aontask(void)
{
	unsigned int eflags = sedi_core_irq_lock();

	__sync_synchronize();

	/* disable cache and flush cache */
	__asm__ volatile("movl %%cr0, %%eax;\n"
			 "orl $0x60000000, %%eax;\n"
			 "movl %%eax, %%cr0;\n"
			 "wbinvd;"
			 :
			 :
			 : "eax");

	/* switch to aontask through a far call with aontask's TSS selector */
	__asm__ volatile("lcall *%0;" ::"m"(*pm_ctx.aon_tss_selector) :);

	/* clear TS (Task Switched) flag and enable cache */
	__asm__ volatile("clts;\n"
			 "movl %%cr0, %%eax;\n"
			 "andl $0x9FFFFFFF, %%eax;\n"
			 "movl %%eax, %%cr0;"
			 :
			 :
			 : "eax");

	sedi_core_irq_unlock(eflags);
}

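/*
 * Delegate a reset request to aontask: record the target power state in the
 * shared data, mask all other interrupts, enable trunk clock gating and
 * RF/ROM power gating, then switch to aontask, from which the reset
 * completes.  This function does not return.
 */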
static void handle_reset_in_aontask(enum ish_pm_state pm_state)
{
	uint64_t ioapic_state;

	pm_ctx.aon_share->pm_state = pm_state;

	ioapic_state = sedi_core_get_irq_map();
	pm_disable_irqs(ioapic_state);
	sedi_core_irq_enable(SEDI_IRQ_PMU2IOAPIC);
	sedi_core_irq_enable(SEDI_IRQ_RESET_PREP);

	/* enable Trunk Clock Gating (TCG) of ISH */
	write32(CCU_TCG_EN, 1);

	/* enable power gating of RF(Cache) and ROMs */
	write32(PMU_RF_ROM_PWR_CTRL, 1);

	switch_to_aontask();

	__builtin_unreachable();
}

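/*
 * D0i0: the shallowest idle state.  Only the CPU is halted; any enabled
 * interrupt wakes it up.  Statistics are logged around the halt.
 */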
static void enter_d0i0(void)
{
	uint64_t t0, t1;

	t0 = sedi_rtc_get_us();
	pm_ctx.aon_share->pm_state = ISH_PM_STATE_D0I0;

	/* halt the ISH CPU; it will wake up on any interrupt */
	ish_mia_halt();

	t1 = sedi_rtc_get_us();
	pm_ctx.aon_share->pm_state = ISH_PM_STATE_D0;
	log_pm_stat(&pm_stats.d0i0, t0, t1);
}

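/*
 * D0i1: halt the CPU with clock gating applied.  Without
 * CONFIG_SOC_INTEL_ISH_5_6_0 all other IRQs are masked and ISH trunk clock
 * gating is used, so only the PMU wakeup or RESET_PREP interrupt can wake
 * the CPU; with it, only MIA branch clock gating is toggled.  The previous
 * state is restored on exit.
 */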
static void enter_d0i1(void)
{
#ifndef CONFIG_SOC_INTEL_ISH_5_6_0
	uint64_t ioapic_state;
#endif
	uint64_t t0, t1;

#ifndef CONFIG_SOC_INTEL_ISH_5_6_0
	ioapic_state = sedi_core_get_irq_map();
	pm_disable_irqs(ioapic_state);
#endif
	sedi_core_irq_enable(SEDI_IRQ_PMU2IOAPIC);
	sedi_core_irq_enable(SEDI_IRQ_RESET_PREP);

	t0 = sedi_rtc_get_us();
	pm_ctx.aon_share->pm_state = ISH_PM_STATE_D0I1;

#ifndef CONFIG_SOC_INTEL_ISH_5_6_0
	/* enable Trunk Clock Gating (TCG) of ISH */
	write32(CCU_TCG_EN, 1);
#else
	write32(CCU_BCG_MIA, read32(CCU_BCG_MIA) | CCU_BCG_BIT_MIA);
#endif

	/* halt the ISH CPU; it will wake up on the PMU wakeup interrupt */
	ish_mia_halt();

#ifndef CONFIG_SOC_INTEL_ISH_5_6_0
	/* disable Trunk Clock Gating (TCG) of ISH */
	write32(CCU_TCG_EN, 0);
#else
	write32(CCU_BCG_MIA, read32(CCU_BCG_MIA) & (~CCU_BCG_BIT_MIA));
#endif

	pm_ctx.aon_share->pm_state = ISH_PM_STATE_D0;
	t1 = sedi_rtc_get_us();
	log_pm_stat(&pm_stats.d0i1, t0, t1);

#ifndef CONFIG_SOC_INTEL_ISH_5_6_0
	/* restore interrupts */
	pm_enable_irqs(ioapic_state);
#endif
	sedi_core_irq_disable(SEDI_IRQ_PMU2IOAPIC);
}

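/*
 * D0i2: mask all other IRQs, enable trunk clock gating and RF/ROM power
 * gating, then switch to aontask.  When aontask switches back, the gating
 * is undone, hardware is restored if a power-gating exit occurred, and
 * statistics are logged.
 */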
static void enter_d0i2(void)
{
	uint64_t ioapic_state;
	uint64_t t0, t1;

	ioapic_state = sedi_core_get_irq_map();
	pm_disable_irqs(ioapic_state);
	sedi_core_irq_enable(SEDI_IRQ_PMU2IOAPIC);
	sedi_core_irq_enable(SEDI_IRQ_RESET_PREP);

	t0 = sedi_rtc_get_us();
	pm_ctx.aon_share->pm_state = ISH_PM_STATE_D0I2;

	/* enable Trunk Clock Gating (TCG) of ISH */
	write32(CCU_TCG_EN, 1);

	/* enable power gating of RF(Cache) and ROMs */
	write32(PMU_RF_ROM_PWR_CTRL, 1);

	switch_to_aontask();

	/* returned from aontask */

	if (pm_ctx.aon_share->pg_exit)
		pg_exit_restore_hw();

	/* disable power gating of RF(Cache) and ROMs */
	write32(PMU_RF_ROM_PWR_CTRL, 0);

	/* disable Trunk Clock Gating (TCG) of ISH */
	write32(CCU_TCG_EN, 0);

	t1 = sedi_rtc_get_us();
	pm_ctx.aon_share->pm_state = ISH_PM_STATE_D0;
	log_pm_stat(&pm_stats.d0i2, t0, t1);

	if (pm_ctx.aon_share->pg_exit)
		log_pm_stat(&pm_stats.pg, t0, t1);

	/* restore interrupts */
	pm_enable_irqs(ioapic_state);
	sedi_core_irq_disable(SEDI_IRQ_PMU2IOAPIC);
}

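/*
 * D0i3: the deepest D0ix state handled here.  The entry/exit sequence
 * mirrors enter_d0i2(); only the state reported in the shared data and the
 * statistics bucket differ.
 */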
static void enter_d0i3(void)
{
	uint64_t ioapic_state;
	uint64_t t0, t1;

	ioapic_state = sedi_core_get_irq_map();
	pm_disable_irqs(ioapic_state);
	sedi_core_irq_enable(SEDI_IRQ_PMU2IOAPIC);
	sedi_core_irq_enable(SEDI_IRQ_RESET_PREP);

	t0 = sedi_rtc_get_us();
	pm_ctx.aon_share->pm_state = ISH_PM_STATE_D0I3;

	/* enable Trunk Clock Gating (TCG) of ISH */
	write32(CCU_TCG_EN, 1);

	/* enable power gating of RF(Cache) and ROMs */
	write32(PMU_RF_ROM_PWR_CTRL, 1);

	switch_to_aontask();

	/* returned from aontask */

	if (pm_ctx.aon_share->pg_exit)
		pg_exit_restore_hw();

	/* disable power gating of RF(Cache) and ROMs */
	write32(PMU_RF_ROM_PWR_CTRL, 0);

	/* disable Trunk Clock Gating (TCG) of ISH */
	write32(CCU_TCG_EN, 0);

	t1 = sedi_rtc_get_us();
	pm_ctx.aon_share->pm_state = ISH_PM_STATE_D0;
	log_pm_stat(&pm_stats.d0i3, t0, t1);

	if (pm_ctx.aon_share->pg_exit)
		log_pm_stat(&pm_stats.pg, t0, t1);

	/* restore interrupts */
	pm_enable_irqs(ioapic_state);
	sedi_core_irq_disable(SEDI_IRQ_PMU2IOAPIC);
}

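/*
 * Common preparation before a D0ix entry: write the latched PMU_VNN_REQ
 * bits back (clearing them, assuming write-1-to-clear semantics) and idle
 * the UARTs; post_setting_d0ix() restores the UARTs on exit.
 */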
static void pre_setting_d0ix(void)
{
	write32(PMU_VNN_REQ, read32(PMU_VNN_REQ));
	uart_to_idle();
}

static void post_setting_d0ix(void)
{
	uart_port_restore();
}

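/*
 * Dispatch to the matching D0ix entry routine.  D0i1 (unless
 * CONFIG_SOC_INTEL_ISH_5_6_0 is set), D0i2 and D0i3 are wrapped with
 * pre_setting_d0ix()/post_setting_d0ix(), and the aontask-backed states
 * also check the aontask error status afterwards; any unrecognized state
 * falls back to D0i0.
 */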
void sedi_pm_enter_power_state(int state)
{
	switch (state) {
	case ISH_PM_STATE_D0I1:
#ifndef CONFIG_SOC_INTEL_ISH_5_6_0
		pre_setting_d0ix();
#endif
		enter_d0i1();
#ifndef CONFIG_SOC_INTEL_ISH_5_6_0
		post_setting_d0ix();
#endif
		break;
	case ISH_PM_STATE_D0I2:
		pre_setting_d0ix();
		enter_d0i2();
		post_setting_d0ix();
		check_aon_task_status();
		break;
	case ISH_PM_STATE_D0I3:
		pre_setting_d0ix();
		enter_d0i3();
		post_setting_d0ix();
		check_aon_task_status();
		break;
	default:
		enter_d0i0();
		break;
	}
}

static void reset_bcg(void)
{
	write32(CCU_BCG_MIA, 0);
	write32(CCU_BCG_DMA, 0);
	write32(CCU_BCG_I2C, 0);
	write32(CCU_BCG_SPI, 0);
	write32(CCU_BCG_UART, 0);
	write32(CCU_BCG_GPIO, 0);
}

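/*
 * Handle a host-initiated D3 transition signalled through the PCIe device
 * interrupt: acknowledge the PMU D3 status and, if a D3 event is flagged,
 * signal EOI to the IOAPIC and LAPIC, then reset into the D3 state unless
 * the host is merely suspending.
 */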
static void handle_d3(uint32_t irq_vec)
{
	write32(PMU_D3_STATUS, read32(PMU_D3_STATUS));

	if (read32(PMU_D3_STATUS) & (PMU_D3_BIT_RISING_EDGE_STATUS | PMU_D3_BIT_SET)) {
		/*
		 * Indicate completion of servicing the interrupt to IOAPIC
		 * first then indicate completion of servicing the interrupt
		 * to LAPIC
		 */
		write32(SEDI_IOAPIC_EOI, irq_vec);
		write32(LAPIC_EOI, 0x0);

		if (!(pm_ctx.aon_share->host_in_suspend))
			ish_pm_reset(ISH_PM_STATE_D3);
	}
}

static void pcie_dev_isr(void)
{
	handle_d3(SEDI_VEC_PCIEDEV);
}

/**
 * The main FW only needs to handle the PMU wakeup interrupt for the D0i1
 * state; aontask handles the PMU wakeup interrupt for the other low-power
 * states.
 */
static void pmu_wakeup_isr(void)
{
	/* currently there is nothing to do here */
}

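/*
 * RESET_PREP interrupt handler: mask further reset-prep interrupts, signal
 * EOI to the IOAPIC and LAPIC, then reset the ISH CPU.  Never returns.
 */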
static void reset_prep_isr(void)
{
	/* mask reset prep avail interrupt */
	write32(PMU_RST_PREP, PMU_RST_PREP_INT_MASK);

	/*
	 * Indicate completion of servicing the interrupt to IOAPIC first
	 * then indicate completion of servicing the interrupt to LAPIC
	 */
	write32(SEDI_IOAPIC_EOI, SEDI_VEC_RESET_PREP);
	write32(LAPIC_EOI, 0x0);

	ish_mia_reset();
	__builtin_unreachable();
}

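/*
 * One-time power-management initialization: clear reset state and clock
 * gating, set up the aontask (GDT/TSS, shared data), configure GPIO wake
 * masks, wake events and fabric/clock-gate idle counters, and hook up the
 * RESET_PREP, PMU wakeup and PCIe-device interrupts.
 */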
void sedi_pm_init(void)
{
	/* clear reset bit */
	write32(ISH_RST_REG, 0);

	/* clear reset history register in CCU */
	write32(CCU_RST_HST, read32(CCU_RST_HST));

	/* disable TCG and disable BCG */
	write32(CCU_TCG_EN, 0);
	reset_bcg();

	init_aon_task();

	write32(PMU_GPIO_WAKE_MASK0, 0);
	write32(PMU_GPIO_WAKE_MASK1, 0);

	/* unmask all wake up events */
	write32(PMU_MASK_EVENT, ~PMU_MASK_EVENT_BIT_ALL);

	write32(PMU_ISH_FABRIC_CNT, (read32(PMU_ISH_FABRIC_CNT) & 0xffff0000) | FABRIC_IDLE_COUNT);
	write32(PMU_PGCB_CLKGATE_CTRL, TRUNK_CLKGATE_COUNT);

	IRQ_CONNECT(SEDI_IRQ_RESET_PREP, 5, reset_prep_isr, 0, IOAPIC_LEVEL);
	IRQ_CONNECT(SEDI_IRQ_PMU2IOAPIC, 5, pmu_wakeup_isr, 0, IOAPIC_LEVEL);
	IRQ_CONNECT(SEDI_IRQ_PCIEDEV, 5, pcie_dev_isr, 0, IOAPIC_LEVEL);

	/* unmask reset prep avail interrupt */
	write32(PMU_RST_PREP, 0);
	sedi_core_irq_enable(SEDI_IRQ_RESET_PREP);

	/* unmask D3 and BME interrupts */
	write32(PMU_D3_STATUS, read32(PMU_D3_STATUS) & (PMU_D3_BIT_SET | PMU_BME_BIT_SET));

	if ((!(read32(PMU_D3_STATUS) & PMU_D3_BIT_SET)) &&
	    (read32(PMU_D3_STATUS) & PMU_BME_BIT_SET))
		write32(PMU_D3_STATUS, read32(PMU_D3_STATUS));

	sedi_core_irq_enable(SEDI_IRQ_PCIEDEV);
}

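/*
 * Reset the ISH into the requested power state: route the reset through
 * aontask when a valid aontask image is present, otherwise reset the CPU
 * directly.  Never returns.
 */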
void ish_pm_reset(enum ish_pm_state pm_state)
{
	if (pm_ctx.aon_valid) {
		handle_reset_in_aontask(pm_state);
	} else {
		ish_mia_reset();
	}

	__builtin_unreachable();
}

void sedi_pm_reset(void)
{
	ish_mia_reset();
}

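/*
 * Record the host suspend/resume state in the shared aon data; handle_d3()
 * consults it to decide whether a D3 event should trigger a reset.
 */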
void sedi_pm_host_suspend(uint32_t suspend)
{
	pm_ctx.aon_share->host_in_suspend = suspend;
}

/*
 * helper for printing idle stats
 */
static void print_stats(const char *name, const struct pm_stat *stat)
{
	if (stat->count)
		PM_LOG("    %s:\n"
		       "        counts: %llu\n"
		       "        time:   %.6llu ms\n",
		       name, stat->count, (stat->total_time_us)/1000);
}

/**
 * Print low power idle statistics
 */
void command_idle_stats(void)
{
#ifdef PM_DEBUG_PRINTS
	struct ish_aon_share *aon_share = pm_ctx.aon_share;
#endif
	uint64_t tall;

	tall = sedi_rtc_get_us();

	PM_LOG("Aontask exists: %s\n", pm_ctx.aon_valid ? "Yes" : "No");
	PM_LOG("Total time on: %.6llu ms\n", tall/1000);
	PM_LOG("Idle sleep:\n");
	print_stats("D0i0", &pm_stats.d0i0);

	PM_LOG("Deep sleep:\n");
	print_stats("D0i1", &pm_stats.d0i1);
	print_stats("D0i2", &pm_stats.d0i2);
	print_stats("D0i3", &pm_stats.d0i3);
	print_stats("IPAPG", &pm_stats.pg);

	if (pm_ctx.aon_valid) {
		PM_LOG("    Aontask status:\n");
		PM_LOG("        last error:   %u\n", aon_share->last_error);
		PM_LOG("        error counts: %u\n", aon_share->error_count);
	}
}