1 /*
2 * Copyright (c) 2023 Intel Corporation
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
7 #include "aon/aon_share.h"
8 #include "ish_dma.h"
9 #include "ish_pm.h"
10 #include "sedi_driver_core.h"
11 #include "sedi_driver_uart.h"
12 #include <sedi_driver_rtc.h>
13 #include <zephyr/sys/printk.h>
14 #include <zephyr/irq.h>
15 #include <zephyr/drivers/interrupt_controller/ioapic.h>
16 #include <zephyr/arch/x86/ia32/segmentation.h>
17
18 /* defined in link script: soc/x86/intel_ish/scripts/ish_linker.ld */
19 extern uint32_t __text_region_start;
20 extern uint32_t __rodata_region_end;
21 extern uint32_t _image_ram_start;
22 extern uint32_t _image_ram_end;
23
/* PM debug prints are enabled by default; set PM_DEBUG_PRINTS to 0/undef to disable */
25 #define PM_DEBUG_PRINTS 1
26
27 #ifdef PM_DEBUG_PRINTS
28 #define PM_LOG(...) printk(__VA_ARGS__)
29 #else
30 #define PM_LOG(...)
31 #endif
32
/* 16550-style UART register offsets from each port's MMIO base */
#define DLL 0x0		/* Divisor Latch Low (visible while LCR bit7/DLAB is set) */
#define DLH 0x4		/* Divisor Latch High (visible while LCR bit7/DLAB is set) */
#define LCR 0xC		/* Line Control Register; bit7 is the divisor latch access bit */
#define DLF 0xC0	/* Divisor Latch Fraction */

/* Per-port configs saved by uart_to_idle() and re-applied by uart_port_restore() */
static sedi_uart_config_t uart0_cfg, uart1_cfg, uart2_cfg;
39
uart_to_idle(void)40 static void uart_to_idle(void)
41 {
42 sedi_uart_get_config(SEDI_UART_0, &uart0_cfg);
43 write32(SEDI_IREG_BASE(UART, 0) + LCR, 0x80);
44 write32(SEDI_IREG_BASE(UART, 0) + DLL, 0x1);
45 write32(SEDI_IREG_BASE(UART, 0) + DLH, 0x0);
46 write32(SEDI_IREG_BASE(UART, 0) + LCR, 0);
47
48 sedi_uart_get_config(SEDI_UART_0, &uart1_cfg);
49 write32(SEDI_IREG_BASE(UART, 1) + LCR, 0x80);
50 write32(SEDI_IREG_BASE(UART, 1) + DLL, 0x1);
51 write32(SEDI_IREG_BASE(UART, 1) + DLH, 0x0);
52 write32(SEDI_IREG_BASE(UART, 1) + LCR, 0);
53
54 sedi_uart_get_config(SEDI_UART_0, &uart2_cfg);
55 write32(SEDI_IREG_BASE(UART, 2) + LCR, 0x80);
56 write32(SEDI_IREG_BASE(UART, 2) + DLL, 0x1);
57 write32(SEDI_IREG_BASE(UART, 2) + DLH, 0x0);
58 write32(SEDI_IREG_BASE(UART, 2) + LCR, 0);
59 }
60
/*
 * Restore the three UART ports to the driver configurations that were
 * captured by uart_to_idle() before low-power entry.
 */
static void uart_port_restore(void)
{
	sedi_uart_set_config(SEDI_UART_0, &uart0_cfg);
	sedi_uart_set_config(SEDI_UART_1, &uart1_cfg);
	sedi_uart_set_config(SEDI_UART_2, &uart2_cfg);
}
67
/*
 * Restore hardware state after the aontask signals a power-gating (PG)
 * exit (pm_ctx.aon_share->pg_exit), where LAPIC and clock-gating state
 * was lost.
 */
static void pg_exit_restore_hw(void)
{
	/* Re-enable the local APIC via its spurious-interrupt register. */
	write32(LAPIC_SPUR, LAPIC_ENABLE | LAPIC_SPUR_RESET);

	/*
	 * Write the CCU reset-history register back to itself to clear it
	 * (same pattern as sedi_pm_init()), then disable trunk and local
	 * clock gating.  NOTE(review): CCU_TCG_ENABLE/CCU_BCG_ENABLE are
	 * distinct registers from CCU_TCG_EN used elsewhere -- confirm.
	 */
	write32(CCU_RST_HST, read32(CCU_RST_HST));
	write32(CCU_TCG_ENABLE, 0);
	write32(CCU_BCG_ENABLE, 0);

	/* Disable per-block (local) clock gating for every peripheral. */
	write32(CCU_BCG_MIA, 0);
	write32(CCU_BCG_DMA, 0);
	write32(CCU_BCG_I2C, 0);
	write32(CCU_BCG_SPI, 0);
	write32(CCU_BCG_UART, 0);
	write32(CCU_BCG_GPIO, 0);
}
83
/* power management internal context data structure */
struct pm_context {
	/* aontask image valid flag (set by init_aon_task() on magic match) */
	int aon_valid;
	/* point to the aon shared data in aontask */
	struct ish_aon_share *aon_share;
	/*
	 * TSS segment selector pair for the far call that performs the
	 * hardware task switch: [0] is the (ignored) offset, [1] the
	 * aontask TSS selector.
	 */
	int aon_tss_selector[2];
} __attribute__((packed));

static struct pm_context pm_ctx = {
	.aon_valid = 0,
	/* aon shared data located in the start of aon memory */
	.aon_share = (struct ish_aon_share *)CONFIG_AON_RAM_BASE,
};
99
/* D0ix statistics data, including each state's count and total stay time */
struct pm_stat {
	/* number of times the state was entered */
	uint64_t count;
	/* cumulative residency in the state, in microseconds */
	uint64_t total_time_us;
};

/* one entry per supported low-power state, plus power-gating (pg) exits */
struct pm_statistics {
	struct pm_stat d0i0;
	struct pm_stat d0i1;
	struct pm_stat d0i2;
	struct pm_stat d0i3;
	struct pm_stat pg;
};

static struct pm_statistics pm_stats;
115
116 /*
117 * Log a new statistic
118 *
119 * t0: start time, in us
120 * t1: end time, in us
121 */
log_pm_stat(struct pm_stat * stat,uint64_t t0,uint64_t t1)122 static void log_pm_stat(struct pm_stat *stat, uint64_t t0, uint64_t t1)
123 {
124 stat->total_time_us += t1 - t0;
125 stat->count++;
126 }
127
/* GDT pseudo-descriptor defined in crt0.S; extended by add_gdt_entry() */
extern struct pseudo_descriptor _gdt;

/* TSS descriptor for saving main FW's cpu context during aontask switching */
static struct tss_entry main_tss;
132
/**
 * Append a new descriptor entry to the GDT defined in crt0.S.
 *
 * @param desc_lo lower DWORD of the entry descriptor
 * @param desc_up upper DWORD of the entry descriptor
 *
 * @return the segment selector (byte offset in the GDT) of the added entry
 */
static uint32_t add_gdt_entry(uint32_t desc_lo, uint32_t desc_up)
{
	int index;
	struct gdt_header *gdt_ptr = (struct gdt_header *)&_gdt;
	struct gdt_entry *__gdt = (struct gdt_entry *)(gdt_ptr->entries);

	/**
	 * get the first empty entry of GDT which defined in crt0.S;
	 * the GDT limit is (size - 1) and each entry has a fixed size of
	 * 8 bytes, so (limit + 1) >> 3 is the index of the first free slot
	 */
	index = (gdt_ptr->limit + 1) >> 3;

	/* add the new entry descriptor to the GDT */
	__gdt[index].dword_lo = desc_lo;
	__gdt[index].dword_up = desc_up;

	/* update GDT's limit size */
	gdt_ptr->limit += sizeof(struct gdt_entry);

	/* selector of the new entry: index * sizeof(struct gdt_entry) */
	return ((index + 1) << 3) - sizeof(struct gdt_entry);
}
162
/*
 * Initialize support for the always-on (aon) task.
 *
 * Validates the aontask image through the shared-area magic, builds the
 * GDT entries required for hardware task switching (main FW TSS, aontask
 * TSS, aontask LDT), reloads the GDT/task register, and publishes the
 * main FW image ranges to the aontask via the shared area.
 */
static void init_aon_task(void)
{
	uint32_t desc_lo, desc_up;
	struct ish_aon_share *aon_share = pm_ctx.aon_share;
	struct tss_entry *aon_tss = aon_share->aon_tss;

	/* no valid aontask image: states that need it cannot be used */
	if (aon_share->magic_id != AON_MAGIC_ID) {
		pm_ctx.aon_valid = 0;
		return;
	}

	pm_ctx.aon_valid = 1;

	/* far-call offset word; ignored for a TSS far call */
	pm_ctx.aon_tss_selector[0] = 0;

	/* no I/O bitmap: base points past the TSS limit */
	main_tss.iomap_base_addr = GDT_DESC_TSS_LIMIT;

	/* GDT entry for main FW's TSS (CPU context save area on switch) */
	desc_lo = GEN_GDT_DESC_LO((uint32_t)&main_tss,
			GDT_DESC_TSS_LIMIT, GDT_DESC_TSS_FLAGS);
	desc_up = GEN_GDT_DESC_UP((uint32_t)&main_tss,
			GDT_DESC_TSS_LIMIT, GDT_DESC_TSS_FLAGS);
	add_gdt_entry(desc_lo, desc_up);

	/* GDT entry for the aontask TSS; its selector is the far-call target */
	desc_lo = GEN_GDT_DESC_LO((uint32_t)aon_tss,
			GDT_DESC_TSS_LIMIT, GDT_DESC_TSS_FLAGS);
	desc_up = GEN_GDT_DESC_UP((uint32_t)aon_tss,
			GDT_DESC_TSS_LIMIT, GDT_DESC_TSS_FLAGS);
	pm_ctx.aon_tss_selector[1] = add_gdt_entry(desc_lo, desc_up);

	/* GDT entry for the aontask LDT, referenced from the aontask TSS */
	desc_lo = GEN_GDT_DESC_LO((uint32_t)aon_share->aon_ldt,
			aon_share->aon_ldt_size, GDT_DESC_LDT_FLAGS);
	desc_up = GEN_GDT_DESC_UP((uint32_t)aon_share->aon_ldt,
			aon_share->aon_ldt_size, GDT_DESC_LDT_FLAGS);
	aon_tss->ldt_seg_selector = add_gdt_entry(desc_lo, desc_up);

	/*
	 * Reload the GDT and load the task register with selector 0x18
	 * (presumably the main TSS entry added above -- confirm against the
	 * GDT layout in crt0.S).
	 */
	__asm__ volatile("lgdt _gdt;\n"
			 "push %eax;\n"
			 "movw $0x18, %ax;\n"
			 "ltr %ax;\n"
			 "pop %eax;");

	/* publish main FW code/rodata range to the aontask */
	aon_share->main_fw_ro_addr = (uint32_t)&__text_region_start;
	aon_share->main_fw_ro_size = (uint32_t)&__rodata_region_end -
		(uint32_t)&__text_region_start;

	/* publish main FW RAM range to the aontask */
	aon_share->main_fw_rw_addr = (uint32_t)&_image_ram_start;
	aon_share->main_fw_rw_size = (uint32_t)&_image_ram_end -
		(uint32_t)&_image_ram_start;

	/* record the UMA upper range, read from the IPC register */
	aon_share->uma_msb = read32(IPC_UMA_RANGE_LOWER_1);

	ish_dma_init();
}
216
check_aon_task_status(void)217 static inline void check_aon_task_status(void)
218 {
219 struct ish_aon_share *aon_share = pm_ctx.aon_share;
220
221 if (aon_share->last_error != AON_SUCCESS) {
222 PM_LOG("aontask has errors:\n");
223 PM_LOG(" last error: %d\n", aon_share->last_error);
224 PM_LOG(" error counts: %d\n", aon_share->error_count);
225 }
226 }
227
/*
 * Disable every IRQ that is set in the given IOAPIC snapshot, except the
 * PMU wakeup and RESET_PREP lines, which the low-power entry/exit paths
 * manage themselves.
 */
static void pm_disable_irqs(uint64_t current_ioapic_state)
{
	int irq;

	for (irq = 0; irq < IOAPIC_NUM_RTES; irq++) {
		if (irq == SEDI_IRQ_PMU2IOAPIC || irq == SEDI_IRQ_RESET_PREP)
			continue;

		if ((current_ioapic_state >> irq) & 1ULL)
			sedi_core_irq_disable(irq);
	}
}
238
/*
 * Re-enable every IRQ recorded in the given IOAPIC snapshot, except the
 * PMU wakeup and RESET_PREP lines, which the low-power entry/exit paths
 * manage themselves.
 */
static void pm_enable_irqs(uint64_t current_ioapic_state)
{
	int irq;

	for (irq = 0; irq < IOAPIC_NUM_RTES; irq++) {
		if (irq == SEDI_IRQ_PMU2IOAPIC || irq == SEDI_IRQ_RESET_PREP)
			continue;

		if ((current_ioapic_state >> irq) & 1ULL)
			sedi_core_irq_enable(irq);
	}
}
249
/*
 * Hand the CPU over to the aontask via an x86 hardware task switch and
 * resume here when the aontask switches back.  Interrupts are locked and
 * the cache is flushed/disabled across the switch.
 */
static void switch_to_aontask(void)
{
	unsigned int eflags = sedi_core_irq_lock();

	/* full memory barrier before giving up the CPU */
	__sync_synchronize();

	/* disable cache and flush cache (set CR0.CD/NW, then wbinvd) */
	__asm__ volatile("movl %%cr0, %%eax;\n"
			 "orl $0x60000000, %%eax;\n"
			 "movl %%eax, %%cr0;\n"
			 "wbinvd;"
			 :
			 :
			 : "eax");

	/* switch to aontask through a far call with aontask's TSS selector */
	__asm__ volatile("lcall *%0;" ::"m"(*pm_ctx.aon_tss_selector) :);

	/*
	 * Execution resumes here after the aontask switches back.
	 * Clear TS (Task Switched) flag and enable cache.
	 */
	__asm__ volatile("clts;\n"
			 "movl %%cr0, %%eax;\n"
			 "andl $0x9FFFFFFF, %%eax;\n"
			 "movl %%eax, %%cr0;"
			 :
			 :
			 : "eax");

	sedi_core_irq_unlock(eflags);
}
279
/*
 * Delegate a reset to the aontask: record the target power state in the
 * shared area, mask all IRQs except the PMU wakeup and RESET_PREP lines,
 * gate the trunk clock, power-gate RF(cache)/ROMs, and switch to the
 * aontask.  This function does not return.
 *
 * @param pm_state the ish_pm_state the aontask should act on
 */
static void handle_reset_in_aontask(enum ish_pm_state pm_state)
{
	uint64_t ioapic_state;

	pm_ctx.aon_share->pm_state = pm_state;

	/* only the wakeup-relevant interrupts stay enabled */
	ioapic_state = sedi_core_get_irq_map();
	pm_disable_irqs(ioapic_state);
	sedi_core_irq_enable(SEDI_IRQ_PMU2IOAPIC);
	sedi_core_irq_enable(SEDI_IRQ_RESET_PREP);

	/* enable Trunk Clock Gating (TCG) of ISH */
	write32(CCU_TCG_EN, 1);

	/* enable power gating of RF(Cache) and ROMs */
	write32(PMU_RF_ROM_PWR_CTRL, 1);

	switch_to_aontask();

	__builtin_unreachable();
}
301
/*
 * Enter D0i0: a plain CPU halt with no clock or power gating.  Returns
 * after any interrupt wakes the core and logs the residency.
 */
static void enter_d0i0(void)
{
	uint64_t t0, t1;

	t0 = sedi_rtc_get_us();
	pm_ctx.aon_share->pm_state = ISH_PM_STATE_D0I0;

	/* halt ISH cpu, will wakeup from any interrupt */
	ish_mia_halt();

	t1 = sedi_rtc_get_us();
	pm_ctx.aon_share->pm_state = ISH_PM_STATE_D0;
	log_pm_stat(&pm_stats.d0i0, t0, t1);
}
316
/*
 * Enter D0i1: halt the CPU with clock gating; the aontask is not
 * involved.  On non-5.6.0 parts, all IRQs except the wakeup sources are
 * masked and the trunk clock is gated; on 5.6.0 only the minute-IA local
 * clock gate (BCG MIA bit) is used.  Returns after the PMU wakeup
 * interrupt and logs the residency.
 */
static void enter_d0i1(void)
{
#ifndef CONFIG_SOC_INTEL_ISH_5_6_0
	uint64_t ioapic_state;
#endif
	uint64_t t0, t1;

#ifndef CONFIG_SOC_INTEL_ISH_5_6_0
	/* mask everything except the wakeup-relevant interrupts */
	ioapic_state = sedi_core_get_irq_map();
	pm_disable_irqs(ioapic_state);
#endif
	sedi_core_irq_enable(SEDI_IRQ_PMU2IOAPIC);
	sedi_core_irq_enable(SEDI_IRQ_RESET_PREP);

	t0 = sedi_rtc_get_us();
	pm_ctx.aon_share->pm_state = ISH_PM_STATE_D0I1;

#ifndef CONFIG_SOC_INTEL_ISH_5_6_0
	/* enable Trunk Clock Gating (TCG) of ISH */
	write32(CCU_TCG_EN, 1);
#else
	/* gate only the minute-IA local clock */
	write32(CCU_BCG_MIA, read32(CCU_BCG_MIA) | CCU_BCG_BIT_MIA);
#endif

	/* halt ISH cpu, will wakeup from PMU wakeup interrupt */
	ish_mia_halt();

#ifndef CONFIG_SOC_INTEL_ISH_5_6_0
	/* disable Trunk Clock Gating (TCG) of ISH */
	write32(CCU_TCG_EN, 0);
#else
	write32(CCU_BCG_MIA, read32(CCU_BCG_MIA) & (~CCU_BCG_BIT_MIA));
#endif

	pm_ctx.aon_share->pm_state = ISH_PM_STATE_D0;
	t1 = sedi_rtc_get_us();
	log_pm_stat(&pm_stats.d0i1, t0, t1);

#ifndef CONFIG_SOC_INTEL_ISH_5_6_0
	/* restore interrupts */
	pm_enable_irqs(ioapic_state);
#endif
	sedi_core_irq_disable(SEDI_IRQ_PMU2IOAPIC);
}
361
/*
 * Enter D0i2: mask all IRQs except the wakeup sources, gate the trunk
 * clock, power-gate RF(cache)/ROMs, and hand the CPU to the aontask.
 * On return the gating is undone (including extra restore work when the
 * aontask reports a power-gating exit) and residency is logged.
 */
static void enter_d0i2(void)
{
	uint64_t ioapic_state;
	uint64_t t0, t1;

	/* only the wakeup-relevant interrupts stay enabled */
	ioapic_state = sedi_core_get_irq_map();
	pm_disable_irqs(ioapic_state);
	sedi_core_irq_enable(SEDI_IRQ_PMU2IOAPIC);
	sedi_core_irq_enable(SEDI_IRQ_RESET_PREP);

	t0 = sedi_rtc_get_us();
	pm_ctx.aon_share->pm_state = ISH_PM_STATE_D0I2;

	/* enable Trunk Clock Gating (TCG) of ISH */
	write32(CCU_TCG_EN, 1);

	/* enable power gating of RF(Cache) and ROMs */
	write32(PMU_RF_ROM_PWR_CTRL, 1);

	switch_to_aontask();

	/* returned from aontask */

	/* aontask flags a power-gated exit that lost LAPIC/clock state */
	if (pm_ctx.aon_share->pg_exit)
		pg_exit_restore_hw();

	/* disable power gating of RF(Cache) and ROMs */
	write32(PMU_RF_ROM_PWR_CTRL, 0);

	/* disable Trunk Clock Gating (TCG) of ISH */
	write32(CCU_TCG_EN, 0);

	t1 = sedi_rtc_get_us();
	pm_ctx.aon_share->pm_state = ISH_PM_STATE_D0;
	log_pm_stat(&pm_stats.d0i2, t0, t1);

	/* the same interval also counts toward the power-gating stats */
	if (pm_ctx.aon_share->pg_exit)
		log_pm_stat(&pm_stats.pg, t0, t1);

	/* restore interrupts */
	pm_enable_irqs(ioapic_state);
	sedi_core_irq_disable(SEDI_IRQ_PMU2IOAPIC);
}
405
/*
 * Enter D0i3: same mechanics as enter_d0i2() (IRQ masking, trunk clock
 * gating, RF/ROM power gating, aontask switch), differing only in the
 * pm_state written to the shared area and the statistics entry updated.
 */
static void enter_d0i3(void)
{
	uint64_t ioapic_state;
	uint64_t t0, t1;

	/* only the wakeup-relevant interrupts stay enabled */
	ioapic_state = sedi_core_get_irq_map();
	pm_disable_irqs(ioapic_state);
	sedi_core_irq_enable(SEDI_IRQ_PMU2IOAPIC);
	sedi_core_irq_enable(SEDI_IRQ_RESET_PREP);

	t0 = sedi_rtc_get_us();
	pm_ctx.aon_share->pm_state = ISH_PM_STATE_D0I3;

	/* enable Trunk Clock Gating (TCG) of ISH */
	write32(CCU_TCG_EN, 1);

	/* enable power gating of RF(Cache) and ROMs */
	write32(PMU_RF_ROM_PWR_CTRL, 1);

	switch_to_aontask();

	/* returned from aontask */

	/* aontask flags a power-gated exit that lost LAPIC/clock state */
	if (pm_ctx.aon_share->pg_exit)
		pg_exit_restore_hw();

	/* disable power gating of RF(Cache) and ROMs */
	write32(PMU_RF_ROM_PWR_CTRL, 0);

	/* disable Trunk Clock Gating (TCG) of ISH */
	write32(CCU_TCG_EN, 0);

	t1 = sedi_rtc_get_us();
	pm_ctx.aon_share->pm_state = ISH_PM_STATE_D0;
	log_pm_stat(&pm_stats.d0i3, t0, t1);

	/* the same interval also counts toward the power-gating stats */
	if (pm_ctx.aon_share->pg_exit)
		log_pm_stat(&pm_stats.pg, t0, t1);

	/* restore interrupts */
	pm_enable_irqs(ioapic_state);
	sedi_core_irq_disable(SEDI_IRQ_PMU2IOAPIC);
}
449
/*
 * Common preparation before entering a D0ix state: write PMU_VNN_REQ
 * back to itself (presumably write-1-to-clear of pending VNN requests --
 * TODO confirm against the PMU register spec) and idle the UART ports.
 */
static void pre_setting_d0ix(void)
{
	write32(PMU_VNN_REQ, read32(PMU_VNN_REQ));
	uart_to_idle();
}
455
/* Common cleanup after leaving a D0ix state: restore the UART ports. */
static void post_setting_d0ix(void)
{
	uart_port_restore();
}
460
/*
 * Enter the requested low-power state and return after wakeup.
 *
 * D0i2/D0i3 involve the aontask, so its error status is checked on
 * return.  Any unrecognized state falls back to a plain D0i0 halt.
 *
 * @param state one of the ISH_PM_STATE_* values
 */
void sedi_pm_enter_power_state(int state)
{
	switch (state) {
	case ISH_PM_STATE_D0I1:
#ifndef CONFIG_SOC_INTEL_ISH_5_6_0
		/* on non-5.6.0 parts D0i1 also quiesces VNN/UART */
		pre_setting_d0ix();
#endif
		enter_d0i1();
#ifndef CONFIG_SOC_INTEL_ISH_5_6_0
		post_setting_d0ix();
#endif
		break;
	case ISH_PM_STATE_D0I2:
		pre_setting_d0ix();
		enter_d0i2();
		post_setting_d0ix();
		check_aon_task_status();
		break;
	case ISH_PM_STATE_D0I3:
		pre_setting_d0ix();
		enter_d0i3();
		post_setting_d0ix();
		check_aon_task_status();
		break;
	default:
		/* unknown request degrades to a simple halt */
		enter_d0i0();
		break;
	}
}
490
/* Disable local (per-block) clock gating for all peripheral blocks. */
static void reset_bcg(void)
{
	write32(CCU_BCG_MIA, 0);
	write32(CCU_BCG_DMA, 0);
	write32(CCU_BCG_I2C, 0);
	write32(CCU_BCG_SPI, 0);
	write32(CCU_BCG_UART, 0);
	write32(CCU_BCG_GPIO, 0);
}
500
/*
 * Handle a host D3 (device power-down) notification.
 *
 * Writes PMU_D3_STATUS back to itself (presumably write-1-to-clear --
 * same pattern as elsewhere in this file), then, if the D3 bit or its
 * rising-edge status is still set and the host is not suspended, resets
 * into the D3 power state.
 *
 * @param irq_vec IOAPIC vector to EOI (e.g. SEDI_VEC_PCIEDEV)
 */
static void handle_d3(uint32_t irq_vec)
{
	write32(PMU_D3_STATUS, read32(PMU_D3_STATUS));

	if (read32(PMU_D3_STATUS) & (PMU_D3_BIT_RISING_EDGE_STATUS | PMU_D3_BIT_SET)) {
		/*
		 * Indicate completion of servicing the interrupt to IOAPIC
		 * first then indicate completion of servicing the interrupt
		 * to LAPIC
		 */
		write32(SEDI_IOAPIC_EOI, irq_vec);
		write32(LAPIC_EOI, 0x0);

		/* host_in_suspend set via sedi_pm_host_suspend() */
		if (!(pm_ctx.aon_share->host_in_suspend))
			ish_pm_reset(ISH_PM_STATE_D3);
	}
}
518
/* ISR for the PCIe-device interrupt: forwards to the D3 handler. */
static void pcie_dev_isr(void)
{
	handle_d3(SEDI_VEC_PCIEDEV);
}
/**
 * main FW only needs to handle the PMU wakeup interrupt for the D0i1
 * state; the aontask handles the PMU wakeup interrupt for the other low
 * power states
 */
static void pmu_wakeup_isr(void)
{
	/* nothing to do currently; the wake itself is the useful effect */
}
531
/*
 * ISR for the RESET_PREP interrupt (host is about to reset the ISH).
 * Masks further reset-prep interrupts, EOIs IOAPIC then LAPIC, and resets
 * the minute-IA core.  Does not return.
 */
static void reset_prep_isr(void)
{
	/* mask reset prep avail interrupt */
	write32(PMU_RST_PREP, PMU_RST_PREP_INT_MASK);

	/*
	 * Indicate completion of servicing the interrupt to IOAPIC first
	 * then indicate completion of servicing the interrupt to LAPIC
	 */
	write32(SEDI_IOAPIC_EOI, SEDI_VEC_RESET_PREP);
	write32(LAPIC_EOI, 0x0);

	ish_mia_reset();
	__builtin_unreachable();
}
547
/*
 * Initialize the ISH power-management subsystem: clear reset state,
 * disable clock gating, initialize aontask support, configure wake
 * masks/counters, and hook up the RESET_PREP, PMU wakeup, and PCIe
 * device interrupts.
 */
void sedi_pm_init(void)
{
	/* clear reset bit */
	write32(ISH_RST_REG, 0);

	/* clear reset history register in CCU (write-back clears it) */
	write32(CCU_RST_HST, read32(CCU_RST_HST));

	/* disable TCG and disable BCG */
	write32(CCU_TCG_EN, 0);
	reset_bcg();

	init_aon_task();

	/* no GPIO wake sources masked out */
	write32(PMU_GPIO_WAKE_MASK0, 0);
	write32(PMU_GPIO_WAKE_MASK1, 0);

	/* unmask all wake up events */
	write32(PMU_MASK_EVENT, ~PMU_MASK_EVENT_BIT_ALL);

	/* program fabric-idle and trunk clock-gate counters (low 16 bits) */
	write32(PMU_ISH_FABRIC_CNT, (read32(PMU_ISH_FABRIC_CNT) & 0xffff0000) | FABRIC_IDLE_COUNT);
	write32(PMU_PGCB_CLKGATE_CTRL, TRUNK_CLKGATE_COUNT);

	/* level-triggered ISRs, all at priority 5 */
	IRQ_CONNECT(SEDI_IRQ_RESET_PREP, 5, reset_prep_isr, 0, IOAPIC_LEVEL);
	IRQ_CONNECT(SEDI_IRQ_PMU2IOAPIC, 5, pmu_wakeup_isr, 0, IOAPIC_LEVEL);
	IRQ_CONNECT(SEDI_IRQ_PCIEDEV, 5, pcie_dev_isr, 0, IOAPIC_LEVEL);

	/* unmask reset prep avail interrupt */
	write32(PMU_RST_PREP, 0);
	sedi_core_irq_enable(SEDI_IRQ_RESET_PREP);

	/* unmask D3 and BME interrupts */
	write32(PMU_D3_STATUS, read32(PMU_D3_STATUS) & (PMU_D3_BIT_SET | PMU_BME_BIT_SET));

	/* if D3 is clear but BME is set, clear the pending status bits */
	if ((!(read32(PMU_D3_STATUS) & PMU_D3_BIT_SET)) &&
			(read32(PMU_D3_STATUS) & PMU_BME_BIT_SET))
		write32(PMU_D3_STATUS, read32(PMU_D3_STATUS));

	sedi_core_irq_enable(SEDI_IRQ_PCIEDEV);
}
588
ish_pm_reset(enum ish_pm_state pm_state)589 void ish_pm_reset(enum ish_pm_state pm_state)
590 {
591 if (pm_ctx.aon_valid) {
592 handle_reset_in_aontask(pm_state);
593 } else {
594 ish_mia_reset();
595 }
596
597 __builtin_unreachable();
598 }
599
/* Reset the minute-IA core directly, bypassing the aontask. */
void sedi_pm_reset(void)
{
	ish_mia_reset();
}
604
/*
 * Record the host suspend state in the aontask shared area; handle_d3()
 * consults this flag before resetting into D3.
 *
 * @param suspend nonzero when the host is entering suspend
 */
void sedi_pm_host_suspend(uint32_t suspend)
{
	pm_ctx.aon_share->host_in_suspend = suspend;
}
609
610 /*
611 * helper for print idle_stats
612 */
print_stats(const char * name,const struct pm_stat * stat)613 static void print_stats(const char *name, const struct pm_stat *stat)
614 {
615 if (stat->count)
616 PM_LOG(" %s:\n"
617 " counts: %llu\n"
618 " time: %.6llu ms\n",
619 name, stat->count, (stat->total_time_us)/1000);
620 }
621
/**
 * Print low power idle statistics: total uptime, per-state residency
 * counters, and (when an aontask is present) its last error status.
 */
void command_idle_stats(void)
{
#ifdef PM_DEBUG_PRINTS
	/* only referenced from PM_LOG, which compiles away otherwise */
	struct ish_aon_share *aon_share = pm_ctx.aon_share;
#endif
	uint64_t tall;

	/* RTC microsecond counter doubles as total time-since-boot */
	tall = sedi_rtc_get_us();

	PM_LOG("Aontask exists: %s\n", pm_ctx.aon_valid ? "Yes" : "No");
	PM_LOG("Total time on: %.6llu ms\n", tall/1000);
	PM_LOG("Idle sleep:\n");
	print_stats("D0i0", &pm_stats.d0i0);

	PM_LOG("Deep sleep:\n");
	print_stats("D0i1", &pm_stats.d0i1);
	print_stats("D0i2", &pm_stats.d0i2);
	print_stats("D0i3", &pm_stats.d0i3);
	print_stats("IPAPG", &pm_stats.pg);

	if (pm_ctx.aon_valid) {
		PM_LOG(" Aontask status:\n");
		PM_LOG("    last error:   %u\n", aon_share->last_error);
		PM_LOG("    error counts: %u\n", aon_share->error_count);
	}
}
651