/*
 * SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/*
 The cache has an interrupt that can be raised as soon as an access to a cached
 region (flash, psram) is done without the cache being enabled. We use that here
 to panic the CPU, which from a debugging perspective is better than grabbing bad
 data from the bus.
*/
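
/*
 Illustrative sketch (hypothetical, not part of this file): a typical way to hit
 this condition is an interrupt handler that runs while the cache is disabled,
 e.g. during a flash write, and touches data that lives in a cached region. All
 names below are made up for illustration.

     // Resides in flash (.rodata), i.e. in a cached region.
     static const int lookup_table[4] = {1, 2, 3, 4};

     // The handler code itself is placed in IRAM, but the data it reads is not:
     // if this runs while the cache is disabled, the access below raises the
     // cache invalid access interrupt and the CPU panics.
     static void IRAM_ATTR my_timer_isr(void *arg)
     {
         (void) lookup_table[0];
     }
*/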

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

#include "esp_err.h"
#include "esp_attr.h"
#include "esp_cpu.h"

#include "esp_intr_alloc.h"
#include "soc/dport_reg.h"

#include "esp_rom_sys.h"

#include "sdkconfig.h"
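
/*
 Route the cache invalid access interrupt of the calling CPU to the memory
 access error interrupt line and enable the per-region "cache disabled but
 accessed" interrupt bits for that CPU.
*/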
void esp_cache_err_int_init(void)
{
    uint32_t core_id = esp_cpu_get_core_id();
    ESP_INTR_DISABLE(ETS_MEMACCESS_ERR_INUM);

    // We do not register a handler for the interrupt because it is interrupt
    // level 4, which is not serviceable from C. Instead, xtensa_vectors.S calls
    // the panic handler for this interrupt.
    esp_rom_route_intr_matrix(core_id, ETS_CACHE_IA_INTR_SOURCE, ETS_MEMACCESS_ERR_INUM);

    // Enable the invalid cache access interrupt when the cache is disabled.
    // When the interrupt happens, we cannot determine the CPU on which the
    // invalid cache access occurred. We enable the interrupt to catch invalid
    // access on both CPUs, but the interrupt is connected to the CPU which
    // happens to call this function.
    // For this reason, the panic handler backtrace will not be correct if the
    // interrupt is connected to the PRO CPU and the invalid access happens on
    // the APP CPU.

    if (core_id == PRO_CPU_NUM) {
        DPORT_SET_PERI_REG_MASK(DPORT_CACHE_IA_INT_EN_REG,
            DPORT_CACHE_IA_INT_PRO_OPPOSITE |
            DPORT_CACHE_IA_INT_PRO_DRAM1 |
            DPORT_CACHE_IA_INT_PRO_DROM0 |
            DPORT_CACHE_IA_INT_PRO_IROM0 |
            DPORT_CACHE_IA_INT_PRO_IRAM0 |
            DPORT_CACHE_IA_INT_PRO_IRAM1);
    } else {
        DPORT_SET_PERI_REG_MASK(DPORT_CACHE_IA_INT_EN_REG,
            DPORT_CACHE_IA_INT_APP_OPPOSITE |
            DPORT_CACHE_IA_INT_APP_DRAM1 |
            DPORT_CACHE_IA_INT_APP_DROM0 |
            DPORT_CACHE_IA_INT_APP_IROM0 |
            DPORT_CACHE_IA_INT_APP_IRAM0 |
            DPORT_CACHE_IA_INT_APP_IRAM1);
    }
    ESP_INTR_ENABLE(ETS_MEMACCESS_ERR_INUM);
}
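
/*
 Return the ID of the CPU which performed the invalid cache access, based on
 the status bits of the DPORT cache debug registers: PRO_CPU_NUM or APP_CPU_NUM
 if the corresponding CPU recorded an invalid access, or -1 if neither did.
*/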
int esp_cache_err_get_cpuid(void)
{
    const uint32_t pro_mask =
            DPORT_PRO_CPU_DISABLED_CACHE_IA_DRAM1 |
            DPORT_PRO_CPU_DISABLED_CACHE_IA_DROM0 |
            DPORT_PRO_CPU_DISABLED_CACHE_IA_IROM0 |
            DPORT_PRO_CPU_DISABLED_CACHE_IA_IRAM0 |
            DPORT_PRO_CPU_DISABLED_CACHE_IA_IRAM1 |
            DPORT_APP_CPU_DISABLED_CACHE_IA_OPPOSITE;

    if (DPORT_GET_PERI_REG_MASK(DPORT_PRO_DCACHE_DBUG3_REG, pro_mask)) {
        return PRO_CPU_NUM;
    }

    const uint32_t app_mask =
            DPORT_APP_CPU_DISABLED_CACHE_IA_DRAM1 |
            DPORT_APP_CPU_DISABLED_CACHE_IA_DROM0 |
            DPORT_APP_CPU_DISABLED_CACHE_IA_IROM0 |
            DPORT_APP_CPU_DISABLED_CACHE_IA_IRAM0 |
            DPORT_APP_CPU_DISABLED_CACHE_IA_IRAM1 |
            DPORT_PRO_CPU_DISABLED_CACHE_IA_OPPOSITE;

    if (DPORT_GET_PERI_REG_MASK(DPORT_APP_DCACHE_DBUG3_REG, app_mask)) {
        return APP_CPU_NUM;
    }
    return -1;
}
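
/*
 Minimal usage sketch (hypothetical, not part of this file): a panic handler
 could combine the two functions above to report the offending core. The header
 name below is an assumption about where these functions are declared.

     #include "esp_private/cache_err_int.h"   // assumed location of the declarations

     static void report_cache_error(void)
     {
         int core = esp_cache_err_get_cpuid();
         if (core >= 0) {
             esp_rom_printf("Cache error on core %d\n", core);
         }
     }
*/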