1 /*
2  * SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 /**
8  * @file cache_err_int.c
9  * @brief The cache has an interrupt that can be raised as soon as an access to a cached
10  *        region (Flash, PSRAM) is done without the cache being enabled.
11  *        We use that here to panic the CPU, which from a debugging perspective,
12  *        is better than grabbing bad data from the bus.
13  */
14 
15 #include <stdint.h>
16 #include "sdkconfig.h"
17 #include "esp_err.h"
18 #include "esp_log.h"
19 #include "esp_attr.h"
20 #include "esp_cpu.h"
21 #include "esp_intr_alloc.h"
22 #include "soc/soc.h"
23 #include "soc/periph_defs.h"
24 #include "esp_rom_sys.h"
25 #include "hal/cache_ll.h"
26 
// Log tag used by the DRAM-safe log macros in this file
static const char *TAG = "CACHE_ERR";
28 
esp_cache_err_int_init(void)29 void esp_cache_err_int_init(void)
30 {
31     uint32_t core_id = esp_cpu_get_core_id();
32     ESP_INTR_DISABLE(ETS_CACHEERR_INUM);
33 
34     // We do not register a handler for the interrupt because it is interrupt
35     // level 4 which is not serviceable from C. Instead, xtensa_vectors.S has
36     // a call to the panic handler for this interrupt.
37     esp_rom_route_intr_matrix(core_id, ETS_CACHE_IA_INTR_SOURCE, ETS_CACHEERR_INUM);
38 
39     // Enable invalid cache access interrupt when the cache is disabled.
40     // When the interrupt happens, we can not determine the CPU where the
41     // invalid cache access has occurred. We enable the interrupt to catch
42     // invalid access on both CPUs, but the interrupt is connected to the
43     // CPU which happens to call this function.
44     // For this reason, panic handler backtrace will not be correct if the
45     // interrupt is connected to PRO CPU and invalid access happens on the APP CPU.
46 
47     ESP_DRAM_LOGV(TAG, "illegal error intr clr & ena mask is: 0x%x", CACHE_LL_L1_ILG_EVENT_MASK);
48     //illegal error intr doesn't depend on cache_id
49     cache_ll_l1_clear_illegal_error_intr(0, CACHE_LL_L1_ILG_EVENT_MASK);
50     cache_ll_l1_enable_illegal_error_intr(0, CACHE_LL_L1_ILG_EVENT_MASK);
51 
52     if (core_id == PRO_CPU_NUM) {
53         esp_rom_route_intr_matrix(core_id, ETS_CACHE_CORE0_ACS_INTR_SOURCE, ETS_CACHEERR_INUM);
54 
55         /* On the hardware side, stat by clearing all the bits reponsible for
56          * enabling cache access error interrupts.  */
57         ESP_DRAM_LOGV(TAG, "core 0 access error intr clr & ena mask is: 0x%x", CACHE_LL_L1_ACCESS_EVENT_MASK);
58         cache_ll_l1_clear_access_error_intr(0, CACHE_LL_L1_ACCESS_EVENT_MASK);
59         cache_ll_l1_enable_access_error_intr(0, CACHE_LL_L1_ACCESS_EVENT_MASK);
60     } else {
61         esp_rom_route_intr_matrix(core_id, ETS_CACHE_CORE1_ACS_INTR_SOURCE, ETS_CACHEERR_INUM);
62 
63         /* On the hardware side, stat by clearing all the bits reponsible for
64          * enabling cache access error interrupts.  */
65         ESP_DRAM_LOGV(TAG, "core 1 access error intr clr & ena mask is: 0x%x", CACHE_LL_L1_ACCESS_EVENT_MASK);
66         cache_ll_l1_clear_access_error_intr(1, CACHE_LL_L1_ACCESS_EVENT_MASK);
67         cache_ll_l1_enable_access_error_intr(1, CACHE_LL_L1_ACCESS_EVENT_MASK);
68     }
69 
70     ESP_INTR_ENABLE(ETS_CACHEERR_INUM);
71 }
72 
esp_cache_err_get_cpuid(void)73 int esp_cache_err_get_cpuid(void)
74 {
75     if (cache_ll_l1_get_access_error_intr_status(0, CACHE_LL_L1_ACCESS_EVENT_MASK)) {
76         return PRO_CPU_NUM;
77     }
78 
79     if (cache_ll_l1_get_access_error_intr_status(1, CACHE_LL_L1_ACCESS_EVENT_MASK)) {
80         return APP_CPU_NUM;
81     }
82 
83     return -1;
84 }
85