/*
 * SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

// The LL layer for Cache register operations

#pragma once

#include <stdbool.h>
#include <stdint.h>
#include "soc/dport_reg.h"
#include "soc/ext_mem_defs.h"
#include "hal/cache_types.h"
#include "hal/assert.h"


#ifdef __cplusplus
extern "C" {
#endif
/**
 * @brief Enable a cache unit
 *
 * @param cache_id  cache ID (when l1 cache is per core)
 */
__attribute__((always_inline))
static inline void cache_ll_l1_enable_cache(uint32_t cache_id)
{
    HAL_ASSERT(cache_id == 0 || cache_id == 1);

    if (cache_id == 0) {
        DPORT_REG_SET_BIT(DPORT_PRO_CACHE_CTRL_REG, DPORT_PRO_CACHE_ENABLE);
    } else {
        DPORT_REG_SET_BIT(DPORT_APP_CACHE_CTRL_REG, DPORT_APP_CACHE_ENABLE);
    }
}

/**
 * @brief Disable a cache unit
 *
 * @param cache_id  cache ID (when l1 cache is per core)
 */
__attribute__((always_inline))
static inline void cache_ll_l1_disable_cache(uint32_t cache_id)
{
    HAL_ASSERT(cache_id == 0 || cache_id == 1);

    if (cache_id == 0) {
        // Wait until the PRO cache reports the idle state before disabling it
        while (DPORT_GET_PERI_REG_BITS2(DPORT_PRO_DCACHE_DBUG0_REG, DPORT_PRO_CACHE_STATE, DPORT_PRO_CACHE_STATE_S) != 1) {
            ;
        }
        DPORT_REG_CLR_BIT(DPORT_PRO_CACHE_CTRL_REG, DPORT_PRO_CACHE_ENABLE);
    } else {
        // Wait until the APP cache reports the idle state before disabling it
        while (DPORT_GET_PERI_REG_BITS2(DPORT_APP_DCACHE_DBUG0_REG, DPORT_APP_CACHE_STATE, DPORT_APP_CACHE_STATE_S) != 1) {
            ;
        }
        DPORT_REG_CLR_BIT(DPORT_APP_CACHE_CTRL_REG, DPORT_APP_CACHE_ENABLE);
    }
}
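
/*
 * Usage sketch (illustrative, not part of this header's API): a caller that
 * must keep external flash quiescent can disable the cache around a critical
 * section and re-enable it afterwards. `do_flash_operation()` is a
 * hypothetical placeholder; while the cache is disabled, only code and data
 * in internal RAM may be executed or accessed.
 *
 * @code
 * cache_ll_l1_disable_cache(0);   // spin until the PRO cache is idle, then disable it
 * do_flash_operation();           // hypothetical critical section, must run from IRAM
 * cache_ll_l1_enable_cache(0);    // restore normal cache operation
 * @endcode
 */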

/**
 * @brief Get whether a cache unit is enabled
 *
 * @param   cache_id    cache ID (when l1 cache is per core)
 * @param   type        see `cache_type_t`
 * @return  true: enabled; false: disabled
 */
__attribute__((always_inline))
static inline bool cache_ll_l1_is_cache_enabled(uint32_t cache_id, cache_type_t type)
{
    HAL_ASSERT(cache_id == 0 || cache_id == 1);
    (void) type;    // On ESP32, the I-cache and D-cache share the same enable bit

    bool enabled;
    if (cache_id == 0) {
        enabled = DPORT_REG_GET_BIT(DPORT_PRO_CACHE_CTRL_REG, DPORT_PRO_CACHE_ENABLE);
    } else {
        enabled = DPORT_REG_GET_BIT(DPORT_APP_CACHE_CTRL_REG, DPORT_APP_CACHE_ENABLE);
    }
    return enabled;
}
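
/*
 * Usage sketch (illustrative): because the cache is shared between
 * instructions and data on ESP32, either `cache_type_t` value queries the
 * same enable bit. Assumes `CACHE_TYPE_DATA` is one of the `cache_type_t`
 * members declared in "hal/cache_types.h".
 *
 * @code
 * if (cache_ll_l1_is_cache_enabled(0, CACHE_TYPE_DATA)) {
 *     // external memory is reachable through the PRO core's cache here
 * }
 * @endcode
 */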

/**
 * @brief Get the buses of a particular cache that are mapped to a virtual address range
 *
 * External virtual addresses can only be accessed when the involved cache buses are enabled.
 * This API gets the cache buses on which the memory region (from `vaddr_start` to `vaddr_start + len`) resides.
 *
 * @param cache_id          cache ID (when l1 cache is per core)
 * @param vaddr_start       virtual address start
 * @param len               vaddr length
 */
#if !BOOTLOADER_BUILD
__attribute__((always_inline))
#endif
static inline cache_bus_mask_t cache_ll_l1_get_bus(uint32_t cache_id, uint32_t vaddr_start, uint32_t len)
{
    HAL_ASSERT(cache_id == 0 || cache_id == 1);
    cache_bus_mask_t mask = 0;

    uint32_t vaddr_end = vaddr_start + len - 1;
    if (vaddr_start >= IROM0_CACHE_ADDRESS_HIGH) {
        HAL_ASSERT(false);      //out of range
    } else if (vaddr_start >= IROM0_CACHE_ADDRESS_LOW) {
        mask |= CACHE_BUS_IBUS2;
    } else if (vaddr_start >= IRAM1_CACHE_ADDRESS_LOW) {
        mask |= CACHE_BUS_IBUS1;
        mask |= (vaddr_end >= IROM0_CACHE_ADDRESS_LOW) ? CACHE_BUS_IBUS2 : 0;
    } else if (vaddr_start >= IRAM0_CACHE_ADDRESS_LOW) {
        mask |= CACHE_BUS_IBUS0;
        mask |= (vaddr_end >= IRAM1_CACHE_ADDRESS_LOW) ? CACHE_BUS_IBUS1 : 0;
        mask |= (vaddr_end >= IROM0_CACHE_ADDRESS_LOW) ? CACHE_BUS_IBUS2 : 0;
    } else if (vaddr_start >= DRAM1_CACHE_ADDRESS_LOW) {
        HAL_ASSERT(vaddr_end < DRAM1_CACHE_ADDRESS_HIGH);  //out of range, vaddr should be consecutive, see `ext_mem_defs.h`
        mask |= CACHE_BUS_DBUS1;
    } else if (vaddr_start >= DROM0_CACHE_ADDRESS_LOW) {
        HAL_ASSERT(vaddr_end < DROM0_CACHE_ADDRESS_HIGH);  //out of range, vaddr should be consecutive, see `ext_mem_defs.h`
        mask |= CACHE_BUS_DBUS0;
    } else {
        HAL_ASSERT(false);      //out of range
    }

    return mask;
}
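
/*
 * Usage sketch (illustrative): resolve which buses back a virtual range
 * before enabling them. The 64 KB length is an arbitrary example value;
 * `DROM0_CACHE_ADDRESS_LOW` comes from "soc/ext_mem_defs.h", which this
 * header already includes.
 *
 * @code
 * cache_bus_mask_t mask = cache_ll_l1_get_bus(0, DROM0_CACHE_ADDRESS_LOW, 0x10000);
 * // for a range that stays inside the DROM0 region, mask is CACHE_BUS_DBUS0
 * @endcode
 */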

/**
 * @brief Enable the Cache Buses
 *
 * @param cache_id    cache ID (when l1 cache is per core)
 * @param mask        mask of the buses to be enabled
 */
#if !BOOTLOADER_BUILD
__attribute__((always_inline))
#endif
static inline void cache_ll_l1_enable_bus(uint32_t cache_id, cache_bus_mask_t mask)
{
    HAL_ASSERT(cache_id == 0 || cache_id == 1);

    uint32_t bus_mask = 0;
    if (cache_id == 0) {
        bus_mask |= (mask & CACHE_BUS_IBUS0) ? DPORT_PRO_CACHE_MASK_IRAM0 : 0;
        bus_mask |= (mask & CACHE_BUS_IBUS1) ? DPORT_PRO_CACHE_MASK_IRAM1 : 0;
        bus_mask |= (mask & CACHE_BUS_IBUS2) ? DPORT_PRO_CACHE_MASK_IROM0 : 0;

        bus_mask |= (mask & CACHE_BUS_DBUS0) ? DPORT_PRO_CACHE_MASK_DROM0 : 0;
        bus_mask |= (mask & CACHE_BUS_DBUS1) ? DPORT_PRO_CACHE_MASK_DRAM1 : 0;

        // The CTRL1 bits are "mask" (disable) bits, so clearing them enables the buses
        DPORT_REG_CLR_BIT(DPORT_PRO_CACHE_CTRL1_REG, bus_mask);
    } else {
        bus_mask |= (mask & CACHE_BUS_IBUS0) ? DPORT_APP_CACHE_MASK_IRAM0 : 0;
        bus_mask |= (mask & CACHE_BUS_IBUS1) ? DPORT_APP_CACHE_MASK_IRAM1 : 0;
        bus_mask |= (mask & CACHE_BUS_IBUS2) ? DPORT_APP_CACHE_MASK_IROM0 : 0;

        bus_mask |= (mask & CACHE_BUS_DBUS0) ? DPORT_APP_CACHE_MASK_DROM0 : 0;
        bus_mask |= (mask & CACHE_BUS_DBUS1) ? DPORT_APP_CACHE_MASK_DRAM1 : 0;

        DPORT_REG_CLR_BIT(DPORT_APP_CACHE_CTRL1_REG, bus_mask);
    }
}
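
/*
 * Usage sketch (illustrative): combine with cache_ll_l1_get_bus() so both
 * cores can reach a newly mapped region. `vaddr` and `len` are hypothetical
 * values describing the mapped range.
 *
 * @code
 * cache_bus_mask_t mask = cache_ll_l1_get_bus(0, vaddr, len);
 * cache_ll_l1_enable_bus(0, mask);    // PRO core
 * cache_ll_l1_enable_bus(1, mask);    // APP core
 * @endcode
 */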

/**
 * @brief Returns enabled buses for a given core
 *
 * @param cache_id    cache ID (when l1 cache is per core)
 *
 * @return State of enabled buses
 */
__attribute__((always_inline))
static inline cache_bus_mask_t cache_ll_l1_get_enabled_bus(uint32_t cache_id)
{
    cache_bus_mask_t mask = 0;
    HAL_ASSERT(cache_id == 0 || cache_id == 1);
    if (cache_id == 0) {
        uint32_t bus_mask = DPORT_REG_READ(DPORT_PRO_CACHE_CTRL1_REG);
        mask |= (!(bus_mask & DPORT_PRO_CACHE_MASK_IRAM0)) ? CACHE_BUS_IBUS0 : 0;
        mask |= (!(bus_mask & DPORT_PRO_CACHE_MASK_IRAM1)) ? CACHE_BUS_IBUS1 : 0;
        mask |= (!(bus_mask & DPORT_PRO_CACHE_MASK_IROM0)) ? CACHE_BUS_IBUS2 : 0;

        mask |= (!(bus_mask & DPORT_PRO_CACHE_MASK_DROM0)) ? CACHE_BUS_DBUS0 : 0;
        mask |= (!(bus_mask & DPORT_PRO_CACHE_MASK_DRAM1)) ? CACHE_BUS_DBUS1 : 0;
    } else {
        uint32_t bus_mask = DPORT_REG_READ(DPORT_APP_CACHE_CTRL1_REG);
        mask |= (!(bus_mask & DPORT_APP_CACHE_MASK_IRAM0)) ? CACHE_BUS_IBUS0 : 0;
        mask |= (!(bus_mask & DPORT_APP_CACHE_MASK_IRAM1)) ? CACHE_BUS_IBUS1 : 0;
        mask |= (!(bus_mask & DPORT_APP_CACHE_MASK_IROM0)) ? CACHE_BUS_IBUS2 : 0;

        mask |= (!(bus_mask & DPORT_APP_CACHE_MASK_DROM0)) ? CACHE_BUS_DBUS0 : 0;
        mask |= (!(bus_mask & DPORT_APP_CACHE_MASK_DRAM1)) ? CACHE_BUS_DBUS1 : 0;
    }
    return mask;
}

/**
 * @brief Disable the Cache Buses
 *
 * @param cache_id    cache ID (when l1 cache is per core)
 * @param mask        mask of the buses to be disabled
 */
__attribute__((always_inline))
static inline void cache_ll_l1_disable_bus(uint32_t cache_id, cache_bus_mask_t mask)
{
    HAL_ASSERT(cache_id == 0 || cache_id == 1);

    uint32_t bus_mask = 0;
    if (cache_id == 0) {
        bus_mask |= (mask & CACHE_BUS_IBUS0) ? DPORT_PRO_CACHE_MASK_IRAM0 : 0;
        bus_mask |= (mask & CACHE_BUS_IBUS1) ? DPORT_PRO_CACHE_MASK_IRAM1 : 0;
        bus_mask |= (mask & CACHE_BUS_IBUS2) ? DPORT_PRO_CACHE_MASK_IROM0 : 0;

        bus_mask |= (mask & CACHE_BUS_DBUS0) ? DPORT_PRO_CACHE_MASK_DROM0 : 0;
        bus_mask |= (mask & CACHE_BUS_DBUS1) ? DPORT_PRO_CACHE_MASK_DRAM1 : 0;

        // Setting the "mask" bits disables the corresponding buses
        DPORT_REG_SET_BIT(DPORT_PRO_CACHE_CTRL1_REG, bus_mask);
    } else {
        bus_mask |= (mask & CACHE_BUS_IBUS0) ? DPORT_APP_CACHE_MASK_IRAM0 : 0;
        bus_mask |= (mask & CACHE_BUS_IBUS1) ? DPORT_APP_CACHE_MASK_IRAM1 : 0;
        bus_mask |= (mask & CACHE_BUS_IBUS2) ? DPORT_APP_CACHE_MASK_IROM0 : 0;

        bus_mask |= (mask & CACHE_BUS_DBUS0) ? DPORT_APP_CACHE_MASK_DROM0 : 0;
        bus_mask |= (mask & CACHE_BUS_DBUS1) ? DPORT_APP_CACHE_MASK_DRAM1 : 0;

        DPORT_REG_SET_BIT(DPORT_APP_CACHE_CTRL1_REG, bus_mask);
    }
}
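
/*
 * Usage sketch (illustrative): a save/disable/restore pattern around a
 * section that must not trigger external-memory fetches, using only the
 * functions defined in this header.
 *
 * @code
 * cache_bus_mask_t saved = cache_ll_l1_get_enabled_bus(0);   // snapshot current bus state
 * cache_ll_l1_disable_bus(0, saved);                         // cut off external memory access
 * // ... critical section running entirely from internal RAM ...
 * cache_ll_l1_enable_bus(0, saved);                          // restore the snapshot
 * @endcode
 */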

#ifdef __cplusplus
}
#endif