/*
 * SPDX-FileCopyrightText: 2020-2021 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */
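
// This driver manages the "dedicated GPIO" peripheral: a platform context is
// lazily allocated per CPU core, and user-created GPIO bundles claim
// contiguous input/output channels on it. The fast read/write paths go
// through CPU low-level accessors (cpu_ll_*) instead of regular GPIO registers.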

// #define LOG_LOCAL_LEVEL ESP_LOG_DEBUG

#include <stdlib.h>
#include <string.h>
#include <sys/lock.h>
#include "sdkconfig.h"
#include "esp_compiler.h"
#include "esp_heap_caps.h"
#include "esp_intr_alloc.h"
#include "esp_log.h"
#include "esp_check.h"
#include "soc/soc_caps.h"
#include "soc/gpio_periph.h"
#include "soc/io_mux_reg.h"
#include "hal/cpu_hal.h"
#include "hal/cpu_ll.h"
#include "hal/gpio_hal.h"
#include "driver/periph_ctrl.h"
#include "esp_rom_gpio.h"
#include "freertos/FreeRTOS.h"
#include "driver/dedic_gpio.h"
#include "soc/dedic_gpio_periph.h"
#if SOC_DEDIC_GPIO_ALLOW_REG_ACCESS
#include "soc/dedic_gpio_struct.h"
#include "hal/dedic_gpio_ll.h"
#endif

static const char *TAG = "dedic_gpio";

typedef struct dedic_gpio_platform_t dedic_gpio_platform_t;
typedef struct dedic_gpio_bundle_t dedic_gpio_bundle_t;

// Dedicated GPIO driver platform; GPIO bundles will be installed onto it
static dedic_gpio_platform_t *s_platform[SOC_CPU_CORES_NUM];
// platform-level mutex lock
static _lock_t s_platform_mutexlock[SOC_CPU_CORES_NUM];

struct dedic_gpio_platform_t {
    portMUX_TYPE spinlock;      // Spinlock, prevents GPIO channels from accessing common resources concurrently
    uint32_t out_occupied_mask; // mask of output channels that are already occupied
    uint32_t in_occupied_mask;  // mask of input channels that are already occupied
#if SOC_DEDIC_GPIO_HAS_INTERRUPT
    intr_handle_t intr_hdl;     // interrupt handle
    dedic_gpio_isr_callback_t cbs[SOC_DEDIC_GPIO_IN_CHANNELS_NUM];   // array of callback functions, one per input channel
    void *cb_args[SOC_DEDIC_GPIO_IN_CHANNELS_NUM];                   // array of callback arguments, one per input channel
    dedic_gpio_bundle_t *in_bundles[SOC_DEDIC_GPIO_IN_CHANNELS_NUM]; // bundle that each input channel belongs to
#endif
#if SOC_DEDIC_GPIO_ALLOW_REG_ACCESS
    dedic_dev_t *dev;
#endif
};

struct dedic_gpio_bundle_t {
    uint32_t core_id;    // CPU core ID, a GPIO bundle must be installed on a specific CPU core
    uint32_t out_mask;   // mask of output channels in the bank
    uint32_t in_mask;    // mask of input channels in the bank
    uint32_t out_offset; // offset in the bank (seen from output channel)
    uint32_t in_offset;  // offset in the bank (seen from input channel)
    size_t nr_gpio;      // number of GPIOs in gpio_array
    int gpio_array[];    // array of GPIO numbers (configured by user)
};
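
// Note: gpio_array is a flexible array member, so each bundle is allocated as
// one block of sizeof(dedic_gpio_bundle_t) + array_size * sizeof(int) bytes
// (see dedic_gpio_new_bundle below).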

static esp_err_t dedic_gpio_build_platform(uint32_t core_id)
{
    esp_err_t ret = ESP_OK;
    if (!s_platform[core_id]) {
        // prevent building the platform concurrently
        _lock_acquire(&s_platform_mutexlock[core_id]);
        if (!s_platform[core_id]) {
            s_platform[core_id] = calloc(1, sizeof(dedic_gpio_platform_t));
            if (s_platform[core_id]) {
                // initialize platform members
                s_platform[core_id]->spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
#if SOC_DEDIC_GPIO_ALLOW_REG_ACCESS
                s_platform[core_id]->dev = &DEDIC_GPIO;
#endif // SOC_DEDIC_GPIO_ALLOW_REG_ACCESS
#if !SOC_DEDIC_PERIPH_AUTO_ENABLE
                periph_module_enable(dedic_gpio_periph_signals.module); // enable APB clock to peripheral
#endif // !SOC_DEDIC_PERIPH_AUTO_ENABLE
            }
        }
        _lock_release(&s_platform_mutexlock[core_id]);

        ESP_GOTO_ON_FALSE(s_platform[core_id], ESP_ERR_NO_MEM, err, TAG, "no mem for s_platform[%d]", core_id);
        ESP_LOGD(TAG, "build platform on core[%d] at %p", core_id, s_platform[core_id]);
    }

err:
    return ret;
}

static void dedic_gpio_break_platform(uint32_t core_id)
{
    if (s_platform[core_id]) {
        // prevent breaking platform concurrently
        _lock_acquire(&s_platform_mutexlock[core_id]);
        if (s_platform[core_id]) {
            free(s_platform[core_id]);
            s_platform[core_id] = NULL;
#if !SOC_DEDIC_PERIPH_AUTO_ENABLE
            periph_module_disable(dedic_gpio_periph_signals.module); // disable module if no GPIO channel is being used
#endif // !SOC_DEDIC_PERIPH_AUTO_ENABLE
        }
        _lock_release(&s_platform_mutexlock[core_id]);
    }
}

#if SOC_DEDIC_GPIO_HAS_INTERRUPT
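// Default ISR: dispatches per-channel user callbacks and yields at the end
// if any callback asked for a context switch.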
static void dedic_gpio_default_isr(void *arg)
{
    bool need_yield = false;
    dedic_gpio_platform_t *platform = (dedic_gpio_platform_t *)arg;

    // get and clear interrupt status
    portENTER_CRITICAL_ISR(&platform->spinlock);
    uint32_t status = dedic_gpio_ll_get_interrupt_status(platform->dev);
    dedic_gpio_ll_clear_interrupt_status(platform->dev, status);
    portEXIT_CRITICAL_ISR(&platform->spinlock);

    // handle dedicated channels one by one
    while (status) {
        uint32_t channel = __builtin_ffs(status) - 1; // get the dedicated channel number that triggered the interrupt
        if (platform->cbs[channel]) {
            if (platform->cbs[channel](platform->in_bundles[channel], channel - platform->in_bundles[channel]->in_offset, platform->cb_args[channel])) {
                need_yield = true; // note that we need to yield at the end of the ISR
            }
        }
        status = status & (status - 1); // clear the rightmost set bit
    }

    if (need_yield) {
        portYIELD_FROM_ISR();
    }
}
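
// Worked example of the dispatch loop above: if status == 0b0110, the first
// iteration computes __builtin_ffs(0b0110) - 1 == 1 and services channel 1;
// status & (status - 1) then drops that bit, leaving 0b0100 for channel 2,
// after which the loop exits.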

static esp_err_t dedic_gpio_install_interrupt(uint32_t core_id)
{
    esp_err_t ret = ESP_OK;
    if (!s_platform[core_id]->intr_hdl) {
        // prevent installing the interrupt concurrently
        _lock_acquire(&s_platform_mutexlock[core_id]);
        if (!s_platform[core_id]->intr_hdl) {
            int isr_flags = 0;
            ret = esp_intr_alloc(dedic_gpio_periph_signals.irq, isr_flags, dedic_gpio_default_isr, s_platform[core_id], &s_platform[core_id]->intr_hdl);
            // clear pending interrupt
            uint32_t status = dedic_gpio_ll_get_interrupt_status(s_platform[core_id]->dev);
            dedic_gpio_ll_clear_interrupt_status(s_platform[core_id]->dev, status);
        }
        _lock_release(&s_platform_mutexlock[core_id]);
        ESP_GOTO_ON_ERROR(ret, err, TAG, "alloc interrupt failed");
    }

err:
    return ret;
}

static void dedic_gpio_uninstall_interrupt(uint32_t core_id)
{
    if (s_platform[core_id]->intr_hdl) {
        // prevent uninstalling the interrupt concurrently
        _lock_acquire(&s_platform_mutexlock[core_id]);
        if (s_platform[core_id]->intr_hdl) {
            esp_intr_free(s_platform[core_id]->intr_hdl);
            s_platform[core_id]->intr_hdl = NULL;
            // disable all interrupts
            dedic_gpio_ll_enable_interrupt(s_platform[core_id]->dev, ~0UL, false);
        }
        _lock_release(&s_platform_mutexlock[core_id]);
    }
}

static void dedic_gpio_set_interrupt(uint32_t core_id, uint32_t channel, dedic_gpio_intr_type_t type)
{
    dedic_gpio_ll_set_interrupt_type(s_platform[core_id]->dev, channel, type);
    if (type != DEDIC_GPIO_INTR_NONE) {
        dedic_gpio_ll_enable_interrupt(s_platform[core_id]->dev, 1 << channel, true);
    } else {
        dedic_gpio_ll_enable_interrupt(s_platform[core_id]->dev, 1 << channel, false);
    }
}
#endif // SOC_DEDIC_GPIO_HAS_INTERRUPT

esp_err_t dedic_gpio_new_bundle(const dedic_gpio_bundle_config_t *config, dedic_gpio_bundle_handle_t *ret_bundle)
{
    esp_err_t ret = ESP_OK;
    dedic_gpio_bundle_t *bundle = NULL;
    uint32_t out_mask = 0;
    uint32_t in_mask = 0;
    uint32_t core_id = cpu_hal_get_core_id(); // dedicated GPIO will be bound to the CPU core that invokes this API

    ESP_GOTO_ON_FALSE(config && ret_bundle, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
    ESP_GOTO_ON_FALSE(config->gpio_array && config->array_size > 0, ESP_ERR_INVALID_ARG, err, TAG, "invalid GPIO array or size");
    ESP_GOTO_ON_FALSE(config->flags.in_en || config->flags.out_en, ESP_ERR_INVALID_ARG, err, TAG, "no input/output mode specified");
    // lazy install s_platform[core_id]
    ESP_GOTO_ON_ERROR(dedic_gpio_build_platform(core_id), err, TAG, "build platform %d failed", core_id);

    size_t bundle_size = sizeof(dedic_gpio_bundle_t) + config->array_size * sizeof(config->gpio_array[0]);
    bundle = calloc(1, bundle_size);
    ESP_GOTO_ON_FALSE(bundle, ESP_ERR_NO_MEM, err, TAG, "no mem for bundle");

    // for performance reasons, we only search for contiguous channels
    uint32_t pattern = (1 << config->array_size) - 1;
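    // e.g. array_size == 4 gives pattern == 0b1111; the loops below slide this
    // window along the occupied mask until a free slot of that width is found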
    // configure outward channels
    uint32_t out_offset = 0;
    if (config->flags.out_en) {
        ESP_GOTO_ON_FALSE(config->array_size <= SOC_DEDIC_GPIO_OUT_CHANNELS_NUM, ESP_ERR_INVALID_ARG, err, TAG,
                          "array size(%d) exceeds maximum supported out channels(%d)", config->array_size, SOC_DEDIC_GPIO_OUT_CHANNELS_NUM);
        // prevent installing bundles concurrently
        portENTER_CRITICAL(&s_platform[core_id]->spinlock);
        for (size_t i = 0; i <= SOC_DEDIC_GPIO_OUT_CHANNELS_NUM - config->array_size; i++) {
            if ((s_platform[core_id]->out_occupied_mask & (pattern << i)) == 0) {
                out_mask = pattern << i;
                out_offset = i;
                break;
            }
        }
        if (out_mask) {
            s_platform[core_id]->out_occupied_mask |= out_mask;
#if SOC_DEDIC_GPIO_ALLOW_REG_ACCESS
            // always enable instruction access to output GPIO, which has better performance than register access
            dedic_gpio_ll_enable_instruction_access_out(s_platform[core_id]->dev, out_mask, true);
#endif
        }
        portEXIT_CRITICAL(&s_platform[core_id]->spinlock);
        ESP_GOTO_ON_FALSE(out_mask, ESP_ERR_NOT_FOUND, err, TAG, "no free outward channels on core[%d]", core_id);
        ESP_LOGD(TAG, "new outward bundle(%p) on core[%d], offset=%d, mask(%x)", bundle, core_id, out_offset, out_mask);
    }

    // configure inward channels
    uint32_t in_offset = 0;
    if (config->flags.in_en) {
        ESP_GOTO_ON_FALSE(config->array_size <= SOC_DEDIC_GPIO_IN_CHANNELS_NUM, ESP_ERR_INVALID_ARG, err, TAG,
                          "array size(%d) exceeds maximum supported in channels(%d)", config->array_size, SOC_DEDIC_GPIO_IN_CHANNELS_NUM);
        // prevent installing bundles concurrently
        portENTER_CRITICAL(&s_platform[core_id]->spinlock);
        for (size_t i = 0; i <= SOC_DEDIC_GPIO_IN_CHANNELS_NUM - config->array_size; i++) {
            if ((s_platform[core_id]->in_occupied_mask & (pattern << i)) == 0) {
                in_mask = pattern << i;
                in_offset = i;
                break;
            }
        }
        if (in_mask) {
            s_platform[core_id]->in_occupied_mask |= in_mask;
        }
        portEXIT_CRITICAL(&s_platform[core_id]->spinlock);
        ESP_GOTO_ON_FALSE(in_mask, ESP_ERR_NOT_FOUND, err, TAG, "no free inward channels on core[%d]", core_id);
        ESP_LOGD(TAG, "new inward bundle(%p) on core[%d], offset=%d, mask(%x)", bundle, core_id, in_offset, in_mask);
    }

    // route dedicated GPIO channel signals to GPIO matrix
    if (config->flags.in_en) {
        for (size_t i = 0; i < config->array_size; i++) {
            gpio_hal_iomux_func_sel(GPIO_PIN_MUX_REG[config->gpio_array[i]], PIN_FUNC_GPIO);
            esp_rom_gpio_connect_in_signal(config->gpio_array[i], dedic_gpio_periph_signals.cores[core_id].in_sig_per_channel[in_offset + i], config->flags.in_invert);
        }
    }
    if (config->flags.out_en) {
        for (size_t i = 0; i < config->array_size; i++) {
            gpio_hal_iomux_func_sel(GPIO_PIN_MUX_REG[config->gpio_array[i]], PIN_FUNC_GPIO);
            esp_rom_gpio_connect_out_signal(config->gpio_array[i], dedic_gpio_periph_signals.cores[core_id].out_sig_per_channel[out_offset + i], config->flags.out_invert, false);
        }
#if !SOC_DEDIC_GPIO_OUT_AUTO_ENABLE
        cpu_ll_enable_dedic_gpio_output(s_platform[core_id]->out_occupied_mask);
#endif // !SOC_DEDIC_GPIO_OUT_AUTO_ENABLE
    }

    // it's safe to initialize bundle members without locks here
    bundle->core_id = core_id;
    bundle->out_mask = out_mask;
    bundle->in_mask = in_mask;
    bundle->out_offset = out_offset;
    bundle->in_offset = in_offset;
    bundle->nr_gpio = config->array_size;
    memcpy(bundle->gpio_array, config->gpio_array, config->array_size * sizeof(config->gpio_array[0]));

    *ret_bundle = bundle; // return bundle instance
    return ESP_OK;

err:
    if (s_platform[core_id] && (out_mask || in_mask)) {
        portENTER_CRITICAL(&s_platform[core_id]->spinlock);
        s_platform[core_id]->out_occupied_mask &= ~out_mask;
        s_platform[core_id]->in_occupied_mask &= ~in_mask;
        portEXIT_CRITICAL(&s_platform[core_id]->spinlock);
    }
    if (bundle) {
        free(bundle);
    }
    return ret;
}
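
/*
 * Usage sketch (illustrative only, not part of this driver): create a 4-pin
 * output bundle on the calling core. The GPIO numbers are hypothetical.
 *
 *     const int bundle_gpios[] = {0, 1, 2, 3};
 *     dedic_gpio_bundle_config_t config = {
 *         .gpio_array = bundle_gpios,
 *         .array_size = sizeof(bundle_gpios) / sizeof(bundle_gpios[0]),
 *         .flags = { .out_en = 1 },
 *     };
 *     dedic_gpio_bundle_handle_t bundle = NULL;
 *     ESP_ERROR_CHECK(dedic_gpio_new_bundle(&config, &bundle));
 */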

esp_err_t dedic_gpio_del_bundle(dedic_gpio_bundle_handle_t bundle)
{
    esp_err_t ret = ESP_OK;
    bool recycle_all = false;
    ESP_GOTO_ON_FALSE(bundle, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");

    uint32_t core_id = cpu_hal_get_core_id();
    ESP_GOTO_ON_FALSE(core_id == bundle->core_id, ESP_FAIL, err, TAG, "del bundle on wrong CPU");

    portENTER_CRITICAL(&s_platform[core_id]->spinlock);
    s_platform[core_id]->out_occupied_mask &= ~(bundle->out_mask);
    s_platform[core_id]->in_occupied_mask &= ~(bundle->in_mask);
    if (!s_platform[core_id]->in_occupied_mask && !s_platform[core_id]->out_occupied_mask) {
        recycle_all = true;
    }
    portEXIT_CRITICAL(&s_platform[core_id]->spinlock);

    free(bundle);

    if (recycle_all) {
#if SOC_DEDIC_GPIO_HAS_INTERRUPT
        dedic_gpio_uninstall_interrupt(core_id);
#endif
        dedic_gpio_break_platform(core_id);
    }

err:
    return ret;
}

esp_err_t dedic_gpio_get_out_mask(dedic_gpio_bundle_handle_t bundle, uint32_t *mask)
{
    esp_err_t ret = ESP_OK;
    ESP_GOTO_ON_FALSE(bundle && mask, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
    *mask = bundle->out_mask;
err:
    return ret;
}

esp_err_t dedic_gpio_get_in_mask(dedic_gpio_bundle_handle_t bundle, uint32_t *mask)
{
    esp_err_t ret = ESP_OK;
    ESP_GOTO_ON_FALSE(bundle && mask, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
    *mask = bundle->in_mask;
err:
    return ret;
}

void dedic_gpio_bundle_write(dedic_gpio_bundle_handle_t bundle, uint32_t mask, uint32_t value)
{
    // For performance reasons, we don't validate the parameters here.
    // We don't even check whether we're running on the right CPU core (i.e. whether bundle->core_id matches the current core ID).
    cpu_ll_write_dedic_gpio_mask(bundle->out_mask & (mask << bundle->out_offset), value << bundle->out_offset);
}
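
// Worked example (hypothetical numbers): for a bundle with out_offset == 2 and
// out_mask == 0b111100, writing mask == 0b1111 and value == 0b0101 drives the
// core-wide channel mask 0b111100 & (0b1111 << 2) == 0b111100 with the value
// 0b0101 << 2 == 0b010100, i.e. bundle bits 0 and 2 go high, bits 1 and 3 low.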

uint32_t dedic_gpio_bundle_read_out(dedic_gpio_bundle_handle_t bundle)
{
    // For performance reasons, we don't validate the parameters here.
    // We don't even check whether we're running on the right CPU core (i.e. whether bundle->core_id matches the current core ID).
    uint32_t value = cpu_ll_read_dedic_gpio_out();
    return (value & bundle->out_mask) >> (bundle->out_offset);
}

uint32_t dedic_gpio_bundle_read_in(dedic_gpio_bundle_handle_t bundle)
{
    // For performance reasons, we don't validate the parameters here.
    // We don't even check whether we're running on the right CPU core (i.e. whether bundle->core_id matches the current core ID).
    uint32_t value = cpu_ll_read_dedic_gpio_in();
    return (value & bundle->in_mask) >> (bundle->in_offset);
}
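
// Read example (hypothetical numbers): with in_offset == 2 and
// in_mask == 0b111100, a raw core-wide input of 0b011000 reads back as
// (0b011000 & 0b111100) >> 2 == 0b0110.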

#if SOC_DEDIC_GPIO_HAS_INTERRUPT
esp_err_t dedic_gpio_bundle_set_interrupt_and_callback(dedic_gpio_bundle_handle_t bundle, uint32_t mask, dedic_gpio_intr_type_t intr_type, dedic_gpio_isr_callback_t cb_isr, void *cb_args)
{
    esp_err_t ret = ESP_OK;
    ESP_GOTO_ON_FALSE(bundle, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
    uint32_t core_id = cpu_hal_get_core_id();
    // lazy alloc interrupt
    ESP_GOTO_ON_ERROR(dedic_gpio_install_interrupt(core_id), err, TAG, "allocate interrupt on core %d failed", core_id);

    uint32_t channel_mask = bundle->in_mask & (mask << bundle->in_offset);
    uint32_t channel = 0;
    while (channel_mask) {
        channel = __builtin_ffs(channel_mask) - 1;
        portENTER_CRITICAL(&s_platform[core_id]->spinlock);
        dedic_gpio_set_interrupt(core_id, channel, intr_type);
        portEXIT_CRITICAL(&s_platform[core_id]->spinlock);

        s_platform[core_id]->cbs[channel] = cb_isr;
        s_platform[core_id]->cb_args[channel] = cb_args;
        s_platform[core_id]->in_bundles[channel] = bundle;
        channel_mask = channel_mask & (channel_mask - 1); // clear the rightmost set bit
    }

err:
    return ret;
}
#endif // SOC_DEDIC_GPIO_HAS_INTERRUPT
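
/*
 * Usage sketch (illustrative only): register a callback on bundle bit 0.
 * The callback runs in ISR context and returns true if it woke a
 * higher-priority task. Names other than the driver API are hypothetical.
 *
 *     static bool on_edge(dedic_gpio_bundle_handle_t bundle, uint32_t index, void *args)
 *     {
 *         // index is the offset of the triggering member within the bundle
 *         return false; // no task woken in this sketch
 *     }
 *
 *     dedic_gpio_bundle_set_interrupt_and_callback(bundle, BIT(0), DEDIC_GPIO_INTR_POS_EDGE, on_edge, NULL);
 */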