/*
 * SPDX-FileCopyrightText: 2020-2021 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

// #define LOG_LOCAL_LEVEL ESP_LOG_DEBUG

#include <stdlib.h>
#include <string.h>
#include <sys/lock.h>
#include "sdkconfig.h"
#include "esp_compiler.h"
#include "esp_heap_caps.h"
#include "esp_intr_alloc.h"
#include "esp_log.h"
#include "esp_check.h"
#include "esp_cpu.h"
#include "soc/soc_caps.h"
#include "soc/gpio_periph.h"
#include "soc/io_mux_reg.h"
#include "hal/dedic_gpio_cpu_ll.h"
#include "hal/gpio_hal.h"
#include "esp_private/periph_ctrl.h"
#include "esp_rom_gpio.h"
#include "freertos/FreeRTOS.h"
#include "driver/dedic_gpio.h"
#include "soc/dedic_gpio_periph.h"
#if SOC_DEDIC_GPIO_ALLOW_REG_ACCESS
#include "soc/dedic_gpio_struct.h"
#include "hal/dedic_gpio_ll.h"
#endif

static const char *TAG = "dedic_gpio";

typedef struct dedic_gpio_platform_t dedic_gpio_platform_t;
typedef struct dedic_gpio_bundle_t dedic_gpio_bundle_t;

// Dedicated GPIO driver platform, GPIO bundles will be installed onto it
static dedic_gpio_platform_t *s_platform[SOC_CPU_CORES_NUM];
// platform-level mutex lock
static _lock_t s_platform_mutexlock[SOC_CPU_CORES_NUM];

struct dedic_gpio_platform_t {
    portMUX_TYPE spinlock;      // spinlock, prevents GPIO channels from accessing common resources concurrently
    uint32_t out_occupied_mask; // mask of output channels that are already occupied
    uint32_t in_occupied_mask;  // mask of input channels that are already occupied
#if SOC_DEDIC_GPIO_HAS_INTERRUPT
    intr_handle_t intr_hdl;     // interrupt handle
    dedic_gpio_isr_callback_t cbs[SOC_DEDIC_GPIO_IN_CHANNELS_NUM];   // array of callback functions, one per input channel
    void *cb_args[SOC_DEDIC_GPIO_IN_CHANNELS_NUM];                   // array of callback arguments, one per input channel
    dedic_gpio_bundle_t *in_bundles[SOC_DEDIC_GPIO_IN_CHANNELS_NUM]; // bundle that each input channel belongs to
#endif
#if SOC_DEDIC_GPIO_ALLOW_REG_ACCESS
    dedic_dev_t *dev;
#endif
};

struct dedic_gpio_bundle_t {
    uint32_t core_id;    // CPU core ID; a GPIO bundle must be installed on a specific CPU core
    uint32_t out_mask;   // mask of output channels in the bank
    uint32_t in_mask;    // mask of input channels in the bank
    uint32_t out_offset; // offset in the bank (as seen from the output channels)
    uint32_t in_offset;  // offset in the bank (as seen from the input channels)
    size_t nr_gpio;      // number of GPIOs in gpio_array
    int gpio_array[];    // array of GPIO numbers (configured by the user)
};
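
/*
 * Illustration (not from the original source): a bundle of 4 GPIOs whose
 * channel search lands at offset 2 gets the pattern 0b1111 shifted left by 2,
 * i.e. out_mask = 0x3C and out_offset = 2. All mask/value arithmetic in the
 * APIs below follows this layout.
 */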

static esp_err_t dedic_gpio_build_platform(int core_id)
{
    esp_err_t ret = ESP_OK;
    if (!s_platform[core_id]) {
        // prevent building the platform concurrently
        _lock_acquire(&s_platform_mutexlock[core_id]);
        if (!s_platform[core_id]) {
            s_platform[core_id] = calloc(1, sizeof(dedic_gpio_platform_t));
            if (s_platform[core_id]) {
                // initialize platform members
                s_platform[core_id]->spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
                // initial occupied mask: 1111...100...0 (channels beyond the supported count are marked occupied forever)
                s_platform[core_id]->out_occupied_mask = UINT32_MAX & ~((1 << SOC_DEDIC_GPIO_OUT_CHANNELS_NUM) - 1);
                s_platform[core_id]->in_occupied_mask = UINT32_MAX & ~((1 << SOC_DEDIC_GPIO_IN_CHANNELS_NUM) - 1);
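                // e.g. assuming 8 output channels: ~((1 << 8) - 1) = 0xFFFFFF00,
                // so the low 8 bits start free and the high 24 bits can never be allocated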
#if SOC_DEDIC_GPIO_ALLOW_REG_ACCESS
                s_platform[core_id]->dev = &DEDIC_GPIO;
#endif // SOC_DEDIC_GPIO_ALLOW_REG_ACCESS
#if !SOC_DEDIC_PERIPH_ALWAYS_ENABLE
                periph_module_enable(dedic_gpio_periph_signals.module); // enable APB clock to the peripheral
#endif // !SOC_DEDIC_PERIPH_ALWAYS_ENABLE
            }
        }
        _lock_release(&s_platform_mutexlock[core_id]);

        ESP_GOTO_ON_FALSE(s_platform[core_id], ESP_ERR_NO_MEM, err, TAG, "no mem for s_platform[%d]", core_id);
        ESP_LOGD(TAG, "build platform on core[%d] at %p", core_id, s_platform[core_id]);
    }

err:
    return ret;
}

static void dedic_gpio_break_platform(uint32_t core_id)
{
    if (s_platform[core_id]) {
        // prevent breaking the platform concurrently
        _lock_acquire(&s_platform_mutexlock[core_id]);
        if (s_platform[core_id]) {
            free(s_platform[core_id]);
            s_platform[core_id] = NULL;
#if !SOC_DEDIC_PERIPH_ALWAYS_ENABLE
            periph_module_disable(dedic_gpio_periph_signals.module); // disable the module if no GPIO channel is in use
#endif // !SOC_DEDIC_PERIPH_ALWAYS_ENABLE
        }
        _lock_release(&s_platform_mutexlock[core_id]);
    }
}

#if SOC_DEDIC_GPIO_HAS_INTERRUPT
static void dedic_gpio_default_isr(void *arg)
{
    bool need_yield = false;
    dedic_gpio_platform_t *platform = (dedic_gpio_platform_t *)arg;

    // get and clear interrupt status
    portENTER_CRITICAL_ISR(&platform->spinlock);
    uint32_t status = dedic_gpio_ll_get_interrupt_status(platform->dev);
    dedic_gpio_ll_clear_interrupt_status(platform->dev, status);
    portEXIT_CRITICAL_ISR(&platform->spinlock);

    // handle dedicated channels one by one
    while (status) {
        uint32_t channel = __builtin_ffs(status) - 1; // get the dedicated channel number that triggered the interrupt
        if (platform->cbs[channel]) {
            if (platform->cbs[channel](platform->in_bundles[channel], channel - platform->in_bundles[channel]->in_offset, platform->cb_args[channel])) {
                need_yield = true; // note that we need to yield at the end of the ISR
            }
        }
        status = status & (status - 1); // clear the rightmost set bit, e.g. 0b0110 -> 0b0100
    }

    if (need_yield) {
        portYIELD_FROM_ISR();
    }
}

static esp_err_t dedic_gpio_install_interrupt(uint32_t core_id)
{
    esp_err_t ret = ESP_OK;
    if (!s_platform[core_id]->intr_hdl) {
        // prevent installing the interrupt concurrently
        _lock_acquire(&s_platform_mutexlock[core_id]);
        if (!s_platform[core_id]->intr_hdl) {
            int isr_flags = 0;
            ret = esp_intr_alloc(dedic_gpio_periph_signals.irq, isr_flags, dedic_gpio_default_isr, s_platform[core_id], &s_platform[core_id]->intr_hdl);
            // clear pending interrupts
            uint32_t status = dedic_gpio_ll_get_interrupt_status(s_platform[core_id]->dev);
            dedic_gpio_ll_clear_interrupt_status(s_platform[core_id]->dev, status);
        }
        _lock_release(&s_platform_mutexlock[core_id]);
        ESP_GOTO_ON_ERROR(ret, err, TAG, "alloc interrupt failed");
    }

err:
    return ret;
}

static void dedic_gpio_uninstall_interrupt(uint32_t core_id)
{
    if (s_platform[core_id]->intr_hdl) {
        // prevent uninstalling the interrupt concurrently
        _lock_acquire(&s_platform_mutexlock[core_id]);
        if (s_platform[core_id]->intr_hdl) {
            esp_intr_free(s_platform[core_id]->intr_hdl);
            s_platform[core_id]->intr_hdl = NULL;
            // disable all interrupts
            dedic_gpio_ll_enable_interrupt(s_platform[core_id]->dev, ~0UL, false);
        }
        _lock_release(&s_platform_mutexlock[core_id]);
    }
}

static void dedic_gpio_set_interrupt(uint32_t core_id, uint32_t channel, dedic_gpio_intr_type_t type)
{
    dedic_gpio_ll_set_interrupt_type(s_platform[core_id]->dev, channel, type);
    if (type != DEDIC_GPIO_INTR_NONE) {
        dedic_gpio_ll_enable_interrupt(s_platform[core_id]->dev, 1 << channel, true);
    } else {
        dedic_gpio_ll_enable_interrupt(s_platform[core_id]->dev, 1 << channel, false);
    }
}
#endif // SOC_DEDIC_GPIO_HAS_INTERRUPT

esp_err_t dedic_gpio_new_bundle(const dedic_gpio_bundle_config_t *config, dedic_gpio_bundle_handle_t *ret_bundle)
{
    esp_err_t ret = ESP_OK;
    dedic_gpio_bundle_t *bundle = NULL;
    uint32_t out_mask = 0;
    uint32_t in_mask = 0;
    int core_id = esp_cpu_get_core_id(); // dedicated GPIO will be bound to the CPU core that invokes this API

    ESP_GOTO_ON_FALSE(config && ret_bundle, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
    ESP_GOTO_ON_FALSE(config->gpio_array && config->array_size > 0, ESP_ERR_INVALID_ARG, err, TAG, "invalid GPIO array or size");
    ESP_GOTO_ON_FALSE(config->flags.in_en || config->flags.out_en, ESP_ERR_INVALID_ARG, err, TAG, "no input/output mode specified");
    // lazy install s_platform[core_id]
    ESP_GOTO_ON_ERROR(dedic_gpio_build_platform(core_id), err, TAG, "build platform %d failed", core_id);

    size_t bundle_size = sizeof(dedic_gpio_bundle_t) + config->array_size * sizeof(config->gpio_array[0]);
    bundle = calloc(1, bundle_size);
    ESP_GOTO_ON_FALSE(bundle, ESP_ERR_NO_MEM, err, TAG, "no mem for bundle");

    // for performance reasons, we only search for contiguous channels
    uint32_t pattern = (1 << config->array_size) - 1;
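    // e.g. with array_size = 4 (hypothetical), pattern = 0b1111; the searches below
    // slide this window until they find 4 adjacent free channels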
    // configure outward channels
    uint32_t out_offset = 0;
    if (config->flags.out_en) {
        ESP_GOTO_ON_FALSE(config->array_size <= SOC_DEDIC_GPIO_OUT_CHANNELS_NUM, ESP_ERR_INVALID_ARG, err, TAG,
                          "array size(%d) exceeds maximum supported out channels(%d)", config->array_size, SOC_DEDIC_GPIO_OUT_CHANNELS_NUM);
        // prevent installing bundles concurrently
        portENTER_CRITICAL(&s_platform[core_id]->spinlock);
        for (size_t i = 0; i <= SOC_DEDIC_GPIO_OUT_CHANNELS_NUM - config->array_size; i++) {
            if ((s_platform[core_id]->out_occupied_mask & (pattern << i)) == 0) {
                out_mask = pattern << i;
                out_offset = i;
                break;
            }
        }
        if (out_mask) {
            s_platform[core_id]->out_occupied_mask |= out_mask;
#if SOC_DEDIC_GPIO_ALLOW_REG_ACCESS
            // always enable instruction access to output GPIOs, which performs better than register access
            dedic_gpio_ll_enable_instruction_access_out(s_platform[core_id]->dev, out_mask, true);
#endif
        }
        portEXIT_CRITICAL(&s_platform[core_id]->spinlock);
        ESP_GOTO_ON_FALSE(out_mask, ESP_ERR_NOT_FOUND, err, TAG, "no free outward channels on core[%d]", core_id);
        ESP_LOGD(TAG, "new outward bundle(%p) on core[%d], offset=%"PRIu32", mask(%"PRIx32")", bundle, core_id, out_offset, out_mask);
    }

    // configure inward channels
    uint32_t in_offset = 0;
    if (config->flags.in_en) {
        ESP_GOTO_ON_FALSE(config->array_size <= SOC_DEDIC_GPIO_IN_CHANNELS_NUM, ESP_ERR_INVALID_ARG, err, TAG,
                          "array size(%d) exceeds maximum supported in channels(%d)", config->array_size, SOC_DEDIC_GPIO_IN_CHANNELS_NUM);
        // prevent installing bundles concurrently
        portENTER_CRITICAL(&s_platform[core_id]->spinlock);
        for (size_t i = 0; i <= SOC_DEDIC_GPIO_IN_CHANNELS_NUM - config->array_size; i++) {
            if ((s_platform[core_id]->in_occupied_mask & (pattern << i)) == 0) {
                in_mask = pattern << i;
                in_offset = i;
                break;
            }
        }
        if (in_mask) {
            s_platform[core_id]->in_occupied_mask |= in_mask;
        }
        portEXIT_CRITICAL(&s_platform[core_id]->spinlock);
        ESP_GOTO_ON_FALSE(in_mask, ESP_ERR_NOT_FOUND, err, TAG, "no free inward channels on core[%d]", core_id);
        ESP_LOGD(TAG, "new inward bundle(%p) on core[%d], offset=%"PRIu32", mask(%"PRIx32")", bundle, core_id, in_offset, in_mask);
    }

    // route dedicated GPIO channel signals to the GPIO matrix
    if (config->flags.in_en) {
        for (size_t i = 0; i < config->array_size; i++) {
            gpio_hal_iomux_func_sel(GPIO_PIN_MUX_REG[config->gpio_array[i]], PIN_FUNC_GPIO);
            esp_rom_gpio_connect_in_signal(config->gpio_array[i], dedic_gpio_periph_signals.cores[core_id].in_sig_per_channel[in_offset + i], config->flags.in_invert);
        }
    }
    if (config->flags.out_en) {
        for (size_t i = 0; i < config->array_size; i++) {
            gpio_hal_iomux_func_sel(GPIO_PIN_MUX_REG[config->gpio_array[i]], PIN_FUNC_GPIO);
            esp_rom_gpio_connect_out_signal(config->gpio_array[i], dedic_gpio_periph_signals.cores[core_id].out_sig_per_channel[out_offset + i], config->flags.out_invert, false);
        }
#if !SOC_DEDIC_GPIO_OUT_AUTO_ENABLE
        dedic_gpio_cpu_ll_enable_output(s_platform[core_id]->out_occupied_mask);
#endif // !SOC_DEDIC_GPIO_OUT_AUTO_ENABLE
    }

    // it's safe to initialize bundle members without locks here
    bundle->core_id = core_id;
    bundle->out_mask = out_mask;
    bundle->in_mask = in_mask;
    bundle->out_offset = out_offset;
    bundle->in_offset = in_offset;
    bundle->nr_gpio = config->array_size;
    memcpy(bundle->gpio_array, config->gpio_array, config->array_size * sizeof(config->gpio_array[0]));

    *ret_bundle = bundle; // return the bundle instance
    return ESP_OK;

err:
    if (s_platform[core_id] && (out_mask || in_mask)) {
        portENTER_CRITICAL(&s_platform[core_id]->spinlock);
        s_platform[core_id]->out_occupied_mask &= ~out_mask;
        s_platform[core_id]->in_occupied_mask &= ~in_mask;
        portEXIT_CRITICAL(&s_platform[core_id]->spinlock);
    }
    if (bundle) {
        free(bundle);
    }
    return ret;
}
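
/*
 * Usage sketch (illustrative only; the GPIO numbers are hypothetical):
 *
 *     const int bundle_gpios[] = {2, 3, 4, 5};
 *     dedic_gpio_bundle_handle_t bundle = NULL;
 *     dedic_gpio_bundle_config_t bundle_config = {
 *         .gpio_array = bundle_gpios,
 *         .array_size = sizeof(bundle_gpios) / sizeof(bundle_gpios[0]),
 *         .flags = {
 *             .out_en = 1,
 *         },
 *     };
 *     ESP_ERROR_CHECK(dedic_gpio_new_bundle(&bundle_config, &bundle));
 *
 * The bundle is bound to the core that calls dedic_gpio_new_bundle(), so the
 * call should run on the core that will later drive the bundle.
 */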

esp_err_t dedic_gpio_del_bundle(dedic_gpio_bundle_handle_t bundle)
{
    esp_err_t ret = ESP_OK;
    bool recycle_all = false;
    ESP_GOTO_ON_FALSE(bundle, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");

    uint32_t core_id = esp_cpu_get_core_id();
    ESP_GOTO_ON_FALSE(core_id == bundle->core_id, ESP_FAIL, err, TAG, "del bundle on wrong CPU");

    portENTER_CRITICAL(&s_platform[core_id]->spinlock);
    s_platform[core_id]->out_occupied_mask &= ~(bundle->out_mask);
    s_platform[core_id]->in_occupied_mask &= ~(bundle->in_mask);
    // if both masks are back to their initial values, no bundle is left on this core
    if (s_platform[core_id]->in_occupied_mask == (UINT32_MAX & ~((1 << SOC_DEDIC_GPIO_IN_CHANNELS_NUM) - 1)) &&
        s_platform[core_id]->out_occupied_mask == (UINT32_MAX & ~((1 << SOC_DEDIC_GPIO_OUT_CHANNELS_NUM) - 1))) {
        recycle_all = true;
    }
    portEXIT_CRITICAL(&s_platform[core_id]->spinlock);

    free(bundle);

    if (recycle_all) {
#if SOC_DEDIC_GPIO_HAS_INTERRUPT
        dedic_gpio_uninstall_interrupt(core_id);
#endif
        dedic_gpio_break_platform(core_id);
    }

err:
    return ret;
}

esp_err_t dedic_gpio_get_out_mask(dedic_gpio_bundle_handle_t bundle, uint32_t *mask)
{
    esp_err_t ret = ESP_OK;
    ESP_GOTO_ON_FALSE(bundle && mask, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
    *mask = bundle->out_mask;
err:
    return ret;
}

esp_err_t dedic_gpio_get_in_mask(dedic_gpio_bundle_handle_t bundle, uint32_t *mask)
{
    esp_err_t ret = ESP_OK;
    ESP_GOTO_ON_FALSE(bundle && mask, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
    *mask = bundle->in_mask;
err:
    return ret;
}

esp_err_t dedic_gpio_get_out_offset(dedic_gpio_bundle_handle_t bundle, uint32_t *offset)
{
    esp_err_t ret = ESP_OK;
    ESP_GOTO_ON_FALSE(bundle && offset, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
    *offset = bundle->out_offset;
err:
    return ret;
}

esp_err_t dedic_gpio_get_in_offset(dedic_gpio_bundle_handle_t bundle, uint32_t *offset)
{
    esp_err_t ret = ESP_OK;
    ESP_GOTO_ON_FALSE(bundle && offset, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
    *offset = bundle->in_offset;
err:
    return ret;
}

void dedic_gpio_bundle_write(dedic_gpio_bundle_handle_t bundle, uint32_t mask, uint32_t value)
{
    // For performance reasons, we don't validate the parameters here,
    // nor do we check whether we're running on the correct CPU core (i.e. bundle->core_id == current core ID)
    dedic_gpio_cpu_ll_write_mask(bundle->out_mask & (mask << bundle->out_offset), value << bundle->out_offset);
}
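
/*
 * Illustration (not from the original source): for a bundle with
 * out_offset = 2 and out_mask = 0x3C, calling
 * dedic_gpio_bundle_write(bundle, 0x0F, 0x05) shifts both mask and value left
 * by 2, so only hardware channels 2-5 are touched and they are driven to 0101.
 */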

uint32_t dedic_gpio_bundle_read_out(dedic_gpio_bundle_handle_t bundle)
{
    // For performance reasons, we don't validate the parameters here,
    // nor do we check whether we're running on the correct CPU core (i.e. bundle->core_id == current core ID)
    uint32_t value = dedic_gpio_cpu_ll_read_out();
    return (value & bundle->out_mask) >> (bundle->out_offset);
}

uint32_t dedic_gpio_bundle_read_in(dedic_gpio_bundle_handle_t bundle)
{
    // For performance reasons, we don't validate the parameters here,
    // nor do we check whether we're running on the correct CPU core (i.e. bundle->core_id == current core ID)
    uint32_t value = dedic_gpio_cpu_ll_read_in();
    return (value & bundle->in_mask) >> (bundle->in_offset);
}

#if SOC_DEDIC_GPIO_HAS_INTERRUPT
esp_err_t dedic_gpio_bundle_set_interrupt_and_callback(dedic_gpio_bundle_handle_t bundle, uint32_t mask, dedic_gpio_intr_type_t intr_type, dedic_gpio_isr_callback_t cb_isr, void *cb_args)
{
    esp_err_t ret = ESP_OK;
    ESP_GOTO_ON_FALSE(bundle, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
    int core_id = esp_cpu_get_core_id();
    // lazy install interrupt service
    ESP_GOTO_ON_ERROR(dedic_gpio_install_interrupt(core_id), err, TAG, "allocate interrupt on core %d failed", core_id);

    uint32_t channel_mask = bundle->in_mask & (mask << bundle->in_offset);
    uint32_t channel = 0;
    while (channel_mask) {
        channel = __builtin_ffs(channel_mask) - 1;
        portENTER_CRITICAL(&s_platform[core_id]->spinlock);
        dedic_gpio_set_interrupt(core_id, channel, intr_type);
        portEXIT_CRITICAL(&s_platform[core_id]->spinlock);

        s_platform[core_id]->cbs[channel] = cb_isr;
        s_platform[core_id]->cb_args[channel] = cb_args;
        s_platform[core_id]->in_bundles[channel] = bundle;
        channel_mask = channel_mask & (channel_mask - 1); // clear the rightmost set bit
    }

err:
    return ret;
}
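
/*
 * Usage sketch (illustrative; the callback name and chosen interrupt type are
 * assumptions, not part of this file):
 *
 *     static bool IRAM_ATTR on_bundle_event(dedic_gpio_bundle_handle_t bundle,
 *                                           uint32_t index, void *args)
 *     {
 *         // index is relative to the bundle (channel - in_offset)
 *         return false; // return true if a higher-priority task was woken
 *     }
 *
 *     ESP_ERROR_CHECK(dedic_gpio_bundle_set_interrupt_and_callback(
 *         bundle, BIT(0), DEDIC_GPIO_INTR_POS_EDGE, on_bundle_event, NULL));
 */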
#endif // SOC_DEDIC_GPIO_HAS_INTERRUPT