// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// #define LOG_LOCAL_LEVEL ESP_LOG_DEBUG
#include <stdlib.h>
#include <string.h>
#include <sys/lock.h>
#include "sdkconfig.h"
#include "esp_compiler.h"
#include "esp_heap_caps.h"
#include "esp_intr_alloc.h"
#include "esp_log.h"
#include "soc/soc_caps.h"
#include "soc/gpio_periph.h"
#include "soc/io_mux_reg.h"
#include "hal/cpu_hal.h"
#include "hal/cpu_ll.h"
#include "hal/gpio_hal.h"
#include "driver/periph_ctrl.h"
#include "esp_rom_gpio.h"
#include "freertos/FreeRTOS.h"
#include "driver/dedic_gpio.h"
#include "soc/dedic_gpio_periph.h"
#if SOC_DEDIC_GPIO_ALLOW_REG_ACCESS
#include "soc/dedic_gpio_struct.h"
#include "hal/dedic_gpio_ll.h"
#endif


static const char *TAG = "dedic_gpio";

#define DEDIC_CHECK(a, msg, tag, ret, ...)                                        \
    do {                                                                          \
        if (unlikely(!(a))) {                                                     \
            ESP_LOGE(TAG, "%s(%d): " msg, __FUNCTION__, __LINE__, ##__VA_ARGS__); \
            ret_code = ret;                                                       \
            goto tag;                                                             \
        }                                                                         \
    } while (0)
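
// Note: DEDIC_CHECK expects the enclosing function to declare a local
// `esp_err_t ret_code` and to provide the goto label passed as `tag`
// (every function below uses an `err:` label for the error return path).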

typedef struct dedic_gpio_platform_t dedic_gpio_platform_t;
typedef struct dedic_gpio_bundle_t dedic_gpio_bundle_t;

// Dedicated GPIO driver platform, GPIO bundles will be installed onto it
static dedic_gpio_platform_t *s_platform[SOC_CPU_CORES_NUM];
// platform level mutex lock
static _lock_t s_platform_mutexlock[SOC_CPU_CORES_NUM];

struct dedic_gpio_platform_t {
    portMUX_TYPE spinlock;      // Spinlock, stops GPIO channels from accessing common resources concurrently
    uint32_t out_occupied_mask; // mask of output channels that are already occupied
    uint32_t in_occupied_mask;  // mask of input channels that are already occupied
#if SOC_DEDIC_GPIO_HAS_INTERRUPT
    intr_handle_t intr_hdl;     // interrupt handle
    dedic_gpio_isr_callback_t cbs[SOC_DEDIC_GPIO_IN_CHANNELS_NUM];   // array of callback functions, one per input channel
    void *cb_args[SOC_DEDIC_GPIO_IN_CHANNELS_NUM];                   // array of callback arguments, one per input channel
    dedic_gpio_bundle_t *in_bundles[SOC_DEDIC_GPIO_IN_CHANNELS_NUM]; // bundle that each input channel belongs to
#endif
#if SOC_DEDIC_GPIO_ALLOW_REG_ACCESS
    dedic_dev_t *dev;
#endif
};

struct dedic_gpio_bundle_t {
    uint32_t core_id;    // CPU core ID, a GPIO bundle must be installed on a specific CPU core
    uint32_t out_mask;   // mask of output channels in the bank
    uint32_t in_mask;    // mask of input channels in the bank
    uint32_t out_offset; // offset in the bank (seen from output channel)
    uint32_t in_offset;  // offset in the bank (seen from input channel)
    size_t nr_gpio;      // number of GPIOs in the gpio_array
    int gpio_array[0];   // array of GPIO numbers (configured by user)
};
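// Note: gpio_array is a zero-length (flexible) array member, so each bundle is
// allocated as one block of sizeof(dedic_gpio_bundle_t) plus array_size GPIO
// numbers (see the calloc in dedic_gpio_new_bundle below).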

static esp_err_t dedic_gpio_build_platform(uint32_t core_id)
{
    esp_err_t ret_code = ESP_OK;
    if (!s_platform[core_id]) {
        // prevent building platform concurrently
        _lock_acquire(&s_platform_mutexlock[core_id]);
        if (!s_platform[core_id]) {
            s_platform[core_id] = calloc(1, sizeof(dedic_gpio_platform_t));
            if (s_platform[core_id]) {
                // initialize platform members
                s_platform[core_id]->spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
#if SOC_DEDIC_GPIO_ALLOW_REG_ACCESS
                s_platform[core_id]->dev = &DEDIC_GPIO;
#endif
                periph_module_enable(dedic_gpio_periph_signals.module); // enable APB clock to peripheral
            }
        }
        _lock_release(&s_platform_mutexlock[core_id]);

        DEDIC_CHECK(s_platform[core_id], "no mem for s_platform[%d]", err, ESP_ERR_NO_MEM, core_id);
        ESP_LOGD(TAG, "build platform on core[%d] at %p", core_id, s_platform[core_id]);
    }

err:
    return ret_code;
}

static void dedic_gpio_break_platform(uint32_t core_id)
{
    if (s_platform[core_id]) {
        // prevent breaking platform concurrently
        _lock_acquire(&s_platform_mutexlock[core_id]);
        if (s_platform[core_id]) {
            free(s_platform[core_id]);
            s_platform[core_id] = NULL;
            periph_module_disable(dedic_gpio_periph_signals.module); // disable module if no GPIO channel is being used
        }
        _lock_release(&s_platform_mutexlock[core_id]);
    }
}

#if SOC_DEDIC_GPIO_HAS_INTERRUPT
static void dedic_gpio_default_isr(void *arg)
{
    bool need_yield = false;
    dedic_gpio_platform_t *platform = (dedic_gpio_platform_t *)arg;

    // get and clear interrupt status
    portENTER_CRITICAL_ISR(&platform->spinlock);
    uint32_t status = dedic_gpio_ll_get_interrupt_status(platform->dev);
    dedic_gpio_ll_clear_interrupt_status(platform->dev, status);
    portEXIT_CRITICAL_ISR(&platform->spinlock);

    // handle dedicated channels one by one
    while (status) {
        uint32_t channel = __builtin_ffs(status) - 1; // get the dedicated channel number that triggered the interrupt
        if (platform->cbs[channel]) {
            if (platform->cbs[channel](platform->in_bundles[channel], channel - platform->in_bundles[channel]->in_offset, platform->cb_args[channel])) {
                need_yield = true; // note that we need to yield at the end of the ISR
            }
        }
        status = status & (status - 1); // clear the rightmost set bit
    }

    if (need_yield) {
        portYIELD_FROM_ISR();
    }
}

static esp_err_t dedic_gpio_install_interrupt(uint32_t core_id)
{
    esp_err_t ret_code = ESP_OK;
    if (!s_platform[core_id]->intr_hdl) {
        // prevent installing the interrupt concurrently
        _lock_acquire(&s_platform_mutexlock[core_id]);
        if (!s_platform[core_id]->intr_hdl) {
            int isr_flags = 0;
            ret_code = esp_intr_alloc(dedic_gpio_periph_signals.irq, isr_flags, dedic_gpio_default_isr, s_platform[core_id], &s_platform[core_id]->intr_hdl);
            // clear pending interrupt
            uint32_t status = dedic_gpio_ll_get_interrupt_status(s_platform[core_id]->dev);
            dedic_gpio_ll_clear_interrupt_status(s_platform[core_id]->dev, status);
        }
        _lock_release(&s_platform_mutexlock[core_id]);
        DEDIC_CHECK(ret_code == ESP_OK, "alloc interrupt failed", err, ret_code);
    }

err:
    return ret_code;
}

static void dedic_gpio_uninstall_interrupt(uint32_t core_id)
{
    if (s_platform[core_id]->intr_hdl) {
        // prevent uninstalling the interrupt concurrently
        _lock_acquire(&s_platform_mutexlock[core_id]);
        if (s_platform[core_id]->intr_hdl) {
            esp_intr_free(s_platform[core_id]->intr_hdl);
            s_platform[core_id]->intr_hdl = NULL;
            // disable all interrupts
            dedic_gpio_ll_enable_interrupt(s_platform[core_id]->dev, ~0UL, false);
        }
        _lock_release(&s_platform_mutexlock[core_id]);
    }
}

static void dedic_gpio_set_interrupt(uint32_t core_id, uint32_t channel, dedic_gpio_intr_type_t type)
{
    dedic_gpio_ll_set_interrupt_type(s_platform[core_id]->dev, channel, type);
    if (type != DEDIC_GPIO_INTR_NONE) {
        dedic_gpio_ll_enable_interrupt(s_platform[core_id]->dev, 1 << channel, true);
    } else {
        dedic_gpio_ll_enable_interrupt(s_platform[core_id]->dev, 1 << channel, false);
    }
}
#endif // SOC_DEDIC_GPIO_HAS_INTERRUPT

esp_err_t dedic_gpio_new_bundle(const dedic_gpio_bundle_config_t *config, dedic_gpio_bundle_handle_t *ret_bundle)
{
    esp_err_t ret_code = ESP_OK;
    dedic_gpio_bundle_t *bundle = NULL;
    uint32_t out_mask = 0;
    uint32_t in_mask = 0;
    uint32_t core_id = cpu_hal_get_core_id(); // dedicated GPIO will be bound to the CPU core that invokes this API

    DEDIC_CHECK(config && ret_bundle, "invalid argument", err, ESP_ERR_INVALID_ARG);
    DEDIC_CHECK(config->gpio_array && config->array_size > 0, "invalid GPIO array or size", err, ESP_ERR_INVALID_ARG);
    DEDIC_CHECK(config->flags.in_en || config->flags.out_en, "no input/output mode specified", err, ESP_ERR_INVALID_ARG);
    // lazy install s_platform[core_id]
    DEDIC_CHECK(dedic_gpio_build_platform(core_id) == ESP_OK, "build platform %d failed", err, ESP_FAIL, core_id);

    size_t bundle_size = sizeof(dedic_gpio_bundle_t) + config->array_size * sizeof(config->gpio_array[0]);
    bundle = calloc(1, bundle_size);
    DEDIC_CHECK(bundle, "no mem for bundle", err, ESP_ERR_NO_MEM);

    // for performance reasons, we only search for contiguous channels
    uint32_t pattern = (1 << config->array_size) - 1;
    // configure outward channels
    uint32_t out_offset = 0;
    if (config->flags.out_en) {
        DEDIC_CHECK(SOC_DEDIC_GPIO_OUT_CHANNELS_NUM >= config->array_size, "array size(%d) exceeds maximum supported out channels(%d)",
                    err, ESP_ERR_INVALID_ARG, config->array_size, SOC_DEDIC_GPIO_OUT_CHANNELS_NUM);
        // prevent installing the bundle concurrently
        portENTER_CRITICAL(&s_platform[core_id]->spinlock);
        for (size_t i = 0; i <= SOC_DEDIC_GPIO_OUT_CHANNELS_NUM - config->array_size; i++) {
            if ((s_platform[core_id]->out_occupied_mask & (pattern << i)) == 0) {
                out_mask = pattern << i;
                out_offset = i;
                break;
            }
        }
        if (out_mask) {
            s_platform[core_id]->out_occupied_mask |= out_mask;
#if SOC_DEDIC_GPIO_ALLOW_REG_ACCESS
            // always enable instruction access to output GPIOs, which has better performance than register access
            dedic_gpio_ll_enable_instruction_access_out(s_platform[core_id]->dev, out_mask, true);
#endif
        }
        portEXIT_CRITICAL(&s_platform[core_id]->spinlock);
        DEDIC_CHECK(out_mask, "no free outward channels on core[%d]", err, ESP_ERR_NOT_FOUND, core_id);
        ESP_LOGD(TAG, "new outward bundle(%p) on core[%d], offset=%d, mask(%x)", bundle, core_id, out_offset, out_mask);
    }

    // configure inward channels
    uint32_t in_offset = 0;
    if (config->flags.in_en) {
        DEDIC_CHECK(SOC_DEDIC_GPIO_IN_CHANNELS_NUM >= config->array_size, "array size(%d) exceeds maximum supported in channels(%d)",
                    err, ESP_ERR_INVALID_ARG, config->array_size, SOC_DEDIC_GPIO_IN_CHANNELS_NUM);
        // prevent installing the bundle concurrently
        portENTER_CRITICAL(&s_platform[core_id]->spinlock);
        for (size_t i = 0; i <= SOC_DEDIC_GPIO_IN_CHANNELS_NUM - config->array_size; i++) {
            if ((s_platform[core_id]->in_occupied_mask & (pattern << i)) == 0) {
                in_mask = pattern << i;
                in_offset = i;
                break;
            }
        }
        if (in_mask) {
            s_platform[core_id]->in_occupied_mask |= in_mask;
        }
        portEXIT_CRITICAL(&s_platform[core_id]->spinlock);
        DEDIC_CHECK(in_mask, "no free inward channels on core[%d]", err, ESP_ERR_NOT_FOUND, core_id);
        ESP_LOGD(TAG, "new inward bundle(%p) on core[%d], offset=%d, mask(%x)", bundle, core_id, in_offset, in_mask);
    }

    // route dedicated GPIO channel signals to GPIO matrix
    if (config->flags.in_en) {
        for (size_t i = 0; i < config->array_size; i++) {
            gpio_hal_iomux_func_sel(GPIO_PIN_MUX_REG[config->gpio_array[i]], PIN_FUNC_GPIO);
            esp_rom_gpio_connect_in_signal(config->gpio_array[i], dedic_gpio_periph_signals.cores[core_id].in_sig_per_channel[in_offset + i], config->flags.in_invert);
        }
    }
    if (config->flags.out_en) {
        for (size_t i = 0; i < config->array_size; i++) {
            gpio_hal_iomux_func_sel(GPIO_PIN_MUX_REG[config->gpio_array[i]], PIN_FUNC_GPIO);
            esp_rom_gpio_connect_out_signal(config->gpio_array[i], dedic_gpio_periph_signals.cores[core_id].out_sig_per_channel[out_offset + i], config->flags.out_invert, false);
        }
    }

    // it's safe to initialize bundle members without locks here
    bundle->core_id = core_id;
    bundle->out_mask = out_mask;
    bundle->in_mask = in_mask;
    bundle->out_offset = out_offset;
    bundle->in_offset = in_offset;
    bundle->nr_gpio = config->array_size;
    memcpy(bundle->gpio_array, config->gpio_array, config->array_size * sizeof(config->gpio_array[0]));

    *ret_bundle = bundle; // return bundle instance
    return ESP_OK;

err:
    if (s_platform[core_id] && (out_mask || in_mask)) {
        portENTER_CRITICAL(&s_platform[core_id]->spinlock);
        s_platform[core_id]->out_occupied_mask &= ~out_mask;
        s_platform[core_id]->in_occupied_mask &= ~in_mask;
        portEXIT_CRITICAL(&s_platform[core_id]->spinlock);
    }
    if (bundle) {
        free(bundle);
    }
    return ret_code;
}
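
/*
 * Example usage (a minimal sketch; the GPIO numbers below are arbitrary
 * placeholders, pick pins that are free on your board):
 *
 *     const int bundle_gpios[] = {2, 3};
 *     dedic_gpio_bundle_handle_t bundle = NULL;
 *     dedic_gpio_bundle_config_t config = {
 *         .gpio_array = bundle_gpios,
 *         .array_size = sizeof(bundle_gpios) / sizeof(bundle_gpios[0]),
 *         .flags = {
 *             .out_en = 1,
 *         },
 *     };
 *     ESP_ERROR_CHECK(dedic_gpio_new_bundle(&config, &bundle));
 *     // bit 0 of the bundle maps to bundle_gpios[0], bit 1 to bundle_gpios[1]
 *     dedic_gpio_bundle_write(bundle, 0x3, 0x3); // drive both GPIOs high
 */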

esp_err_t dedic_gpio_del_bundle(dedic_gpio_bundle_handle_t bundle)
{
    esp_err_t ret_code = ESP_OK;
    bool recycle_all = false;
    DEDIC_CHECK(bundle, "invalid argument", err, ESP_ERR_INVALID_ARG);

    uint32_t core_id = cpu_hal_get_core_id();
    DEDIC_CHECK(core_id == bundle->core_id, "del bundle on wrong CPU", err, ESP_FAIL);

    portENTER_CRITICAL(&s_platform[core_id]->spinlock);
    s_platform[core_id]->out_occupied_mask &= ~(bundle->out_mask);
    s_platform[core_id]->in_occupied_mask &= ~(bundle->in_mask);
    if (!s_platform[core_id]->in_occupied_mask && !s_platform[core_id]->out_occupied_mask) {
        recycle_all = true;
    }
    portEXIT_CRITICAL(&s_platform[core_id]->spinlock);

    free(bundle);

    if (recycle_all) {
#if SOC_DEDIC_GPIO_HAS_INTERRUPT
        dedic_gpio_uninstall_interrupt(core_id);
#endif
        dedic_gpio_break_platform(core_id);
    }

err:
    return ret_code;
}

esp_err_t dedic_gpio_get_out_mask(dedic_gpio_bundle_handle_t bundle, uint32_t *mask)
{
    esp_err_t ret_code = ESP_OK;
    DEDIC_CHECK(bundle && mask, "invalid argument", err, ESP_ERR_INVALID_ARG);
    *mask = bundle->out_mask;
err:
    return ret_code;
}

esp_err_t dedic_gpio_get_in_mask(dedic_gpio_bundle_handle_t bundle, uint32_t *mask)
{
    esp_err_t ret_code = ESP_OK;
    DEDIC_CHECK(bundle && mask, "invalid argument", err, ESP_ERR_INVALID_ARG);
    *mask = bundle->in_mask;
err:
    return ret_code;
}

void dedic_gpio_bundle_write(dedic_gpio_bundle_handle_t bundle, uint32_t mask, uint32_t value)
{
    // For performance reasons, we don't validate the parameters here.
    // We don't even check whether we're running on the correct CPU core (i.e. bundle->core_id == current core_id).
    cpu_ll_write_dedic_gpio_mask(bundle->out_mask & (mask << bundle->out_offset), value << bundle->out_offset);
}
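
/*
 * Example (illustrative values): for a bundle that occupies output channels
 * 4..7 (out_mask = 0xF0, out_offset = 4), dedic_gpio_bundle_write(bundle, 0x3, 0x2)
 * only touches channels 4 and 5, driving them to 0 and 1 respectively.
 */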

uint32_t dedic_gpio_bundle_read_out(dedic_gpio_bundle_handle_t bundle)
{
    // For performance reasons, we don't validate the parameters here.
    // We don't even check whether we're running on the correct CPU core (i.e. bundle->core_id == current core_id).
    uint32_t value = cpu_ll_read_dedic_gpio_out();
    return (value & bundle->out_mask) >> (bundle->out_offset);
}

uint32_t dedic_gpio_bundle_read_in(dedic_gpio_bundle_handle_t bundle)
{
    // For performance reasons, we don't validate the parameters here.
    // We don't even check whether we're running on the correct CPU core (i.e. bundle->core_id == current core_id).
    uint32_t value = cpu_ll_read_dedic_gpio_in();
    return (value & bundle->in_mask) >> (bundle->in_offset);
}
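
/*
 * Example (illustrative values): if a bundle occupies input channels 4..7
 * (in_mask = 0xF0, in_offset = 4) and the raw CPU read returns 0xA0,
 * dedic_gpio_bundle_read_in() yields (0xA0 & 0xF0) >> 4 = 0xA, i.e. the
 * bundle-relative levels of the four GPIOs.
 */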

#if SOC_DEDIC_GPIO_HAS_INTERRUPT
esp_err_t dedic_gpio_bundle_set_interrupt_and_callback(dedic_gpio_bundle_handle_t bundle, uint32_t mask, dedic_gpio_intr_type_t intr_type, dedic_gpio_isr_callback_t cb_isr, void *cb_args)
{
    esp_err_t ret_code = ESP_OK;
    DEDIC_CHECK(bundle, "invalid argument", err, ESP_ERR_INVALID_ARG);
    uint32_t core_id = cpu_hal_get_core_id();
    // lazy alloc interrupt
    DEDIC_CHECK(dedic_gpio_install_interrupt(core_id) == ESP_OK, "allocate interrupt on core %d failed", err, ESP_FAIL, core_id);

    uint32_t channel_mask = bundle->in_mask & (mask << bundle->in_offset);
    uint32_t channel = 0;
    while (channel_mask) {
        channel = __builtin_ffs(channel_mask) - 1;
        portENTER_CRITICAL(&s_platform[core_id]->spinlock);
        dedic_gpio_set_interrupt(core_id, channel, intr_type);
        portEXIT_CRITICAL(&s_platform[core_id]->spinlock);

        s_platform[core_id]->cbs[channel] = cb_isr;
        s_platform[core_id]->cb_args[channel] = cb_args;
        s_platform[core_id]->in_bundles[channel] = bundle;
        channel_mask = channel_mask & (channel_mask - 1); // clear the rightmost set bit
    }

err:
    return ret_code;
}
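
/*
 * Example usage (a minimal sketch; `on_edge` and `my_ctx` are hypothetical
 * names used only for illustration):
 *
 *     static bool on_edge(dedic_gpio_bundle_handle_t bundle, uint32_t index, void *args)
 *     {
 *         // `index` is relative to the bundle (0 == gpio_array[0])
 *         return false; // no high-priority task woken, no yield needed
 *     }
 *
 *     ESP_ERROR_CHECK(dedic_gpio_bundle_set_interrupt_and_callback(
 *         bundle, 0x1, DEDIC_GPIO_INTR_POS_EDGE, on_edge, my_ctx));
 */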
#endif // SOC_DEDIC_GPIO_HAS_INTERRUPT