/*
 * SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <stdio.h>
#include <string.h>  // for memcpy
#include <stdlib.h>  // for malloc/free/realloc
#include <stdatomic.h>
#include <sys/fcntl.h>
#include <sys/param.h>
#include <sys/queue.h>
#include "arpa/inet.h" // for ntohs, etc.
#include "errno.h"

#include "esp_vfs_l2tap.h"

#include "lwip/prot/ethernet.h" // Ethernet headers
#include "esp_vfs.h"
#include "esp_log.h"
#include "esp_check.h"
#include "esp_netif.h"
#include "esp_eth_driver.h"

#include "freertos/FreeRTOS.h"
#include "freertos/semphr.h"
#include "freertos/queue.h"


#define INVALID_FD (-1)

#define L2TAP_MAX_FDS CONFIG_ESP_NETIF_L2_TAP_MAX_FDS
#define RX_QUEUE_MAX_SIZE CONFIG_ESP_NETIF_L2_TAP_RX_QUEUE_SIZE

typedef enum {
    L2TAP_SOCK_STATE_READY,
    L2TAP_SOCK_STATE_OPENED,
    L2TAP_SOCK_STATE_CLOSING
} l2tap_socket_state_t;

typedef struct {
    _Atomic l2tap_socket_state_t state;
    bool non_blocking;
    l2tap_iodriver_handle driver_handle;
    uint16_t ethtype_filter;
    QueueHandle_t rx_queue;
    SemaphoreHandle_t close_done_sem;

    esp_err_t (*driver_transmit)(l2tap_iodriver_handle io_handle, void *buffer, size_t len);
    void (*driver_free_rx_buffer)(l2tap_iodriver_handle io_handle, void *buffer);
} l2tap_context_t;

typedef struct {
    void *buff;
    size_t len;
} frame_queue_entry_t;

typedef struct {
    esp_vfs_select_sem_t select_sem;
    fd_set *readfds;
    fd_set *writefds;
    fd_set *errorfds;
    fd_set readfds_orig;
    fd_set writefds_orig;
    fd_set errorfds_orig;
} l2tap_select_args_t;

typedef enum {
    L2TAP_SELECT_READ_NOTIF,
    L2TAP_SELECT_WRITE_NOTIF,
    L2TAP_SELECT_ERR_NOTIF
} l2tap_select_notif_e;

static l2tap_context_t s_l2tap_sockets[L2TAP_MAX_FDS] = {0};

static bool s_is_registered = false;

static portMUX_TYPE s_critical_section_lock = portMUX_INITIALIZER_UNLOCKED;

static l2tap_select_args_t **s_registered_selects = NULL;
static int32_t s_registered_select_cnt = 0;

static const char *TAG = "vfs_l2tap";

static void l2tap_select_notify(int fd, l2tap_select_notif_e select_notif);

/* ================== Utils ====================== */
static esp_err_t init_rx_queue(l2tap_context_t *l2tap_socket)
{
    l2tap_socket->rx_queue = xQueueCreate(RX_QUEUE_MAX_SIZE, sizeof(frame_queue_entry_t));
    ESP_RETURN_ON_FALSE(l2tap_socket->rx_queue, ESP_ERR_NO_MEM, TAG, "create work queue failed");
    return ESP_OK;
}

static esp_err_t push_rx_queue(l2tap_context_t *l2tap_socket, void *buff, size_t len)
{
    frame_queue_entry_t frame_info;

    frame_info.buff = buff;
    frame_info.len = len;
    // try to send to the queue and check whether the queue is full
    if (xQueueSend(l2tap_socket->rx_queue, &frame_info, 0) != pdTRUE) {
        return ESP_ERR_NO_MEM;
    }
    return ESP_OK;
}

static ssize_t pop_rx_queue(l2tap_context_t *l2tap_socket, void *buff, size_t len)
{
    TickType_t timeout = portMAX_DELAY;
    if (l2tap_socket->non_blocking) {
        timeout = 0;
    }

    frame_queue_entry_t frame_info;
    if (xQueueReceive(l2tap_socket->rx_queue, &frame_info, timeout) == pdTRUE) {
        // an empty queue entry was pushed, indicating the fd is going to be closed
        if (frame_info.len == 0) {
            // indicate to the "clean task" that the task waiting on the queue was unblocked
            push_rx_queue(l2tap_socket, NULL, 0);
            goto err;
        }

        if (len > frame_info.len) {
            len = frame_info.len;
        }
        memcpy(buff, frame_info.buff, len);
        l2tap_socket->driver_free_rx_buffer(l2tap_socket->driver_handle, frame_info.buff);
    } else {
        goto err;
    }

    return len;
err:
    return -1;
}

static bool rx_queue_empty(l2tap_context_t *l2tap_socket)
{
    return (uxQueueMessagesWaiting(l2tap_socket->rx_queue) == 0);
}

static void flush_rx_queue(l2tap_context_t *l2tap_socket)
{
    frame_queue_entry_t frame_info;
    while (xQueueReceive(l2tap_socket->rx_queue, &frame_info, 0) == pdTRUE) {
        if (frame_info.len > 0) {
            free(frame_info.buff);
        }
    }
}

static void delete_rx_queue(l2tap_context_t *l2tap_socket)
{
    vQueueDelete(l2tap_socket->rx_queue);
    l2tap_socket->rx_queue = NULL;
}

static inline void l2tap_lock(void)
{
    portENTER_CRITICAL(&s_critical_section_lock);
}

static inline void l2tap_unlock(void)
{
    portEXIT_CRITICAL(&s_critical_section_lock);
}

static inline void default_free_rx_buffer(l2tap_iodriver_handle io_handle, void *buffer)
{
    free(buffer);
}

/* ================== ESP NETIF L2 TAP intf ====================== */
esp_err_t esp_vfs_l2tap_eth_filter(l2tap_iodriver_handle driver_handle, void *buff, size_t *size)
{
    struct eth_hdr *eth_header = buff;
    uint16_t eth_type = ntohs(eth_header->type);

    for (int i = 0; i < L2TAP_MAX_FDS; i++) {
        if (atomic_load(&s_l2tap_sockets[i].state) == L2TAP_SOCK_STATE_OPENED) {
            l2tap_lock(); // the read of the socket config needs to be atomic since it can be manipulated from another task
            if (s_l2tap_sockets[i].driver_handle == driver_handle && (s_l2tap_sockets[i].ethtype_filter == eth_type ||
                    // An IEEE 802.2 frame is identified by its length/type field, which does not exceed the IEEE 802.3
                    // maximum length (Ethernet II type IDs start above this value).
                    // Note that IEEE 802.2 LLC resolution is expected to be performed by the upper stream app.
                    (s_l2tap_sockets[i].ethtype_filter <= ETH_IEEE802_3_MAX_LEN && eth_type <= ETH_IEEE802_3_MAX_LEN))) {
                l2tap_unlock();
                if (push_rx_queue(&s_l2tap_sockets[i], buff, *size) != ESP_OK) {
                    // just tail-drop when the queue is full
                    s_l2tap_sockets[i].driver_free_rx_buffer(s_l2tap_sockets[i].driver_handle, buff);
                    ESP_LOGD(TAG, "fd %d rx queue is full", i);
                }
                l2tap_lock();
                if (s_registered_select_cnt) {
                    l2tap_select_notify(i, L2TAP_SELECT_READ_NOTIF);
                }
                l2tap_unlock();
                *size = 0; // the frame is not passed to the IP stack when size is set to 0
            } else {
                l2tap_unlock();
            }
        }
    }
    return ESP_OK;
}
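/*
 * Illustrative sketch (not part of this file; names such as my_driver_rx() and my_stack_input()
 * are hypothetical placeholders): an I/O driver's receive path is expected to run each frame
 * through esp_vfs_l2tap_eth_filter() before handing it to the IP stack. When a L2 TAP socket
 * consumes the frame, the filter sets size to 0 and takes over buffer ownership (the buffer is
 * later released via driver_free_rx_buffer), so the driver must not free or forward it then.
 *
 *   static void my_driver_rx(l2tap_iodriver_handle drv_handle, void *buffer, size_t len)
 *   {
 *       esp_vfs_l2tap_eth_filter(drv_handle, buffer, &len);
 *       if (len > 0) {
 *           my_stack_input(buffer, len); // frame was not consumed by any L2 TAP socket
 *       }
 *   }
 */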

/* ====================== vfs ====================== */
static int l2tap_open(const char *path, int flags, int mode)
{
    int fd;

    // Find free fd and initialize
    for (fd = 0; fd < L2TAP_MAX_FDS; fd++) {
        l2tap_socket_state_t exp_state = L2TAP_SOCK_STATE_READY;
        if (atomic_compare_exchange_strong(&s_l2tap_sockets[fd].state, &exp_state,
                                           L2TAP_SOCK_STATE_OPENED)) {
            if (init_rx_queue(&s_l2tap_sockets[fd]) != ESP_OK) {
                atomic_store(&s_l2tap_sockets[fd].state, L2TAP_SOCK_STATE_READY);
                goto err;
            }
            s_l2tap_sockets[fd].ethtype_filter = 0x0;
            s_l2tap_sockets[fd].driver_handle = NULL;
            s_l2tap_sockets[fd].non_blocking = ((flags & O_NONBLOCK) == O_NONBLOCK);
            s_l2tap_sockets[fd].driver_transmit = esp_eth_transmit;
            s_l2tap_sockets[fd].driver_free_rx_buffer = default_free_rx_buffer;
            return fd;
        }
    }
err:
    return INVALID_FD;
}

static ssize_t l2tap_write(int fd, const void *data, size_t size)
{
    ssize_t ret = -1;

    if (size == 0) {
        return 0;
    }

    if (atomic_load(&s_l2tap_sockets[fd].state) == L2TAP_SOCK_STATE_OPENED) {
        if (s_l2tap_sockets[fd].ethtype_filter > ETH_IEEE802_3_MAX_LEN &&
                ((struct eth_hdr *)data)->type != htons(s_l2tap_sockets[fd].ethtype_filter)) {
            // bad message
            errno = EBADMSG;
            goto err;
        }

        if (s_l2tap_sockets[fd].driver_transmit(s_l2tap_sockets[fd].driver_handle, (void *)data, size) == ESP_OK) {
            ret = size;
        } else {
            // I/O error
            errno = EIO;
        }
    } else {
        // bad file desc
        errno = EBADF;
    }
err:
    return ret;
}

static ssize_t l2tap_read(int fd, void *data, size_t size)
{
    // the fd might be in the process of closing (close was already called but the task was preempted)
    if (atomic_load(&s_l2tap_sockets[fd].state) != L2TAP_SOCK_STATE_OPENED) {
        // bad file desc
        errno = EBADF;
        return -1;
    }

    if (size == 0) {
        return 0;
    }

    ssize_t actual_size = -1;
    if ((actual_size = pop_rx_queue(&s_l2tap_sockets[fd], data, size)) < 0) {
        errno = EAGAIN;
    }

    return actual_size;
}
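/*
 * Illustrative read/write sketch from an application task (assumptions: the VFS is registered
 * under the default base path and the fd has already been bound to an interface and a receive
 * filter via ioctl(), see the sketch after l2tap_ioctl(); error handling is trimmed for brevity).
 *
 *   uint8_t frame[1518];
 *   ssize_t n = read(fd, frame, sizeof(frame)); // blocks unless the fd was opened with O_NONBLOCK
 *   if (n < 0 && errno == EAGAIN) {
 *       // non-blocking fd and the RX queue is currently empty
 *   }
 *   // write() expects a complete Ethernet frame; when an Ethernet II filter is configured,
 *   // the frame's EtherType must match it, otherwise the call fails with EBADMSG
 *   ssize_t sent = write(fd, frame, n);
 */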

void l2tap_clean_task(void *task_param)
{
    l2tap_context_t *l2tap_socket = (l2tap_context_t *)task_param;

    // push an empty queue entry to unblock a possibly blocked task
    push_rx_queue(l2tap_socket, NULL, 0);
    // wait for the indication that the blocked task was executed (unblocked)
    pop_rx_queue(l2tap_socket, NULL, 0);

    // now, all higher priority tasks should have finished their execution and new accesses to the queue are prevented
    // by L2TAP_SOCK_STATE_CLOSING => we are free to free the queue resources
    flush_rx_queue(l2tap_socket);
    delete_rx_queue(l2tap_socket);

    // unblock the task which originally called close
    xSemaphoreGive(l2tap_socket->close_done_sem);

    // all done, delete itself
    vTaskDelete(NULL);
}

static int l2tap_close(int fd)
{
    if (atomic_load(&s_l2tap_sockets[fd].state) != L2TAP_SOCK_STATE_OPENED) {
        // not a valid opened fd
        errno = EBADF;
        return -1;
    }

    // prevent any further manipulation of the socket (operations already started will be finished though)
    atomic_store(&s_l2tap_sockets[fd].state, L2TAP_SOCK_STATE_CLOSING);

    if ((s_l2tap_sockets[fd].close_done_sem = xSemaphoreCreateBinary()) == NULL) {
        ESP_LOGE(TAG, "create close_done_sem failed");
        return -1;
    }
    // If one task is blocked in an I/O operation and another task tries to close the fd, the first task is
    // unblocked by pushing an empty queue entry from a low priority task (to ensure a context switch to the first task).
    // The first task's read operation then ends with an error and the low priority task frees the queue resources.
    if (xTaskCreate(l2tap_clean_task, "l2tap_clean_task", 1024, &s_l2tap_sockets[fd], tskIDLE_PRIORITY, NULL) == pdFAIL) {
        ESP_LOGE(TAG, "create l2tap_clean_task failed");
        return -1;
    }

    // wait for the low priority close task & then delete the semaphore
    xSemaphoreTake(s_l2tap_sockets[fd].close_done_sem, portMAX_DELAY);
    vSemaphoreDelete(s_l2tap_sockets[fd].close_done_sem); // safe to delete, this task owns the semaphore

    // indicate that the socket is ready to be used again
    atomic_store(&s_l2tap_sockets[fd].state, L2TAP_SOCK_STATE_READY);
    return 0;
}

static int l2tap_ioctl(int fd, int cmd, va_list args)
{
    esp_netif_t *esp_netif;
    switch (cmd) {
    case L2TAP_S_RCV_FILTER: ;
        uint16_t *new_ethtype_filter = va_arg(args, uint16_t *);
        l2tap_lock();
        // the socket needs to be assigned to an interface first
        if (s_l2tap_sockets[fd].driver_handle == NULL) {
            // Permission denied (filter change is denied at this state)
            errno = EACCES;
            l2tap_unlock();
            goto err;
        }
        // do nothing when the same filter is to be set
        if (s_l2tap_sockets[fd].ethtype_filter != *new_ethtype_filter) {
            // check that the ethtype filter is not already used by another socket on the same interface
            for (int i = 0; i < L2TAP_MAX_FDS; i++) {
                if (atomic_load(&s_l2tap_sockets[i].state) == L2TAP_SOCK_STATE_OPENED &&
                        s_l2tap_sockets[i].driver_handle == s_l2tap_sockets[fd].driver_handle &&
                        s_l2tap_sockets[i].ethtype_filter == *new_ethtype_filter) {
                    // invalid argument
                    errno = EINVAL;
                    l2tap_unlock();
                    goto err;
                }
            }
            s_l2tap_sockets[fd].ethtype_filter = *new_ethtype_filter;
        }
        l2tap_unlock();
        break;
    case L2TAP_G_RCV_FILTER: ;
        uint16_t *ethtype_filter_dest = va_arg(args, uint16_t *);
        *ethtype_filter_dest = s_l2tap_sockets[fd].ethtype_filter;
        break;
    case L2TAP_S_INTF_DEVICE: ;
        const char *str = va_arg(args, const char *);
        esp_netif = esp_netif_get_handle_from_ifkey(str);
        if (esp_netif == NULL) {
            // No such device
            errno = ENODEV;
            goto err;
        }
        l2tap_lock();
        s_l2tap_sockets[fd].driver_handle = esp_netif_get_io_driver(esp_netif);
        l2tap_unlock();
        break;
    case L2TAP_G_INTF_DEVICE: ;
        const char **str_p = va_arg(args, const char **);
        *str_p = NULL;
        esp_netif = NULL;
        while ((esp_netif = esp_netif_next(esp_netif)) != NULL) {
            if (s_l2tap_sockets[fd].driver_handle == esp_netif_get_io_driver(esp_netif)) {
                *str_p = esp_netif_get_ifkey(esp_netif);
            }
        }
        break;
    case L2TAP_S_DEVICE_DRV_HNDL: ;
        l2tap_iodriver_handle set_driver_hdl = va_arg(args, l2tap_iodriver_handle);
        if (set_driver_hdl == NULL) {
            // No such device (not a valid driver handle)
            errno = ENODEV;
            goto err;
        }
        l2tap_lock();
        s_l2tap_sockets[fd].driver_handle = set_driver_hdl;
        l2tap_unlock();
        break;
    case L2TAP_G_DEVICE_DRV_HNDL: ;
        l2tap_iodriver_handle *get_driver_hdl = va_arg(args, l2tap_iodriver_handle *);
        *get_driver_hdl = s_l2tap_sockets[fd].driver_handle;
        break;
    default:
        // unsupported operation
        errno = ENOSYS;
        goto err;
        break;
    }
    va_end(args);
    return 0;
err:
    va_end(args);
    return -1;
}
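/*
 * Illustrative configuration sketch (assumptions: the VFS is registered under the default base
 * path "/dev/net/tap" and an Ethernet esp_netif with interface key "ETH_DEF" exists; 0x2000 is
 * just an example EtherType). Note the ordering enforced above: the socket must be bound to an
 * interface (L2TAP_S_INTF_DEVICE or L2TAP_S_DEVICE_DRV_HNDL) before a receive filter can be set
 * (L2TAP_S_RCV_FILTER), otherwise the filter ioctl fails with EACCES.
 *
 *   int fd = open("/dev/net/tap", 0);
 *   ioctl(fd, L2TAP_S_INTF_DEVICE, "ETH_DEF");
 *   uint16_t eth_type_filter = 0x2000;
 *   ioctl(fd, L2TAP_S_RCV_FILTER, &eth_type_filter);
 *   // ... read()/write() frames ...
 *   close(fd);
 */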

static int l2tap_fcntl(int fd, int cmd, int arg)
{
    int result = 0;
    if (cmd == F_GETFL) {
        if (s_l2tap_sockets[fd].non_blocking) {
            result |= O_NONBLOCK;
        }
    } else if (cmd == F_SETFL) {
        s_l2tap_sockets[fd].non_blocking = (arg & O_NONBLOCK) != 0;
    } else {
        // unsupported operation
        result = -1;
        errno = ENOSYS;
    }
    return result;
}

#ifdef CONFIG_VFS_SUPPORT_SELECT

static esp_err_t register_select(l2tap_select_args_t *args)
{
    esp_err_t ret = ESP_ERR_INVALID_ARG;

    if (args) {
        const int new_size = s_registered_select_cnt + 1;
        l2tap_select_args_t **registered_selects_new;
        if ((registered_selects_new = realloc(s_registered_selects, new_size * sizeof(l2tap_select_args_t *))) == NULL) {
            ret = ESP_ERR_NO_MEM;
        } else {
            s_registered_selects = registered_selects_new;
            s_registered_selects[s_registered_select_cnt] = args;
            s_registered_select_cnt = new_size;
            ret = ESP_OK;
        }
    }

    return ret;
}

static esp_err_t unregister_select(l2tap_select_args_t *args)
{
    esp_err_t ret = ESP_OK;
    if (args) {
        ret = ESP_ERR_INVALID_STATE;
        for (int i = 0; i < s_registered_select_cnt; ++i) {
            if (s_registered_selects[i] == args) {
                const int new_size = s_registered_select_cnt - 1;
                // The item is removed by overwriting it with the last item. The subsequent reallocation will drop the
                // last item.
                s_registered_selects[i] = s_registered_selects[new_size];
                s_registered_selects = realloc(s_registered_selects, new_size * sizeof(l2tap_select_args_t *));
                if (s_registered_selects || new_size == 0) {
                    s_registered_select_cnt = new_size;
                    ret = ESP_OK;
                } else {
                    ret = ESP_ERR_NO_MEM;
                }
                break;
            }
        }
    }
    return ret;
}

static void l2tap_select_notify(int fd, l2tap_select_notif_e select_notif)
{
    for (int i = 0; i < s_registered_select_cnt; i++) {
        l2tap_select_args_t *args = s_registered_selects[i];
        if (args) {
            switch (select_notif) {
            case L2TAP_SELECT_READ_NOTIF:
                if (FD_ISSET(fd, &args->readfds_orig)) {
                    FD_SET(fd, args->readfds);
                    esp_vfs_select_triggered(args->select_sem);
                }
                break;
            case L2TAP_SELECT_WRITE_NOTIF:
                if (FD_ISSET(fd, &args->writefds_orig)) {
                    FD_SET(fd, args->writefds);
                    esp_vfs_select_triggered(args->select_sem);
                }
                break;
            case L2TAP_SELECT_ERR_NOTIF:
                if (FD_ISSET(fd, &args->errorfds_orig)) {
                    FD_SET(fd, args->errorfds);
                    esp_vfs_select_triggered(args->select_sem);
                }
                break;
            }
        }
    }
}

static esp_err_t l2tap_start_select(int nfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds,
                                    esp_vfs_select_sem_t select_sem, void **end_select_args)
{
    const int max_fds = MIN(nfds, L2TAP_MAX_FDS);
    *end_select_args = NULL;

    l2tap_select_args_t *args = malloc(sizeof(l2tap_select_args_t));

    if (args == NULL) {
        return ESP_ERR_NO_MEM;
    }

    args->select_sem = select_sem;
    args->readfds = readfds;
    args->writefds = writefds;
    args->errorfds = exceptfds;
    args->readfds_orig = *readfds; // store the original values because they will be set to zero
    args->writefds_orig = *writefds;
    args->errorfds_orig = *exceptfds;
    FD_ZERO(readfds);
    FD_ZERO(writefds);
    FD_ZERO(exceptfds);

    l2tap_lock();

    for (int i = 0; i < max_fds; i++) {
        if (FD_ISSET(i, &args->readfds_orig)) {
            if (!rx_queue_empty(&s_l2tap_sockets[i])) {
                // signal immediately when data is already buffered
                FD_SET(i, readfds);
                esp_vfs_select_triggered(args->select_sem);
            }
        }
    }

    esp_err_t ret = register_select(args);
    if (ret != ESP_OK) {
        l2tap_unlock();
        free(args);
        return ret;
    }

    l2tap_unlock();

    *end_select_args = args;

    return ESP_OK;
}
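/*
 * Illustrative select() sketch (assumptions: fd is an open L2 TAP file descriptor and
 * CONFIG_VFS_SUPPORT_SELECT is enabled). start_select marks the fd readable right away when
 * frames are already queued; otherwise the select semaphore is triggered later from the RX
 * path via l2tap_select_notify().
 *
 *   fd_set rfds;
 *   FD_ZERO(&rfds);
 *   FD_SET(fd, &rfds);
 *   struct timeval tv = { .tv_sec = 1, .tv_usec = 0 };
 *   if (select(fd + 1, &rfds, NULL, NULL, &tv) > 0 && FD_ISSET(fd, &rfds)) {
 *       // read() will not block now
 *   }
 */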

static esp_err_t l2tap_end_select(void *end_select_args)
{
    l2tap_select_args_t *args = end_select_args;
    if (args == NULL) {
        return ESP_OK;
    }

    l2tap_lock();
    esp_err_t ret = unregister_select(args);
    l2tap_unlock();

    if (args) {
        free(args);
    }

    return ret;
}
#endif //CONFIG_VFS_SUPPORT_SELECT

esp_err_t esp_vfs_l2tap_intf_register(l2tap_vfs_config_t *config)
{
    l2tap_vfs_config_t def_config = L2TAP_VFS_CONFIG_DEFAULT();

    if (config == NULL) {
        ESP_LOGD(TAG, "vfs is to be registered with default settings");
        config = &def_config;
    }

    ESP_RETURN_ON_FALSE(!s_is_registered, ESP_ERR_INVALID_STATE, TAG, "vfs is already registered");
    s_is_registered = true;
    esp_vfs_t vfs = {
        .flags = ESP_VFS_FLAG_DEFAULT,
        .write = &l2tap_write,
        .open = &l2tap_open,
        .close = &l2tap_close,
        .read = &l2tap_read,
        .fcntl = &l2tap_fcntl,
        .ioctl = &l2tap_ioctl,
#ifdef CONFIG_VFS_SUPPORT_SELECT
        .start_select = &l2tap_start_select,
        .end_select = &l2tap_end_select,
#endif // CONFIG_VFS_SUPPORT_SELECT
    };
    ESP_RETURN_ON_ERROR(esp_vfs_register(config->base_path, &vfs, NULL), TAG, "vfs register error");

    return ESP_OK;
}
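/*
 * Illustrative registration sketch (a minimal sequence assuming the default configuration; the
 * default base path comes from L2TAP_VFS_CONFIG_DEFAULT()/L2TAP_VFS_DEFAULT_PATH defined in
 * esp_vfs_l2tap.h, assumed here to be "/dev/net/tap"):
 *
 *   ESP_ERROR_CHECK(esp_vfs_l2tap_intf_register(NULL)); // NULL selects the default config
 *   int fd = open("/dev/net/tap", 0);
 *   // ... configure and use the fd ...
 *   close(fd);
 *   ESP_ERROR_CHECK(esp_vfs_l2tap_intf_unregister(NULL)); // NULL unregisters the default path
 */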

esp_err_t esp_vfs_l2tap_intf_unregister(const char *base_path)
{
    for (int i = 0; i < L2TAP_MAX_FDS; i++) {
        ESP_RETURN_ON_FALSE(atomic_load(&s_l2tap_sockets[i].state) == L2TAP_SOCK_STATE_READY,
                            ESP_ERR_INVALID_STATE, TAG, "all FDs need to be closed");
    }

    if (base_path == NULL) {
        ESP_RETURN_ON_ERROR(esp_vfs_unregister(L2TAP_VFS_DEFAULT_PATH), TAG, "vfs un-register error");
    } else {
        ESP_RETURN_ON_ERROR(esp_vfs_unregister(base_path), TAG, "vfs un-register error");
    }
    s_is_registered = false;

    return ESP_OK;
}