/*
 * Copyright (c) 2019 Tobias Svehagen
 * Copyright (c) 2020 Grinn
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(wifi_esp_at_offload, CONFIG_WIFI_LOG_LEVEL);

#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <string.h>
#include <stdlib.h>
#include <errno.h>

#include <zephyr/net/net_pkt.h>
#include <zephyr/net/net_if.h>
#include <zephyr/net/net_offload.h>

#include "esp.h"

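/* Listening for incoming TCP connections is not supported by this offload
 * driver, so net_context listen requests are rejected.
 */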
static int esp_listen(struct net_context *context, int backlog)
{
	return -ENOTSUP;
}

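/* Open the remote side of a socket by issuing AT+CIPSTART. TCP sockets use
 * a keepalive of 7200 s; UDP sockets optionally pass the bound local port
 * and the <mode> parameter. Illustrative command with example values:
 *   AT+CIPSTART=0,"TCP","192.0.2.1",8080,7200
 * On success the socket is flagged as connected and, for SOCK_STREAM, the
 * net_context state is set to NET_CONTEXT_CONNECTED.
 */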
static int _sock_connect(struct esp_data *dev, struct esp_socket *sock)
{
	/* Calculate the largest possible AT command length based on both
	 * TCP and UDP variants.
	 */
	char connect_msg[MAX(sizeof("AT+CIPSTART=000,\"TCP\",\"\",65535,7200") +
			     NET_IPV4_ADDR_LEN,
			     sizeof("AT+CIPSTART=000,\"UDP\",\"\",65535,65535,0,\"\"") +
			     2 * NET_IPV4_ADDR_LEN)];
	char dst_addr_str[NET_IPV4_ADDR_LEN];
	char src_addr_str[NET_IPV4_ADDR_LEN];
	struct sockaddr src;
	struct sockaddr dst;
	int ret;
	int mode;

	if (!esp_flags_are_set(dev, EDF_STA_CONNECTED | EDF_AP_ENABLED)) {
		return -ENETUNREACH;
	}

	k_mutex_lock(&sock->lock, K_FOREVER);
	src = sock->src;
	dst = sock->dst;
	k_mutex_unlock(&sock->lock);

	if (dst.sa_family == AF_INET) {
		net_addr_ntop(dst.sa_family,
			      &net_sin(&dst)->sin_addr,
			      dst_addr_str, sizeof(dst_addr_str));
	} else {
		strcpy(dst_addr_str, "0.0.0.0");
	}

	if (esp_socket_ip_proto(sock) == IPPROTO_TCP) {
		snprintk(connect_msg, sizeof(connect_msg),
			 "AT+CIPSTART=%d,\"TCP\",\"%s\",%d,7200",
			 sock->link_id, dst_addr_str,
			 ntohs(net_sin(&dst)->sin_port));
	} else {
		if (src.sa_family == AF_INET && net_sin(&src)->sin_port != 0) {
			net_addr_ntop(src.sa_family,
				      &net_sin(&src)->sin_addr,
				      src_addr_str, sizeof(src_addr_str));
			/* <mode>: controls whether the UDP connection's
			 * remote host and port may change:
			 *
			 * 0: After UDP data is received, the parameters
			 *    <"remote host"> and <remote port> stay
			 *    unchanged (default).
			 * 1: Only the first time that UDP data is received
			 *    from an IP address and port different from the
			 *    initially set <remote host> and <remote port>
			 *    are they changed to the IP address and port of
			 *    the device that sent the data.
			 * 2: Each time UDP data is received, <"remote host">
			 *    and <remote port> are changed to the IP address
			 *    and port of the device that sent the data.
			 *
			 * When remote IP and port are both 0, the socket is
			 * being used as a server and the connection mode
			 * needs to be set to 2.
			 */
			if ((net_sin(&dst)->sin_addr.s_addr == 0) &&
			    (ntohs(net_sin(&dst)->sin_port) == 0)) {
				mode = 2;
				/* Port 0 is reserved and a valid port needs
				 * to be provided when connecting.
				 */
				net_sin(&dst)->sin_port = 65535;
			} else {
				mode = 0;
			}

			snprintk(connect_msg, sizeof(connect_msg),
				 "AT+CIPSTART=%d,\"UDP\",\"%s\",%d,%d,%d,\"%s\"",
				 sock->link_id, dst_addr_str,
				 ntohs(net_sin(&dst)->sin_port), ntohs(net_sin(&src)->sin_port),
				 mode, src_addr_str);
		} else {
			snprintk(connect_msg, sizeof(connect_msg),
				 "AT+CIPSTART=%d,\"UDP\",\"%s\",%d",
				 sock->link_id, dst_addr_str,
				 ntohs(net_sin(&dst)->sin_port));
		}
	}

	LOG_DBG("link %d, ip_proto %s, addr %s", sock->link_id,
		esp_socket_ip_proto(sock) == IPPROTO_TCP ? "TCP" : "UDP",
		dst_addr_str);

	ret = esp_cmd_send(dev, NULL, 0, connect_msg, ESP_CMD_TIMEOUT);
	if (ret == 0) {
		esp_socket_flags_set(sock, ESP_SOCK_CONNECTED);
		if (esp_socket_type(sock) == SOCK_STREAM) {
			net_context_set_state(sock->context,
					      NET_CONTEXT_CONNECTED);
		}
	} else if (ret == -ETIMEDOUT) {
		/* FIXME:
		 * What if the connection finishes after we return from
		 * here? The caller might think that it can discard the
		 * socket. Set some flag to indicate that the link should
		 * be closed if it ever connects?
		 */
	}

	return ret;
}

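/* Work handler for deferred (asynchronous) connect requests: perform the
 * AT+CIPSTART exchange and then invoke the user's connect callback, if any,
 * with the result.
 */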
void esp_connect_work(struct k_work *work)
{
	struct esp_socket *sock = CONTAINER_OF(work, struct esp_socket,
					       connect_work);
	struct esp_data *dev = esp_socket_to_dev(sock);
	int ret;

	ret = _sock_connect(dev, sock);

	k_mutex_lock(&sock->lock, K_FOREVER);
	if (sock->connect_cb) {
		sock->connect_cb(sock->context, ret, sock->conn_user_data);
	}
	k_mutex_unlock(&sock->lock);
}

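/* Binding a TCP socket is accepted but has no effect. For UDP over IPv4 the
 * requested local address is stored in sock->src so that _sock_connect()
 * can later pass the local port to AT+CIPSTART.
 */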
static int esp_bind(struct net_context *context, const struct sockaddr *addr,
		    socklen_t addrlen)
{
	struct esp_socket *sock;
	struct esp_data *dev;

	sock = (struct esp_socket *)context->offload_context;
	dev = esp_socket_to_dev(sock);

	if (esp_socket_ip_proto(sock) == IPPROTO_TCP) {
		return 0;
	}

	if (IS_ENABLED(CONFIG_NET_IPV4) && addr->sa_family == AF_INET) {
		LOG_DBG("link %d", sock->link_id);

		if (esp_socket_connected(sock)) {
			return -EISCONN;
		}

		k_mutex_lock(&sock->lock, K_FOREVER);
		sock->src = *addr;
		k_mutex_unlock(&sock->lock);

		return 0;
	}

	return -EAFNOSUPPORT;
}

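/* Connect an IPv4 socket. The destination address and callback are stored
 * under the socket lock; with a zero timeout the connect is queued on the
 * socket work queue and performed asynchronously, otherwise AT+CIPSTART is
 * issued synchronously and the callback is invoked unless the command
 * timed out.
 */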
static int esp_connect(struct net_context *context,
		       const struct sockaddr *addr,
		       socklen_t addrlen,
		       net_context_connect_cb_t cb,
		       int32_t timeout,
		       void *user_data)
{
	struct esp_socket *sock;
	struct esp_data *dev;
	int ret;

	sock = (struct esp_socket *)context->offload_context;
	dev = esp_socket_to_dev(sock);

	LOG_DBG("link %d, timeout %d", sock->link_id, timeout);

	if (!IS_ENABLED(CONFIG_NET_IPV4) || addr->sa_family != AF_INET) {
		return -EAFNOSUPPORT;
	}

	if (esp_socket_connected(sock)) {
		return -EISCONN;
	}

	k_mutex_lock(&sock->lock, K_FOREVER);
	sock->dst = *addr;
	sock->connect_cb = cb;
	sock->conn_user_data = user_data;
	k_mutex_unlock(&sock->lock);

	if (timeout == 0) {
		esp_socket_work_submit(sock, &sock->connect_work);
		return 0;
	}

	ret = _sock_connect(dev, sock);

	if (ret != -ETIMEDOUT && cb) {
		cb(context, ret, user_data);
	}

	return ret;
}

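/* Accepting incoming TCP connections is not supported by this offload
 * driver.
 */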
static int esp_accept(struct net_context *context,
		      net_tcp_accept_cb_t cb, int32_t timeout,
		      void *user_data)
{
	return -ENOTSUP;
}

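/* Response handlers for the AT+CIPSEND exchange: the '>' prompt signals
 * that the modem is ready to take the payload, while "SEND OK" and
 * "SEND FAIL" report the final result of the transmission.
 */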
MODEM_CMD_DIRECT_DEFINE(on_cmd_tx_ready)
{
	struct esp_data *dev = CONTAINER_OF(data, struct esp_data,
					    cmd_handler_data);

	k_sem_give(&dev->sem_tx_ready);
	return len;
}

MODEM_CMD_DEFINE(on_cmd_send_ok)
{
	struct esp_data *dev = CONTAINER_OF(data, struct esp_data,
					    cmd_handler_data);

	modem_cmd_handler_set_error(data, 0);
	k_sem_give(&dev->sem_response);

	return 0;
}

MODEM_CMD_DEFINE(on_cmd_send_fail)
{
	struct esp_data *dev = CONTAINER_OF(data, struct esp_data,
					    cmd_handler_data);

	modem_cmd_handler_set_error(data, -EIO);
	k_sem_give(&dev->sem_response);

	return 0;
}

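/* Transmit a single net_pkt over the given link using AT+CIPSEND.
 * Illustrative exchange with example values:
 *   -> AT+CIPSEND=0,12                    (TCP: link id, length)
 *   -> AT+CIPSEND=1,12,"192.0.2.1",5000   (UDP: plus remote addr and port)
 *   <- >                                  (modem ready for payload)
 *   -> <12 bytes of payload>
 *   <- SEND OK / SEND FAIL
 */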
static int _sock_send(struct esp_socket *sock, struct net_pkt *pkt)
{
	struct esp_data *dev = esp_socket_to_dev(sock);
	char cmd_buf[sizeof("AT+CIPSEND=0,,\"\",") +
		     sizeof(STRINGIFY(ESP_MTU)) - 1 +
		     NET_IPV4_ADDR_LEN + sizeof("65535") - 1];
	char addr_str[NET_IPV4_ADDR_LEN];
	int ret, write_len, pkt_len;
	struct net_buf *frag;
	static const struct modem_cmd cmds[] = {
		MODEM_CMD_DIRECT(">", on_cmd_tx_ready),
		MODEM_CMD("SEND OK", on_cmd_send_ok, 0U, ""),
		MODEM_CMD("SEND FAIL", on_cmd_send_fail, 0U, ""),
	};
	struct sockaddr dst;

	if (!esp_flags_are_set(dev, EDF_STA_CONNECTED | EDF_AP_ENABLED)) {
		return -ENETUNREACH;
	}

	pkt_len = net_pkt_get_len(pkt);

	LOG_DBG("link %d, len %d", sock->link_id, pkt_len);

	if (esp_socket_ip_proto(sock) == IPPROTO_TCP) {
		snprintk(cmd_buf, sizeof(cmd_buf),
			 "AT+CIPSEND=%d,%d", sock->link_id, pkt_len);
	} else {
		k_mutex_lock(&sock->lock, K_FOREVER);
		dst = sock->dst;
		k_mutex_unlock(&sock->lock);

		net_addr_ntop(dst.sa_family,
			      &net_sin(&dst)->sin_addr,
			      addr_str, sizeof(addr_str));
		snprintk(cmd_buf, sizeof(cmd_buf),
			 "AT+CIPSEND=%d,%d,\"%s\",%d",
			 sock->link_id, pkt_len, addr_str,
			 ntohs(net_sin(&dst)->sin_port));
	}

	k_sem_take(&dev->cmd_handler_data.sem_tx_lock, K_FOREVER);
	k_sem_reset(&dev->sem_tx_ready);

	ret = modem_cmd_send_ext(&dev->mctx.iface, &dev->mctx.cmd_handler,
				 cmds, ARRAY_SIZE(cmds), cmd_buf,
				 &dev->sem_response, ESP_CMD_TIMEOUT,
				 MODEM_NO_TX_LOCK | MODEM_NO_UNSET_CMDS);
	if (ret < 0) {
		LOG_DBG("Failed to send command");
		goto out;
	}

	/* Reset semaphore that will be released by 'SEND OK' or 'SEND FAIL' */
	k_sem_reset(&dev->sem_response);

	/* Wait for '>' */
	ret = k_sem_take(&dev->sem_tx_ready, K_MSEC(5000));
	if (ret < 0) {
		LOG_DBG("Timeout waiting for tx");
		goto out;
	}

	frag = pkt->frags;
	while (frag && pkt_len) {
		write_len = MIN(pkt_len, frag->len);
		dev->mctx.iface.write(&dev->mctx.iface, frag->data, write_len);
		pkt_len -= write_len;
		frag = frag->frags;
	}

	/* Wait for 'SEND OK' or 'SEND FAIL' */
	ret = k_sem_take(&dev->sem_response, ESP_CMD_TIMEOUT);
	if (ret < 0) {
		LOG_DBG("No send response");
		goto out;
	}

	ret = modem_cmd_handler_get_error(&dev->cmd_handler_data);
	if (ret != 0) {
		LOG_DBG("Failed to send data");
	}

out:
	(void)modem_cmd_handler_update_cmds(&dev->cmd_handler_data,
					    NULL, 0U, false);
	k_sem_give(&dev->cmd_handler_data.sem_tx_lock);

	return ret;
}

static bool esp_socket_can_send(struct esp_socket *sock)
{
	atomic_val_t flags = esp_socket_flags(sock);

	if ((flags & ESP_SOCK_CONNECTED) && !(flags & ESP_SOCK_CLOSE_PENDING)) {
		return true;
	}

	return false;
}

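/* Pop one packet from the socket's TX FIFO and send it. Returns -ENOMSG
 * when the FIFO is empty; otherwise returns 0 regardless of the send
 * result so that the caller keeps draining the queue.
 */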
static int esp_socket_send_one_pkt(struct esp_socket *sock)
{
	struct net_context *context = sock->context;
	struct net_pkt *pkt;
	int ret;

	pkt = k_fifo_get(&sock->tx_fifo, K_NO_WAIT);
	if (!pkt) {
		return -ENOMSG;
	}

	if (!esp_socket_can_send(sock)) {
		goto pkt_unref;
	}

	ret = _sock_send(sock, pkt);
	if (ret < 0) {
		LOG_ERR("Failed to send data: link %d, ret %d",
			sock->link_id, ret);

		/*
		 * If this is stream data, then we should stop pushing
		 * anything more to this socket, as there would be a hole
		 * in the data stream, which the application layer is not
		 * expecting.
		 */
		if (esp_socket_type(sock) == SOCK_STREAM) {
			if (!esp_socket_flags_test_and_set(sock,
						ESP_SOCK_CLOSE_PENDING)) {
				esp_socket_work_submit(sock, &sock->close_work);
			}
		}
	} else if (context->send_cb) {
		context->send_cb(context, ret, context->user_data);
	}

pkt_unref:
	net_pkt_unref(pkt);

	return 0;
}

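/* Work handler that drains the socket's TX FIFO, sending queued packets
 * one at a time until the FIFO is empty.
 */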
void esp_send_work(struct k_work *work)
{
	struct esp_socket *sock = CONTAINER_OF(work, struct esp_socket,
					       send_work);
	int err;

	do {
		err = esp_socket_send_one_pkt(sock);
	} while (err != -ENOMSG);
}

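/* Queue a packet for transmission. TCP sockets must already be connected.
 * An unconnected UDP socket with a destination address is first connected
 * synchronously so that a valid link id exists; a connected UDP socket has
 * its destination updated to the supplied address. The packet is then
 * placed on the socket's TX FIFO.
 */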
static int esp_sendto(struct net_pkt *pkt,
		      const struct sockaddr *dst_addr,
		      socklen_t addrlen,
		      net_context_send_cb_t cb,
		      int32_t timeout,
		      void *user_data)
{
	struct net_context *context;
	struct esp_socket *sock;
	struct esp_data *dev;
	int ret = 0;

	context = pkt->context;
	sock = (struct esp_socket *)context->offload_context;
	dev = esp_socket_to_dev(sock);

	LOG_DBG("link %d, timeout %d", sock->link_id, timeout);

	if (!esp_flags_are_set(dev, EDF_STA_CONNECTED | EDF_AP_ENABLED)) {
		return -ENETUNREACH;
	}

	if (esp_socket_type(sock) == SOCK_STREAM) {
		atomic_val_t flags = esp_socket_flags(sock);

		if (!(flags & ESP_SOCK_CONNECTED) ||
		    (flags & ESP_SOCK_CLOSE_PENDING)) {
			return -ENOTCONN;
		}
	} else {
		if (!esp_socket_connected(sock)) {
			if (!dst_addr) {
				return -ENOTCONN;
			}

			/* Use a timeout of 5000 ms here even though the
			 * timeout parameter might be different. We want to
			 * have a valid link id before proceeding.
			 */
			ret = esp_connect(context, dst_addr, addrlen, NULL,
					  (5 * MSEC_PER_SEC), NULL);
			if (ret < 0) {
				return ret;
			}
		} else if (esp_socket_type(sock) == SOCK_DGRAM) {
			memcpy(&sock->dst, dst_addr, addrlen);
		}
	}

	return esp_socket_queue_tx(sock, pkt);
}

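/* net_offload send hook: same as sendto, but without an explicit
 * destination address.
 */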
static int esp_send(struct net_pkt *pkt,
		    net_context_send_cb_t cb,
		    int32_t timeout,
		    void *user_data)
{
	return esp_sendto(pkt, NULL, 0, cb, timeout, user_data);
}

#define CIPRECVDATA_CMD_MIN_LEN (sizeof("+CIPRECVDATA,L:") - 1)

#if defined(CONFIG_WIFI_ESP_AT_CIPDINFO_USE)
#define CIPRECVDATA_CMD_MAX_LEN (sizeof("+CIPRECVDATA,LLLL,\"255.255.255.255\",65535:") - 1)
#else
#define CIPRECVDATA_CMD_MAX_LEN (sizeof("+CIPRECVDATA,LLLL:") - 1)
#endif

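/* Parse the +CIPRECVDATA response header. Illustrative header with example
 * values: +CIPRECVDATA,1024,"192.0.2.1",5000:<payload>
 * Returns -EAGAIN when more bytes are needed (header or payload still
 * incomplete), -EBADMSG on a malformed header, or 0 with the payload
 * offset/length (and, with CONFIG_WIFI_ESP_AT_CIPDINFO_USE, the quoted
 * remote IP string and port) filled in.
 */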
static int cmd_ciprecvdata_parse(struct esp_socket *sock,
				 struct net_buf *buf, uint16_t len,
				 int *data_offset, int *data_len, char *ip_str,
				 int *port)
{
	char cmd_buf[CIPRECVDATA_CMD_MAX_LEN + 1];
	char *endptr;
	size_t frags_len;
	size_t match_len;

	frags_len = net_buf_frags_len(buf);
	if (frags_len < CIPRECVDATA_CMD_MIN_LEN) {
		return -EAGAIN;
	}

	match_len = net_buf_linearize(cmd_buf, CIPRECVDATA_CMD_MAX_LEN,
				      buf, 0, CIPRECVDATA_CMD_MAX_LEN);
	cmd_buf[match_len] = 0;

	*data_len = strtol(&cmd_buf[len], &endptr, 10);

#if defined(CONFIG_WIFI_ESP_AT_CIPDINFO_USE)
	char *strstart = endptr + 1;
	char *strend = strchr(strstart, ',');

	if (strstart == NULL || strend == NULL) {
		return -EAGAIN;
	}

	memcpy(ip_str, strstart, strend - strstart);
	ip_str[strend - strstart] = '\0';
	*port = strtol(strend + 1, &endptr, 10);
#else
	ARG_UNUSED(ip_str);
	ARG_UNUSED(port);
#endif

	if (endptr == &cmd_buf[len] ||
	    (*endptr == 0 && match_len >= CIPRECVDATA_CMD_MAX_LEN) ||
	    *data_len > CIPRECVDATA_MAX_LEN) {
		LOG_ERR("Invalid cmd: %s", cmd_buf);
		return -EBADMSG;
	} else if (*endptr == 0) {
		return -EAGAIN;
	} else if (*endptr != _CIPRECVDATA_END) {
		LOG_ERR("Invalid end of cmd: 0x%02x != 0x%02x", *endptr,
			_CIPRECVDATA_END);
		return -EBADMSG;
	}

	/* data_offset is the offset to where the actual data starts */
	*data_offset = (endptr - cmd_buf) + 1;

	/* FIXME: Inefficient way of waiting for data */
	if (*data_offset + *data_len > frags_len) {
		return -EAGAIN;
	}

	*endptr = 0;

	return 0;
}

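/* Direct handler for +CIPRECVDATA: parse the header, optionally record the
 * remote address and port in the context, and hand the payload bytes over
 * to esp_socket_rx(). Returns the number of bytes consumed, or -EAGAIN if
 * the full response has not been received yet.
 */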
MODEM_CMD_DIRECT_DEFINE(on_cmd_ciprecvdata)
{
	struct esp_data *dev = CONTAINER_OF(data, struct esp_data,
					    cmd_handler_data);
	struct esp_socket *sock = dev->rx_sock;
	int data_offset, data_len;
	int err;

#if defined(CONFIG_WIFI_ESP_AT_CIPDINFO_USE)
	char raw_remote_ip[INET_ADDRSTRLEN + 3] = {0};
	int port = 0;

	err = cmd_ciprecvdata_parse(sock, data->rx_buf, len, &data_offset,
				    &data_len, raw_remote_ip, &port);
#else
	err = cmd_ciprecvdata_parse(sock, data->rx_buf, len, &data_offset,
				    &data_len, NULL, NULL);
#endif
	if (err) {
		if (err == -EAGAIN) {
			return -EAGAIN;
		}

		return err;
	}

#if defined(CONFIG_WIFI_ESP_AT_CIPDINFO_USE)
	struct sockaddr_in *recv_addr =
		(struct sockaddr_in *) &sock->context->remote;

	recv_addr->sin_port = htons(port);
	recv_addr->sin_family = AF_INET;

	/* The IP address arrives wrapped in quotation marks, which
	 * net_addr_pton() does not accept. Strip them by copying from
	 * &raw_remote_ip[1] and shortening the length by 2.
	 */
	char remote_ip_addr[INET_ADDRSTRLEN];
	size_t remote_ip_str_len;

	remote_ip_str_len = MIN(sizeof(remote_ip_addr) - 1,
				strlen(raw_remote_ip) - 2);
	strncpy(remote_ip_addr, &raw_remote_ip[1], remote_ip_str_len);
	remote_ip_addr[remote_ip_str_len] = '\0';

	if (net_addr_pton(AF_INET, remote_ip_addr, &recv_addr->sin_addr) < 0) {
		LOG_ERR("Invalid src addr %s", remote_ip_addr);
		err = -EIO;
		return err;
	}
#endif
	esp_socket_rx(sock, data->rx_buf, data_offset, data_len);

	return data_offset + data_len;
}

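/* Work handler that pulls buffered RX data out of the modem with
 * AT+CIPRECVDATA, requesting up to CIPRECVDATA_MAX_LEN bytes for the
 * socket's link. Illustrative command with example values:
 *   AT+CIPRECVDATA=0,1024
 */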
void esp_recvdata_work(struct k_work *work)
{
	struct esp_socket *sock = CONTAINER_OF(work, struct esp_socket,
					       recvdata_work);
	struct esp_data *data = esp_socket_to_dev(sock);
	char cmd[sizeof("AT+CIPRECVDATA=000,"STRINGIFY(CIPRECVDATA_MAX_LEN))];
	static const struct modem_cmd cmds[] = {
		MODEM_CMD_DIRECT(_CIPRECVDATA, on_cmd_ciprecvdata),
	};
	int ret;

	LOG_DBG("reading available data on link %d", sock->link_id);

	data->rx_sock = sock;

	snprintk(cmd, sizeof(cmd), "AT+CIPRECVDATA=%d,%d", sock->link_id,
		 CIPRECVDATA_MAX_LEN);

	ret = esp_cmd_send(data, cmds, ARRAY_SIZE(cmds), cmd, ESP_CMD_TIMEOUT);
	if (ret < 0) {
		LOG_ERR("Error during rx: link %d, ret %d", sock->link_id,
			ret);
	}
}

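/* Work handler for deferred socket closure: clear the connection flags,
 * close the link if it was still connected, and, if a close was pending,
 * signal the application by invoking the receive callback with a NULL
 * packet.
 */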
void esp_close_work(struct k_work *work)
{
	struct esp_socket *sock = CONTAINER_OF(work, struct esp_socket,
					       close_work);
	atomic_val_t old_flags;

	old_flags = esp_socket_flags_clear(sock,
				(ESP_SOCK_CONNECTED | ESP_SOCK_CLOSE_PENDING));

	if ((old_flags & ESP_SOCK_CONNECTED) &&
	    (old_flags & ESP_SOCK_CLOSE_PENDING)) {
		esp_socket_close(sock);
	}

	/* Should we notify that the socket has been closed? */
	if (old_flags & ESP_SOCK_CLOSE_PENDING) {
		k_mutex_lock(&sock->lock, K_FOREVER);
		if (sock->recv_cb) {
			sock->recv_cb(sock->context, NULL, NULL, NULL, 0,
				      sock->recv_user_data);
			k_sem_give(&sock->sem_data_ready);
		}
		k_mutex_unlock(&sock->lock);
	}
}

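/* Register (or block on) a receive callback. A bound but unconnected UDP
 * socket is first set up with AT+CIPSTART so that traffic can be received
 * on it. With a zero timeout the call returns immediately; otherwise it
 * waits for data (or close) and then clears the callback again.
 */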
static int esp_recv(struct net_context *context,
		    net_context_recv_cb_t cb,
		    int32_t timeout,
		    void *user_data)
{
	struct esp_socket *sock = context->offload_context;
	struct esp_data *dev = esp_socket_to_dev(sock);
	int ret;

	LOG_DBG("link_id %d, timeout %d, cb %p, data %p",
		sock->link_id, timeout, cb, user_data);

	/*
	 * A UDP "listening" socket needs to be bound using AT+CIPSTART
	 * before any traffic can be received.
	 */
	if (!esp_socket_connected(sock) &&
	    esp_socket_ip_proto(sock) == IPPROTO_UDP &&
	    sock->src.sa_family == AF_INET &&
	    net_sin(&sock->src)->sin_port != 0) {
		_sock_connect(dev, sock);
	}

	k_mutex_lock(&sock->lock, K_FOREVER);
	sock->recv_cb = cb;
	sock->recv_user_data = user_data;
	k_sem_reset(&sock->sem_data_ready);
	k_mutex_unlock(&sock->lock);

	if (timeout == 0) {
		return 0;
	}

	ret = k_sem_take(&sock->sem_data_ready, K_MSEC(timeout));

	k_mutex_lock(&sock->lock, K_FOREVER);
	sock->recv_cb = NULL;
	sock->recv_user_data = NULL;
	k_mutex_unlock(&sock->lock);

	return ret;
}

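/* Release a net_context: flush pending work, close the link if still
 * connected, drop the callbacks and wait for the socket's reference count
 * to reach zero before returning it to the pool.
 */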
static int esp_put(struct net_context *context)
{
	struct esp_socket *sock = context->offload_context;

	esp_socket_workq_stop_and_flush(sock);

	if (esp_socket_flags_test_and_clear(sock, ESP_SOCK_CONNECTED)) {
		esp_socket_close(sock);
	}

	k_mutex_lock(&sock->lock, K_FOREVER);
	sock->connect_cb = NULL;
	sock->recv_cb = NULL;
	k_mutex_unlock(&sock->lock);

	k_sem_reset(&sock->sem_free);

	esp_socket_unref(sock);

	/*
	 * Wait until the refcount reaches 0. The esp_socket_unref() call
	 * above might or might not be the last one, as the esp_rx thread
	 * may still have work in progress (e.g. parsing an unsolicited AT
	 * command), so wait here until it finishes.
	 */
	k_sem_take(&sock->sem_free, K_FOREVER);

	sock->context = NULL;

	esp_socket_put(sock);

	return 0;
}

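/* Allocate an offload socket for a new net_context. Only AF_INET is
 * supported, and because the interface is not yet attached to the context
 * at this point, the driver is limited to a single device instance.
 */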
static int esp_get(sa_family_t family,
		   enum net_sock_type type,
		   enum net_ip_protocol ip_proto,
		   struct net_context **context)
{
	struct esp_socket *sock;
	struct esp_data *dev;

	LOG_DBG("");

	if (family != AF_INET) {
		return -EAFNOSUPPORT;
	}

	/* FIXME:
	 * iface has not yet been assigned to context so there is currently
	 * no way to know which interface to operate on. Therefore this driver
	 * only supports one device node.
	 */
	dev = &esp_driver_data;

	sock = esp_socket_get(dev, *context);
	if (!sock) {
		LOG_ERR("No socket available!");
		return -ENOMEM;
	}

	return 0;
}

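/* net_offload API implementation exposed to the network stack. */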
static struct net_offload esp_offload = {
	.get = esp_get,
	.bind = esp_bind,
	.listen = esp_listen,
	.connect = esp_connect,
	.accept = esp_accept,
	.send = esp_send,
	.sendto = esp_sendto,
	.recv = esp_recv,
	.put = esp_put,
};

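/* Install the offload API on the network interface during driver
 * initialization.
 */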
int esp_offload_init(struct net_if *iface)
{
	iface->if_dev->offload = &esp_offload;

	return 0;
}