/*
 * Copyright (c) 2019 Tobias Svehagen
 * Copyright (c) 2020 Grinn
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include "esp.h"

#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(wifi_esp_at, CONFIG_WIFI_LOG_LEVEL);

#define RX_NET_PKT_ALLOC_TIMEOUT \
	K_MSEC(CONFIG_WIFI_ESP_AT_RX_NET_PKT_ALLOC_TIMEOUT)

struct esp_workq_flush_data {
	struct k_work work;
	struct k_sem sem;
};

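/* Claim a free socket slot, bind it to the given net_context and take the
 * initial reference.
 */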
struct esp_socket *esp_socket_get(struct esp_data *data,
				  struct net_context *context)
{
	struct esp_socket *sock = data->sockets;
	struct esp_socket *sock_end = sock + ARRAY_SIZE(data->sockets);

	for (; sock < sock_end; sock++) {
		if (!esp_socket_flags_test_and_set(sock, ESP_SOCK_IN_USE)) {
			/* here we should configure all the stuff needed */
			sock->context = context;
			context->offload_context = sock;

			sock->connect_cb = NULL;
			sock->recv_cb = NULL;
			memset(&sock->src, 0x0, sizeof(sock->src));
			memset(&sock->dst, 0x0, sizeof(sock->dst));

			atomic_inc(&sock->refcount);

			return sock;
		}
	}

	return NULL;
}

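/* Release the socket slot by clearing all of its flags, including
 * ESP_SOCK_IN_USE.
 */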
int esp_socket_put(struct esp_socket *sock)
{
	atomic_clear(&sock->flags);

	return 0;
}

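/* Take a reference, unless the refcount has already dropped to zero. */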
struct esp_socket *esp_socket_ref(struct esp_socket *sock)
{
	atomic_val_t ref;

	do {
		ref = atomic_get(&sock->refcount);
		if (!ref) {
			return NULL;
		}
	} while (!atomic_cas(&sock->refcount, ref, ref + 1));

	return sock;
}

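/* Drop a reference and wake any waiter blocked on sem_free. */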
void esp_socket_unref(struct esp_socket *sock)
{
	atomic_val_t ref;

	do {
		ref = atomic_get(&sock->refcount);
		if (!ref) {
			return;
		}
	} while (!atomic_cas(&sock->refcount, ref, ref - 1));

	k_sem_give(&sock->sem_free);
}

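/* One-time initialization of every socket slot: link IDs, locks, semaphores,
 * work items and the TX FIFO.
 */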
void esp_socket_init(struct esp_data *data)
{
	struct esp_socket *sock;
	int i;

	for (i = 0; i < ARRAY_SIZE(data->sockets); ++i) {
		sock = &data->sockets[i];
		sock->idx = i;
		sock->link_id = i;
		atomic_clear(&sock->refcount);
		atomic_clear(&sock->flags);
		k_mutex_init(&sock->lock);
		k_sem_init(&sock->sem_data_ready, 0, 1);
		k_work_init(&sock->connect_work, esp_connect_work);
		k_work_init(&sock->recvdata_work, esp_recvdata_work);
		k_work_init(&sock->close_work, esp_close_work);
		k_work_init(&sock->send_work, esp_send_work);
		k_fifo_init(&sock->tx_fifo);
	}
}

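/* Copy len bytes, starting at offset, from the received net_buf chain into a
 * freshly allocated RX net_pkt bound to the socket's net_context.
 */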
static struct net_pkt *esp_socket_prepare_pkt(struct esp_socket *sock,
					      struct net_buf *src,
					      size_t offset, size_t len)
{
	struct esp_data *data = esp_socket_to_dev(sock);
	struct net_buf *frag;
	struct net_pkt *pkt;
	size_t to_copy;

	pkt = net_pkt_rx_alloc_with_buffer(data->net_iface, len, AF_UNSPEC,
					   0, RX_NET_PKT_ALLOC_TIMEOUT);
	if (!pkt) {
		return NULL;
	}

	frag = src;

	/* find the right fragment to start copying from */
	while (frag && offset >= frag->len) {
		offset -= frag->len;
		frag = frag->frags;
	}

	/* traverse the fragment chain until len bytes are copied */
	while (frag && len > 0) {
		to_copy = MIN(len, frag->len - offset);
		if (net_pkt_write(pkt, frag->data + offset, to_copy) != 0) {
			net_pkt_unref(pkt);
			return NULL;
		}

		/* to_copy is always <= len */
		len -= to_copy;
		frag = frag->frags;

		/* after the first iteration, this value will be 0 */
		offset = 0;
	}

	net_pkt_set_context(pkt, sock->context);
	net_pkt_cursor_init(pkt);

#if defined(CONFIG_WIFI_ESP_AT_CIPDINFO_USE)
	memcpy(&pkt->remote, &sock->context->remote, sizeof(pkt->remote));
	pkt->family = sock->src.sa_family;
#endif

	return pkt;
}

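/* Deliver data received on a link to the socket's receive callback, closing
 * a stream socket if no net_pkt can be allocated for it.
 */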
void esp_socket_rx(struct esp_socket *sock, struct net_buf *buf,
		   size_t offset, size_t len)
{
	struct net_pkt *pkt;
	atomic_val_t flags;

	flags = esp_socket_flags(sock);

#ifdef CONFIG_WIFI_ESP_AT_PASSIVE_MODE
	/* In Passive Receive mode, the ESP modem buffers RX data and keeps it
	 * available even after the peer has closed the connection.
	 */
	if (!(flags & ESP_SOCK_CONNECTED) &&
	    !(flags & ESP_SOCK_CLOSE_PENDING)) {
#else
	if (!(flags & ESP_SOCK_CONNECTED) ||
	    (flags & ESP_SOCK_CLOSE_PENDING)) {
#endif
		LOG_DBG("Received data on closed link %d", sock->link_id);
		return;
	}

	pkt = esp_socket_prepare_pkt(sock, buf, offset, len);
	if (!pkt) {
		LOG_ERR("Failed to get net_pkt: len %zu", len);
		if (esp_socket_type(sock) == SOCK_STREAM) {
			if (!esp_socket_flags_test_and_set(sock,
						ESP_SOCK_CLOSE_PENDING)) {
				esp_socket_work_submit(sock, &sock->close_work);
			}
		}
		return;
	}

#ifdef CONFIG_NET_SOCKETS
	/* We need to claim the net_context mutex here so that the ordering of
	 * net_context and socket mutex claims matches the TX code path. Failure
	 * to do so can lead to deadlocks.
	 */
	if (sock->context->cond.lock) {
		k_mutex_lock(sock->context->cond.lock, K_FOREVER);
	}
#endif /* CONFIG_NET_SOCKETS */
	k_mutex_lock(&sock->lock, K_FOREVER);
	if (sock->recv_cb) {
		sock->recv_cb(sock->context, pkt, NULL, NULL,
			      0, sock->recv_user_data);
		k_sem_give(&sock->sem_data_ready);
	} else {
		/* Discard */
		net_pkt_unref(pkt);
	}
	k_mutex_unlock(&sock->lock);
#ifdef CONFIG_NET_SOCKETS
	if (sock->context->cond.lock) {
		k_mutex_unlock(sock->context->cond.lock);
	}
#endif /* CONFIG_NET_SOCKETS */
}

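/* Ask the modem to close the link with AT+CIPCLOSE. */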
void esp_socket_close(struct esp_socket *sock)
{
	struct esp_data *dev = esp_socket_to_dev(sock);
	char cmd_buf[sizeof("AT+CIPCLOSE=000")];
	int ret;

	snprintk(cmd_buf, sizeof(cmd_buf), "AT+CIPCLOSE=%d",
		 sock->link_id);
	ret = esp_cmd_send(dev, NULL, 0, cmd_buf, ESP_CMD_TIMEOUT);
	if (ret < 0) {
		/* FIXME:
		 * If link doesn't close correctly here, esp_get could
		 * allocate a socket with an already open link.
		 */
		LOG_ERR("Failed to close link %d, ret %d",
			sock->link_id, ret);
	}
}

static void esp_workq_flush_work(struct k_work *work)
{
	struct esp_workq_flush_data *flush =
		CONTAINER_OF(work, struct esp_workq_flush_data, work);

	k_sem_give(&flush->sem);
}

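/* Stop accepting new work for the socket and wait until all work already
 * queued on the driver workqueue has completed.
 */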
void esp_socket_workq_stop_and_flush(struct esp_socket *sock)
{
	struct esp_workq_flush_data flush;

	k_work_init(&flush.work, esp_workq_flush_work);
	k_sem_init(&flush.sem, 0, 1);

	k_mutex_lock(&sock->lock, K_FOREVER);
	esp_socket_flags_set(sock, ESP_SOCK_WORKQ_STOPPED);
	__esp_socket_work_submit(sock, &flush.work);
	k_mutex_unlock(&sock->lock);

	k_sem_take(&flush.sem, K_FOREVER);
}