/*
 * Copyright (c) 2019 Tobias Svehagen
 * Copyright (c) 2020 Grinn
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include "esp.h"

#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(wifi_esp_at, CONFIG_WIFI_LOG_LEVEL);

#define RX_NET_PKT_ALLOC_TIMEOUT \
	K_MSEC(CONFIG_WIFI_ESP_AT_RX_NET_PKT_ALLOC_TIMEOUT)

struct esp_workq_flush_data {
	struct k_work work;
	struct k_sem sem;
};

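/* Claim a free socket slot, bind it to @context and reset its state.
 * The returned socket holds one reference; returns NULL if all slots
 * are in use.
 */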
struct esp_socket *esp_socket_get(struct esp_data *data,
				  struct net_context *context)
{
	struct esp_socket *sock = data->sockets;
	struct esp_socket *sock_end = sock + ARRAY_SIZE(data->sockets);

	for (; sock < sock_end; sock++) {
		if (!esp_socket_flags_test_and_set(sock, ESP_SOCK_IN_USE)) {
			/* claim succeeded: bind the socket to the context
			 * and reset its state
			 */
			sock->context = context;
			context->offload_context = sock;

			sock->connect_cb = NULL;
			sock->recv_cb = NULL;
			memset(&sock->src, 0x0, sizeof(sock->src));
			memset(&sock->dst, 0x0, sizeof(sock->dst));

			atomic_inc(&sock->refcount);

			return sock;
		}
	}

	return NULL;
}

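/* Release a socket slot by clearing all of its flags, including
 * ESP_SOCK_IN_USE, which makes it claimable by esp_socket_get() again.
 */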
int esp_socket_put(struct esp_socket *sock)
{
	atomic_clear(&sock->flags);

	return 0;
}

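/* Take an additional reference on @sock. A compare-and-swap loop is
 * used so that a refcount that has already dropped to zero is never
 * resurrected; in that case NULL is returned.
 */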
struct esp_socket *esp_socket_ref(struct esp_socket *sock)
{
	atomic_val_t ref;

	do {
		ref = atomic_get(&sock->refcount);
		if (!ref) {
			return NULL;
		}
	} while (!atomic_cas(&sock->refcount, ref, ref + 1));

	return sock;
}

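/* Drop one reference on @sock. The CAS loop mirrors esp_socket_ref();
 * releasing the last reference signals sem_free so that a waiter in the
 * close path can proceed.
 */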
void esp_socket_unref(struct esp_socket *sock)
{
	atomic_val_t ref;

	do {
		ref = atomic_get(&sock->refcount);
		if (!ref) {
			return;
		}
	} while (!atomic_cas(&sock->refcount, ref, ref - 1));

	/* signal free only on the 1-to-0 transition */
	if (ref > 1) {
		return;
	}

	k_sem_give(&sock->sem_free);
}

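/* One-time initialization of every socket slot: index, link ID,
 * synchronization primitives, work items and the TX FIFO.
 */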
void esp_socket_init(struct esp_data *data)
{
	struct esp_socket *sock;
	int i;

	for (i = 0; i < ARRAY_SIZE(data->sockets); ++i) {
		sock = &data->sockets[i];
		sock->idx = i;
		sock->link_id = i;
		atomic_clear(&sock->refcount);
		atomic_clear(&sock->flags);
		k_mutex_init(&sock->lock);
		k_sem_init(&sock->sem_data_ready, 0, 1);
		k_work_init(&sock->connect_work, esp_connect_work);
		k_work_init(&sock->recvdata_work, esp_recvdata_work);
		k_work_init(&sock->close_work, esp_close_work);
		k_work_init(&sock->send_work, esp_send_work);
		k_fifo_init(&sock->tx_fifo);
	}
}

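/* Allocate a net_pkt and copy @len bytes from the net_buf fragment
 * chain @src, starting @offset bytes in. Returns NULL on allocation or
 * copy failure.
 */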
static struct net_pkt *esp_socket_prepare_pkt(struct esp_socket *sock,
					      struct net_buf *src,
					      size_t offset, size_t len)
{
	struct esp_data *data = esp_socket_to_dev(sock);
	struct net_buf *frag;
	struct net_pkt *pkt;
	size_t to_copy;

	pkt = net_pkt_rx_alloc_with_buffer(data->net_iface, len, NET_AF_UNSPEC,
					   0, RX_NET_PKT_ALLOC_TIMEOUT);
	if (!pkt) {
		return NULL;
	}

	frag = src;

	/* find the right fragment to start copying from */
	while (frag && offset >= frag->len) {
		offset -= frag->len;
		frag = frag->frags;
	}

	/* traverse the fragment chain until len bytes are copied */
	while (frag && len > 0) {
		to_copy = MIN(len, frag->len - offset);
		if (net_pkt_write(pkt, frag->data + offset, to_copy) != 0) {
			net_pkt_unref(pkt);
			return NULL;
		}

		/* to_copy is always <= len */
		len -= to_copy;
		frag = frag->frags;

		/* after the first iteration, this value will be 0 */
		offset = 0;
	}

	net_pkt_set_context(pkt, sock->context);
	net_pkt_cursor_init(pkt);

#if defined(CONFIG_WIFI_ESP_AT_CIPDINFO_USE)
	memcpy(&pkt->remote, &sock->context->remote, sizeof(pkt->remote));
	pkt->family = sock->src.sa_family;
#endif

	return pkt;
}

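/* Deliver @len bytes received on a link to the socket's recv_cb. Data
 * is dropped if the link is closed or no callback is registered; for a
 * NET_SOCK_STREAM socket, a failed net_pkt allocation schedules a close.
 */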
void esp_socket_rx(struct esp_socket *sock, struct net_buf *buf,
		   size_t offset, size_t len)
{
	struct net_pkt *pkt;
	atomic_val_t flags;

	flags = esp_socket_flags(sock);

#ifdef CONFIG_WIFI_ESP_AT_PASSIVE_MODE
	/* In passive receive mode, the ESP modem buffers RX data and keeps
	 * it available even after the peer has closed the connection.
	 */
	if (!(flags & ESP_SOCK_CONNECTED) &&
	    !(flags & ESP_SOCK_CLOSE_PENDING)) {
#else
	if (!(flags & ESP_SOCK_CONNECTED) ||
	    (flags & ESP_SOCK_CLOSE_PENDING)) {
#endif
		LOG_DBG("Received data on closed link %d", sock->link_id);
		return;
	}

	pkt = esp_socket_prepare_pkt(sock, buf, offset, len);
	if (!pkt) {
		LOG_ERR("Failed to get net_pkt: len %zu", len);
		if (esp_socket_type(sock) == NET_SOCK_STREAM) {
			if (!esp_socket_flags_test_and_set(sock,
					ESP_SOCK_CLOSE_PENDING)) {
				esp_socket_work_submit(sock, &sock->close_work);
			}
		}
		return;
	}

#ifdef CONFIG_NET_SOCKETS
	/* Claim the net_context mutex here so that the ordering of
	 * net_context and socket mutex claims matches the TX code path.
	 * Failing to do so can lead to deadlocks.
	 */
	/* In the close path, a deadlock can occur in the following scenario:
	 * 1. on_cmd_ipd/esp_socket_rx invokes esp_socket_ref_from_link_id
	 *    and increments refcount.
	 * 2. zvfs_close/esp_put locks cond.lock.
	 * 3. zvfs_close/esp_put waits on sem_free.
	 * 4. on_cmd_ipd/esp_socket_rx waits on cond.lock before
	 *    esp_socket_unref.
	 * 5. sem_free waits on esp_socket_unref for refcount to reach zero.
	 */
	if (sock->context->cond.lock) {
		int ret = -EAGAIN;

		/*
		 * If the socket is closing, ignore the packet instead of
		 * risking the deadlock described above.
		 */
		while (atomic_get(&sock->refcount) > 1 && ret == -EAGAIN) {
			ret = k_mutex_lock(sock->context->cond.lock, K_SECONDS(1));
		}
		if (ret != 0) {
			/* Discard */
			net_pkt_unref(pkt);
			return;
		}
	}
#endif /* CONFIG_NET_SOCKETS */
	k_mutex_lock(&sock->lock, K_FOREVER);
	if (sock->recv_cb) {
		sock->recv_cb(sock->context, pkt, NULL, NULL,
			      0, sock->recv_user_data);
		k_sem_give(&sock->sem_data_ready);
	} else {
		/* Discard */
		net_pkt_unref(pkt);
	}
	k_mutex_unlock(&sock->lock);
#ifdef CONFIG_NET_SOCKETS
	if (sock->context->cond.lock) {
		k_mutex_unlock(sock->context->cond.lock);
	}
#endif /* CONFIG_NET_SOCKETS */
}

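/* Ask the modem to close the link backing @sock via AT+CIPCLOSE;
 * a failure is only logged.
 */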
void esp_socket_close(struct esp_socket *sock)
{
	struct esp_data *dev = esp_socket_to_dev(sock);
	char cmd_buf[sizeof("AT+CIPCLOSE=000")];
	int ret;

	snprintk(cmd_buf, sizeof(cmd_buf), "AT+CIPCLOSE=%d",
		 sock->link_id);
	ret = esp_cmd_send(dev, NULL, 0, cmd_buf, ESP_CMD_TIMEOUT);
	if (ret < 0) {
		/* FIXME:
		 * If the link doesn't close correctly here, esp_get could
		 * allocate a socket with an already open link.
		 */
		LOG_ERR("Failed to close link %d, ret %d",
			sock->link_id, ret);
	}
}

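/* Flush marker: runs after all previously queued work for the socket
 * and wakes the thread waiting in esp_socket_workq_stop_and_flush().
 */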
static void esp_workq_flush_work(struct k_work *work)
{
	struct esp_workq_flush_data *flush =
		CONTAINER_OF(work, struct esp_workq_flush_data, work);

	k_sem_give(&flush->sem);
}

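/* Stop accepting new work for @sock, then submit a final flush work
 * item and block until it runs, guaranteeing that all previously
 * submitted work has completed.
 */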
void esp_socket_workq_stop_and_flush(struct esp_socket *sock)
{
	struct esp_workq_flush_data flush;

	k_work_init(&flush.work, esp_workq_flush_work);
	k_sem_init(&flush.sem, 0, 1);

	k_mutex_lock(&sock->lock, K_FOREVER);
	esp_socket_flags_set(sock, ESP_SOCK_WORKQ_STOPPED);
	__esp_socket_work_submit(sock, &flush.work);
	k_mutex_unlock(&sock->lock);

	k_sem_take(&flush.sem, K_FOREVER);
}