/*
 * Copyright (c) 2019 Tobias Svehagen
 * Copyright (c) 2020 Grinn
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include "esp.h"

#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(wifi_esp_at, CONFIG_WIFI_LOG_LEVEL);

#define RX_NET_PKT_ALLOC_TIMEOUT \
	K_MSEC(CONFIG_WIFI_ESP_AT_RX_NET_PKT_ALLOC_TIMEOUT)

struct esp_workq_flush_data {
	struct k_work work;
	struct k_sem sem;
};

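/*
 * Claim a free socket slot and bind it to @context. Returns the socket with
 * an initial reference taken, or NULL when every slot is already in use.
 */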
struct esp_socket *esp_socket_get(struct esp_data *data,
				  struct net_context *context)
{
	struct esp_socket *sock = data->sockets;
	struct esp_socket *sock_end = sock + ARRAY_SIZE(data->sockets);

	for (; sock < sock_end; sock++) {
		if (!esp_socket_flags_test_and_set(sock, ESP_SOCK_IN_USE)) {
			/* Slot claimed, bind it to the net_context */
			sock->context = context;
			context->offload_context = sock;

			sock->connect_cb = NULL;
			sock->recv_cb = NULL;

			atomic_inc(&sock->refcount);

			return sock;
		}
	}

	return NULL;
}

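/*
 * Release a socket slot by clearing all of its flags, including
 * ESP_SOCK_IN_USE.
 */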
int esp_socket_put(struct esp_socket *sock)
{
	atomic_clear(&sock->flags);

	return 0;
}

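/*
 * Take an extra reference on @sock. The CAS loop only increments a non-zero
 * refcount, so a socket whose count has already dropped to zero is not
 * revived; NULL is returned in that case.
 */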
struct esp_socket *esp_socket_ref(struct esp_socket *sock)
{
	atomic_val_t ref;

	do {
		ref = atomic_get(&sock->refcount);
		if (!ref) {
			return NULL;
		}
	} while (!atomic_cas(&sock->refcount, ref, ref + 1));

	return sock;
}

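/*
 * Drop a reference on @sock. If the refcount is already zero this is a
 * no-op; otherwise the count is decremented and sem_free is given.
 */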
void esp_socket_unref(struct esp_socket *sock)
{
	atomic_val_t ref;

	do {
		ref = atomic_get(&sock->refcount);
		if (!ref) {
			return;
		}
	} while (!atomic_cas(&sock->refcount, ref, ref - 1));

	k_sem_give(&sock->sem_free);
}

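/*
 * Initialize all socket slots in @data: assign index and link ID, reset
 * flags and refcount, and set up each slot's lock, data-ready semaphore,
 * work items and TX FIFO.
 */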
void esp_socket_init(struct esp_data *data)
{
	struct esp_socket *sock;
	int i;

	for (i = 0; i < ARRAY_SIZE(data->sockets); ++i) {
		sock = &data->sockets[i];
		sock->idx = i;
		sock->link_id = i;
		atomic_clear(&sock->refcount);
		atomic_clear(&sock->flags);
		k_mutex_init(&sock->lock);
		k_sem_init(&sock->sem_data_ready, 0, 1);
		k_work_init(&sock->connect_work, esp_connect_work);
		k_work_init(&sock->recvdata_work, esp_recvdata_work);
		k_work_init(&sock->close_work, esp_close_work);
		k_work_init(&sock->send_work, esp_send_work);
		k_fifo_init(&sock->tx_fifo);
	}
}

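/*
 * Allocate an RX net_pkt and copy @len bytes, starting at @offset, from the
 * net_buf fragment chain @src into it. Returns NULL if allocation or the
 * copy fails.
 */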
static struct net_pkt *esp_socket_prepare_pkt(struct esp_socket *sock,
					      struct net_buf *src,
					      size_t offset, size_t len)
{
	struct esp_data *data = esp_socket_to_dev(sock);
	struct net_buf *frag;
	struct net_pkt *pkt;
	size_t to_copy;

	pkt = net_pkt_rx_alloc_with_buffer(data->net_iface, len, AF_UNSPEC,
					   0, RX_NET_PKT_ALLOC_TIMEOUT);
	if (!pkt) {
		return NULL;
	}

	frag = src;

	/* find the right fragment to start copying from */
	while (frag && offset >= frag->len) {
		offset -= frag->len;
		frag = frag->frags;
	}

	/* traverse the fragment chain until len bytes are copied */
	while (frag && len > 0) {
		to_copy = MIN(len, frag->len - offset);
		if (net_pkt_write(pkt, frag->data + offset, to_copy) != 0) {
			net_pkt_unref(pkt);
			return NULL;
		}

		/* to_copy is always <= len */
		len -= to_copy;
		frag = frag->frags;

		/* after the first iteration, this value will be 0 */
		offset = 0;
	}

	net_pkt_set_context(pkt, sock->context);
	net_pkt_cursor_init(pkt);

	return pkt;
}

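/*
 * Deliver received data to the socket owner. Data arriving on a link that is
 * considered closed is dropped; otherwise a net_pkt is built and passed to
 * the registered recv callback, or discarded if no callback is set.
 */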
void esp_socket_rx(struct esp_socket *sock, struct net_buf *buf,
		   size_t offset, size_t len)
{
	struct net_pkt *pkt;
	atomic_val_t flags;

	flags = esp_socket_flags(sock);

#ifdef CONFIG_WIFI_ESP_AT_PASSIVE_MODE
	/* In passive receive mode, the ESP modem buffers RX data and keeps it
	 * available even after the peer has closed the connection.
	 */
	if (!(flags & ESP_SOCK_CONNECTED) &&
	    !(flags & ESP_SOCK_CLOSE_PENDING)) {
#else
	if (!(flags & ESP_SOCK_CONNECTED) ||
	    (flags & ESP_SOCK_CLOSE_PENDING)) {
#endif
		LOG_DBG("Received data on closed link %d", sock->link_id);
		return;
	}

	pkt = esp_socket_prepare_pkt(sock, buf, offset, len);
	if (!pkt) {
		LOG_ERR("Failed to get net_pkt: len %zu", len);
		if (esp_socket_type(sock) == SOCK_STREAM) {
			if (!esp_socket_flags_test_and_set(sock,
						ESP_SOCK_CLOSE_PENDING)) {
				esp_socket_work_submit(sock, &sock->close_work);
			}
		}
		return;
	}

#ifdef CONFIG_NET_SOCKETS
	/* We need to claim the net_context mutex here so that the ordering of
	 * net_context and socket mutex claims matches the TX code path.
	 * Failure to do so can lead to deadlocks.
	 */
	if (sock->context->cond.lock) {
		k_mutex_lock(sock->context->cond.lock, K_FOREVER);
	}
#endif /* CONFIG_NET_SOCKETS */
	k_mutex_lock(&sock->lock, K_FOREVER);
	if (sock->recv_cb) {
		sock->recv_cb(sock->context, pkt, NULL, NULL,
			      0, sock->recv_user_data);
		k_sem_give(&sock->sem_data_ready);
	} else {
		/* Discard */
		net_pkt_unref(pkt);
	}
	k_mutex_unlock(&sock->lock);
#ifdef CONFIG_NET_SOCKETS
	if (sock->context->cond.lock) {
		k_mutex_unlock(sock->context->cond.lock);
	}
#endif /* CONFIG_NET_SOCKETS */
}

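/* Request that the modem close the link used by @sock via AT+CIPCLOSE. */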
void esp_socket_close(struct esp_socket *sock)
{
	struct esp_data *dev = esp_socket_to_dev(sock);
	char cmd_buf[sizeof("AT+CIPCLOSE=000")];
	int ret;

	snprintk(cmd_buf, sizeof(cmd_buf), "AT+CIPCLOSE=%d",
		 sock->link_id);
	ret = esp_cmd_send(dev, NULL, 0, cmd_buf, ESP_CMD_TIMEOUT);
	if (ret < 0) {
		/* FIXME:
		 * If the link doesn't close correctly here, esp_socket_get()
		 * could allocate a socket with an already open link.
		 */
		LOG_ERR("Failed to close link %d, ret %d",
			sock->link_id, ret);
	}
}

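/* Work handler used as a flush marker: it only signals the flush semaphore. */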
static void esp_workq_flush_work(struct k_work *work)
{
	struct esp_workq_flush_data *flush =
		CONTAINER_OF(work, struct esp_workq_flush_data, work);

	k_sem_give(&flush->sem);
}

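/*
 * Stop further work submissions for @sock and wait for already queued work
 * to finish by submitting a marker work item and blocking until it runs.
 */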
void esp_socket_workq_stop_and_flush(struct esp_socket *sock)
{
	struct esp_workq_flush_data flush;

	k_work_init(&flush.work, esp_workq_flush_work);
	k_sem_init(&flush.sem, 0, 1);

	k_mutex_lock(&sock->lock, K_FOREVER);
	esp_socket_flags_set(sock, ESP_SOCK_WORKQ_STOPPED);
	__esp_socket_work_submit(sock, &flush.work);
	k_mutex_unlock(&sock->lock);

	k_sem_take(&flush.sem, K_FOREVER);
}