/*
 * Copyright (c) 2023 Antmicro
 * Copyright (c) 2024 Silicon Laboratories Inc.
 * SPDX-License-Identifier: Apache-2.0
 */
#include <zephyr/net/net_offload.h>
#include <zephyr/logging/log.h>
#include <assert.h>

#include "siwx91x_wifi.h"
#include "siwx91x_wifi_socket.h"

#include "sl_status.h"
#include "sl_net_ip_types.h"
#include "sl_net_si91x.h"
#include "sl_si91x_types.h"
#include "sl_si91x_socket.h"
#include "sl_si91x_socket_utility.h"

LOG_MODULE_DECLARE(siwx91x_wifi);

BUILD_ASSERT(NUMBER_OF_SOCKETS < sizeof(uint32_t) * 8);
BUILD_ASSERT(NUMBER_OF_SOCKETS < SIZEOF_FIELD(sl_si91x_fd_set, __fds_bits) * 8);

NET_BUF_POOL_FIXED_DEFINE(siwx91x_tx_pool, 1, NET_ETH_MTU, 0, NULL);
NET_BUF_POOL_FIXED_DEFINE(siwx91x_rx_pool, 10, NET_ETH_MTU, 0, NULL);

enum offloaded_net_if_types siwx91x_get_type(void)
{
	return L2_OFFLOADED_NET_IF_TYPE_WIFI;
}

/* SiWx91x does not use the standard struct sockaddr layout (even though it
 * uses the same name):
 * - the port number is little-endian, while POSIX uses big-endian
 * - IPv6 addresses are byte-swapped
 * Note: this function allows in == out.
 */
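/* Illustrative example (assuming a little-endian host): a port stored as
 * htons(8080) in the Zephyr sockaddr comes out as the native value 8080 in
 * the copy handed to the WiseConnect API.
 */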
static void siwx91x_sockaddr_swap_bytes(struct sockaddr *out,
					const struct sockaddr *in, socklen_t in_len)
{
	const struct sockaddr_in6 *in6 = (const struct sockaddr_in6 *)in;
	struct sockaddr_in6 *out6 = (struct sockaddr_in6 *)out;

	/* In Zephyr, size of sockaddr == size of sockaddr_storage
	 * (while in POSIX sockaddr is smaller than sockaddr_storage).
	 */
	memcpy(out, in, in_len);
	if (in->sa_family == AF_INET6) {
		ARRAY_FOR_EACH(in6->sin6_addr.s6_addr32, i) {
			out6->sin6_addr.s6_addr32[i] = ntohl(in6->sin6_addr.s6_addr32[i]);
		}
		out6->sin6_port = ntohs(in6->sin6_port);
	} else if (in->sa_family == AF_INET) {
		out6->sin6_port = ntohs(in6->sin6_port);
	}
}

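/* Called once the interface has joined a network: request a DHCP-assigned
 * IPv4 address from the WiseConnect stack and register it on the Zephyr
 * interface.
 */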
void siwx91x_on_join_ipv4(struct siwx91x_dev *sidev)
{
	sl_net_ip_configuration_t ip_config4 = {
		.mode = SL_IP_MANAGEMENT_DHCP,
		.type = SL_IPV4,
	};
	struct in_addr addr4 = { };
	int ret;

	if (!IS_ENABLED(CONFIG_NET_IPV4)) {
		return;
	}
	/* FIXME: support for static IP configuration */
	ret = sl_si91x_configure_ip_address(&ip_config4, SL_SI91X_WIFI_CLIENT_VAP_ID);
	if (!ret) {
		memcpy(addr4.s4_addr, ip_config4.ip.v4.ip_address.bytes, sizeof(addr4.s4_addr));
		/* FIXME: also report gateway (net_if_ipv4_router_add()) */
		net_if_ipv4_addr_add(sidev->iface, &addr4, NET_ADDR_DHCP, 0);
	} else {
		LOG_ERR("sl_si91x_configure_ip_address(): %#04x", ret);
	}
}

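/* IPv6 counterpart of siwx91x_on_join_ipv4(): byte-swap the global address
 * reported by the WiseConnect stack into the layout Zephyr expects and
 * register it on the interface. ND is disabled since the offloaded stack
 * already handles DAD.
 */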
void siwx91x_on_join_ipv6(struct siwx91x_dev *sidev)
{
	sl_net_ip_configuration_t ip_config6 = {
		.mode = SL_IP_MANAGEMENT_DHCP,
		.type = SL_IPV6,
	};
	struct in6_addr addr6 = { };
	int ret;

	if (!IS_ENABLED(CONFIG_NET_IPV6)) {
		return;
	}
	/* FIXME: support for static IP configuration */
	ret = sl_si91x_configure_ip_address(&ip_config6, SL_SI91X_WIFI_CLIENT_VAP_ID);
	if (!ret) {
		ARRAY_FOR_EACH(addr6.s6_addr32, i) {
			addr6.s6_addr32[i] = ntohl(ip_config6.ip.v6.global_address.value[i]);
		}
		/* SiWx91x already takes care of DAD and sending ND is not
		 * supported anyway.
		 */
		net_if_flag_set(sidev->iface, NET_IF_IPV6_NO_ND);
		/* FIXME: also report gateway and link local address */
		net_if_ipv6_addr_add(sidev->iface, &addr6, NET_ADDR_AUTOCONF, 0);
	} else {
		LOG_ERR("sl_si91x_configure_ip_address(): %#04x", ret);
	}
}

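/* Read the pending data from an offloaded socket (at most NET_ETH_MTU bytes)
 * into a freshly allocated net_pkt and deliver it through the optional
 * callback.
 */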
static int siwx91x_sock_recv_sync(struct net_context *context,
				  net_context_recv_cb_t cb, void *user_data)
{
	struct net_if *iface = net_context_get_iface(context);
	int sockfd = (int)context->offload_context;
	struct net_pkt *pkt;
	struct net_buf *buf;
	int ret;

	pkt = net_pkt_rx_alloc_on_iface(iface, K_MSEC(100));
	if (!pkt) {
		return -ENOBUFS;
	}
	buf = net_buf_alloc(&siwx91x_rx_pool, K_MSEC(100));
	if (!buf) {
		net_pkt_unref(pkt);
		return -ENOBUFS;
	}
	net_pkt_append_buffer(pkt, buf);

	ret = sl_si91x_recvfrom(sockfd, buf->data, NET_ETH_MTU, 0, NULL, NULL);
	if (ret < 0) {
		net_pkt_unref(pkt);
		/* The packet has been freed, report the error with a NULL packet */
		pkt = NULL;
		ret = -errno;
	} else {
		net_buf_add(buf, ret);
		net_pkt_cursor_init(pkt);
		ret = 0;
	}
	if (cb) {
		cb(context, pkt, NULL, NULL, ret, user_data);
	}
	return ret;
}

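/* Callback invoked by sl_si91x_select() when one of the watched sockets
 * becomes readable: either deliver the data through the callback registered
 * by siwx91x_sock_recv(), or wake up the thread blocked in it, then re-arm
 * the select.
 */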
static void siwx91x_sock_on_recv(sl_si91x_fd_set *read_fd, sl_si91x_fd_set *write_fd,
				 sl_si91x_fd_set *except_fd, int status)
{
	/* When CONFIG_NET_SOCKETS_OFFLOAD is set, only one interface exists */
	struct siwx91x_dev *sidev = net_if_get_first_wifi()->if_dev->dev->data;

	ARRAY_FOR_EACH(sidev->fds_cb, i) {
		if (SL_SI91X_FD_ISSET(i, read_fd)) {
			if (sidev->fds_cb[i].cb) {
				siwx91x_sock_recv_sync(sidev->fds_cb[i].context,
						       sidev->fds_cb[i].cb,
						       sidev->fds_cb[i].user_data);
			} else {
				SL_SI91X_FD_CLR(i, &sidev->fds_watch);
				k_event_post(&sidev->fds_recv_event, 1U << i);
			}
		}
	}

	sl_si91x_select(NUMBER_OF_SOCKETS, &sidev->fds_watch, NULL, NULL, NULL,
			siwx91x_sock_on_recv);
}

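/* net_offload .get handler: allocate an offloaded socket and store its
 * descriptor in the net_context.
 */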
static int siwx91x_sock_get(sa_family_t family, enum net_sock_type type,
			    enum net_ip_protocol ip_proto, struct net_context **context)
{
	struct siwx91x_dev *sidev = net_if_get_first_wifi()->if_dev->dev->data;
	int sockfd;

	sockfd = sl_si91x_socket(family, type, ip_proto);
	if (sockfd < 0) {
		return -errno;
	}
	assert(!sidev->fds_cb[sockfd].cb);
	(*context)->offload_context = (void *)sockfd;
	return sockfd;
}

static int siwx91x_sock_put(struct net_context *context)
{
	struct siwx91x_dev *sidev = net_context_get_iface(context)->if_dev->dev->data;
	int sockfd = (int)context->offload_context;
	int ret;

	SL_SI91X_FD_CLR(sockfd, &sidev->fds_watch);
	memset(&sidev->fds_cb[sockfd], 0, sizeof(sidev->fds_cb[sockfd]));
	sl_si91x_select(NUMBER_OF_SOCKETS, &sidev->fds_watch, NULL, NULL, NULL,
			siwx91x_sock_on_recv);
	ret = sl_si91x_shutdown(sockfd, 0);
	if (ret < 0) {
		ret = -errno;
	}
	return ret;
}

static int siwx91x_sock_bind(struct net_context *context,
			     const struct sockaddr *addr, socklen_t addrlen)
{
	struct siwx91x_dev *sidev = net_context_get_iface(context)->if_dev->dev->data;
	int sockfd = (int)context->offload_context;
	struct sockaddr addr_le;
	int ret;

	/* Zephyr tends to call bind() even if the TCP socket is a client. The
	 * 917 returns an error in this case.
	 */
	if (net_context_get_proto(context) == IPPROTO_TCP &&
	    !((struct sockaddr_in *)addr)->sin_port) {
		return 0;
	}
	siwx91x_sockaddr_swap_bytes(&addr_le, addr, addrlen);
	ret = sl_si91x_bind(sockfd, &addr_le, addrlen);
	if (ret) {
		return -errno;
	}
	/* WiseConnect refuses to run select() on TCP listening sockets */
	if (net_context_get_proto(context) == IPPROTO_UDP) {
		SL_SI91X_FD_SET(sockfd, &sidev->fds_watch);
		sl_si91x_select(NUMBER_OF_SOCKETS, &sidev->fds_watch, NULL, NULL, NULL,
				siwx91x_sock_on_recv);
	}
	return 0;
}

static int siwx91x_sock_connect(struct net_context *context,
				const struct sockaddr *addr, socklen_t addrlen,
				net_context_connect_cb_t cb, int32_t timeout, void *user_data)
{
	struct siwx91x_dev *sidev = net_context_get_iface(context)->if_dev->dev->data;
	int sockfd = (int)context->offload_context;
	struct sockaddr addr_le;
	int ret;

	/* sl_si91x_connect() always returns immediately, so we ignore the timeout */
	siwx91x_sockaddr_swap_bytes(&addr_le, addr, addrlen);
	ret = sl_si91x_connect(sockfd, &addr_le, addrlen);
	if (ret) {
		ret = -errno;
	}
	SL_SI91X_FD_SET(sockfd, &sidev->fds_watch);
	sl_si91x_select(NUMBER_OF_SOCKETS, &sidev->fds_watch, NULL, NULL, NULL,
			siwx91x_sock_on_recv);
	net_context_set_state(context, NET_CONTEXT_CONNECTED);
	if (cb) {
		cb(context, ret, user_data);
	}
	return ret;
}

static int siwx91x_sock_listen(struct net_context *context, int backlog)
{
	int sockfd = (int)context->offload_context;
	int ret;

	ret = sl_si91x_listen(sockfd, backlog);
	if (ret) {
		return -errno;
	}
	net_context_set_state(context, NET_CONTEXT_LISTENING);
	return 0;
}

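/* net_offload .accept handler. Only blocking accept (timeout < 0, i.e.
 * K_FOREVER) is supported. A new net_context is allocated for the peer, but
 * the socket it got from siwx91x_sock_get() is released immediately since
 * sl_si91x_accept() allocates its own descriptor.
 */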
static int siwx91x_sock_accept(struct net_context *context,
			       net_tcp_accept_cb_t cb, int32_t timeout, void *user_data)
{
	struct siwx91x_dev *sidev = net_context_get_iface(context)->if_dev->dev->data;
	int sockfd = (int)context->offload_context;
	struct net_context *newcontext;
	struct sockaddr addr_le;
	int ret;

	/* TODO: support timeout != K_FOREVER */
	assert(timeout < 0);

	ret = net_context_get(net_context_get_family(context),
			      net_context_get_type(context),
			      net_context_get_proto(context), &newcontext);
	if (ret < 0) {
		return ret;
	}
	/* net_context_get() calls siwx91x_sock_get() but sl_si91x_accept() also
	 * allocates a socket.
	 */
	ret = siwx91x_sock_put(newcontext);
	if (ret < 0) {
		return ret;
	}
	/* The iface is reset when getting a new context. */
	newcontext->iface = context->iface;
	ret = sl_si91x_accept(sockfd, &addr_le, sizeof(addr_le));
	if (ret < 0) {
		return -errno;
	}
	newcontext->flags |= NET_CONTEXT_REMOTE_ADDR_SET;
	newcontext->offload_context = (void *)ret;
	siwx91x_sockaddr_swap_bytes(&newcontext->remote, &addr_le, sizeof(addr_le));

	SL_SI91X_FD_SET(ret, &sidev->fds_watch);
	sl_si91x_select(NUMBER_OF_SOCKETS, &sidev->fds_watch, NULL, NULL, NULL,
			siwx91x_sock_on_recv);
	if (cb) {
		cb(newcontext, &addr_le, sizeof(addr_le), 0, user_data);
	}

	return 0;
}

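/* net_offload .sendto handler: linearize the (possibly fragmented) net_pkt
 * into a buffer from the TX pool and pass it to sl_si91x_sendto(). The packet
 * is consumed on success and left to the caller on error.
 */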
static int siwx91x_sock_sendto(struct net_pkt *pkt,
			       const struct sockaddr *addr, socklen_t addrlen,
			       net_context_send_cb_t cb, int32_t timeout, void *user_data)
{
	struct net_context *context = pkt->context;
	int sockfd = (int)context->offload_context;
	struct sockaddr addr_le;
	struct net_buf *buf;
	int ret;

	/* struct net_pkt uses fragmented buffers while the SiWx91x API needs a
	 * contiguous buffer.
	 */
	if (net_pkt_get_len(pkt) > NET_ETH_MTU) {
		LOG_ERR("unexpected buffer size");
		ret = -ENOBUFS;
		goto out_cb;
	}
	buf = net_buf_alloc(&siwx91x_tx_pool, K_FOREVER);
	if (!buf) {
		ret = -ENOBUFS;
		goto out_cb;
	}
	if (net_pkt_read(pkt, buf->data, net_pkt_get_len(pkt))) {
		ret = -ENOBUFS;
		goto out_release_buf;
	}
	net_buf_add(buf, net_pkt_get_len(pkt));

	/* sl_si91x_sendto() always returns immediately, so we ignore the timeout */
	siwx91x_sockaddr_swap_bytes(&addr_le, addr, addrlen);
	ret = sl_si91x_sendto(sockfd, buf->data, net_pkt_get_len(pkt), 0, &addr_le, addrlen);
	if (ret < 0) {
		ret = -errno;
		goto out_release_buf;
	}
	net_pkt_unref(pkt);

out_release_buf:
	net_buf_unref(buf);

out_cb:
	if (cb) {
		/* Use the saved context: pkt may already have been unreferenced */
		cb(context, ret, user_data);
	}
	return ret;
}

static int siwx91x_sock_send(struct net_pkt *pkt,
			     net_context_send_cb_t cb, int32_t timeout, void *user_data)
{
	return siwx91x_sock_sendto(pkt, NULL, 0, cb, timeout, user_data);
}

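/* net_offload .recv handler. With a zero timeout the callback is registered
 * for asynchronous delivery from siwx91x_sock_on_recv(); with a non-zero
 * timeout the call waits (bounded, or forever when negative) for the receive
 * event and reads synchronously when data is available.
 */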
static int siwx91x_sock_recv(struct net_context *context,
			     net_context_recv_cb_t cb, int32_t timeout, void *user_data)
{
	struct net_if *iface = net_context_get_iface(context);
	struct siwx91x_dev *sidev = iface->if_dev->dev->data;
	int sockfd = (int)context->offload_context;
	int ret;

	ret = k_event_wait(&sidev->fds_recv_event, 1U << sockfd, false,
			   timeout < 0 ? K_FOREVER : K_MSEC(timeout));
	if (timeout == 0) {
		sidev->fds_cb[sockfd].context = context;
		sidev->fds_cb[sockfd].cb = cb;
		sidev->fds_cb[sockfd].user_data = user_data;
	} else {
		memset(&sidev->fds_cb[sockfd], 0, sizeof(sidev->fds_cb[sockfd]));
	}

	if (ret) {
		k_event_clear(&sidev->fds_recv_event, 1U << sockfd);
		ret = siwx91x_sock_recv_sync(context, cb, user_data);
		SL_SI91X_FD_SET(sockfd, &sidev->fds_watch);
	}

	sl_si91x_select(NUMBER_OF_SOCKETS, &sidev->fds_watch, NULL, NULL, NULL,
			siwx91x_sock_on_recv);
	return ret;
}

static struct net_offload siwx91x_offload = {
	.get = siwx91x_sock_get,
	.put = siwx91x_sock_put,
	.bind = siwx91x_sock_bind,
	.listen = siwx91x_sock_listen,
	.connect = siwx91x_sock_connect,
	.accept = siwx91x_sock_accept,
	.sendto = siwx91x_sock_sendto,
	.send = siwx91x_sock_send,
	.recv = siwx91x_sock_recv,
};

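/* Hook the offload API into the interface and initialize the event used to
 * wake threads blocked in siwx91x_sock_recv().
 */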
void siwx91x_sock_init(struct net_if *iface)
{
	struct siwx91x_dev *sidev = iface->if_dev->dev->data;

	iface->if_dev->offload = &siwx91x_offload;
	k_event_init(&sidev->fds_recv_event);
}