1 /*
2 * Copyright (c) 2019 Intel Corporation
3 * Copyright (c) 2021 Nordic Semiconductor
4 *
5 * SPDX-License-Identifier: Apache-2.0
6 */
7
8 #include <stdbool.h>
9 #include <zephyr/posix/fcntl.h>
10
11 #include <zephyr/logging/log.h>
12 LOG_MODULE_REGISTER(net_sock_can, CONFIG_NET_SOCKETS_LOG_LEVEL);
13
14 #include <zephyr/kernel.h>
15 #include <zephyr/drivers/entropy.h>
16 #include <zephyr/sys/util.h>
17 #include <zephyr/net/net_context.h>
18 #include <zephyr/net/net_pkt.h>
19 #include <zephyr/net/socket.h>
20 #include <zephyr/internal/syscall_handler.h>
21 #include <zephyr/sys/fdtable.h>
22 #include <zephyr/net/canbus.h>
23 #include <zephyr/net/socketcan.h>
24 #include <zephyr/net/socketcan_utils.h>
25 #include <zephyr/drivers/can.h>
26
27 #include "sockets_internal.h"
28
29 #define MEM_ALLOC_TIMEOUT K_MSEC(50)
30
31 struct can_recv {
32 struct net_if *iface;
33 struct net_context *ctx;
34 socketcan_id_t can_id;
35 socketcan_id_t can_mask;
36 };
37
38 static struct can_recv receivers[CONFIG_NET_SOCKETS_CAN_RECEIVERS];
39
40 extern const struct socket_op_vtable sock_fd_op_vtable;
41
42 static const struct socket_op_vtable can_sock_fd_op_vtable;
43
/* Block until @a fifo has at least one element queued or @a timeout
 * expires. Returns 0 when data is available, or a negative k_poll()
 * error (-EAGAIN on timeout, -EINTR when the wait is cancelled).
 */
static inline int k_fifo_wait_non_empty(struct k_fifo *fifo,
					k_timeout_t timeout)
{
	struct k_poll_event poll_evt[] = {
		K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_FIFO_DATA_AVAILABLE,
					 K_POLL_MODE_NOTIFY_ONLY, fifo),
	};

	return k_poll(poll_evt, ARRAY_SIZE(poll_evt), timeout);
}
54
zcan_socket(int family,int type,int proto)55 int zcan_socket(int family, int type, int proto)
56 {
57 struct net_context *ctx;
58 int fd;
59 int ret;
60
61 fd = zvfs_reserve_fd();
62 if (fd < 0) {
63 return -1;
64 }
65
66 ret = net_context_get(family, type, proto, &ctx);
67 if (ret < 0) {
68 zvfs_free_fd(fd);
69 errno = -ret;
70 return -1;
71 }
72
73 /* Initialize user_data, all other calls will preserve it */
74 ctx->user_data = NULL;
75
76 k_fifo_init(&ctx->recv_q);
77
78 /* Condition variable is used to avoid keeping lock for a long time
79 * when waiting data to be received
80 */
81 k_condvar_init(&ctx->cond.recv);
82
83 zvfs_finalize_typed_fd(fd, ctx, (const struct fd_op_vtable *)&can_sock_fd_op_vtable,
84 ZVFS_MODE_IFSOCK);
85
86 return fd;
87 }
88
zcan_received_cb(struct net_context * ctx,struct net_pkt * pkt,union net_ip_header * ip_hdr,union net_proto_header * proto_hdr,int status,void * user_data)89 static void zcan_received_cb(struct net_context *ctx, struct net_pkt *pkt,
90 union net_ip_header *ip_hdr,
91 union net_proto_header *proto_hdr,
92 int status, void *user_data)
93 {
94 /* The ctx parameter is not really relevant here. It refers to first
95 * net_context that was used when registering CAN socket.
96 * In practice there can be multiple sockets that are interested in
97 * same CAN id packets. That is why we need to implement the dispatcher
98 * which will give the packet to correct net_context(s).
99 */
100 struct net_pkt *clone = NULL;
101 int i;
102
103 for (i = 0; i < ARRAY_SIZE(receivers); i++) {
104 struct can_frame *zframe =
105 (struct can_frame *)net_pkt_data(pkt);
106 struct socketcan_frame sframe;
107
108 if (!receivers[i].ctx ||
109 receivers[i].iface != net_pkt_iface(pkt)) {
110 continue;
111 }
112
113 socketcan_from_can_frame(zframe, &sframe);
114
115 if ((sframe.can_id & receivers[i].can_mask) !=
116 (receivers[i].can_id & receivers[i].can_mask)) {
117 continue;
118 }
119
120 /* If there are multiple receivers configured, we use the
121 * original net_pkt as a template, and just clone it to all
122 * recipients. This is done like this so that we avoid the
123 * original net_pkt being freed while we are cloning it.
124 */
125 if (pkt != NULL && ARRAY_SIZE(receivers) > 1) {
126 /* There are multiple receivers, we need to clone
127 * the packet.
128 */
129 clone = net_pkt_clone(pkt, MEM_ALLOC_TIMEOUT);
130 if (!clone) {
131 /* Sent the packet to at least one recipient
132 * if there is no memory to clone the packet.
133 */
134 clone = pkt;
135 }
136 } else {
137 clone = pkt;
138 }
139
140 ctx = receivers[i].ctx;
141
142 /* To prevent the reader from missing the wake-up signal
143 * as described in commit 1184089 and implemented in sockets.c
144 */
145 if (ctx->cond.lock) {
146 (void)k_mutex_lock(ctx->cond.lock, K_FOREVER);
147 }
148
149 NET_DBG("[%d] ctx %p pkt %p st %d", i, ctx, clone, status);
150
151 /* if pkt is NULL, EOF */
152 if (!clone) {
153 struct net_pkt *last_pkt =
154 k_fifo_peek_tail(&ctx->recv_q);
155
156 if (!last_pkt) {
157 /* If there're no packets in the queue,
158 * recv() may be blocked waiting on it to
159 * become non-empty, so cancel that wait.
160 */
161 sock_set_eof(ctx);
162 k_fifo_cancel_wait(&ctx->recv_q);
163
164 NET_DBG("Marked socket %p as peer-closed", ctx);
165 } else {
166 net_pkt_set_eof(last_pkt, true);
167
168 NET_DBG("Set EOF flag on pkt %p", ctx);
169 }
170 } else {
171 /* Normal packet */
172 net_pkt_set_eof(clone, false);
173
174 k_fifo_put(&ctx->recv_q, clone);
175 }
176
177 if (ctx->cond.lock) {
178 k_mutex_unlock(ctx->cond.lock);
179 }
180
181 k_condvar_signal(&ctx->cond.recv);
182 }
183
184 if (clone && clone != pkt) {
185 net_pkt_unref(pkt);
186 }
187 }
188
/* Bind a CAN socket to the network interface given by can_ifindex and
 * start receiving frames on it.
 *
 * Returns 0 on success, or -1 with errno set. Previously the first two
 * error paths returned raw -EINVAL/-ENOENT while the rest used the
 * errno convention; all paths now report errors consistently.
 */
static int zcan_bind_ctx(struct net_context *ctx, const struct sockaddr *addr,
			 socklen_t addrlen)
{
	struct sockaddr_can *can_addr = (struct sockaddr_can *)addr;
	struct net_if *iface;
	int ret;

	if (addrlen != sizeof(struct sockaddr_can)) {
		errno = EINVAL;
		return -1;
	}

	iface = net_if_get_by_index(can_addr->can_ifindex);
	if (!iface) {
		errno = ENOENT;
		return -1;
	}

	net_context_set_iface(ctx, iface);

	ret = net_context_bind(ctx, addr, addrlen);
	if (ret < 0) {
		errno = -ret;
		return -1;
	}

	/* For CAN socket, we expect to receive packets after call to bind().
	 */
	ret = net_context_recv(ctx, zcan_received_cb, K_NO_WAIT,
			       ctx->user_data);
	if (ret < 0) {
		errno = -ret;
		return -1;
	}

	return 0;
}
224
zcan_sendto_ctx(struct net_context * ctx,const void * buf,size_t len,int flags,const struct sockaddr * dest_addr,socklen_t addrlen)225 ssize_t zcan_sendto_ctx(struct net_context *ctx, const void *buf, size_t len,
226 int flags, const struct sockaddr *dest_addr,
227 socklen_t addrlen)
228 {
229 struct sockaddr_can can_addr;
230 struct can_frame zframe;
231 k_timeout_t timeout = K_FOREVER;
232 int ret;
233
234 /* Setting destination address does not probably make sense here so
235 * ignore it. You need to use bind() to set the CAN interface.
236 */
237 if (dest_addr) {
238 NET_DBG("CAN destination address ignored");
239 }
240
241 if ((flags & ZSOCK_MSG_DONTWAIT) || sock_is_nonblock(ctx)) {
242 timeout = K_NO_WAIT;
243 } else {
244 net_context_get_option(ctx, NET_OPT_SNDTIMEO, &timeout, NULL);
245 }
246
247 if (addrlen == 0) {
248 addrlen = sizeof(struct sockaddr_can);
249 }
250
251 if (dest_addr == NULL) {
252 memset(&can_addr, 0, sizeof(can_addr));
253
254 can_addr.can_ifindex = -1;
255 can_addr.can_family = AF_CAN;
256
257 dest_addr = (struct sockaddr *)&can_addr;
258 }
259
260 NET_ASSERT(len == sizeof(struct socketcan_frame));
261
262 socketcan_to_can_frame((struct socketcan_frame *)buf, &zframe);
263
264 ret = net_context_sendto(ctx, (void *)&zframe, sizeof(zframe),
265 dest_addr, addrlen, NULL, timeout,
266 ctx->user_data);
267 if (ret < 0) {
268 errno = -ret;
269 return -1;
270 }
271
272 return len;
273 }
274
zcan_recvfrom_ctx(struct net_context * ctx,void * buf,size_t max_len,int flags,struct sockaddr * src_addr,socklen_t * addrlen)275 static ssize_t zcan_recvfrom_ctx(struct net_context *ctx, void *buf,
276 size_t max_len, int flags,
277 struct sockaddr *src_addr,
278 socklen_t *addrlen)
279 {
280 struct can_frame zframe;
281 size_t recv_len = 0;
282 k_timeout_t timeout = K_FOREVER;
283 struct net_pkt *pkt;
284
285 if ((flags & ZSOCK_MSG_DONTWAIT) || sock_is_nonblock(ctx)) {
286 timeout = K_NO_WAIT;
287 } else {
288 net_context_get_option(ctx, NET_OPT_RCVTIMEO, &timeout, NULL);
289 }
290
291 if (flags & ZSOCK_MSG_PEEK) {
292 int ret;
293
294 ret = k_fifo_wait_non_empty(&ctx->recv_q, timeout);
295 /* EAGAIN when timeout expired, EINTR when cancelled */
296 if (ret && ret != -EAGAIN && ret != -EINTR) {
297 errno = -ret;
298 return -1;
299 }
300
301 pkt = k_fifo_peek_head(&ctx->recv_q);
302 } else {
303 /* Mechanism as in sockets.c to allow parallel rx/tx
304 */
305 if (!K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
306 int res;
307
308 res = zsock_wait_data(ctx, &timeout);
309 if (res < 0) {
310 errno = -res;
311 return -1;
312 }
313 }
314
315 pkt = k_fifo_get(&ctx->recv_q, timeout);
316 }
317
318 if (!pkt) {
319 errno = EAGAIN;
320 return -1;
321 }
322
323 /* We do not handle any headers here, just pass the whole packet to
324 * the caller.
325 */
326 recv_len = net_pkt_get_len(pkt);
327 if (recv_len > max_len) {
328 recv_len = max_len;
329 }
330
331 if (net_pkt_read(pkt, (void *)&zframe, sizeof(zframe))) {
332 net_pkt_unref(pkt);
333
334 errno = EIO;
335 return -1;
336 }
337
338 NET_ASSERT(recv_len == sizeof(struct socketcan_frame));
339
340 socketcan_from_can_frame(&zframe, (struct socketcan_frame *)buf);
341
342 net_pkt_unref(pkt);
343
344 return recv_len;
345 }
346
/* Validate pointers, then let the generic socket layer handle the
 * option. Returns 0 on success, or -1 with errno set.
 */
static int zcan_getsockopt_ctx(struct net_context *ctx, int level, int optname,
			       void *optval, socklen_t *optlen)
{
	if (optval == NULL || optlen == NULL) {
		errno = EINVAL;
		return -1;
	}

	return sock_fd_op_vtable.getsockopt(ctx, level, optname,
					    optval, optlen);
}
358
/* Non CAN specific options are delegated to the generic socket layer. */
static int zcan_setsockopt_ctx(struct net_context *ctx, int level, int optname,
			       const void *optval, socklen_t optlen)
{
	return sock_fd_op_vtable.setsockopt(ctx, level, optname,
					    optval, optlen);
}
365
can_sock_read_vmeth(void * obj,void * buffer,size_t count)366 static ssize_t can_sock_read_vmeth(void *obj, void *buffer, size_t count)
367 {
368 return zcan_recvfrom_ctx(obj, buffer, count, 0, NULL, 0);
369 }
370
can_sock_write_vmeth(void * obj,const void * buffer,size_t count)371 static ssize_t can_sock_write_vmeth(void *obj, const void *buffer,
372 size_t count)
373 {
374 return zcan_sendto_ctx(obj, buffer, count, 0, NULL, 0);
375 }
376
is_already_attached(struct socketcan_filter * sfilter,struct net_if * iface,struct net_context * ctx)377 static bool is_already_attached(struct socketcan_filter *sfilter,
378 struct net_if *iface,
379 struct net_context *ctx)
380 {
381 int i;
382
383 for (i = 0; i < ARRAY_SIZE(receivers); i++) {
384 if (receivers[i].ctx != ctx && receivers[i].iface == iface &&
385 ((receivers[i].can_id & receivers[i].can_mask) ==
386 (UNALIGNED_GET(&sfilter->can_id) &
387 UNALIGNED_GET(&sfilter->can_mask)))) {
388 return true;
389 }
390 }
391
392 return false;
393 }
394
close_socket(struct net_context * ctx)395 static int close_socket(struct net_context *ctx)
396 {
397 const struct canbus_api *api;
398 struct net_if *iface;
399 const struct device *dev;
400
401 iface = net_context_get_iface(ctx);
402 dev = net_if_get_device(iface);
403 api = dev->api;
404
405 if (!api || !api->close) {
406 return -ENOTSUP;
407 }
408
409 api->close(dev, net_context_get_can_filter_id(ctx));
410
411 return 0;
412 }
413
can_close_socket(struct net_context * ctx)414 static int can_close_socket(struct net_context *ctx)
415 {
416 int i, ret;
417
418 for (i = 0; i < ARRAY_SIZE(receivers); i++) {
419 if (receivers[i].ctx == ctx) {
420 struct socketcan_filter sfilter;
421
422 receivers[i].ctx = NULL;
423
424 sfilter.can_id = receivers[i].can_id;
425 sfilter.can_mask = receivers[i].can_mask;
426
427 if (!is_already_attached(&sfilter,
428 net_context_get_iface(ctx),
429 ctx)) {
430 /* We can detach now as there are no other
431 * sockets that have same filter.
432 */
433 ret = close_socket(ctx);
434 if (ret < 0) {
435 return ret;
436 }
437 }
438
439 return 0;
440 }
441 }
442
443 return 0;
444 }
445
can_sock_close_vmeth(void * obj)446 static int can_sock_close_vmeth(void *obj)
447 {
448 int ret;
449
450 ret = can_close_socket(obj);
451 if (ret < 0) {
452 NET_DBG("Cannot detach net_context %p (%d)", obj, ret);
453
454 errno = -ret;
455 ret = -1;
456 }
457
458 return ret;
459 }
460
/* CAN sockets define no protocol specific ioctls; everything is
 * forwarded to the generic socket ioctl handler.
 */
static int can_sock_ioctl_vmeth(void *obj, unsigned int request, va_list args)
{
	return sock_fd_op_vtable.fd_vtable.ioctl(obj, request, args);
}
465
466 /*
467 * TODO: A CAN socket can be bound to a network device using SO_BINDTODEVICE.
468 */
/* bind() vtable entry, forwards to the CAN specific implementation. */
static int can_sock_bind_vmeth(void *obj, const struct sockaddr *addr,
			       socklen_t addrlen)
{
	return zcan_bind_ctx(obj, addr, addrlen);
}
474
475 /* The connect() function is no longer necessary. */
/* Accepted as a no-op: RAW CAN sockets are connectionless. */
static int can_sock_connect_vmeth(void *obj, const struct sockaddr *addr,
				  socklen_t addrlen)
{
	return 0;
}
481
482 /*
483 * The listen() and accept() functions are without any functionality,
484 * since the client-Server-Semantic is no longer present.
485 * When we use RAW-sockets we are sending unconnected packets.
486 */
/* No-op: there is no client/server semantic for RAW CAN sockets. */
static int can_sock_listen_vmeth(void *obj, int backlog)
{
	return 0;
}
491
/* No-op: there is no client/server semantic for RAW CAN sockets. */
static int can_sock_accept_vmeth(void *obj, struct sockaddr *addr,
				 socklen_t *addrlen)
{
	return 0;
}
497
can_sock_sendto_vmeth(void * obj,const void * buf,size_t len,int flags,const struct sockaddr * dest_addr,socklen_t addrlen)498 static ssize_t can_sock_sendto_vmeth(void *obj, const void *buf, size_t len,
499 int flags,
500 const struct sockaddr *dest_addr,
501 socklen_t addrlen)
502 {
503 return zcan_sendto_ctx(obj, buf, len, flags, dest_addr, addrlen);
504 }
505
can_sock_recvfrom_vmeth(void * obj,void * buf,size_t max_len,int flags,struct sockaddr * src_addr,socklen_t * addrlen)506 static ssize_t can_sock_recvfrom_vmeth(void *obj, void *buf, size_t max_len,
507 int flags, struct sockaddr *src_addr,
508 socklen_t *addrlen)
509 {
510 return zcan_recvfrom_ctx(obj, buf, max_len, flags,
511 src_addr, addrlen);
512 }
513
/* getsockopt() vtable entry.
 *
 * SOL_CAN_RAW options are answered by the CANBUS driver; anything else
 * goes to the generic socket layer. Returns 0 or -1 with errno set.
 */
static int can_sock_getsockopt_vmeth(void *obj, int level, int optname,
				     void *optval, socklen_t *optlen)
{
	const struct canbus_api *api;
	const struct device *dev;
	struct net_if *iface;

	if (level != SOL_CAN_RAW) {
		return zcan_getsockopt_ctx(obj, level, optname, optval, optlen);
	}

	if (optval == NULL) {
		errno = EINVAL;
		return -1;
	}

	iface = net_context_get_iface(obj);
	dev = net_if_get_device(iface);
	api = dev->api;

	if (api == NULL || api->getsockopt == NULL) {
		errno = ENOTSUP;
		return -1;
	}

	return api->getsockopt(dev, obj, level, optname, optval, optlen);
}
542
/* Store (iface, ctx, id, mask) in the first free receiver slot.
 * Returns the slot index on success, or -ENOENT when the table is full.
 */
static int can_register_receiver(struct net_if *iface, struct net_context *ctx,
				 socketcan_id_t can_id, socketcan_id_t can_mask)
{
	int slot;

	NET_DBG("Max %zu receivers", ARRAY_SIZE(receivers));

	for (slot = 0; slot < ARRAY_SIZE(receivers); slot++) {
		if (receivers[slot].ctx != NULL) {
			continue;
		}

		receivers[slot].ctx = ctx;
		receivers[slot].iface = iface;
		receivers[slot].can_id = can_id;
		receivers[slot].can_mask = can_mask;

		return slot;
	}

	return -ENOENT;
}
565
/* Free the receiver slot matching (iface, ctx, id, mask), if any. */
static void can_unregister_receiver(struct net_if *iface,
				    struct net_context *ctx,
				    socketcan_id_t can_id, socketcan_id_t can_mask)
{
	int slot;

	for (slot = 0; slot < ARRAY_SIZE(receivers); slot++) {
		struct can_recv *rcv = &receivers[slot];

		if (rcv->ctx == ctx && rcv->iface == iface &&
		    rcv->can_id == can_id && rcv->can_mask == can_mask) {
			rcv->ctx = NULL;
			return;
		}
	}
}
582
can_register_filters(struct net_if * iface,struct net_context * ctx,const struct socketcan_filter * sfilters,int count)583 static int can_register_filters(struct net_if *iface, struct net_context *ctx,
584 const struct socketcan_filter *sfilters, int count)
585 {
586 int i, ret;
587
588 NET_DBG("Registering %d filters", count);
589
590 for (i = 0; i < count; i++) {
591 ret = can_register_receiver(iface, ctx, sfilters[i].can_id,
592 sfilters[i].can_mask);
593 if (ret < 0) {
594 goto revert;
595 }
596 }
597
598 return 0;
599
600 revert:
601 for (i = 0; i < count; i++) {
602 can_unregister_receiver(iface, ctx, sfilters[i].can_id,
603 sfilters[i].can_mask);
604 }
605
606 return ret;
607 }
608
can_unregister_filters(struct net_if * iface,struct net_context * ctx,const struct socketcan_filter * sfilters,int count)609 static void can_unregister_filters(struct net_if *iface,
610 struct net_context *ctx,
611 const struct socketcan_filter *sfilters,
612 int count)
613 {
614 int i;
615
616 NET_DBG("Unregistering %d filters", count);
617
618 for (i = 0; i < count; i++) {
619 can_unregister_receiver(iface, ctx, sfilters[i].can_id,
620 sfilters[i].can_mask);
621 }
622 }
623
/* setsockopt() vtable entry.
 *
 * SOL_CAN_RAW options are forwarded to the CANBUS driver. For
 * CAN_RAW_FILTER the option value is an array of one or more
 * struct socketcan_filter entries which are also registered in the
 * local receiver table. Returns 0 or -1 with errno set.
 */
static int can_sock_setsockopt_vmeth(void *obj, int level, int optname,
				     const void *optval, socklen_t optlen)
{
	const struct canbus_api *api;
	struct net_if *iface;
	const struct device *dev;
	int ret;

	if (level != SOL_CAN_RAW) {
		return zcan_setsockopt_ctx(obj, level, optname, optval, optlen);
	}

	/* The application must use socketcan_filter and then we convert
	 * it to can_filter as the CANBUS drivers expect that.
	 * Accept a whole array of filters: the per-filter loop below
	 * handles multiple entries (the old check rejected any optlen
	 * other than exactly one filter, making that path unreachable).
	 * An optlen of 0 is rejected so count below is always >= 1.
	 */
	if (optname == CAN_RAW_FILTER &&
	    (optlen == 0U ||
	     (optlen % sizeof(struct socketcan_filter)) != 0U)) {
		errno = EINVAL;
		return -1;
	}

	if (optval == NULL) {
		errno = EINVAL;
		return -1;
	}

	iface = net_context_get_iface(obj);
	dev = net_if_get_device(iface);
	api = dev->api;

	if (!api || !api->setsockopt) {
		errno = ENOTSUP;
		return -1;
	}

	if (optname == CAN_RAW_FILTER) {
		int count, i;

		count = optlen / sizeof(struct socketcan_filter);

		ret = can_register_filters(iface, obj, optval, count);
		if (ret < 0) {
			errno = -ret;
			return -1;
		}

		for (i = 0; i < count; i++) {
			struct socketcan_filter *sfilter;
			struct can_filter zfilter;

			sfilter = &((struct socketcan_filter *)optval)[i];

			/* If someone has already attached the same filter to
			 * the same interface, we do not need to do it here
			 * again.
			 */
			if (is_already_attached(sfilter, iface, obj)) {
				continue;
			}

			socketcan_to_can_filter(sfilter, &zfilter);

			ret = api->setsockopt(dev, obj, level, optname,
					      &zfilter, sizeof(zfilter));
			if (ret < 0) {
				/* Roll back the receiver table on driver
				 * failure.
				 */
				can_unregister_filters(iface, obj, optval,
						       count);

				errno = -ret;
				return -1;
			}
		}

		return 0;
	}

	return api->setsockopt(dev, obj, level, optname, optval, optlen);
}
710
/* File descriptor operations for AF_CAN sockets. Thin wrappers that
 * dispatch into the zcan_*() helpers above; connect/listen/accept are
 * no-ops because RAW CAN sockets are connectionless.
 */
static const struct socket_op_vtable can_sock_fd_op_vtable = {
	.fd_vtable = {
		.read = can_sock_read_vmeth,
		.write = can_sock_write_vmeth,
		.close = can_sock_close_vmeth,
		.ioctl = can_sock_ioctl_vmeth,
	},
	.bind = can_sock_bind_vmeth,
	.connect = can_sock_connect_vmeth,
	.listen = can_sock_listen_vmeth,
	.accept = can_sock_accept_vmeth,
	.sendto = can_sock_sendto_vmeth,
	.recvfrom = can_sock_recvfrom_vmeth,
	.getsockopt = can_sock_getsockopt_vmeth,
	.setsockopt = can_sock_setsockopt_vmeth,
};
727
can_is_supported(int family,int type,int proto)728 static bool can_is_supported(int family, int type, int proto)
729 {
730 if (type != SOCK_RAW || proto != CAN_RAW) {
731 return false;
732 }
733
734 return true;
735 }
736
/* Register the AF_CAN family with the socket core so that
 * socket(AF_CAN, SOCK_RAW, CAN_RAW) is routed to zcan_socket().
 */
NET_SOCKET_REGISTER(af_can, NET_SOCKET_DEFAULT_PRIO, AF_CAN, can_is_supported,
		    zcan_socket);
739