1 /*
2 * Copyright (c) 2019 Alexander Wachter
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include "isotp_internal.h"
8 #include <net/buf.h>
9 #include <kernel.h>
10 #include <init.h>
11 #include <sys/util.h>
12 #include <logging/log.h>
13 #include <timeout_q.h>
14
15 LOG_MODULE_REGISTER(isotp, CONFIG_ISOTP_LOG_LEVEL);
16
17 #ifdef CONFIG_ISOTP_ENABLE_CONTEXT_BUFFERS
18 K_MEM_SLAB_DEFINE(ctx_slab, sizeof(struct isotp_send_ctx),
19 CONFIG_ISOTP_TX_CONTEXT_BUF_COUNT, 4);
20 #endif
21
22 static void receive_pool_free(struct net_buf *buf);
23 static void receive_ff_sf_pool_free(struct net_buf *buf);
24
25 NET_BUF_POOL_DEFINE(isotp_rx_pool, CONFIG_ISOTP_RX_BUF_COUNT,
26 CONFIG_ISOTP_RX_BUF_SIZE, sizeof(uint32_t),
27 receive_pool_free);
28
29 NET_BUF_POOL_DEFINE(isotp_rx_sf_ff_pool, CONFIG_ISOTP_RX_SF_FF_BUF_COUNT,
30 ISOTP_CAN_DL, sizeof(uint32_t), receive_ff_sf_pool_free);
31
32 static struct isotp_global_ctx global_ctx = {
33 .alloc_list = SYS_SLIST_STATIC_INIT(&global_ctx.alloc_list),
34 .ff_sf_alloc_list = SYS_SLIST_STATIC_INIT(&global_ctx.ff_sf_alloc_list)
35 };
36
37 #ifdef CONFIG_ISOTP_USE_TX_BUF
38 NET_BUF_POOL_VAR_DEFINE(isotp_tx_pool, CONFIG_ISOTP_TX_BUF_COUNT,
39 CONFIG_ISOTP_BUF_TX_DATA_POOL_SIZE, NULL);
40 #endif
41
42 static void receive_state_machine(struct isotp_recv_ctx *ctx);
43
44 /*
45 * Wake every context that is waiting for a buffer
46 */
47 static void receive_pool_free(struct net_buf *buf)
48 {
49 struct isotp_recv_ctx *ctx;
50 sys_snode_t *ctx_node;
51
52 net_buf_destroy(buf);
53
54 SYS_SLIST_FOR_EACH_NODE(&global_ctx.alloc_list, ctx_node) {
55 ctx = CONTAINER_OF(ctx_node, struct isotp_recv_ctx, alloc_node);
56 k_work_submit(&ctx->work);
57 }
58 }
59
60 static void receive_ff_sf_pool_free(struct net_buf *buf)
61 {
62 struct isotp_recv_ctx *ctx;
63 sys_snode_t *ctx_node;
64
65 net_buf_destroy(buf);
66
67 SYS_SLIST_FOR_EACH_NODE(&global_ctx.ff_sf_alloc_list, ctx_node) {
68 ctx = CONTAINER_OF(ctx_node, struct isotp_recv_ctx, alloc_node);
69 k_work_submit(&ctx->work);
70 }
71 }
72
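/*
 * Poll until the FIFO has data available (or the timeout expires) without
 * consuming an element.
 */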
73 static inline int _k_fifo_wait_non_empty(struct k_fifo *fifo,
74 k_timeout_t timeout)
75 {
76 struct k_poll_event events[] = {
77 K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_FIFO_DATA_AVAILABLE,
78 K_POLL_MODE_NOTIFY_ONLY, fifo),
79 };
80
81 return k_poll(events, ARRAY_SIZE(events), timeout);
82 }
83
84 static inline void receive_report_error(struct isotp_recv_ctx *ctx, int err)
85 {
86 ctx->state = ISOTP_RX_STATE_ERR;
87 ctx->error_nr = err;
88 }
89
90 void receive_can_tx_isr(int error, void *arg)
91 {
92 struct isotp_recv_ctx *ctx = (struct isotp_recv_ctx *)arg;
93
94 if (error != 0) {
95 LOG_ERR("Error sending FC frame (%d)", error);
96 receive_report_error(ctx, ISOTP_N_ERROR);
97 k_work_submit(&ctx->work);
98 }
99 }
100
101 static inline uint32_t receive_get_ff_length(struct net_buf *buf)
102 {
103 uint32_t len;
104 uint8_t pci = net_buf_pull_u8(buf);
105
106 len = ((pci & ISOTP_PCI_FF_DL_UPPER_MASK) << 8) | net_buf_pull_u8(buf);
107
108 /* Jumbo packet (32 bit length) */
109 if (!len) {
110 len = net_buf_pull_be32(buf);
111 }
112
113 return len;
114 }
115
116 static inline uint32_t receive_get_sf_length(struct net_buf *buf)
117 {
118 uint8_t len = net_buf_pull_u8(buf) & ISOTP_PCI_SF_DL_MASK;
119
120 /* Single frames > 16 bytes (CAN-FD only) */
121 if (IS_ENABLED(ISOTP_USE_CAN_FD) && !len) {
122 len = net_buf_pull_u8(buf);
123 }
124
125 return len;
126 }
127
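/*
 * Send a Flow Control frame with the given flow status (CTS, WAIT or OVFLW),
 * carrying the configured block size (BS) and separation time (STmin).
 */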
128 static void receive_send_fc(struct isotp_recv_ctx *ctx, uint8_t fs)
129 {
130 struct zcan_frame frame = {
131 .id_type = ctx->tx_addr.id_type,
132 .rtr = CAN_DATAFRAME,
133 .id = ctx->tx_addr.ext_id
134 };
135 uint8_t *data = frame.data;
136 uint8_t payload_len;
137 int ret;
138
139 __ASSERT_NO_MSG(!(fs & ISOTP_PCI_TYPE_MASK));
140
141 if (ctx->tx_addr.use_ext_addr) {
142 *data++ = ctx->tx_addr.ext_addr;
143 }
144
145 *data++ = ISOTP_PCI_TYPE_FC | fs;
146 *data++ = ctx->opts.bs;
147 *data++ = ctx->opts.stmin;
148 payload_len = data - frame.data;
149
150 #if defined(CONFIG_ISOTP_REQUIRE_RX_PADDING) || \
151 defined(CONFIG_ISOTP_ENABLE_TX_PADDING)
152 /* AUTOSAR requirement SWS_CanTp_00347 */
153 memset(&frame.data[payload_len], 0xCC, ISOTP_CAN_DL - payload_len);
154 frame.dlc = ISOTP_CAN_DL;
155 #else
156 frame.dlc = payload_len;
157 #endif
158
159 ret = can_send(ctx->can_dev, &frame, K_MSEC(ISOTP_A),
160 receive_can_tx_isr, ctx);
161 if (ret) {
162 LOG_ERR("Can't send FC, (%d)", ret);
163 receive_report_error(ctx, ISOTP_N_TIMEOUT_A);
164 receive_state_machine(ctx);
165 }
166 }
167
168 static inline struct net_buf *receive_alloc_buffer_chain(uint32_t len)
169 {
170 struct net_buf *buf, *frag, *last;
171 uint32_t remaining_len;
172
173 LOG_DBG("Allocate %d bytes ", len);
174 buf = net_buf_alloc_fixed(&isotp_rx_pool, K_NO_WAIT);
175 if (!buf) {
176 return NULL;
177 }
178
179 if (len <= CONFIG_ISOTP_RX_BUF_SIZE) {
180 return buf;
181 }
182
183 remaining_len = len - CONFIG_ISOTP_RX_BUF_SIZE;
184 last = buf;
185 while (remaining_len) {
186 frag = net_buf_alloc_fixed(&isotp_rx_pool, K_NO_WAIT);
187 if (!frag) {
188 net_buf_unref(buf);
189 return NULL;
190 }
191
192 net_buf_frag_insert(last, frag);
193 last = frag;
194 remaining_len = remaining_len > CONFIG_ISOTP_RX_BUF_SIZE ?
195 remaining_len - CONFIG_ISOTP_RX_BUF_SIZE : 0;
196 }
197
198 return buf;
199 }
200
201 static void receive_timeout_handler(struct _timeout *to)
202 {
203 struct isotp_recv_ctx *ctx = CONTAINER_OF(to, struct isotp_recv_ctx,
204 timeout);
205
206 switch (ctx->state) {
207 case ISOTP_RX_STATE_WAIT_CF:
208 LOG_ERR("Timeout while waiting for CF");
209 receive_report_error(ctx, ISOTP_N_TIMEOUT_CR);
210 break;
211
212 case ISOTP_RX_STATE_TRY_ALLOC:
213 ctx->state = ISOTP_RX_STATE_SEND_WAIT;
214 break;
215 }
216
217 k_work_submit(&ctx->work);
218 }
219
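/*
 * Allocate the receive buffer (chain) for the next block. With BS == 0 the
 * whole remaining length must be buffered up front, since no further FC
 * frame can pause the sender. On failure the allocation timeout is armed and
 * the context either waits on the global alloc list or sends a WAIT frame.
 */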
220 static int receive_alloc_buffer(struct isotp_recv_ctx *ctx)
221 {
222 struct net_buf *buf = NULL;
223
224 if (ctx->opts.bs == 0) {
225 /* Alloc all buffers because we can't wait during reception */
226 buf = receive_alloc_buffer_chain(ctx->length);
227 } else {
228 buf = receive_alloc_buffer_chain(ctx->opts.bs *
229 (ISOTP_CAN_DL - 1));
230 }
231
232 if (!buf) {
233 z_add_timeout(&ctx->timeout, receive_timeout_handler,
234 K_MSEC(ISOTP_ALLOC_TIMEOUT));
235
236 if (ctx->wft == ISOTP_WFT_FIRST) {
237 LOG_DBG("Allocation failed. Append to alloc list");
238 ctx->wft = 0;
239 sys_slist_append(&global_ctx.alloc_list,
240 &ctx->alloc_node);
241 } else {
242 LOG_DBG("Allocation failed. Send WAIT frame");
243 ctx->state = ISOTP_RX_STATE_SEND_WAIT;
244 receive_state_machine(ctx);
245 }
246
247 return -1;
248 }
249
250 if (ctx->state == ISOTP_RX_STATE_TRY_ALLOC) {
251 z_abort_timeout(&ctx->timeout);
252 ctx->wft = ISOTP_WFT_FIRST;
253 sys_slist_find_and_remove(&global_ctx.alloc_list,
254 &ctx->alloc_node);
255 }
256
257 if (ctx->opts.bs != 0) {
258 ctx->buf = buf;
259 } else {
260 net_buf_frag_insert(ctx->buf, buf);
261 }
262
263 ctx->act_frag = buf;
264 return 0;
265 }
266
267 static void receive_state_machine(struct isotp_recv_ctx *ctx)
268 {
269 int ret;
270 uint32_t *ud_rem_len;
271
272 switch (ctx->state) {
273 case ISOTP_RX_STATE_PROCESS_SF:
274 ctx->length = receive_get_sf_length(ctx->buf);
275 ud_rem_len = net_buf_user_data(ctx->buf);
276 *ud_rem_len = 0;
277 LOG_DBG("SM process SF of length %d", ctx->length);
278 net_buf_put(&ctx->fifo, ctx->buf);
279 ctx->state = ISOTP_RX_STATE_RECYCLE;
280 receive_state_machine(ctx);
281 break;
282
283 case ISOTP_RX_STATE_PROCESS_FF:
284 ctx->length = receive_get_ff_length(ctx->buf);
285 LOG_DBG("SM process FF. Length: %d", ctx->length);
286 ctx->length -= ctx->buf->len;
287 if (ctx->opts.bs == 0 &&
288 ctx->length > CONFIG_ISOTP_RX_BUF_COUNT *
289 CONFIG_ISOTP_RX_BUF_SIZE) {
290 LOG_ERR("Pkt length is %d but buffer has only %d bytes",
291 ctx->length,
292 CONFIG_ISOTP_RX_BUF_COUNT *
293 CONFIG_ISOTP_RX_BUF_SIZE);
294 receive_report_error(ctx, ISOTP_N_BUFFER_OVERFLW);
295 receive_state_machine(ctx);
296 break;
297 }
298
299 if (ctx->opts.bs) {
300 ctx->bs = ctx->opts.bs;
301 ud_rem_len = net_buf_user_data(ctx->buf);
302 *ud_rem_len = ctx->length;
303 net_buf_put(&ctx->fifo, ctx->buf);
304 }
305
306 ctx->wft = ISOTP_WFT_FIRST;
307 ctx->state = ISOTP_RX_STATE_TRY_ALLOC;
308 __fallthrough;
309 case ISOTP_RX_STATE_TRY_ALLOC:
310 LOG_DBG("SM try to allocate");
311 z_abort_timeout(&ctx->timeout);
312 ret = receive_alloc_buffer(ctx);
313 if (ret) {
314 LOG_DBG("SM allocation failed. Wait for free buffer");
315 break;
316 }
317
318 ctx->state = ISOTP_RX_STATE_SEND_FC;
319 __fallthrough;
320 case ISOTP_RX_STATE_SEND_FC:
321 LOG_DBG("SM send CTS FC frame");
322 receive_send_fc(ctx, ISOTP_PCI_FS_CTS);
323 z_add_timeout(&ctx->timeout, receive_timeout_handler,
324 K_MSEC(ISOTP_CR));
325 ctx->state = ISOTP_RX_STATE_WAIT_CF;
326 break;
327
328 case ISOTP_RX_STATE_SEND_WAIT:
329 if (++ctx->wft < CONFIG_ISOTP_WFTMAX) {
330 LOG_DBG("Send wait frame number %d", ctx->wft);
331 receive_send_fc(ctx, ISOTP_PCI_FS_WAIT);
332 z_add_timeout(&ctx->timeout, receive_timeout_handler,
333 K_MSEC(ISOTP_ALLOC_TIMEOUT));
334 ctx->state = ISOTP_RX_STATE_TRY_ALLOC;
335 break;
336 }
337
338 sys_slist_find_and_remove(&global_ctx.alloc_list,
339 &ctx->alloc_node);
340 LOG_ERR("Sent %d wait frames. Giving up to alloc now",
341 ctx->wft);
342 receive_report_error(ctx, ISOTP_N_BUFFER_OVERFLW);
343 __fallthrough;
344 case ISOTP_RX_STATE_ERR:
345 LOG_DBG("SM ERR state. err nr: %d", ctx->error_nr);
346 z_abort_timeout(&ctx->timeout);
347
348 if (ctx->error_nr == ISOTP_N_BUFFER_OVERFLW) {
349 receive_send_fc(ctx, ISOTP_PCI_FS_OVFLW);
350 }
351
352 k_fifo_cancel_wait(&ctx->fifo);
353 net_buf_unref(ctx->buf);
354 ctx->buf = NULL;
355 ctx->state = ISOTP_RX_STATE_RECYCLE;
356 __fallthrough;
357 case ISOTP_RX_STATE_RECYCLE:
358 LOG_DBG("SM recycle context for next message");
359 ctx->buf = net_buf_alloc_fixed(&isotp_rx_sf_ff_pool, K_NO_WAIT);
360 if (!ctx->buf) {
361 LOG_DBG("No free context. Append to waiters list");
362 sys_slist_append(&global_ctx.ff_sf_alloc_list,
363 &ctx->alloc_node);
364 break;
365 }
366
367 sys_slist_find_and_remove(&global_ctx.ff_sf_alloc_list,
368 &ctx->alloc_node);
369 ctx->state = ISOTP_RX_STATE_WAIT_FF_SF;
370 __fallthrough;
371 case ISOTP_RX_STATE_UNBOUND:
372 break;
373
374 default:
375 break;
376 }
377 }
378
379 static void receive_work_handler(struct k_work *item)
380 {
381 struct isotp_recv_ctx *ctx = CONTAINER_OF(item, struct isotp_recv_ctx,
382 work);
383
384 receive_state_machine(ctx);
385 }
386
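/*
 * Handle an incoming Single Frame or First Frame and copy its payload into
 * the preallocated FF/SF buffer (runs in ISR context).
 */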
387 static void process_ff_sf(struct isotp_recv_ctx *ctx, struct zcan_frame *frame)
388 {
389 int index = 0;
390 uint8_t payload_len;
391 uint32_t rx_sa; /* ISO-TP fixed source address (if used) */
392
393 if (ctx->rx_addr.use_ext_addr) {
394 if (frame->data[index++] != ctx->rx_addr.ext_addr) {
395 return;
396 }
397 }
398
399 if (ctx->rx_addr.use_fixed_addr) {
400 /* store actual CAN ID used by the sender */
401 ctx->rx_addr.ext_id = frame->id;
402 /* replace TX target address with RX source address */
403 rx_sa = (frame->id & ISOTP_FIXED_ADDR_SA_MASK) >>
404 ISOTP_FIXED_ADDR_SA_POS;
405 ctx->tx_addr.ext_id &= ~(ISOTP_FIXED_ADDR_TA_MASK);
406 ctx->tx_addr.ext_id |= rx_sa << ISOTP_FIXED_ADDR_TA_POS;
407 /* use same priority for TX as in received message */
408 ctx->tx_addr.ext_id &= ~(ISOTP_FIXED_ADDR_PRIO_MASK);
409 ctx->tx_addr.ext_id |= frame->id & ISOTP_FIXED_ADDR_PRIO_MASK;
410 }
411
412 switch (frame->data[index] & ISOTP_PCI_TYPE_MASK) {
413 case ISOTP_PCI_TYPE_FF:
414 LOG_DBG("Got FF IRQ");
415 if (frame->dlc != ISOTP_CAN_DL) {
416 LOG_INF("FF DLC invalid. Ignore");
417 return;
418 }
419
420 payload_len = ISOTP_CAN_DL;
421 ctx->state = ISOTP_RX_STATE_PROCESS_FF;
422 ctx->sn_expected = 1;
423 break;
424
425 case ISOTP_PCI_TYPE_SF:
426 LOG_DBG("Got SF IRQ");
427 #ifdef CONFIG_ISOTP_REQUIRE_RX_PADDING
428 /* AUTOSAR requirement SWS_CanTp_00345 */
429 if (frame->dlc != ISOTP_CAN_DL) {
430 LOG_INF("SF DLC invalid. Ignore");
431 return;
432 }
433 #endif
434
435 payload_len = index + 1 + (frame->data[index] &
436 ISOTP_PCI_SF_DL_MASK);
437
438 if (payload_len > frame->dlc) {
439 LOG_INF("SF DL does not fit. Ignore");
440 return;
441 }
442
443 ctx->state = ISOTP_RX_STATE_PROCESS_SF;
444 break;
445
446 default:
447 LOG_INF("Got unexpected frame. Ignore");
448 return;
449 }
450
451 net_buf_add_mem(ctx->buf, &frame->data[index], payload_len - index);
452 }
453
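/*
 * Append CF payload to the active fragment, spilling into the next
 * preallocated fragment once the tailroom is exhausted.
 */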
454 static inline void receive_add_mem(struct isotp_recv_ctx *ctx, uint8_t *data,
455 size_t len)
456 {
457 size_t tailroom = net_buf_tailroom(ctx->act_frag);
458
459 if (tailroom >= len) {
460 net_buf_add_mem(ctx->act_frag, data, len);
461 return;
462 }
463
464 /* Use next fragment that is already allocated */
465 net_buf_add_mem(ctx->act_frag, data, tailroom);
466 ctx->act_frag = ctx->act_frag->frags;
467 if (!ctx->act_frag) {
468 LOG_ERR("No fragment left to append data");
469 receive_report_error(ctx, ISOTP_N_BUFFER_OVERFLW);
470 return;
471 }
472
473 net_buf_add_mem(ctx->act_frag, data + tailroom, len - tailroom);
474 }
475
476 static void process_cf(struct isotp_recv_ctx *ctx, struct zcan_frame *frame)
477 {
478 uint32_t *ud_rem_len = (uint32_t *)net_buf_user_data(ctx->buf);
479 int index = 0;
480 uint32_t data_len;
481
482 if (ctx->rx_addr.use_ext_addr) {
483 if (frame->data[index++] != ctx->rx_addr.ext_addr) {
484 return;
485 }
486 }
487
488 if ((frame->data[index] & ISOTP_PCI_TYPE_MASK) != ISOTP_PCI_TYPE_CF) {
489 LOG_DBG("Waiting for CF but got something else (%d)",
490 frame->data[index] >> ISOTP_PCI_TYPE_POS);
491 receive_report_error(ctx, ISOTP_N_UNEXP_PDU);
492 k_work_submit(&ctx->work);
493 return;
494 }
495
496 z_abort_timeout(&ctx->timeout);
497 z_add_timeout(&ctx->timeout, receive_timeout_handler,
498 K_MSEC(ISOTP_CR));
499
500 if ((frame->data[index++] & ISOTP_PCI_SN_MASK) != ctx->sn_expected++) {
501 LOG_ERR("Sequence number missmatch");
502 receive_report_error(ctx, ISOTP_N_WRONG_SN);
503 k_work_submit(&ctx->work);
504 return;
505 }
506
507 #ifdef CONFIG_ISOTP_REQUIRE_RX_PADDING
508 /* AUTOSAR requirement SWS_CanTp_00346 */
509 if (frame->dlc != ISOTP_CAN_DL) {
510 LOG_ERR("CF DL invalid");
511 receive_report_error(ctx, ISOTP_N_ERROR);
512 return;
513 }
514 #endif
515
516 LOG_DBG("Got CF irq. Appending data");
517 data_len = (ctx->length > frame->dlc - index) ? frame->dlc - index :
518 ctx->length;
519 receive_add_mem(ctx, &frame->data[index], data_len);
520 ctx->length -= data_len;
521 LOG_DBG("%d bytes remaining", ctx->length);
522
523 if (ctx->length == 0) {
524 ctx->state = ISOTP_RX_STATE_RECYCLE;
525 *ud_rem_len = 0;
526 net_buf_put(&ctx->fifo, ctx->buf);
527 return;
528 }
529
530 if (ctx->opts.bs && !--ctx->bs) {
531 LOG_DBG("Block is complete. Allocate new buffer");
532 ctx->bs = ctx->opts.bs;
533 *ud_rem_len = ctx->length;
534 net_buf_put(&ctx->fifo, ctx->buf);
535 ctx->state = ISOTP_RX_STATE_TRY_ALLOC;
536 }
537 }
538
539 static void receive_can_rx_isr(struct zcan_frame *frame, void *arg)
540 {
541 struct isotp_recv_ctx *ctx = (struct isotp_recv_ctx *)arg;
542
543 switch (ctx->state) {
544 case ISOTP_RX_STATE_WAIT_FF_SF:
545 __ASSERT_NO_MSG(ctx->buf);
546 process_ff_sf(ctx, frame);
547 break;
548
549 case ISOTP_RX_STATE_WAIT_CF:
550 process_cf(ctx, frame);
551 /* still waiting for more CF */
552 if (ctx->state == ISOTP_RX_STATE_WAIT_CF) {
553 return;
554 }
555
556 break;
557
558 case ISOTP_RX_STATE_RECYCLE:
559 LOG_ERR("Got a frame but was not yet ready for a new one");
560 receive_report_error(ctx, ISOTP_N_BUFFER_OVERFLW);
561 break;
562
563 default:
564 LOG_INF("Got a frame in a state where it is unexpected.");
565 }
566
567 k_work_submit(&ctx->work);
568 }
569
570 static inline int attach_ff_filter(struct isotp_recv_ctx *ctx)
571 {
572 uint32_t mask;
573
574 if (ctx->rx_addr.use_fixed_addr) {
575 mask = ISOTP_FIXED_ADDR_RX_MASK;
576 } else {
577 mask = CAN_EXT_ID_MASK;
578 }
579
580 struct zcan_filter filter = {
581 .id_type = ctx->rx_addr.id_type,
582 .rtr = CAN_DATAFRAME,
583 .id = ctx->rx_addr.ext_id,
584 .rtr_mask = 1,
585 .id_mask = mask
586 };
587
588 ctx->filter_id = can_attach_isr(ctx->can_dev, receive_can_rx_isr, ctx,
589 &filter);
590 if (ctx->filter_id < 0) {
591 LOG_ERR("Error attaching FF filter [%d]", ctx->filter_id);
592 return ISOTP_NO_FREE_FILTER;
593 }
594
595 return 0;
596 }
597
598 int isotp_bind(struct isotp_recv_ctx *ctx, const struct device *can_dev,
599 const struct isotp_msg_id *rx_addr,
600 const struct isotp_msg_id *tx_addr,
601 const struct isotp_fc_opts *opts,
602 k_timeout_t timeout)
603 {
604 int ret;
605
606 __ASSERT(ctx, "ctx is NULL");
607 __ASSERT(can_dev, "CAN device is NULL");
608 __ASSERT(rx_addr && tx_addr, "RX or TX addr is NULL");
609 __ASSERT(opts, "OPTS is NULL");
610
611 ctx->can_dev = can_dev;
612 ctx->rx_addr = *rx_addr;
613 ctx->tx_addr = *tx_addr;
614 k_fifo_init(&ctx->fifo);
615
616 __ASSERT(opts->stmin < ISOTP_STMIN_MAX, "STmin limit");
617 __ASSERT(opts->stmin <= ISOTP_STMIN_MS_MAX ||
618 opts->stmin >= ISOTP_STMIN_US_BEGIN, "STmin reserved");
619
620 ctx->opts = *opts;
621 ctx->state = ISOTP_RX_STATE_WAIT_FF_SF;
622
623 LOG_DBG("Binding to addr: 0x%x. Responding on 0x%x",
624 ctx->rx_addr.ext_id, ctx->tx_addr.ext_id);
625
626 ctx->buf = net_buf_alloc_fixed(&isotp_rx_sf_ff_pool, timeout);
627 if (!ctx->buf) {
628 LOG_ERR("No buffer for FF left");
629 return ISOTP_NO_NET_BUF_LEFT;
630 }
631
632 ret = attach_ff_filter(ctx);
633 if (ret) {
634 LOG_ERR("Can't attach filter for binding");
635 net_buf_unref(ctx->buf);
636 ctx->buf = NULL;
637 return ret;
638 }
639
640 k_work_init(&ctx->work, receive_work_handler);
641 z_init_timeout(&ctx->timeout);
642
643 return ISOTP_N_OK;
644 }
645
646 void isotp_unbind(struct isotp_recv_ctx *ctx)
647 {
648 struct net_buf *buf;
649
650 if (ctx->filter_id >= 0 && ctx->can_dev) {
651 can_detach(ctx->can_dev, ctx->filter_id);
652 }
653
654 z_abort_timeout(&ctx->timeout);
655
656 sys_slist_find_and_remove(&global_ctx.ff_sf_alloc_list,
657 &ctx->alloc_node);
658 sys_slist_find_and_remove(&global_ctx.alloc_list,
659 &ctx->alloc_node);
660
661 ctx->state = ISOTP_RX_STATE_UNBOUND;
662
663 while ((buf = net_buf_get(&ctx->fifo, K_NO_WAIT))) {
664 net_buf_unref(buf);
665 }
666
667 k_fifo_cancel_wait(&ctx->fifo);
668
669 if (ctx->buf) {
670 net_buf_unref(ctx->buf);
671 }
672
673 LOG_DBG("Unbound");
674 }
675
676 int isotp_recv_net(struct isotp_recv_ctx *ctx, struct net_buf **buffer,
677 k_timeout_t timeout)
678 {
679 struct net_buf *buf;
680 int ret;
681
682 buf = net_buf_get(&ctx->fifo, timeout);
683 if (!buf) {
684 ret = ctx->error_nr ? ctx->error_nr : ISOTP_RECV_TIMEOUT;
685 ctx->error_nr = 0;
686
687 return ret;
688 }
689
690 *buffer = buf;
691
692 return *(uint32_t *)net_buf_user_data(buf);
693 }
694
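/*
 * Drop 'len' consumed bytes from the fragment chain: fully consumed fragments
 * are removed from the FIFO, a partially consumed fragment is kept (with an
 * extra reference) and its data pulled.
 */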
695 static inline void pull_frags(struct k_fifo *fifo, struct net_buf *buf,
696 size_t len)
697 {
698 size_t rem_len = len;
699 struct net_buf *frag = buf;
700
701 /* frags to be removed */
702 while (frag && (frag->len <= rem_len)) {
703 rem_len -= frag->len;
704 frag = frag->frags;
705 k_fifo_get(fifo, K_NO_WAIT);
706 }
707
708 if (frag) {
709 /* Start of frags to be preserved */
710 net_buf_ref(frag);
711 net_buf_pull(frag, rem_len);
712 }
713
714 net_buf_unref(buf);
715 }
716
717 int isotp_recv(struct isotp_recv_ctx *ctx, uint8_t *data, size_t len,
718 k_timeout_t timeout)
719 {
720 size_t num_copied, frags_len;
721 struct net_buf *buf;
722 int ret;
723
724 ret = _k_fifo_wait_non_empty(&ctx->fifo, timeout);
725 if (ret) {
726 if (ctx->error_nr) {
727 ret = ctx->error_nr;
728 ctx->error_nr = 0;
729 return ret;
730 }
731
732 if (ret == -EAGAIN) {
733 return ISOTP_RECV_TIMEOUT;
734 }
735
736 return ISOTP_N_ERROR;
737 }
738
739 buf = k_fifo_peek_head(&ctx->fifo);
740
741 if (!buf) {
742 return ISOTP_N_ERROR;
743 }
744
745 frags_len = net_buf_frags_len(buf);
746 num_copied = net_buf_linearize(data, len, buf, 0, len);
747
748 pull_frags(&ctx->fifo, buf, num_copied);
749
750 return num_copied;
751 }
752
753 static inline void send_report_error(struct isotp_send_ctx *ctx, uint32_t err)
754 {
755 ctx->state = ISOTP_TX_ERR;
756 ctx->error_nr = err;
757 }
758
759 static void send_can_tx_isr(int error, void *arg)
760 {
761 struct isotp_send_ctx *ctx = (struct isotp_send_ctx *)arg;
762
763 ctx->tx_backlog--;
764
765 if (ctx->state == ISOTP_TX_WAIT_BACKLOG) {
766 if (ctx->tx_backlog > 0) {
767 return;
768 }
769
770 ctx->state = ISOTP_TX_WAIT_FIN;
771 }
772
773 k_work_submit(&ctx->work);
774 }
775
776 static void send_timeout_handler(struct _timeout *to)
777 {
778 struct isotp_send_ctx *ctx = CONTAINER_OF(to, struct isotp_send_ctx,
779 timeout);
780
781 if (ctx->state != ISOTP_TX_SEND_CF) {
782 send_report_error(ctx, ISOTP_N_TIMEOUT_BS);
783 LOG_ERR("Reception of next FC has timed out");
784 }
785
786 k_work_submit(&ctx->work);
787 }
788
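/*
 * Evaluate a received Flow Control frame: CTS resumes CF transmission with
 * the advertised BS/STmin, WAIT re-arms the BS timeout, OVFLW aborts the
 * transfer.
 */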
789 static void send_process_fc(struct isotp_send_ctx *ctx,
790 struct zcan_frame *frame)
791 {
792 uint8_t *data = frame->data;
793
794 if (ctx->rx_addr.use_ext_addr) {
795 if (ctx->rx_addr.ext_addr != *data++) {
796 return;
797 }
798 }
799
800 if ((*data & ISOTP_PCI_TYPE_MASK) != ISOTP_PCI_TYPE_FC) {
801 LOG_ERR("Got unexpected PDU expected FC");
802 send_report_error(ctx, ISOTP_N_UNEXP_PDU);
803 return;
804 }
805
806 #ifdef CONFIG_ISOTP_ENABLE_TX_PADDING
807 /* AUTOSAR requirement SWS_CanTp_00349 */
808 if (frame->dlc != ISOTP_CAN_DL) {
809 LOG_ERR("FC DL invalid. Ignore");
810 send_report_error(ctx, ISOTP_N_ERROR);
811 return;
812 }
813 #endif
814
815 switch (*data++ & ISOTP_PCI_FS_MASK) {
816 case ISOTP_PCI_FS_CTS:
817 ctx->state = ISOTP_TX_SEND_CF;
818 ctx->wft = 0;
819 ctx->tx_backlog = 0;
820 ctx->opts.bs = *data++;
821 ctx->opts.stmin = *data++;
822 ctx->bs = ctx->opts.bs;
823 LOG_DBG("Got CTS. BS: %d, STmin: %d", ctx->opts.bs,
824 ctx->opts.stmin);
825 break;
826
827 case ISOTP_PCI_FS_WAIT:
828 LOG_DBG("Got WAIT frame");
829 z_abort_timeout(&ctx->timeout);
830 z_add_timeout(&ctx->timeout, send_timeout_handler,
831 K_MSEC(ISOTP_BS));
832 if (ctx->wft >= CONFIG_ISOTP_WFTMAX) {
833 LOG_INF("Got to many wait frames");
834 send_report_error(ctx, ISOTP_N_WFT_OVRN);
835 }
836
837 ctx->wft++;
838 break;
839
840 case ISOTP_PCI_FS_OVFLW:
841 LOG_ERR("Got overflow FC frame");
842 send_report_error(ctx, ISOTP_N_BUFFER_OVERFLW);
843 break;
844
845 default:
846 send_report_error(ctx, ISOTP_N_INVALID_FS);
847 }
848 }
849
850 static void send_can_rx_isr(struct zcan_frame *frame, void *arg)
851 {
852 struct isotp_send_ctx *ctx = (struct isotp_send_ctx *)arg;
853
854 if (ctx->state == ISOTP_TX_WAIT_FC) {
855 z_abort_timeout(&ctx->timeout);
856 send_process_fc(ctx, frame);
857 } else {
858 LOG_ERR("Got unexpected PDU");
859 send_report_error(ctx, ISOTP_N_UNEXP_PDU);
860 }
861
862 k_work_submit(&ctx->work);
863 }
864
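/*
 * Helpers to access the TX payload independently of whether it is backed by
 * a plain buffer or a net_buf chain.
 */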
865 static size_t get_ctx_data_length(struct isotp_send_ctx *ctx)
866 {
867 return ctx->is_net_buf ? net_buf_frags_len(ctx->buf) : ctx->len;
868 }
869
870 static const uint8_t *get_data_ctx(struct isotp_send_ctx *ctx)
871 {
872 if (ctx->is_net_buf) {
873 return ctx->buf->data;
874 } else {
875 return ctx->data;
876 }
877 }
878
879 static void pull_data_ctx(struct isotp_send_ctx *ctx, size_t len)
880 {
881 if (ctx->is_net_buf) {
882 net_buf_pull_mem(ctx->buf, len);
883 } else {
884 ctx->data += len;
885 ctx->len -= len;
886 }
887 }
888
889 static inline int send_sf(struct isotp_send_ctx *ctx)
890 {
891 struct zcan_frame frame = {
892 .id_type = ctx->tx_addr.id_type,
893 .rtr = CAN_DATAFRAME,
894 .id = ctx->tx_addr.ext_id
895 };
896 size_t len = get_ctx_data_length(ctx);
897 int index = 0;
898 int ret;
899 const uint8_t *data;
900
901 data = get_data_ctx(ctx);
902 pull_data_ctx(ctx, len);
903
904 if (ctx->tx_addr.use_ext_addr) {
905 frame.data[index++] = ctx->tx_addr.ext_addr;
906 }
907
908 frame.data[index++] = ISOTP_PCI_TYPE_SF | len;
909
910 if (len > ISOTP_CAN_DL - index) {
911 LOG_ERR("SF len does not fit DL");
912 return -ENOSPC;
913 }
914
915 memcpy(&frame.data[index], data, len);
916
917 #ifdef CONFIG_ISOTP_ENABLE_TX_PADDING
918 /* AUTOSAR requirement SWS_CanTp_00348 */
919 memset(&frame.data[index + len], 0xCC, ISOTP_CAN_DL - len - index);
920 frame.dlc = ISOTP_CAN_DL;
921 #else
922 frame.dlc = len + index;
923 #endif
924
925 ctx->state = ISOTP_TX_SEND_SF;
926 ret = can_send(ctx->can_dev, &frame, K_MSEC(ISOTP_A),
927 send_can_tx_isr, ctx);
928 return ret;
929 }
930
931 static inline int send_ff(struct isotp_send_ctx *ctx)
932 {
933 struct zcan_frame frame = {
934 .id_type = ctx->tx_addr.id_type,
935 .rtr = CAN_DATAFRAME,
936 .id = ctx->tx_addr.ext_id,
937 .dlc = ISOTP_CAN_DL
938 };
939 int index = 0;
940 size_t len = get_ctx_data_length(ctx);
941 int ret;
942 const uint8_t *data;
943
944 if (ctx->tx_addr.use_ext_addr) {
945 frame.data[index++] = ctx->tx_addr.ext_addr;
946 }
947
948 if (len > 0xFFF) {
949 frame.data[index++] = ISOTP_PCI_TYPE_FF;
950 frame.data[index++] = 0;
951 frame.data[index++] = (len >> 3 * 8) & 0xFF;
952 frame.data[index++] = (len >> 2 * 8) & 0xFF;
953 frame.data[index++] = (len >> 8) & 0xFF;
954 frame.data[index++] = len & 0xFF;
955 } else {
956 frame.data[index++] = ISOTP_PCI_TYPE_FF | (len >> 8);
957 frame.data[index++] = len & 0xFF;
958 }
959
960 /* According to ISO 15765-2 the FF has SN 0 and it is incremented to one,
961 * although the SN is not part of the FF frame
962 */
963 ctx->sn = 1;
964 data = get_data_ctx(ctx);
965 pull_data_ctx(ctx, ISOTP_CAN_DL - index);
966 memcpy(&frame.data[index], data, ISOTP_CAN_DL - index);
967
968 ret = can_send(ctx->can_dev, &frame, K_MSEC(ISOTP_A),
969 send_can_tx_isr, ctx);
970 return ret;
971 }
972
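/*
 * Send one Consecutive Frame. Returns the number of bytes still left to
 * send, 0 if this was the last CF, or a CAN error code on failure.
 */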
973 static inline int send_cf(struct isotp_send_ctx *ctx)
974 {
975 struct zcan_frame frame = {
976 .id_type = ctx->tx_addr.id_type,
977 .rtr = CAN_DATAFRAME,
978 .id = ctx->tx_addr.ext_id,
979 };
980 int index = 0;
981 int ret;
982 int len;
983 int rem_len;
984 const uint8_t *data;
985
986 if (ctx->tx_addr.use_ext_addr) {
987 frame.data[index++] = ctx->tx_addr.ext_addr;
988 }
989
990 /* SN wraps around at 0xF automatically because it has a 4-bit size */
991 frame.data[index++] = ISOTP_PCI_TYPE_CF | ctx->sn;
992
993 rem_len = get_ctx_data_length(ctx);
994 len = MIN(rem_len, ISOTP_CAN_DL - index);
995 rem_len -= len;
996 data = get_data_ctx(ctx);
997 memcpy(&frame.data[index], data, len);
998
999 #ifdef CONFIG_ISOTP_ENABLE_TX_PADDING
1000 /* AUTOSAR requirement SWS_CanTp_00348 */
1001 memset(&frame.data[index + len], 0xCC, ISOTP_CAN_DL - len - index);
1002 frame.dlc = ISOTP_CAN_DL;
1003 #else
1004 frame.dlc = len + index;
1005 #endif
1006
1007 ret = can_send(ctx->can_dev, &frame, K_MSEC(ISOTP_A),
1008 send_can_tx_isr, ctx);
1009 if (ret == CAN_TX_OK) {
1010 ctx->sn++;
1011 pull_data_ctx(ctx, len);
1012 ctx->bs--;
1013 ctx->tx_backlog++;
1014 }
1015
1016 ret = ret ? ret : rem_len;
1017 return ret;
1018 }
1019
1020 #ifdef CONFIG_ISOTP_ENABLE_CONTEXT_BUFFERS
1021 static inline void free_send_ctx(struct isotp_send_ctx **ctx)
1022 {
1023 if ((*ctx)->is_net_buf) {
1024 net_buf_unref((*ctx)->buf);
1025 (*ctx)->buf = NULL;
1026 }
1027
1028 if ((*ctx)->is_ctx_slab) {
1029 k_mem_slab_free(&ctx_slab, (void **)ctx);
1030 }
1031 }
1032
1033 static int alloc_ctx(struct isotp_send_ctx **ctx, k_timeout_t timeout)
1034 {
1035 int ret;
1036
1037 ret = k_mem_slab_alloc(&ctx_slab, (void **)ctx, timeout);
1038 if (ret) {
1039 return ISOTP_NO_CTX_LEFT;
1040 }
1041
1042 (*ctx)->is_ctx_slab = 1;
1043
1044 return 0;
1045 }
1046 #else
1047 #define free_send_ctx(x)
1048 #endif /*CONFIG_ISOTP_ENABLE_CONTEXT_BUFFERS*/
1049
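/*
 * Convert the STmin byte from an FC frame into a kernel timeout: values up to
 * ISOTP_STMIN_MS_MAX are milliseconds, values from ISOTP_STMIN_US_BEGIN on
 * select multiples of 100 microseconds, and reserved values fall back to the
 * 127 ms maximum.
 */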
1050 static k_timeout_t stmin_to_ticks(uint8_t stmin)
1051 {
1052 /* According to ISO 15765-2 stmin should be 127ms if value is corrupt */
1053 if (stmin > ISOTP_STMIN_MAX ||
1054 (stmin > ISOTP_STMIN_MS_MAX && stmin < ISOTP_STMIN_US_BEGIN)) {
1055 return K_MSEC(ISOTP_STMIN_MS_MAX);
1056 }
1057
1058 if (stmin >= ISOTP_STMIN_US_BEGIN) {
1059 return K_USEC((stmin + 1 - ISOTP_STMIN_US_BEGIN) * 100U);
1060 }
1061
1062 return K_MSEC(stmin);
1063 }
1064
1065 static void send_state_machine(struct isotp_send_ctx *ctx)
1066 {
1067 int ret;
1068
1069 switch (ctx->state) {
1070
1071 case ISOTP_TX_SEND_FF:
1072 send_ff(ctx);
1073 z_add_timeout(&ctx->timeout, send_timeout_handler,
1074 K_MSEC(ISOTP_BS));
1075 ctx->state = ISOTP_TX_WAIT_FC;
1076 LOG_DBG("SM send FF");
1077 break;
1078
1079 case ISOTP_TX_SEND_CF:
1080 LOG_DBG("SM send CF");
1081 z_abort_timeout(&ctx->timeout);
1082 do {
1083 ret = send_cf(ctx);
1084 if (!ret) {
1085 ctx->state = ISOTP_TX_WAIT_BACKLOG;
1086 break;
1087 }
1088
1089 if (ret < 0) {
1090 LOG_ERR("Failed to send CF");
1091 send_report_error(ctx, ret == CAN_TIMEOUT ?
1092 ISOTP_N_TIMEOUT_A :
1093 ISOTP_N_ERROR);
1094 break;
1095 }
1096
1097 if (ctx->opts.bs && !ctx->bs) {
1098 z_add_timeout(&ctx->timeout,
1099 send_timeout_handler,
1100 K_MSEC(ISOTP_BS));
1101 ctx->state = ISOTP_TX_WAIT_FC;
1102 LOG_DBG("BS reached. Wait for FC again");
1103 break;
1104 } else if (ctx->opts.stmin) {
1105 ctx->state = ISOTP_TX_WAIT_ST;
1106 break;
1107 }
1108 } while (ret > 0);
1109
1110 break;
1111
1112 case ISOTP_TX_WAIT_ST:
1113 z_add_timeout(&ctx->timeout, send_timeout_handler,
1114 stmin_to_ticks(ctx->opts.stmin));
1115 ctx->state = ISOTP_TX_SEND_CF;
1116 LOG_DBG("SM wait ST");
1117 break;
1118
1119 case ISOTP_TX_ERR:
1120 LOG_DBG("SM error");
1121 __fallthrough;
1122 case ISOTP_TX_WAIT_FIN:
1123 if (ctx->filter_id >= 0) {
1124 can_detach(ctx->can_dev, ctx->filter_id);
1125 }
1126
1127 LOG_DBG("SM finish");
1128 z_abort_timeout(&ctx->timeout);
1129
1130 if (ctx->has_callback) {
1131 ctx->fin_cb.cb(ctx->error_nr, ctx->fin_cb.arg);
1132 free_send_ctx(&ctx);
1133 } else {
1134 k_sem_give(&ctx->fin_sem);
1135 }
1136
1137 ctx->state = ISOTP_TX_STATE_RESET;
1138 break;
1139
1140 default:
1141 break;
1142 }
1143 }
1144
1145 static void send_work_handler(struct k_work *item)
1146 {
1147 struct isotp_send_ctx *ctx = CONTAINER_OF(item, struct isotp_send_ctx,
1148 work);
1149
1150 send_state_machine(ctx);
1151 }
1152
1153 static inline int attach_fc_filter(struct isotp_send_ctx *ctx)
1154 {
1155 struct zcan_filter filter = {
1156 .id_type = ctx->rx_addr.id_type,
1157 .rtr = CAN_DATAFRAME,
1158 .id = ctx->rx_addr.ext_id,
1159 .rtr_mask = 1,
1160 .id_mask = CAN_EXT_ID_MASK
1161 };
1162
1163 ctx->filter_id = can_attach_isr(ctx->can_dev, send_can_rx_isr, ctx,
1164 &filter);
1165 if (ctx->filter_id < 0) {
1166 LOG_ERR("Error attaching FC filter [%d]", ctx->filter_id);
1167 return ISOTP_NO_FREE_FILTER;
1168 }
1169
1170 return 0;
1171 }
1172
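/*
 * Common entry point for all send variants: payloads fitting a single frame
 * are sent directly as SF, larger payloads attach an FC filter and start the
 * FF/CF state machine. Without a completion callback the call blocks on
 * fin_sem until the transfer finishes.
 */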
1173 static int send(struct isotp_send_ctx *ctx, const struct device *can_dev,
1174 const struct isotp_msg_id *tx_addr,
1175 const struct isotp_msg_id *rx_addr,
1176 isotp_tx_callback_t complete_cb, void *cb_arg)
1177 {
1178 size_t len;
1179 int ret;
1180
1181 __ASSERT_NO_MSG(ctx);
1182 __ASSERT_NO_MSG(can_dev);
1183 __ASSERT_NO_MSG(rx_addr && tx_addr);
1184
1185 if (complete_cb) {
1186 ctx->fin_cb.cb = complete_cb;
1187 ctx->fin_cb.arg = cb_arg;
1188 ctx->has_callback = 1;
1189 } else {
1190 k_sem_init(&ctx->fin_sem, 0, 1);
1191 ctx->has_callback = 0;
1192 }
1193
1194 ctx->can_dev = can_dev;
1195 ctx->tx_addr = *tx_addr;
1196 ctx->rx_addr = *rx_addr;
1197 ctx->error_nr = ISOTP_N_OK;
1198 ctx->wft = 0;
1199 k_work_init(&ctx->work, send_work_handler);
1200 z_init_timeout(&ctx->timeout);
1201
1202 len = get_ctx_data_length(ctx);
1203 LOG_DBG("Send %d bytes to addr 0x%x and listen on 0x%x", len,
1204 ctx->tx_addr.ext_id, ctx->rx_addr.ext_id);
1205 if (len > ISOTP_CAN_DL - (tx_addr->use_ext_addr ? 2 : 1)) {
1206 ret = attach_fc_filter(ctx);
1207 if (ret) {
1208 LOG_ERR("Can't attach fc filter: %d", ret);
1209 free_send_ctx(&ctx);
1210 return ret;
1211 }
1212
1213 LOG_DBG("Starting work to send FF");
1214 ctx->state = ISOTP_TX_SEND_FF;
1215 k_work_submit(&ctx->work);
1216 } else {
1217 LOG_DBG("Sending single frame");
1218 ctx->filter_id = -1;
1219 ret = send_sf(ctx);
1220 ctx->state = ISOTP_TX_WAIT_FIN;
1221 if (ret) {
1222 free_send_ctx(&ctx);
1223 return ret == CAN_TIMEOUT ?
1224 ISOTP_N_TIMEOUT_A : ISOTP_N_ERROR;
1225 }
1226 }
1227
1228 if (!complete_cb) {
1229 k_sem_take(&ctx->fin_sem, K_FOREVER);
1230 ret = ctx->error_nr;
1231 free_send_ctx(&ctx);
1232 return ret;
1233 }
1234
1235 return ISOTP_N_OK;
1236 }
1237
1238 int isotp_send(struct isotp_send_ctx *ctx, const struct device *can_dev,
1239 const uint8_t *data, size_t len,
1240 const struct isotp_msg_id *tx_addr,
1241 const struct isotp_msg_id *rx_addr,
1242 isotp_tx_callback_t complete_cb, void *cb_arg)
1243 {
1244 ctx->data = data;
1245 ctx->len = len;
1246 ctx->is_ctx_slab = 0;
1247 ctx->is_net_buf = 0;
1248
1249 return send(ctx, can_dev, tx_addr, rx_addr, complete_cb, cb_arg);
1250 }
1251
1252 #ifdef CONFIG_ISOTP_ENABLE_CONTEXT_BUFFERS
1253
1254 int isotp_send_ctx_buf(const struct device *can_dev,
1255 const uint8_t *data, size_t len,
1256 const struct isotp_msg_id *tx_addr,
1257 const struct isotp_msg_id *rx_addr,
1258 isotp_tx_callback_t complete_cb, void *cb_arg,
1259 k_timeout_t timeout)
1260 {
1261 struct isotp_send_ctx *ctx;
1262 int ret;
1263
1264 __ASSERT_NO_MSG(data);
1265
1266 ret = alloc_ctx(&ctx, timeout);
1267 if (ret) {
1268 return ret;
1269 }
1270
1271 ctx->data = data;
1272 ctx->len = len;
1273 ctx->is_net_buf = 0;
1274
1275 return send(ctx, can_dev, tx_addr, rx_addr, complete_cb, cb_arg);
1276 }
1277
1278 int isotp_send_net_ctx_buf(const struct device *can_dev,
1279 struct net_buf *data,
1280 const struct isotp_msg_id *tx_addr,
1281 const struct isotp_msg_id *rx_addr,
1282 isotp_tx_callback_t complete_cb, void *cb_arg,
1283 k_timeout_t timeout)
1284 {
1285 struct isotp_send_ctx *ctx;
1286 int ret;
1287
1288 __ASSERT_NO_MSG(data);
1289
1290 ret = alloc_ctx(&ctx, timeout);
1291 if (ret) {
1292 return ret;
1293 }
1294
1295 ctx->is_net_buf = 1;
1296 ctx->buf = data;
1297
1298 return send(ctx, can_dev, tx_addr, rx_addr, complete_cb, cb_arg);
1299 }
1300
1301 #ifdef CONFIG_ISOTP_USE_TX_BUF
1302 int isotp_send_buf(const struct device *can_dev,
1303 const uint8_t *data, size_t len,
1304 const struct isotp_msg_id *tx_addr,
1305 const struct isotp_msg_id *rx_addr,
1306 isotp_tx_callback_t complete_cb, void *cb_arg,
1307 k_timeout_t timeout)
1308 {
1309 struct isotp_send_ctx *ctx;
1310 struct net_buf *buf;
1311 int ret;
1312
1313 __ASSERT_NO_MSG(data);
1314
1315 ret = alloc_ctx(&ctx, timeout);
1316 if (ret) {
1317 return ret;
1318 }
1319
1320 buf = net_buf_alloc_len(&isotp_tx_pool, len, timeout);
1321 if (!buf) {
1322 k_mem_slab_free(&ctx_slab, (void **)&ctx);
1323 return ISOTP_NO_BUF_DATA_LEFT;
1324 }
1325
1326 net_buf_add_mem(buf, data, len);
1327
1328 ctx->is_net_buf = 1;
1329 ctx->buf = buf;
1330
1331 return send(ctx, can_dev, tx_addr, rx_addr, complete_cb, cb_arg);
1332 }
1333 #endif /*CONFIG_ISOTP_USE_TX_BUF*/
1334 #endif /*CONFIG_ISOTP_ENABLE_CONTEXT_BUFFERS*/
1335