/*
 * Copyright (c) 2019 Alexander Wachter
 * Copyright (c) 2023 Enphase Energy
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include "isotp_internal.h"
#include <zephyr/net/buf.h>
#include <zephyr/kernel.h>
#include <zephyr/init.h>
#include <zephyr/sys/util.h>
#include <zephyr/logging/log.h>

LOG_MODULE_REGISTER(isotp, CONFIG_ISOTP_LOG_LEVEL);

#ifdef CONFIG_ISOTP_ENABLE_CONTEXT_BUFFERS
K_MEM_SLAB_DEFINE(ctx_slab, sizeof(struct isotp_send_ctx),
		  CONFIG_ISOTP_TX_CONTEXT_BUF_COUNT, 4);
#endif

static void receive_pool_free(struct net_buf *buf);
static void receive_ff_sf_pool_free(struct net_buf *buf);

NET_BUF_POOL_DEFINE(isotp_rx_pool, CONFIG_ISOTP_RX_BUF_COUNT,
		    CONFIG_ISOTP_RX_BUF_SIZE, sizeof(uint32_t),
		    receive_pool_free);

NET_BUF_POOL_DEFINE(isotp_rx_sf_ff_pool, CONFIG_ISOTP_RX_SF_FF_BUF_COUNT,
		    CAN_MAX_DLEN, sizeof(uint32_t), receive_ff_sf_pool_free);

static struct isotp_global_ctx global_ctx = {
	.alloc_list = SYS_SLIST_STATIC_INIT(&global_ctx.alloc_list),
	.ff_sf_alloc_list = SYS_SLIST_STATIC_INIT(&global_ctx.ff_sf_alloc_list)
};

#ifdef CONFIG_ISOTP_USE_TX_BUF
NET_BUF_POOL_VAR_DEFINE(isotp_tx_pool, CONFIG_ISOTP_TX_BUF_COUNT,
			CONFIG_ISOTP_BUF_TX_DATA_POOL_SIZE, 0, NULL);
#endif

static void receive_state_machine(struct isotp_recv_ctx *rctx);

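/*
 * Fill a CAN frame header (ID and IDE/FDF/BRS flags) from an ISO-TP message
 * address. Data and DLC are filled in by the callers.
 */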
static inline void prepare_frame(struct can_frame *frame, struct isotp_msg_id *addr)
{
	frame->id = addr->ext_id;
	frame->flags = ((addr->flags & ISOTP_MSG_IDE) != 0 ? CAN_FRAME_IDE : 0) |
		       ((addr->flags & ISOTP_MSG_FDF) != 0 ? CAN_FRAME_FDF : 0) |
		       ((addr->flags & ISOTP_MSG_BRS) != 0 ? CAN_FRAME_BRS : 0);
}

static inline void prepare_filter(struct can_filter *filter, struct isotp_msg_id *addr,
				  uint32_t mask)
{
	filter->id = addr->ext_id;
	filter->mask = mask;
	filter->flags = (addr->flags & ISOTP_MSG_IDE) != 0 ? CAN_FILTER_IDE : 0;
}

/*
 * Wake every context that is waiting for a buffer
 */
static void receive_pool_free(struct net_buf *buf)
{
	struct isotp_recv_ctx *rctx;
	sys_snode_t *rctx_node;

	net_buf_destroy(buf);

	SYS_SLIST_FOR_EACH_NODE(&global_ctx.alloc_list, rctx_node) {
		rctx = CONTAINER_OF(rctx_node, struct isotp_recv_ctx, alloc_node);
		k_work_submit(&rctx->work);
	}
}

static void receive_ff_sf_pool_free(struct net_buf *buf)
{
	struct isotp_recv_ctx *rctx;
	sys_snode_t *rctx_node;

	net_buf_destroy(buf);

	SYS_SLIST_FOR_EACH_NODE(&global_ctx.ff_sf_alloc_list, rctx_node) {
		rctx = CONTAINER_OF(rctx_node, struct isotp_recv_ctx, alloc_node);
		k_work_submit(&rctx->work);
	}
}

static inline void receive_report_error(struct isotp_recv_ctx *rctx, int err)
{
	rctx->state = ISOTP_RX_STATE_ERR;
	rctx->error_nr = err;
}

static void receive_can_tx(const struct device *dev, int error, void *arg)
{
	struct isotp_recv_ctx *rctx = (struct isotp_recv_ctx *)arg;

	ARG_UNUSED(dev);

	if (error != 0) {
		LOG_ERR("Error sending FC frame (%d)", error);
		receive_report_error(rctx, ISOTP_N_ERROR);
		k_work_submit(&rctx->work);
	}
}

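/*
 * Extract the FF_DL from a first frame: 12 bits spread over the first two PCI
 * bytes, or a 32 bit escape sequence when those bits are zero.
 */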
static inline uint32_t receive_get_ff_length(struct net_buf *buf)
{
	uint32_t len;
	uint8_t pci = net_buf_pull_u8(buf);

	len = ((pci & ISOTP_PCI_FF_DL_UPPER_MASK) << 8) | net_buf_pull_u8(buf);

	/* Jumbo packet (32 bit length) */
	if (!len) {
		len = net_buf_pull_be32(buf);
	}

	return len;
}

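/*
 * Extract the SF_DL from a single frame: low nibble of the PCI byte, or the
 * following byte for CAN FD single frames with an SF_DL of zero.
 */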
static inline uint32_t receive_get_sf_length(struct net_buf *buf, bool fdf)
{
	uint8_t len = net_buf_pull_u8(buf) & ISOTP_PCI_SF_DL_MASK;

	/* Single frames > 8 bytes (CAN FD only) */
	if (IS_ENABLED(CONFIG_CAN_FD_MODE) && fdf && !len) {
		len = net_buf_pull_u8(buf);
	}

	return len;
}

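/*
 * Build and send a flow control (FC) frame carrying the given flow status
 * together with the configured block size (BS) and STmin.
 */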
static void receive_send_fc(struct isotp_recv_ctx *rctx, uint8_t fs)
{
	struct can_frame frame;
	uint8_t *data = frame.data;
	uint8_t payload_len;
	int ret;

	__ASSERT_NO_MSG(!(fs & ISOTP_PCI_TYPE_MASK));

	prepare_frame(&frame, &rctx->tx_addr);

	if ((rctx->tx_addr.flags & ISOTP_MSG_EXT_ADDR) != 0) {
		*data++ = rctx->tx_addr.ext_addr;
	}

	*data++ = ISOTP_PCI_TYPE_FC | fs;
	*data++ = rctx->opts.bs;
	*data++ = rctx->opts.stmin;
	payload_len = data - frame.data;

#ifdef CONFIG_ISOTP_ENABLE_TX_PADDING
	/* AUTOSAR requirement SWS_CanTp_00347 */
	memset(&frame.data[payload_len], ISOTP_PAD_BYTE,
	       ISOTP_PADDED_FRAME_DL_MIN - payload_len);
	frame.dlc = can_bytes_to_dlc(ISOTP_PADDED_FRAME_DL_MIN);
#else
	frame.dlc = can_bytes_to_dlc(payload_len);
#endif

	ret = can_send(rctx->can_dev, &frame, K_MSEC(ISOTP_A_TIMEOUT_MS), receive_can_tx, rctx);
	if (ret) {
		LOG_ERR("Can't send FC (%d)", ret);
		receive_report_error(rctx, ISOTP_N_TIMEOUT_A);
		receive_state_machine(rctx);
	}
}

static inline struct net_buf *receive_alloc_buffer_chain(uint32_t len)
{
	struct net_buf *buf, *frag, *last;
	uint32_t remaining_len;

	LOG_DBG("Allocate %d bytes ", len);
	buf = net_buf_alloc_fixed(&isotp_rx_pool, K_NO_WAIT);
	if (!buf) {
		return NULL;
	}

	if (len <= CONFIG_ISOTP_RX_BUF_SIZE) {
		return buf;
	}

	remaining_len = len - CONFIG_ISOTP_RX_BUF_SIZE;
	last = buf;
	while (remaining_len) {
		frag = net_buf_alloc_fixed(&isotp_rx_pool, K_NO_WAIT);
		if (!frag) {
			net_buf_unref(buf);
			return NULL;
		}

		net_buf_frag_insert(last, frag);
		last = frag;
		remaining_len = remaining_len > CONFIG_ISOTP_RX_BUF_SIZE ?
				remaining_len - CONFIG_ISOTP_RX_BUF_SIZE : 0;
	}

	return buf;
}

static void receive_timeout_handler(struct k_timer *timer)
{
	struct isotp_recv_ctx *rctx = CONTAINER_OF(timer, struct isotp_recv_ctx, timer);

	switch (rctx->state) {
	case ISOTP_RX_STATE_WAIT_CF:
		LOG_ERR("Timeout while waiting for CF");
		receive_report_error(rctx, ISOTP_N_TIMEOUT_CR);
		break;

	case ISOTP_RX_STATE_TRY_ALLOC:
		rctx->state = ISOTP_RX_STATE_SEND_WAIT;
		break;
	}

	k_work_submit(&rctx->work);
}

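/*
 * Allocate the net_buf chain for the next block (or for the whole message if
 * BS is zero). If the pool is exhausted, the context is put on the waiters
 * list or a WAIT flow control frame is triggered, to be retried later.
 */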
static int receive_alloc_buffer(struct isotp_recv_ctx *rctx)
{
	struct net_buf *buf = NULL;

	if (rctx->opts.bs == 0) {
		/* Alloc all buffers because we can't wait during reception */
		buf = receive_alloc_buffer_chain(rctx->length);
	} else {
		/* Alloc the minimum of the remaining length and bytes of one block */
		uint32_t len = MIN(rctx->length, rctx->opts.bs * (rctx->rx_addr.dl - 1));

		buf = receive_alloc_buffer_chain(len);
	}

	if (!buf) {
		k_timer_start(&rctx->timer, K_MSEC(ISOTP_ALLOC_TIMEOUT_MS), K_NO_WAIT);

		if (rctx->wft == ISOTP_WFT_FIRST) {
			LOG_DBG("Allocation failed. Append to alloc list");
			rctx->wft = 0;
			sys_slist_append(&global_ctx.alloc_list, &rctx->alloc_node);
		} else {
			LOG_DBG("Allocation failed. Send WAIT frame");
			rctx->state = ISOTP_RX_STATE_SEND_WAIT;
			receive_state_machine(rctx);
		}

		return -1;
	}

	if (rctx->state == ISOTP_RX_STATE_TRY_ALLOC) {
		k_timer_stop(&rctx->timer);
		rctx->wft = ISOTP_WFT_FIRST;
		sys_slist_find_and_remove(&global_ctx.alloc_list, &rctx->alloc_node);
	}

	if (rctx->opts.bs != 0) {
		rctx->buf = buf;
	} else {
		net_buf_frag_insert(rctx->buf, buf);
	}

	rctx->act_frag = buf;
	return 0;
}

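/*
 * RX state machine: processes SF/FF PCI information, allocates buffers,
 * sends CTS/WAIT/OVFLW flow control frames and recycles the context for the
 * next message. Driven from the system work queue, and invoked directly on
 * some error paths.
 */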
static void receive_state_machine(struct isotp_recv_ctx *rctx)
{
	int ret;
	uint32_t *ud_rem_len;

	switch (rctx->state) {
	case ISOTP_RX_STATE_PROCESS_SF:
		rctx->length = receive_get_sf_length(rctx->buf,
						    (rctx->rx_addr.flags & ISOTP_MSG_FDF) != 0);
		ud_rem_len = net_buf_user_data(rctx->buf);
		*ud_rem_len = 0;
		LOG_DBG("SM process SF of length %d", rctx->length);
		net_buf_put(&rctx->fifo, rctx->buf);
		rctx->state = ISOTP_RX_STATE_RECYCLE;
		receive_state_machine(rctx);
		break;

	case ISOTP_RX_STATE_PROCESS_FF:
		rctx->length = receive_get_ff_length(rctx->buf);
		LOG_DBG("SM process FF. Length: %d", rctx->length);
		rctx->length -= rctx->buf->len;
		if (rctx->opts.bs == 0 &&
		    rctx->length > CONFIG_ISOTP_RX_BUF_COUNT * CONFIG_ISOTP_RX_BUF_SIZE) {
			LOG_ERR("Pkt length is %d but buffer has only %d bytes", rctx->length,
				CONFIG_ISOTP_RX_BUF_COUNT * CONFIG_ISOTP_RX_BUF_SIZE);
			receive_report_error(rctx, ISOTP_N_BUFFER_OVERFLW);
			receive_state_machine(rctx);
			break;
		}

		if (rctx->opts.bs) {
			rctx->bs = rctx->opts.bs;
			ud_rem_len = net_buf_user_data(rctx->buf);
			*ud_rem_len = rctx->length;
			net_buf_put(&rctx->fifo, rctx->buf);
		}

		rctx->wft = ISOTP_WFT_FIRST;
		rctx->state = ISOTP_RX_STATE_TRY_ALLOC;
		__fallthrough;
	case ISOTP_RX_STATE_TRY_ALLOC:
		LOG_DBG("SM try to allocate");
		k_timer_stop(&rctx->timer);
		ret = receive_alloc_buffer(rctx);
		if (ret) {
			LOG_DBG("SM allocation failed. Wait for free buffer");
			break;
		}

		rctx->state = ISOTP_RX_STATE_SEND_FC;
		__fallthrough;
	case ISOTP_RX_STATE_SEND_FC:
		LOG_DBG("SM send CTS FC frame");
		receive_send_fc(rctx, ISOTP_PCI_FS_CTS);
		k_timer_start(&rctx->timer, K_MSEC(ISOTP_CR_TIMEOUT_MS), K_NO_WAIT);
		rctx->state = ISOTP_RX_STATE_WAIT_CF;
		break;

	case ISOTP_RX_STATE_SEND_WAIT:
		if (++rctx->wft < CONFIG_ISOTP_WFTMAX) {
			LOG_DBG("Send wait frame number %d", rctx->wft);
			receive_send_fc(rctx, ISOTP_PCI_FS_WAIT);
			k_timer_start(&rctx->timer, K_MSEC(ISOTP_ALLOC_TIMEOUT_MS), K_NO_WAIT);
			rctx->state = ISOTP_RX_STATE_TRY_ALLOC;
			break;
		}

		sys_slist_find_and_remove(&global_ctx.alloc_list, &rctx->alloc_node);
		LOG_ERR("Sent %d wait frames. Giving up on allocation now", rctx->wft);
		receive_report_error(rctx, ISOTP_N_BUFFER_OVERFLW);
		__fallthrough;
	case ISOTP_RX_STATE_ERR:
		LOG_DBG("SM ERR state. err nr: %d", rctx->error_nr);
		k_timer_stop(&rctx->timer);

		if (rctx->error_nr == ISOTP_N_BUFFER_OVERFLW) {
			receive_send_fc(rctx, ISOTP_PCI_FS_OVFLW);
		}

		k_fifo_cancel_wait(&rctx->fifo);
		net_buf_unref(rctx->buf);
		rctx->buf = NULL;
		rctx->state = ISOTP_RX_STATE_RECYCLE;
		__fallthrough;
	case ISOTP_RX_STATE_RECYCLE:
		LOG_DBG("SM recycle context for next message");
		rctx->buf = net_buf_alloc_fixed(&isotp_rx_sf_ff_pool, K_NO_WAIT);
		if (!rctx->buf) {
			LOG_DBG("No free context. Append to waiters list");
			sys_slist_append(&global_ctx.ff_sf_alloc_list, &rctx->alloc_node);
			break;
		}

		sys_slist_find_and_remove(&global_ctx.ff_sf_alloc_list, &rctx->alloc_node);
		rctx->state = ISOTP_RX_STATE_WAIT_FF_SF;
		__fallthrough;
	case ISOTP_RX_STATE_UNBOUND:
		break;

	default:
		break;
	}
}

static void receive_work_handler(struct k_work *item)
{
	struct isotp_recv_ctx *rctx = CONTAINER_OF(item, struct isotp_recv_ctx, work);

	receive_state_machine(rctx);
}

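/*
 * Handle an incoming single frame or first frame: check the optional extended
 * address, learn the sender address for fixed addressing, validate the PCI
 * and copy the payload into the FF/SF buffer.
 */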
static void process_ff_sf(struct isotp_recv_ctx *rctx, struct can_frame *frame)
{
	int index = 0;
	uint8_t sf_len;
	uint8_t payload_len;
	uint32_t rx_sa;		/* ISO-TP fixed source address (if used) */
	uint8_t can_dl = can_dlc_to_bytes(frame->dlc);

	if ((rctx->rx_addr.flags & ISOTP_MSG_EXT_ADDR) != 0) {
		if (frame->data[index++] != rctx->rx_addr.ext_addr) {
			return;
		}
	}

	if ((rctx->rx_addr.flags & ISOTP_MSG_FIXED_ADDR) != 0) {
		/* store actual CAN ID used by the sender */
		rctx->rx_addr.ext_id = frame->id;
		/* replace TX target address with RX source address */
		rx_sa = (frame->id & ISOTP_FIXED_ADDR_SA_MASK) >>
		     ISOTP_FIXED_ADDR_SA_POS;
		rctx->tx_addr.ext_id &= ~(ISOTP_FIXED_ADDR_TA_MASK);
		rctx->tx_addr.ext_id |= rx_sa << ISOTP_FIXED_ADDR_TA_POS;
		/* use same priority for TX as in received message */
		if (ISOTP_FIXED_ADDR_PRIO_MASK) {
			rctx->tx_addr.ext_id &= ~(ISOTP_FIXED_ADDR_PRIO_MASK);
			rctx->tx_addr.ext_id |= frame->id & ISOTP_FIXED_ADDR_PRIO_MASK;
		}
	}

	switch (frame->data[index] & ISOTP_PCI_TYPE_MASK) {
	case ISOTP_PCI_TYPE_FF:
		LOG_DBG("Got FF IRQ");
		if (can_dl < ISOTP_FF_DL_MIN) {
			LOG_INF("FF DLC invalid. Ignore");
			return;
		}

		payload_len = can_dl;
		rctx->state = ISOTP_RX_STATE_PROCESS_FF;
		rctx->rx_addr.dl = can_dl;
		rctx->sn_expected = 1;
		break;

	case ISOTP_PCI_TYPE_SF:
		LOG_DBG("Got SF IRQ");
#ifdef CONFIG_ISOTP_REQUIRE_RX_PADDING
		/* AUTOSAR requirement SWS_CanTp_00345 */
		if (can_dl < ISOTP_PADDED_FRAME_DL_MIN) {
			LOG_INF("SF DLC invalid. Ignore");
			return;
		}
#endif
		sf_len = frame->data[index] & ISOTP_PCI_SF_DL_MASK;

		/* Single frames > 8 bytes (CAN FD only) */
		if (IS_ENABLED(CONFIG_CAN_FD_MODE) && (rctx->rx_addr.flags & ISOTP_MSG_FDF) != 0 &&
		    can_dl > ISOTP_4BIT_SF_MAX_CAN_DL) {
			if (sf_len != 0) {
				LOG_INF("SF DL invalid. Ignore");
				return;
			}
			sf_len = frame->data[index + 1];
			payload_len = index + 2 + sf_len;
		} else {
			payload_len = index + 1 + sf_len;
		}

		if (payload_len > can_dl) {
			LOG_INF("SF DL does not fit. Ignore");
			return;
		}

		rctx->state = ISOTP_RX_STATE_PROCESS_SF;
		break;

	default:
		LOG_INF("Got unexpected frame. Ignore");
		return;
	}

	net_buf_add_mem(rctx->buf, &frame->data[index], payload_len - index);
}

static inline void receive_add_mem(struct isotp_recv_ctx *rctx, uint8_t *data, size_t len)
{
	size_t tailroom = net_buf_tailroom(rctx->act_frag);

	if (tailroom >= len) {
		net_buf_add_mem(rctx->act_frag, data, len);
		return;
	}

	/* Use next fragment that is already allocated */
	net_buf_add_mem(rctx->act_frag, data, tailroom);
	rctx->act_frag = rctx->act_frag->frags;
	if (!rctx->act_frag) {
		LOG_ERR("No fragment left to append data");
		receive_report_error(rctx, ISOTP_N_BUFFER_OVERFLW);
		return;
	}

	net_buf_add_mem(rctx->act_frag, data + tailroom, len - tailroom);
}

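/*
 * Handle an incoming consecutive frame: restart the CR timeout, verify the
 * sequence number, append the payload and hand the buffer to the application
 * FIFO when the message or the current block is complete.
 */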
static void process_cf(struct isotp_recv_ctx *rctx, struct can_frame *frame)
{
	uint32_t *ud_rem_len = (uint32_t *)net_buf_user_data(rctx->buf);
	int index = 0;
	uint32_t data_len;
	uint8_t can_dl = can_dlc_to_bytes(frame->dlc);

	if ((rctx->rx_addr.flags & ISOTP_MSG_EXT_ADDR) != 0) {
		if (frame->data[index++] != rctx->rx_addr.ext_addr) {
			return;
		}
	}

	if ((frame->data[index] & ISOTP_PCI_TYPE_MASK) != ISOTP_PCI_TYPE_CF) {
		LOG_DBG("Waiting for CF but got something else (%d)",
			frame->data[index] >> ISOTP_PCI_TYPE_POS);
		receive_report_error(rctx, ISOTP_N_UNEXP_PDU);
		k_work_submit(&rctx->work);
		return;
	}

	k_timer_start(&rctx->timer, K_MSEC(ISOTP_CR_TIMEOUT_MS), K_NO_WAIT);

	if ((frame->data[index++] & ISOTP_PCI_SN_MASK) != rctx->sn_expected++) {
		LOG_ERR("Sequence number mismatch");
		receive_report_error(rctx, ISOTP_N_WRONG_SN);
		k_work_submit(&rctx->work);
		return;
	}

#ifdef CONFIG_ISOTP_REQUIRE_RX_PADDING
	/* AUTOSAR requirement SWS_CanTp_00346 */
	if (can_dl < ISOTP_PADDED_FRAME_DL_MIN) {
		LOG_ERR("CF DL invalid");
		receive_report_error(rctx, ISOTP_N_ERROR);
		return;
	}
#endif

	/* The first frame defines the RX data length; consecutive frames
	 * must have the same length (except for the last frame)
	 */
	if (can_dl != rctx->rx_addr.dl && rctx->length > can_dl - index) {
		LOG_ERR("CF DL invalid");
		receive_report_error(rctx, ISOTP_N_ERROR);
		return;
	}

	LOG_DBG("Got CF irq. Appending data");
	data_len = MIN(rctx->length, can_dl - index);
	receive_add_mem(rctx, &frame->data[index], data_len);
	rctx->length -= data_len;
	LOG_DBG("%d bytes remaining", rctx->length);

	if (rctx->length == 0) {
		rctx->state = ISOTP_RX_STATE_RECYCLE;
		*ud_rem_len = 0;
		net_buf_put(&rctx->fifo, rctx->buf);
		return;
	}

	if (rctx->opts.bs && !--rctx->bs) {
		LOG_DBG("Block is complete. Allocate new buffer");
		rctx->bs = rctx->opts.bs;
		*ud_rem_len = rctx->length;
		net_buf_put(&rctx->fifo, rctx->buf);
		rctx->state = ISOTP_RX_STATE_TRY_ALLOC;
	}
}

static void receive_can_rx(const struct device *dev, struct can_frame *frame, void *arg)
{
	struct isotp_recv_ctx *rctx = (struct isotp_recv_ctx *)arg;

	ARG_UNUSED(dev);

	if (IS_ENABLED(CONFIG_CAN_ACCEPT_RTR) && (frame->flags & CAN_FRAME_RTR) != 0U) {
		return;
	}

	switch (rctx->state) {
	case ISOTP_RX_STATE_WAIT_FF_SF:
		__ASSERT_NO_MSG(rctx->buf);
		process_ff_sf(rctx, frame);
		break;

	case ISOTP_RX_STATE_WAIT_CF:
		process_cf(rctx, frame);
		/* still waiting for more CF */
		if (rctx->state == ISOTP_RX_STATE_WAIT_CF) {
			return;
		}

		break;

	case ISOTP_RX_STATE_RECYCLE:
		LOG_ERR("Got a frame but was not yet ready for a new one");
		receive_report_error(rctx, ISOTP_N_BUFFER_OVERFLW);
		break;

	default:
		LOG_INF("Got a frame in an unexpected state");
	}

	k_work_submit(&rctx->work);
}

static inline int add_ff_sf_filter(struct isotp_recv_ctx *rctx)
{
	struct can_filter filter;
	uint32_t mask;

	if ((rctx->rx_addr.flags & ISOTP_MSG_FIXED_ADDR) != 0) {
		mask = ISOTP_FIXED_ADDR_RX_MASK;
	} else if ((rctx->rx_addr.flags & ISOTP_MSG_IDE) != 0) {
		mask = CAN_EXT_ID_MASK;
	} else {
		mask = CAN_STD_ID_MASK;
	}

	prepare_filter(&filter, &rctx->rx_addr, mask);

	rctx->filter_id = can_add_rx_filter(rctx->can_dev, receive_can_rx, rctx, &filter);
	if (rctx->filter_id < 0) {
		LOG_ERR("Error adding FF filter [%d]", rctx->filter_id);
		return ISOTP_NO_FREE_FILTER;
	}

	return 0;
}

int isotp_bind(struct isotp_recv_ctx *rctx, const struct device *can_dev,
	       const struct isotp_msg_id *rx_addr,
	       const struct isotp_msg_id *tx_addr,
	       const struct isotp_fc_opts *opts,
	       k_timeout_t timeout)
{
	can_mode_t cap;
	int ret;

	__ASSERT(rctx, "rctx is NULL");
	__ASSERT(can_dev, "CAN device is NULL");
	__ASSERT(rx_addr && tx_addr, "RX or TX addr is NULL");
	__ASSERT(opts, "OPTS is NULL");

	rctx->can_dev = can_dev;
	rctx->rx_addr = *rx_addr;
	rctx->tx_addr = *tx_addr;
	k_fifo_init(&rctx->fifo);

	__ASSERT(opts->stmin < ISOTP_STMIN_MAX, "STmin limit");
	__ASSERT(opts->stmin <= ISOTP_STMIN_MS_MAX ||
		 opts->stmin >= ISOTP_STMIN_US_BEGIN, "STmin reserved");

	rctx->opts = *opts;
	rctx->state = ISOTP_RX_STATE_WAIT_FF_SF;

	if ((rx_addr->flags & ISOTP_MSG_FDF) != 0 || (tx_addr->flags & ISOTP_MSG_FDF) != 0) {
		ret = can_get_capabilities(can_dev, &cap);
		if (ret != 0 || (cap & CAN_MODE_FD) == 0) {
			LOG_ERR("CAN controller does not support FD mode");
			return ISOTP_N_ERROR;
		}
	}

	LOG_DBG("Binding to addr: 0x%x. Responding on 0x%x",
		rctx->rx_addr.ext_id, rctx->tx_addr.ext_id);

	rctx->buf = net_buf_alloc_fixed(&isotp_rx_sf_ff_pool, timeout);
	if (!rctx->buf) {
		LOG_ERR("No buffer for FF left");
		return ISOTP_NO_NET_BUF_LEFT;
	}

	ret = add_ff_sf_filter(rctx);
	if (ret) {
		LOG_ERR("Can't add filter for binding");
		net_buf_unref(rctx->buf);
		rctx->buf = NULL;
		return ret;
	}

	k_work_init(&rctx->work, receive_work_handler);
	k_timer_init(&rctx->timer, receive_timeout_handler, NULL);

	return ISOTP_N_OK;
}

void isotp_unbind(struct isotp_recv_ctx *rctx)
{
	struct net_buf *buf;

	if (rctx->filter_id >= 0 && rctx->can_dev) {
		can_remove_rx_filter(rctx->can_dev, rctx->filter_id);
	}

	k_timer_stop(&rctx->timer);

	sys_slist_find_and_remove(&global_ctx.ff_sf_alloc_list, &rctx->alloc_node);
	sys_slist_find_and_remove(&global_ctx.alloc_list, &rctx->alloc_node);

	rctx->state = ISOTP_RX_STATE_UNBOUND;

	while ((buf = net_buf_get(&rctx->fifo, K_NO_WAIT))) {
		net_buf_unref(buf);
	}

	k_fifo_cancel_wait(&rctx->fifo);

	if (rctx->buf) {
		net_buf_unref(rctx->buf);
	}

	LOG_DBG("Unbound");
}

int isotp_recv_net(struct isotp_recv_ctx *rctx, struct net_buf **buffer, k_timeout_t timeout)
{
	struct net_buf *buf;
	int ret;

	buf = net_buf_get(&rctx->fifo, timeout);
	if (!buf) {
		ret = rctx->error_nr ? rctx->error_nr : ISOTP_RECV_TIMEOUT;
		rctx->error_nr = 0;

		return ret;
	}

	*buffer = buf;

	return *(uint32_t *)net_buf_user_data(buf);
}

int isotp_recv(struct isotp_recv_ctx *rctx, uint8_t *data, size_t len, k_timeout_t timeout)
{
	size_t copied, to_copy;
	int err;

	if (!rctx->recv_buf) {
		rctx->recv_buf = net_buf_get(&rctx->fifo, timeout);
		if (!rctx->recv_buf) {
			err = rctx->error_nr ? rctx->error_nr : ISOTP_RECV_TIMEOUT;
			rctx->error_nr = 0;

			return err;
		}
	}

	/* traverse fragments and delete them after copying the data */
	copied = 0;
	while (rctx->recv_buf && copied < len) {
		to_copy = MIN(len - copied, rctx->recv_buf->len);
		memcpy((uint8_t *)data + copied, rctx->recv_buf->data, to_copy);

		if (rctx->recv_buf->len == to_copy) {
			/* point recv_buf to next frag */
			rctx->recv_buf = net_buf_frag_del(NULL, rctx->recv_buf);
		} else {
			/* pull received data from remaining frag(s) */
			net_buf_pull(rctx->recv_buf, to_copy);
		}

		copied += to_copy;
	}

	return copied;
}

static inline void send_report_error(struct isotp_send_ctx *sctx, uint32_t err)
{
	sctx->state = ISOTP_TX_ERR;
	sctx->error_nr = err;
}

static void send_can_tx_cb(const struct device *dev, int error, void *arg)
{
	struct isotp_send_ctx *sctx = (struct isotp_send_ctx *)arg;

	ARG_UNUSED(dev);

	sctx->tx_backlog--;
	k_sem_give(&sctx->tx_sem);

	if (sctx->state == ISOTP_TX_WAIT_BACKLOG) {
		if (sctx->tx_backlog > 0) {
			return;
		}

		sctx->state = ISOTP_TX_WAIT_FIN;
	}

	k_work_submit(&sctx->work);
}

static void send_timeout_handler(struct k_timer *timer)
{
	struct isotp_send_ctx *sctx = CONTAINER_OF(timer, struct isotp_send_ctx, timer);

	if (sctx->state != ISOTP_TX_SEND_CF) {
		send_report_error(sctx, ISOTP_N_TIMEOUT_BS);
		LOG_ERR("Reception of next FC has timed out");
	}

	k_work_submit(&sctx->work);
}

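/*
 * Handle an incoming flow control frame: continue on CTS with the peer's BS
 * and STmin, restart the BS timeout on WAIT (up to CONFIG_ISOTP_WFTMAX times)
 * and abort on OVFLW or an invalid flow status.
 */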
static void send_process_fc(struct isotp_send_ctx *sctx, struct can_frame *frame)
{
	uint8_t *data = frame->data;

	if ((sctx->rx_addr.flags & ISOTP_MSG_EXT_ADDR) != 0) {
		if (sctx->rx_addr.ext_addr != *data++) {
			return;
		}
	}

	if ((*data & ISOTP_PCI_TYPE_MASK) != ISOTP_PCI_TYPE_FC) {
		LOG_ERR("Got unexpected PDU, expected FC");
		send_report_error(sctx, ISOTP_N_UNEXP_PDU);
		return;
	}

#ifdef CONFIG_ISOTP_REQUIRE_RX_PADDING
	/* AUTOSAR requirement SWS_CanTp_00349 */
	if (frame->dlc < ISOTP_PADDED_FRAME_DL_MIN) {
		LOG_ERR("FC DL invalid. Ignore");
		send_report_error(sctx, ISOTP_N_ERROR);
		return;
	}
#endif

	switch (*data++ & ISOTP_PCI_FS_MASK) {
	case ISOTP_PCI_FS_CTS:
		sctx->state = ISOTP_TX_SEND_CF;
		sctx->wft = 0;
		sctx->tx_backlog = 0;
		k_sem_reset(&sctx->tx_sem);
		sctx->opts.bs = *data++;
		sctx->opts.stmin = *data++;
		sctx->bs = sctx->opts.bs;
		LOG_DBG("Got CTS. BS: %d, STmin: %d", sctx->opts.bs,
			sctx->opts.stmin);
		break;

	case ISOTP_PCI_FS_WAIT:
		LOG_DBG("Got WAIT frame");
		k_timer_start(&sctx->timer, K_MSEC(ISOTP_BS_TIMEOUT_MS), K_NO_WAIT);
		if (sctx->wft >= CONFIG_ISOTP_WFTMAX) {
			LOG_INF("Got too many wait frames");
			send_report_error(sctx, ISOTP_N_WFT_OVRN);
		}

		sctx->wft++;
		break;

	case ISOTP_PCI_FS_OVFLW:
		LOG_ERR("Got overflow FC frame");
		send_report_error(sctx, ISOTP_N_BUFFER_OVERFLW);
		break;

	default:
		send_report_error(sctx, ISOTP_N_INVALID_FS);
	}
}

static void send_can_rx_cb(const struct device *dev, struct can_frame *frame, void *arg)
{
	struct isotp_send_ctx *sctx = (struct isotp_send_ctx *)arg;

	ARG_UNUSED(dev);

	if (IS_ENABLED(CONFIG_CAN_ACCEPT_RTR) && (frame->flags & CAN_FRAME_RTR) != 0U) {
		return;
	}

	if (sctx->state == ISOTP_TX_WAIT_FC) {
		k_timer_stop(&sctx->timer);
		send_process_fc(sctx, frame);
	} else {
		LOG_ERR("Got unexpected PDU");
		send_report_error(sctx, ISOTP_N_UNEXP_PDU);
	}

	k_work_submit(&sctx->work);
}

static size_t get_send_ctx_data_len(struct isotp_send_ctx *sctx)
{
	return sctx->is_net_buf ? net_buf_frags_len(sctx->buf) : sctx->len;
}

static const uint8_t *get_send_ctx_data(struct isotp_send_ctx *sctx)
{
	if (sctx->is_net_buf) {
		return sctx->buf->data;
	} else {
		return sctx->data;
	}
}

static void pull_send_ctx_data(struct isotp_send_ctx *sctx, size_t len)
{
	if (sctx->is_net_buf) {
		net_buf_pull_mem(sctx->buf, len);
	} else {
		sctx->data += len;
		sctx->len -= len;
	}
}

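/*
 * Send a single frame, using the escape byte for the SF data length when a
 * CAN FD payload exceeds the classic 4-bit SF_DL range, and padding the frame
 * when required.
 */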
static inline int send_sf(struct isotp_send_ctx *sctx)
{
	struct can_frame frame;
	size_t len = get_send_ctx_data_len(sctx);
	int index = 0;
	int ret;
	const uint8_t *data;

	prepare_frame(&frame, &sctx->tx_addr);

	data = get_send_ctx_data(sctx);
	pull_send_ctx_data(sctx, len);

	if ((sctx->tx_addr.flags & ISOTP_MSG_EXT_ADDR) != 0) {
		frame.data[index++] = sctx->tx_addr.ext_addr;
	}

	if (IS_ENABLED(CONFIG_CAN_FD_MODE) && (sctx->tx_addr.flags & ISOTP_MSG_FDF) != 0 &&
	    len > ISOTP_4BIT_SF_MAX_CAN_DL - 1 - index) {
		frame.data[index++] = ISOTP_PCI_TYPE_SF;
		frame.data[index++] = len;
	} else {
		frame.data[index++] = ISOTP_PCI_TYPE_SF | len;
	}

	if (len > sctx->tx_addr.dl - index) {
		LOG_ERR("SF len does not fit DL");
		return -ENOSPC;
	}

	memcpy(&frame.data[index], data, len);

	if (IS_ENABLED(CONFIG_ISOTP_ENABLE_TX_PADDING) ||
	    (IS_ENABLED(CONFIG_CAN_FD_MODE) && (sctx->tx_addr.flags & ISOTP_MSG_FDF) != 0 &&
	     len + index > ISOTP_PADDED_FRAME_DL_MIN)) {
		/* AUTOSAR requirements SWS_CanTp_00348 / SWS_CanTp_00351.
		 * Mandatory for ISO-TP CAN FD frames > 8 bytes.
		 */
		frame.dlc = can_bytes_to_dlc(
			MAX(ISOTP_PADDED_FRAME_DL_MIN, len + index));
		memset(&frame.data[index + len], ISOTP_PAD_BYTE,
		       can_dlc_to_bytes(frame.dlc) - len - index);
	} else {
		frame.dlc = can_bytes_to_dlc(len + index);
	}

	sctx->state = ISOTP_TX_SEND_SF;
	ret = can_send(sctx->can_dev, &frame, K_MSEC(ISOTP_A_TIMEOUT_MS), send_can_tx_cb, sctx);
	return ret;
}

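/*
 * Send the first frame of a multi-frame transmission, using the 32 bit FF_DL
 * escape sequence for payloads larger than 4095 bytes.
 */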
static inline int send_ff(struct isotp_send_ctx *sctx)
{
	struct can_frame frame;
	int index = 0;
	size_t len = get_send_ctx_data_len(sctx);
	int ret;
	const uint8_t *data;

	prepare_frame(&frame, &sctx->tx_addr);

	frame.dlc = can_bytes_to_dlc(sctx->tx_addr.dl);

	if ((sctx->tx_addr.flags & ISOTP_MSG_EXT_ADDR) != 0) {
		frame.data[index++] = sctx->tx_addr.ext_addr;
	}

	if (len > 0xFFF) {
		frame.data[index++] = ISOTP_PCI_TYPE_FF;
		frame.data[index++] = 0;
		frame.data[index++] = (len >> 3 * 8) & 0xFF;
		frame.data[index++] = (len >> 2 * 8) & 0xFF;
		frame.data[index++] = (len >>   8) & 0xFF;
		frame.data[index++] = len & 0xFF;
	} else {
		frame.data[index++] = ISOTP_PCI_TYPE_FF | (len >> 8);
		frame.data[index++] = len & 0xFF;
	}

	/* According to ISO 15765-2 the FF has an implicit SN of 0, so the SN
	 * counter starts at one even though no SN field is part of the FF frame
	 */
	sctx->sn = 1;
	data = get_send_ctx_data(sctx);
	pull_send_ctx_data(sctx, sctx->tx_addr.dl - index);
	memcpy(&frame.data[index], data, sctx->tx_addr.dl - index);

	ret = can_send(sctx->can_dev, &frame, K_MSEC(ISOTP_A_TIMEOUT_MS), send_can_tx_cb, sctx);
	return ret;
}

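/*
 * Send one consecutive frame. On success the payload is consumed, SN and
 * backlog are advanced and the remaining data length is returned; a negative
 * value is returned when can_send() fails.
 */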
static inline int send_cf(struct isotp_send_ctx *sctx)
{
	struct can_frame frame;
	int index = 0;
	int ret;
	int len;
	int rem_len;
	const uint8_t *data;

	prepare_frame(&frame, &sctx->tx_addr);

	if ((sctx->tx_addr.flags & ISOTP_MSG_EXT_ADDR) != 0) {
		frame.data[index++] = sctx->tx_addr.ext_addr;
	}

	/* SN wraps around at 0xF automatically because it has a 4 bit size */
	frame.data[index++] = ISOTP_PCI_TYPE_CF | sctx->sn;

	rem_len = get_send_ctx_data_len(sctx);
	len = MIN(rem_len, sctx->tx_addr.dl - index);
	rem_len -= len;
	data = get_send_ctx_data(sctx);
	memcpy(&frame.data[index], data, len);

	if (IS_ENABLED(CONFIG_ISOTP_ENABLE_TX_PADDING) ||
	    (IS_ENABLED(CONFIG_CAN_FD_MODE) && (sctx->tx_addr.flags & ISOTP_MSG_FDF) != 0 &&
	     len + index > ISOTP_PADDED_FRAME_DL_MIN)) {
		/* AUTOSAR requirements SWS_CanTp_00348 / SWS_CanTp_00351.
		 * Mandatory for ISO-TP CAN FD frames > 8 bytes.
		 */
		frame.dlc = can_bytes_to_dlc(
			MAX(ISOTP_PADDED_FRAME_DL_MIN, len + index));
		memset(&frame.data[index + len], ISOTP_PAD_BYTE,
		       can_dlc_to_bytes(frame.dlc) - len - index);
	} else {
		frame.dlc = can_bytes_to_dlc(len + index);
	}

	ret = can_send(sctx->can_dev, &frame, K_MSEC(ISOTP_A_TIMEOUT_MS), send_can_tx_cb, sctx);
	if (ret == 0) {
		sctx->sn++;
		pull_send_ctx_data(sctx, len);
		sctx->bs--;
		sctx->tx_backlog++;
	}

	ret = ret ? ret : rem_len;
	return ret;
}

#ifdef CONFIG_ISOTP_ENABLE_CONTEXT_BUFFERS
static inline void free_send_ctx(struct isotp_send_ctx **sctx)
{
	if ((*sctx)->is_net_buf) {
		net_buf_unref((*sctx)->buf);
		(*sctx)->buf = NULL;
	}

	if ((*sctx)->is_ctx_slab) {
		k_mem_slab_free(&ctx_slab, (void *)*sctx);
	}
}

static int alloc_send_ctx(struct isotp_send_ctx **sctx, k_timeout_t timeout)
{
	int ret;

	ret = k_mem_slab_alloc(&ctx_slab, (void **)sctx, timeout);
	if (ret) {
		return ISOTP_NO_CTX_LEFT;
	}

	(*sctx)->is_ctx_slab = 1;

	return 0;
}
#else
#define free_send_ctx(x)
#endif /*CONFIG_ISOTP_ENABLE_CONTEXT_BUFFERS*/

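/*
 * Convert an STmin value to a kernel timeout: 0x00-0x7F are milliseconds,
 * 0xF1-0xF9 are 100-900 microseconds, anything else maps to the maximum of
 * 127 ms as required by ISO 15765-2.
 */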
static k_timeout_t stmin_to_timeout(uint8_t stmin)
{
	/* According to ISO 15765-2, a reserved (corrupt) STmin value must be
	 * treated as 127 ms
	 */
	if (stmin > ISOTP_STMIN_MAX ||
	    (stmin > ISOTP_STMIN_MS_MAX && stmin < ISOTP_STMIN_US_BEGIN)) {
		return K_MSEC(ISOTP_STMIN_MS_MAX);
	}

	if (stmin >= ISOTP_STMIN_US_BEGIN) {
		return K_USEC((stmin + 1 - ISOTP_STMIN_US_BEGIN) * 100U);
	}

	return K_MSEC(stmin);
}

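/*
 * TX state machine: sends the FF, streams CFs while honouring BS and STmin,
 * waits for FC frames and finally reports the result through the callback or
 * the finish semaphore. Runs from the system work queue.
 */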
static void send_state_machine(struct isotp_send_ctx *sctx)
{
	int ret;

	switch (sctx->state) {

	case ISOTP_TX_SEND_FF:
		send_ff(sctx);
		k_timer_start(&sctx->timer, K_MSEC(ISOTP_BS_TIMEOUT_MS), K_NO_WAIT);
		sctx->state = ISOTP_TX_WAIT_FC;
		LOG_DBG("SM send FF");
		break;

	case ISOTP_TX_SEND_CF:
		LOG_DBG("SM send CF");
		k_timer_stop(&sctx->timer);
		do {
			ret = send_cf(sctx);
			if (!ret) {
				sctx->state = ISOTP_TX_WAIT_BACKLOG;
				break;
			}

			if (ret < 0) {
				LOG_ERR("Failed to send CF");
				send_report_error(sctx, ret == -EAGAIN ?
						ISOTP_N_TIMEOUT_A :
						ISOTP_N_ERROR);
				break;
			}

			if (sctx->opts.bs && !sctx->bs) {
				k_timer_start(&sctx->timer, K_MSEC(ISOTP_BS_TIMEOUT_MS), K_NO_WAIT);
				sctx->state = ISOTP_TX_WAIT_FC;
				LOG_DBG("BS reached. Wait for FC again");
				break;
			} else if (sctx->opts.stmin) {
				sctx->state = ISOTP_TX_WAIT_ST;
				break;
			}

			/* Ensure FIFO style transmission of CF */
			k_sem_take(&sctx->tx_sem, K_FOREVER);
		} while (ret > 0);

		break;

	case ISOTP_TX_WAIT_ST:
		k_timer_start(&sctx->timer, stmin_to_timeout(sctx->opts.stmin), K_NO_WAIT);
		sctx->state = ISOTP_TX_SEND_CF;
		LOG_DBG("SM wait ST");
		break;

	case ISOTP_TX_ERR:
		LOG_DBG("SM error");
		__fallthrough;
	case ISOTP_TX_SEND_SF:
		__fallthrough;
	case ISOTP_TX_WAIT_FIN:
		if (sctx->filter_id >= 0) {
			can_remove_rx_filter(sctx->can_dev, sctx->filter_id);
		}

		LOG_DBG("SM finish");
		k_timer_stop(&sctx->timer);

		if (sctx->has_callback) {
			sctx->fin_cb.cb(sctx->error_nr, sctx->fin_cb.arg);
			free_send_ctx(&sctx);
		} else {
			k_sem_give(&sctx->fin_sem);
		}

		sctx->state = ISOTP_TX_STATE_RESET;
		break;

	default:
		break;
	}
}

static void send_work_handler(struct k_work *item)
{
	struct isotp_send_ctx *sctx = CONTAINER_OF(item, struct isotp_send_ctx, work);

	send_state_machine(sctx);
}

static inline int add_fc_filter(struct isotp_send_ctx *sctx)
{
	struct can_filter filter;
	uint32_t mask;

	if ((sctx->rx_addr.flags & ISOTP_MSG_IDE) != 0) {
		mask = CAN_EXT_ID_MASK;
	} else {
		mask = CAN_STD_ID_MASK;
	}

	prepare_filter(&filter, &sctx->rx_addr, mask);

	sctx->filter_id = can_add_rx_filter(sctx->can_dev, send_can_rx_cb, sctx,
					   &filter);
	if (sctx->filter_id < 0) {
		LOG_ERR("Error adding FC filter [%d]", sctx->filter_id);
		return ISOTP_NO_FREE_FILTER;
	}

	return 0;
}

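/*
 * Common send path: validates the configured TX_DL, decides between a single
 * frame and a multi-frame transfer, and either blocks on the finish semaphore
 * or returns immediately when a completion callback is provided.
 */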
static int send(struct isotp_send_ctx *sctx, const struct device *can_dev,
		const struct isotp_msg_id *tx_addr,
		const struct isotp_msg_id *rx_addr,
		isotp_tx_callback_t complete_cb, void *cb_arg)
{
	can_mode_t cap;
	size_t len;
	int ret;

	__ASSERT_NO_MSG(sctx);
	__ASSERT_NO_MSG(can_dev);
	__ASSERT_NO_MSG(rx_addr && tx_addr);

	if ((rx_addr->flags & ISOTP_MSG_FDF) != 0 || (tx_addr->flags & ISOTP_MSG_FDF) != 0) {
		ret = can_get_capabilities(can_dev, &cap);
		if (ret != 0 || (cap & CAN_MODE_FD) == 0) {
			LOG_ERR("CAN controller does not support FD mode");
			return ISOTP_N_ERROR;
		}
	}

	if (complete_cb) {
		sctx->fin_cb.cb = complete_cb;
		sctx->fin_cb.arg = cb_arg;
		sctx->has_callback = 1;
	} else {
		k_sem_init(&sctx->fin_sem, 0, 1);
		sctx->has_callback = 0;
	}

	k_sem_init(&sctx->tx_sem, 0, 1);
	sctx->can_dev = can_dev;
	sctx->tx_addr = *tx_addr;
	sctx->rx_addr = *rx_addr;
	sctx->error_nr = ISOTP_N_OK;
	sctx->wft = 0;
	k_work_init(&sctx->work, send_work_handler);
	k_timer_init(&sctx->timer, send_timeout_handler, NULL);

	switch (sctx->tx_addr.dl) {
	case 0:
		if ((sctx->tx_addr.flags & ISOTP_MSG_FDF) == 0) {
			sctx->tx_addr.dl = 8;
		} else {
			sctx->tx_addr.dl = 64;
		}
		__fallthrough;
	case 8:
		break;
	case 12:
	case 16:
	case 20:
	case 24:
	case 32:
	case 48:
	case 64:
		if ((sctx->tx_addr.flags & ISOTP_MSG_FDF) == 0) {
			LOG_ERR("TX_DL > 8 only supported with FD mode");
			return ISOTP_N_ERROR;
		}
		break;
	default:
		LOG_ERR("Invalid TX_DL: %u", sctx->tx_addr.dl);
		return ISOTP_N_ERROR;
	}

	len = get_send_ctx_data_len(sctx);
	LOG_DBG("Send %zu bytes to addr 0x%x and listen on 0x%x", len,
		sctx->tx_addr.ext_id, sctx->rx_addr.ext_id);
	/* Single frames > 8 bytes use an additional byte for length (CAN FD only) */
	if (len > sctx->tx_addr.dl - (((tx_addr->flags & ISOTP_MSG_EXT_ADDR) != 0) ? 2 : 1) -
			  ((sctx->tx_addr.dl > ISOTP_4BIT_SF_MAX_CAN_DL) ? 1 : 0)) {
		ret = add_fc_filter(sctx);
		if (ret) {
			LOG_ERR("Can't add fc filter: %d", ret);
			free_send_ctx(&sctx);
			return ret;
		}

		LOG_DBG("Starting work to send FF");
		sctx->state = ISOTP_TX_SEND_FF;
		k_work_submit(&sctx->work);
	} else {
		LOG_DBG("Sending single frame");
		sctx->filter_id = -1;
		ret = send_sf(sctx);
		if (ret) {
			free_send_ctx(&sctx);
			return ret == -EAGAIN ?
			       ISOTP_N_TIMEOUT_A : ISOTP_N_ERROR;
		}
	}

	if (!complete_cb) {
		k_sem_take(&sctx->fin_sem, K_FOREVER);
		ret = sctx->error_nr;
		free_send_ctx(&sctx);
		return ret;
	}

	return ISOTP_N_OK;
}

int isotp_send(struct isotp_send_ctx *sctx, const struct device *can_dev,
	       const uint8_t *data, size_t len,
	       const struct isotp_msg_id *tx_addr,
	       const struct isotp_msg_id *rx_addr,
	       isotp_tx_callback_t complete_cb, void *cb_arg)
{
	sctx->data = data;
	sctx->len = len;
	sctx->is_ctx_slab = 0;
	sctx->is_net_buf = 0;

	return send(sctx, can_dev, tx_addr, rx_addr, complete_cb, cb_arg);
}

#ifdef CONFIG_ISOTP_ENABLE_CONTEXT_BUFFERS

int isotp_send_ctx_buf(const struct device *can_dev,
		       const uint8_t *data, size_t len,
		       const struct isotp_msg_id *tx_addr,
		       const struct isotp_msg_id *rx_addr,
		       isotp_tx_callback_t complete_cb, void *cb_arg,
		       k_timeout_t timeout)
{
	struct isotp_send_ctx *sctx;
	int ret;

	__ASSERT_NO_MSG(data);

	ret = alloc_send_ctx(&sctx, timeout);
	if (ret) {
		return ret;
	}

	sctx->data = data;
	sctx->len = len;
	sctx->is_net_buf = 0;

	return send(sctx, can_dev, tx_addr, rx_addr, complete_cb, cb_arg);
}

int isotp_send_net_ctx_buf(const struct device *can_dev,
			   struct net_buf *data,
			   const struct isotp_msg_id *tx_addr,
			   const struct isotp_msg_id *rx_addr,
			   isotp_tx_callback_t complete_cb, void *cb_arg,
			   k_timeout_t timeout)
{
	struct isotp_send_ctx *sctx;
	int ret;

	__ASSERT_NO_MSG(data);

	ret = alloc_send_ctx(&sctx, timeout);
	if (ret) {
		return ret;
	}

	sctx->is_net_buf = 1;
	sctx->buf = data;

	return send(sctx, can_dev, tx_addr, rx_addr, complete_cb, cb_arg);
}

#ifdef CONFIG_ISOTP_USE_TX_BUF
int isotp_send_buf(const struct device *can_dev,
		   const uint8_t *data, size_t len,
		   const struct isotp_msg_id *tx_addr,
		   const struct isotp_msg_id *rx_addr,
		   isotp_tx_callback_t complete_cb, void *cb_arg,
		   k_timeout_t timeout)
{
	struct isotp_send_ctx *sctx;
	struct net_buf *buf;
	int ret;

	__ASSERT_NO_MSG(data);

	ret = alloc_send_ctx(&sctx, timeout);
	if (ret) {
		return ret;
	}

	buf = net_buf_alloc_len(&isotp_tx_pool, len, timeout);
	if (!buf) {
		k_mem_slab_free(&ctx_slab, (void *)sctx);
		return ISOTP_NO_BUF_DATA_LEFT;
	}

	net_buf_add_mem(buf, data, len);

	sctx->is_net_buf = 1;
	sctx->buf = buf;

	return send(sctx, can_dev, tx_addr, rx_addr, complete_cb, cb_arg);
}
#endif  /*CONFIG_ISOTP_USE_TX_BUF*/
#endif  /*CONFIG_ISOTP_ENABLE_CONTEXT_BUFFERS*/