1 /*
2  * Copyright (c) 2022-2023 Vestas Wind Systems A/S
3  * Copyright (c) 2020 Alexander Wachter
4  *
5  * SPDX-License-Identifier: Apache-2.0
6  */
7 
8 #include <zephyr/drivers/can.h>
9 #include <zephyr/drivers/can/can_mcan.h>
10 #include <zephyr/drivers/can/transceiver.h>
11 #include <zephyr/kernel.h>
12 #include <zephyr/logging/log.h>
13 #include <zephyr/sys/sys_io.h>
14 #include <zephyr/sys/util.h>
15 
16 LOG_MODULE_REGISTER(can_mcan, CONFIG_CAN_LOG_LEVEL);
17 
18 #define CAN_INIT_TIMEOUT_MS 100
19 
/* Read an MCAN register through the front-end specific read_reg operation.
 * Logs and propagates any front-end error.
 */
int can_mcan_read_reg(const struct device *dev, uint16_t reg, uint32_t *val)
{
	const struct can_mcan_config *cfg = dev->config;
	int ret = cfg->ops->read_reg(dev, reg, val);

	if (ret != 0) {
		LOG_ERR("failed to read reg 0x%03x (err %d)", reg, ret);
	}

	return ret;
}
32 
/* Write an MCAN register through the front-end specific write_reg operation.
 * Logs and propagates any front-end error.
 */
int can_mcan_write_reg(const struct device *dev, uint16_t reg, uint32_t val)
{
	const struct can_mcan_config *cfg = dev->config;
	int ret = cfg->ops->write_reg(dev, reg, val);

	if (ret != 0) {
		LOG_ERR("failed to write reg 0x%03x (err %d)", reg, ret);
	}

	return ret;
}
45 
/* Bring the MCAN controller out of sleep (clock stop) mode.
 *
 * Clears the clock stop request bit (CCCR.CSR) and busy-waits until the
 * controller deasserts the clock stop acknowledge bit (CCCR.CSA), bounded
 * by CAN_INIT_TIMEOUT_MS. On timeout the clock stop request is restored
 * and -EAGAIN is returned. Returns 0 on success or a negative error code
 * from the register access layer.
 */
static int can_mcan_exit_sleep_mode(const struct device *dev)
{
	struct can_mcan_data *data = dev->data;
	uint32_t start_time;
	uint32_t cccr;
	int err;

	k_mutex_lock(&data->lock, K_FOREVER);

	err = can_mcan_read_reg(dev, CAN_MCAN_CCCR, &cccr);
	if (err != 0) {
		goto unlock;
	}

	/* Clear the clock stop request */
	cccr &= ~CAN_MCAN_CCCR_CSR;

	err = can_mcan_write_reg(dev, CAN_MCAN_CCCR, cccr);
	if (err != 0) {
		goto unlock;
	}

	start_time = k_cycle_get_32();

	err = can_mcan_read_reg(dev, CAN_MCAN_CCCR, &cccr);
	if (err != 0) {
		goto unlock;
	}

	/* Poll until the controller acknowledges leaving clock stop mode */
	while ((cccr & CAN_MCAN_CCCR_CSA) == CAN_MCAN_CCCR_CSA) {
		if (k_cycle_get_32() - start_time > k_ms_to_cyc_ceil32(CAN_INIT_TIMEOUT_MS)) {
			/* Timed out: re-assert the clock stop request before bailing out */
			cccr |= CAN_MCAN_CCCR_CSR;
			err = can_mcan_write_reg(dev, CAN_MCAN_CCCR, cccr);
			if (err != 0) {
				goto unlock;
			}

			err = -EAGAIN;
			goto unlock;
		}

		err = can_mcan_read_reg(dev, CAN_MCAN_CCCR, &cccr);
		if (err != 0) {
			goto unlock;
		}
	}

unlock:
	k_mutex_unlock(&data->lock);

	return err;
}
98 
/* Request initialization mode (CCCR.INIT = 1) and wait for the controller
 * to confirm the transition.
 *
 * @param dev     MCAN device
 * @param timeout maximum time to wait for CCCR.INIT to read back as set
 *
 * @retval 0 on success
 * @retval -EAGAIN if the controller did not enter init mode within @a timeout
 *         (the INIT request is withdrawn again in that case)
 * @retval negative error code from the register access layer otherwise
 */
static int can_mcan_enter_init_mode(const struct device *dev, k_timeout_t timeout)
{
	struct can_mcan_data *data = dev->data;
	int64_t start_time;
	uint32_t cccr;
	int err;

	k_mutex_lock(&data->lock, K_FOREVER);

	err = can_mcan_read_reg(dev, CAN_MCAN_CCCR, &cccr);
	if (err != 0) {
		goto unlock;
	}

	cccr |= CAN_MCAN_CCCR_INIT;

	err = can_mcan_write_reg(dev, CAN_MCAN_CCCR, cccr);
	if (err != 0) {
		goto unlock;
	}

	start_time = k_uptime_ticks();

	err = can_mcan_read_reg(dev, CAN_MCAN_CCCR, &cccr);
	if (err != 0) {
		goto unlock;
	}

	/* Poll until INIT reads back as set or the timeout elapses */
	while ((cccr & CAN_MCAN_CCCR_INIT) == 0U) {
		if (k_uptime_ticks() - start_time > timeout.ticks) {
			/* Timed out: withdraw the init mode request */
			cccr &= ~CAN_MCAN_CCCR_INIT;
			err = can_mcan_write_reg(dev, CAN_MCAN_CCCR, cccr);
			if (err != 0) {
				goto unlock;
			}

			err = -EAGAIN;
			goto unlock;
		}

		err = can_mcan_read_reg(dev, CAN_MCAN_CCCR, &cccr);
		if (err != 0) {
			goto unlock;
		}
	}

unlock:
	k_mutex_unlock(&data->lock);

	return err;
}
150 
/* Clear initialization mode (CCCR.INIT = 0) and wait for the controller
 * to confirm normal operation.
 *
 * Unlike can_mcan_enter_init_mode(), the request is not rolled back on
 * timeout; the controller may still leave init mode on its own (e.g. once
 * bus synchronization is achieved).
 *
 * @param dev     MCAN device
 * @param timeout maximum time to wait for CCCR.INIT to read back as clear
 *
 * @retval 0 on success
 * @retval -EAGAIN on timeout
 * @retval negative error code from the register access layer otherwise
 */
static int can_mcan_leave_init_mode(const struct device *dev, k_timeout_t timeout)
{
	struct can_mcan_data *data = dev->data;
	int64_t start_time;
	uint32_t cccr;
	int err;

	k_mutex_lock(&data->lock, K_FOREVER);

	err = can_mcan_read_reg(dev, CAN_MCAN_CCCR, &cccr);
	if (err != 0) {
		goto unlock;
	}

	cccr &= ~CAN_MCAN_CCCR_INIT;

	err = can_mcan_write_reg(dev, CAN_MCAN_CCCR, cccr);
	if (err != 0) {
		goto unlock;
	}

	start_time = k_uptime_ticks();

	err = can_mcan_read_reg(dev, CAN_MCAN_CCCR, &cccr);
	if (err != 0) {
		goto unlock;
	}

	/* Poll until INIT reads back as clear or the timeout elapses */
	while ((cccr & CAN_MCAN_CCCR_INIT) != 0U) {
		if (k_uptime_ticks() - start_time > timeout.ticks) {
			err = -EAGAIN;
			goto unlock;
		}

		err = can_mcan_read_reg(dev, CAN_MCAN_CCCR, &cccr);
		if (err != 0) {
			goto unlock;
		}
	}

unlock:
	k_mutex_unlock(&data->lock);

	return err;
}
196 
can_mcan_set_timing(const struct device * dev,const struct can_timing * timing)197 int can_mcan_set_timing(const struct device *dev, const struct can_timing *timing)
198 {
199 	struct can_mcan_data *data = dev->data;
200 	uint32_t nbtp = 0U;
201 	int err;
202 
203 	if (data->started) {
204 		return -EBUSY;
205 	}
206 
207 	k_mutex_lock(&data->lock, K_FOREVER);
208 
209 	nbtp |= FIELD_PREP(CAN_MCAN_NBTP_NSJW, timing->sjw - 1UL) |
210 		FIELD_PREP(CAN_MCAN_NBTP_NTSEG1, timing->phase_seg1 - 1UL) |
211 		FIELD_PREP(CAN_MCAN_NBTP_NTSEG2, timing->phase_seg2 - 1UL) |
212 		FIELD_PREP(CAN_MCAN_NBTP_NBRP, timing->prescaler - 1UL);
213 
214 	err = can_mcan_write_reg(dev, CAN_MCAN_NBTP, nbtp);
215 	if (err != 0) {
216 		goto unlock;
217 	}
218 
219 unlock:
220 	k_mutex_unlock(&data->lock);
221 
222 	return err;
223 }
224 
#ifdef CONFIG_CAN_FD_MODE
/* Configure the data phase bit timing (CAN FD).
 *
 * Only permitted while the controller is stopped; returns -EBUSY otherwise.
 */
int can_mcan_set_timing_data(const struct device *dev, const struct can_timing *timing_data)
{
	struct can_mcan_data *data = dev->data;
	uint32_t dbtp;
	int ret;

	if (data->started) {
		return -EBUSY;
	}

	k_mutex_lock(&data->lock, K_FOREVER);

	/* DBTP register fields are programmed as the configured value minus one */
	dbtp = FIELD_PREP(CAN_MCAN_DBTP_DSJW, timing_data->sjw - 1UL) |
	       FIELD_PREP(CAN_MCAN_DBTP_DTSEG1, timing_data->phase_seg1 - 1UL) |
	       FIELD_PREP(CAN_MCAN_DBTP_DTSEG2, timing_data->phase_seg2 - 1UL) |
	       FIELD_PREP(CAN_MCAN_DBTP_DBRP, timing_data->prescaler - 1UL);

	ret = can_mcan_write_reg(dev, CAN_MCAN_DBTP, dbtp);

	k_mutex_unlock(&data->lock);

	return ret;
}
#endif /* CONFIG_CAN_FD_MODE */
254 
/* Report the CAN controller modes supported by all MCAN-based drivers.
 *
 * @param dev MCAN device (unused; capabilities are compile-time constant)
 * @param cap output: bitmask of supported CAN_MODE_* flags
 *
 * @retval 0 always
 */
int can_mcan_get_capabilities(const struct device *dev, can_mode_t *cap)
{
	ARG_UNUSED(dev);

	*cap = CAN_MODE_NORMAL | CAN_MODE_LOOPBACK | CAN_MODE_LISTENONLY;

	/* Use #ifdef, consistent with the other CONFIG_CAN_FD_MODE guards in
	 * this file; #if would fail if the symbol were defined without a value.
	 */
#ifdef CONFIG_CAN_FD_MODE
	*cap |= CAN_MODE_FD;
#endif /* CONFIG_CAN_FD_MODE */

	return 0;
}
267 
/* Start the CAN controller.
 *
 * Enables the CAN transceiver (if any), resets the statistics, and takes
 * the controller out of init mode so it participates in bus traffic.
 *
 * @retval 0 on success
 * @retval -EALREADY if the controller is already started
 * @retval -EIO if leaving init mode failed
 * @retval negative error code from the transceiver otherwise
 */
int can_mcan_start(const struct device *dev)
{
	const struct can_mcan_config *config = dev->config;
	struct can_mcan_data *data = dev->data;
	int err;

	if (data->started) {
		return -EALREADY;
	}

	/* Enable the transceiver before going on-bus */
	if (config->phy != NULL) {
		err = can_transceiver_enable(config->phy);
		if (err != 0) {
			LOG_ERR("failed to enable CAN transceiver (err %d)", err);
			return err;
		}
	}

	/* Reset statistics */
	CAN_STATS_RESET(dev);

	err = can_mcan_leave_init_mode(dev, K_MSEC(CAN_INIT_TIMEOUT_MS));
	if (err != 0) {
		LOG_ERR("failed to leave init mode");

		if (config->phy != NULL) {
			/* Attempt to disable the CAN transceiver in case of error */
			(void)can_transceiver_disable(config->phy);
		}

		return -EIO;
	}

	data->started = true;

	return 0;
}
305 
/* Stop the CAN controller.
 *
 * Enters init mode (which aborts pending transmissions in hardware),
 * disables the transceiver (if any), and completes all pending TX
 * callbacks with -ENETDOWN so no caller is left blocked.
 *
 * @retval 0 on success
 * @retval -EALREADY if the controller is already stopped
 * @retval -EIO if entering init mode failed
 * @retval negative error code from the transceiver otherwise
 */
int can_mcan_stop(const struct device *dev)
{
	const struct can_mcan_config *config = dev->config;
	const struct can_mcan_callbacks *cbs = config->callbacks;
	struct can_mcan_data *data = dev->data;
	can_tx_callback_t tx_cb;
	uint32_t tx_idx;
	int err;

	if (!data->started) {
		return -EALREADY;
	}

	/* CAN transmissions are automatically stopped when entering init mode */
	err = can_mcan_enter_init_mode(dev, K_MSEC(CAN_INIT_TIMEOUT_MS));
	if (err != 0) {
		LOG_ERR("Failed to enter init mode");
		return -EIO;
	}

	if (config->phy != NULL) {
		err = can_transceiver_disable(config->phy);
		if (err != 0) {
			LOG_ERR("failed to disable CAN transceiver (err %d)", err);
			return err;
		}
	}

	can_mcan_enable_configuration_change(dev);

	data->started = false;

	/* Complete any pending TX callbacks and release their semaphore slots */
	for (tx_idx = 0U; tx_idx < cbs->num_tx; tx_idx++) {
		tx_cb = cbs->tx[tx_idx].function;

		if (tx_cb != NULL) {
			/* Clear the slot before invoking the callback */
			cbs->tx[tx_idx].function = NULL;
			tx_cb(dev, -ENETDOWN, cbs->tx[tx_idx].user_data);
			k_sem_give(&data->tx_sem);
		}
	}

	return 0;
}
350 
/* Set the CAN controller operating mode.
 *
 * Supported flags: CAN_MODE_LOOPBACK, CAN_MODE_LISTENONLY, and (when
 * CONFIG_CAN_FD_MODE is enabled) CAN_MODE_FD. Only permitted while the
 * controller is stopped.
 *
 * @retval 0 on success
 * @retval -ENOTSUP if an unsupported mode flag was requested
 * @retval -EBUSY if the controller is started
 * @retval negative error code from the register access layer otherwise
 */
int can_mcan_set_mode(const struct device *dev, can_mode_t mode)
{
	struct can_mcan_data *data = dev->data;
	uint32_t cccr;
	uint32_t test;
	int err;

#ifdef CONFIG_CAN_FD_MODE
	if ((mode & ~(CAN_MODE_LOOPBACK | CAN_MODE_LISTENONLY | CAN_MODE_FD)) != 0U) {
		LOG_ERR("unsupported mode: 0x%08x", mode);
		return -ENOTSUP;
	}
#else  /* CONFIG_CAN_FD_MODE */
	if ((mode & ~(CAN_MODE_LOOPBACK | CAN_MODE_LISTENONLY)) != 0U) {
		LOG_ERR("unsupported mode: 0x%08x", mode);
		return -ENOTSUP;
	}
#endif /* !CONFIG_CAN_FD_MODE */

	if (data->started) {
		return -EBUSY;
	}

	k_mutex_lock(&data->lock, K_FOREVER);

	err = can_mcan_read_reg(dev, CAN_MCAN_CCCR, &cccr);
	if (err != 0) {
		goto unlock;
	}

	err = can_mcan_read_reg(dev, CAN_MCAN_TEST, &test);
	if (err != 0) {
		goto unlock;
	}

	if ((mode & CAN_MODE_LOOPBACK) != 0) {
		/* Loopback mode: test mode must be enabled for LBCK to take effect */
		cccr |= CAN_MCAN_CCCR_TEST;
		test |= CAN_MCAN_TEST_LBCK;
	} else {
		cccr &= ~CAN_MCAN_CCCR_TEST;
	}

	if ((mode & CAN_MODE_LISTENONLY) != 0) {
		/* Bus monitoring mode */
		cccr |= CAN_MCAN_CCCR_MON;
	} else {
		cccr &= ~CAN_MCAN_CCCR_MON;
	}

#ifdef CONFIG_CAN_FD_MODE
	if ((mode & CAN_MODE_FD) != 0) {
		/* Enable CAN FD frame format and bit rate switching */
		cccr |= CAN_MCAN_CCCR_FDOE | CAN_MCAN_CCCR_BRSE;
		data->fd = true;
	} else {
		cccr &= ~(CAN_MCAN_CCCR_FDOE | CAN_MCAN_CCCR_BRSE);
		data->fd = false;
	}
#endif /* CONFIG_CAN_FD_MODE */

	err = can_mcan_write_reg(dev, CAN_MCAN_CCCR, cccr);
	if (err != 0) {
		goto unlock;
	}

	err = can_mcan_write_reg(dev, CAN_MCAN_TEST, test);
	if (err != 0) {
		goto unlock;
	}

unlock:
	k_mutex_unlock(&data->lock);

	return err;
}
426 
can_mcan_state_change_handler(const struct device * dev)427 static void can_mcan_state_change_handler(const struct device *dev)
428 {
429 	struct can_mcan_data *data = dev->data;
430 	const can_state_change_callback_t cb = data->state_change_cb;
431 	void *cb_data = data->state_change_cb_data;
432 	struct can_bus_err_cnt err_cnt;
433 	enum can_state state;
434 
435 	(void)can_mcan_get_state(dev, &state, &err_cnt);
436 
437 	if (cb != NULL) {
438 		cb(dev, state, err_cnt, cb_data);
439 	}
440 }
441 
can_mcan_tx_event_handler(const struct device * dev)442 static void can_mcan_tx_event_handler(const struct device *dev)
443 {
444 	const struct can_mcan_config *config = dev->config;
445 	const struct can_mcan_callbacks *cbs = config->callbacks;
446 	struct can_mcan_data *data = dev->data;
447 	struct can_mcan_tx_event_fifo tx_event;
448 	can_tx_callback_t tx_cb;
449 	void *user_data;
450 	uint32_t event_idx;
451 	uint32_t tx_idx;
452 	uint32_t txefs;
453 	int err;
454 
455 	err = can_mcan_read_reg(dev, CAN_MCAN_TXEFS, &txefs);
456 	if (err != 0) {
457 		return;
458 	}
459 
460 	while ((txefs & CAN_MCAN_TXEFS_EFFL) != 0U) {
461 		event_idx = FIELD_GET(CAN_MCAN_TXEFS_EFGI, txefs);
462 		err = can_mcan_read_mram(dev,
463 					 config->mram_offsets[CAN_MCAN_MRAM_CFG_TX_EVENT_FIFO] +
464 					 event_idx * sizeof(struct can_mcan_tx_event_fifo),
465 					 &tx_event,
466 					 sizeof(struct can_mcan_tx_event_fifo));
467 		if (err != 0) {
468 			LOG_ERR("failed to read tx event fifo (err %d)", err);
469 			return;
470 		}
471 
472 		tx_idx = tx_event.mm;
473 
474 		/* Acknowledge TX event */
475 		err = can_mcan_write_reg(dev, CAN_MCAN_TXEFA, event_idx);
476 		if (err != 0) {
477 			return;
478 		}
479 
480 		__ASSERT_NO_MSG(tx_idx <= cbs->num_tx);
481 		tx_cb = cbs->tx[tx_idx].function;
482 		user_data = cbs->tx[tx_idx].user_data;
483 		cbs->tx[tx_idx].function = NULL;
484 
485 		k_sem_give(&data->tx_sem);
486 
487 		tx_cb(dev, 0, user_data);
488 
489 		err = can_mcan_read_reg(dev, CAN_MCAN_TXEFS, &txefs);
490 		if (err != 0) {
491 			return;
492 		}
493 	}
494 }
495 
#ifdef CONFIG_CAN_STATS
/* Map an MCAN last error code (LEC/DLEC field value) to the corresponding
 * CAN statistics counter and increment it. "No error" and "no change" are
 * intentionally ignored.
 */
static void can_mcan_lec_update_stats(const struct device *dev, enum can_mcan_psr_lec lec)
{
	switch (lec) {
	case CAN_MCAN_PSR_LEC_STUFF_ERROR:
		CAN_STATS_STUFF_ERROR_INC(dev);
		break;
	case CAN_MCAN_PSR_LEC_FORM_ERROR:
		CAN_STATS_FORM_ERROR_INC(dev);
		break;
	case CAN_MCAN_PSR_LEC_ACK_ERROR:
		CAN_STATS_ACK_ERROR_INC(dev);
		break;
	case CAN_MCAN_PSR_LEC_BIT1_ERROR:
		CAN_STATS_BIT1_ERROR_INC(dev);
		break;
	case CAN_MCAN_PSR_LEC_BIT0_ERROR:
		CAN_STATS_BIT0_ERROR_INC(dev);
		break;
	case CAN_MCAN_PSR_LEC_CRC_ERROR:
		CAN_STATS_CRC_ERROR_INC(dev);
		break;
	case CAN_MCAN_PSR_LEC_NO_ERROR:
	case CAN_MCAN_PSR_LEC_NO_CHANGE:
	default:
		break;
	}
}
#endif /* CONFIG_CAN_STATS */
525 
/* Read the protocol status register (PSR), updating the protocol error
 * statistics as a side effect.
 *
 * Reading the lower byte of the PSR register clears the protocol last
 * error codes (LEC). To avoid missing errors, this function should be
 * used whenever the PSR register is read.
 *
 * @param dev MCAN device
 * @param val output: raw PSR register value
 *
 * @retval 0 on success, negative error code from the register access layer
 */
static int can_mcan_read_psr(const struct device *dev, uint32_t *val)
{
	int err = can_mcan_read_reg(dev, CAN_MCAN_PSR, val);

	if (err != 0) {
		return err;
	}

#ifdef CONFIG_CAN_STATS
	enum can_mcan_psr_lec lec;

	/* Arbitration phase last error code */
	lec = FIELD_GET(CAN_MCAN_PSR_LEC, *val);
	can_mcan_lec_update_stats(dev, lec);
#ifdef CONFIG_CAN_FD_MODE
	/* Data phase last error code */
	lec = FIELD_GET(CAN_MCAN_PSR_DLEC, *val);
	can_mcan_lec_update_stats(dev, lec);
#endif
#endif /* CONFIG_CAN_STATS */

	return 0;
}
551 
/* Interrupt line 0 service routine.
 *
 * Handles state changes (bus-off, error passive, error warning), TX event
 * FIFO notifications, TX event FIFO element loss, reserved address access,
 * message RAM access failures, and protocol errors. Interrupt flags are
 * acknowledged by writing them back to IR; the loop re-reads IR to catch
 * events raised while handling the previous batch.
 */
void can_mcan_line_0_isr(const struct device *dev)
{
	const uint32_t events = CAN_MCAN_IR_BO | CAN_MCAN_IR_EP | CAN_MCAN_IR_EW |
				CAN_MCAN_IR_TEFN | CAN_MCAN_IR_TEFL | CAN_MCAN_IR_ARA |
				CAN_MCAN_IR_MRAF | CAN_MCAN_IR_PEA | CAN_MCAN_IR_PED;
	struct can_mcan_data *data = dev->data;
	uint32_t ir;
	int err;

	err = can_mcan_read_reg(dev, CAN_MCAN_IR, &ir);
	if (err != 0) {
		return;
	}

	while ((ir & events) != 0U) {
		/* Acknowledge the handled interrupt flags */
		err = can_mcan_write_reg(dev, CAN_MCAN_IR, ir & events);
		if (err != 0) {
			return;
		}

		if ((ir & (CAN_MCAN_IR_BO | CAN_MCAN_IR_EP | CAN_MCAN_IR_EW)) != 0U) {
			can_mcan_state_change_handler(dev);
		}

		/* TX event FIFO new entry */
		if ((ir & CAN_MCAN_IR_TEFN) != 0U) {
			can_mcan_tx_event_handler(dev);
		}

		if ((ir & CAN_MCAN_IR_TEFL) != 0U) {
			LOG_ERR("TX FIFO element lost");
			k_sem_give(&data->tx_sem);
		}

		if ((ir & CAN_MCAN_IR_ARA) != 0U) {
			LOG_ERR("Access to reserved address");
		}

		if ((ir & CAN_MCAN_IR_MRAF) != 0U) {
			LOG_ERR("Message RAM access failure");
		}

#ifdef CONFIG_CAN_STATS
		if ((ir & (CAN_MCAN_IR_PEA | CAN_MCAN_IR_PED)) != 0U) {
			uint32_t reg;
			/* This function automatically updates protocol error stats */
			can_mcan_read_psr(dev, &reg);
		}
#endif

		err = can_mcan_read_reg(dev, CAN_MCAN_IR, &ir);
		if (err != 0) {
			return;
		}
	}
}
608 
/* Drain one RX FIFO, dispatching received frames to the registered filter
 * callbacks.
 *
 * Frames whose RTR/FDF flags do not match the filter flags are dropped.
 * Every processed element is acknowledged exactly once at the ack: label
 * (the previous code additionally acknowledged dropped frames before the
 * goto, acknowledging them twice).
 *
 * @param dev             MCAN device
 * @param fifo_offset     message RAM offset of the RX FIFO
 * @param fifo_status_reg RX FIFO status register (RXF0S/RXF1S)
 * @param fifo_ack_reg    RX FIFO acknowledge register (RXF0A/RXF1A)
 */
static void can_mcan_get_message(const struct device *dev, uint16_t fifo_offset,
				 uint16_t fifo_status_reg, uint16_t fifo_ack_reg)
{
	const struct can_mcan_config *config = dev->config;
	const struct can_mcan_callbacks *cbs = config->callbacks;
	struct can_mcan_rx_fifo_hdr hdr;
	struct can_frame frame = {0};
	can_rx_callback_t cb;
	void *user_data;
	uint8_t flags;
	uint32_t get_idx;
	uint32_t filt_idx;
	size_t data_length;
	uint32_t fifo_status;
	int err;

	err = can_mcan_read_reg(dev, fifo_status_reg, &fifo_status);
	if (err != 0) {
		return;
	}

	/* Process elements until the FIFO fill level reaches zero */
	while (FIELD_GET(CAN_MCAN_RXF0S_F0FL, fifo_status) != 0U) {
		get_idx = FIELD_GET(CAN_MCAN_RXF0S_F0GI, fifo_status);

		err = can_mcan_read_mram(dev, fifo_offset + get_idx *
					 sizeof(struct can_mcan_rx_fifo) +
					 offsetof(struct can_mcan_rx_fifo, hdr),
					 &hdr, sizeof(struct can_mcan_rx_fifo_hdr));
		if (err != 0) {
			LOG_ERR("failed to read Rx FIFO header (err %d)", err);
			return;
		}

		frame.dlc = hdr.dlc;

		if (hdr.rtr != 0) {
			frame.flags |= CAN_FRAME_RTR;
		}

		if (hdr.fdf != 0) {
			frame.flags |= CAN_FRAME_FDF;
		}

		if (hdr.brs != 0) {
			frame.flags |= CAN_FRAME_BRS;
		}

		if (hdr.esi != 0) {
			frame.flags |= CAN_FRAME_ESI;
		}

#ifdef CONFIG_CAN_RX_TIMESTAMP
		frame.timestamp = hdr.rxts;
#endif /* CONFIG_CAN_RX_TIMESTAMP */

		/* The filter index identifies which filter element matched */
		filt_idx = hdr.fidx;

		if (hdr.xtd != 0) {
			frame.id = hdr.ext_id;
			frame.flags |= CAN_FRAME_IDE;
			flags = cbs->ext[filt_idx].flags;
		} else {
			frame.id = hdr.std_id;
			flags = cbs->std[filt_idx].flags;
		}

		if (((frame.flags & CAN_FRAME_RTR) == 0U && (flags & CAN_FILTER_DATA) == 0U) ||
		    ((frame.flags & CAN_FRAME_RTR) != 0U && (flags & CAN_FILTER_RTR) == 0U)) {
			/* RTR bit does not match filter, drop frame */
			goto ack;
		}

		if (((frame.flags & CAN_FRAME_FDF) != 0U && (flags & CAN_FILTER_FDF) == 0U) ||
		    ((frame.flags & CAN_FRAME_FDF) == 0U && (flags & CAN_FILTER_FDF) != 0U)) {
			/* FDF bit does not match filter, drop frame */
			goto ack;
		}

		data_length = can_dlc_to_bytes(frame.dlc);
		if (data_length <= sizeof(frame.data)) {
			/* Data phase is only present for non-RTR frames */
			if ((frame.flags & CAN_FRAME_RTR) == 0U) {
				err = can_mcan_read_mram(dev, fifo_offset + get_idx *
							 sizeof(struct can_mcan_rx_fifo) +
							 offsetof(struct can_mcan_rx_fifo, data_32),
							 &frame.data_32,
							 ROUND_UP(data_length, sizeof(uint32_t)));
				if (err != 0) {
					LOG_ERR("failed to read Rx FIFO data (err %d)", err);
					return;
				}
			}

			if ((frame.flags & CAN_FRAME_IDE) != 0) {
				LOG_DBG("Frame on filter %d, ID: 0x%x",
					filt_idx + cbs->num_std, frame.id);
				/* Valid ext filter indices are strictly less than num_ext */
				__ASSERT_NO_MSG(filt_idx < cbs->num_ext);
				cb = cbs->ext[filt_idx].function;
				user_data = cbs->ext[filt_idx].user_data;
			} else {
				LOG_DBG("Frame on filter %d, ID: 0x%x", filt_idx, frame.id);
				/* Valid std filter indices are strictly less than num_std */
				__ASSERT_NO_MSG(filt_idx < cbs->num_std);
				cb = cbs->std[filt_idx].function;
				user_data = cbs->std[filt_idx].user_data;
			}

			if (cb) {
				cb(dev, &frame, user_data);
			} else {
				LOG_DBG("cb missing");
			}
		} else {
			LOG_ERR("Frame is too big");
		}

ack:
		/* Acknowledge the FIFO element, freeing it for new reception */
		err = can_mcan_write_reg(dev, fifo_ack_reg, get_idx);
		if (err != 0) {
			return;
		}

		err = can_mcan_read_reg(dev, fifo_status_reg, &fifo_status);
		if (err != 0) {
			return;
		}
	}
}
743 
/* Interrupt line 1 service routine.
 *
 * Handles RX FIFO 0/1 new message and message lost events. Interrupt flags
 * are acknowledged by writing them back to IR; the loop re-reads IR to
 * catch events raised while handling the previous batch.
 */
void can_mcan_line_1_isr(const struct device *dev)
{
	const struct can_mcan_config *config = dev->config;
	const uint32_t events =
		CAN_MCAN_IR_RF0N | CAN_MCAN_IR_RF1N | CAN_MCAN_IR_RF0L | CAN_MCAN_IR_RF1L;
	uint32_t ir;
	int err;

	err = can_mcan_read_reg(dev, CAN_MCAN_IR, &ir);
	if (err != 0) {
		return;
	}

	while ((ir & events) != 0U) {
		/* Acknowledge the handled interrupt flags */
		err = can_mcan_write_reg(dev, CAN_MCAN_IR, events & ir);
		if (err != 0) {
			return;
		}

		if ((ir & CAN_MCAN_IR_RF0N) != 0U) {
			LOG_DBG("RX FIFO0 INT");
			can_mcan_get_message(dev, config->mram_offsets[CAN_MCAN_MRAM_CFG_RX_FIFO0],
					     CAN_MCAN_RXF0S, CAN_MCAN_RXF0A);
		}

		if ((ir & CAN_MCAN_IR_RF1N) != 0U) {
			LOG_DBG("RX FIFO1 INT");
			can_mcan_get_message(dev, config->mram_offsets[CAN_MCAN_MRAM_CFG_RX_FIFO1],
					     CAN_MCAN_RXF1S, CAN_MCAN_RXF1A);
		}

		if ((ir & CAN_MCAN_IR_RF0L) != 0U) {
			LOG_ERR("Message lost on FIFO0");
			CAN_STATS_RX_OVERRUN_INC(dev);
		}

		if ((ir & CAN_MCAN_IR_RF1L) != 0U) {
			LOG_ERR("Message lost on FIFO1");
			CAN_STATS_RX_OVERRUN_INC(dev);
		}

		err = can_mcan_read_reg(dev, CAN_MCAN_IR, &ir);
		if (err != 0) {
			return;
		}
	}
}
791 
can_mcan_get_state(const struct device * dev,enum can_state * state,struct can_bus_err_cnt * err_cnt)792 int can_mcan_get_state(const struct device *dev, enum can_state *state,
793 		       struct can_bus_err_cnt *err_cnt)
794 {
795 	struct can_mcan_data *data = dev->data;
796 	uint32_t reg;
797 	int err;
798 
799 	if (state != NULL) {
800 		err = can_mcan_read_psr(dev, &reg);
801 		if (err != 0) {
802 			return err;
803 		}
804 
805 		if (!data->started) {
806 			*state = CAN_STATE_STOPPED;
807 		} else if ((reg & CAN_MCAN_PSR_BO) != 0U) {
808 			*state = CAN_STATE_BUS_OFF;
809 		} else if ((reg & CAN_MCAN_PSR_EP) != 0U) {
810 			*state = CAN_STATE_ERROR_PASSIVE;
811 		} else if ((reg & CAN_MCAN_PSR_EW) != 0U) {
812 			*state = CAN_STATE_ERROR_WARNING;
813 		} else {
814 			*state = CAN_STATE_ERROR_ACTIVE;
815 		}
816 	}
817 
818 	if (err_cnt != NULL) {
819 		err = can_mcan_read_reg(dev, CAN_MCAN_ECR, &reg);
820 		if (err != 0) {
821 			return err;
822 		}
823 
824 		err_cnt->tx_err_cnt = FIELD_GET(CAN_MCAN_ECR_TEC, reg);
825 		err_cnt->rx_err_cnt = FIELD_GET(CAN_MCAN_ECR_REC, reg);
826 	}
827 
828 	return 0;
829 }
830 
831 #ifndef CONFIG_CAN_AUTO_BUS_OFF_RECOVERY
can_mcan_recover(const struct device * dev,k_timeout_t timeout)832 int can_mcan_recover(const struct device *dev, k_timeout_t timeout)
833 {
834 	struct can_mcan_data *data = dev->data;
835 
836 	if (!data->started) {
837 		return -ENETDOWN;
838 	}
839 
840 	return can_mcan_leave_init_mode(dev, timeout);
841 }
842 #endif /* CONFIG_CAN_AUTO_BUS_OFF_RECOVERY */
843 
/* Queue a CAN frame for transmission.
 *
 * Validates the frame against the current mode, waits (up to @a timeout)
 * for a free TX buffer slot, writes the frame header and data to message
 * RAM, registers the completion callback, and requests transmission via
 * TXBAR. The callback is invoked from the TX event handler once the frame
 * has been sent.
 *
 * @retval 0 on success
 * @retval -ENOTSUP on unsupported frame flags or FD frame in non-FD mode
 * @retval -EINVAL on invalid data length or DLC
 * @retval -ENETDOWN if the controller is stopped
 * @retval -ENETUNREACH if the controller is in bus-off state
 * @retval -EAGAIN if no TX buffer became available within @a timeout
 * @retval negative error code from the register/MRAM access layer otherwise
 */
int can_mcan_send(const struct device *dev, const struct can_frame *frame, k_timeout_t timeout,
		  can_tx_callback_t callback, void *user_data)
{
	const struct can_mcan_config *config = dev->config;
	const struct can_mcan_callbacks *cbs = config->callbacks;
	struct can_mcan_data *data = dev->data;
	size_t data_length = can_dlc_to_bytes(frame->dlc);
	struct can_mcan_tx_buffer_hdr tx_hdr = {
		.rtr = (frame->flags & CAN_FRAME_RTR) != 0U ? 1U : 0U,
		.xtd = (frame->flags & CAN_FRAME_IDE) != 0U ? 1U : 0U,
		.esi = 0U,
		.dlc = frame->dlc,
#ifdef CONFIG_CAN_FD_MODE
		.fdf = (frame->flags & CAN_FRAME_FDF) != 0U ? 1U : 0U,
		.brs = (frame->flags & CAN_FRAME_BRS) != 0U ? 1U : 0U,
#else  /* CONFIG_CAN_FD_MODE */
		.fdf = 0U,
		.brs = 0U,
#endif /* !CONFIG_CAN_FD_MODE */
		/* Store TX events for this buffer in the TX event FIFO */
		.efc = 1U,
	};
	uint32_t put_idx = -1;
	uint32_t reg;
	int err;

	LOG_DBG("Sending %zu bytes. Id: 0x%x, ID type: %s %s %s %s", data_length, frame->id,
		(frame->flags & CAN_FRAME_IDE) != 0U ? "extended" : "standard",
		(frame->flags & CAN_FRAME_RTR) != 0U ? "RTR" : "",
		(frame->flags & CAN_FRAME_FDF) != 0U ? "FD frame" : "",
		(frame->flags & CAN_FRAME_BRS) != 0U ? "BRS" : "");

	__ASSERT_NO_MSG(callback != NULL);

#ifdef CONFIG_CAN_FD_MODE
	if ((frame->flags & ~(CAN_FRAME_IDE | CAN_FRAME_RTR | CAN_FRAME_FDF | CAN_FRAME_BRS)) !=
	    0) {
		LOG_ERR("unsupported CAN frame flags 0x%02x", frame->flags);
		return -ENOTSUP;
	}

	if (!data->fd && ((frame->flags & (CAN_FRAME_FDF | CAN_FRAME_BRS)) != 0U)) {
		LOG_ERR("CAN-FD format not supported in non-FD mode");
		return -ENOTSUP;
	}
#else  /* CONFIG_CAN_FD_MODE */
	if ((frame->flags & ~(CAN_FRAME_IDE | CAN_FRAME_RTR)) != 0U) {
		LOG_ERR("unsupported CAN frame flags 0x%02x", frame->flags);
		return -ENOTSUP;
	}
#endif /* !CONFIG_CAN_FD_MODE */

	if (data_length > sizeof(frame->data)) {
		LOG_ERR("data length (%zu) > max frame data length (%zu)", data_length,
			sizeof(frame->data));
		return -EINVAL;
	}

	if ((frame->flags & CAN_FRAME_FDF) != 0U) {
		if (frame->dlc > CANFD_MAX_DLC) {
			LOG_ERR("DLC of %d for CAN-FD format frame", frame->dlc);
			return -EINVAL;
		}
	} else {
		if (frame->dlc > CAN_MAX_DLC) {
			LOG_ERR("DLC of %d for non-FD format frame", frame->dlc);
			return -EINVAL;
		}
	}

	if (!data->started) {
		return -ENETDOWN;
	}

	err = can_mcan_read_psr(dev, &reg);
	if (err != 0) {
		return err;
	}

	/* Refuse to queue frames while in bus-off state */
	if ((reg & CAN_MCAN_PSR_BO) != 0U) {
		return -ENETUNREACH;
	}

	/* The semaphore counts free TX buffer slots */
	err = k_sem_take(&data->tx_sem, timeout);
	if (err != 0) {
		return -EAGAIN;
	}

	k_mutex_lock(&data->tx_mtx, K_FOREVER);

	/* Acquire a free TX buffer; the semaphore guarantees one exists */
	for (int i = 0; i < cbs->num_tx; i++) {
		if (cbs->tx[i].function == NULL) {
			put_idx = i;
			break;
		}
	}

	/* Use the message marker to match TX events back to this buffer */
	tx_hdr.mm = put_idx;

	if ((frame->flags & CAN_FRAME_IDE) != 0U) {
		tx_hdr.ext_id = frame->id;
	} else {
		tx_hdr.std_id = frame->id & CAN_STD_ID_MASK;
	}

	err = can_mcan_write_mram(dev, config->mram_offsets[CAN_MCAN_MRAM_CFG_TX_BUFFER] + put_idx *
				  sizeof(struct can_mcan_tx_buffer) +
				  offsetof(struct can_mcan_tx_buffer, hdr),
				  &tx_hdr, sizeof(struct can_mcan_tx_buffer_hdr));
	if (err != 0) {
		LOG_ERR("failed to write Tx Buffer header (err %d)", err);
		goto err_unlock;
	}

	/* Data phase is only present for non-RTR frames */
	if ((frame->flags & CAN_FRAME_RTR) == 0U) {
		err = can_mcan_write_mram(dev, config->mram_offsets[CAN_MCAN_MRAM_CFG_TX_BUFFER] +
					put_idx * sizeof(struct can_mcan_tx_buffer) +
					offsetof(struct can_mcan_tx_buffer, data_32),
					&frame->data_32, ROUND_UP(data_length, sizeof(uint32_t)));
		if (err != 0) {
			LOG_ERR("failed to write Tx Buffer data (err %d)", err);
			goto err_unlock;
		}
	}

	__ASSERT_NO_MSG(put_idx < cbs->num_tx);
	cbs->tx[put_idx].function = callback;
	cbs->tx[put_idx].user_data = user_data;

	/* Request transmission of the buffer */
	err = can_mcan_write_reg(dev, CAN_MCAN_TXBAR, BIT(put_idx));
	if (err != 0) {
		cbs->tx[put_idx].function = NULL;
		goto err_unlock;
	}

	k_mutex_unlock(&data->tx_mtx);
	return 0;

err_unlock:
	/* Release both the mutex and the TX slot on failure */
	k_mutex_unlock(&data->tx_mtx);
	k_sem_give(&data->tx_sem);

	return err;
}
988 
/* Return the number of available RX filters for the given ID type
 * (extended when ide is true, standard otherwise).
 */
int can_mcan_get_max_filters(const struct device *dev, bool ide)
{
	const struct can_mcan_config *cfg = dev->config;
	const struct can_mcan_callbacks *cbs = cfg->callbacks;

	return ide ? cbs->num_ext : cbs->num_std;
}
1000 
1001 /* Use masked configuration only for simplicity. If someone needs more than
1002  * 28 standard filters, dual mode needs to be implemented.
1003  * Dual mode gets tricky, because we can only activate both filters.
1004  * If one of the IDs is not used anymore, we would need to mark it as unused.
1005  */
/* Add a standard (11-bit) ID RX filter.
 *
 * Finds a free standard filter element, writes a classic (ID + mask)
 * filter to message RAM, and registers the callback for it.
 *
 * @return the filter index on success, -ENOSPC if no filter element is
 *         free, or a negative error code from the MRAM access layer
 *         (previously the mutex was leaked on that error path).
 */
int can_mcan_add_rx_filter_std(const struct device *dev, can_rx_callback_t callback,
			       void *user_data, const struct can_filter *filter)
{
	const struct can_mcan_config *config = dev->config;
	const struct can_mcan_callbacks *cbs = config->callbacks;
	struct can_mcan_data *data = dev->data;
	struct can_mcan_std_filter filter_element = {
		.sfid1 = filter->id,
		.sfid2 = filter->mask,
		.sft = CAN_MCAN_SFT_CLASSIC
	};
	int filter_id = -ENOSPC;
	int err;
	int i;

	k_mutex_lock(&data->lock, K_FOREVER);

	/* Find a free standard filter slot */
	for (i = 0; i < cbs->num_std; i++) {
		if (cbs->std[i].function == NULL) {
			filter_id = i;
			break;
		}
	}

	if (filter_id == -ENOSPC) {
		LOG_WRN("No free standard id filter left");
		k_mutex_unlock(&data->lock);
		return -ENOSPC;
	}

	/* TODO proper fifo balancing */
	filter_element.sfec = filter_id & 0x01 ? CAN_MCAN_XFEC_FIFO1 : CAN_MCAN_XFEC_FIFO0;

	err = can_mcan_write_mram(dev, config->mram_offsets[CAN_MCAN_MRAM_CFG_STD_FILTER] +
				  filter_id * sizeof(struct can_mcan_std_filter),
				  &filter_element, sizeof(filter_element));
	if (err != 0) {
		LOG_ERR("failed to write std filter element (err %d)", err);
		/* Release the lock on the error path (was previously leaked) */
		k_mutex_unlock(&data->lock);
		return err;
	}

	k_mutex_unlock(&data->lock);

	LOG_DBG("Attached std filter at %d", filter_id);

	/* Valid std filter indices are strictly less than num_std */
	__ASSERT_NO_MSG(filter_id < cbs->num_std);
	cbs->std[filter_id].function = callback;
	cbs->std[filter_id].user_data = user_data;
	cbs->std[filter_id].flags = filter->flags;

	return filter_id;
}
1058 
/* Add an extended (29-bit) ID RX filter.
 *
 * Finds a free extended filter element, writes a classic (ID + mask)
 * filter to message RAM, and registers the callback for it.
 *
 * @return the filter index on success, -ENOSPC if no filter element is
 *         free, or a negative error code from the MRAM access layer
 *         (previously the mutex was leaked on that error path).
 */
static int can_mcan_add_rx_filter_ext(const struct device *dev, can_rx_callback_t callback,
				      void *user_data, const struct can_filter *filter)
{
	const struct can_mcan_config *config = dev->config;
	const struct can_mcan_callbacks *cbs = config->callbacks;
	struct can_mcan_data *data = dev->data;
	struct can_mcan_ext_filter filter_element = {
		.efid2 = filter->mask,
		.efid1 = filter->id,
		.eft = CAN_MCAN_EFT_CLASSIC
	};
	int filter_id = -ENOSPC;
	int err;
	int i;

	k_mutex_lock(&data->lock, K_FOREVER);

	/* Find a free extended filter slot */
	for (i = 0; i < cbs->num_ext; i++) {
		if (cbs->ext[i].function == NULL) {
			filter_id = i;
			break;
		}
	}

	if (filter_id == -ENOSPC) {
		LOG_WRN("No free extended id filter left");
		k_mutex_unlock(&data->lock);
		return -ENOSPC;
	}

	/* TODO proper fifo balancing */
	filter_element.efec = filter_id & 0x01 ? CAN_MCAN_XFEC_FIFO1 : CAN_MCAN_XFEC_FIFO0;

	err = can_mcan_write_mram(dev, config->mram_offsets[CAN_MCAN_MRAM_CFG_EXT_FILTER] +
				  filter_id * sizeof(struct can_mcan_ext_filter),
				  &filter_element, sizeof(filter_element));
	if (err != 0) {
		/* Log message corrected: this is the ext filter path */
		LOG_ERR("failed to write ext filter element (err %d)", err);
		/* Release the lock on the error path (was previously leaked) */
		k_mutex_unlock(&data->lock);
		return err;
	}

	k_mutex_unlock(&data->lock);

	LOG_DBG("Attached ext filter at %d", filter_id);

	/* Valid ext filter indices are strictly less than num_ext */
	__ASSERT_NO_MSG(filter_id < cbs->num_ext);
	cbs->ext[filter_id].function = callback;
	cbs->ext[filter_id].user_data = user_data;
	cbs->ext[filter_id].flags = filter->flags;

	return filter_id;
}
1111 
/* Add an RX filter and return its filter ID (standard filters first, extended
 * filter IDs offset by the number of standard filter slots), or a negative
 * errno code on failure.
 */
int can_mcan_add_rx_filter(const struct device *dev, can_rx_callback_t callback, void *user_data,
			   const struct can_filter *filter)
{
	const struct can_mcan_config *config = dev->config;
	const struct can_mcan_callbacks *cbs = config->callbacks;
	int filter_id;

	/* A callback is mandatory; there is no other way to deliver frames. */
	if (callback == NULL) {
		return -EINVAL;
	}

#ifdef CONFIG_CAN_FD_MODE
	if ((filter->flags &
	     ~(CAN_FILTER_IDE | CAN_FILTER_DATA | CAN_FILTER_RTR | CAN_FILTER_FDF)) != 0U) {
#else  /* CONFIG_CAN_FD_MODE */
	if ((filter->flags & ~(CAN_FILTER_IDE | CAN_FILTER_DATA | CAN_FILTER_RTR)) != 0U) {
#endif /* !CONFIG_CAN_FD_MODE */
		LOG_ERR("unsupported CAN filter flags 0x%02x", filter->flags);
		return -ENOTSUP;
	}

	/* Standard (11-bit) ID filters use the low range of filter IDs. */
	if ((filter->flags & CAN_FILTER_IDE) == 0U) {
		return can_mcan_add_rx_filter_std(dev, callback, user_data, filter);
	}

	/* Extended (29-bit) ID filter IDs are offset past the standard ones. */
	filter_id = can_mcan_add_rx_filter_ext(dev, callback, user_data, filter);
	if (filter_id >= 0) {
		filter_id += cbs->num_std;
	}

	return filter_id;
}
1144 
1145 void can_mcan_remove_rx_filter(const struct device *dev, int filter_id)
1146 {
1147 	const struct can_mcan_config *config = dev->config;
1148 	const struct can_mcan_callbacks *cbs = config->callbacks;
1149 	struct can_mcan_data *data = dev->data;
1150 	int err;
1151 
1152 	k_mutex_lock(&data->lock, K_FOREVER);
1153 
1154 	if (filter_id >= cbs->num_std) {
1155 		filter_id -= cbs->num_std;
1156 		if (filter_id >= cbs->num_ext) {
1157 			LOG_ERR("Wrong filter id");
1158 			k_mutex_unlock(&data->lock);
1159 			return;
1160 		}
1161 
1162 		cbs->ext[filter_id].function = NULL;
1163 		cbs->ext[filter_id].user_data = NULL;
1164 
1165 		err = can_mcan_clear_mram(dev, config->mram_offsets[CAN_MCAN_MRAM_CFG_EXT_FILTER] +
1166 					filter_id * sizeof(struct can_mcan_ext_filter),
1167 					sizeof(struct can_mcan_ext_filter));
1168 		if (err != 0) {
1169 			LOG_ERR("failed to clear ext filter element (err %d)", err);
1170 		}
1171 	} else {
1172 		cbs->std[filter_id].function = NULL;
1173 		cbs->std[filter_id].user_data = NULL;
1174 
1175 		err = can_mcan_clear_mram(dev, config->mram_offsets[CAN_MCAN_MRAM_CFG_STD_FILTER] +
1176 					filter_id * sizeof(struct can_mcan_std_filter),
1177 					sizeof(struct can_mcan_std_filter));
1178 		if (err != 0) {
1179 			LOG_ERR("failed to clear std filter element (err %d)", err);
1180 		}
1181 	}
1182 
1183 	k_mutex_unlock(&data->lock);
1184 }
1185 
1186 void can_mcan_set_state_change_callback(const struct device *dev,
1187 					can_state_change_callback_t callback, void *user_data)
1188 {
1189 	struct can_mcan_data *data = dev->data;
1190 
1191 	data->state_change_cb = callback;
1192 	data->state_change_cb_data = user_data;
1193 }
1194 
1195 int can_mcan_get_max_bitrate(const struct device *dev, uint32_t *max_bitrate)
1196 {
1197 	const struct can_mcan_config *config = dev->config;
1198 
1199 	*max_bitrate = config->max_bitrate;
1200 
1201 	return 0;
1202 }
1203 
1204 /* helper function allowing mcan drivers without access to private mcan
1205  * definitions to set CCCR_CCE, which might be needed to disable write
1206  * protection for some registers.
1207  */
1208 void can_mcan_enable_configuration_change(const struct device *dev)
1209 {
1210 	struct can_mcan_data *data = dev->data;
1211 	uint32_t cccr;
1212 	int err;
1213 
1214 	k_mutex_lock(&data->lock, K_FOREVER);
1215 
1216 	err = can_mcan_read_reg(dev, CAN_MCAN_CCCR, &cccr);
1217 	if (err != 0) {
1218 		goto unlock;
1219 	}
1220 
1221 	cccr |= CAN_MCAN_CCCR_CCE;
1222 
1223 	err = can_mcan_write_reg(dev, CAN_MCAN_CCCR, cccr);
1224 	if (err != 0) {
1225 		goto unlock;
1226 	}
1227 
1228 unlock:
1229 	k_mutex_unlock(&data->lock);
1230 }
1231 
/* Program the message RAM layout into the M_CAN section configuration
 * registers. Each section register takes the section start address (relative
 * to the message RAM base address as seen by the M_CAN IP) and, where the
 * register has such a field, the number of elements in that section.
 *
 * @param dev  M_CAN device
 * @param mrba message RAM base address as seen by the M_CAN IP
 * @param mram message RAM address as seen by the CPU
 * @return 0 on success, negative errno code on failure
 */
int can_mcan_configure_mram(const struct device *dev, uintptr_t mrba, uintptr_t mram)
{
	const struct can_mcan_config *config = dev->config;
	uint32_t addr;
	uint32_t reg;
	int err;

	/* These registers are only writable after leaving sleep mode,
	 * entering init mode, and enabling configuration change (CCE).
	 */
	err = can_mcan_exit_sleep_mode(dev);
	if (err != 0) {
		LOG_ERR("Failed to exit sleep mode");
		return -EIO;
	}

	err = can_mcan_enter_init_mode(dev, K_MSEC(CAN_INIT_TIMEOUT_MS));
	if (err != 0) {
		LOG_ERR("Failed to enter init mode");
		return -EIO;
	}

	can_mcan_enable_configuration_change(dev);

	/* Standard (11-bit) ID filter list */
	addr = mram - mrba + config->mram_offsets[CAN_MCAN_MRAM_CFG_STD_FILTER];
	reg = (addr & CAN_MCAN_SIDFC_FLSSA) | FIELD_PREP(CAN_MCAN_SIDFC_LSS,
		config->mram_elements[CAN_MCAN_MRAM_CFG_STD_FILTER]);
	err = can_mcan_write_reg(dev, CAN_MCAN_SIDFC, reg);
	if (err != 0) {
		return err;
	}

	/* Extended (29-bit) ID filter list */
	addr = mram - mrba + config->mram_offsets[CAN_MCAN_MRAM_CFG_EXT_FILTER];
	reg = (addr & CAN_MCAN_XIDFC_FLESA) | FIELD_PREP(CAN_MCAN_XIDFC_LSS,
		config->mram_elements[CAN_MCAN_MRAM_CFG_EXT_FILTER]);
	err = can_mcan_write_reg(dev, CAN_MCAN_XIDFC, reg);
	if (err != 0) {
		return err;
	}

	/* RX FIFO 0 */
	addr = mram - mrba + config->mram_offsets[CAN_MCAN_MRAM_CFG_RX_FIFO0];
	reg = (addr & CAN_MCAN_RXF0C_F0SA) | FIELD_PREP(CAN_MCAN_RXF0C_F0S,
		config->mram_elements[CAN_MCAN_MRAM_CFG_RX_FIFO0]);
	err = can_mcan_write_reg(dev, CAN_MCAN_RXF0C, reg);
	if (err != 0) {
		return err;
	}

	/* RX FIFO 1 */
	addr = mram - mrba + config->mram_offsets[CAN_MCAN_MRAM_CFG_RX_FIFO1];
	reg = (addr & CAN_MCAN_RXF1C_F1SA) | FIELD_PREP(CAN_MCAN_RXF1C_F1S,
		config->mram_elements[CAN_MCAN_MRAM_CFG_RX_FIFO1]);
	err = can_mcan_write_reg(dev, CAN_MCAN_RXF1C, reg);
	if (err != 0) {
		return err;
	}

	/* Dedicated RX buffers (start address only; no element count field) */
	addr = mram - mrba + config->mram_offsets[CAN_MCAN_MRAM_CFG_RX_BUFFER];
	reg = (addr & CAN_MCAN_RXBC_RBSA);
	err = can_mcan_write_reg(dev, CAN_MCAN_RXBC, reg);
	if (err != 0) {
		return err;
	}

	/* TX event FIFO */
	addr = mram - mrba + config->mram_offsets[CAN_MCAN_MRAM_CFG_TX_EVENT_FIFO];
	reg = (addr & CAN_MCAN_TXEFC_EFSA) | FIELD_PREP(CAN_MCAN_TXEFC_EFS,
		config->mram_elements[CAN_MCAN_MRAM_CFG_TX_EVENT_FIFO]);
	err = can_mcan_write_reg(dev, CAN_MCAN_TXEFC, reg);
	if (err != 0) {
		return err;
	}

	/* TX buffers, with the TFQM bit set */
	addr = mram - mrba + config->mram_offsets[CAN_MCAN_MRAM_CFG_TX_BUFFER];
	reg = (addr & CAN_MCAN_TXBC_TBSA) | FIELD_PREP(CAN_MCAN_TXBC_TFQS,
		config->mram_elements[CAN_MCAN_MRAM_CFG_TX_BUFFER]) | CAN_MCAN_TXBC_TFQM;
	err = can_mcan_write_reg(dev, CAN_MCAN_TXBC, reg);
	if (err != 0) {
		return err;
	}

	/* 64 byte Tx Buffer data fields size */
	reg = CAN_MCAN_TXESC_TBDS;
	err = can_mcan_write_reg(dev, CAN_MCAN_TXESC, reg);
	if (err != 0) {
		return err;
	}

	/* 64 byte Rx Buffer/FIFO1/FIFO0 data fields size */
	reg = CAN_MCAN_RXESC_RBDS | CAN_MCAN_RXESC_F1DS | CAN_MCAN_RXESC_F0DS;
	err = can_mcan_write_reg(dev, CAN_MCAN_RXESC, reg);
	if (err != 0) {
		return err;
	}

	return 0;
}
1324 
/* Common initialization for M_CAN-based CAN controllers.
 *
 * Validates the front-end ops and callback configuration, initializes the
 * locking primitives, brings the controller out of sleep and into init mode,
 * programs default mode/filter/timing settings, enables the interrupts used
 * by the driver, and finally zeroes the message RAM.
 *
 * @param dev M_CAN device
 * @return 0 on success, negative errno code on failure
 */
int can_mcan_init(const struct device *dev)
{
	const struct can_mcan_config *config = dev->config;
	const struct can_mcan_callbacks *cbs = config->callbacks;
	struct can_mcan_data *data = dev->data;
	struct can_timing timing = { 0 };
#ifdef CONFIG_CAN_FD_MODE
	struct can_timing timing_data = { 0 };
#endif /* CONFIG_CAN_FD_MODE */
	uint32_t reg;
	int err;

	/* All register and message RAM accessors are mandatory front-end ops. */
	__ASSERT_NO_MSG(config->ops->read_reg != NULL);
	__ASSERT_NO_MSG(config->ops->write_reg != NULL);
	__ASSERT_NO_MSG(config->ops->read_mram != NULL);
	__ASSERT_NO_MSG(config->ops->write_mram != NULL);
	__ASSERT_NO_MSG(config->ops->clear_mram != NULL);
	__ASSERT_NO_MSG(config->callbacks != NULL);

	/* Callback slot counts may not exceed the corresponding message RAM
	 * element counts.
	 */
	__ASSERT_NO_MSG(cbs->num_tx <= config->mram_elements[CAN_MCAN_MRAM_CFG_TX_BUFFER]);
	__ASSERT_NO_MSG(cbs->num_std <= config->mram_elements[CAN_MCAN_MRAM_CFG_STD_FILTER]);
	__ASSERT_NO_MSG(cbs->num_ext <= config->mram_elements[CAN_MCAN_MRAM_CFG_EXT_FILTER]);

	k_mutex_init(&data->lock);
	k_mutex_init(&data->tx_mtx);
	/* One semaphore count per TX buffer element. */
	k_sem_init(&data->tx_sem, cbs->num_tx, cbs->num_tx);

	/* The transceiver (PHY) is optional; when present, it must be ready. */
	if (config->phy != NULL) {
		if (!device_is_ready(config->phy)) {
			LOG_ERR("CAN transceiver not ready");
			return -ENODEV;
		}
	}

	err = can_mcan_exit_sleep_mode(dev);
	if (err != 0) {
		LOG_ERR("Failed to exit sleep mode");
		return -EIO;
	}

	err = can_mcan_enter_init_mode(dev, K_MSEC(CAN_INIT_TIMEOUT_MS));
	if (err != 0) {
		LOG_ERR("Failed to enter init mode");
		return -EIO;
	}

	can_mcan_enable_configuration_change(dev);

#if CONFIG_CAN_LOG_LEVEL >= LOG_LEVEL_DBG
	/* Log the M_CAN IP core release information (CREL register). */
	err = can_mcan_read_reg(dev, CAN_MCAN_CREL, &reg);
	if (err != 0) {
		return -EIO;
	}

	LOG_DBG("IP rel: %lu.%lu.%lu %02lu.%lu.%lu", FIELD_GET(CAN_MCAN_CREL_REL, reg),
		FIELD_GET(CAN_MCAN_CREL_STEP, reg), FIELD_GET(CAN_MCAN_CREL_SUBSTEP, reg),
		FIELD_GET(CAN_MCAN_CREL_YEAR, reg), FIELD_GET(CAN_MCAN_CREL_MON, reg),
		FIELD_GET(CAN_MCAN_CREL_DAY, reg));
#endif /* CONFIG_CAN_LOG_LEVEL >= LOG_LEVEL_DBG */

	err = can_mcan_read_reg(dev, CAN_MCAN_CCCR, &reg);
	if (err != 0) {
		return err;
	}

	/* Start out with CAN FD (FDOE), bit rate switching (BRSE), test mode,
	 * bus monitoring (MON) and restricted operation (ASM) all disabled.
	 */
	reg &= ~(CAN_MCAN_CCCR_FDOE | CAN_MCAN_CCCR_BRSE | CAN_MCAN_CCCR_TEST | CAN_MCAN_CCCR_MON |
		 CAN_MCAN_CCCR_ASM);

	err = can_mcan_write_reg(dev, CAN_MCAN_CCCR, reg);
	if (err != 0) {
		return err;
	}

	err = can_mcan_read_reg(dev, CAN_MCAN_TEST, &reg);
	if (err != 0) {
		return err;
	}

	/* Make sure loopback is disabled. */
	reg &= ~(CAN_MCAN_TEST_LBCK);

	err = can_mcan_write_reg(dev, CAN_MCAN_TEST, reg);
	if (err != 0) {
		return err;
	}

#if defined(CONFIG_CAN_DELAY_COMP) && defined(CONFIG_CAN_FD_MODE)
	/* Enable transmitter delay compensation (DBTP.TDC) and program the
	 * configured offset (TDCR.TDCO).
	 */
	err = can_mcan_read_reg(dev, CAN_MCAN_DBTP, &reg);
	if (err != 0) {
		return err;
	}

	reg |= CAN_MCAN_DBTP_TDC;

	err = can_mcan_write_reg(dev, CAN_MCAN_DBTP, reg);
	if (err != 0) {
		return err;
	}

	err = can_mcan_read_reg(dev, CAN_MCAN_TDCR, &reg);
	if (err != 0) {
		return err;
	}

	reg |= FIELD_PREP(CAN_MCAN_TDCR_TDCO, config->tx_delay_comp_offset);

	err = can_mcan_write_reg(dev, CAN_MCAN_TDCR, reg);
	if (err != 0) {
		return err;
	}
#endif /* defined(CONFIG_CAN_DELAY_COMP) && defined(CONFIG_CAN_FD_MODE) */

	err = can_mcan_read_reg(dev, CAN_MCAN_GFC, &reg);
	if (err != 0) {
		return err;
	}

	/* Global filter configuration: set the accept-non-matching-frames
	 * fields (ANFE/ANFS) to 0x2 for both standard and extended IDs.
	 */
	reg |= FIELD_PREP(CAN_MCAN_GFC_ANFE, 0x2) | FIELD_PREP(CAN_MCAN_GFC_ANFS, 0x2);

	err = can_mcan_write_reg(dev, CAN_MCAN_GFC, reg);
	if (err != 0) {
		return err;
	}

	/* Derive the nominal phase bit timing either from the configured
	 * sample point or from explicit segment values.
	 */
	if (config->sample_point) {
		err = can_calc_timing(dev, &timing, config->bus_speed, config->sample_point);
		if (err == -EINVAL) {
			LOG_ERR("Can't find timing for given param");
			return -EIO;
		}
		LOG_DBG("Presc: %d, TS1: %d, TS2: %d", timing.prescaler, timing.phase_seg1,
			timing.phase_seg2);
		LOG_DBG("Sample-point err : %d", err);
	} else if (config->prop_ts1) {
		timing.sjw = config->sjw;
		timing.prop_seg = 0U;
		timing.phase_seg1 = config->prop_ts1;
		timing.phase_seg2 = config->ts2;
		err = can_calc_prescaler(dev, &timing, config->bus_speed);
		if (err != 0) {
			LOG_WRN("Bitrate error: %d", err);
		}
	}
#ifdef CONFIG_CAN_FD_MODE
	/* Same for the data phase bit timing when CAN FD is enabled. */
	if (config->sample_point_data) {
		err = can_calc_timing_data(dev, &timing_data, config->bus_speed_data,
					   config->sample_point_data);
		if (err == -EINVAL) {
			LOG_ERR("Can't find timing for given dataphase param");
			return -EIO;
		}

		LOG_DBG("Sample-point err data phase: %d", err);
	} else if (config->prop_ts1_data) {
		timing_data.sjw = config->sjw_data;
		timing_data.prop_seg = 0U;
		timing_data.phase_seg1 = config->prop_ts1_data;
		timing_data.phase_seg2 = config->ts2_data;
		err = can_calc_prescaler(dev, &timing_data, config->bus_speed_data);
		if (err != 0) {
			LOG_WRN("Dataphase bitrate error: %d", err);
		}
	}
#endif /* CONFIG_CAN_FD_MODE */

	err = can_set_timing(dev, &timing);
	if (err != 0) {
		LOG_ERR("failed to set timing (err %d)", err);
		return -ENODEV;
	}

#ifdef CONFIG_CAN_FD_MODE
	err = can_set_timing_data(dev, &timing_data);
	if (err != 0) {
		LOG_ERR("failed to set data phase timing (err %d)", err);
		return -ENODEV;
	}
#endif /* CONFIG_CAN_FD_MODE */

	/* Enable the interrupts used by the driver: bus-off, error warning,
	 * error passive, message RAM access failure, TX event FIFO and RX
	 * FIFO 0/1 new-message/message-lost events.
	 */
	reg = CAN_MCAN_IE_BOE | CAN_MCAN_IE_EWE | CAN_MCAN_IE_EPE | CAN_MCAN_IE_MRAFE |
	      CAN_MCAN_IE_TEFLE | CAN_MCAN_IE_TEFNE | CAN_MCAN_IE_RF0NE | CAN_MCAN_IE_RF1NE |
	      CAN_MCAN_IE_RF0LE | CAN_MCAN_IE_RF1LE;
#ifdef CONFIG_CAN_STATS
	/* These ISRs are only enabled/used for statistics, they are otherwise
	 * disabled as they may produce a significant amount of frequent ISRs.
	 */
	reg |= CAN_MCAN_IE_PEAE | CAN_MCAN_IE_PEDE;
#endif

	err = can_mcan_write_reg(dev, CAN_MCAN_IE, reg);
	if (err != 0) {
		return err;
	}

	/* Assign the RX FIFO interrupt flags to interrupt line 1; everything
	 * else stays on line 0.
	 */
	reg = CAN_MCAN_ILS_RF0NL | CAN_MCAN_ILS_RF1NL | CAN_MCAN_ILS_RF0LL | CAN_MCAN_ILS_RF1LL;
	err = can_mcan_write_reg(dev, CAN_MCAN_ILS, reg);
	if (err != 0) {
		return err;
	}

	/* Enable both interrupt lines. */
	reg = CAN_MCAN_ILE_EINT0 | CAN_MCAN_ILE_EINT1;
	err = can_mcan_write_reg(dev, CAN_MCAN_ILE, reg);
	if (err != 0) {
		return err;
	}

	/* Interrupt on every TX fifo element */
	reg = CAN_MCAN_TXBTIE_TIE;
	err = can_mcan_write_reg(dev, CAN_MCAN_TXBTIE, reg);
	if (err != 0) {
		return err;
	}

	/* Zero the entire message RAM before use. */
	return can_mcan_clear_mram(dev, 0, config->mram_size);
}
1539