1 /*
2  * Copyright (c) 2022-2023 Vestas Wind Systems A/S
3  * Copyright (c) 2020 Alexander Wachter
4  *
5  * SPDX-License-Identifier: Apache-2.0
6  */
7 
8 #include <zephyr/drivers/can.h>
9 #include <zephyr/drivers/can/can_mcan.h>
10 #include <zephyr/drivers/can/transceiver.h>
11 #include <zephyr/kernel.h>
12 #include <zephyr/logging/log.h>
13 #include <zephyr/sys/sys_io.h>
14 #include <zephyr/sys/util.h>
15 
16 LOG_MODULE_REGISTER(can_mcan, CONFIG_CAN_LOG_LEVEL);
17 
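/* Timeout for polling CCCR mode transitions (sleep mode exit, init mode entry/exit) */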
18 #define CAN_INIT_TIMEOUT_MS 100
19 
int can_mcan_read_reg(const struct device *dev, uint16_t reg, uint32_t *val)
21 {
22 	const struct can_mcan_config *config = dev->config;
23 	int err;
24 
25 	err = config->ops->read_reg(dev, reg, val);
26 	if (err != 0) {
27 		LOG_ERR("failed to read reg 0x%03x (err %d)", reg, err);
28 	}
29 
30 	return err;
31 }
32 
int can_mcan_write_reg(const struct device *dev, uint16_t reg, uint32_t val)
34 {
35 	const struct can_mcan_config *config = dev->config;
36 	int err;
37 
38 	err = config->ops->write_reg(dev, reg, val);
39 	if (err != 0) {
40 		LOG_ERR("failed to write reg 0x%03x (err %d)", reg, err);
41 	}
42 
43 	return err;
44 }
45 
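/*
 * Clear the clock stop request (CCCR.CSR) and wait for the clock stop acknowledge
 * (CCCR.CSA) to deassert. On timeout the request is restored and -EAGAIN is returned.
 */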
static int can_mcan_exit_sleep_mode(const struct device *dev)
47 {
48 	struct can_mcan_data *data = dev->data;
49 	uint32_t start_time;
50 	uint32_t cccr;
51 	int err;
52 
53 	k_mutex_lock(&data->lock, K_FOREVER);
54 
55 	err = can_mcan_read_reg(dev, CAN_MCAN_CCCR, &cccr);
56 	if (err != 0) {
57 		goto unlock;
58 	}
59 
60 	cccr &= ~CAN_MCAN_CCCR_CSR;
61 
62 	err = can_mcan_write_reg(dev, CAN_MCAN_CCCR, cccr);
63 	if (err != 0) {
64 		goto unlock;
65 	}
66 
67 	start_time = k_cycle_get_32();
68 
69 	err = can_mcan_read_reg(dev, CAN_MCAN_CCCR, &cccr);
70 	if (err != 0) {
71 		goto unlock;
72 	}
73 
74 	while ((cccr & CAN_MCAN_CCCR_CSA) == CAN_MCAN_CCCR_CSA) {
75 		if (k_cycle_get_32() - start_time > k_ms_to_cyc_ceil32(CAN_INIT_TIMEOUT_MS)) {
76 			cccr |= CAN_MCAN_CCCR_CSR;
77 			err = can_mcan_write_reg(dev, CAN_MCAN_CCCR, cccr);
78 			if (err != 0) {
79 				goto unlock;
80 			}
81 
82 			err = -EAGAIN;
83 			goto unlock;
84 		}
85 
86 		err = can_mcan_read_reg(dev, CAN_MCAN_CCCR, &cccr);
87 		if (err != 0) {
88 			goto unlock;
89 		}
90 	}
91 
92 
93 unlock:
94 	k_mutex_unlock(&data->lock);
95 
96 	return err;
97 }
98 
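/*
 * Request initialization mode by setting CCCR.INIT and poll until the bit reads back
 * as set, returning -EAGAIN if the transition does not complete within the given timeout.
 */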
static int can_mcan_enter_init_mode(const struct device *dev, k_timeout_t timeout)
100 {
101 	struct can_mcan_data *data = dev->data;
102 	int64_t start_time;
103 	uint32_t cccr;
104 	int err;
105 
106 	k_mutex_lock(&data->lock, K_FOREVER);
107 
108 	err = can_mcan_read_reg(dev, CAN_MCAN_CCCR, &cccr);
109 	if (err != 0) {
110 		goto unlock;
111 	}
112 
113 	cccr |= CAN_MCAN_CCCR_INIT;
114 
115 	err = can_mcan_write_reg(dev, CAN_MCAN_CCCR, cccr);
116 	if (err != 0) {
117 		goto unlock;
118 	}
119 
120 	start_time = k_uptime_ticks();
121 
122 	err = can_mcan_read_reg(dev, CAN_MCAN_CCCR, &cccr);
123 	if (err != 0) {
124 		goto unlock;
125 	}
126 
127 	while ((cccr & CAN_MCAN_CCCR_INIT) == 0U) {
128 		if (k_uptime_ticks() - start_time > timeout.ticks) {
129 			cccr &= ~CAN_MCAN_CCCR_INIT;
130 			err = can_mcan_write_reg(dev, CAN_MCAN_CCCR, cccr);
131 			if (err != 0) {
132 				goto unlock;
133 			}
134 
135 			err = -EAGAIN;
136 			goto unlock;
137 		}
138 
139 		err = can_mcan_read_reg(dev, CAN_MCAN_CCCR, &cccr);
140 		if (err != 0) {
141 			goto unlock;
142 		}
143 	}
144 
145 unlock:
146 	k_mutex_unlock(&data->lock);
147 
148 	return err;
149 }
150 
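/*
 * Clear CCCR.INIT to leave initialization mode and poll until the bit reads back as
 * cleared, returning -EAGAIN on timeout.
 */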
static int can_mcan_leave_init_mode(const struct device *dev, k_timeout_t timeout)
152 {
153 	struct can_mcan_data *data = dev->data;
154 	int64_t start_time;
155 	uint32_t cccr;
156 	int err;
157 
158 	k_mutex_lock(&data->lock, K_FOREVER);
159 
160 	err = can_mcan_read_reg(dev, CAN_MCAN_CCCR, &cccr);
161 	if (err != 0) {
162 		goto unlock;
163 	}
164 
165 	cccr &= ~CAN_MCAN_CCCR_INIT;
166 
167 	err = can_mcan_write_reg(dev, CAN_MCAN_CCCR, cccr);
168 	if (err != 0) {
169 		goto unlock;
170 	}
171 
172 	start_time = k_uptime_ticks();
173 
174 	err = can_mcan_read_reg(dev, CAN_MCAN_CCCR, &cccr);
175 	if (err != 0) {
176 		goto unlock;
177 	}
178 
179 	while ((cccr & CAN_MCAN_CCCR_INIT) != 0U) {
180 		if (k_uptime_ticks() - start_time > timeout.ticks) {
181 			err = -EAGAIN;
182 			goto unlock;
183 		}
184 
185 		err = can_mcan_read_reg(dev, CAN_MCAN_CCCR, &cccr);
186 		if (err != 0) {
187 			goto unlock;
188 		}
189 	}
190 
191 unlock:
192 	k_mutex_unlock(&data->lock);
193 
194 	return err;
195 }
196 
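/*
 * Program the nominal (arbitration phase) bit timing register (NBTP). Only allowed
 * while the controller is stopped; CAN_SJW_NO_CHANGE keeps the currently configured
 * (re)synchronization jump width.
 */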
int can_mcan_set_timing(const struct device *dev, const struct can_timing *timing)
198 {
199 	struct can_mcan_data *data = dev->data;
200 	uint32_t nbtp = 0U;
201 	int err;
202 
203 	if (data->started) {
204 		return -EBUSY;
205 	}
206 
207 	__ASSERT_NO_MSG(timing->prop_seg == 0U);
208 	__ASSERT_NO_MSG(timing->phase_seg1 <= 0x100 && timing->phase_seg1 > 1U);
209 	__ASSERT_NO_MSG(timing->phase_seg2 <= 0x80 && timing->phase_seg2 > 1U);
210 	__ASSERT_NO_MSG(timing->prescaler <= 0x200 && timing->prescaler > 0U);
211 	__ASSERT_NO_MSG(timing->sjw == CAN_SJW_NO_CHANGE ||
212 			(timing->sjw <= 0x80 && timing->sjw > 0U));
213 
214 	k_mutex_lock(&data->lock, K_FOREVER);
215 
216 	if (timing->sjw == CAN_SJW_NO_CHANGE) {
217 		err = can_mcan_read_reg(dev, CAN_MCAN_NBTP, &nbtp);
218 		if (err != 0) {
219 			goto unlock;
220 		}
221 
222 		nbtp &= CAN_MCAN_NBTP_NSJW;
223 	} else {
224 		nbtp |= FIELD_PREP(CAN_MCAN_NBTP_NSJW, timing->sjw - 1UL);
225 	}
226 
227 	nbtp |= FIELD_PREP(CAN_MCAN_NBTP_NTSEG1, timing->phase_seg1 - 1UL) |
228 		FIELD_PREP(CAN_MCAN_NBTP_NTSEG2, timing->phase_seg2 - 1UL) |
229 		FIELD_PREP(CAN_MCAN_NBTP_NBRP, timing->prescaler - 1UL);
230 
231 	err = can_mcan_write_reg(dev, CAN_MCAN_NBTP, nbtp);
232 	if (err != 0) {
233 		goto unlock;
234 	}
235 
236 unlock:
237 	k_mutex_unlock(&data->lock);
238 
239 	return err;
240 }
241 
242 #ifdef CONFIG_CAN_FD_MODE
int can_mcan_set_timing_data(const struct device *dev, const struct can_timing *timing_data)
244 {
245 	struct can_mcan_data *data = dev->data;
246 	uint32_t dbtp = 0U;
247 	int err;
248 
249 	if (data->started) {
250 		return -EBUSY;
251 	}
252 
253 	__ASSERT_NO_MSG(timing_data->prop_seg == 0U);
254 	__ASSERT_NO_MSG(timing_data->phase_seg1 <= 0x20 && timing_data->phase_seg1 > 0U);
255 	__ASSERT_NO_MSG(timing_data->phase_seg2 <= 0x10 && timing_data->phase_seg2 > 0U);
256 	__ASSERT_NO_MSG(timing_data->prescaler <= 0x20 && timing_data->prescaler > 0U);
257 	__ASSERT_NO_MSG(timing_data->sjw == CAN_SJW_NO_CHANGE ||
258 			(timing_data->sjw <= 0x10 && timing_data->sjw > 0U));
259 
260 	k_mutex_lock(&data->lock, K_FOREVER);
261 
262 	if (timing_data->sjw == CAN_SJW_NO_CHANGE) {
263 		err = can_mcan_read_reg(dev, CAN_MCAN_DBTP, &dbtp);
264 		if (err != 0) {
265 			goto unlock;
266 		}
267 
268 		dbtp &= CAN_MCAN_DBTP_DSJW;
269 	} else {
270 		dbtp |= FIELD_PREP(CAN_MCAN_DBTP_DSJW, timing_data->sjw - 1UL);
271 	}
272 
273 	dbtp |= FIELD_PREP(CAN_MCAN_DBTP_DTSEG1, timing_data->phase_seg1 - 1UL) |
274 		FIELD_PREP(CAN_MCAN_DBTP_DTSEG2, timing_data->phase_seg2 - 1UL) |
275 		FIELD_PREP(CAN_MCAN_DBTP_DBRP, timing_data->prescaler - 1UL);
276 
277 	err = can_mcan_write_reg(dev, CAN_MCAN_DBTP, dbtp);
278 	if (err != 0) {
279 		goto unlock;
280 	}
281 
282 unlock:
283 	k_mutex_unlock(&data->lock);
284 
285 	return err;
286 }
287 #endif /* CONFIG_CAN_FD_MODE */
288 
int can_mcan_get_capabilities(const struct device *dev, can_mode_t *cap)
290 {
291 	ARG_UNUSED(dev);
292 
293 	*cap = CAN_MODE_NORMAL | CAN_MODE_LOOPBACK | CAN_MODE_LISTENONLY;
294 
295 #if CONFIG_CAN_FD_MODE
296 	*cap |= CAN_MODE_FD;
297 #endif /* CONFIG_CAN_FD_MODE */
298 
299 	return 0;
300 }
301 
int can_mcan_start(const struct device *dev)
303 {
304 	const struct can_mcan_config *config = dev->config;
305 	struct can_mcan_data *data = dev->data;
306 	int err;
307 
308 	if (data->started) {
309 		return -EALREADY;
310 	}
311 
312 	if (config->phy != NULL) {
313 		err = can_transceiver_enable(config->phy);
314 		if (err != 0) {
315 			LOG_ERR("failed to enable CAN transceiver (err %d)", err);
316 			return err;
317 		}
318 	}
319 
320 	err = can_mcan_leave_init_mode(dev, K_MSEC(CAN_INIT_TIMEOUT_MS));
321 	if (err != 0) {
322 		LOG_ERR("failed to leave init mode");
323 
324 		if (config->phy != NULL) {
325 			/* Attempt to disable the CAN transceiver in case of error */
326 			(void)can_transceiver_disable(config->phy);
327 		}
328 
329 		return -EIO;
330 	}
331 
332 	data->started = true;
333 
334 	return 0;
335 }
336 
int can_mcan_stop(const struct device *dev)
338 {
339 	const struct can_mcan_config *config = dev->config;
340 	const struct can_mcan_callbacks *cbs = config->callbacks;
341 	struct can_mcan_data *data = dev->data;
342 	can_tx_callback_t tx_cb;
343 	uint32_t tx_idx;
344 	int err;
345 
346 	if (!data->started) {
347 		return -EALREADY;
348 	}
349 
350 	/* CAN transmissions are automatically stopped when entering init mode */
351 	err = can_mcan_enter_init_mode(dev, K_MSEC(CAN_INIT_TIMEOUT_MS));
352 	if (err != 0) {
353 		LOG_ERR("Failed to enter init mode");
354 		return -EIO;
355 	}
356 
357 	if (config->phy != NULL) {
358 		err = can_transceiver_disable(config->phy);
359 		if (err != 0) {
360 			LOG_ERR("failed to disable CAN transceiver (err %d)", err);
361 			return err;
362 		}
363 	}
364 
365 	can_mcan_enable_configuration_change(dev);
366 
367 	data->started = false;
368 
369 	for (tx_idx = 0U; tx_idx < cbs->num_tx; tx_idx++) {
370 		tx_cb = cbs->tx[tx_idx].function;
371 
372 		if (tx_cb != NULL) {
373 			cbs->tx[tx_idx].function = NULL;
374 			tx_cb(dev, -ENETDOWN, cbs->tx[tx_idx].user_data);
375 			k_sem_give(&data->tx_sem);
376 		}
377 	}
378 
379 	return 0;
380 }
381 
int can_mcan_set_mode(const struct device *dev, can_mode_t mode)
383 {
384 	struct can_mcan_data *data = dev->data;
385 	uint32_t cccr;
386 	uint32_t test;
387 	int err;
388 
389 #ifdef CONFIG_CAN_FD_MODE
390 	if ((mode & ~(CAN_MODE_LOOPBACK | CAN_MODE_LISTENONLY | CAN_MODE_FD)) != 0U) {
391 		LOG_ERR("unsupported mode: 0x%08x", mode);
392 		return -ENOTSUP;
393 	}
394 #else  /* CONFIG_CAN_FD_MODE */
395 	if ((mode & ~(CAN_MODE_LOOPBACK | CAN_MODE_LISTENONLY)) != 0U) {
396 		LOG_ERR("unsupported mode: 0x%08x", mode);
397 		return -ENOTSUP;
398 	}
399 #endif /* !CONFIG_CAN_FD_MODE */
400 
401 	if (data->started) {
402 		return -EBUSY;
403 	}
404 
405 	k_mutex_lock(&data->lock, K_FOREVER);
406 
407 	err = can_mcan_read_reg(dev, CAN_MCAN_CCCR, &cccr);
408 	if (err != 0) {
409 		goto unlock;
410 	}
411 
412 	err = can_mcan_read_reg(dev, CAN_MCAN_TEST, &test);
413 	if (err != 0) {
414 		goto unlock;
415 	}
416 
417 	if ((mode & CAN_MODE_LOOPBACK) != 0) {
418 		/* Loopback mode */
419 		cccr |= CAN_MCAN_CCCR_TEST;
420 		test |= CAN_MCAN_TEST_LBCK;
421 	} else {
422 		cccr &= ~CAN_MCAN_CCCR_TEST;
423 	}
424 
425 	if ((mode & CAN_MODE_LISTENONLY) != 0) {
426 		/* Bus monitoring mode */
427 		cccr |= CAN_MCAN_CCCR_MON;
428 	} else {
429 		cccr &= ~CAN_MCAN_CCCR_MON;
430 	}
431 
432 #ifdef CONFIG_CAN_FD_MODE
433 	if ((mode & CAN_MODE_FD) != 0) {
434 		cccr |= CAN_MCAN_CCCR_FDOE | CAN_MCAN_CCCR_BRSE;
435 		data->fd = true;
436 	} else {
437 		cccr &= ~(CAN_MCAN_CCCR_FDOE | CAN_MCAN_CCCR_BRSE);
438 		data->fd = false;
439 	}
440 #endif /* CONFIG_CAN_FD_MODE */
441 
442 	err = can_mcan_write_reg(dev, CAN_MCAN_CCCR, cccr);
443 	if (err != 0) {
444 		goto unlock;
445 	}
446 
447 	err = can_mcan_write_reg(dev, CAN_MCAN_TEST, test);
448 	if (err != 0) {
449 		goto unlock;
450 	}
451 
452 unlock:
453 	k_mutex_unlock(&data->lock);
454 
455 	return err;
456 }
457 
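/* Read the current bus state and error counters and notify the registered state change callback */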
static void can_mcan_state_change_handler(const struct device *dev)
459 {
460 	struct can_mcan_data *data = dev->data;
461 	const can_state_change_callback_t cb = data->state_change_cb;
462 	void *cb_data = data->state_change_cb_data;
463 	struct can_bus_err_cnt err_cnt;
464 	enum can_state state;
465 
466 	(void)can_mcan_get_state(dev, &state, &err_cnt);
467 
468 	if (cb != NULL) {
469 		cb(dev, state, err_cnt, cb_data);
470 	}
471 }
472 
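/*
 * Drain the TX event FIFO: acknowledge each event, release one TX semaphore slot and
 * invoke the TX callback stored for the corresponding TX buffer.
 */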
static void can_mcan_tx_event_handler(const struct device *dev)
474 {
475 	const struct can_mcan_config *config = dev->config;
476 	const struct can_mcan_callbacks *cbs = config->callbacks;
477 	struct can_mcan_data *data = dev->data;
478 	struct can_mcan_tx_event_fifo tx_event;
479 	can_tx_callback_t tx_cb;
480 	uint32_t event_idx;
481 	uint32_t tx_idx;
482 	uint32_t txefs;
483 	int err;
484 
485 	err = can_mcan_read_reg(dev, CAN_MCAN_TXEFS, &txefs);
486 	if (err != 0) {
487 		return;
488 	}
489 
490 	while ((txefs & CAN_MCAN_TXEFS_EFFL) != 0U) {
491 		event_idx = FIELD_GET(CAN_MCAN_TXEFS_EFGI, txefs);
492 		err = can_mcan_read_mram(dev,
493 					 config->mram_offsets[CAN_MCAN_MRAM_CFG_TX_EVENT_FIFO] +
494 					 event_idx * sizeof(struct can_mcan_tx_event_fifo),
495 					 &tx_event,
496 					 sizeof(struct can_mcan_tx_event_fifo));
497 		if (err != 0) {
498 			LOG_ERR("failed to read tx event fifo (err %d)", err);
499 			return;
500 		}
501 
502 		tx_idx = tx_event.mm;
503 
504 		/* Acknowledge TX event */
505 		err = can_mcan_write_reg(dev, CAN_MCAN_TXEFA, event_idx);
506 		if (err != 0) {
507 			return;
508 		}
509 
510 		k_sem_give(&data->tx_sem);
511 
		__ASSERT_NO_MSG(tx_idx < cbs->num_tx);
513 		tx_cb = cbs->tx[tx_idx].function;
514 		cbs->tx[tx_idx].function = NULL;
515 		tx_cb(dev, 0, cbs->tx[tx_idx].user_data);
516 
517 		err = can_mcan_read_reg(dev, CAN_MCAN_TXEFS, &txefs);
518 		if (err != 0) {
519 			return;
520 		}
521 	}
522 }
523 
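/*
 * Interrupt line 0 service routine: bus-off/error passive/error warning state changes,
 * TX event FIFO new entry and element lost, access to reserved address and Message RAM
 * access failure.
 */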
void can_mcan_line_0_isr(const struct device *dev)
525 {
526 	const uint32_t events = CAN_MCAN_IR_BO | CAN_MCAN_IR_EP | CAN_MCAN_IR_EW |
527 				CAN_MCAN_IR_TEFN | CAN_MCAN_IR_TEFL | CAN_MCAN_IR_ARA |
528 				CAN_MCAN_IR_MRAF;
529 	struct can_mcan_data *data = dev->data;
530 	uint32_t ir;
531 	int err;
532 
533 	err = can_mcan_read_reg(dev, CAN_MCAN_IR, &ir);
534 	if (err != 0) {
535 		return;
536 	}
537 
538 	while ((ir & events) != 0U) {
539 		err = can_mcan_write_reg(dev, CAN_MCAN_IR, ir & events);
540 		if (err != 0) {
541 			return;
542 		}
543 
544 		if ((ir & (CAN_MCAN_IR_BO | CAN_MCAN_IR_EP | CAN_MCAN_IR_EW)) != 0U) {
545 			can_mcan_state_change_handler(dev);
546 		}
547 
548 		/* TX event FIFO new entry */
549 		if ((ir & CAN_MCAN_IR_TEFN) != 0U) {
550 			can_mcan_tx_event_handler(dev);
551 		}
552 
553 		if ((ir & CAN_MCAN_IR_TEFL) != 0U) {
554 			LOG_ERR("TX FIFO element lost");
555 			k_sem_give(&data->tx_sem);
556 		}
557 
558 		if ((ir & CAN_MCAN_IR_ARA) != 0U) {
559 			LOG_ERR("Access to reserved address");
560 		}
561 
		if ((ir & CAN_MCAN_IR_MRAF) != 0U) {
563 			LOG_ERR("Message RAM access failure");
564 		}
565 
566 		err = can_mcan_read_reg(dev, CAN_MCAN_IR, &ir);
567 		if (err != 0) {
568 			return;
569 		}
570 	}
571 }
572 
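/*
 * Drain one RX FIFO: read each element header and data from Message RAM, match the
 * RTR/FDF flags against the filter the frame was accepted on, invoke the registered RX
 * callback and acknowledge the element.
 */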
static void can_mcan_get_message(const struct device *dev, uint16_t fifo_offset,
				 uint16_t fifo_status_reg, uint16_t fifo_ack_reg)
575 {
576 	const struct can_mcan_config *config = dev->config;
577 	const struct can_mcan_callbacks *cbs = config->callbacks;
578 	struct can_mcan_rx_fifo_hdr hdr;
579 	struct can_frame frame = {0};
580 	can_rx_callback_t cb;
581 	void *user_data;
582 	uint8_t flags;
583 	uint32_t get_idx;
584 	uint32_t filt_idx;
585 	int data_length;
586 	uint32_t fifo_status;
587 	int err;
588 
589 	err = can_mcan_read_reg(dev, fifo_status_reg, &fifo_status);
590 	if (err != 0) {
591 		return;
592 	}
593 
594 	while (FIELD_GET(CAN_MCAN_RXF0S_F0FL, fifo_status) != 0U) {
595 		get_idx = FIELD_GET(CAN_MCAN_RXF0S_F0GI, fifo_status);
596 
597 		err = can_mcan_read_mram(dev, fifo_offset + get_idx *
598 					 sizeof(struct can_mcan_rx_fifo) +
599 					 offsetof(struct can_mcan_rx_fifo, hdr),
600 					 &hdr, sizeof(struct can_mcan_rx_fifo_hdr));
601 		if (err != 0) {
602 			LOG_ERR("failed to read Rx FIFO header (err %d)", err);
603 			return;
604 		}
605 
606 		frame.dlc = hdr.dlc;
607 
608 		if (hdr.rtr != 0) {
609 			frame.flags |= CAN_FRAME_RTR;
610 		}
611 
612 		if (hdr.fdf != 0) {
613 			frame.flags |= CAN_FRAME_FDF;
614 		}
615 
616 		if (hdr.brs != 0) {
617 			frame.flags |= CAN_FRAME_BRS;
618 		}
619 
620 		if (hdr.esi != 0) {
621 			frame.flags |= CAN_FRAME_ESI;
622 		}
623 
624 #ifdef CONFIG_CAN_RX_TIMESTAMP
625 		frame.timestamp = hdr.rxts;
626 #endif /* CONFIG_CAN_RX_TIMESTAMP */
627 
628 		filt_idx = hdr.fidx;
629 
630 		if (hdr.xtd != 0) {
631 			frame.id = hdr.ext_id;
632 			frame.flags |= CAN_FRAME_IDE;
633 			flags = cbs->ext[filt_idx].flags;
634 		} else {
635 			frame.id = hdr.std_id;
636 			flags = cbs->std[filt_idx].flags;
637 		}
638 
		if (((frame.flags & CAN_FRAME_RTR) == 0U && (flags & CAN_FILTER_DATA) == 0U) ||
		    ((frame.flags & CAN_FRAME_RTR) != 0U && (flags & CAN_FILTER_RTR) == 0U)) {
			/* RTR bit does not match filter, drop frame and acknowledge it below */
			goto ack;
		}
648 
		if (((frame.flags & CAN_FRAME_FDF) != 0U && (flags & CAN_FILTER_FDF) == 0U) ||
		    ((frame.flags & CAN_FRAME_FDF) == 0U && (flags & CAN_FILTER_FDF) != 0U)) {
			/* FDF bit does not match filter, drop frame and acknowledge it below */
			goto ack;
		}
658 
659 		data_length = can_dlc_to_bytes(frame.dlc);
660 		if (data_length <= sizeof(frame.data)) {
661 			err = can_mcan_read_mram(dev, fifo_offset + get_idx *
662 						 sizeof(struct can_mcan_rx_fifo) +
663 						 offsetof(struct can_mcan_rx_fifo, data_32),
664 						 &frame.data_32,
665 						 ROUND_UP(data_length, sizeof(uint32_t)));
666 			if (err != 0) {
667 				LOG_ERR("failed to read Rx FIFO data (err %d)", err);
668 				return;
669 			}
670 
671 			if ((frame.flags & CAN_FRAME_IDE) != 0) {
672 				LOG_DBG("Frame on filter %d, ID: 0x%x",
673 					filt_idx + cbs->num_std, frame.id);
				__ASSERT_NO_MSG(filt_idx < cbs->num_ext);
675 				cb = cbs->ext[filt_idx].function;
676 				user_data = cbs->ext[filt_idx].user_data;
677 			} else {
678 				LOG_DBG("Frame on filter %d, ID: 0x%x", filt_idx, frame.id);
				__ASSERT_NO_MSG(filt_idx < cbs->num_std);
680 				cb = cbs->std[filt_idx].function;
681 				user_data = cbs->std[filt_idx].user_data;
682 			}
683 
684 			if (cb) {
685 				cb(dev, &frame, user_data);
686 			} else {
687 				LOG_DBG("cb missing");
688 			}
689 		} else {
690 			LOG_ERR("Frame is too big");
691 		}
692 
693 ack:
694 		err = can_mcan_write_reg(dev, fifo_ack_reg, get_idx);
695 		if (err != 0) {
696 			return;
697 		}
698 
699 		err = can_mcan_read_reg(dev, fifo_status_reg, &fifo_status);
700 		if (err != 0) {
701 			return;
702 		}
703 	}
704 }
705 
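/* Interrupt line 1 service routine: new messages in RX FIFO 0/1 and message lost conditions */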
void can_mcan_line_1_isr(const struct device *dev)
707 {
708 	const struct can_mcan_config *config = dev->config;
709 	const uint32_t events =
710 		CAN_MCAN_IR_RF0N | CAN_MCAN_IR_RF1N | CAN_MCAN_IR_RF0L | CAN_MCAN_IR_RF1L;
711 	uint32_t ir;
712 	int err;
713 
714 	err = can_mcan_read_reg(dev, CAN_MCAN_IR, &ir);
715 	if (err != 0) {
716 		return;
717 	}
718 
719 	while ((ir & events) != 0U) {
720 		err = can_mcan_write_reg(dev, CAN_MCAN_IR, events & ir);
721 		if (err != 0) {
722 			return;
723 		}
724 
725 		if ((ir & CAN_MCAN_IR_RF0N) != 0U) {
726 			LOG_DBG("RX FIFO0 INT");
727 			can_mcan_get_message(dev, config->mram_offsets[CAN_MCAN_MRAM_CFG_RX_FIFO0],
728 					     CAN_MCAN_RXF0S, CAN_MCAN_RXF0A);
729 		}
730 
731 		if ((ir & CAN_MCAN_IR_RF1N) != 0U) {
732 			LOG_DBG("RX FIFO1 INT");
733 			can_mcan_get_message(dev, config->mram_offsets[CAN_MCAN_MRAM_CFG_RX_FIFO1],
734 					     CAN_MCAN_RXF1S, CAN_MCAN_RXF1A);
735 		}
736 
737 		if ((ir & CAN_MCAN_IR_RF0L) != 0U) {
738 			LOG_ERR("Message lost on FIFO0");
739 		}
740 
741 		if ((ir & CAN_MCAN_IR_RF1L) != 0U) {
742 			LOG_ERR("Message lost on FIFO1");
743 		}
744 
745 		err = can_mcan_read_reg(dev, CAN_MCAN_IR, &ir);
746 		if (err != 0) {
747 			return;
748 		}
749 	}
750 }
751 
int can_mcan_get_state(const struct device *dev, enum can_state *state,
		       struct can_bus_err_cnt *err_cnt)
754 {
755 	struct can_mcan_data *data = dev->data;
756 	uint32_t reg;
757 	int err;
758 
759 	if (state != NULL) {
760 		err = can_mcan_read_reg(dev, CAN_MCAN_PSR, &reg);
761 		if (err != 0) {
762 			return err;
763 		}
764 
765 		if (!data->started) {
766 			*state = CAN_STATE_STOPPED;
767 		} else if ((reg & CAN_MCAN_PSR_BO) != 0U) {
768 			*state = CAN_STATE_BUS_OFF;
769 		} else if ((reg & CAN_MCAN_PSR_EP) != 0U) {
770 			*state = CAN_STATE_ERROR_PASSIVE;
771 		} else if ((reg & CAN_MCAN_PSR_EW) != 0U) {
772 			*state = CAN_STATE_ERROR_WARNING;
773 		} else {
774 			*state = CAN_STATE_ERROR_ACTIVE;
775 		}
776 	}
777 
778 	if (err_cnt != NULL) {
779 		err = can_mcan_read_reg(dev, CAN_MCAN_ECR, &reg);
780 		if (err != 0) {
781 			return err;
782 		}
783 
784 		err_cnt->tx_err_cnt = FIELD_GET(CAN_MCAN_ECR_TEC, reg);
785 		err_cnt->rx_err_cnt = FIELD_GET(CAN_MCAN_ECR_REC, reg);
786 	}
787 
788 	return 0;
789 }
790 
791 #ifndef CONFIG_CAN_AUTO_BUS_OFF_RECOVERY
int can_mcan_recover(const struct device *dev, k_timeout_t timeout)
793 {
794 	struct can_mcan_data *data = dev->data;
795 
796 	if (!data->started) {
797 		return -ENETDOWN;
798 	}
799 
800 	return can_mcan_leave_init_mode(dev, timeout);
801 }
802 #endif /* CONFIG_CAN_AUTO_BUS_OFF_RECOVERY */
803 
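/*
 * Queue a frame for transmission: validate flags and DLC, wait for a free TX buffer,
 * write the header and data to Message RAM and request transmission via TXBAR. The
 * supplied callback is invoked from the TX event handler once the frame has been sent.
 */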
int can_mcan_send(const struct device *dev, const struct can_frame *frame, k_timeout_t timeout,
		  can_tx_callback_t callback, void *user_data)
806 {
807 	const struct can_mcan_config *config = dev->config;
808 	const struct can_mcan_callbacks *cbs = config->callbacks;
809 	struct can_mcan_data *data = dev->data;
810 	size_t data_length = can_dlc_to_bytes(frame->dlc);
811 	struct can_mcan_tx_buffer_hdr tx_hdr = {
812 		.rtr = (frame->flags & CAN_FRAME_RTR) != 0U ? 1U : 0U,
813 		.xtd = (frame->flags & CAN_FRAME_IDE) != 0U ? 1U : 0U,
814 		.esi = 0U,
815 		.dlc = frame->dlc,
816 #ifdef CONFIG_CAN_FD_MODE
817 		.fdf = (frame->flags & CAN_FRAME_FDF) != 0U ? 1U : 0U,
818 		.brs = (frame->flags & CAN_FRAME_BRS) != 0U ? 1U : 0U,
819 #else  /* CONFIG_CAN_FD_MODE */
820 		.fdf = 0U,
821 		.brs = 0U,
822 #endif /* !CONFIG_CAN_FD_MODE */
823 		.efc = 1U,
824 	};
825 	uint32_t put_idx;
826 	uint32_t reg;
827 	int err;
828 
	LOG_DBG("Sending %zu bytes. Id: 0x%x, ID type: %s %s %s %s", data_length, frame->id,
830 		(frame->flags & CAN_FRAME_IDE) != 0U ? "extended" : "standard",
831 		(frame->flags & CAN_FRAME_RTR) != 0U ? "RTR" : "",
832 		(frame->flags & CAN_FRAME_FDF) != 0U ? "FD frame" : "",
833 		(frame->flags & CAN_FRAME_BRS) != 0U ? "BRS" : "");
834 
835 	__ASSERT_NO_MSG(callback != NULL);
836 
837 #ifdef CONFIG_CAN_FD_MODE
838 	if ((frame->flags & ~(CAN_FRAME_IDE | CAN_FRAME_RTR | CAN_FRAME_FDF | CAN_FRAME_BRS)) !=
839 	    0) {
840 		LOG_ERR("unsupported CAN frame flags 0x%02x", frame->flags);
841 		return -ENOTSUP;
842 	}
843 
844 	if (!data->fd && ((frame->flags & (CAN_FRAME_FDF | CAN_FRAME_BRS)) != 0U)) {
845 		LOG_ERR("CAN-FD format not supported in non-FD mode");
846 		return -ENOTSUP;
847 	}
848 #else  /* CONFIG_CAN_FD_MODE */
849 	if ((frame->flags & ~(CAN_FRAME_IDE | CAN_FRAME_RTR)) != 0U) {
850 		LOG_ERR("unsupported CAN frame flags 0x%02x", frame->flags);
851 		return -ENOTSUP;
852 	}
853 #endif /* !CONFIG_CAN_FD_MODE */
854 
855 	if (data_length > sizeof(frame->data)) {
856 		LOG_ERR("data length (%zu) > max frame data length (%zu)", data_length,
857 			sizeof(frame->data));
858 		return -EINVAL;
859 	}
860 
861 	if ((frame->flags & CAN_FRAME_FDF) != 0U) {
862 		if (frame->dlc > CANFD_MAX_DLC) {
863 			LOG_ERR("DLC of %d for CAN-FD format frame", frame->dlc);
864 			return -EINVAL;
865 		}
866 	} else {
867 		if (frame->dlc > CAN_MAX_DLC) {
868 			LOG_ERR("DLC of %d for non-FD format frame", frame->dlc);
869 			return -EINVAL;
870 		}
871 	}
872 
873 	if (!data->started) {
874 		return -ENETDOWN;
875 	}
876 
877 	err = can_mcan_read_reg(dev, CAN_MCAN_PSR, &reg);
878 	if (err != 0) {
879 		return err;
880 	}
881 
882 	if ((reg & CAN_MCAN_PSR_BO) != 0U) {
883 		return -ENETUNREACH;
884 	}
885 
886 	err = k_sem_take(&data->tx_sem, timeout);
887 	if (err != 0) {
888 		return -EAGAIN;
889 	}
890 
891 	err = can_mcan_read_reg(dev, CAN_MCAN_TXFQS, &reg);
	if (err != 0) {
		k_sem_give(&data->tx_sem);
		return err;
	}
895 
896 	__ASSERT_NO_MSG((reg & CAN_MCAN_TXFQS_TFQF) != CAN_MCAN_TXFQS_TFQF);
897 
898 	k_mutex_lock(&data->tx_mtx, K_FOREVER);
899 
900 	put_idx = FIELD_GET(CAN_MCAN_TXFQS_TFQPI, reg);
901 	tx_hdr.mm = put_idx;
902 
903 	if ((frame->flags & CAN_FRAME_IDE) != 0U) {
904 		tx_hdr.ext_id = frame->id;
905 	} else {
906 		tx_hdr.std_id = frame->id & CAN_STD_ID_MASK;
907 	}
908 
909 	err = can_mcan_write_mram(dev, config->mram_offsets[CAN_MCAN_MRAM_CFG_TX_BUFFER] + put_idx *
910 				  sizeof(struct can_mcan_tx_buffer) +
911 				  offsetof(struct can_mcan_tx_buffer, hdr),
912 				  &tx_hdr, sizeof(struct can_mcan_tx_buffer_hdr));
913 	if (err != 0) {
914 		LOG_ERR("failed to write Tx Buffer header (err %d)", err);
		k_sem_give(&data->tx_sem);
		goto unlock;
916 	}
917 
918 	err = can_mcan_write_mram(dev, config->mram_offsets[CAN_MCAN_MRAM_CFG_TX_BUFFER] + put_idx *
919 				  sizeof(struct can_mcan_tx_buffer) +
920 				  offsetof(struct can_mcan_tx_buffer, data_32),
921 				  &frame->data_32, ROUND_UP(data_length, sizeof(uint32_t)));
922 	if (err != 0) {
923 		LOG_ERR("failed to write Tx Buffer data (err %d)", err);
		k_sem_give(&data->tx_sem);
		goto unlock;
925 	}
926 
	__ASSERT_NO_MSG(put_idx < cbs->num_tx);
928 	cbs->tx[put_idx].function = callback;
929 	cbs->tx[put_idx].user_data = user_data;
930 
931 	err = can_mcan_write_reg(dev, CAN_MCAN_TXBAR, BIT(put_idx));
932 	if (err != 0) {
933 		goto unlock;
934 	}
935 
936 unlock:
937 	k_mutex_unlock(&data->tx_mtx);
938 
939 	return err;
940 }
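
/*
 * Example (illustrative only, not part of this driver): an application queues a classic
 * CAN frame through the generic Zephyr CAN API, which dispatches to can_mcan_send() for
 * M_CAN based controllers. The "can0" node label is an assumption for this sketch.
 *
 *   const struct device *can_dev = DEVICE_DT_GET(DT_NODELABEL(can0));
 *   struct can_frame frame = {
 *           .id = 0x123,
 *           .dlc = 2,
 *           .data = {0xAB, 0xCD},
 *   };
 *
 *   if (can_send(can_dev, &frame, K_MSEC(100), NULL, NULL) != 0) {
 *           // handle error
 *   }
 */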
941 
int can_mcan_get_max_filters(const struct device *dev, bool ide)
943 {
944 	const struct can_mcan_config *config = dev->config;
945 	const struct can_mcan_callbacks *cbs = config->callbacks;
946 
947 	if (ide) {
948 		return cbs->num_ext;
949 	} else {
950 		return cbs->num_std;
951 	}
952 }
953 
954 /* Use masked configuration only for simplicity. If someone needs more than
955  * 28 standard filters, dual mode needs to be implemented.
956  * Dual mode gets tricky, because we can only activate both filters.
957  * If one of the IDs is not used anymore, we would need to mark it as unused.
958  */
int can_mcan_add_rx_filter_std(const struct device *dev, can_rx_callback_t callback,
			       void *user_data, const struct can_filter *filter)
961 {
962 	const struct can_mcan_config *config = dev->config;
963 	const struct can_mcan_callbacks *cbs = config->callbacks;
964 	struct can_mcan_data *data = dev->data;
965 	struct can_mcan_std_filter filter_element = {
966 		.sfid1 = filter->id,
967 		.sfid2 = filter->mask,
968 		.sft = CAN_MCAN_SFT_CLASSIC
969 	};
970 	int filter_id = -ENOSPC;
971 	int err;
972 	int i;
973 
974 	k_mutex_lock(&data->lock, K_FOREVER);
975 
976 	for (i = 0; i < cbs->num_std; i++) {
977 		if (cbs->std[i].function == NULL) {
978 			filter_id = i;
979 			break;
980 		}
981 	}
982 
983 	if (filter_id == -ENOSPC) {
984 		LOG_WRN("No free standard id filter left");
985 		k_mutex_unlock(&data->lock);
986 		return -ENOSPC;
987 	}
988 
989 	/* TODO proper fifo balancing */
990 	filter_element.sfec = filter_id & 0x01 ? CAN_MCAN_XFEC_FIFO1 : CAN_MCAN_XFEC_FIFO0;
991 
992 	err = can_mcan_write_mram(dev, config->mram_offsets[CAN_MCAN_MRAM_CFG_STD_FILTER] +
993 				  filter_id * sizeof(struct can_mcan_std_filter),
994 				  &filter_element, sizeof(filter_element));
	if (err != 0) {
		LOG_ERR("failed to write std filter element (err %d)", err);
		k_mutex_unlock(&data->lock);
		return err;
	}
999 
1000 	k_mutex_unlock(&data->lock);
1001 
1002 	LOG_DBG("Attached std filter at %d", filter_id);
1003 
	__ASSERT_NO_MSG(filter_id < cbs->num_std);
1005 	cbs->std[filter_id].function = callback;
1006 	cbs->std[filter_id].user_data = user_data;
1007 	cbs->std[filter_id].flags = filter->flags;
1008 
1009 	return filter_id;
1010 }
1011 
static int can_mcan_add_rx_filter_ext(const struct device *dev, can_rx_callback_t callback,
				      void *user_data, const struct can_filter *filter)
1014 {
1015 	const struct can_mcan_config *config = dev->config;
1016 	const struct can_mcan_callbacks *cbs = config->callbacks;
1017 	struct can_mcan_data *data = dev->data;
1018 	struct can_mcan_ext_filter filter_element = {
1019 		.efid2 = filter->mask,
1020 		.efid1 = filter->id,
1021 		.eft = CAN_MCAN_EFT_CLASSIC
1022 	};
1023 	int filter_id = -ENOSPC;
1024 	int err;
1025 	int i;
1026 
1027 	k_mutex_lock(&data->lock, K_FOREVER);
1028 
1029 	for (i = 0; i < cbs->num_ext; i++) {
1030 		if (cbs->ext[i].function == NULL) {
1031 			filter_id = i;
1032 			break;
1033 		}
1034 	}
1035 
1036 	if (filter_id == -ENOSPC) {
1037 		LOG_WRN("No free extended id filter left");
1038 		k_mutex_unlock(&data->lock);
1039 		return -ENOSPC;
1040 	}
1041 
1042 	/* TODO proper fifo balancing */
1043 	filter_element.efec = filter_id & 0x01 ? CAN_MCAN_XFEC_FIFO1 : CAN_MCAN_XFEC_FIFO0;
1044 
1045 	err = can_mcan_write_mram(dev, config->mram_offsets[CAN_MCAN_MRAM_CFG_EXT_FILTER] +
1046 				  filter_id * sizeof(struct can_mcan_ext_filter),
1047 				  &filter_element, sizeof(filter_element));
	if (err != 0) {
		LOG_ERR("failed to write ext filter element (err %d)", err);
		k_mutex_unlock(&data->lock);
		return err;
	}
1052 
1053 	k_mutex_unlock(&data->lock);
1054 
1055 	LOG_DBG("Attached ext filter at %d", filter_id);
1056 
	__ASSERT_NO_MSG(filter_id < cbs->num_ext);
1058 	cbs->ext[filter_id].function = callback;
1059 	cbs->ext[filter_id].user_data = user_data;
1060 	cbs->ext[filter_id].flags = filter->flags;
1061 
1062 	return filter_id;
1063 }
1064 
int can_mcan_add_rx_filter(const struct device *dev, can_rx_callback_t callback, void *user_data,
			   const struct can_filter *filter)
1067 {
1068 	const struct can_mcan_config *config = dev->config;
1069 	const struct can_mcan_callbacks *cbs = config->callbacks;
1070 	int filter_id;
1071 
1072 	if (callback == NULL) {
1073 		return -EINVAL;
1074 	}
1075 
1076 #ifdef CONFIG_CAN_FD_MODE
1077 	if ((filter->flags &
1078 	     ~(CAN_FILTER_IDE | CAN_FILTER_DATA | CAN_FILTER_RTR | CAN_FILTER_FDF)) != 0U) {
1079 #else  /* CONFIG_CAN_FD_MODE */
1080 	if ((filter->flags & ~(CAN_FILTER_IDE | CAN_FILTER_DATA | CAN_FILTER_RTR)) != 0U) {
1081 #endif /* !CONFIG_CAN_FD_MODE */
1082 		LOG_ERR("unsupported CAN filter flags 0x%02x", filter->flags);
1083 		return -ENOTSUP;
1084 	}
1085 
1086 	if ((filter->flags & CAN_FILTER_IDE) != 0U) {
1087 		filter_id = can_mcan_add_rx_filter_ext(dev, callback, user_data, filter);
1088 		if (filter_id >= 0) {
1089 			filter_id += cbs->num_std;
1090 		}
1091 	} else {
1092 		filter_id = can_mcan_add_rx_filter_std(dev, callback, user_data, filter);
1093 	}
1094 
1095 	return filter_id;
1096 }
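
/*
 * Example (illustrative only, not part of this driver): registering an RX callback for
 * a standard ID through the generic Zephyr CAN API. The callback, filter values and the
 * can_dev device pointer are placeholders.
 *
 *   static void rx_cb(const struct device *dev, struct can_frame *frame, void *user_data)
 *   {
 *           // process frame
 *   }
 *
 *   const struct can_filter filter = {
 *           .flags = CAN_FILTER_DATA,
 *           .id = 0x123,
 *           .mask = CAN_STD_ID_MASK,
 *   };
 *
 *   int filter_id = can_add_rx_filter(can_dev, rx_cb, NULL, &filter);
 */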
1097 
1098 void can_mcan_remove_rx_filter(const struct device *dev, int filter_id)
1099 {
1100 	const struct can_mcan_config *config = dev->config;
1101 	const struct can_mcan_callbacks *cbs = config->callbacks;
1102 	struct can_mcan_data *data = dev->data;
1103 	int err;
1104 
1105 	k_mutex_lock(&data->lock, K_FOREVER);
1106 
1107 	if (filter_id >= cbs->num_std) {
1108 		filter_id -= cbs->num_std;
1109 		if (filter_id >= cbs->num_ext) {
1110 			LOG_ERR("Wrong filter id");
1111 			k_mutex_unlock(&data->lock);
1112 			return;
1113 		}
1114 
1115 		cbs->ext[filter_id].function = NULL;
1116 		cbs->ext[filter_id].user_data = NULL;
1117 
1118 		err = can_mcan_clear_mram(dev, config->mram_offsets[CAN_MCAN_MRAM_CFG_EXT_FILTER] +
1119 					filter_id * sizeof(struct can_mcan_ext_filter),
1120 					sizeof(struct can_mcan_ext_filter));
1121 		if (err != 0) {
1122 			LOG_ERR("failed to clear ext filter element (err %d)", err);
1123 		}
1124 	} else {
1125 		cbs->std[filter_id].function = NULL;
1126 		cbs->std[filter_id].user_data = NULL;
1127 
1128 		err = can_mcan_clear_mram(dev, config->mram_offsets[CAN_MCAN_MRAM_CFG_STD_FILTER] +
1129 					filter_id * sizeof(struct can_mcan_std_filter),
1130 					sizeof(struct can_mcan_std_filter));
1131 		if (err != 0) {
1132 			LOG_ERR("failed to clear std filter element (err %d)", err);
1133 		}
1134 	}
1135 
1136 	k_mutex_unlock(&data->lock);
1137 }
1138 
1139 void can_mcan_set_state_change_callback(const struct device *dev,
1140 					can_state_change_callback_t callback, void *user_data)
1141 {
1142 	struct can_mcan_data *data = dev->data;
1143 
1144 	data->state_change_cb = callback;
1145 	data->state_change_cb_data = user_data;
1146 }
1147 
1148 int can_mcan_get_max_bitrate(const struct device *dev, uint32_t *max_bitrate)
1149 {
1150 	const struct can_mcan_config *config = dev->config;
1151 
1152 	*max_bitrate = config->max_bitrate;
1153 
1154 	return 0;
1155 }
1156 
1157 /* helper function allowing mcan drivers without access to private mcan
1158  * definitions to set CCCR_CCE, which might be needed to disable write
1159  * protection for some registers.
1160  */
1161 void can_mcan_enable_configuration_change(const struct device *dev)
1162 {
1163 	struct can_mcan_data *data = dev->data;
1164 	uint32_t cccr;
1165 	int err;
1166 
1167 	k_mutex_lock(&data->lock, K_FOREVER);
1168 
1169 	err = can_mcan_read_reg(dev, CAN_MCAN_CCCR, &cccr);
1170 	if (err != 0) {
1171 		goto unlock;
1172 	}
1173 
1174 	cccr |= CAN_MCAN_CCCR_CCE;
1175 
1176 	err = can_mcan_write_reg(dev, CAN_MCAN_CCCR, cccr);
1177 	if (err != 0) {
1178 		goto unlock;
1179 	}
1180 
1181 unlock:
1182 	k_mutex_unlock(&data->lock);
1183 }
1184 
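/*
 * Program the Message RAM layout: start addresses and element counts for the
 * standard/extended filter, RX FIFO 0/1, RX buffer, TX event FIFO and TX buffer
 * sections, plus 64-byte data field sizes for the RX and TX elements.
 */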
1185 int can_mcan_configure_mram(const struct device *dev, uintptr_t mrba, uintptr_t mram)
1186 {
1187 	const struct can_mcan_config *config = dev->config;
1188 	uint32_t addr;
1189 	uint32_t reg;
1190 	int err;
1191 
1192 	err = can_mcan_exit_sleep_mode(dev);
1193 	if (err != 0) {
1194 		LOG_ERR("Failed to exit sleep mode");
1195 		return -EIO;
1196 	}
1197 
1198 	err = can_mcan_enter_init_mode(dev, K_MSEC(CAN_INIT_TIMEOUT_MS));
1199 	if (err != 0) {
1200 		LOG_ERR("Failed to enter init mode");
1201 		return -EIO;
1202 	}
1203 
1204 	can_mcan_enable_configuration_change(dev);
1205 
1206 	addr = mram - mrba + config->mram_offsets[CAN_MCAN_MRAM_CFG_STD_FILTER];
1207 	reg = (addr & CAN_MCAN_SIDFC_FLSSA) | FIELD_PREP(CAN_MCAN_SIDFC_LSS,
1208 		config->mram_elements[CAN_MCAN_MRAM_CFG_STD_FILTER]);
1209 	err = can_mcan_write_reg(dev, CAN_MCAN_SIDFC, reg);
1210 	if (err != 0) {
1211 		return err;
1212 	}
1213 
1214 	addr = mram - mrba + config->mram_offsets[CAN_MCAN_MRAM_CFG_EXT_FILTER];
1215 	reg = (addr & CAN_MCAN_XIDFC_FLESA) | FIELD_PREP(CAN_MCAN_XIDFC_LSS,
1216 		config->mram_elements[CAN_MCAN_MRAM_CFG_EXT_FILTER]);
1217 	err = can_mcan_write_reg(dev, CAN_MCAN_XIDFC, reg);
1218 	if (err != 0) {
1219 		return err;
1220 	}
1221 
1222 	addr = mram - mrba + config->mram_offsets[CAN_MCAN_MRAM_CFG_RX_FIFO0];
1223 	reg = (addr & CAN_MCAN_RXF0C_F0SA) | FIELD_PREP(CAN_MCAN_RXF0C_F0S,
1224 		config->mram_elements[CAN_MCAN_MRAM_CFG_RX_FIFO0]);
1225 	err = can_mcan_write_reg(dev, CAN_MCAN_RXF0C, reg);
1226 	if (err != 0) {
1227 		return err;
1228 	}
1229 
1230 	addr = mram - mrba + config->mram_offsets[CAN_MCAN_MRAM_CFG_RX_FIFO1];
1231 	reg = (addr & CAN_MCAN_RXF1C_F1SA) | FIELD_PREP(CAN_MCAN_RXF1C_F1S,
1232 		config->mram_elements[CAN_MCAN_MRAM_CFG_RX_FIFO1]);
1233 	err = can_mcan_write_reg(dev, CAN_MCAN_RXF1C, reg);
1234 	if (err != 0) {
1235 		return err;
1236 	}
1237 
1238 	addr = mram - mrba + config->mram_offsets[CAN_MCAN_MRAM_CFG_RX_BUFFER];
1239 	reg = (addr & CAN_MCAN_RXBC_RBSA);
1240 	err = can_mcan_write_reg(dev, CAN_MCAN_RXBC, reg);
1241 	if (err != 0) {
1242 		return err;
1243 	}
1244 
1245 	addr = mram - mrba + config->mram_offsets[CAN_MCAN_MRAM_CFG_TX_EVENT_FIFO];
1246 	reg = (addr & CAN_MCAN_TXEFC_EFSA) | FIELD_PREP(CAN_MCAN_TXEFC_EFS,
1247 		config->mram_elements[CAN_MCAN_MRAM_CFG_TX_EVENT_FIFO]);
1248 	err = can_mcan_write_reg(dev, CAN_MCAN_TXEFC, reg);
1249 	if (err != 0) {
1250 		return err;
1251 	}
1252 
1253 	addr = mram - mrba + config->mram_offsets[CAN_MCAN_MRAM_CFG_TX_BUFFER];
1254 	reg = (addr & CAN_MCAN_TXBC_TBSA) | FIELD_PREP(CAN_MCAN_TXBC_TFQS,
1255 		config->mram_elements[CAN_MCAN_MRAM_CFG_TX_BUFFER]) | CAN_MCAN_TXBC_TFQM;
1256 	err = can_mcan_write_reg(dev, CAN_MCAN_TXBC, reg);
1257 	if (err != 0) {
1258 		return err;
1259 	}
1260 
1261 	/* 64 byte Tx Buffer data fields size */
1262 	reg = CAN_MCAN_TXESC_TBDS;
1263 	err = can_mcan_write_reg(dev, CAN_MCAN_TXESC, reg);
1264 	if (err != 0) {
1265 		return err;
1266 	}
1267 
1268 	/* 64 byte Rx Buffer/FIFO1/FIFO0 data fields size */
1269 	reg = CAN_MCAN_RXESC_RBDS | CAN_MCAN_RXESC_F1DS | CAN_MCAN_RXESC_F0DS;
1270 	err = can_mcan_write_reg(dev, CAN_MCAN_RXESC, reg);
1271 	if (err != 0) {
1272 		return err;
1273 	}
1274 
1275 	return 0;
1276 }
1277 
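/*
 * Common M_CAN initialization: verify the front-end ops and callback tables, initialize
 * locks and the TX semaphore, bring the controller out of sleep into initialization
 * mode, reset the mode bits, configure transmitter delay compensation (when enabled),
 * global filtering, bit timing and interrupts, and clear the Message RAM.
 */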
1278 int can_mcan_init(const struct device *dev)
1279 {
1280 	const struct can_mcan_config *config = dev->config;
1281 	const struct can_mcan_callbacks *cbs = config->callbacks;
1282 	struct can_mcan_data *data = dev->data;
1283 	struct can_timing timing;
1284 #ifdef CONFIG_CAN_FD_MODE
1285 	struct can_timing timing_data;
1286 #endif /* CONFIG_CAN_FD_MODE */
1287 	uint32_t reg;
1288 	int err;
1289 
1290 	__ASSERT_NO_MSG(config->ops->read_reg != NULL);
1291 	__ASSERT_NO_MSG(config->ops->write_reg != NULL);
1292 	__ASSERT_NO_MSG(config->ops->read_mram != NULL);
1293 	__ASSERT_NO_MSG(config->ops->write_mram != NULL);
1294 	__ASSERT_NO_MSG(config->ops->clear_mram != NULL);
1295 	__ASSERT_NO_MSG(config->callbacks != NULL);
1296 
1297 	__ASSERT_NO_MSG(cbs->num_tx <= config->mram_elements[CAN_MCAN_MRAM_CFG_TX_BUFFER]);
1298 	__ASSERT_NO_MSG(cbs->num_std <= config->mram_elements[CAN_MCAN_MRAM_CFG_STD_FILTER]);
1299 	__ASSERT_NO_MSG(cbs->num_ext <= config->mram_elements[CAN_MCAN_MRAM_CFG_EXT_FILTER]);
1300 
1301 	k_mutex_init(&data->lock);
1302 	k_mutex_init(&data->tx_mtx);
1303 	k_sem_init(&data->tx_sem, cbs->num_tx, cbs->num_tx);
1304 
1305 	if (config->phy != NULL) {
1306 		if (!device_is_ready(config->phy)) {
1307 			LOG_ERR("CAN transceiver not ready");
1308 			return -ENODEV;
1309 		}
1310 	}
1311 
1312 	err = can_mcan_exit_sleep_mode(dev);
1313 	if (err != 0) {
1314 		LOG_ERR("Failed to exit sleep mode");
1315 		return -EIO;
1316 	}
1317 
1318 	err = can_mcan_enter_init_mode(dev, K_MSEC(CAN_INIT_TIMEOUT_MS));
1319 	if (err != 0) {
1320 		LOG_ERR("Failed to enter init mode");
1321 		return -EIO;
1322 	}
1323 
1324 	can_mcan_enable_configuration_change(dev);
1325 
1326 #if CONFIG_CAN_LOG_LEVEL >= LOG_LEVEL_DBG
1327 	err = can_mcan_read_reg(dev, CAN_MCAN_CREL, &reg);
1328 	if (err != 0) {
1329 		return -EIO;
1330 	}
1331 
1332 	LOG_DBG("IP rel: %lu.%lu.%lu %02lu.%lu.%lu", FIELD_GET(CAN_MCAN_CREL_REL, reg),
1333 		FIELD_GET(CAN_MCAN_CREL_STEP, reg), FIELD_GET(CAN_MCAN_CREL_SUBSTEP, reg),
1334 		FIELD_GET(CAN_MCAN_CREL_YEAR, reg), FIELD_GET(CAN_MCAN_CREL_MON, reg),
1335 		FIELD_GET(CAN_MCAN_CREL_DAY, reg));
1336 #endif /* CONFIG_CAN_LOG_LEVEL >= LOG_LEVEL_DBG */
1337 
1338 	err = can_mcan_read_reg(dev, CAN_MCAN_CCCR, &reg);
1339 	if (err != 0) {
1340 		return err;
1341 	}
1342 
1343 	reg &= ~(CAN_MCAN_CCCR_FDOE | CAN_MCAN_CCCR_BRSE | CAN_MCAN_CCCR_TEST | CAN_MCAN_CCCR_MON |
1344 		 CAN_MCAN_CCCR_ASM);
1345 
1346 	err = can_mcan_write_reg(dev, CAN_MCAN_CCCR, reg);
1347 	if (err != 0) {
1348 		return err;
1349 	}
1350 
1351 	err = can_mcan_read_reg(dev, CAN_MCAN_TEST, &reg);
1352 	if (err != 0) {
1353 		return err;
1354 	}
1355 
1356 	reg &= ~(CAN_MCAN_TEST_LBCK);
1357 
1358 	err = can_mcan_write_reg(dev, CAN_MCAN_TEST, reg);
1359 	if (err != 0) {
1360 		return err;
1361 	}
1362 
1363 #if defined(CONFIG_CAN_DELAY_COMP) && defined(CONFIG_CAN_FD_MODE)
1364 	err = can_mcan_read_reg(dev, CAN_MCAN_DBTP, &reg);
1365 	if (err != 0) {
1366 		return err;
1367 	}
1368 
1369 	reg |= CAN_MCAN_DBTP_TDC;
1370 
1371 	err = can_mcan_write_reg(dev, CAN_MCAN_DBTP, reg);
1372 	if (err != 0) {
1373 		return err;
1374 	}
1375 
1376 	err = can_mcan_read_reg(dev, CAN_MCAN_TDCR, &reg);
1377 	if (err != 0) {
1378 		return err;
1379 	}
1380 
1381 	reg |= FIELD_PREP(CAN_MCAN_TDCR_TDCO, config->tx_delay_comp_offset);
1382 
1383 	err = can_mcan_write_reg(dev, CAN_MCAN_TDCR, reg);
1384 	if (err != 0) {
1385 		return err;
1386 	}
1387 #endif /* defined(CONFIG_CAN_DELAY_COMP) && defined(CONFIG_CAN_FD_MODE) */
1388 
1389 	err = can_mcan_read_reg(dev, CAN_MCAN_GFC, &reg);
1390 	if (err != 0) {
1391 		return err;
1392 	}
1393 
1394 	reg |= FIELD_PREP(CAN_MCAN_GFC_ANFE, 0x2) | FIELD_PREP(CAN_MCAN_GFC_ANFS, 0x2);
1395 
1396 	err = can_mcan_write_reg(dev, CAN_MCAN_GFC, reg);
1397 	if (err != 0) {
1398 		return err;
1399 	}
1400 
1401 	if (config->sample_point) {
1402 		err = can_calc_timing(dev, &timing, config->bus_speed, config->sample_point);
1403 		if (err == -EINVAL) {
1404 			LOG_ERR("Can't find timing for given param");
1405 			return -EIO;
1406 		}
1407 		LOG_DBG("Presc: %d, TS1: %d, TS2: %d", timing.prescaler, timing.phase_seg1,
1408 			timing.phase_seg2);
1409 		LOG_DBG("Sample-point err : %d", err);
1410 	} else if (config->prop_ts1) {
1411 		timing.prop_seg = 0U;
1412 		timing.phase_seg1 = config->prop_ts1;
1413 		timing.phase_seg2 = config->ts2;
1414 		err = can_calc_prescaler(dev, &timing, config->bus_speed);
1415 		if (err != 0) {
1416 			LOG_WRN("Bitrate error: %d", err);
1417 		}
1418 	}
1419 #ifdef CONFIG_CAN_FD_MODE
1420 	if (config->sample_point_data) {
1421 		err = can_calc_timing_data(dev, &timing_data, config->bus_speed_data,
1422 					   config->sample_point_data);
1423 		if (err == -EINVAL) {
1424 			LOG_ERR("Can't find timing for given dataphase param");
1425 			return -EIO;
1426 		}
1427 
1428 		LOG_DBG("Sample-point err data phase: %d", err);
1429 	} else if (config->prop_ts1_data) {
1430 		timing_data.prop_seg = 0U;
1431 		timing_data.phase_seg1 = config->prop_ts1_data;
1432 		timing_data.phase_seg2 = config->ts2_data;
1433 		err = can_calc_prescaler(dev, &timing_data, config->bus_speed_data);
1434 		if (err != 0) {
1435 			LOG_WRN("Dataphase bitrate error: %d", err);
1436 		}
1437 	}
1438 #endif /* CONFIG_CAN_FD_MODE */
1439 
1440 	timing.sjw = config->sjw;
1441 	can_mcan_set_timing(dev, &timing);
1442 
1443 #ifdef CONFIG_CAN_FD_MODE
1444 	timing_data.sjw = config->sjw_data;
1445 	can_mcan_set_timing_data(dev, &timing_data);
1446 #endif /* CONFIG_CAN_FD_MODE */
1447 
1448 	reg = CAN_MCAN_IE_BOE | CAN_MCAN_IE_EWE | CAN_MCAN_IE_EPE | CAN_MCAN_IE_MRAFE |
1449 	      CAN_MCAN_IE_TEFLE | CAN_MCAN_IE_TEFNE | CAN_MCAN_IE_RF0NE | CAN_MCAN_IE_RF1NE |
1450 	      CAN_MCAN_IE_RF0LE | CAN_MCAN_IE_RF1LE;
1451 
1452 	err = can_mcan_write_reg(dev, CAN_MCAN_IE, reg);
1453 	if (err != 0) {
1454 		return err;
1455 	}
1456 
1457 	reg = CAN_MCAN_ILS_RF0NL | CAN_MCAN_ILS_RF1NL;
1458 	err = can_mcan_write_reg(dev, CAN_MCAN_ILS, reg);
1459 	if (err != 0) {
1460 		return err;
1461 	}
1462 
1463 	reg = CAN_MCAN_ILE_EINT0 | CAN_MCAN_ILE_EINT1;
1464 	err = can_mcan_write_reg(dev, CAN_MCAN_ILE, reg);
1465 	if (err != 0) {
1466 		return err;
1467 	}
1468 
	/* Interrupt on every TX FIFO element */
1470 	reg = CAN_MCAN_TXBTIE_TIE;
1471 	err = can_mcan_write_reg(dev, CAN_MCAN_TXBTIE, reg);
1472 	if (err != 0) {
1473 		return err;
1474 	}
1475 
1476 	return can_mcan_clear_mram(dev, 0, config->mram_size);
1477 }
1478