1 /*
2  * Copyright (c) 2020 Abram Early
3  * Copyright (c) 2023 Andriy Gelman
4  *
5  * SPDX-License-Identifier: Apache-2.0
6  */
7 
8 #define DT_DRV_COMPAT microchip_mcp251xfd
9 
10 #include "can_mcp251xfd.h"
11 
12 #include <zephyr/device.h>
13 #include <zephyr/drivers/can/transceiver.h>
14 #include <zephyr/drivers/clock_control.h>
15 #include <zephyr/kernel.h>
16 #include <zephyr/sys/byteorder.h>
17 #include <zephyr/sys/crc.h>
18 
19 #include <zephyr/logging/log.h>
20 LOG_MODULE_REGISTER(can_mcp251xfd, CONFIG_CAN_LOG_LEVEL);
21 
mcp251xfd_canframe_to_txobj(const struct can_frame * src,int mailbox_idx,struct mcp251xfd_txobj * dst)22 static void mcp251xfd_canframe_to_txobj(const struct can_frame *src, int mailbox_idx,
23 					struct mcp251xfd_txobj *dst)
24 {
25 	memset(dst, 0, sizeof(*dst));
26 
27 	if ((src->flags & CAN_FRAME_IDE) != 0) {
28 		dst->id = FIELD_PREP(MCP251XFD_OBJ_ID_SID_MASK, src->id >> 18);
29 		dst->id |= FIELD_PREP(MCP251XFD_OBJ_ID_EID_MASK, src->id);
30 
31 		dst->flags |= MCP251XFD_OBJ_FLAGS_IDE;
32 	} else {
33 		dst->id = FIELD_PREP(MCP251XFD_OBJ_ID_SID_MASK, src->id);
34 	}
35 
36 	if ((src->flags & CAN_FRAME_BRS) != 0) {
37 		dst->flags |= MCP251XFD_OBJ_FLAGS_BRS;
38 	}
39 
40 	dst->flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_DLC_MASK, src->dlc);
41 #if defined(CONFIG_CAN_FD_MODE)
42 	if ((src->flags & CAN_FRAME_FDF) != 0) {
43 		dst->flags |= MCP251XFD_OBJ_FLAGS_FDF;
44 	}
45 #endif
46 	dst->flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_SEQ_MASK, mailbox_idx);
47 
48 	dst->id = sys_cpu_to_le32(dst->id);
49 	dst->flags = sys_cpu_to_le32(dst->flags);
50 
51 	if ((src->flags & CAN_FRAME_RTR) != 0) {
52 		dst->flags |= MCP251XFD_OBJ_FLAGS_RTR;
53 	} else {
54 		memcpy(dst->data, src->data, MIN(can_dlc_to_bytes(src->dlc), CAN_MAX_DLEN));
55 	}
56 }
57 
mcp251xfd_read_reg(const struct device * dev,uint16_t addr,int len)58 static void *mcp251xfd_read_reg(const struct device *dev, uint16_t addr, int len)
59 {
60 	const struct mcp251xfd_config *dev_cfg = dev->config;
61 	struct mcp251xfd_data *dev_data = dev->data;
62 	struct mcp251xfd_spi_data *spi_data = &dev_data->spi_data;
63 	uint16_t spi_cmd;
64 	int ret;
65 
66 	spi_cmd = sys_cpu_to_be16(MCP251XFD_SPI_INSTRUCTION_READ | addr);
67 	memcpy(&spi_data->header[1], &spi_cmd, sizeof(spi_cmd));
68 
69 	struct spi_buf tx_buf = {.buf = &spi_data->header[1], .len = MCP251XFD_SPI_CMD_LEN + len};
70 	struct spi_buf rx_buf = {.buf = &spi_data->header[1], .len = MCP251XFD_SPI_CMD_LEN + len};
71 
72 	const struct spi_buf_set tx = {.buffers = &tx_buf, .count = 1};
73 	const struct spi_buf_set rx = {.buffers = &rx_buf, .count = 1};
74 
75 	ret = spi_transceive_dt(&dev_cfg->bus, &tx, &rx);
76 	if (ret < 0) {
77 		return NULL;
78 	}
79 
80 	return &spi_data->buf[0];
81 }
82 
/*
 * Read @len bytes from register address @addr using the READ_CRC SPI
 * instruction and verify the CRC-16 appended by the chip. The transfer is
 * retried up to CONFIG_CAN_MCP251XFD_READ_CRC_RETRIES additional times on
 * SPI or CRC failure.
 *
 * Returns a pointer to the payload inside the shared SPI scratch buffer
 * (valid until the next SPI transaction), or NULL if every attempt failed.
 */
static void *mcp251xfd_read_crc(const struct device *dev, uint16_t addr, int len)
{
	const struct mcp251xfd_config *dev_cfg = dev->config;
	struct mcp251xfd_data *dev_data = dev->data;
	struct mcp251xfd_spi_data *spi_data = &dev_data->spi_data;
	int num_retries = CONFIG_CAN_MCP251XFD_READ_CRC_RETRIES + 1;
	int ret;

	while (num_retries-- > 0) {
		uint16_t crc_in, crc, spi_cmd;

		/* tx and rx share the buffer: command goes out, reply lands in place */
		struct spi_buf tx_buf = {.buf = &spi_data->header[0],
					 .len = MCP251XFD_SPI_CMD_LEN +
						MCP251XFD_SPI_LEN_FIELD_LEN + len +
						MCP251XFD_SPI_CRC_LEN};

		struct spi_buf rx_buf = {.buf = &spi_data->header[0],
					 .len = MCP251XFD_SPI_CMD_LEN +
						MCP251XFD_SPI_LEN_FIELD_LEN + len +
						MCP251XFD_SPI_CRC_LEN};

		const struct spi_buf_set tx = {.buffers = &tx_buf, .count = 1};
		const struct spi_buf_set rx = {.buffers = &rx_buf, .count = 1};

		spi_cmd = sys_cpu_to_be16(MCP251XFD_SPI_INSTRUCTION_READ_CRC | addr);
		memcpy(&spi_data->header[0], &spi_cmd, sizeof(spi_cmd));
		spi_data->header[2] = len;

		/*
		 * Evaluate initial crc over spi_cmd and length as these value will change after
		 * spi transaction is finished.
		 */
		crc_in = crc16(MCP251XFD_CRC_POLY, MCP251XFD_CRC_SEED,
			       (uint8_t *)(&spi_data->header[0]),
			       MCP251XFD_SPI_CMD_LEN + MCP251XFD_SPI_LEN_FIELD_LEN);

		ret = spi_transceive_dt(&dev_cfg->bus, &tx, &rx);
		if (ret < 0) {
			continue;
		}

		/* Continue crc calculation over the data field and the crc field */
		crc = crc16(MCP251XFD_CRC_POLY, crc_in, &spi_data->buf[0],
			    len + MCP251XFD_SPI_CRC_LEN);
		/* Folding the received CRC in leaves a zero remainder on a valid frame */
		if (crc == 0) {
			return &spi_data->buf[0];
		}
	}

	return NULL;
}
134 
mcp251xfd_get_spi_buf_ptr(const struct device * dev)135 static inline void *mcp251xfd_get_spi_buf_ptr(const struct device *dev)
136 {
137 	struct mcp251xfd_data *dev_data = dev->data;
138 	struct mcp251xfd_spi_data *spi_data = &dev_data->spi_data;
139 
140 	return &spi_data->buf[0];
141 }
142 
mcp251xfd_write(const struct device * dev,uint16_t addr,int len)143 static int mcp251xfd_write(const struct device *dev, uint16_t addr, int len)
144 {
145 	const struct mcp251xfd_config *dev_cfg = dev->config;
146 	struct mcp251xfd_data *dev_data = dev->data;
147 	struct mcp251xfd_spi_data *spi_data = &dev_data->spi_data;
148 	uint16_t spi_cmd;
149 
150 	struct spi_buf tx_buf = {.buf = &spi_data->header[1], .len = MCP251XFD_SPI_CMD_LEN + len};
151 	const struct spi_buf_set tx = {.buffers = &tx_buf, .count = 1};
152 
153 	spi_cmd = sys_cpu_to_be16(MCP251XFD_SPI_INSTRUCTION_WRITE | addr);
154 	memcpy(&spi_data->header[1], &spi_cmd, sizeof(spi_cmd));
155 
156 	return spi_write_dt(&dev_cfg->bus, &tx);
157 }
158 
mcp251xfd_fifo_write(const struct device * dev,int mailbox_idx,const struct can_frame * msg)159 static int mcp251xfd_fifo_write(const struct device *dev, int mailbox_idx,
160 				const struct can_frame *msg)
161 {
162 	uint32_t *regs;
163 	struct mcp251xfd_txobj *txobj;
164 	uint8_t *reg_byte;
165 	uint16_t address;
166 	int tx_len;
167 	int ret;
168 
169 	/* read fifosta and ua at the same time */
170 	regs = mcp251xfd_read_crc(dev, MCP251XFD_REG_TXQSTA, MCP251XFD_REG_SIZE * 2);
171 	if (!regs) {
172 		LOG_ERR("Failed to read 8 bytes from REG_TXQSTA");
173 		return -EINVAL;
174 	}
175 
176 	/* check if fifo is full */
177 	if (!(regs[0] & MCP251XFD_REG_TXQSTA_TXQNIF)) {
178 		return -ENOMEM;
179 	}
180 
181 	address = MCP251XFD_RAM_START_ADDR + regs[1];
182 
183 	txobj = mcp251xfd_get_spi_buf_ptr(dev);
184 	mcp251xfd_canframe_to_txobj(msg, mailbox_idx, txobj);
185 
186 	tx_len = MCP251XFD_OBJ_HEADER_SIZE;
187 	if ((msg->flags & CAN_FRAME_RTR) == 0) {
188 		tx_len += ROUND_UP(can_dlc_to_bytes(msg->dlc), MCP251XFD_RAM_ALIGNMENT);
189 	}
190 
191 	ret = mcp251xfd_write(dev, address, tx_len);
192 	if (ret < 0) {
193 		return ret;
194 	}
195 
196 	reg_byte = mcp251xfd_get_spi_buf_ptr(dev);
197 	*reg_byte = MCP251XFD_UINT32_FLAG_TO_BYTE_MASK(MCP251XFD_REG_TXQCON_UINC |
198 						       MCP251XFD_REG_TXQCON_TXREQ);
199 
200 	return mcp251xfd_write(dev, MCP251XFD_REG_TXQCON + 1, 1);
201 }
202 
mcp251xfd_rxobj_to_canframe(struct mcp251xfd_rxobj * src,struct can_frame * dst)203 static void mcp251xfd_rxobj_to_canframe(struct mcp251xfd_rxobj *src, struct can_frame *dst)
204 {
205 	memset(dst, 0, sizeof(*dst));
206 
207 	src->id = sys_le32_to_cpu(src->id);
208 	src->flags = sys_le32_to_cpu(src->flags);
209 
210 	if ((src->flags & MCP251XFD_OBJ_FLAGS_IDE) != 0) {
211 		dst->id = FIELD_GET(MCP251XFD_OBJ_ID_EID_MASK, src->id);
212 		dst->id |= FIELD_GET(MCP251XFD_OBJ_ID_SID_MASK, src->id) << 18;
213 		dst->flags |= CAN_FRAME_IDE;
214 	} else {
215 		dst->id = FIELD_GET(MCP251XFD_OBJ_ID_SID_MASK, src->id);
216 	}
217 
218 	if ((src->flags & MCP251XFD_OBJ_FLAGS_BRS) != 0) {
219 		dst->flags |= CAN_FRAME_BRS;
220 	}
221 
222 #if defined(CONFIG_CAN_FD_MODE)
223 	if ((src->flags & MCP251XFD_OBJ_FLAGS_FDF) != 0) {
224 		dst->flags |= CAN_FRAME_FDF;
225 	}
226 #endif
227 
228 	dst->dlc = FIELD_GET(MCP251XFD_OBJ_FLAGS_DLC_MASK, src->flags);
229 
230 #if defined(CONFIG_CAN_RX_TIMESTAMP)
231 	dst->timestamp = sys_le32_to_cpu(src->timestamp);
232 #endif
233 
234 	if ((src->flags & MCP251XFD_OBJ_FLAGS_RTR) != 0) {
235 		dst->flags |= CAN_FRAME_RTR;
236 	} else {
237 		memcpy(dst->data, src->data, MIN(can_dlc_to_bytes(dst->dlc), CAN_MAX_DLEN));
238 	}
239 }
240 
mcp251xfd_get_mode_internal(const struct device * dev,uint8_t * mode)241 static int mcp251xfd_get_mode_internal(const struct device *dev, uint8_t *mode)
242 {
243 	uint8_t *reg_byte;
244 	uint32_t mask = MCP251XFD_UINT32_FLAG_TO_BYTE_MASK(MCP251XFD_REG_CON_OPMOD_MASK);
245 
246 	reg_byte = mcp251xfd_read_crc(dev, MCP251XFD_REG_CON_B2, 1);
247 	if (!reg_byte) {
248 		return -EINVAL;
249 	}
250 
251 	*mode = FIELD_GET(mask, *reg_byte);
252 
253 	return 0;
254 }
255 
mcp251xfd_reg_check_value_wtimeout(const struct device * dev,uint16_t addr,uint32_t value,uint32_t mask,uint32_t timeout_usec,int retries,bool allow_yield)256 static int mcp251xfd_reg_check_value_wtimeout(const struct device *dev, uint16_t addr,
257 					      uint32_t value, uint32_t mask,
258 					      uint32_t timeout_usec, int retries, bool allow_yield)
259 {
260 	uint32_t *reg;
261 	uint32_t delay = timeout_usec / retries;
262 
263 	for (;;) {
264 		reg = mcp251xfd_read_crc(dev, addr, MCP251XFD_REG_SIZE);
265 		if (!reg) {
266 			return -EINVAL;
267 		}
268 
269 		*reg = sys_le32_to_cpu(*reg);
270 
271 		if ((*reg & mask) == value) {
272 			return 0;
273 		}
274 
275 		if (--retries < 0) {
276 			LOG_ERR("Timeout validing 0x%x", addr);
277 			return -EIO;
278 		}
279 
280 		if (allow_yield) {
281 			k_sleep(K_USEC(delay));
282 		} else {
283 			k_busy_wait(delay);
284 		}
285 	}
286 	return 0;
287 }
288 
mcp251xfd_set_tdc(const struct device * dev,bool is_enabled)289 static int mcp251xfd_set_tdc(const struct device *dev, bool is_enabled)
290 {
291 	uint32_t *reg;
292 	uint32_t tmp;
293 	struct mcp251xfd_data *dev_data = dev->data;
294 
295 	reg = mcp251xfd_get_spi_buf_ptr(dev);
296 
297 	if (is_enabled) {
298 		tmp = FIELD_PREP(MCP251XFD_REG_TDC_TDCMOD_MASK, MCP251XFD_REG_TDC_TDCMOD_AUTO);
299 		tmp |= FIELD_PREP(MCP251XFD_REG_TDC_TDCO_MASK, dev_data->tdco);
300 	} else {
301 		tmp = FIELD_PREP(MCP251XFD_REG_TDC_TDCMOD_MASK, MCP251XFD_REG_TDC_TDCMOD_DISABLED);
302 	}
303 
304 	*reg = sys_cpu_to_le32(tmp);
305 
306 	return mcp251xfd_write(dev, MCP251XFD_REG_TDC, MCP251XFD_REG_SIZE);
307 }
308 
/*
 * Request a controller operating mode change via REQOP and wait until the
 * OPMOD field reflects it. When currently in configuration mode with CAN FD
 * enabled, the transmitter delay compensation is programmed first (enabled
 * only for the mixed CAN FD mode). May sleep; thread context only.
 *
 * Returns 0 on success (or if already in the requested mode), -EINVAL on a
 * failed register read, or a negative errno from the SPI write / the
 * mode-change timeout.
 */
static int mcp251xfd_set_mode_internal(const struct device *dev, uint8_t requested_mode)
{
	struct mcp251xfd_data *dev_data = dev->data;
	uint32_t *reg;
	uint32_t opmod, reg_con;
	int ret = 0;

	k_mutex_lock(&dev_data->mutex, K_FOREVER);

	reg = mcp251xfd_read_crc(dev, MCP251XFD_REG_CON, MCP251XFD_REG_SIZE);
	if (!reg) {
		ret = -EINVAL;
		goto done;
	}

	reg_con = sys_le32_to_cpu(*reg);

	/* Nothing to do if the chip already runs in the requested mode */
	opmod = FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK, reg_con);
	if (opmod == requested_mode) {
		goto done;
	}

#if defined(CONFIG_CAN_FD_MODE)
	/* TDC is only (re)programmed when leaving configuration mode */
	if (dev_data->current_mcp251xfd_mode == MCP251XFD_REG_CON_MODE_CONFIG) {
		if (requested_mode ==  MCP251XFD_REG_CON_MODE_CAN2_0 ||
		    requested_mode ==  MCP251XFD_REG_CON_MODE_EXT_LOOPBACK ||
		    requested_mode == MCP251XFD_REG_CON_MODE_INT_LOOPBACK) {
			ret = mcp251xfd_set_tdc(dev, false);
		} else if (requested_mode == MCP251XFD_REG_CON_MODE_MIXED) {
			ret = mcp251xfd_set_tdc(dev, true);
		}

		if (ret < 0) {
			goto done;
		}
	}
#endif

	/* Replace REQOP with the requested mode, keep the other CON bits */
	reg_con &= ~MCP251XFD_REG_CON_REQOP_MASK;
	reg_con |= FIELD_PREP(MCP251XFD_REG_CON_REQOP_MASK, requested_mode);

	*reg = sys_cpu_to_le32(reg_con);

	ret = mcp251xfd_write(dev, MCP251XFD_REG_CON, MCP251XFD_REG_SIZE);
	if (ret < 0) {
		LOG_ERR("Failed to write REG_CON register [%d]", MCP251XFD_REG_CON);
		goto done;
	}

	/* Poll OPMOD until the chip has actually switched */
	ret = mcp251xfd_reg_check_value_wtimeout(
		dev, MCP251XFD_REG_CON, FIELD_PREP(MCP251XFD_REG_CON_OPMOD_MASK, requested_mode),
		MCP251XFD_REG_CON_OPMOD_MASK, MCP251XFD_MODE_CHANGE_TIMEOUT_USEC,
		MCP251XFD_MODE_CHANGE_RETRIES, true);
done:
	k_mutex_unlock(&dev_data->mutex);
	return ret;
}
366 
/*
 * Record the requested CAN mode; the chip mode is applied later when the
 * controller is started. Returns -EBUSY if already started and -ENOTSUP
 * for unsupported mode bits.
 */
static int mcp251xfd_set_mode(const struct device *dev, can_mode_t mode)
{
	struct mcp251xfd_data *dev_data = dev->data;

	if (dev_data->common.started) {
		return -EBUSY;
	}

	/* todo: Add CAN_MODE_ONE_SHOT support */
	if ((mode & (CAN_MODE_3_SAMPLES | CAN_MODE_ONE_SHOT)) != 0) {
		return -ENOTSUP;
	}

#if !defined(CONFIG_CAN_FD_MODE)
	if ((mode & CAN_MODE_FD) != 0) {
		return -ENOTSUP;
	}
#endif

	/* CAN_MODE_NORMAL is the absence of all flag bits */
	if (mode == CAN_MODE_NORMAL) {
		dev_data->next_mcp251xfd_mode = MCP251XFD_REG_CON_MODE_CAN2_0;
	}

#if defined(CONFIG_CAN_FD_MODE)
	if ((mode & CAN_MODE_FD) != 0) {
		dev_data->next_mcp251xfd_mode = MCP251XFD_REG_CON_MODE_MIXED;
	}
#endif

	/* Listen-only and loopback take precedence, in that order */
	if ((mode & CAN_MODE_LISTENONLY) != 0) {
		dev_data->next_mcp251xfd_mode = MCP251XFD_REG_CON_MODE_LISTENONLY;
	}

	if ((mode & CAN_MODE_LOOPBACK) != 0) {
		dev_data->next_mcp251xfd_mode = MCP251XFD_REG_CON_MODE_EXT_LOOPBACK;
	}

	dev_data->common.mode = mode;

	return 0;
}
404 
mcp251xfd_set_timing(const struct device * dev,const struct can_timing * timing)405 static int mcp251xfd_set_timing(const struct device *dev, const struct can_timing *timing)
406 {
407 	struct mcp251xfd_data *dev_data = dev->data;
408 	uint32_t *reg;
409 	uint32_t tmp;
410 	int ret;
411 
412 	if (!timing) {
413 		return -EINVAL;
414 	}
415 
416 	if (dev_data->common.started) {
417 		return -EBUSY;
418 	}
419 
420 	k_mutex_lock(&dev_data->mutex, K_FOREVER);
421 
422 	reg = mcp251xfd_get_spi_buf_ptr(dev);
423 	tmp = FIELD_PREP(MCP251XFD_REG_NBTCFG_BRP_MASK, timing->prescaler - 1);
424 	tmp |= FIELD_PREP(MCP251XFD_REG_NBTCFG_TSEG1_MASK,
425 			   timing->prop_seg + timing->phase_seg1 - 1);
426 	tmp |= FIELD_PREP(MCP251XFD_REG_NBTCFG_TSEG2_MASK, timing->phase_seg2 - 1);
427 	tmp |= FIELD_PREP(MCP251XFD_REG_NBTCFG_SJW_MASK, timing->sjw - 1);
428 	*reg = tmp;
429 
430 	ret = mcp251xfd_write(dev, MCP251XFD_REG_NBTCFG, MCP251XFD_REG_SIZE);
431 	if (ret < 0) {
432 		LOG_ERR("Failed to write NBTCFG register [%d]", ret);
433 	}
434 
435 	k_mutex_unlock(&dev_data->mutex);
436 
437 	return ret;
438 }
439 
440 
441 #if defined(CONFIG_CAN_FD_MODE)
mcp251xfd_set_timing_data(const struct device * dev,const struct can_timing * timing)442 static int mcp251xfd_set_timing_data(const struct device *dev, const struct can_timing *timing)
443 {
444 	struct mcp251xfd_data *dev_data = dev->data;
445 	uint32_t *reg;
446 	uint32_t tmp;
447 	int ret;
448 
449 	if (!timing) {
450 		return -EINVAL;
451 	}
452 
453 	if (dev_data->common.started) {
454 		return -EBUSY;
455 	}
456 
457 	k_mutex_lock(&dev_data->mutex, K_FOREVER);
458 
459 	reg = mcp251xfd_get_spi_buf_ptr(dev);
460 
461 	tmp = FIELD_PREP(MCP251XFD_REG_DBTCFG_BRP_MASK, timing->prescaler - 1);
462 	tmp |= FIELD_PREP(MCP251XFD_REG_DBTCFG_TSEG1_MASK,
463 			  timing->prop_seg + timing->phase_seg1 - 1);
464 	tmp |= FIELD_PREP(MCP251XFD_REG_DBTCFG_TSEG2_MASK, timing->phase_seg2 - 1);
465 	tmp |= FIELD_PREP(MCP251XFD_REG_DBTCFG_SJW_MASK, timing->sjw - 1);
466 
467 	*reg = sys_cpu_to_le32(tmp);
468 
469 	/* actual TDCO minimum is -64 but driver implementation only sets >= 0 values */
470 	dev_data->tdco = CAN_CALC_TDCO(timing, 0U, MCP251XFD_REG_TDC_TDCO_MAX);
471 
472 	ret = mcp251xfd_write(dev, MCP251XFD_REG_DBTCFG, MCP251XFD_REG_SIZE);
473 	if (ret < 0) {
474 		LOG_ERR("Failed to write DBTCFG register [%d]", ret);
475 	}
476 
477 	k_mutex_unlock(&dev_data->mutex);
478 
479 	return ret;
480 }
481 #endif
482 
/*
 * Queue a CAN frame for transmission.
 *
 * Waits (up to @timeout) for a free TX mailbox, records the completion
 * callback, and pushes the frame into the chip's TX queue. On a fifo-write
 * failure the mailbox claim is rolled back.
 *
 * Returns 0 on success, -ENETDOWN if the controller is stopped,
 * -ENETUNREACH while bus-off, -EINVAL for a DLC > CAN_MAX_DLC without the
 * FD flag, -ENOTSUP for FD frames outside FD mode, -EAGAIN if no mailbox
 * became free within @timeout, or a negative errno from the fifo write.
 */
static int mcp251xfd_send(const struct device *dev, const struct can_frame *msg,
			  k_timeout_t timeout, can_tx_callback_t callback, void *callback_arg)
{
	struct mcp251xfd_data *dev_data = dev->data;
	uint8_t mailbox_idx;
	int ret = 0;

	LOG_DBG("Sending %d bytes. Id: 0x%x, ID type: %s %s %s %s", can_dlc_to_bytes(msg->dlc),
		msg->id, msg->flags & CAN_FRAME_IDE ? "extended" : "standard",
		msg->flags & CAN_FRAME_RTR ? "RTR" : "",
		msg->flags & CAN_FRAME_FDF ? "FD frame" : "",
		msg->flags & CAN_FRAME_BRS ? "BRS" : "");

	if (!dev_data->common.started) {
		return -ENETDOWN;
	}

	if (dev_data->state == CAN_STATE_BUS_OFF) {
		return -ENETUNREACH;
	}

	/* DLC values above CAN_MAX_DLC are only valid for CAN FD frames */
	if ((msg->flags & CAN_FRAME_FDF) == 0 && msg->dlc > CAN_MAX_DLC) {
		LOG_ERR("DLC of %d without fd flag set.", msg->dlc);
		return -EINVAL;
	}

	if ((msg->flags & CAN_FRAME_FDF) && !(dev_data->common.mode & CAN_MODE_FD)) {
		return -ENOTSUP;
	}

	/* tx_sem tracks free mailboxes; it is given back on completion/failure */
	if (k_sem_take(&dev_data->tx_sem, timeout) != 0) {
		return -EAGAIN;
	}

	k_mutex_lock(&dev_data->mutex, K_FOREVER);
	/* claim the first unused mailbox bit */
	for (mailbox_idx = 0; mailbox_idx < MCP251XFD_TX_QUEUE_ITEMS; mailbox_idx++) {
		if ((BIT(mailbox_idx) & dev_data->mailbox_usage) == 0) {
			dev_data->mailbox_usage |= BIT(mailbox_idx);
			break;
		}
	}

	/* should not happen after a successful sem_take; treat as I/O error */
	if (mailbox_idx >= MCP251XFD_TX_QUEUE_ITEMS) {
		k_sem_give(&dev_data->tx_sem);
		ret = -EIO;
		goto done;
	}

	dev_data->mailbox[mailbox_idx].cb = callback;
	dev_data->mailbox[mailbox_idx].cb_arg = callback_arg;

	ret = mcp251xfd_fifo_write(dev, mailbox_idx, msg);

	if (ret < 0) {
		/* roll back the mailbox claim on failure */
		dev_data->mailbox_usage &= ~BIT(mailbox_idx);
		dev_data->mailbox[mailbox_idx].cb = NULL;
		k_sem_give(&dev_data->tx_sem);
	}

done:
	k_mutex_unlock(&dev_data->mutex);
	return ret;
}
546 
/*
 * Install an RX filter: program FLTOBJ (match value) and FLTMASK, then
 * enable the filter and route matches to the RX FIFO.
 *
 * Returns the filter index (>= 0) on success, -ENOSPC if all filter slots
 * are in use, or a negative errno if programming the chip failed.
 */
static int mcp251xfd_add_rx_filter(const struct device *dev, can_rx_callback_t rx_cb, void *cb_arg,
				   const struct can_filter *filter)
{
	struct mcp251xfd_data *dev_data = dev->data;
	uint32_t *reg;
	uint32_t tmp;
	uint8_t *reg_byte;
	int filter_idx;
	int ret;

	k_mutex_lock(&dev_data->mutex, K_FOREVER);

	/* find the first free filter slot */
	for (filter_idx = 0; filter_idx < CONFIG_CAN_MAX_FILTER ; filter_idx++) {
		if ((BIT(filter_idx) & dev_data->filter_usage) == 0) {
			break;
		}
	}

	if (filter_idx >= CONFIG_CAN_MAX_FILTER) {
		filter_idx = -ENOSPC;
		goto done;
	}

	/* FLTOBJ: the ID value to match */
	reg = mcp251xfd_get_spi_buf_ptr(dev);

	if ((filter->flags & CAN_FILTER_IDE) != 0) {
		tmp = FIELD_PREP(MCP251XFD_REG_FLTOBJ_SID_MASK, filter->id >> 18);
		tmp |= FIELD_PREP(MCP251XFD_REG_FLTOBJ_EID_MASK, filter->id);
		tmp |= MCP251XFD_REG_FLTOBJ_EXIDE;
	} else {
		tmp = FIELD_PREP(MCP251XFD_REG_FLTOBJ_SID_MASK, filter->id);
	}

	*reg = sys_cpu_to_le32(tmp);
	ret = mcp251xfd_write(dev, MCP251XFD_REG_FLTOBJ(filter_idx), MCP251XFD_REG_SIZE);
	if (ret < 0) {
		LOG_ERR("Failed to write FLTOBJ register [%d]", ret);
		/*
		 * Propagate the error; previously the positive filter index
		 * was returned even though the filter was never installed.
		 */
		filter_idx = ret;
		goto done;
	}

	/* FLTMASK: which ID bits must match; MIDE also matches the IDE bit */
	reg = mcp251xfd_get_spi_buf_ptr(dev);
	if ((filter->flags & CAN_FILTER_IDE) != 0) {
		tmp = FIELD_PREP(MCP251XFD_REG_MASK_MSID_MASK, filter->mask >> 18);
		tmp |= FIELD_PREP(MCP251XFD_REG_MASK_MEID_MASK, filter->mask);
	} else {
		tmp = FIELD_PREP(MCP251XFD_REG_MASK_MSID_MASK, filter->mask);
	}
	tmp |= MCP251XFD_REG_MASK_MIDE;

	*reg = sys_cpu_to_le32(tmp);

	ret = mcp251xfd_write(dev, MCP251XFD_REG_FLTMASK(filter_idx), MCP251XFD_REG_SIZE);
	if (ret < 0) {
		LOG_ERR("Failed to write FLTMASK register [%d]", ret);
		filter_idx = ret;
		goto done;
	}

	/* Enable the filter and direct matches to the RX FIFO */
	reg_byte = mcp251xfd_get_spi_buf_ptr(dev);
	*reg_byte = MCP251XFD_REG_BYTE_FLTCON_FLTEN;
	*reg_byte |= FIELD_PREP(MCP251XFD_REG_BYTE_FLTCON_FBP_MASK, MCP251XFD_RX_FIFO_IDX);

	ret = mcp251xfd_write(dev, MCP251XFD_REG_BYTE_FLTCON(filter_idx), 1);
	if (ret < 0) {
		LOG_ERR("Failed to write FLTCON register [%d]", ret);
		filter_idx = ret;
		goto done;
	}

	dev_data->filter_usage |= BIT(filter_idx);
	dev_data->filter[filter_idx] = *filter;
	dev_data->rx_cb[filter_idx] = rx_cb;
	dev_data->cb_arg[filter_idx] = cb_arg;

done:
	k_mutex_unlock(&dev_data->mutex);

	return filter_idx;
}
624 
mcp251xfd_remove_rx_filter(const struct device * dev,int filter_idx)625 static void mcp251xfd_remove_rx_filter(const struct device *dev, int filter_idx)
626 {
627 	struct mcp251xfd_data *dev_data = dev->data;
628 	uint8_t *reg_byte;
629 	uint32_t *reg;
630 	int ret;
631 
632 	if (filter_idx < 0 || filter_idx >= CONFIG_CAN_MAX_FILTER) {
633 		LOG_ERR("Filter ID %d out of bounds", filter_idx);
634 		return;
635 	}
636 
637 	k_mutex_lock(&dev_data->mutex, K_FOREVER);
638 
639 	reg_byte = mcp251xfd_get_spi_buf_ptr(dev);
640 	*reg_byte = 0;
641 
642 	ret = mcp251xfd_write(dev, MCP251XFD_REG_BYTE_FLTCON(filter_idx), 1);
643 	if (ret < 0) {
644 		LOG_ERR("Failed to write FLTCON register [%d]", ret);
645 		goto done;
646 	}
647 
648 	dev_data->filter_usage &= ~BIT(filter_idx);
649 
650 	reg = mcp251xfd_get_spi_buf_ptr(dev);
651 	reg[0] = 0;
652 
653 	ret = mcp251xfd_write(dev, MCP251XFD_REG_FLTCON(filter_idx), MCP251XFD_REG_SIZE);
654 	if (ret < 0) {
655 		LOG_ERR("Failed to write FLTCON register [%d]", ret);
656 	}
657 
658 done:
659 	k_mutex_unlock(&dev_data->mutex);
660 }
661 
/* Register (or clear, with cb == NULL) the bus-state-change notification hook. */
static void mcp251xfd_set_state_change_callback(const struct device *dev,
						can_state_change_callback_t cb, void *user_data)
{
	struct mcp251xfd_data *data = dev->data;

	data->common.state_change_cb_user_data = user_data;
	data->common.state_change_cb = cb;
}
670 
mcp251xfd_get_state(const struct device * dev,enum can_state * state,struct can_bus_err_cnt * err_cnt)671 static int mcp251xfd_get_state(const struct device *dev, enum can_state *state,
672 			       struct can_bus_err_cnt *err_cnt)
673 {
674 	struct mcp251xfd_data *dev_data = dev->data;
675 	uint32_t *reg;
676 	uint32_t tmp;
677 	int ret = 0;
678 
679 	k_mutex_lock(&dev_data->mutex, K_FOREVER);
680 
681 	reg = mcp251xfd_read_crc(dev, MCP251XFD_REG_TREC, MCP251XFD_REG_SIZE);
682 	if (!reg) {
683 		ret = -EINVAL;
684 		goto done;
685 	}
686 
687 	tmp = sys_le32_to_cpu(*reg);
688 
689 	if (err_cnt != NULL) {
690 		err_cnt->tx_err_cnt = FIELD_GET(MCP251XFD_REG_TREC_TEC_MASK, tmp);
691 		err_cnt->rx_err_cnt = FIELD_GET(MCP251XFD_REG_TREC_REC_MASK, tmp);
692 	}
693 
694 	if (state == NULL) {
695 		goto done;
696 	}
697 
698 	if (!dev_data->common.started) {
699 		*state = CAN_STATE_STOPPED;
700 		goto done;
701 	}
702 
703 	if ((tmp & MCP251XFD_REG_TREC_TXBO) != 0) {
704 		*state = CAN_STATE_BUS_OFF;
705 	} else if ((tmp & MCP251XFD_REG_TREC_TXBP) != 0) {
706 		*state = CAN_STATE_ERROR_PASSIVE;
707 	} else if ((tmp & MCP251XFD_REG_TREC_RXBP) != 0) {
708 		*state = CAN_STATE_ERROR_PASSIVE;
709 	} else if ((tmp & MCP251XFD_REG_TREC_TXWARN) != 0) {
710 		*state = CAN_STATE_ERROR_WARNING;
711 	} else if ((tmp & MCP251XFD_REG_TREC_RXWARN) != 0) {
712 		*state = CAN_STATE_ERROR_WARNING;
713 	} else {
714 		*state = CAN_STATE_ERROR_ACTIVE;
715 	}
716 
717 done:
718 	k_mutex_unlock(&dev_data->mutex);
719 	return 0;
720 }
721 
mcp251xfd_get_core_clock(const struct device * dev,uint32_t * rate)722 static int mcp251xfd_get_core_clock(const struct device *dev, uint32_t *rate)
723 {
724 	const struct mcp251xfd_config *dev_cfg = dev->config;
725 
726 	*rate = dev_cfg->osc_freq;
727 	return 0;
728 }
729 
mcp251xfd_get_max_filters(const struct device * dev,bool ide)730 static int mcp251xfd_get_max_filters(const struct device *dev, bool ide)
731 {
732 	ARG_UNUSED(ide);
733 
734 	return CONFIG_CAN_MAX_FILTER;
735 }
736 
/*
 * Drain pending items from a chip FIFO (RX FIFO or TX event FIFO) and pass
 * each one to fifo->msg_handler(). For RX FIFOs the head index (FIFOCI) is
 * used to batch-read every pending item, wrapping around the ring as
 * needed; TEF FIFOs have no FIFOCI, so a single item is fetched per call.
 * UINC is then written once per consumed item to advance the chip's tail.
 *
 * Returns 0 on success (including "nothing pending") or a negative errno
 * on register access failure.
 */
static int mcp251xfd_handle_fifo_read(const struct device *dev, const struct mcp251xfd_fifo *fifo,
				      uint8_t fifo_type)
{
	int ret = 0;
	struct mcp251xfd_data *dev_data = dev->data;
	uint32_t *regs, fifosta, ua;
	uint8_t *reg_byte;

	int len;
	int fetch_total = 0;
	int ui_inc = 0;
	uint32_t fifo_tail_index, fifo_tail_addr;
	uint8_t fifo_head_index;

	k_mutex_lock(&dev_data->mutex, K_FOREVER);

	/* read in FIFOSTA and FIFOUA at the same time */
	regs = mcp251xfd_read_crc(dev, MCP251XFD_REG_FIFOCON_TO_STA(fifo->reg_fifocon_addr),
				  2 * MCP251XFD_REG_SIZE);
	if (!regs) {
		ret = -EINVAL;
		goto done;
	}
	fifosta = sys_le32_to_cpu(regs[0]);
	ua = sys_le32_to_cpu(regs[1]);

	/* is there any data in the fifo? */
	if (!(fifosta & MCP251XFD_REG_FIFOSTA_TFNRFNIF)) {
		goto done;
	}

	/* UA holds the RAM offset of the tail (next unread) item */
	fifo_tail_addr = ua;
	fifo_tail_index = (fifo_tail_addr - fifo->ram_start_addr) / fifo->item_size;

	if (fifo_type == MCP251XFD_FIFO_TYPE_RX) {
		/*
		 * fifo_head_index points where the next message will be written.
		 * It points to one past the end of the fifo.
		 */
		fifo_head_index = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifosta);
		if (fifo_head_index == 0) {
			fifo_head_index = fifo->capacity - 1;
		} else {
			fifo_head_index -= 1;
		}

		if (fifo_tail_index > fifo_head_index) {
			/* fetch to the end of the memory and then wrap to the start */
			fetch_total = fifo->capacity - 1 - fifo_tail_index + 1;
			fetch_total += fifo_head_index + 1;
		} else {
			fetch_total = fifo_head_index - fifo_tail_index + 1;
		}
	} else if (fifo_type == MCP251XFD_FIFO_TYPE_TEF) {
		/* FIFOCI doesn't exist for TEF queues, so fetch one message at a time */
		fifo_head_index = fifo_tail_index;
		fetch_total = 1;
	} else {
		ret = -EINVAL;
		goto done;
	}

	/* fetch in contiguous runs, wrapping at the end of the ring */
	while (fetch_total > 0) {
		uint16_t memory_addr;
		uint8_t *data;

		if (fifo_tail_index > fifo_head_index) {
			/* run reaches the end of the ring; next pass handles the wrap */
			len = fifo->capacity - 1 - fifo_tail_index + 1;
		} else {
			len = fifo_head_index - fifo_tail_index + 1;
		}

		memory_addr = MCP251XFD_RAM_START_ADDR + fifo->ram_start_addr +
			      fifo_tail_index * fifo->item_size;

		data = mcp251xfd_read_reg(dev, memory_addr, len * fifo->item_size);
		if (!data) {
			LOG_ERR("Error fetching batch message");
			ret = -EINVAL;
			goto done;
		}

		for (int i = 0; i < len; i++) {
			fifo->msg_handler(dev, (void *)(&data[i * fifo->item_size]));
		}

		fifo_tail_index = (fifo_tail_index + len) % fifo->capacity;
		fetch_total -= len;
		ui_inc += len;
	}

	/* advance the chip's tail pointer: one UINC byte write per item */
	reg_byte = mcp251xfd_get_spi_buf_ptr(dev);
	*reg_byte = MCP251XFD_UINT32_FLAG_TO_BYTE_MASK(MCP251XFD_REG_FIFOCON_UINC);

	for (int i = 0; i < ui_inc; i++) {
		ret = mcp251xfd_write(dev, fifo->reg_fifocon_addr + 1, 1);
		if (ret < 0) {
			LOG_ERR("Failed to increment pointer");
			goto done;
		}
	}

done:
	k_mutex_unlock(&dev_data->mutex);
	return ret;
}
843 
mcp251xfd_reset_tx_fifos(const struct device * dev,int status)844 static void mcp251xfd_reset_tx_fifos(const struct device *dev, int status)
845 {
846 	struct mcp251xfd_data *dev_data = dev->data;
847 
848 	LOG_INF("All FIFOs Reset");
849 	k_mutex_lock(&dev_data->mutex, K_FOREVER);
850 	for (int i = 0; i < MCP251XFD_TX_QUEUE_ITEMS; i++) {
851 		can_tx_callback_t callback;
852 
853 		if (!(dev_data->mailbox_usage & BIT(i))) {
854 			continue;
855 		}
856 
857 		callback = dev_data->mailbox[i].cb;
858 		if (callback) {
859 			callback(dev, status, dev_data->mailbox[i].cb_arg);
860 		}
861 
862 		dev_data->mailbox_usage &= ~BIT(i);
863 		dev_data->mailbox[i].cb = NULL;
864 		k_sem_give(&dev_data->tx_sem);
865 	}
866 	k_mutex_unlock(&dev_data->mutex);
867 }
868 
869 /*
870  * CERRIF will be set each time a threshold in the TEC/REC counter is crossed by the following
871  * conditions:
872  * • TEC or REC exceeds the Error Warning state threshold
873  * • The transmitter or receiver transitions to Error Passive state
874  * • The transmitter transitions to Bus Off state
875  * • The transmitter or receiver transitions from Error Passive to Error Active state
876  * • The module transitions from Bus Off to Error Active state, after the bus off recovery
877  * sequence
878  * When the user clears CERRIF, it will remain clear until a new counter crossing occurs.
879  */
mcp251xfd_handle_cerrif(const struct device * dev)880 static int mcp251xfd_handle_cerrif(const struct device *dev)
881 {
882 	enum can_state new_state;
883 	struct mcp251xfd_data *dev_data = dev->data;
884 	struct can_bus_err_cnt err_cnt;
885 	int ret;
886 
887 	k_mutex_lock(&dev_data->mutex, K_FOREVER);
888 
889 	ret = mcp251xfd_get_state(dev, &new_state, &err_cnt);
890 	if (ret < 0) {
891 		goto done;
892 	}
893 
894 	if (new_state == dev_data->state) {
895 		goto done;
896 	}
897 
898 	LOG_INF("State %d -> %d (tx: %d, rx: %d)", dev_data->state, new_state, err_cnt.tx_err_cnt,
899 		err_cnt.rx_err_cnt);
900 
901 	/* Upon entering bus-off, all the fifos are reset. */
902 	dev_data->state = new_state;
903 	if (new_state == CAN_STATE_BUS_OFF) {
904 		mcp251xfd_reset_tx_fifos(dev, -ENETDOWN);
905 	}
906 
907 	if (dev_data->common.state_change_cb) {
908 		dev_data->common.state_change_cb(dev, new_state, err_cnt,
909 						 dev_data->common.state_change_cb_user_data);
910 	}
911 
912 done:
913 	k_mutex_unlock(&dev_data->mutex);
914 	return ret;
915 }
916 
mcp251xfd_handle_modif(const struct device * dev)917 static int mcp251xfd_handle_modif(const struct device *dev)
918 {
919 	struct mcp251xfd_data *dev_data = dev->data;
920 	uint8_t mode;
921 	int ret;
922 
923 	k_mutex_lock(&dev_data->mutex, K_FOREVER);
924 
925 	ret = mcp251xfd_get_mode_internal(dev, &mode);
926 	if (ret < 0) {
927 		goto finish;
928 	}
929 
930 	dev_data->current_mcp251xfd_mode = mode;
931 
932 	LOG_INF("Switched to mode %d", mode);
933 
934 	if (mode == dev_data->next_mcp251xfd_mode) {
935 		ret = 0;
936 		goto finish;
937 	}
938 
939 	/* try to transition back into our target mode */
940 	if (dev_data->common.started) {
941 		LOG_INF("Switching back into mode %d", dev_data->next_mcp251xfd_mode);
942 		ret =  mcp251xfd_set_mode_internal(dev, dev_data->next_mcp251xfd_mode);
943 	}
944 
945 finish:
946 	k_mutex_unlock(&dev_data->mutex);
947 	return ret;
948 }
949 
/*
 * Handle the invalid-message interrupt (IVMIF): reset the TX FIFOs if a
 * bus-off transmit error is reported, clear the BDIAG1 diagnostic register
 * and update the CAN error statistics from the captured flags.
 *
 * Returns 0 on success or a negative errno on register access failure.
 */
static int mcp251xfd_handle_ivmif(const struct device *dev)
{
	uint32_t *reg;
	struct mcp251xfd_data *dev_data = dev->data;
	int ret;
	uint32_t tmp;

	k_mutex_lock(&dev_data->mutex, K_FOREVER);

	reg = mcp251xfd_read_crc(dev, MCP251XFD_REG_BDIAG1, MCP251XFD_REG_SIZE);
	if (!reg) {
		ret = -EINVAL;
		goto done;
	}

	tmp = sys_le32_to_cpu(*reg);

	if ((tmp & MCP251XFD_REG_BDIAG1_TXBOERR) != 0) {
		LOG_INF("ivmif bus-off error");
		mcp251xfd_reset_tx_fifos(dev, -ENETDOWN);
	}

	/* Clear the values in diag */
	reg = mcp251xfd_get_spi_buf_ptr(dev);
	reg[0] = 0;
	ret = mcp251xfd_write(dev, MCP251XFD_REG_BDIAG1, MCP251XFD_REG_SIZE);
	if (ret < 0) {
		goto done;
	}

	/* There's no flag for DACKERR */
	if ((tmp & MCP251XFD_REG_BDIAG1_NACKERR) != 0) {
		CAN_STATS_ACK_ERROR_INC(dev);
	}

	/* nominal- and data-phase variants both feed the same statistic */
	if ((tmp & (MCP251XFD_REG_BDIAG1_NBIT0ERR | MCP251XFD_REG_BDIAG1_DBIT0ERR)) != 0) {
		CAN_STATS_BIT0_ERROR_INC(dev);
	}

	if ((tmp & (MCP251XFD_REG_BDIAG1_NBIT1ERR | MCP251XFD_REG_BDIAG1_DBIT1ERR)) != 0) {
		CAN_STATS_BIT1_ERROR_INC(dev);
	}

	if ((tmp & (MCP251XFD_REG_BDIAG1_NCRCERR | MCP251XFD_REG_BDIAG1_DCRCERR)) != 0) {
		CAN_STATS_CRC_ERROR_INC(dev);
	}

	if ((tmp & (MCP251XFD_REG_BDIAG1_NFORMERR | MCP251XFD_REG_BDIAG1_DFORMERR)) != 0) {
		CAN_STATS_FORM_ERROR_INC(dev);
	}

	if ((tmp & (MCP251XFD_REG_BDIAG1_NSTUFERR | MCP251XFD_REG_BDIAG1_DSTUFERR)) != 0) {
		CAN_STATS_STUFF_ERROR_INC(dev);
	}

done:
	k_mutex_unlock(&dev_data->mutex);
	return ret;
}
1009 
1010 #if defined(CONFIG_CAN_STATS)
mcp251xfd_handle_rxovif(const struct device * dev)1011 static int mcp251xfd_handle_rxovif(const struct device *dev)
1012 {
1013 	uint8_t *reg_byte;
1014 	struct mcp251xfd_data *dev_data = dev->data;
1015 	int ret;
1016 
1017 	k_mutex_lock(&dev_data->mutex, K_FOREVER);
1018 
1019 	reg_byte = mcp251xfd_get_spi_buf_ptr(dev);
1020 	*reg_byte = 0;
1021 
1022 	ret = mcp251xfd_write(dev, MCP251XFD_REG_FIFOSTA(MCP251XFD_RX_FIFO_IDX), 1);
1023 	if (ret < 0) {
1024 		goto done;
1025 	}
1026 
1027 	CAN_STATS_RX_OVERRUN_INC(dev);
1028 
1029 done:
1030 	k_mutex_unlock(&dev_data->mutex);
1031 	return ret;
1032 }
1033 #endif
1034 
/*
 * Interrupt dispatch loop, run from the interrupt thread while the INT pin
 * is asserted. Each pass reads the 16-bit interrupt flag half of REG_INT,
 * write-clears the flags that require explicit clearing, then dispatches to
 * the RX / TEF / IVM / MODE / CERR (and optionally RX-overflow) handlers.
 * The loop exits only once the INT pin reads inactive.
 */
static void mcp251xfd_handle_interrupts(const struct device *dev)
{
	const struct mcp251xfd_config *dev_cfg = dev->config;
	struct mcp251xfd_data *dev_data = dev->data;
	uint16_t *reg_int_hw;
	uint32_t reg_int;
	int ret;
	uint8_t consecutive_calls = 0;

	while (1) {
		/* Hold the mutex across read-modify-write of REG_INT */
		k_mutex_lock(&dev_data->mutex, K_FOREVER);
		reg_int_hw = mcp251xfd_read_crc(dev, MCP251XFD_REG_INT, sizeof(*reg_int_hw));

		/* NOTE(review): a failed CRC read retries immediately and does
		 * not count towards consecutive_calls, so a persistent SPI
		 * failure re-loops without the backoff sleep below — confirm
		 * this is acceptable.
		 */
		if (!reg_int_hw) {
			k_mutex_unlock(&dev_data->mutex);
			continue;
		}

		*reg_int_hw = sys_le16_to_cpu(*reg_int_hw);

		/* Keep a host-order copy before the buffer is modified below */
		reg_int = *reg_int_hw;

		/* these interrupt flags need to be explicitly cleared */
		if (reg_int & MCP251XFD_REG_INT_IF_CLEARABLE_MASK) {

			*reg_int_hw &= ~MCP251XFD_REG_INT_IF_CLEARABLE_MASK;

			*reg_int_hw = sys_cpu_to_le16(*reg_int_hw);

			ret = mcp251xfd_write(dev, MCP251XFD_REG_INT, sizeof(*reg_int_hw));
			if (ret) {
				LOG_ERR("Error clearing REG_INT interrupts [%d]", ret);
			}
		}

		k_mutex_unlock(&dev_data->mutex);

		if ((reg_int & MCP251XFD_REG_INT_RXIF) != 0) {
			ret = mcp251xfd_handle_fifo_read(dev, &dev_cfg->rx_fifo,
							 MCP251XFD_FIFO_TYPE_RX);
			if (ret < 0) {
				LOG_ERR("Error handling RXIF [%d]", ret);
			}
		}

		if ((reg_int & MCP251XFD_REG_INT_TEFIF) != 0) {
			ret = mcp251xfd_handle_fifo_read(dev, &dev_cfg->tef_fifo,
							 MCP251XFD_FIFO_TYPE_TEF);
			if (ret < 0) {
				LOG_ERR("Error handling TEFIF [%d]", ret);
			}
		}

		if ((reg_int & MCP251XFD_REG_INT_IVMIF) != 0) {
			ret = mcp251xfd_handle_ivmif(dev);
			if (ret < 0) {
				LOG_ERR("Error handling IVMIF [%d]", ret);
			}
		}

		if ((reg_int & MCP251XFD_REG_INT_MODIF) != 0) {
			ret = mcp251xfd_handle_modif(dev);
			if (ret < 0) {
				LOG_ERR("Error handling MODIF [%d]", ret);
			}
		}

		/*
		 * From Linux mcp251xfd driver
		 * On the MCP2527FD and MCP2518FD, we don't get a CERRIF IRQ on the transition
		 * TX ERROR_WARNING -> TX ERROR_ACTIVE.
		 */
		if ((reg_int & MCP251XFD_REG_INT_CERRIF) ||
		    dev_data->state > CAN_STATE_ERROR_ACTIVE) {
			ret = mcp251xfd_handle_cerrif(dev);
			if (ret < 0) {
				LOG_ERR("Error handling CERRIF [%d]", ret);
			}
		}

#if defined(CONFIG_CAN_STATS)
		if ((reg_int & MCP251XFD_REG_INT_RXOVIF) != 0) {
			ret = mcp251xfd_handle_rxovif(dev);
			if (ret < 0) {
				LOG_ERR("Error handling RXOVIF [%d]", ret);
			}
		}
#endif

		/* Break from loop if INT pin is inactive */
		consecutive_calls++;
		ret = gpio_pin_get_dt(&dev_cfg->int_gpio_dt);
		if (ret < 0) {
			LOG_ERR("Couldn't read INT pin [%d]", ret);
		} else if (ret == 0) {
			/* All interrupt flags handled */
			break;
		} else if (consecutive_calls % MCP251XFD_MAX_INT_HANDLER_CALLS == 0) {
			/* If there are clock problems, then MODIF cannot be cleared. */
			/* This is detected if there are too many consecutive calls. */
			/* Sleep this thread if this happens. */
			k_sleep(K_USEC(MCP251XFD_INT_HANDLER_SLEEP_USEC));
		}
	}
}
1140 
mcp251xfd_int_thread(const struct device * dev)1141 static void mcp251xfd_int_thread(const struct device *dev)
1142 {
1143 	const struct mcp251xfd_config *dev_cfg = dev->config;
1144 	struct mcp251xfd_data *dev_data = dev->data;
1145 
1146 	while (1) {
1147 		int ret;
1148 
1149 		k_sem_take(&dev_data->int_sem, K_FOREVER);
1150 		mcp251xfd_handle_interrupts(dev);
1151 
1152 		/* Re-enable pin interrupts */
1153 		ret = gpio_pin_interrupt_configure_dt(&dev_cfg->int_gpio_dt, GPIO_INT_LEVEL_ACTIVE);
1154 		if (ret < 0) {
1155 			LOG_ERR("Couldn't enable pin interrupt [%d]", ret);
1156 			k_oops();
1157 		}
1158 	}
1159 }
1160 
mcp251xfd_int_gpio_callback(const struct device * dev_gpio,struct gpio_callback * cb,uint32_t pins)1161 static void mcp251xfd_int_gpio_callback(const struct device *dev_gpio, struct gpio_callback *cb,
1162 					uint32_t pins)
1163 {
1164 	ARG_UNUSED(dev_gpio);
1165 	struct mcp251xfd_data *dev_data = CONTAINER_OF(cb, struct mcp251xfd_data, int_gpio_cb);
1166 	const struct device *dev = dev_data->dev;
1167 	const struct mcp251xfd_config *dev_cfg = dev->config;
1168 	int ret;
1169 
1170 	/* Disable pin interrupts */
1171 	ret = gpio_pin_interrupt_configure_dt(&dev_cfg->int_gpio_dt, GPIO_INT_DISABLE);
1172 	if (ret < 0) {
1173 		LOG_ERR("Couldn't disable pin interrupt [%d]", ret);
1174 		k_oops();
1175 	}
1176 
1177 	k_sem_give(&dev_data->int_sem);
1178 }
1179 
/* Report the CAN controller modes this driver supports. Always returns 0. */
static int mcp251xfd_get_capabilities(const struct device *dev, can_mode_t *cap)
{
	can_mode_t modes = CAN_MODE_NORMAL | CAN_MODE_LISTENONLY | CAN_MODE_LOOPBACK;

	ARG_UNUSED(dev);

#if defined(CONFIG_CAN_FD_MODE)
	modes |= CAN_MODE_FD;
#endif

	*cap = modes;

	return 0;
}
1192 
mcp251xfd_start(const struct device * dev)1193 static int mcp251xfd_start(const struct device *dev)
1194 {
1195 	struct mcp251xfd_data *dev_data = dev->data;
1196 	const struct mcp251xfd_config *dev_cfg = dev->config;
1197 	int ret;
1198 
1199 	if (dev_data->common.started) {
1200 		return -EALREADY;
1201 	}
1202 
1203 	/* in case of a race between mcp251xfd_send() and mcp251xfd_stop() */
1204 	mcp251xfd_reset_tx_fifos(dev, -ENETDOWN);
1205 
1206 	if (dev_cfg->common.phy != NULL) {
1207 		ret = can_transceiver_enable(dev_cfg->common.phy, dev_data->common.mode);
1208 		if (ret < 0) {
1209 			LOG_ERR("Failed to enable CAN transceiver [%d]", ret);
1210 			return ret;
1211 		}
1212 	}
1213 
1214 	k_mutex_lock(&dev_data->mutex, K_FOREVER);
1215 
1216 	CAN_STATS_RESET(dev);
1217 
1218 	ret = mcp251xfd_set_mode_internal(dev, dev_data->next_mcp251xfd_mode);
1219 	if (ret < 0) {
1220 		LOG_ERR("Failed to set the mode [%d]", ret);
1221 		if (dev_cfg->common.phy != NULL) {
1222 			/* Attempt to disable the CAN transceiver in case of error */
1223 			(void)can_transceiver_disable(dev_cfg->common.phy);
1224 		}
1225 	} else {
1226 		dev_data->common.started = true;
1227 	}
1228 
1229 	k_mutex_unlock(&dev_data->mutex);
1230 
1231 	return ret;
1232 }
1233 
/*
 * Stop the controller: request abort of all pending transmissions, wait for
 * the abort to complete, fail the queued TX mailboxes with -ENETDOWN, enter
 * configuration mode and disable the transceiver (if present).
 *
 * Returns 0 on success, -EALREADY if not started, or a negative errno.
 */
static int mcp251xfd_stop(const struct device *dev)
{
	struct mcp251xfd_data *dev_data = dev->data;
	const struct mcp251xfd_config *dev_cfg = dev->config;
	uint8_t *reg_byte;
	int ret;

	if (!dev_data->common.started) {
		return -EALREADY;
	}

	k_mutex_lock(&dev_data->mutex, K_FOREVER);

	/* abort all transmissions */
	reg_byte = mcp251xfd_get_spi_buf_ptr(dev);
	*reg_byte = MCP251XFD_UINT32_FLAG_TO_BYTE_MASK(MCP251XFD_REG_CON_ABAT);

	ret = mcp251xfd_write(dev, MCP251XFD_REG_CON_B3, 1);
	if (ret < 0) {
		k_mutex_unlock(&dev_data->mutex);
		return ret;
	}

	/* wait for all the messages to be aborted */
	/* NOTE(review): this poll loop has no timeout or sleep; it exits on a
	 * read failure or when the hardware clears ABAT — confirm the
	 * controller always clears ABAT so the loop cannot spin indefinitely.
	 */
	while (1) {
		reg_byte = mcp251xfd_read_crc(dev, MCP251XFD_REG_CON_B3, 1);

		if (!reg_byte ||
		    (*reg_byte & MCP251XFD_UINT32_FLAG_TO_BYTE_MASK(MCP251XFD_REG_CON_ABAT)) == 0) {
			break;
		}
	}

	/* release queued TX mailboxes and their waiters */
	mcp251xfd_reset_tx_fifos(dev, -ENETDOWN);

	ret = mcp251xfd_set_mode_internal(dev, MCP251XFD_REG_CON_MODE_CONFIG);
	if (ret < 0) {
		k_mutex_unlock(&dev_data->mutex);
		return ret;
	}

	dev_data->common.started = false;
	k_mutex_unlock(&dev_data->mutex);

	if (dev_cfg->common.phy != NULL) {
		ret = can_transceiver_disable(dev_cfg->common.phy);
		if (ret < 0) {
			LOG_ERR("Failed to disable CAN transceiver [%d]", ret);
			return ret;
		}
	}

	return 0;
}
1288 
mcp251xfd_rx_fifo_handler(const struct device * dev,void * data)1289 static void mcp251xfd_rx_fifo_handler(const struct device *dev, void *data)
1290 {
1291 	struct can_frame dst;
1292 	struct mcp251xfd_data *dev_data = dev->data;
1293 	struct mcp251xfd_rxobj *rxobj = data;
1294 	uint32_t filhit;
1295 
1296 	mcp251xfd_rxobj_to_canframe(rxobj, &dst);
1297 
1298 #ifndef CONFIG_CAN_ACCEPT_RTR
1299 	if ((dst.flags & CAN_FRAME_RTR) != 0U) {
1300 		return;
1301 	}
1302 #endif /* !CONFIG_CAN_ACCEPT_RTR */
1303 
1304 	filhit = FIELD_GET(MCP251XFD_OBJ_FILHIT_MASK, rxobj->flags);
1305 	if ((dev_data->filter_usage & BIT(filhit)) != 0) {
1306 		LOG_DBG("Received msg CAN id: 0x%x", dst.id);
1307 		dev_data->rx_cb[filhit](dev, &dst, dev_data->cb_arg[filhit]);
1308 	}
1309 }
1310 
mcp251xfd_tef_fifo_handler(const struct device * dev,void * data)1311 static void mcp251xfd_tef_fifo_handler(const struct device *dev, void *data)
1312 {
1313 	struct mcp251xfd_data *dev_data = dev->data;
1314 	can_tx_callback_t callback;
1315 	struct mcp251xfd_tefobj *tefobj = data;
1316 	uint8_t mailbox_idx;
1317 
1318 	mailbox_idx = FIELD_GET(MCP251XFD_OBJ_FLAGS_SEQ_MASK, tefobj->flags);
1319 	if (mailbox_idx >= MCP251XFD_TX_QUEUE_ITEMS) {
1320 		mcp251xfd_reset_tx_fifos(dev, -EIO);
1321 		LOG_ERR("Invalid mailbox index");
1322 		return;
1323 	}
1324 
1325 	callback = dev_data->mailbox[mailbox_idx].cb;
1326 	if (callback != NULL) {
1327 		callback(dev, 0, dev_data->mailbox[mailbox_idx].cb_arg);
1328 	}
1329 
1330 	dev_data->mailbox_usage &= ~BIT(mailbox_idx);
1331 	dev_data->mailbox[mailbox_idx].cb = NULL;
1332 	k_sem_give(&dev_data->tx_sem);
1333 }
1334 
mcp251xfd_init_con_reg(const struct device * dev)1335 static inline int mcp251xfd_init_con_reg(const struct device *dev)
1336 {
1337 	uint32_t *reg;
1338 	uint32_t tmp;
1339 
1340 	reg = mcp251xfd_get_spi_buf_ptr(dev);
1341 	tmp = MCP251XFD_REG_CON_ISOCRCEN | MCP251XFD_REG_CON_WAKFIL | MCP251XFD_REG_CON_TXQEN |
1342 	      MCP251XFD_REG_CON_STEF;
1343 	tmp |= FIELD_PREP(MCP251XFD_REG_CON_WFT_MASK, MCP251XFD_REG_CON_WFT_T11FILTER) |
1344 		FIELD_PREP(MCP251XFD_REG_CON_REQOP_MASK, MCP251XFD_REG_CON_MODE_CONFIG);
1345 	*reg = tmp;
1346 
1347 	return mcp251xfd_write(dev, MCP251XFD_REG_CON, MCP251XFD_REG_SIZE);
1348 }
1349 
mcp251xfd_init_osc_reg(const struct device * dev)1350 static inline int mcp251xfd_init_osc_reg(const struct device *dev)
1351 {
1352 	int ret;
1353 	const struct mcp251xfd_config *dev_cfg = dev->config;
1354 	uint32_t *reg = mcp251xfd_get_spi_buf_ptr(dev);
1355 	uint32_t reg_value = MCP251XFD_REG_OSC_OSCRDY;
1356 	uint32_t tmp;
1357 
1358 	tmp = FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK, dev_cfg->clko_div);
1359 	if (dev_cfg->pll_enable) {
1360 		tmp |= MCP251XFD_REG_OSC_PLLEN;
1361 		reg_value |= MCP251XFD_REG_OSC_PLLRDY;
1362 	}
1363 
1364 	*reg = sys_cpu_to_le32(tmp);
1365 
1366 	ret = mcp251xfd_write(dev, MCP251XFD_REG_OSC, MCP251XFD_REG_SIZE);
1367 	if (ret < 0) {
1368 		return ret;
1369 	}
1370 
1371 	return mcp251xfd_reg_check_value_wtimeout(dev, MCP251XFD_REG_OSC, reg_value, reg_value,
1372 						  MCP251XFD_PLLRDY_TIMEOUT_USEC,
1373 						  MCP251XFD_PLLRDY_RETRIES, false);
1374 }
1375 
mcp251xfd_init_iocon_reg(const struct device * dev)1376 static inline int mcp251xfd_init_iocon_reg(const struct device *dev)
1377 {
1378 	const struct mcp251xfd_config *dev_cfg = dev->config;
1379 	uint32_t *reg = mcp251xfd_get_spi_buf_ptr(dev);
1380 	uint32_t tmp;
1381 
1382 /*
1383  *         MCP2518FD Errata: DS80000789
1384  *         Writing Byte 2/3 of the IOCON register using single SPI write cleat LAT0 and LAT1.
1385  *         This has no effect in the current version since LAT0/1 are set to zero anyway.
1386  *         However, it needs to be properly handled if other values are needed. Errata suggests
1387  *         to do single byte writes instead.
1388  */
1389 
1390 	tmp = MCP251XFD_REG_IOCON_TRIS0 | MCP251XFD_REG_IOCON_TRIS1 | MCP251XFD_REG_IOCON_PM0 |
1391 	      MCP251XFD_REG_IOCON_PM1;
1392 
1393 	if (dev_cfg->sof_on_clko) {
1394 		tmp |= MCP251XFD_REG_IOCON_SOF;
1395 	}
1396 
1397 	*reg = sys_cpu_to_le32(tmp);
1398 
1399 	return  mcp251xfd_write(dev, MCP251XFD_REG_IOCON, MCP251XFD_REG_SIZE);
1400 }
1401 
mcp251xfd_init_int_reg(const struct device * dev)1402 static inline int mcp251xfd_init_int_reg(const struct device *dev)
1403 {
1404 	uint32_t *reg = mcp251xfd_get_spi_buf_ptr(dev);
1405 	uint32_t tmp;
1406 
1407 	tmp = MCP251XFD_REG_INT_RXIE | MCP251XFD_REG_INT_MODIE | MCP251XFD_REG_INT_TEFIE |
1408 	      MCP251XFD_REG_INT_CERRIE;
1409 #if defined(CONFIG_CAN_STATS)
1410 	tmp |= MCP251XFD_REG_INT_RXOVIE;
1411 #endif
1412 
1413 	*reg = sys_cpu_to_le32(tmp);
1414 
1415 	return mcp251xfd_write(dev, MCP251XFD_REG_INT, MCP251XFD_REG_SIZE);
1416 }
1417 
mcp251xfd_init_tef_fifo(const struct device * dev)1418 static inline int mcp251xfd_init_tef_fifo(const struct device *dev)
1419 {
1420 	uint32_t *reg = mcp251xfd_get_spi_buf_ptr(dev);
1421 	uint32_t tmp;
1422 
1423 	tmp = MCP251XFD_REG_TEFCON_TEFNEIE | MCP251XFD_REG_TEFCON_FRESET;
1424 	tmp |= FIELD_PREP(MCP251XFD_REG_TEFCON_FSIZE_MASK, MCP251XFD_TX_QUEUE_ITEMS - 1);
1425 
1426 	*reg = sys_cpu_to_le32(tmp);
1427 
1428 	return mcp251xfd_write(dev, MCP251XFD_REG_TEFCON, MCP251XFD_REG_SIZE);
1429 }
1430 
mcp251xfd_init_tx_queue(const struct device * dev)1431 static inline int mcp251xfd_init_tx_queue(const struct device *dev)
1432 {
1433 	uint32_t *reg = mcp251xfd_get_spi_buf_ptr(dev);
1434 	uint32_t tmp;
1435 
1436 	tmp = MCP251XFD_REG_TXQCON_TXEN | MCP251XFD_REG_TXQCON_FRESET;
1437 	tmp |= FIELD_PREP(MCP251XFD_REG_TXQCON_TXAT_MASK, MCP251XFD_REG_TXQCON_TXAT_UNLIMITED);
1438 	tmp |= FIELD_PREP(MCP251XFD_REG_TXQCON_FSIZE_MASK, MCP251XFD_TX_QUEUE_ITEMS - 1);
1439 	tmp |= FIELD_PREP(MCP251XFD_REG_TXQCON_PLSIZE_MASK,
1440 			  can_bytes_to_dlc(MCP251XFD_PAYLOAD_SIZE) - 8);
1441 
1442 	*reg = sys_cpu_to_le32(tmp);
1443 
1444 	return mcp251xfd_write(dev, MCP251XFD_REG_TXQCON, MCP251XFD_REG_SIZE);
1445 }
1446 
mcp251xfd_init_rx_fifo(const struct device * dev)1447 static inline int mcp251xfd_init_rx_fifo(const struct device *dev)
1448 {
1449 	uint32_t *reg = mcp251xfd_get_spi_buf_ptr(dev);
1450 	uint32_t tmp;
1451 
1452 	tmp = MCP251XFD_REG_FIFOCON_TFNRFNIE | MCP251XFD_REG_FIFOCON_FRESET;
1453 #if defined(CONFIG_CAN_STATS)
1454 	tmp |= MCP251XFD_REG_FIFOCON_RXOVIE;
1455 #endif
1456 	tmp |= FIELD_PREP(MCP251XFD_REG_FIFOCON_FSIZE_MASK, MCP251XFD_RX_FIFO_ITEMS - 1);
1457 	tmp |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
1458 			  can_bytes_to_dlc(MCP251XFD_PAYLOAD_SIZE) - 8);
1459 #if defined(CONFIG_CAN_RX_TIMESTAMP)
1460 	tmp |= MCP251XFD_REG_FIFOCON_RXTSEN;
1461 #endif
1462 
1463 	*reg = sys_cpu_to_le32(tmp);
1464 
1465 	return mcp251xfd_write(dev, MCP251XFD_REG_FIFOCON(MCP251XFD_RX_FIFO_IDX),
1466 			       MCP251XFD_REG_SIZE);
1467 }
1468 
1469 #if defined(CONFIG_CAN_RX_TIMESTAMP)
mcp251xfd_init_tscon(const struct device * dev)1470 static int mcp251xfd_init_tscon(const struct device *dev)
1471 {
1472 	uint32_t *reg = mcp251xfd_get_spi_buf_ptr(dev);
1473 	const struct mcp251xfd_config *dev_cfg = dev->config;
1474 	uint32_t tmp;
1475 
1476 	tmp = MCP251XFD_REG_TSCON_TBCEN;
1477 	tmp |= FIELD_PREP(MCP251XFD_REG_TSCON_TBCPRE_MASK,
1478 			  dev_cfg->timestamp_prescaler - 1);
1479 
1480 	*reg = sys_cpu_to_le32(tmp);
1481 
1482 	return mcp251xfd_write(dev, MCP251XFD_REG_TSCON, MCP251XFD_REG_SIZE);
1483 }
1484 #endif
1485 
mcp251xfd_reset(const struct device * dev)1486 static int mcp251xfd_reset(const struct device *dev)
1487 {
1488 	const struct mcp251xfd_config *dev_cfg = dev->config;
1489 	uint16_t cmd = sys_cpu_to_be16(MCP251XFD_SPI_INSTRUCTION_RESET);
1490 	const struct spi_buf tx_buf = {.buf = &cmd, .len = sizeof(cmd),};
1491 	const struct spi_buf_set tx = {.buffers = &tx_buf, .count = 1};
1492 	int ret;
1493 
1494 	/* device can only be reset when in configuration mode */
1495 	ret = mcp251xfd_set_mode_internal(dev, MCP251XFD_REG_CON_MODE_CONFIG);
1496 	if (ret < 0) {
1497 		return ret;
1498 	}
1499 
1500 	return spi_write_dt(&dev_cfg->bus, &tx);
1501 }
1502 
mcp251xfd_init(const struct device * dev)1503 static int mcp251xfd_init(const struct device *dev)
1504 {
1505 	const struct mcp251xfd_config *dev_cfg = dev->config;
1506 	struct mcp251xfd_data *dev_data = dev->data;
1507 	uint32_t *reg;
1508 	uint8_t opmod;
1509 	int ret;
1510 	struct can_timing timing = { 0 };
1511 #if defined(CONFIG_CAN_FD_MODE)
1512 	struct can_timing timing_data = { 0 };
1513 #endif
1514 
1515 	dev_data->dev = dev;
1516 
1517 	if (dev_cfg->clk_dev != NULL) {
1518 		uint32_t clk_id = dev_cfg->clk_id;
1519 
1520 		if (!device_is_ready(dev_cfg->clk_dev)) {
1521 			LOG_ERR("Clock controller not ready");
1522 			return -ENODEV;
1523 		}
1524 
1525 		ret = clock_control_on(dev_cfg->clk_dev, (clock_control_subsys_t)clk_id);
1526 		if (ret < 0) {
1527 			LOG_ERR("Failed to enable clock [%d]", ret);
1528 			return ret;
1529 		}
1530 	}
1531 
1532 	k_sem_init(&dev_data->int_sem, 0, 1);
1533 	k_sem_init(&dev_data->tx_sem, MCP251XFD_TX_QUEUE_ITEMS, MCP251XFD_TX_QUEUE_ITEMS);
1534 
1535 	k_mutex_init(&dev_data->mutex);
1536 
1537 	if (!spi_is_ready_dt(&dev_cfg->bus)) {
1538 		LOG_ERR("SPI bus %s not ready", dev_cfg->bus.bus->name);
1539 		return -ENODEV;
1540 	}
1541 
1542 	if (!gpio_is_ready_dt(&dev_cfg->int_gpio_dt)) {
1543 		LOG_ERR("GPIO port not ready");
1544 		return -ENODEV;
1545 	}
1546 
1547 	if (gpio_pin_configure_dt(&dev_cfg->int_gpio_dt, GPIO_INPUT) < 0) {
1548 		LOG_ERR("Unable to configure GPIO pin");
1549 		return -EINVAL;
1550 	}
1551 
1552 	gpio_init_callback(&dev_data->int_gpio_cb, mcp251xfd_int_gpio_callback,
1553 			   BIT(dev_cfg->int_gpio_dt.pin));
1554 
1555 	if (gpio_add_callback_dt(&dev_cfg->int_gpio_dt, &dev_data->int_gpio_cb) < 0) {
1556 		return -EINVAL;
1557 	}
1558 
1559 	if (gpio_pin_interrupt_configure_dt(&dev_cfg->int_gpio_dt, GPIO_INT_LEVEL_ACTIVE) < 0) {
1560 		return -EINVAL;
1561 	}
1562 
1563 	k_thread_create(&dev_data->int_thread, dev_data->int_thread_stack,
1564 			CONFIG_CAN_MCP251XFD_INT_THREAD_STACK_SIZE,
1565 			(k_thread_entry_t)mcp251xfd_int_thread, (void *)dev, NULL, NULL,
1566 			K_PRIO_COOP(CONFIG_CAN_MCP251XFD_INT_THREAD_PRIO), 0, K_NO_WAIT);
1567 
1568 	(void)k_thread_name_set(&dev_data->int_thread, "MCP251XFD interrupt thread");
1569 
1570 	ret = mcp251xfd_reset(dev);
1571 	if (ret < 0) {
1572 		LOG_ERR("Failed to reset the device [%d]", ret);
1573 		goto done;
1574 	}
1575 
1576 	ret = can_calc_timing(dev, &timing, dev_cfg->common.bitrate,
1577 			      dev_cfg->common.sample_point);
1578 	if (ret < 0) {
1579 		LOG_ERR("Can't find timing for given param");
1580 		goto done;
1581 	}
1582 
1583 	LOG_DBG("Presc: %d, BS1: %d, BS2: %d", timing.prescaler, timing.phase_seg1,
1584 		timing.phase_seg2);
1585 	LOG_DBG("Sample-point err : %d", ret);
1586 
1587 #if defined(CONFIG_CAN_FD_MODE)
1588 	ret = can_calc_timing_data(dev, &timing_data, dev_cfg->common.bitrate_data,
1589 				   dev_cfg->common.sample_point_data);
1590 	if (ret < 0) {
1591 		LOG_ERR("Can't find data timing for given param");
1592 		goto done;
1593 	}
1594 
1595 	LOG_DBG("Data phase Presc: %d, BS1: %d, BS2: %d", timing_data.prescaler,
1596 		timing_data.phase_seg1, timing_data.phase_seg2);
1597 	LOG_DBG("Data phase Sample-point err : %d", ret);
1598 #endif
1599 
1600 	reg = mcp251xfd_read_crc(dev, MCP251XFD_REG_CON, MCP251XFD_REG_SIZE);
1601 	if (!reg) {
1602 		ret = -EINVAL;
1603 		goto done;
1604 	}
1605 
1606 	*reg = sys_le32_to_cpu(*reg);
1607 
1608 	opmod = FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK, *reg);
1609 
1610 	if (opmod != MCP251XFD_REG_CON_MODE_CONFIG) {
1611 		LOG_ERR("Device did not reset into configuration mode [%d]", opmod);
1612 		ret = -EIO;
1613 		goto done;
1614 	}
1615 
1616 	dev_data->current_mcp251xfd_mode = MCP251XFD_REG_CON_MODE_CONFIG;
1617 
1618 	ret = mcp251xfd_init_con_reg(dev);
1619 	if (ret < 0) {
1620 		goto done;
1621 	}
1622 
1623 	ret = mcp251xfd_init_osc_reg(dev);
1624 	if (ret < 0) {
1625 		goto done;
1626 	}
1627 
1628 	ret = mcp251xfd_init_iocon_reg(dev);
1629 	if (ret < 0) {
1630 		goto done;
1631 	}
1632 
1633 	ret = mcp251xfd_init_int_reg(dev);
1634 	if (ret < 0) {
1635 		goto done;
1636 	}
1637 
1638 	ret = mcp251xfd_set_tdc(dev, false);
1639 	if (ret < 0) {
1640 		goto done;
1641 	}
1642 
1643 #if defined(CONFIG_CAN_RX_TIMESTAMP)
1644 	ret = mcp251xfd_init_tscon(dev);
1645 	if (ret < 0) {
1646 		goto done;
1647 	}
1648 #endif
1649 
1650 	ret = mcp251xfd_init_tef_fifo(dev);
1651 	if (ret < 0) {
1652 		goto done;
1653 	}
1654 
1655 	ret = mcp251xfd_init_tx_queue(dev);
1656 	if (ret < 0) {
1657 		goto done;
1658 	}
1659 
1660 	ret = mcp251xfd_init_rx_fifo(dev);
1661 	if (ret < 0) {
1662 		goto done;
1663 	}
1664 
1665 	LOG_DBG("%d TX FIFOS: 1 element", MCP251XFD_TX_QUEUE_ITEMS);
1666 	LOG_DBG("1 RX FIFO: %d elements", MCP251XFD_RX_FIFO_ITEMS);
1667 	LOG_DBG("%db of %db RAM Allocated",
1668 		MCP251XFD_TEF_FIFO_SIZE + MCP251XFD_TX_QUEUE_SIZE + MCP251XFD_RX_FIFO_SIZE,
1669 		MCP251XFD_RAM_SIZE);
1670 
1671 done:
1672 	ret = can_set_timing(dev, &timing);
1673 	if (ret < 0) {
1674 		return ret;
1675 	}
1676 
1677 #if defined(CONFIG_CAN_FD_MODE)
1678 	ret = can_set_timing_data(dev, &timing_data);
1679 	if (ret < 0) {
1680 		return ret;
1681 	}
1682 #endif
1683 
1684 	return ret;
1685 }
1686 
/* CAN controller driver API table (bound to instances in MCP251XFD_INIT) */
static const struct can_driver_api mcp251xfd_api_funcs = {
	.get_capabilities = mcp251xfd_get_capabilities,
	.set_mode = mcp251xfd_set_mode,
	.set_timing = mcp251xfd_set_timing,
#if defined(CONFIG_CAN_FD_MODE)
	.set_timing_data = mcp251xfd_set_timing_data,
#endif
	.start = mcp251xfd_start,
	.stop = mcp251xfd_stop,
	.send = mcp251xfd_send,
	.add_rx_filter = mcp251xfd_add_rx_filter,
	.remove_rx_filter = mcp251xfd_remove_rx_filter,
	.get_state = mcp251xfd_get_state,
	.set_state_change_callback = mcp251xfd_set_state_change_callback,
	.get_core_clock = mcp251xfd_get_core_clock,
	.get_max_filters = mcp251xfd_get_max_filters,
	/* Nominal (arbitration phase) bit-timing limits */
	.timing_min = {
		.sjw = 1,
		.prop_seg = 0,
		.phase_seg1 = 2,
		.phase_seg2 = 1,
		.prescaler = 1,
	},
	.timing_max = {
		.sjw = 128,
		.prop_seg = 0,
		.phase_seg1 = 256,
		.phase_seg2 = 128,
		.prescaler = 256,
	},
#if defined(CONFIG_CAN_FD_MODE)
	/* Data phase bit-timing limits (CAN FD) */
	.timing_data_min = {
		.sjw = 1,
		.prop_seg = 0,
		.phase_seg1 = 1,
		.phase_seg2 = 1,
		.prescaler = 1,
	},
	.timing_data_max = {
		.sjw = 16,
		.prop_seg = 0,
		.phase_seg1 = 32,
		.phase_seg2 = 16,
		.prescaler = 256,
	},
#endif
};
1734 
/* Expands to the clock-controller config fields only when the devicetree
 * node has a `clocks` property; otherwise expands to nothing (clk_dev is
 * then left NULL and the init code skips clock_control_on()).
 */
#define MCP251XFD_SET_CLOCK(inst)                                                                  \
	COND_CODE_1(DT_INST_NODE_HAS_PROP(inst, clocks),                                           \
		    (.clk_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(inst)),                          \
		     .clk_id = DT_INST_CLOCKS_CELL(inst, id)),                                     \
		    ())

/* Per-instance definition: interrupt thread stack, mutable driver data,
 * immutable config (SPI bus, INT GPIO, devicetree properties, RX/TEF FIFO
 * layout with their message handlers), and the device registration.
 */
#define MCP251XFD_INIT(inst)                                                                       \
	static K_KERNEL_STACK_DEFINE(mcp251xfd_int_stack_##inst,                                   \
				     CONFIG_CAN_MCP251XFD_INT_THREAD_STACK_SIZE);                  \
                                                                                                   \
	static struct mcp251xfd_data mcp251xfd_data_##inst = {                                     \
		.int_thread_stack = mcp251xfd_int_stack_##inst,                                    \
	};                                                                                         \
	static const struct mcp251xfd_config mcp251xfd_config_##inst = {                           \
		.common = CAN_DT_DRIVER_CONFIG_INST_GET(inst, 0, 8000000),                         \
		.bus = SPI_DT_SPEC_INST_GET(inst, SPI_WORD_SET(8), 0),                             \
		.int_gpio_dt = GPIO_DT_SPEC_INST_GET(inst, int_gpios),                             \
                                                                                                   \
		.sof_on_clko = DT_INST_PROP(inst, sof_on_clko),                                    \
		.clko_div = DT_INST_ENUM_IDX(inst, clko_div),                                      \
		.pll_enable = DT_INST_PROP(inst, pll_enable),                                      \
		.timestamp_prescaler = DT_INST_PROP(inst, timestamp_prescaler),                    \
                                                                                                   \
		.osc_freq = DT_INST_PROP(inst, osc_freq),                                          \
                                                                                                   \
		.rx_fifo = {.ram_start_addr = MCP251XFD_RX_FIFO_START_ADDR,                        \
			    .reg_fifocon_addr = MCP251XFD_REG_FIFOCON(MCP251XFD_RX_FIFO_IDX),      \
			    .capacity = MCP251XFD_RX_FIFO_ITEMS,                                   \
			    .item_size = MCP251XFD_RX_FIFO_ITEM_SIZE,                              \
			    .msg_handler = mcp251xfd_rx_fifo_handler},                             \
		.tef_fifo = {.ram_start_addr = MCP251XFD_TEF_FIFO_START_ADDR,                      \
			     .reg_fifocon_addr = MCP251XFD_REG_TEFCON,                             \
			     .capacity = MCP251XFD_TEF_FIFO_ITEMS,                                 \
			     .item_size = MCP251XFD_TEF_FIFO_ITEM_SIZE,                            \
			     .msg_handler = mcp251xfd_tef_fifo_handler},                           \
		MCP251XFD_SET_CLOCK(inst)                                                          \
	};                                                                                         \
                                                                                                   \
	CAN_DEVICE_DT_INST_DEFINE(inst, mcp251xfd_init, NULL, &mcp251xfd_data_##inst,             \
				  &mcp251xfd_config_##inst, POST_KERNEL, CONFIG_CAN_INIT_PRIORITY, \
				  &mcp251xfd_api_funcs);

DT_INST_FOREACH_STATUS_OKAY(MCP251XFD_INIT)
1778