1 /*
2  * Copyright (c) 2020 Abram Early
3  * Copyright (c) 2023 Andriy Gelman
4  *
5  * SPDX-License-Identifier: Apache-2.0
6  */
7 
8 #define DT_DRV_COMPAT microchip_mcp251xfd
9 
10 #include "can_mcp251xfd.h"
11 
12 #include <zephyr/device.h>
13 #include <zephyr/drivers/can/transceiver.h>
14 #include <zephyr/drivers/clock_control.h>
15 #include <zephyr/kernel.h>
16 #include <zephyr/sys/byteorder.h>
17 #include <zephyr/sys/crc.h>
18 
19 #include <zephyr/logging/log.h>
20 LOG_MODULE_REGISTER(can_mcp251xfd, CONFIG_CAN_LOG_LEVEL);
21 
22 #define SP_IS_SET(inst) DT_INST_NODE_HAS_PROP(inst, sample_point) ||
23 
24 /*
25  * Macro to exclude the sample point algorithm from compilation if not used
26  * Without the macro, the algorithm would always waste ROM
27  */
28 #define USE_SP_ALGO (DT_INST_FOREACH_STATUS_OKAY(SP_IS_SET) 0)
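/*
 * Example: with two enabled instances where only the first one sets a
 * sample-point, DT_INST_FOREACH_STATUS_OKAY(SP_IS_SET) expands to "1 || 0 ||",
 * so USE_SP_ALGO evaluates to (1 || 0 || 0) and the calculation code is kept;
 * if no instance sets a sample-point it collapses to (0) and is compiled out.
 */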
29 
30 static void mcp251xfd_canframe_to_txobj(const struct can_frame *src, int mailbox_idx,
31 					struct mcp251xfd_txobj *dst)
32 {
33 	memset(dst, 0, sizeof(*dst));
34 
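	/*
	 * Extended (29-bit) identifiers are split between the SID and EID
	 * fields: SID holds the upper 11 bits and EID the lower 18 bits.
	 * For example, id = 0x18FEF100 gives SID = id >> 18 = 0x63F and
	 * EID = id & 0x3FFFF = 0x2F100.
	 */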
35 	if ((src->flags & CAN_FRAME_IDE) != 0) {
36 		dst->id = FIELD_PREP(MCP251XFD_OBJ_ID_SID_MASK, src->id >> 18);
37 		dst->id |= FIELD_PREP(MCP251XFD_OBJ_ID_EID_MASK, src->id);
38 
39 		dst->flags |= MCP251XFD_OBJ_FLAGS_IDE;
40 	} else {
41 		dst->id = FIELD_PREP(MCP251XFD_OBJ_ID_SID_MASK, src->id);
42 	}
43 
44 	if ((src->flags & CAN_FRAME_BRS) != 0) {
45 		dst->flags |= MCP251XFD_OBJ_FLAGS_BRS;
46 	}
47 
48 	dst->flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_DLC_MASK, src->dlc);
49 #if defined(CONFIG_CAN_FD_MODE)
50 	if ((src->flags & CAN_FRAME_FDF) != 0) {
51 		dst->flags |= MCP251XFD_OBJ_FLAGS_FDF;
52 	}
53 #endif
54 	dst->flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_SEQ_MASK, mailbox_idx);
55 
56 	if ((src->flags & CAN_FRAME_RTR) != 0) {
57 		dst->flags |= MCP251XFD_OBJ_FLAGS_RTR;
58 	} else {
59 		memcpy(dst->data, src->data, MIN(can_dlc_to_bytes(src->dlc), CAN_MAX_DLEN));
60 	}
61 
62 	dst->id = sys_cpu_to_le32(dst->id);
63 	dst->flags = sys_cpu_to_le32(dst->flags);
64 }
65 
66 static void *mcp251xfd_read_reg(const struct device *dev, uint16_t addr, int len)
67 {
68 	const struct mcp251xfd_config *dev_cfg = dev->config;
69 	struct mcp251xfd_data *dev_data = dev->data;
70 	struct mcp251xfd_spi_data *spi_data = &dev_data->spi_data;
71 	uint16_t spi_cmd;
72 	int ret;
73 
74 	spi_cmd = sys_cpu_to_be16(MCP251XFD_SPI_INSTRUCTION_READ | addr);
75 	memcpy(&spi_data->header[1], &spi_cmd, sizeof(spi_cmd));
76 
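	/*
	 * The command is staged at &header[1] so that the two instruction bytes
	 * immediately precede spi_data->buf[] in memory; after the full-duplex
	 * transfer the register contents therefore start at &buf[0].
	 */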
77 	struct spi_buf tx_buf = {.buf = &spi_data->header[1], .len = MCP251XFD_SPI_CMD_LEN + len};
78 	struct spi_buf rx_buf = {.buf = &spi_data->header[1], .len = MCP251XFD_SPI_CMD_LEN + len};
79 
80 	const struct spi_buf_set tx = {.buffers = &tx_buf, .count = 1};
81 	const struct spi_buf_set rx = {.buffers = &rx_buf, .count = 1};
82 
83 	ret = spi_transceive_dt(&dev_cfg->bus, &tx, &rx);
84 	if (ret < 0) {
85 		return NULL;
86 	}
87 
88 	return &spi_data->buf[0];
89 }
90 
91 static void *mcp251xfd_read_crc(const struct device *dev, uint16_t addr, int len)
92 {
93 	const struct mcp251xfd_config *dev_cfg = dev->config;
94 	struct mcp251xfd_data *dev_data = dev->data;
95 	struct mcp251xfd_spi_data *spi_data = &dev_data->spi_data;
96 	int num_retries = CONFIG_CAN_MCP251XFD_READ_CRC_RETRIES + 1;
97 	int ret;
98 
99 	while (num_retries-- > 0) {
100 		uint16_t crc_in, crc, spi_cmd;
101 
102 		struct spi_buf tx_buf = {.buf = &spi_data->header[0],
103 					 .len = MCP251XFD_SPI_CMD_LEN +
104 						MCP251XFD_SPI_LEN_FIELD_LEN + len +
105 						MCP251XFD_SPI_CRC_LEN};
106 
107 		struct spi_buf rx_buf = {.buf = &spi_data->header[0],
108 					 .len = MCP251XFD_SPI_CMD_LEN +
109 						MCP251XFD_SPI_LEN_FIELD_LEN + len +
110 						MCP251XFD_SPI_CRC_LEN};
111 
112 		const struct spi_buf_set tx = {.buffers = &tx_buf, .count = 1};
113 		const struct spi_buf_set rx = {.buffers = &rx_buf, .count = 1};
114 
115 		spi_cmd = sys_cpu_to_be16(MCP251XFD_SPI_INSTRUCTION_READ_CRC | addr);
116 		memcpy(&spi_data->header[0], &spi_cmd, sizeof(spi_cmd));
117 		spi_data->header[2] = len;
118 
119 		/*
120 		 * Evaluate the initial CRC over spi_cmd and the length byte, as these values will
121 		 * change after the SPI transaction is finished.
122 		 */
123 		crc_in = crc16(MCP251XFD_CRC_POLY, MCP251XFD_CRC_SEED,
124 			       (uint8_t *)(&spi_data->header[0]),
125 			       MCP251XFD_SPI_CMD_LEN + MCP251XFD_SPI_LEN_FIELD_LEN);
126 
127 		ret = spi_transceive_dt(&dev_cfg->bus, &tx, &rx);
128 		if (ret < 0) {
129 			continue;
130 		}
131 
132 		/* Continue crc calculation over the data field and the crc field */
133 		crc = crc16(MCP251XFD_CRC_POLY, crc_in, &spi_data->buf[0],
134 			    len + MCP251XFD_SPI_CRC_LEN);
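		/*
		 * Running the CRC over the received payload plus the appended
		 * CRC bytes yields zero for an intact frame, so any non-zero
		 * remainder simply triggers another retry.
		 */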
135 		if (crc == 0) {
136 			return &spi_data->buf[0];
137 		}
138 	}
139 
140 	return NULL;
141 }
142 
143 static inline void *mcp251xfd_get_spi_buf_ptr(const struct device *dev)
144 {
145 	struct mcp251xfd_data *dev_data = dev->data;
146 	struct mcp251xfd_spi_data *spi_data = &dev_data->spi_data;
147 
148 	return &spi_data->buf[0];
149 }
150 
151 static int mcp251xfd_write(const struct device *dev, uint16_t addr, int len)
152 {
153 	const struct mcp251xfd_config *dev_cfg = dev->config;
154 	struct mcp251xfd_data *dev_data = dev->data;
155 	struct mcp251xfd_spi_data *spi_data = &dev_data->spi_data;
156 	uint16_t spi_cmd;
157 
158 	struct spi_buf tx_buf = {.buf = &spi_data->header[1], .len = MCP251XFD_SPI_CMD_LEN + len};
159 	const struct spi_buf_set tx = {.buffers = &tx_buf, .count = 1};
160 
161 	spi_cmd = sys_cpu_to_be16(MCP251XFD_SPI_INSTRUCTION_WRITE | addr);
162 	memcpy(&spi_data->header[1], &spi_cmd, sizeof(spi_cmd));
163 
164 	return spi_write_dt(&dev_cfg->bus, &tx);
165 }
166 
167 static int mcp251xfd_fifo_write(const struct device *dev, int mailbox_idx,
168 				const struct can_frame *msg)
169 {
170 	uint32_t *regs;
171 	struct mcp251xfd_txobj *txobj;
172 	uint8_t *reg_byte;
173 	uint16_t address;
174 	int tx_len;
175 	int ret;
176 
177 	/* read fifosta and ua at the same time */
178 	regs = mcp251xfd_read_crc(dev, MCP251XFD_REG_TXQSTA, MCP251XFD_REG_SIZE * 2);
179 	if (!regs) {
180 		LOG_ERR("Failed to read 8 bytes from REG_TXQSTA");
181 		return -EINVAL;
182 	}
183 
184 	/* check if fifo is full */
185 	if (!(regs[0] & MCP251XFD_REG_TXQSTA_TXQNIF)) {
186 		return -ENOMEM;
187 	}
188 
189 	address = MCP251XFD_RAM_START_ADDR + regs[1];
190 
191 	txobj = mcp251xfd_get_spi_buf_ptr(dev);
192 	mcp251xfd_canframe_to_txobj(msg, mailbox_idx, txobj);
193 
194 	tx_len = MCP251XFD_OBJ_HEADER_SIZE +
195 		 ROUND_UP(can_dlc_to_bytes(msg->dlc), MCP251XFD_RAM_ALIGNMENT);
196 
197 	ret = mcp251xfd_write(dev, address, tx_len);
198 	if (ret < 0) {
199 		return ret;
200 	}
201 
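	/*
	 * UINC and TXREQ both live in byte 1 of TXQCON, so a single one-byte
	 * write to TXQCON + 1 both advances the queue head and requests
	 * transmission of the object that was just loaded.
	 */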
202 	reg_byte = mcp251xfd_get_spi_buf_ptr(dev);
203 	*reg_byte = MCP251XFD_UINT32_FLAG_TO_BYTE_MASK(MCP251XFD_REG_TXQCON_UINC |
204 						       MCP251XFD_REG_TXQCON_TXREQ);
205 
206 	return mcp251xfd_write(dev, MCP251XFD_REG_TXQCON + 1, 1);
207 }
208 
209 static void mcp251xfd_rxobj_to_canframe(struct mcp251xfd_rxobj *src, struct can_frame *dst)
210 {
211 	memset(dst, 0, sizeof(*dst));
212 
213 	src->id = sys_le32_to_cpu(src->id);
214 	src->flags = sys_le32_to_cpu(src->flags);
215 
216 	if ((src->flags & MCP251XFD_OBJ_FLAGS_IDE) != 0) {
217 		dst->id = FIELD_GET(MCP251XFD_OBJ_ID_EID_MASK, src->id);
218 		dst->id |= FIELD_GET(MCP251XFD_OBJ_ID_SID_MASK, src->id) << 18;
219 		dst->flags |= CAN_FRAME_IDE;
220 	} else {
221 		dst->id = FIELD_GET(MCP251XFD_OBJ_ID_SID_MASK, src->id);
222 	}
223 
224 	if ((src->flags & MCP251XFD_OBJ_FLAGS_BRS) != 0) {
225 		dst->flags |= CAN_FRAME_BRS;
226 	}
227 
228 #if defined(CONFIG_CAN_FD_MODE)
229 	if ((src->flags & MCP251XFD_OBJ_FLAGS_FDF) != 0) {
230 		dst->flags |= CAN_FRAME_FDF;
231 	}
232 #endif
233 
234 	dst->dlc = FIELD_GET(MCP251XFD_OBJ_FLAGS_DLC_MASK, src->flags);
235 
236 #if defined(CONFIG_CAN_RX_TIMESTAMP)
237 	dst->timestamp = sys_le32_to_cpu(src->timestamp);
238 #endif
239 
240 	if ((src->flags & MCP251XFD_OBJ_FLAGS_RTR) != 0) {
241 		dst->flags |= CAN_FRAME_RTR;
242 	} else {
243 		memcpy(dst->data, src->data, MIN(can_dlc_to_bytes(dst->dlc), CAN_MAX_DLEN));
244 	}
245 }
246 
247 static int mcp251xfd_get_mode_internal(const struct device *dev, uint8_t *mode)
248 {
249 	uint8_t *reg_byte;
250 	uint32_t mask = MCP251XFD_UINT32_FLAG_TO_BYTE_MASK(MCP251XFD_REG_CON_OPMOD_MASK);
251 
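	/*
	 * OPMOD lives in byte 2 of the CON register, so reading the single
	 * byte at CON_B2 (with CRC protection) is sufficient; the 32-bit field
	 * mask is folded down to its in-byte equivalent first.
	 */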
252 	reg_byte = mcp251xfd_read_crc(dev, MCP251XFD_REG_CON_B2, 1);
253 	if (!reg_byte) {
254 		return -EINVAL;
255 	}
256 
257 	*mode = FIELD_GET(mask, *reg_byte);
258 
259 	return 0;
260 }
261 
262 static int mcp251xfd_reg_check_value_wtimeout(const struct device *dev, uint16_t addr,
263 					      uint32_t value, uint32_t mask,
264 					      uint32_t timeout_usec, int retries, bool allow_yield)
265 {
266 	uint32_t *reg;
267 	uint32_t delay = timeout_usec / retries;
268 
269 	for (;;) {
270 		reg = mcp251xfd_read_crc(dev, addr, MCP251XFD_REG_SIZE);
271 		if (!reg) {
272 			return -EINVAL;
273 		}
274 
275 		*reg = sys_le32_to_cpu(*reg);
276 
277 		if ((*reg & mask) == value) {
278 			return 0;
279 		}
280 
281 		if (--retries < 0) {
282 			LOG_ERR("Timeout validating register 0x%x", addr);
283 			return -EIO;
284 		}
285 
286 		if (allow_yield) {
287 			k_sleep(K_USEC(delay));
288 		} else {
289 			k_busy_wait(delay);
290 		}
291 	}
292 	return 0;
293 }
294 
295 static int mcp251xfd_set_tdc(const struct device *dev, bool is_enabled, int tdc_offset)
296 {
297 	uint32_t *reg;
298 
299 	if (is_enabled &&
300 	    (tdc_offset < MCP251XFD_REG_TDC_TDCO_MIN || tdc_offset > MCP251XFD_REG_TDC_TDCO_MAX)) {
301 		return -EINVAL;
302 	}
303 
304 	reg = mcp251xfd_get_spi_buf_ptr(dev);
305 
306 	if (is_enabled) {
307 		*reg = FIELD_PREP(MCP251XFD_REG_TDC_TDCMOD_MASK, MCP251XFD_REG_TDC_TDCMOD_AUTO);
308 		*reg |= FIELD_PREP(MCP251XFD_REG_TDC_TDCO_MASK, tdc_offset);
309 	} else {
310 		*reg = FIELD_PREP(MCP251XFD_REG_TDC_TDCMOD_MASK, MCP251XFD_REG_TDC_TDCMOD_DISABLED);
311 	}
312 
313 	*reg = sys_cpu_to_le32(*reg);
314 
315 	return mcp251xfd_write(dev, MCP251XFD_REG_TDC, MCP251XFD_REG_SIZE);
316 }
317 
318 static int mcp251xfd_set_mode_internal(const struct device *dev, uint8_t requested_mode)
319 {
320 	struct mcp251xfd_data *dev_data = dev->data;
321 	uint32_t *reg;
322 	uint32_t opmod, reg_con;
323 	int ret = 0;
324 
325 	k_mutex_lock(&dev_data->mutex, K_FOREVER);
326 
327 	reg = mcp251xfd_read_crc(dev, MCP251XFD_REG_CON, MCP251XFD_REG_SIZE);
328 	if (!reg) {
329 		ret = -EINVAL;
330 		goto done;
331 	}
332 
333 	reg_con = sys_le32_to_cpu(*reg);
334 
335 	opmod = FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK, reg_con);
336 	if (opmod == requested_mode) {
337 		goto done;
338 	}
339 
340 #if defined(CONFIG_CAN_FD_MODE)
341 	if (dev_data->current_mcp251xfd_mode == MCP251XFD_REG_CON_MODE_CONFIG) {
342 		if (requested_mode == MCP251XFD_REG_CON_MODE_CAN2_0 ||
343 		    requested_mode == MCP251XFD_REG_CON_MODE_EXT_LOOPBACK ||
344 		    requested_mode == MCP251XFD_REG_CON_MODE_INT_LOOPBACK) {
345 			ret = mcp251xfd_set_tdc(dev, false, 0);
346 		} else if (requested_mode == MCP251XFD_REG_CON_MODE_MIXED) {
347 			ret = mcp251xfd_set_tdc(dev, true, dev_data->tdco);
348 		}
349 
350 		if (ret < 0) {
351 			goto done;
352 		}
353 	}
354 #endif
355 
356 	reg_con &= ~MCP251XFD_REG_CON_REQOP_MASK;
357 	reg_con |= FIELD_PREP(MCP251XFD_REG_CON_REQOP_MASK, requested_mode);
358 
359 	*reg = sys_cpu_to_le32(reg_con);
360 
361 	ret = mcp251xfd_write(dev, MCP251XFD_REG_CON, MCP251XFD_REG_SIZE);
362 	if (ret < 0) {
363 		LOG_ERR("Failed to write REG_CON register [%d]", MCP251XFD_REG_CON);
364 		goto done;
365 	}
366 
367 	ret = mcp251xfd_reg_check_value_wtimeout(
368 		dev, MCP251XFD_REG_CON, FIELD_PREP(MCP251XFD_REG_CON_OPMOD_MASK, requested_mode),
369 		MCP251XFD_REG_CON_OPMOD_MASK, MCP251XFD_MODE_CHANGE_TIMEOUT_USEC,
370 		MCP251XFD_MODE_CHANGE_RETRIES, true);
371 done:
372 	k_mutex_unlock(&dev_data->mutex);
373 	return ret;
374 }
375 
376 static int mcp251xfd_set_mode(const struct device *dev, can_mode_t mode)
377 {
378 	struct mcp251xfd_data *dev_data = dev->data;
379 
380 	if (dev_data->started) {
381 		return -EBUSY;
382 	}
383 
384 	/* todo: Add CAN_MODE_ONE_SHOT support */
385 	if ((mode & (CAN_MODE_3_SAMPLES | CAN_MODE_ONE_SHOT)) != 0) {
386 		return -ENOTSUP;
387 	}
388 
389 	if (mode == CAN_MODE_NORMAL) {
390 		dev_data->next_mcp251xfd_mode = MCP251XFD_REG_CON_MODE_CAN2_0;
391 	}
392 
393 	if ((mode & CAN_MODE_FD) != 0) {
394 #if defined(CONFIG_CAN_FD_MODE)
395 		dev_data->next_mcp251xfd_mode = MCP251XFD_REG_CON_MODE_MIXED;
396 #else
397 		return -ENOTSUP;
398 #endif
399 	}
400 
401 	if ((mode & CAN_MODE_LISTENONLY) != 0) {
402 		dev_data->next_mcp251xfd_mode = MCP251XFD_REG_CON_MODE_LISTENONLY;
403 	}
404 
405 	if ((mode & CAN_MODE_LOOPBACK) != 0) {
406 		dev_data->next_mcp251xfd_mode = MCP251XFD_REG_CON_MODE_EXT_LOOPBACK;
407 	}
408 
409 	dev_data->mode = mode;
410 
411 	return 0;
412 }
413 
414 static int mcp251xfd_set_timing(const struct device *dev, const struct can_timing *timing)
415 {
416 	struct mcp251xfd_data *dev_data = dev->data;
417 	uint32_t *reg;
418 	int ret;
419 
420 	if (!timing) {
421 		return -EINVAL;
422 	}
423 
424 	if (dev_data->started) {
425 		return -EBUSY;
426 	}
427 
428 	k_mutex_lock(&dev_data->mutex, K_FOREVER);
429 
430 	reg = mcp251xfd_get_spi_buf_ptr(dev);
431 	*reg = FIELD_PREP(MCP251XFD_REG_NBTCFG_BRP_MASK, timing->prescaler - 1);
432 	*reg |= FIELD_PREP(MCP251XFD_REG_NBTCFG_TSEG1_MASK,
433 			   timing->prop_seg + timing->phase_seg1 - 1);
434 	*reg |= FIELD_PREP(MCP251XFD_REG_NBTCFG_TSEG2_MASK, timing->phase_seg2 - 1);
435 	*reg |= FIELD_PREP(MCP251XFD_REG_NBTCFG_SJW_MASK, timing->sjw - 1);
436 
437 	ret = mcp251xfd_write(dev, MCP251XFD_REG_NBTCFG, MCP251XFD_REG_SIZE);
438 	if (ret < 0) {
439 		LOG_ERR("Failed to write NBTCFG register [%d]", ret);
440 	}
441 
442 	k_mutex_unlock(&dev_data->mutex);
443 
444 	return ret;
445 }
446 
447 
448 #if defined(CONFIG_CAN_FD_MODE)
449 static int mcp251xfd_set_timing_data(const struct device *dev, const struct can_timing *timing)
450 {
451 	struct mcp251xfd_data *dev_data = dev->data;
452 	uint32_t *reg;
453 	int ret;
454 
455 	if (!timing) {
456 		return -EINVAL;
457 	}
458 
459 	if (dev_data->started) {
460 		return -EBUSY;
461 	}
462 
463 	k_mutex_lock(&dev_data->mutex, K_FOREVER);
464 
465 	reg = mcp251xfd_get_spi_buf_ptr(dev);
466 
467 	*reg = FIELD_PREP(MCP251XFD_REG_DBTCFG_BRP_MASK, timing->prescaler - 1);
468 	*reg |= FIELD_PREP(MCP251XFD_REG_DBTCFG_TSEG1_MASK,
469 			   timing->prop_seg + timing->phase_seg1 - 1);
470 	*reg |= FIELD_PREP(MCP251XFD_REG_DBTCFG_TSEG2_MASK, timing->phase_seg2 - 1);
471 	*reg |= FIELD_PREP(MCP251XFD_REG_DBTCFG_SJW_MASK, timing->sjw - 1);
472 
473 	*reg = sys_cpu_to_le32(*reg);
474 
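	/*
	 * Transmitter delay compensation offset: place the secondary sample
	 * point at the data-phase sample point, expressed in SYSCLK cycles
	 * (prescaler times the number of time quanta before the sample point).
	 */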
475 	dev_data->tdco = timing->prescaler * (timing->prop_seg + timing->phase_seg1);
476 
477 	ret = mcp251xfd_write(dev, MCP251XFD_REG_DBTCFG, MCP251XFD_REG_SIZE);
478 	if (ret < 0) {
479 		LOG_ERR("Failed to write DBTCFG register [%d]", ret);
480 	}
481 
482 	k_mutex_unlock(&dev_data->mutex);
483 
484 	return ret;
485 }
486 #endif
487 
488 static int mcp251xfd_send(const struct device *dev, const struct can_frame *msg,
489 			  k_timeout_t timeout, can_tx_callback_t callback, void *callback_arg)
490 {
491 	struct mcp251xfd_data *dev_data = dev->data;
492 	uint8_t mailbox_idx;
493 	int ret = 0;
494 
495 	LOG_DBG("Sending %d bytes. Id: 0x%x, ID type: %s %s %s %s", can_dlc_to_bytes(msg->dlc),
496 		msg->id, msg->flags & CAN_FRAME_IDE ? "extended" : "standard",
497 		msg->flags & CAN_FRAME_RTR ? "RTR" : "",
498 		msg->flags & CAN_FRAME_FDF ? "FD frame" : "",
499 		msg->flags & CAN_FRAME_BRS ? "BRS" : "");
500 
501 	__ASSERT_NO_MSG(callback != NULL);
502 
503 	if (!dev_data->started) {
504 		return -ENETDOWN;
505 	}
506 
507 	if (dev_data->state == CAN_STATE_BUS_OFF) {
508 		return -ENETUNREACH;
509 	}
510 
511 	if ((msg->flags & CAN_FRAME_FDF) == 0 && msg->dlc > CAN_MAX_DLC) {
512 		LOG_ERR("DLC of %d without FD flag set", msg->dlc);
513 		return -EINVAL;
514 	}
515 
516 	if ((msg->flags & CAN_FRAME_FDF) && !(dev_data->mode & CAN_MODE_FD)) {
517 		return -ENOTSUP;
518 	}
519 
520 	if (k_sem_take(&dev_data->tx_sem, timeout) != 0) {
521 		return -EAGAIN;
522 	}
523 
524 	k_mutex_lock(&dev_data->mutex, K_FOREVER);
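	/*
	 * tx_sem counts free TX queue slots, so after a successful take there
	 * is always at least one clear bit in mailbox_usage. The lowest free
	 * index is claimed below and doubles as the SEQ field, which the TEF
	 * handler later uses to map the transmit event back to its callback.
	 */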
525 	for (mailbox_idx = 0; mailbox_idx < MCP251XFD_TX_QUEUE_ITEMS; mailbox_idx++) {
526 		if ((BIT(mailbox_idx) & dev_data->mailbox_usage) == 0) {
527 			dev_data->mailbox_usage |= BIT(mailbox_idx);
528 			break;
529 		}
530 	}
531 
532 	if (mailbox_idx >= MCP251XFD_TX_QUEUE_ITEMS) {
533 		k_sem_give(&dev_data->tx_sem);
534 		ret = -EIO;
535 		goto done;
536 	}
537 
538 	dev_data->mailbox[mailbox_idx].cb = callback;
539 	dev_data->mailbox[mailbox_idx].cb_arg = callback_arg;
540 
541 	ret = mcp251xfd_fifo_write(dev, mailbox_idx, msg);
542 
543 	if (ret < 0) {
544 		dev_data->mailbox_usage &= ~BIT(mailbox_idx);
545 		dev_data->mailbox[mailbox_idx].cb = NULL;
546 		k_sem_give(&dev_data->tx_sem);
547 	}
548 
549 done:
550 	k_mutex_unlock(&dev_data->mutex);
551 	return ret;
552 }
553 
554 static int mcp251xfd_add_rx_filter(const struct device *dev, can_rx_callback_t rx_cb, void *cb_arg,
555 				   const struct can_filter *filter)
556 {
557 	struct mcp251xfd_data *dev_data = dev->data;
558 	uint32_t *reg;
559 	uint8_t *reg_byte;
560 	int filter_idx;
561 	int ret;
562 
563 	__ASSERT(rx_cb != NULL, "rx_cb can not be null");
564 	k_mutex_lock(&dev_data->mutex, K_FOREVER);
565 
566 	for (filter_idx = 0; filter_idx < CONFIG_CAN_MAX_FILTER ; filter_idx++) {
567 		if ((BIT(filter_idx) & dev_data->filter_usage) == 0) {
568 			break;
569 		}
570 	}
571 
572 	if (filter_idx >= CONFIG_CAN_MAX_FILTER) {
573 		filter_idx = -ENOSPC;
574 		goto done;
575 	}
576 
577 	if ((filter->flags & CAN_FILTER_RTR) != 0) {
578 		filter_idx = -ENOTSUP;
579 		goto done;
580 	}
581 
582 	reg = mcp251xfd_get_spi_buf_ptr(dev);
583 
584 	if ((filter->flags & CAN_FILTER_IDE) != 0) {
585 		*reg = FIELD_PREP(MCP251XFD_REG_FLTOBJ_SID_MASK, filter->id >> 18);
586 		*reg |= FIELD_PREP(MCP251XFD_REG_FLTOBJ_EID_MASK, filter->id);
587 		*reg |= MCP251XFD_REG_FLTOBJ_EXIDE;
588 	} else {
589 		*reg = FIELD_PREP(MCP251XFD_REG_FLTOBJ_SID_MASK, filter->id);
590 	}
591 
592 	*reg = sys_cpu_to_le32(*reg);
593 	ret = mcp251xfd_write(dev, MCP251XFD_REG_FLTOBJ(filter_idx), MCP251XFD_REG_SIZE);
594 	if (ret < 0) {
595 		LOG_ERR("Failed to write FLTOBJ register [%d]", ret);
596 		goto done;
597 	}
598 
599 	reg = mcp251xfd_get_spi_buf_ptr(dev);
600 	if ((filter->flags & CAN_FILTER_IDE) != 0) {
601 		*reg = FIELD_PREP(MCP251XFD_REG_MASK_MSID_MASK, filter->mask >> 18);
602 		*reg |= FIELD_PREP(MCP251XFD_REG_MASK_MEID_MASK, filter->mask);
603 	} else {
604 		*reg = FIELD_PREP(MCP251XFD_REG_MASK_MSID_MASK, filter->mask);
605 	}
606 	*reg |= MCP251XFD_REG_MASK_MIDE;
607 
608 	*reg = sys_cpu_to_le32(*reg);
609 
610 	ret = mcp251xfd_write(dev, MCP251XFD_REG_FLTMASK(filter_idx), MCP251XFD_REG_SIZE);
611 	if (ret < 0) {
612 		LOG_ERR("Failed to write FLTMASK register [%d]", ret);
613 		goto done;
614 	}
615 
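	/*
	 * Each filter owns one byte inside the packed FLTCON registers:
	 * FLTEN arms the filter and FBP steers matching frames into the
	 * single RX FIFO used by this driver.
	 */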
616 	reg_byte = mcp251xfd_get_spi_buf_ptr(dev);
617 	*reg_byte = MCP251XFD_REG_BYTE_FLTCON_FLTEN;
618 	*reg_byte |= FIELD_PREP(MCP251XFD_REG_BYTE_FLTCON_FBP_MASK, MCP251XFD_RX_FIFO_IDX);
619 
620 	ret = mcp251xfd_write(dev, MCP251XFD_REG_BYTE_FLTCON(filter_idx), 1);
621 	if (ret < 0) {
622 		LOG_ERR("Failed to write FLTCON register [%d]", ret);
623 		goto done;
624 	}
625 
626 	dev_data->filter_usage |= BIT(filter_idx);
627 	dev_data->filter[filter_idx] = *filter;
628 	dev_data->rx_cb[filter_idx] = rx_cb;
629 	dev_data->cb_arg[filter_idx] = cb_arg;
630 
631 done:
632 	k_mutex_unlock(&dev_data->mutex);
633 
634 	return filter_idx;
635 }
636 
637 static void mcp251xfd_remove_rx_filter(const struct device *dev, int filter_idx)
638 {
639 	struct mcp251xfd_data *dev_data = dev->data;
640 	uint8_t *reg_byte;
641 	uint32_t *reg;
642 	int ret;
643 
644 	if (filter_idx < 0 || filter_idx >= CONFIG_CAN_MAX_FILTER) {
645 		LOG_ERR("Filter ID %d out of bounds", filter_idx);
646 		return;
647 	}
648 
649 	k_mutex_lock(&dev_data->mutex, K_FOREVER);
650 
651 	reg_byte = mcp251xfd_get_spi_buf_ptr(dev);
652 	*reg_byte = 0;
653 
654 	ret = mcp251xfd_write(dev, MCP251XFD_REG_BYTE_FLTCON(filter_idx), 1);
655 	if (ret < 0) {
656 		LOG_ERR("Failed to write FLTCON register [%d]", ret);
657 		goto done;
658 	}
659 
660 	dev_data->filter_usage &= ~BIT(filter_idx);
661 
662 	reg = mcp251xfd_get_spi_buf_ptr(dev);
663 	reg[0] = 0;
664 
665 	ret = mcp251xfd_write(dev, MCP251XFD_REG_FLTCON(filter_idx), MCP251XFD_REG_SIZE);
666 	if (ret < 0) {
667 		LOG_ERR("Failed to write FLTCON register [%d]", ret);
668 	}
669 
670 done:
671 	k_mutex_unlock(&dev_data->mutex);
672 }
673 
674 static void mcp251xfd_set_state_change_callback(const struct device *dev,
675 						can_state_change_callback_t cb, void *user_data)
676 {
677 	struct mcp251xfd_data *dev_data = dev->data;
678 
679 	dev_data->state_change_cb = cb;
680 	dev_data->state_change_cb_data = user_data;
681 }
682 
683 static int mcp251xfd_get_state(const struct device *dev, enum can_state *state,
684 			       struct can_bus_err_cnt *err_cnt)
685 {
686 	struct mcp251xfd_data *dev_data = dev->data;
687 	uint32_t *reg;
688 	int ret = 0;
689 
690 	k_mutex_lock(&dev_data->mutex, K_FOREVER);
691 
692 	reg = mcp251xfd_read_crc(dev, MCP251XFD_REG_TREC, MCP251XFD_REG_SIZE);
693 	if (!reg) {
694 		ret = -EINVAL;
695 		goto done;
696 	}
697 
698 	*reg = sys_le32_to_cpu(*reg);
699 
700 	if (err_cnt != NULL) {
701 		err_cnt->tx_err_cnt = FIELD_GET(MCP251XFD_REG_TREC_TEC_MASK, *reg);
702 		err_cnt->rx_err_cnt = FIELD_GET(MCP251XFD_REG_TREC_REC_MASK, *reg);
703 	}
704 
705 	if (state == NULL) {
706 		goto done;
707 	}
708 
709 	if (!dev_data->started) {
710 		*state = CAN_STATE_STOPPED;
711 		goto done;
712 	}
713 
714 	if ((*reg & MCP251XFD_REG_TREC_TXBO) != 0) {
715 		*state = CAN_STATE_BUS_OFF;
716 	} else if ((*reg & MCP251XFD_REG_TREC_TXBP) != 0) {
717 		*state = CAN_STATE_ERROR_PASSIVE;
718 	} else if ((*reg & MCP251XFD_REG_TREC_RXBP) != 0) {
719 		*state = CAN_STATE_ERROR_PASSIVE;
720 	} else if ((*reg & MCP251XFD_REG_TREC_TXWARN) != 0) {
721 		*state = CAN_STATE_ERROR_WARNING;
722 	} else if ((*reg & MCP251XFD_REG_TREC_RXWARN) != 0) {
723 		*state = CAN_STATE_ERROR_WARNING;
724 	} else {
725 		*state = CAN_STATE_ERROR_ACTIVE;
726 	}
727 
728 done:
729 	k_mutex_unlock(&dev_data->mutex);
730 	return ret;
731 }
732 
733 static int mcp251xfd_get_core_clock(const struct device *dev, uint32_t *rate)
734 {
735 	const struct mcp251xfd_config *dev_cfg = dev->config;
736 
737 	*rate = dev_cfg->osc_freq;
738 	return 0;
739 }
740 
741 static int mcp251xfd_get_max_filters(const struct device *dev, bool ide)
742 {
743 	ARG_UNUSED(ide);
744 
745 	return CONFIG_CAN_MAX_FILTER;
746 }
747 
748 static int mcp251xfd_get_max_bitrate(const struct device *dev, uint32_t *max_bitrate)
749 {
750 	const struct mcp251xfd_config *dev_cfg = dev->config;
751 
752 	*max_bitrate = dev_cfg->max_bitrate;
753 
754 	return 0;
755 }
756 
757 #ifndef CONFIG_CAN_AUTO_BUS_OFF_RECOVERY
758 static int mcp251xfd_recover(const struct device *dev, k_timeout_t timeout)
759 {
760 	struct mcp251xfd_data *dev_data = dev->data;
761 
762 	ARG_UNUSED(timeout);
763 
764 	if (!dev_data->started) {
765 		return -ENETDOWN;
766 	}
767 
768 	return -ENOTSUP;
769 }
770 #endif
771 
772 static int mcp251xfd_handle_fifo_read(const struct device *dev, const struct mcp251xfd_fifo *fifo,
773 				      uint8_t fifo_type)
774 {
775 	int ret = 0;
776 	struct mcp251xfd_data *dev_data = dev->data;
777 	uint32_t *regs, fifosta, ua;
778 	uint8_t *reg_byte;
779 
780 	int len;
781 	int fetch_total = 0;
782 	int ui_inc = 0;
783 	uint32_t fifo_tail_index, fifo_tail_addr;
784 	uint8_t fifo_head_index;
785 
786 	k_mutex_lock(&dev_data->mutex, K_FOREVER);
787 
788 	/* read in FIFOSTA and FIFOUA at the same time */
789 	regs = mcp251xfd_read_crc(dev, MCP251XFD_REG_FIFOCON_TO_STA(fifo->reg_fifocon_addr),
790 				  2 * MCP251XFD_REG_SIZE);
791 	if (!regs) {
792 		ret = -EINVAL;
793 		goto done;
794 	}
795 	fifosta = sys_le32_to_cpu(regs[0]);
796 	ua = sys_le32_to_cpu(regs[1]);
797 
798 	/* is there any data in the fifo? */
799 	if (!(fifosta & MCP251XFD_REG_FIFOSTA_TFNRFNIF)) {
800 		goto done;
801 	}
802 
803 	fifo_tail_addr = ua;
804 	fifo_tail_index = (fifo_tail_addr - fifo->ram_start_addr) / fifo->item_size;
805 
806 	if (fifo_type == MCP251XFD_FIFO_TYPE_RX) {
807 		/*
808 		 * FIFOCI points to where the next message will be written, i.e. one
809 		 * past the newest entry, so step it back (with wrap) to the newest.
810 		 */
811 		fifo_head_index = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifosta);
812 		if (fifo_head_index == 0) {
813 			fifo_head_index = fifo->capacity - 1;
814 		} else {
815 			fifo_head_index -= 1;
816 		}
817 
818 		if (fifo_tail_index > fifo_head_index) {
819 			/* fetch to the end of the memory and then wrap to the start */
820 			fetch_total = fifo->capacity - 1 - fifo_tail_index + 1;
821 			fetch_total += fifo_head_index + 1;
822 		} else {
823 			fetch_total = fifo_head_index - fifo_tail_index + 1;
824 		}
825 	} else if (fifo_type == MCP251XFD_FIFO_TYPE_TEF) {
826 		/* FIFOCI doesn't exist for TEF queues, so fetch one message at a time */
827 		fifo_head_index = fifo_tail_index;
828 		fetch_total = 1;
829 	} else {
830 		ret = -EINVAL;
831 		goto done;
832 	}
833 
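	/*
	 * Drain the FIFO in at most two linear chunks. For example, with a
	 * capacity of 4, tail = 2 and head = 0, the first pass reads items
	 * 2..3 and the second wraps around to read item 0.
	 */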
834 	while (fetch_total > 0) {
835 		uint16_t memory_addr;
836 		uint8_t *data;
837 
838 		if (fifo_tail_index > fifo_head_index) {
839 			len = fifo->capacity - 1 - fifo_tail_index + 1;
840 		} else {
841 			len = fifo_head_index - fifo_tail_index + 1;
842 		}
843 
844 		memory_addr = MCP251XFD_RAM_START_ADDR + fifo->ram_start_addr +
845 			      fifo_tail_index * fifo->item_size;
846 
847 		data = mcp251xfd_read_reg(dev, memory_addr, len * fifo->item_size);
848 		if (!data) {
849 			LOG_ERR("Error fetching batch message");
850 			ret = -EINVAL;
851 			goto done;
852 		}
853 
854 		for (int i = 0; i < len; i++) {
855 			fifo->msg_handler(dev, (void *)(&data[i * fifo->item_size]));
856 		}
857 
858 		fifo_tail_index = (fifo_tail_index + len) % fifo->capacity;
859 		fetch_total -= len;
860 		ui_inc += len;
861 	}
862 
863 	reg_byte = mcp251xfd_get_spi_buf_ptr(dev);
864 	*reg_byte = MCP251XFD_UINT32_FLAG_TO_BYTE_MASK(MCP251XFD_REG_FIFOCON_UINC);
865 
866 	for (int i = 0; i < ui_inc; i++) {
867 		ret = mcp251xfd_write(dev, fifo->reg_fifocon_addr + 1, 1);
868 		if (ret < 0) {
869 			LOG_ERR("Failed to increment pointer");
870 			goto done;
871 		}
872 	}
873 
874 done:
875 	k_mutex_unlock(&dev_data->mutex);
876 	return ret;
877 }
878 
879 static void mcp251xfd_reset_tx_fifos(const struct device *dev, int status)
880 {
881 	struct mcp251xfd_data *dev_data = dev->data;
882 
883 	LOG_INF("All FIFOs Reset");
884 	k_mutex_lock(&dev_data->mutex, K_FOREVER);
885 	for (int i = 0; i < MCP251XFD_TX_QUEUE_ITEMS; i++) {
886 		can_tx_callback_t callback;
887 
888 		if (!(dev_data->mailbox_usage & BIT(i))) {
889 			continue;
890 		}
891 
892 		callback = dev_data->mailbox[i].cb;
893 		if (callback) {
894 			callback(dev, status, dev_data->mailbox[i].cb_arg);
895 		}
896 
897 		dev_data->mailbox_usage &= ~BIT(i);
898 		dev_data->mailbox[i].cb = NULL;
899 		k_sem_give(&dev_data->tx_sem);
900 	}
901 	k_mutex_unlock(&dev_data->mutex);
902 }
903 
904 /*
905  * CERRIF will be set each time a threshold in the TEC/REC counters is crossed, i.e. under the
906  * following conditions:
907  * • TEC or REC exceeds the Error Warning state threshold
908  * • The transmitter or receiver transitions to Error Passive state
909  * • The transmitter transitions to Bus Off state
910  * • The transmitter or receiver transitions from Error Passive to Error Active state
911  * • The module transitions from Bus Off to Error Active state, after the bus off recovery
912  * sequence
913  * When the user clears CERRIF, it will remain clear until a new counter crossing occurs.
914  */
915 static int mcp251xfd_handle_cerrif(const struct device *dev)
916 {
917 	enum can_state new_state;
918 	struct mcp251xfd_data *dev_data = dev->data;
919 	struct can_bus_err_cnt err_cnt;
920 	int ret = 0;
921 
922 	k_mutex_lock(&dev_data->mutex, K_FOREVER);
923 
924 	ret = mcp251xfd_get_state(dev, &new_state, &err_cnt);
925 	if (ret < 0) {
926 		goto done;
927 	}
928 
929 	if (new_state == dev_data->state) {
930 		goto done;
931 	}
932 
933 	LOG_INF("State %d -> %d (tx: %d, rx: %d)", dev_data->state, new_state, err_cnt.tx_err_cnt,
934 		err_cnt.rx_err_cnt);
935 
936 	/* Upon entering bus-off, all the fifos are reset. */
937 	dev_data->state = new_state;
938 	if (new_state == CAN_STATE_BUS_OFF) {
939 		mcp251xfd_reset_tx_fifos(dev, -ENETDOWN);
940 	}
941 
942 	if (dev_data->state_change_cb) {
943 		dev_data->state_change_cb(dev, new_state, err_cnt, dev_data->state_change_cb_data);
944 	}
945 
946 done:
947 	k_mutex_unlock(&dev_data->mutex);
948 	return ret;
949 }
950 
951 static int mcp251xfd_handle_modif(const struct device *dev)
952 {
953 	struct mcp251xfd_data *dev_data = dev->data;
954 	uint8_t mode;
955 	int ret;
956 
957 	k_mutex_lock(&dev_data->mutex, K_FOREVER);
958 
959 	ret = mcp251xfd_get_mode_internal(dev, &mode);
960 	if (ret < 0) {
961 		goto finish;
962 	}
963 
964 	dev_data->current_mcp251xfd_mode = mode;
965 
966 	LOG_INF("Switched to mode %d", mode);
967 
968 	if (mode == dev_data->next_mcp251xfd_mode) {
969 		ret = 0;
970 		goto finish;
971 	}
972 
973 	/* try to transition back into our target mode */
974 	if (dev_data->started) {
975 		LOG_INF("Switching back into mode %d", dev_data->next_mcp251xfd_mode);
976 		ret = mcp251xfd_set_mode_internal(dev, dev_data->next_mcp251xfd_mode);
977 	}
978 
979 finish:
980 	k_mutex_unlock(&dev_data->mutex);
981 	return ret;
982 }
983 
984 static int mcp251xfd_handle_ivmif(const struct device *dev)
985 {
986 	uint32_t *reg;
987 	struct mcp251xfd_data *dev_data = dev->data;
988 	int ret;
989 
990 	k_mutex_lock(&dev_data->mutex, K_FOREVER);
991 
992 	reg = mcp251xfd_read_crc(dev, MCP251XFD_REG_BDIAG1, MCP251XFD_REG_SIZE);
993 	if (!reg) {
994 		ret = -EINVAL;
995 		goto done;
996 	}
997 
998 	*reg = sys_le32_to_cpu(*reg);
999 
1000 	if ((*reg & MCP251XFD_REG_BDIAG1_TXBOERR) != 0) {
1001 		LOG_INF("ivmif bus-off error");
1002 		mcp251xfd_reset_tx_fifos(dev, -ENETDOWN);
1003 	}
1004 
1005 	/* Clear the values in diag */
1006 	reg = mcp251xfd_get_spi_buf_ptr(dev);
1007 	reg[0] = 0;
1008 	ret = mcp251xfd_write(dev, MCP251XFD_REG_BDIAG1, MCP251XFD_REG_SIZE);
1009 
1010 done:
1011 	k_mutex_unlock(&dev_data->mutex);
1012 	return ret;
1013 }
1014 
1015 static void mcp251xfd_handle_interrupts(const struct device *dev)
1016 {
1017 	const struct mcp251xfd_config *dev_cfg = dev->config;
1018 	struct mcp251xfd_data *dev_data = dev->data;
1019 	uint16_t *reg_int_hw;
1020 	uint32_t reg_int;
1021 	int ret;
1022 	uint8_t consecutive_calls = 0;
1023 
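	/*
	 * The interrupt line is level triggered, so keep servicing flags until
	 * the pin reads inactive; the GPIO interrupt stays disabled while this
	 * loop runs and is re-armed by the interrupt thread afterwards.
	 */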
1024 	while (1) {
1025 		k_mutex_lock(&dev_data->mutex, K_FOREVER);
1026 		reg_int_hw = mcp251xfd_read_crc(dev, MCP251XFD_REG_INT, sizeof(*reg_int_hw));
1027 
1028 		if (!reg_int_hw) {
1029 			k_mutex_unlock(&dev_data->mutex);
1030 			continue;
1031 		}
1032 
1033 		*reg_int_hw = sys_le16_to_cpu(*reg_int_hw);
1034 
1035 		reg_int = *reg_int_hw;
1036 
1037 		/* these interrupt flags need to be explicitly cleared */
1038 		if (*reg_int_hw & MCP251XFD_REG_INT_IF_CLEARABLE_MASK) {
1039 
1040 			*reg_int_hw &= ~MCP251XFD_REG_INT_IF_CLEARABLE_MASK;
1041 
1042 			*reg_int_hw = sys_cpu_to_le16(*reg_int_hw);
1043 
1044 			ret = mcp251xfd_write(dev, MCP251XFD_REG_INT, sizeof(*reg_int_hw));
1045 			if (ret) {
1046 				LOG_ERR("Error clearing REG_INT interrupts [%d]", ret);
1047 			}
1048 		}
1049 
1050 		k_mutex_unlock(&dev_data->mutex);
1051 
1052 		if ((reg_int & MCP251XFD_REG_INT_RXIF) != 0) {
1053 			ret = mcp251xfd_handle_fifo_read(dev, &dev_cfg->rx_fifo,
1054 							 MCP251XFD_FIFO_TYPE_RX);
1055 			if (ret < 0) {
1056 				LOG_ERR("Error handling RXIF [%d]", ret);
1057 			}
1058 		}
1059 
1060 		if ((reg_int & MCP251XFD_REG_INT_TEFIF) != 0) {
1061 			ret = mcp251xfd_handle_fifo_read(dev, &dev_cfg->tef_fifo,
1062 							 MCP251XFD_FIFO_TYPE_TEF);
1063 			if (ret < 0) {
1064 				LOG_ERR("Error handling TEFIF [%d]", ret);
1065 			}
1066 		}
1067 
1068 		if ((reg_int & MCP251XFD_REG_INT_IVMIF) != 0) {
1069 			ret = mcp251xfd_handle_ivmif(dev);
1070 			if (ret < 0) {
1071 				LOG_ERR("Error handling IVMIF [%d]", ret);
1072 			}
1073 		}
1074 
1075 		if ((reg_int & MCP251XFD_REG_INT_MODIF) != 0) {
1076 			ret = mcp251xfd_handle_modif(dev);
1077 			if (ret < 0) {
1078 				LOG_ERR("Error handling MODIF [%d]", ret);
1079 			}
1080 		}
1081 
1082 		/*
1083 		 * From Linux mcp251xfd driver
1084 		 * On the MCP2517FD and MCP2518FD, we don't get a CERRIF IRQ on the transition
1085 		 * TX ERROR_WARNING -> TX ERROR_ACTIVE.
1086 		 */
1087 		if ((reg_int & MCP251XFD_REG_INT_CERRIF) ||
1088 		    dev_data->state > CAN_STATE_ERROR_ACTIVE) {
1089 			ret = mcp251xfd_handle_cerrif(dev);
1090 			if (ret < 0) {
1091 				LOG_ERR("Error handling CERRIF [%d]", ret);
1092 			}
1093 		}
1094 
1095 		/* Break from loop if INT pin is inactive */
1096 		consecutive_calls++;
1097 		ret = gpio_pin_get_dt(&dev_cfg->int_gpio_dt);
1098 		if (ret < 0) {
1099 			LOG_ERR("Couldn't read INT pin [%d]", ret);
1100 		} else if (ret == 0) {
1101 			/* All interrupt flags handled */
1102 			break;
1103 		} else if (consecutive_calls % MCP251XFD_MAX_INT_HANDLER_CALLS == 0) {
1104 			/* If there are clock problems, then MODIF cannot be cleared. */
1105 			/* This is detected if there are too many consecutive calls. */
1106 			/* Sleep this thread if this happens. */
1107 			k_sleep(K_USEC(MCP251XFD_INT_HANDLER_SLEEP_USEC));
1108 		}
1109 	}
1110 }
1111 
1112 static void mcp251xfd_int_thread(const struct device *dev)
1113 {
1114 	const struct mcp251xfd_config *dev_cfg = dev->config;
1115 	struct mcp251xfd_data *dev_data = dev->data;
1116 
1117 	while (1) {
1118 		int ret;
1119 
1120 		k_sem_take(&dev_data->int_sem, K_FOREVER);
1121 		mcp251xfd_handle_interrupts(dev);
1122 
1123 		/* Re-enable pin interrupts */
1124 		ret = gpio_pin_interrupt_configure_dt(&dev_cfg->int_gpio_dt, GPIO_INT_LEVEL_ACTIVE);
1125 		if (ret < 0) {
1126 			LOG_ERR("Couldn't enable pin interrupt [%d]", ret);
1127 			k_oops();
1128 		}
1129 	}
1130 }
1131 
1132 static void mcp251xfd_int_gpio_callback(const struct device *dev_gpio, struct gpio_callback *cb,
1133 					uint32_t pins)
1134 {
1135 	ARG_UNUSED(dev_gpio);
1136 	struct mcp251xfd_data *dev_data = CONTAINER_OF(cb, struct mcp251xfd_data, int_gpio_cb);
1137 	const struct device *dev = dev_data->dev;
1138 	const struct mcp251xfd_config *dev_cfg = dev->config;
1139 	int ret;
1140 
1141 	/* Disable pin interrupts */
1142 	ret = gpio_pin_interrupt_configure_dt(&dev_cfg->int_gpio_dt, GPIO_INT_DISABLE);
1143 	if (ret < 0) {
1144 		LOG_ERR("Couldn't disable pin interrupt [%d]", ret);
1145 		k_oops();
1146 	}
1147 
1148 	k_sem_give(&dev_data->int_sem);
1149 }
1150 
1151 static int mcp251xfd_get_capabilities(const struct device *dev, can_mode_t *cap)
1152 {
1153 	ARG_UNUSED(dev);
1154 
1155 	*cap = CAN_MODE_NORMAL | CAN_MODE_LISTENONLY | CAN_MODE_LOOPBACK;
1156 
1157 #if defined(CONFIG_CAN_FD_MODE)
1158 	*cap |= CAN_MODE_FD;
1159 #endif
1160 
1161 	return 0;
1162 }
1163 
1164 static int mcp251xfd_start(const struct device *dev)
1165 {
1166 	struct mcp251xfd_data *dev_data = dev->data;
1167 	const struct mcp251xfd_config *dev_cfg = dev->config;
1168 	int ret;
1169 
1170 	if (dev_data->started) {
1171 		return -EALREADY;
1172 	}
1173 
1174 	/* in case of a race between mcp251xfd_send() and mcp251xfd_stop() */
1175 	mcp251xfd_reset_tx_fifos(dev, -ENETDOWN);
1176 
1177 	if (dev_cfg->phy != NULL) {
1178 		ret = can_transceiver_enable(dev_cfg->phy);
1179 		if (ret < 0) {
1180 			LOG_ERR("Failed to enable CAN transceiver [%d]", ret);
1181 			return ret;
1182 		}
1183 	}
1184 
1185 	k_mutex_lock(&dev_data->mutex, K_FOREVER);
1186 
1187 	ret = mcp251xfd_set_mode_internal(dev, dev_data->next_mcp251xfd_mode);
1188 	if (ret < 0) {
1189 		LOG_ERR("Failed to set the mode [%d]", ret);
1190 		if (dev_cfg->phy != NULL) {
1191 			/* Attempt to disable the CAN transceiver in case of error */
1192 			(void)can_transceiver_disable(dev_cfg->phy);
1193 		}
1194 	} else {
1195 		dev_data->started = true;
1196 	}
1197 
1198 	k_mutex_unlock(&dev_data->mutex);
1199 
1200 	return ret;
1201 }
1202 
1203 static int mcp251xfd_stop(const struct device *dev)
1204 {
1205 	struct mcp251xfd_data *dev_data = dev->data;
1206 	const struct mcp251xfd_config *dev_cfg = dev->config;
1207 	uint8_t *reg_byte;
1208 	int ret;
1209 
1210 	if (!dev_data->started) {
1211 		return -EALREADY;
1212 	}
1213 
1214 	k_mutex_lock(&dev_data->mutex, K_FOREVER);
1215 
1216 	/* abort all transmissions */
1217 	reg_byte = mcp251xfd_get_spi_buf_ptr(dev);
1218 	*reg_byte = MCP251XFD_UINT32_FLAG_TO_BYTE_MASK(MCP251XFD_REG_CON_ABAT);
1219 
1220 	ret = mcp251xfd_write(dev, MCP251XFD_REG_CON_B3, 1);
1221 	if (ret < 0) {
1222 		k_mutex_unlock(&dev_data->mutex);
1223 		return ret;
1224 	}
1225 
1226 	/* wait for all the messages to be aborted */
1227 	while (1) {
1228 		reg_byte = mcp251xfd_read_crc(dev, MCP251XFD_REG_CON_B3, 1);
1229 
1230 		if (!reg_byte ||
1231 		    (*reg_byte & MCP251XFD_UINT32_FLAG_TO_BYTE_MASK(MCP251XFD_REG_CON_ABAT)) == 0) {
1232 			break;
1233 		}
1234 	}
1235 
1236 	mcp251xfd_reset_tx_fifos(dev, -ENETDOWN);
1237 
1238 	ret = mcp251xfd_set_mode_internal(dev, MCP251XFD_REG_CON_MODE_CONFIG);
1239 	if (ret < 0) {
1240 		k_mutex_unlock(&dev_data->mutex);
1241 		return ret;
1242 	}
1243 
1244 	dev_data->started = false;
1245 	k_mutex_unlock(&dev_data->mutex);
1246 
1247 	if (dev_cfg->phy != NULL) {
1248 		ret = can_transceiver_disable(dev_cfg->phy);
1249 		if (ret < 0) {
1250 			LOG_ERR("Failed to disable CAN transceiver [%d]", ret);
1251 			return ret;
1252 		}
1253 	}
1254 
1255 	return 0;
1256 }
1257 
1258 static void mcp251xfd_rx_fifo_handler(const struct device *dev, void *data)
1259 {
1260 	struct can_frame dst;
1261 	struct mcp251xfd_data *dev_data = dev->data;
1262 	struct mcp251xfd_rxobj *rxobj = data;
1263 	uint32_t filhit;
1264 
1265 	mcp251xfd_rxobj_to_canframe(rxobj, &dst);
1266 
1267 	filhit = FIELD_GET(MCP251XFD_OBJ_FILHIT_MASK, rxobj->flags);
1268 	if ((dev_data->filter_usage & BIT(filhit)) != 0) {
1269 		LOG_DBG("Received msg CAN id: 0x%x", dst.id);
1270 		dev_data->rx_cb[filhit](dev, &dst, dev_data->cb_arg[filhit]);
1271 	}
1272 }
1273 
1274 static void mcp251xfd_tef_fifo_handler(const struct device *dev, void *data)
1275 {
1276 	struct mcp251xfd_data *dev_data = dev->data;
1277 	can_tx_callback_t callback;
1278 	struct mcp251xfd_tefobj *tefobj = data;
1279 	uint8_t mailbox_idx;
1280 
1281 	mailbox_idx = FIELD_GET(MCP251XFD_OBJ_FLAGS_SEQ_MASK, tefobj->flags);
1282 	if (mailbox_idx >= MCP251XFD_TX_QUEUE_ITEMS) {
1283 		mcp251xfd_reset_tx_fifos(dev, -EIO);
1284 		LOG_ERR("Invalid mailbox index");
1285 		return;
1286 	}
1287 
1288 	callback = dev_data->mailbox[mailbox_idx].cb;
1289 	if (callback != NULL) {
1290 		callback(dev, 0, dev_data->mailbox[mailbox_idx].cb_arg);
1291 	}
1292 
1293 	dev_data->mailbox_usage &= ~BIT(mailbox_idx);
1294 	dev_data->mailbox[mailbox_idx].cb = NULL;
1295 	k_sem_give(&dev_data->tx_sem);
1296 }
1297 
1298 static int mcp251xfd_init_timing_struct(struct can_timing *timing,
1299 					const struct device *dev,
1300 					const struct mcp251xfd_timing_params *timing_params,
1301 					bool is_nominal)
1302 {
1303 	int ret;
1304 
1305 	if (USE_SP_ALGO && timing_params->sample_point > 0) {
1306 		if (is_nominal) {
1307 			ret = can_calc_timing(dev, timing, timing_params->bus_speed,
1308 					      timing_params->sample_point);
1309 		} else {
1310 			ret = can_calc_timing_data(dev, timing, timing_params->bus_speed,
1311 						   timing_params->sample_point);
1312 		}
1313 		if (ret < 0) {
1314 			return ret;
1315 		}
1316 		LOG_DBG("Presc: %d, BS1: %d, BS2: %d", timing->prescaler, timing->phase_seg1,
1317 			timing->phase_seg2);
1318 		LOG_DBG("Sample-point err : %d", ret);
1319 	} else {
1320 		timing->sjw = timing_params->sjw;
1321 		timing->prop_seg = timing_params->prop_seg;
1322 		timing->phase_seg1 = timing_params->phase_seg1;
1323 		timing->phase_seg2 = timing_params->phase_seg2;
1324 		ret = can_calc_prescaler(dev, timing, timing_params->bus_speed);
1325 		if (ret > 0) {
1326 			LOG_WRN("Bitrate error: %d", ret);
1327 		}
1328 	}
1329 
1330 	return ret;
1331 }
1332 
1333 static inline int mcp251xfd_init_con_reg(const struct device *dev)
1334 {
1335 	uint32_t *reg;
1336 
1337 	reg = mcp251xfd_get_spi_buf_ptr(dev);
1338 	*reg = MCP251XFD_REG_CON_ISOCRCEN | MCP251XFD_REG_CON_WAKFIL | MCP251XFD_REG_CON_TXQEN |
1339 	       MCP251XFD_REG_CON_STEF;
1340 	*reg |= FIELD_PREP(MCP251XFD_REG_CON_WFT_MASK, MCP251XFD_REG_CON_WFT_T11FILTER) |
1341 		FIELD_PREP(MCP251XFD_REG_CON_REQOP_MASK, MCP251XFD_REG_CON_MODE_CONFIG);
1342 
1343 	return mcp251xfd_write(dev, MCP251XFD_REG_CON, MCP251XFD_REG_SIZE);
1344 }
1345 
1346 static inline int mcp251xfd_init_osc_reg(const struct device *dev)
1347 {
1348 	int ret;
1349 	const struct mcp251xfd_config *dev_cfg = dev->config;
1350 	uint32_t *reg = mcp251xfd_get_spi_buf_ptr(dev);
1351 	uint32_t reg_value = MCP251XFD_REG_OSC_OSCRDY;
1352 
1353 	*reg = FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK, dev_cfg->clko_div);
1354 	if (dev_cfg->pll_enable) {
1355 		*reg |= MCP251XFD_REG_OSC_PLLEN;
1356 		reg_value |= MCP251XFD_REG_OSC_PLLRDY;
1357 	}
1358 
1359 	*reg = sys_cpu_to_le32(*reg);
1360 
1361 	ret = mcp251xfd_write(dev, MCP251XFD_REG_OSC, MCP251XFD_REG_SIZE);
1362 	if (ret < 0) {
1363 		return ret;
1364 	}
1365 
1366 	return mcp251xfd_reg_check_value_wtimeout(dev, MCP251XFD_REG_OSC, reg_value, reg_value,
1367 						  MCP251XFD_PLLRDY_TIMEOUT_USEC,
1368 						  MCP251XFD_PLLRDY_RETRIES, false);
1369 }
1370 
1371 static inline int mcp251xfd_init_iocon_reg(const struct device *dev)
1372 {
1373 	const struct mcp251xfd_config *dev_cfg = dev->config;
1374 	uint32_t *reg = mcp251xfd_get_spi_buf_ptr(dev);
1375 
1376 	/*
1377 	 * MCP2518FD Errata: DS80000789
1378 	 * Writing bytes 2/3 of the IOCON register using a single SPI write clears LAT0 and LAT1.
1379 	 * This has no effect in the current version since LAT0/1 are set to zero anyway.
1380 	 * However, it needs to be properly handled if other values are needed. The errata suggests
1381 	 * doing single byte writes instead.
1382 	 */
1383 
1384 	*reg = MCP251XFD_REG_IOCON_TRIS0 | MCP251XFD_REG_IOCON_TRIS1 | MCP251XFD_REG_IOCON_PM0 |
1385 	       MCP251XFD_REG_IOCON_PM1;
1386 
1387 	if (dev_cfg->sof_on_clko) {
1388 		*reg |= MCP251XFD_REG_IOCON_SOF;
1389 	}
1390 
1391 	*reg = sys_cpu_to_le32(*reg);
1392 
1393 	return mcp251xfd_write(dev, MCP251XFD_REG_IOCON, MCP251XFD_REG_SIZE);
1394 }
1395 
1396 static inline int mcp251xfd_init_int_reg(const struct device *dev)
1397 {
1398 	uint32_t *reg = mcp251xfd_get_spi_buf_ptr(dev);
1399 
1400 	*reg = MCP251XFD_REG_INT_RXIE | MCP251XFD_REG_INT_MODIE | MCP251XFD_REG_INT_TEFIE |
1401 	       MCP251XFD_REG_INT_CERRIE;
1402 
1403 	*reg = sys_cpu_to_le32(*reg);
1404 
1405 	return mcp251xfd_write(dev, MCP251XFD_REG_INT, MCP251XFD_REG_SIZE);
1406 }
1407 
1408 static inline int mcp251xfd_init_tef_fifo(const struct device *dev)
1409 {
1410 	uint32_t *reg = mcp251xfd_get_spi_buf_ptr(dev);
1411 
1412 	*reg = MCP251XFD_REG_TEFCON_TEFNEIE | MCP251XFD_REG_TEFCON_FRESET;
1413 	*reg |= FIELD_PREP(MCP251XFD_REG_TEFCON_FSIZE_MASK, MCP251XFD_TX_QUEUE_ITEMS - 1);
1414 
1415 	*reg = sys_cpu_to_le32(*reg);
1416 
1417 	return mcp251xfd_write(dev, MCP251XFD_REG_TEFCON, MCP251XFD_REG_SIZE);
1418 }
1419 
1420 static inline int mcp251xfd_init_tx_queue(const struct device *dev)
1421 {
1422 	uint32_t *reg = mcp251xfd_get_spi_buf_ptr(dev);
1423 
1424 	*reg = MCP251XFD_REG_TXQCON_TXEN | MCP251XFD_REG_TXQCON_FRESET;
1425 	*reg |= FIELD_PREP(MCP251XFD_REG_TXQCON_TXAT_MASK, MCP251XFD_REG_TXQCON_TXAT_UNLIMITED);
1426 	*reg |= FIELD_PREP(MCP251XFD_REG_TXQCON_FSIZE_MASK, MCP251XFD_TX_QUEUE_ITEMS - 1);
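	/*
	 * PLSIZE is encoded as the CAN FD DLC of the payload minus 8, e.g. a
	 * 64-byte payload gives can_bytes_to_dlc(64) = 15 and thus PLSIZE = 7.
	 */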
1427 	*reg |= FIELD_PREP(MCP251XFD_REG_TXQCON_PLSIZE_MASK,
1428 			   can_bytes_to_dlc(MCP251XFD_PAYLOAD_SIZE) - 8);
1429 
1430 	*reg = sys_cpu_to_le32(*reg);
1431 
1432 	return mcp251xfd_write(dev, MCP251XFD_REG_TXQCON, MCP251XFD_REG_SIZE);
1433 }
1434 
1435 static inline int mcp251xfd_init_rx_fifo(const struct device *dev)
1436 {
1437 	uint32_t *reg = mcp251xfd_get_spi_buf_ptr(dev);
1438 
1439 	*reg = MCP251XFD_REG_FIFOCON_TFNRFNIE | MCP251XFD_REG_FIFOCON_FRESET;
1440 	*reg |= FIELD_PREP(MCP251XFD_REG_FIFOCON_FSIZE_MASK, MCP251XFD_RX_FIFO_ITEMS - 1);
1441 	*reg |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
1442 			   can_bytes_to_dlc(MCP251XFD_PAYLOAD_SIZE) - 8);
1443 #if defined(CONFIG_CAN_RX_TIMESTAMP)
1444 	*reg |= MCP251XFD_REG_FIFOCON_RXTSEN;
1445 #endif
1446 
1447 	*reg = sys_cpu_to_le32(*reg);
1448 
1449 	return mcp251xfd_write(dev, MCP251XFD_REG_FIFOCON(MCP251XFD_RX_FIFO_IDX),
1450 			       MCP251XFD_REG_SIZE);
1451 }
1452 
1453 #if defined(CONFIG_CAN_RX_TIMESTAMP)
1454 static int mcp251xfd_init_tscon(const struct device *dev)
1455 {
1456 	uint32_t *reg = mcp251xfd_get_spi_buf_ptr(dev);
1457 	const struct mcp251xfd_config *dev_cfg = dev->config;
1458 
1459 	*reg = MCP251XFD_REG_TSCON_TBCEN;
1460 	*reg |= FIELD_PREP(MCP251XFD_REG_TSCON_TBCPRE_MASK,
1461 			   dev_cfg->timestamp_prescaler - 1);
1462 
1463 	*reg = sys_cpu_to_le32(*reg);
1464 
1465 	return mcp251xfd_write(dev, MCP251XFD_REG_TSCON, MCP251XFD_REG_SIZE);
1466 }
1467 #endif
1468 
1469 static int mcp251xfd_reset(const struct device *dev)
1470 {
1471 	const struct mcp251xfd_config *dev_cfg = dev->config;
1472 	uint16_t cmd = sys_cpu_to_be16(MCP251XFD_SPI_INSTRUCTION_RESET);
1473 	const struct spi_buf tx_buf = {.buf = &cmd, .len = sizeof(cmd),};
1474 	const struct spi_buf_set tx = {.buffers = &tx_buf, .count = 1};
1475 	int ret;
1476 
1477 	/* device can only be reset when in configuration mode */
1478 	ret = mcp251xfd_set_mode_internal(dev, MCP251XFD_REG_CON_MODE_CONFIG);
1479 	if (ret < 0) {
1480 		return ret;
1481 	}
1482 
1483 	return spi_write_dt(&dev_cfg->bus, &tx);
1484 }
1485 
1486 static int mcp251xfd_init(const struct device *dev)
1487 {
1488 	const struct mcp251xfd_config *dev_cfg = dev->config;
1489 	struct mcp251xfd_data *dev_data = dev->data;
1490 	uint32_t *reg;
1491 	uint8_t opmod;
1492 	int ret;
1493 	struct can_timing timing = { 0 };
1494 #if defined(CONFIG_CAN_FD_MODE)
1495 	struct can_timing timing_data = { 0 };
1496 #endif
1497 
1498 	dev_data->dev = dev;
1499 
1500 	if (dev_cfg->clk_dev != NULL) {
1501 		uint32_t clk_id = dev_cfg->clk_id;
1502 
1503 		if (!device_is_ready(dev_cfg->clk_dev)) {
1504 			LOG_ERR("Clock controller not ready");
1505 			return -ENODEV;
1506 		}
1507 
1508 		ret = clock_control_on(dev_cfg->clk_dev, (clock_control_subsys_t)clk_id);
1509 		if (ret < 0) {
1510 			LOG_ERR("Failed to enable clock [%d]", ret);
1511 			return ret;
1512 		}
1513 	}
1514 
1515 	k_sem_init(&dev_data->int_sem, 0, 1);
1516 	k_sem_init(&dev_data->tx_sem, MCP251XFD_TX_QUEUE_ITEMS, MCP251XFD_TX_QUEUE_ITEMS);
1517 
1518 	k_mutex_init(&dev_data->mutex);
1519 
1520 	if (!spi_is_ready_dt(&dev_cfg->bus)) {
1521 		LOG_ERR("SPI bus %s not ready", dev_cfg->bus.bus->name);
1522 		return -ENODEV;
1523 	}
1524 
1525 	if (!gpio_is_ready_dt(&dev_cfg->int_gpio_dt)) {
1526 		LOG_ERR("GPIO port not ready");
1527 		return -ENODEV;
1528 	}
1529 
1530 	if (gpio_pin_configure_dt(&dev_cfg->int_gpio_dt, GPIO_INPUT) < 0) {
1531 		LOG_ERR("Unable to configure GPIO pin");
1532 		return -EINVAL;
1533 	}
1534 
1535 	gpio_init_callback(&dev_data->int_gpio_cb, mcp251xfd_int_gpio_callback,
1536 			   BIT(dev_cfg->int_gpio_dt.pin));
1537 
1538 	if (gpio_add_callback_dt(&dev_cfg->int_gpio_dt, &dev_data->int_gpio_cb) < 0) {
1539 		return -EINVAL;
1540 	}
1541 
1542 	if (gpio_pin_interrupt_configure_dt(&dev_cfg->int_gpio_dt, GPIO_INT_LEVEL_ACTIVE) < 0) {
1543 		return -EINVAL;
1544 	}
1545 
1546 	k_thread_create(&dev_data->int_thread, dev_data->int_thread_stack,
1547 			CONFIG_CAN_MCP251XFD_INT_THREAD_STACK_SIZE,
1548 			(k_thread_entry_t)mcp251xfd_int_thread, (void *)dev, NULL, NULL,
1549 			K_PRIO_COOP(CONFIG_CAN_MCP251XFD_INT_THREAD_PRIO), 0, K_NO_WAIT);
1550 
1551 	(void)k_thread_name_set(&dev_data->int_thread, "MCP251XFD interrupt thread");
1552 
1553 	ret = mcp251xfd_reset(dev);
1554 	if (ret < 0) {
1555 		LOG_ERR("Failed to reset the device [%d]", ret);
1556 		goto done;
1557 	}
1558 
1559 	ret = mcp251xfd_init_timing_struct(&timing, dev, &dev_cfg->timing_params, true);
1560 	if (ret < 0) {
1561 		LOG_ERR("Can't find timing for given param");
1562 		goto done;
1563 	}
1564 
1565 #if defined(CONFIG_CAN_FD_MODE)
1566 	ret = mcp251xfd_init_timing_struct(&timing_data, dev, &dev_cfg->timing_params_data, false);
1567 	if (ret < 0) {
1568 		LOG_ERR("Can't find timing for given param");
1569 		goto done;
1570 	}
1571 #endif
1572 
1573 	reg = mcp251xfd_read_crc(dev, MCP251XFD_REG_CON, MCP251XFD_REG_SIZE);
1574 	if (!reg) {
1575 		ret = -EINVAL;
1576 		goto done;
1577 	}
1578 
1579 	*reg = sys_le32_to_cpu(*reg);
1580 
1581 	opmod = FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK, *reg);
1582 
1583 	if (opmod != MCP251XFD_REG_CON_MODE_CONFIG) {
1584 		LOG_ERR("Device did not reset into configuration mode [%d]", opmod);
1585 		ret = -EIO;
1586 		goto done;
1587 	}
1588 
1589 	dev_data->current_mcp251xfd_mode = MCP251XFD_REG_CON_MODE_CONFIG;
1590 
1591 	ret = mcp251xfd_init_con_reg(dev);
1592 	if (ret < 0) {
1593 		goto done;
1594 	}
1595 
1596 	ret = mcp251xfd_init_osc_reg(dev);
1597 	if (ret < 0) {
1598 		goto done;
1599 	}
1600 
1601 	ret = mcp251xfd_init_iocon_reg(dev);
1602 	if (ret < 0) {
1603 		goto done;
1604 	}
1605 
1606 	ret = mcp251xfd_init_int_reg(dev);
1607 	if (ret < 0) {
1608 		goto done;
1609 	}
1610 
1611 	ret = mcp251xfd_set_tdc(dev, false, 0);
1612 	if (ret < 0) {
1613 		goto done;
1614 	}
1615 
1616 #if defined(CONFIG_CAN_RX_TIMESTAMP)
1617 	ret = mcp251xfd_init_tscon(dev);
1618 	if (ret < 0) {
1619 		goto done;
1620 	}
1621 #endif
1622 
1623 	ret = mcp251xfd_init_tef_fifo(dev);
1624 	if (ret < 0) {
1625 		goto done;
1626 	}
1627 
1628 	ret = mcp251xfd_init_tx_queue(dev);
1629 	if (ret < 0) {
1630 		goto done;
1631 	}
1632 
1633 	ret = mcp251xfd_init_rx_fifo(dev);
1634 	if (ret < 0) {
1635 		goto done;
1636 	}
1637 
1638 	LOG_DBG("%d TX FIFOS: 1 element", MCP251XFD_TX_QUEUE_ITEMS);
1639 	LOG_DBG("1 RX FIFO: %d elements", MCP251XFD_RX_FIFO_ITEMS);
1640 	LOG_DBG("%db of %db RAM Allocated",
1641 		MCP251XFD_TEF_FIFO_SIZE + MCP251XFD_TX_QUEUE_SIZE + MCP251XFD_RX_FIFO_SIZE,
1642 		MCP251XFD_RAM_SIZE);
1643 
1644 done:
1645 	ret = can_set_timing(dev, &timing);
1646 	if (ret < 0) {
1647 		return ret;
1648 	}
1649 
1650 #if defined(CONFIG_CAN_FD_MODE)
1651 	ret = can_set_timing_data(dev, &timing_data);
1652 	if (ret < 0) {
1653 		return ret;
1654 	}
1655 #endif
1656 
1657 	return ret;
1658 }
1659 
1660 static const struct can_driver_api mcp251xfd_api_funcs = {
1661 	.get_capabilities = mcp251xfd_get_capabilities,
1662 	.set_mode = mcp251xfd_set_mode,
1663 	.set_timing = mcp251xfd_set_timing,
1664 #if defined(CONFIG_CAN_FD_MODE)
1665 	.set_timing_data = mcp251xfd_set_timing_data,
1666 #endif
1667 	.start = mcp251xfd_start,
1668 	.stop = mcp251xfd_stop,
1669 	.send = mcp251xfd_send,
1670 	.add_rx_filter = mcp251xfd_add_rx_filter,
1671 	.remove_rx_filter = mcp251xfd_remove_rx_filter,
1672 #ifndef CONFIG_CAN_AUTO_BUS_OFF_RECOVERY
1673 	.recover = mcp251xfd_recover,
1674 #endif
1675 	.get_state = mcp251xfd_get_state,
1676 	.set_state_change_callback = mcp251xfd_set_state_change_callback,
1677 	.get_core_clock = mcp251xfd_get_core_clock,
1678 	.get_max_filters = mcp251xfd_get_max_filters,
1679 	.get_max_bitrate = mcp251xfd_get_max_bitrate,
1680 	.timing_min = {
1681 		.sjw = 1,
1682 		.prop_seg = 0,
1683 		.phase_seg1 = 2,
1684 		.phase_seg2 = 1,
1685 		.prescaler = 1,
1686 	},
1687 	.timing_max = {
1688 		.sjw = 128,
1689 		.prop_seg = 0,
1690 		.phase_seg1 = 256,
1691 		.phase_seg2 = 128,
1692 		.prescaler = 256,
1693 	},
1694 #if defined(CONFIG_CAN_FD_MODE)
1695 	.timing_data_min = {
1696 		.sjw = 1,
1697 		.prop_seg = 0,
1698 		.phase_seg1 = 1,
1699 		.phase_seg2 = 1,
1700 		.prescaler = 1,
1701 	},
1702 	.timing_data_max = {
1703 		.sjw = 16,
1704 		.prop_seg = 0,
1705 		.phase_seg1 = 32,
1706 		.phase_seg2 = 16,
1707 		.prescaler = 256,
1708 	},
1709 #endif
1710 };
1711 
1712 #define MCP251XFD_SET_TIMING_MACRO(inst, type)                                                     \
1713 	.timing_params##type = {                                                                   \
1714 		.sjw = DT_INST_PROP(inst, sjw##type),                                              \
1715 		.prop_seg = DT_INST_PROP_OR(inst, prop_seg##type, 0),                              \
1716 		.phase_seg1 = DT_INST_PROP_OR(inst, phase_seg1##type, 0),                          \
1717 		.phase_seg2 = DT_INST_PROP_OR(inst, phase_seg2##type, 0),                          \
1718 		.bus_speed = DT_INST_PROP(inst, bus_speed##type),                                  \
1719 		.sample_point = DT_INST_PROP_OR(inst, sample_point##type, 0),                      \
1720 	}
1721 
1722 #if defined(CONFIG_CAN_FD_MODE)
1723 #define MCP251XFD_SET_TIMING(inst)                                                                 \
1724 	MCP251XFD_SET_TIMING_MACRO(inst,),                                                         \
1725 	MCP251XFD_SET_TIMING_MACRO(inst, _data)
1726 #else
1727 #define MCP251XFD_SET_TIMING(inst)                                                                 \
1728 	MCP251XFD_SET_TIMING_MACRO(inst,)
1729 #endif
1730 
1731 #define MCP251XFD_SET_CLOCK(inst)                                                                  \
1732 	COND_CODE_1(DT_INST_NODE_HAS_PROP(inst, clocks),                                           \
1733 		    (.clk_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(inst)),                          \
1734 		     .clk_id = DT_INST_CLOCKS_CELL(inst, id)),                                     \
1735 		    ())
1736 
1737 #define MCP251XFD_INIT(inst)                                                                       \
1738 	static K_KERNEL_STACK_DEFINE(mcp251xfd_int_stack_##inst,                                   \
1739 				     CONFIG_CAN_MCP251XFD_INT_THREAD_STACK_SIZE);                  \
1740                                                                                                    \
1741 	static struct mcp251xfd_data mcp251xfd_data_##inst = {                                     \
1742 		.int_thread_stack = mcp251xfd_int_stack_##inst,                                    \
1743 	};                                                                                         \
1744 	static const struct mcp251xfd_config mcp251xfd_config_##inst = {                           \
1745 		.bus = SPI_DT_SPEC_INST_GET(inst, SPI_WORD_SET(8), 0),                             \
1746 		.int_gpio_dt = GPIO_DT_SPEC_INST_GET(inst, int_gpios),                             \
1747                                                                                                    \
1748 		.sof_on_clko = DT_INST_PROP(inst, sof_on_clko),                                    \
1749 		.clko_div = DT_INST_ENUM_IDX(inst, clko_div),                                      \
1750 		.pll_enable = DT_INST_PROP(inst, pll_enable),                                      \
1751 		.timestamp_prescaler = DT_INST_PROP(inst, timestamp_prescaler),                    \
1752                                                                                                    \
1753 		.osc_freq = DT_INST_PROP(inst, osc_freq),                                          \
1754 		MCP251XFD_SET_TIMING(inst),                                                        \
1755 		.phy = DEVICE_DT_GET_OR_NULL(DT_INST_PHANDLE(inst, phys)),                         \
1756 		.max_bitrate = DT_INST_CAN_TRANSCEIVER_MAX_BITRATE(inst, 8000000),                 \
1757 		.rx_fifo = {.ram_start_addr = MCP251XFD_RX_FIFO_START_ADDR,                        \
1758 			    .reg_fifocon_addr = MCP251XFD_REG_FIFOCON(MCP251XFD_RX_FIFO_IDX),      \
1759 			    .capacity = MCP251XFD_RX_FIFO_ITEMS,                                   \
1760 			    .item_size = MCP251XFD_RX_FIFO_ITEM_SIZE,                              \
1761 			    .msg_handler = mcp251xfd_rx_fifo_handler},                             \
1762 		.tef_fifo = {.ram_start_addr = MCP251XFD_TEF_FIFO_START_ADDR,                      \
1763 			     .reg_fifocon_addr = MCP251XFD_REG_TEFCON,                             \
1764 			     .capacity = MCP251XFD_TEF_FIFO_ITEMS,                                 \
1765 			     .item_size = MCP251XFD_TEF_FIFO_ITEM_SIZE,                            \
1766 			     .msg_handler = mcp251xfd_tef_fifo_handler},                           \
1767 		MCP251XFD_SET_CLOCK(inst)                                                          \
1768 	};                                                                                         \
1769                                                                                                    \
1770 	CAN_DEVICE_DT_INST_DEFINE(inst, &mcp251xfd_init, NULL, &mcp251xfd_data_##inst,             \
1771 				  &mcp251xfd_config_##inst, POST_KERNEL, CONFIG_CAN_INIT_PRIORITY, \
1772 				  &mcp251xfd_api_funcs);
1773 
1774 DT_INST_FOREACH_STATUS_OKAY(MCP251XFD_INIT)
1775