/*
 * Copyright (c) 2023 Andriy Gelman
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT infineon_xmc4xxx_can_node

#include <zephyr/device.h>
#include <zephyr/drivers/can.h>
#include <zephyr/drivers/can/transceiver.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/kernel.h>
#include <zephyr/sys/bitarray.h>

#include <soc.h>
#include <xmc_can.h>

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(can_xmc4xxx, CONFIG_CAN_LOG_LEVEL);

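/*
 * All CAN nodes share a single MultiCAN module, so the module registers,
 * clock frequency and message object pool below are global and shared
 * across all driver instances.
 */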
#define CAN_XMC4XXX_MULTICAN_NODE DT_INST(0, infineon_xmc4xxx_can)

#define CAN_XMC4XXX_NUM_MESSAGE_OBJECTS DT_PROP(CAN_XMC4XXX_MULTICAN_NODE, message_objects)
#define CAN_XMC4XXX_CLOCK_PRESCALER DT_PROP(CAN_XMC4XXX_MULTICAN_NODE, clock_prescaler)

static CAN_GLOBAL_TypeDef *const can_xmc4xxx_global_reg =
	(CAN_GLOBAL_TypeDef *)DT_REG_ADDR(CAN_XMC4XXX_MULTICAN_NODE);

static bool can_xmc4xxx_global_init;
static uint32_t can_xmc4xxx_clock_frequency;

SYS_BITARRAY_DEFINE_STATIC(mo_usage_bitarray, CAN_XMC4XXX_NUM_MESSAGE_OBJECTS);
static int can_xmc4xxx_num_free_mo = CAN_XMC4XXX_NUM_MESSAGE_OBJECTS;

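/* First MultiCAN service request IRQ number; the node's service request index */
/* is derived by subtracting this from the devicetree IRQ number */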
#define CAN_XMC4XXX_IRQ_MIN 76
#define CAN_XMC4XXX_MAX_DLC 8

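/* Node register blocks are spaced 0x100 bytes apart, starting at CAN_NODE0_BASE */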
#define CAN_XMC4XXX_REG_TO_NODE_IND(reg) (((uint32_t)(reg) - (uint32_t)CAN_NODE0_BASE) / 0x100)

struct can_xmc4xxx_tx_callback {
	can_tx_callback_t function;
	void *user_data;
};

struct can_xmc4xxx_rx_callback {
	can_rx_callback_t function;
	void *user_data;
};

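/*
 * Each RX filter is backed by a hardware FIFO built from a linked list of
 * message objects: base holds the filter configuration and is the bottom of
 * the FIFO, top is the last message object in the list, head is where the
 * hardware writes the next frame and tail is where the driver reads the
 * oldest pending frame.
 */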
struct can_xmc4xxx_rx_fifo {
	CAN_MO_TypeDef *base;
	CAN_MO_TypeDef *top;
	CAN_MO_TypeDef *tail;
	CAN_MO_TypeDef *head;
};

struct can_xmc4xxx_data {
	struct can_driver_data common;

	enum can_state state;
	struct k_mutex mutex;

	struct k_sem tx_sem;
	struct can_xmc4xxx_tx_callback tx_callbacks[CONFIG_CAN_XMC4XXX_MAX_TX_QUEUE];

	uint32_t filter_usage;
	struct can_xmc4xxx_rx_callback rx_callbacks[CONFIG_CAN_MAX_FILTER];
	struct can_xmc4xxx_rx_fifo rx_fifos[CONFIG_CAN_MAX_FILTER];
#if defined(CONFIG_CAN_ACCEPT_RTR)
	struct can_xmc4xxx_rx_fifo rtr_fifos[CONFIG_CAN_MAX_FILTER];
#endif

	CAN_MO_TypeDef *tx_mo[CONFIG_CAN_XMC4XXX_MAX_TX_QUEUE];
};

struct can_xmc4xxx_config {
	struct can_driver_config common;

	CAN_NODE_TypeDef *can;
	bool clock_div8;

	uint8_t service_request;
	void (*irq_config_func)(void);

	uint8_t input_src;
	const struct pinctrl_dev_config *pcfg;
};

static int can_xmc4xxx_set_mode(const struct device *dev, can_mode_t mode)
{
	struct can_xmc4xxx_data *dev_data = dev->data;
	const struct can_xmc4xxx_config *dev_cfg = dev->config;

	if (dev_data->common.started) {
		return -EBUSY;
	}

	if ((mode & (CAN_MODE_3_SAMPLES | CAN_MODE_ONE_SHOT |
		     CAN_MODE_LOOPBACK | CAN_MODE_FD)) != 0) {
		return -ENOTSUP;
	}

	if ((mode & CAN_MODE_LISTENONLY) != 0) {
		XMC_CAN_NODE_SetAnalyzerMode(dev_cfg->can);
	} else {
		XMC_CAN_NODE_ReSetAnalyzerMode(dev_cfg->can);
	}

	dev_data->common.mode = mode;

	return 0;
}

static int can_xmc4xxx_set_timing(const struct device *dev, const struct can_timing *timing)
{
	struct can_xmc4xxx_data *dev_data = dev->data;
	const struct can_xmc4xxx_config *dev_cfg = dev->config;
	uint32_t reg;

	if (!timing) {
		return -EINVAL;
	}

	if (dev_data->common.started) {
		return -EBUSY;
	}

	k_mutex_lock(&dev_data->mutex, K_FOREVER);

	reg = FIELD_PREP(CAN_NODE_NBTR_DIV8_Msk, dev_cfg->clock_div8);
	reg |= FIELD_PREP(CAN_NODE_NBTR_BRP_Msk, timing->prescaler - 1);
	reg |= FIELD_PREP(CAN_NODE_NBTR_TSEG1_Msk, timing->prop_seg + timing->phase_seg1 - 1);
	reg |= FIELD_PREP(CAN_NODE_NBTR_TSEG2_Msk, timing->phase_seg2 - 1);
	reg |= FIELD_PREP(CAN_NODE_NBTR_SJW_Msk, timing->sjw - 1);

	dev_cfg->can->NBTR = reg;

	k_mutex_unlock(&dev_data->mutex);

	return 0;
}

static int can_xmc4xxx_send(const struct device *dev, const struct can_frame *msg,
			    k_timeout_t timeout, can_tx_callback_t callback, void *callback_arg)
{
	struct can_xmc4xxx_data *dev_data = dev->data;
	uint8_t mailbox_idx;
	struct can_xmc4xxx_tx_callback *callbacks = &dev_data->tx_callbacks[0];
	CAN_MO_TypeDef *mo;
	unsigned int key;

	LOG_DBG("Sending %d bytes. Id: 0x%x, ID type: %s %s %s %s", can_dlc_to_bytes(msg->dlc),
		msg->id, msg->flags & CAN_FRAME_IDE ? "extended" : "standard",
		msg->flags & CAN_FRAME_RTR ? "RTR" : "",
		msg->flags & CAN_FRAME_FDF ? "FD frame" : "",
		msg->flags & CAN_FRAME_BRS ? "BRS" : "");

	if (msg->dlc > CAN_XMC4XXX_MAX_DLC) {
		return -EINVAL;
	}

	if (!dev_data->common.started) {
		return -ENETDOWN;
	}

	if (dev_data->state == CAN_STATE_BUS_OFF) {
		return -ENETUNREACH;
	}

	if ((msg->flags & (CAN_FRAME_FDF | CAN_FRAME_BRS)) != 0) {
		return -ENOTSUP;
	}

	if (k_sem_take(&dev_data->tx_sem, timeout) != 0) {
		return -EAGAIN;
	}

	k_mutex_lock(&dev_data->mutex, K_FOREVER);

	for (mailbox_idx = 0; mailbox_idx < CONFIG_CAN_XMC4XXX_MAX_TX_QUEUE; mailbox_idx++) {
		if (callbacks[mailbox_idx].function == NULL) {
			break;
		}
	}

	__ASSERT_NO_MSG(mailbox_idx < CONFIG_CAN_XMC4XXX_MAX_TX_QUEUE);

	key = irq_lock();
	/* Critical section in case can_xmc4xxx_reset_tx_fifos() is called in the ISR, */
	/* so that the callback function and callback_arg stay consistent. */
	callbacks[mailbox_idx].function = callback;
	callbacks[mailbox_idx].user_data = callback_arg;
	irq_unlock(key);

	mo = dev_data->tx_mo[mailbox_idx];
	mo->MOCTR = CAN_MO_MOCTR_RESMSGVAL_Msk;

	if ((msg->flags & CAN_FRAME_IDE) != 0) {
		/* MOAR - message object arbitration register */
		mo->MOAR = FIELD_PREP(CAN_MO_MOAR_PRI_Msk, 1) |
			   FIELD_PREP(CAN_MO_MOAR_ID_Msk, msg->id) | CAN_MO_MOAR_IDE_Msk;
	} else {
		mo->MOAR = FIELD_PREP(CAN_MO_MOAR_PRI_Msk, 1) |
			   FIELD_PREP(XMC_CAN_MO_MOAR_STDID_Msk, msg->id);
	}

	mo->MOFCR &= ~CAN_MO_MOFCR_DLC_Msk;
	mo->MOFCR |= FIELD_PREP(CAN_MO_MOFCR_DLC_Msk, msg->dlc);

	if ((msg->flags & CAN_FRAME_RTR) != 0) {
		mo->MOCTR = CAN_MO_MOCTR_RESDIR_Msk;
	} else {
		mo->MOCTR = CAN_MO_MOCTR_SETDIR_Msk;
		memcpy((void *)&mo->MODATAL, &msg->data[0], sizeof(uint32_t));
		memcpy((void *)&mo->MODATAH, &msg->data[4], sizeof(uint32_t));
	}

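	/* Mark the message object valid, configure it for transmission and */
	/* request the transfer; MOCTR writes only act on bits set in the value */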
	mo->MOCTR = CAN_MO_MOCTR_SETTXEN0_Msk | CAN_MO_MOCTR_SETTXEN1_Msk |
		    CAN_MO_MOCTR_SETMSGVAL_Msk | CAN_MO_MOCTR_RESRXEN_Msk |
		    CAN_MO_MOCTR_RESRTSEL_Msk;
	mo->MOCTR = CAN_MO_MOCTR_SETTXRQ_Msk;

	k_mutex_unlock(&dev_data->mutex);
	return 0;
}

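/* Claim a free message object from the shared MultiCAN pool, or return NULL */
/* if the pool is exhausted */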
static CAN_MO_TypeDef *can_xmc4xxx_get_mo(uint8_t *mo_index)
{
	int i;

	for (i = 0; i < CAN_XMC4XXX_NUM_MESSAGE_OBJECTS; i++) {
		int prev_val;

		sys_bitarray_test_and_set_bit(&mo_usage_bitarray, i, &prev_val);
		if (prev_val == 0) {
			*mo_index = i;
			can_xmc4xxx_num_free_mo--;
			return &CAN_MO->MO[i];
		}
	}

	return NULL;
}

static void can_xmc4xxx_deinit_fifo(const struct device *dev, struct can_xmc4xxx_rx_fifo *fifo)
{
	CAN_MO_TypeDef *mo = fifo->base;

	while (mo != NULL) {
		int next_index;
		int index;

		/* invalidate message */
		mo->MOCTR = CAN_MO_MOCTR_RESMSGVAL_Msk;

		next_index = FIELD_GET(CAN_MO_MOSTAT_PNEXT_Msk, mo->MOSTAT);
		index = ((uint32_t)mo - (uint32_t)&CAN_MO->MO[0]) / sizeof(*mo);

		if ((uint32_t)mo == (uint32_t)fifo->top) {
			mo = NULL;
		} else {
			mo = &CAN_MO->MO[next_index];
		}

		/* we need to move the node back to the list of unallocated message objects, */
		/* which is list index = 0. 255 gets rolled over to 0 in the function below */
		XMC_CAN_AllocateMOtoNodeList(can_xmc4xxx_global_reg, 255, index);

		sys_bitarray_clear_bit(&mo_usage_bitarray, index);
		can_xmc4xxx_num_free_mo++;
	}
}

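/*
 * Build an RX FIFO for one filter: the base message object carries the
 * acceptance mask and ID, while the remaining message objects are FIFO
 * slaves whose CUR pointer refers back to the base object.
 */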
static int can_xmc4xxx_init_fifo(const struct device *dev, const struct can_filter *filter,
				 struct can_xmc4xxx_rx_fifo *fifo, bool is_rtr)
{
	const struct can_xmc4xxx_config *dev_cfg = dev->config;
	CAN_MO_TypeDef *mo;
	uint32_t reg;
	uint8_t mo_index = 0, base_index;

	if (can_xmc4xxx_num_free_mo < CONFIG_CAN_XMC4XXX_RX_FIFO_ITEMS) {
		return -ENOMEM;
	}

	mo = can_xmc4xxx_get_mo(&mo_index);
	__ASSERT_NO_MSG(mo != NULL);

	base_index = mo_index;
	fifo->base = mo;
	fifo->tail = mo;

	XMC_CAN_AllocateMOtoNodeList(can_xmc4xxx_global_reg,
				     CAN_XMC4XXX_REG_TO_NODE_IND(dev_cfg->can), mo_index);

	/* setup the base object - this controls the filtering for the fifo */
	mo->MOCTR = CAN_MO_MOCTR_RESMSGVAL_Msk;
	mo->MOAMR &= ~(CAN_MO_MOAMR_AM_Msk | CAN_MO_MOAMR_MIDE_Msk);
	mo->MOAR = 0;

	if ((filter->flags & CAN_FILTER_IDE) != 0) {
		mo->MOAMR |= FIELD_PREP(CAN_MO_MOAMR_AM_Msk, filter->mask) | CAN_MO_MOAMR_MIDE_Msk;
		mo->MOAR |= FIELD_PREP(CAN_MO_MOAR_ID_Msk, filter->id) | CAN_MO_MOAR_IDE_Msk;
	} else {
		mo->MOAMR |= FIELD_PREP(XMC_CAN_MO_MOAR_STDID_Msk, filter->mask);
		mo->MOAR |= FIELD_PREP(XMC_CAN_MO_MOAR_STDID_Msk, filter->id);
	}

	mo->MOFCR = FIELD_PREP(CAN_MO_MOFCR_MMC_Msk, 1) | CAN_MO_MOFCR_RXIE_Msk;
	if (is_rtr) {
		mo->MOFCR |= CAN_MO_MOFCR_RMM_Msk;
		mo->MOCTR = CAN_MO_MOCTR_SETDIR_Msk;
	} else {
		mo->MOCTR = CAN_MO_MOCTR_RESDIR_Msk;
	}

	/* Writing to MOCTR sets or resets message object properties */
	mo->MOCTR = CAN_MO_MOCTR_RESTXEN0_Msk | CAN_MO_MOCTR_RESTXEN1_Msk |
		    CAN_MO_MOCTR_SETMSGVAL_Msk | CAN_MO_MOCTR_SETRXEN_Msk |
		    CAN_MO_MOCTR_RESRTSEL_Msk;

	mo->MOIPR = FIELD_PREP(CAN_MO_MOIPR_RXINP_Msk, dev_cfg->service_request);

	/* setup the remaining message objects in the fifo */
	for (int i = 1; i < CONFIG_CAN_XMC4XXX_RX_FIFO_ITEMS; i++) {
		mo = can_xmc4xxx_get_mo(&mo_index);
		__ASSERT_NO_MSG(mo != NULL);

		XMC_CAN_AllocateMOtoNodeList(can_xmc4xxx_global_reg,
					     CAN_XMC4XXX_REG_TO_NODE_IND(dev_cfg->can), mo_index);

		mo->MOCTR = CAN_MO_MOCTR_RESMSGVAL_Msk;
		mo->MOCTR = CAN_MO_MOCTR_SETMSGVAL_Msk | CAN_MO_MOCTR_RESRXEN_Msk;

		/* all the other message objects in the fifo must point to the base object */
		mo->MOFGPR = FIELD_PREP(CAN_MO_MOFGPR_CUR_Msk, base_index);
	}

	reg = 0;
	reg |= FIELD_PREP(CAN_MO_MOFGPR_CUR_Msk, base_index);
	reg |= FIELD_PREP(CAN_MO_MOFGPR_TOP_Msk, mo_index);
	reg |= FIELD_PREP(CAN_MO_MOFGPR_BOT_Msk, base_index);
	reg |= FIELD_PREP(CAN_MO_MOFGPR_SEL_Msk, base_index);

	fifo->base->MOFGPR = reg;
	fifo->top = mo;

	return 0;
}

static int can_xmc4xxx_add_rx_filter(const struct device *dev, can_rx_callback_t callback,
				     void *user_data, const struct can_filter *filter)
{
	struct can_xmc4xxx_data *dev_data = dev->data;
	int filter_idx;

	if ((filter->flags & ~CAN_FILTER_IDE) != 0) {
		LOG_ERR("Unsupported CAN filter flags 0x%02x", filter->flags);
		return -ENOTSUP;
	}

	k_mutex_lock(&dev_data->mutex, K_FOREVER);

	for (filter_idx = 0; filter_idx < CONFIG_CAN_MAX_FILTER; filter_idx++) {
		if ((BIT(filter_idx) & dev_data->filter_usage) == 0) {
			break;
		}
	}

	if (filter_idx >= CONFIG_CAN_MAX_FILTER) {
		filter_idx = -ENOSPC;
	} else {
		unsigned int key = irq_lock();
		int ret;

		ret = can_xmc4xxx_init_fifo(dev, filter, &dev_data->rx_fifos[filter_idx], false);
		if (ret < 0) {
			irq_unlock(key);
			k_mutex_unlock(&dev_data->mutex);
			return ret;
		}

#if defined(CONFIG_CAN_ACCEPT_RTR)
		ret = can_xmc4xxx_init_fifo(dev, filter, &dev_data->rtr_fifos[filter_idx], true);
		if (ret < 0) {
			can_xmc4xxx_deinit_fifo(dev, &dev_data->rx_fifos[filter_idx]);
			irq_unlock(key);
			k_mutex_unlock(&dev_data->mutex);
			return ret;
		}
#endif

		dev_data->filter_usage |= BIT(filter_idx);
		dev_data->rx_callbacks[filter_idx].function = callback;
		dev_data->rx_callbacks[filter_idx].user_data = user_data;

		irq_unlock(key);
	}

	k_mutex_unlock(&dev_data->mutex);

	return filter_idx;
}

static void can_xmc4xxx_remove_rx_filter(const struct device *dev, int filter_idx)
{
	struct can_xmc4xxx_data *dev_data = dev->data;
	unsigned int key;

	if (filter_idx < 0 || filter_idx >= CONFIG_CAN_MAX_FILTER) {
		LOG_ERR("Filter ID %d out of bounds", filter_idx);
		return;
	}

	k_mutex_lock(&dev_data->mutex, K_FOREVER);

	if ((dev_data->filter_usage & BIT(filter_idx)) == 0) {
		k_mutex_unlock(&dev_data->mutex);
		return;
	}

	key = irq_lock();
	can_xmc4xxx_deinit_fifo(dev, &dev_data->rx_fifos[filter_idx]);
#if defined(CONFIG_CAN_ACCEPT_RTR)
	can_xmc4xxx_deinit_fifo(dev, &dev_data->rtr_fifos[filter_idx]);
#endif

	dev_data->filter_usage &= ~BIT(filter_idx);
	dev_data->rx_callbacks[filter_idx].function = NULL;
	dev_data->rx_callbacks[filter_idx].user_data = NULL;
	irq_unlock(key);

	k_mutex_unlock(&dev_data->mutex);
}

static void can_xmc4xxx_set_state_change_callback(const struct device *dev,
						  can_state_change_callback_t cb, void *user_data)
{
	struct can_xmc4xxx_data *dev_data = dev->data;
	unsigned int key;

	key = irq_lock();
	/* critical section so that state_change_cb and state_change_cb_data are consistent */
	dev_data->common.state_change_cb = cb;
	dev_data->common.state_change_cb_user_data = user_data;
	irq_unlock(key);
}

static void can_xmc4xxx_get_state_from_status(const struct device *dev, enum can_state *state,
					      struct can_bus_err_cnt *err_cnt, uint32_t *status)
{
	struct can_xmc4xxx_data *dev_data = dev->data;
	const struct can_xmc4xxx_config *dev_cfg = dev->config;
	uint8_t tec = XMC_CAN_NODE_GetTransmitErrorCounter(dev_cfg->can);
	uint8_t rec = XMC_CAN_NODE_GetReceiveErrorCounter(dev_cfg->can);

	if (err_cnt != NULL) {
		err_cnt->tx_err_cnt = tec;
		err_cnt->rx_err_cnt = rec;
	}

	if (state == NULL) {
		return;
	}

	if (!dev_data->common.started) {
		*state = CAN_STATE_STOPPED;
		return;
	}

	if ((*status & XMC_CAN_NODE_STATUS_BUS_OFF) != 0) {
		*state = CAN_STATE_BUS_OFF;
	} else if (tec >= 128 || rec >= 128) {
		*state = CAN_STATE_ERROR_PASSIVE;
	} else if ((*status & XMC_CAN_NODE_STATUS_ERROR_WARNING_STATUS) != 0) {
		*state = CAN_STATE_ERROR_WARNING;
	} else {
		*state = CAN_STATE_ERROR_ACTIVE;
	}
}

static int can_xmc4xxx_get_state(const struct device *dev, enum can_state *state,
				 struct can_bus_err_cnt *err_cnt)
{
	const struct can_xmc4xxx_config *dev_cfg = dev->config;
	uint32_t status;

	status = XMC_CAN_NODE_GetStatus(dev_cfg->can);

	can_xmc4xxx_get_state_from_status(dev, state, err_cnt, &status);

	return 0;
}

static int can_xmc4xxx_get_core_clock(const struct device *dev, uint32_t *rate)
{
	const struct can_xmc4xxx_config *dev_cfg = dev->config;

	*rate = can_xmc4xxx_clock_frequency;
	if (dev_cfg->clock_div8) {
		*rate /= 8;
	}

	return 0;
}

static int can_xmc4xxx_get_max_filters(const struct device *dev, bool ide)
{
	ARG_UNUSED(ide);

	return CONFIG_CAN_MAX_FILTER;
}

static void can_xmc4xxx_reset_tx_fifos(const struct device *dev, int status)
{
	struct can_xmc4xxx_data *dev_data = dev->data;
	struct can_xmc4xxx_tx_callback *tx_callbacks = &dev_data->tx_callbacks[0];

	LOG_DBG("All Tx message objects reset");
	for (int i = 0; i < CONFIG_CAN_XMC4XXX_MAX_TX_QUEUE; i++) {
		can_tx_callback_t callback;
		void *user_data;

		callback = tx_callbacks[i].function;
		user_data = tx_callbacks[i].user_data;

		tx_callbacks[i].function = NULL;

		if (callback) {
			dev_data->tx_mo[i]->MOCTR = CAN_MO_MOCTR_RESMSGVAL_Msk;
			callback(dev, status, user_data);
			k_sem_give(&dev_data->tx_sem);
		}
	}
}

static void can_xmc4xxx_tx_handler(const struct device *dev)
{
	struct can_xmc4xxx_data *dev_data = dev->data;
	struct can_xmc4xxx_tx_callback *tx_callbacks = &dev_data->tx_callbacks[0];

	for (int i = 0; i < CONFIG_CAN_XMC4XXX_MAX_TX_QUEUE; i++) {
		CAN_MO_TypeDef *mo = dev_data->tx_mo[i];

		if ((mo->MOSTAT & XMC_CAN_MO_STATUS_TX_PENDING) != 0) {
			can_tx_callback_t callback;
			void *user_data;

			mo->MOCTR = XMC_CAN_MO_RESET_STATUS_TX_PENDING;

			callback = tx_callbacks[i].function;
			user_data = tx_callbacks[i].user_data;

			tx_callbacks[i].function = NULL;

			if (callback) {
				callback(dev, 0, user_data);
				k_sem_give(&dev_data->tx_sem);
			}
		}
	}
}

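/* Advance the software read pointer: wrap from top back to base, otherwise */
/* follow the hardware PNEXT pointer chain */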
static inline void can_xmc4xxx_increment_fifo_tail(struct can_xmc4xxx_rx_fifo *fifo)
{
	uint8_t next_index;

	if ((uint32_t)fifo->tail == (uint32_t)fifo->top) {
		fifo->tail = fifo->base;
		return;
	}

	next_index = FIELD_GET(CAN_MO_MOSTAT_PNEXT_Msk, fifo->tail->MOSTAT);
	fifo->tail = &CAN_MO->MO[next_index];
}

static inline bool can_xmc4xxx_is_fifo_empty(struct can_xmc4xxx_rx_fifo *fifo)
{
	if (fifo->tail->MOSTAT & XMC_CAN_MO_STATUS_RX_PENDING) {
		return false;
	}

	return true;
}

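/* The hardware advances the CUR index in the base object's MOFGPR as frames */
/* arrive; snapshot it here as the FIFO head */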
static inline void can_xmc4xxx_update_fifo_head(struct can_xmc4xxx_rx_fifo *fifo)
{
	uint32_t reg = fifo->base->MOFGPR;
	uint8_t top_index, bot_index, cur_index;
	uint8_t head_index = FIELD_GET(CAN_MO_MOFGPR_CUR_Msk, reg);

	fifo->head = &CAN_MO->MO[head_index];
	top_index = FIELD_GET(CAN_MO_MOFGPR_TOP_Msk, reg);
	bot_index = FIELD_GET(CAN_MO_MOFGPR_BOT_Msk, reg);
	cur_index = FIELD_GET(CAN_MO_MOFGPR_CUR_Msk, reg);

	LOG_DBG("Fifo: top %d, bot %d, cur %d", top_index, bot_index, cur_index);
}

static void can_xmc4xxx_rx_fifo_handler(const struct device *dev, struct can_xmc4xxx_rx_fifo *fifo,
					struct can_xmc4xxx_rx_callback *rx_callback)
{
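	/* RTR FIFOs have their base object configured as a transmit object */
	/* (DIR set) in can_xmc4xxx_init_fifo(), so DIR identifies them here */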
	bool is_rtr = (fifo->base->MOSTAT & CAN_MO_MOSTAT_DIR_Msk) != 0;

	while (!can_xmc4xxx_is_fifo_empty(fifo)) {
		struct can_frame frame;
		CAN_MO_TypeDef *mo_tail = fifo->tail;

		memset(&frame, 0, sizeof(frame));

		if ((mo_tail->MOAR & CAN_MO_MOAR_IDE_Msk) != 0) {
			frame.flags |= CAN_FRAME_IDE;
			frame.id = FIELD_GET(CAN_MO_MOAR_ID_Msk, mo_tail->MOAR);
		} else {
			frame.id = FIELD_GET(XMC_CAN_MO_MOAR_STDID_Msk, mo_tail->MOAR);
		}

		frame.dlc = FIELD_GET(CAN_MO_MOFCR_DLC_Msk, mo_tail->MOFCR);

		if (!is_rtr) {
			memcpy(&frame.data[0], (void *)&mo_tail->MODATAL, sizeof(uint32_t));
			memcpy(&frame.data[4], (void *)&mo_tail->MODATAH, sizeof(uint32_t));
		} else {
			frame.flags |= CAN_FRAME_RTR;
			memset(&frame.data[0], 0, CAN_MAX_DLEN);
		}

		if (rx_callback->function != NULL) {
			rx_callback->function(dev, &frame, rx_callback->user_data);
		}

		/* reset the rx pending bit on the tail */
		mo_tail->MOCTR = XMC_CAN_MO_RESET_STATUS_RX_PENDING;
		can_xmc4xxx_increment_fifo_tail(fifo);
	}
}

static void can_xmc4xxx_rx_handler(const struct device *dev)
{
	struct can_xmc4xxx_data *dev_data = dev->data;

	for (int i = 0; i < CONFIG_CAN_MAX_FILTER; i++) {
		if ((BIT(i) & dev_data->filter_usage) == 0) {
			continue;
		}

		can_xmc4xxx_update_fifo_head(&dev_data->rx_fifos[i]);
		can_xmc4xxx_rx_fifo_handler(dev, &dev_data->rx_fifos[i],
					    &dev_data->rx_callbacks[i]);
#if defined(CONFIG_CAN_ACCEPT_RTR)
		can_xmc4xxx_update_fifo_head(&dev_data->rtr_fifos[i]);
		can_xmc4xxx_rx_fifo_handler(dev, &dev_data->rtr_fifos[i],
					    &dev_data->rx_callbacks[i]);
#endif
	}
}

static void can_xmc4xxx_state_change_handler(const struct device *dev, uint32_t status)
{
	const struct can_xmc4xxx_config *dev_cfg = dev->config;
	struct can_xmc4xxx_data *dev_data = dev->data;
	enum can_state new_state;
	struct can_bus_err_cnt err_cnt;

	can_xmc4xxx_get_state_from_status(dev, &new_state, &err_cnt, &status);
	if (dev_data->state != new_state) {
		if (dev_data->common.state_change_cb) {
			dev_data->common.state_change_cb(
				dev, new_state, err_cnt,
				dev_data->common.state_change_cb_user_data);
		}

		if (dev_data->state != CAN_STATE_STOPPED && new_state == CAN_STATE_BUS_OFF) {
			/* the hardware sets the INIT bit on bus-off; clear it again */
			/* so that the node can take part in bus-off recovery */
			XMC_CAN_NODE_ResetInitBit(dev_cfg->can);
		}

		dev_data->state = new_state;

		if (dev_data->state == CAN_STATE_BUS_OFF) {
			can_xmc4xxx_reset_tx_fifos(dev, -ENETDOWN);
		}
	}
}

static void can_xmc4xxx_isr(const struct device *dev)
{
	const struct can_xmc4xxx_config *dev_cfg = dev->config;
	uint32_t status;

	status = XMC_CAN_NODE_GetStatus(dev_cfg->can);
	XMC_CAN_NODE_ClearStatus(dev_cfg->can, status);

	if ((status & XMC_CAN_NODE_STATUS_TX_OK) != 0) {
		can_xmc4xxx_tx_handler(dev);
	}

	if ((status & XMC_CAN_NODE_STATUS_RX_OK) != 0) {
		can_xmc4xxx_rx_handler(dev);
	}

	if ((status & XMC_CAN_NODE_STATUS_ALERT_WARNING) != 0) {
		/* change of bit NSRx.BOFF */
		/* change of bit NSRx.EWRN */
		can_xmc4xxx_state_change_handler(dev, status);
	}
}

static int can_xmc4xxx_get_capabilities(const struct device *dev, can_mode_t *cap)
{
	ARG_UNUSED(dev);

	*cap = CAN_MODE_NORMAL | CAN_MODE_LISTENONLY;

	return 0;
}

static int can_xmc4xxx_start(const struct device *dev)
{
	struct can_xmc4xxx_data *dev_data = dev->data;
	const struct can_xmc4xxx_config *dev_cfg = dev->config;
	int ret = 0;
	unsigned int key;

	if (dev_data->common.started) {
		return -EALREADY;
	}

	key = irq_lock();
	can_xmc4xxx_reset_tx_fifos(dev, -ENETDOWN);
	irq_unlock(key);

	if (dev_cfg->common.phy != NULL) {
		ret = can_transceiver_enable(dev_cfg->common.phy, dev_data->common.mode);
		if (ret < 0) {
			LOG_ERR("Failed to enable CAN transceiver [%d]", ret);
			return ret;
		}
	}

	k_mutex_lock(&dev_data->mutex, K_FOREVER);

	XMC_CAN_NODE_DisableConfigurationChange(dev_cfg->can);

	dev_data->common.started = true;
	XMC_CAN_NODE_ResetInitBit(dev_cfg->can);

	k_mutex_unlock(&dev_data->mutex);

	return ret;
}

static int can_xmc4xxx_stop(const struct device *dev)
{
	struct can_xmc4xxx_data *dev_data = dev->data;
	const struct can_xmc4xxx_config *dev_cfg = dev->config;
	int ret = 0;
	unsigned int key;

	if (!dev_data->common.started) {
		return -EALREADY;
	}

	key = irq_lock();
	XMC_CAN_NODE_SetInitBit(dev_cfg->can);

	XMC_CAN_NODE_EnableConfigurationChange(dev_cfg->can);

	can_xmc4xxx_reset_tx_fifos(dev, -ENETDOWN);
	dev_data->common.started = false;
	irq_unlock(key);

	if (dev_cfg->common.phy != NULL) {
		ret = can_transceiver_disable(dev_cfg->common.phy);
		if (ret < 0) {
			LOG_ERR("Failed to disable CAN transceiver [%d]", ret);
			return ret;
		}
	}

	return 0;
}

static int can_xmc4xxx_init(const struct device *dev)
{
	struct can_xmc4xxx_data *dev_data = dev->data;
	const struct can_xmc4xxx_config *dev_cfg = dev->config;
	int ret;
	struct can_timing timing = {0};
	CAN_MO_TypeDef *mo;
	uint8_t mo_index = 0;

	k_sem_init(&dev_data->tx_sem, CONFIG_CAN_XMC4XXX_MAX_TX_QUEUE,
		   CONFIG_CAN_XMC4XXX_MAX_TX_QUEUE);
	k_mutex_init(&dev_data->mutex);

	if (!can_xmc4xxx_global_init) {
		uint32_t fdr_step;
		uint32_t clk_module;

		XMC_CAN_Enable(can_xmc4xxx_global_reg);
		XMC_CAN_SetBaudrateClockSource(can_xmc4xxx_global_reg, XMC_CAN_CANCLKSRC_FPERI);

		clk_module = XMC_CAN_GetBaudrateClockFrequency(can_xmc4xxx_global_reg);
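		/* In normal divider mode the fractional divider divides by */
		/* 1024 - STEP, so STEP = 1024 - prescaler gives fCAN = fmodule / prescaler */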
		fdr_step = 1024 - CAN_XMC4XXX_CLOCK_PRESCALER;
		can_xmc4xxx_clock_frequency = clk_module / CAN_XMC4XXX_CLOCK_PRESCALER;

		LOG_DBG("Clock frequency %dHz", can_xmc4xxx_clock_frequency);

		can_xmc4xxx_global_reg->FDR &= ~(CAN_FDR_DM_Msk | CAN_FDR_STEP_Msk);
		can_xmc4xxx_global_reg->FDR |= FIELD_PREP(CAN_FDR_DM_Msk, XMC_CAN_DM_NORMAL) |
					       FIELD_PREP(CAN_FDR_STEP_Msk, fdr_step);

		can_xmc4xxx_global_init = true;
	}

	XMC_CAN_NODE_EnableConfigurationChange(dev_cfg->can);

	XMC_CAN_NODE_SetReceiveInput(dev_cfg->can, dev_cfg->input_src);

	XMC_CAN_NODE_SetInitBit(dev_cfg->can);

	XMC_CAN_NODE_SetEventNodePointer(dev_cfg->can, XMC_CAN_NODE_POINTER_EVENT_ALERT,
					 dev_cfg->service_request);

	XMC_CAN_NODE_SetEventNodePointer(dev_cfg->can, XMC_CAN_NODE_POINTER_EVENT_LEC,
					 dev_cfg->service_request);

	XMC_CAN_NODE_SetEventNodePointer(dev_cfg->can, XMC_CAN_NODE_POINTER_EVENT_TRANSFER_OK,
					 dev_cfg->service_request);

	XMC_CAN_NODE_SetEventNodePointer(dev_cfg->can, XMC_CAN_NODE_POINTER_EVENT_FRAME_COUNTER,
					 dev_cfg->service_request);

	XMC_CAN_NODE_EnableEvent(dev_cfg->can, XMC_CAN_NODE_EVENT_TX_INT |
				 XMC_CAN_NODE_EVENT_ALERT);

	/* set up tx messages */
	for (int i = 0; i < CONFIG_CAN_XMC4XXX_MAX_TX_QUEUE; i++) {
		mo = can_xmc4xxx_get_mo(&mo_index);
		if (mo == NULL) {
			return -ENOMEM;
		}

		dev_data->tx_mo[i] = mo;

		XMC_CAN_AllocateMOtoNodeList(can_xmc4xxx_global_reg,
					     CAN_XMC4XXX_REG_TO_NODE_IND(dev_cfg->can), mo_index);

		mo->MOIPR = FIELD_PREP(CAN_MO_MOIPR_TXINP_Msk, dev_cfg->service_request);
		mo->MOFCR = FIELD_PREP(CAN_MO_MOFCR_MMC_Msk, 0) | CAN_MO_MOFCR_TXIE_Msk;
	}

#ifdef CONFIG_CAN_XMC4XXX_INTERNAL_BUS_MODE
	/* The name of this function is misleading. It doesn't actually enable */
	/* loopback on a single node, but connects all CAN devices to an internal bus. */
	XMC_CAN_NODE_EnableLoopBack(dev_cfg->can);
#endif

	dev_cfg->irq_config_func();

	dev_data->state = CAN_STATE_STOPPED;

#ifndef CONFIG_CAN_XMC4XXX_INTERNAL_BUS_MODE
	ret = pinctrl_apply_state(dev_cfg->pcfg, PINCTRL_STATE_DEFAULT);
	if (ret < 0) {
		return ret;
	}
#endif

	ret = can_calc_timing(dev, &timing, dev_cfg->common.bitrate,
			      dev_cfg->common.sample_point);
	if (ret < 0) {
		return ret;
	}

	LOG_DBG("Presc: %d, BS1: %d, BS2: %d", timing.prescaler, timing.phase_seg1,
		timing.phase_seg2);
	LOG_DBG("Sample-point err: %d", ret);

	return can_set_timing(dev, &timing);
}

static DEVICE_API(can, can_xmc4xxx_api_funcs) = {
	.get_capabilities = can_xmc4xxx_get_capabilities,
	.set_mode = can_xmc4xxx_set_mode,
	.set_timing = can_xmc4xxx_set_timing,
	.start = can_xmc4xxx_start,
	.stop = can_xmc4xxx_stop,
	.send = can_xmc4xxx_send,
	.add_rx_filter = can_xmc4xxx_add_rx_filter,
	.remove_rx_filter = can_xmc4xxx_remove_rx_filter,
	.get_state = can_xmc4xxx_get_state,
	.set_state_change_callback = can_xmc4xxx_set_state_change_callback,
	.get_core_clock = can_xmc4xxx_get_core_clock,
	.get_max_filters = can_xmc4xxx_get_max_filters,
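	/* Limits follow the NBTR bitfields: 6-bit BRP, 4-bit TSEG1, */
	/* 3-bit TSEG2 and 2-bit SJW (each stored as value - 1) */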
	.timing_min = {
		.sjw = 1,
		.prop_seg = 0,
		.phase_seg1 = 3,
		.phase_seg2 = 2,
		.prescaler = 1,
	},
	.timing_max = {
		.sjw = 4,
		.prop_seg = 0,
		.phase_seg1 = 16,
		.phase_seg2 = 8,
		.prescaler = 64,
	},
};

#define CAN_XMC4XXX_INIT(inst)                                                                     \
	static void can_xmc4xxx_irq_config_##inst(void)                                            \
	{                                                                                          \
		IRQ_CONNECT(DT_INST_IRQN(inst), DT_INST_IRQ(inst, priority), can_xmc4xxx_isr,      \
			    DEVICE_DT_INST_GET(inst), 0);                                          \
		irq_enable(DT_INST_IRQN(inst));                                                    \
	}                                                                                          \
                                                                                                   \
	PINCTRL_DT_INST_DEFINE(inst);                                                              \
                                                                                                   \
	static struct can_xmc4xxx_data can_xmc4xxx_data_##inst;                                    \
	static const struct can_xmc4xxx_config can_xmc4xxx_config_##inst = {                       \
		.common = CAN_DT_DRIVER_CONFIG_INST_GET(inst, 0, 1000000),                         \
		.can = (CAN_NODE_TypeDef *)DT_INST_REG_ADDR(inst),                                 \
		.clock_div8 = DT_INST_PROP(inst, clock_div8),                                      \
		.irq_config_func = can_xmc4xxx_irq_config_##inst,                                  \
		.service_request = DT_INST_IRQN(inst) - CAN_XMC4XXX_IRQ_MIN,                       \
		.input_src = DT_INST_ENUM_IDX(inst, input_src),                                    \
		.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(inst),                                      \
	};                                                                                         \
                                                                                                   \
	CAN_DEVICE_DT_INST_DEFINE(inst, can_xmc4xxx_init, NULL, &can_xmc4xxx_data_##inst,          \
				  &can_xmc4xxx_config_##inst, POST_KERNEL,                         \
				  CONFIG_CAN_INIT_PRIORITY, &can_xmc4xxx_api_funcs);

DT_INST_FOREACH_STATUS_OKAY(CAN_XMC4XXX_INIT)