Lines matching "lock" and "mode"

2  * Copyright (c) 2022-2023 Vestas Wind Systems A/S
5 * SPDX-License-Identifier: Apache-2.0
22 const struct can_mcan_config *config = dev->config; in can_mcan_read_reg()
25 err = config->ops->read_reg(dev, reg, val); in can_mcan_read_reg()
35 const struct can_mcan_config *config = dev->config; in can_mcan_write_reg()
38 err = config->ops->write_reg(dev, reg, val); in can_mcan_write_reg()
48 struct can_mcan_data *data = dev->data; in can_mcan_exit_sleep_mode()
53 k_mutex_lock(&data->lock, K_FOREVER); in can_mcan_exit_sleep_mode()
75 if (k_cycle_get_32() - start_time > k_ms_to_cyc_ceil32(CAN_INIT_TIMEOUT_MS)) { in can_mcan_exit_sleep_mode()
82 err = -EAGAIN; in can_mcan_exit_sleep_mode()
94 k_mutex_unlock(&data->lock); in can_mcan_exit_sleep_mode()
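
The matches in can_mcan_exit_sleep_mode() outline the driver's recurring pattern: take the instance lock, request the change through can_mcan_write_reg(), then poll the register until the core acknowledges or CAN_INIT_TIMEOUT_MS elapses, returning -EAGAIN on timeout. A minimal sketch of that polling idiom, assuming only the helpers and the timeout constant visible in the listing; the helper name and the choice of CCCR bit are illustrative, not the verbatim driver code:

    /* Hedged sketch: wait for a CCCR bit to clear, with a cycle-counter
     * based timeout as matched in can_mcan_exit_sleep_mode() above.
     */
    static int can_mcan_poll_cccr_clear(const struct device *dev, uint32_t mask)
    {
            uint32_t start_time = k_cycle_get_32();
            uint32_t cccr;
            int err;

            do {
                    err = can_mcan_read_reg(dev, CAN_MCAN_CCCR, &cccr);
                    if (err != 0) {
                            return err;
                    }

                    if (k_cycle_get_32() - start_time >
                        k_ms_to_cyc_ceil32(CAN_INIT_TIMEOUT_MS)) {
                            return -EAGAIN;
                    }
            } while ((cccr & mask) != 0U);

            return 0;
    }
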
101 struct can_mcan_data *data = dev->data; in can_mcan_enter_init_mode()
106 k_mutex_lock(&data->lock, K_FOREVER); in can_mcan_enter_init_mode()
128 if (k_uptime_ticks() - start_time > timeout.ticks) { in can_mcan_enter_init_mode()
135 err = -EAGAIN; in can_mcan_enter_init_mode()
146 k_mutex_unlock(&data->lock); in can_mcan_enter_init_mode()
153 struct can_mcan_data *data = dev->data; in can_mcan_leave_init_mode()
158 k_mutex_lock(&data->lock, K_FOREVER); in can_mcan_leave_init_mode()
180 if (k_uptime_ticks() - start_time > timeout.ticks) { in can_mcan_leave_init_mode()
181 err = -EAGAIN; in can_mcan_leave_init_mode()
192 k_mutex_unlock(&data->lock); in can_mcan_leave_init_mode()
199 struct can_mcan_data *data = dev->data; in can_mcan_set_timing()
203 if (data->common.started) { in can_mcan_set_timing()
204 return -EBUSY; in can_mcan_set_timing()
207 k_mutex_lock(&data->lock, K_FOREVER); in can_mcan_set_timing()
209 nbtp |= FIELD_PREP(CAN_MCAN_NBTP_NSJW, timing->sjw - 1UL) | in can_mcan_set_timing()
210 FIELD_PREP(CAN_MCAN_NBTP_NTSEG1, timing->phase_seg1 - 1UL) | in can_mcan_set_timing()
211 FIELD_PREP(CAN_MCAN_NBTP_NTSEG2, timing->phase_seg2 - 1UL) | in can_mcan_set_timing()
212 FIELD_PREP(CAN_MCAN_NBTP_NBRP, timing->prescaler - 1UL); in can_mcan_set_timing()
220 k_mutex_unlock(&data->lock); in can_mcan_set_timing()
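
The NBTP fields programmed here are taken from the generic struct can_timing handed in through the public CAN API. A hedged usage sketch from the application side; the 500 kbit/s bitrate, the 87.5 % sample point and the zephyr,canbus chosen node are assumptions about the application, not something this driver mandates:

    #include <zephyr/drivers/can.h>

    const struct device *can_dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_canbus));
    struct can_timing timing;
    int err;

    /* Compute nominal-phase timing for 500 kbit/s at an 87.5 % sample point
     * (given in per mille) and apply it while the controller is stopped;
     * can_mcan_set_timing() rejects changes with -EBUSY once started.
     */
    err = can_calc_timing(can_dev, &timing, 500000, 875);
    if (err >= 0) {
            err = can_set_timing(can_dev, &timing);
    }
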
229 struct can_mcan_data *data = dev->data; in can_mcan_set_timing_data()
234 if (data->common.started) { in can_mcan_set_timing_data()
235 return -EBUSY; in can_mcan_set_timing_data()
238 k_mutex_lock(&data->lock, K_FOREVER); in can_mcan_set_timing_data()
240 dbtp |= FIELD_PREP(CAN_MCAN_DBTP_DSJW, timing_data->sjw - 1UL) | in can_mcan_set_timing_data()
241 FIELD_PREP(CAN_MCAN_DBTP_DTSEG1, timing_data->phase_seg1 - 1UL) | in can_mcan_set_timing_data()
242 FIELD_PREP(CAN_MCAN_DBTP_DTSEG2, timing_data->phase_seg2 - 1UL) | in can_mcan_set_timing_data()
243 FIELD_PREP(CAN_MCAN_DBTP_DBRP, timing_data->prescaler - 1UL); in can_mcan_set_timing_data()
245 if (timing_data->prescaler == 1U || timing_data->prescaler == 2U) { in can_mcan_set_timing_data()
259 timing_data->prescaler); in can_mcan_set_timing_data()
268 k_mutex_unlock(&data->lock); in can_mcan_set_timing_data()
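
The data-phase path is the DBTP counterpart, with an extra prescaler check that gates transmitter delay compensation. A hedged data-phase companion to the previous sketch, reusing can_dev and err from above; the 2 Mbit/s bitrate and 75 % sample point are example values and CONFIG_CAN_FD_MODE is assumed to be enabled:

    struct can_timing timing_data;

    /* Data-phase timing only takes effect once the controller is put into
     * CAN_MODE_FD; like the nominal phase, it must be set before can_start().
     */
    err = can_calc_timing_data(can_dev, &timing_data, 2000000, 750);
    if (err >= 0) {
            err = can_set_timing_data(can_dev, &timing_data);
    }
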
293 const struct can_mcan_config *config = dev->config; in can_mcan_start()
294 struct can_mcan_data *data = dev->data; in can_mcan_start()
297 if (data->common.started) { in can_mcan_start()
298 return -EALREADY; in can_mcan_start()
301 if (config->common.phy != NULL) { in can_mcan_start()
302 err = can_transceiver_enable(config->common.phy, data->common.mode); in can_mcan_start()
314 LOG_ERR("failed to leave init mode (err %d)", err); in can_mcan_start()
316 if (config->common.phy != NULL) { in can_mcan_start()
318 (void)can_transceiver_disable(config->common.phy); in can_mcan_start()
324 data->common.started = true; in can_mcan_start()
331 const struct can_mcan_config *config = dev->config; in can_mcan_stop()
332 const struct can_mcan_callbacks *cbs = config->callbacks; in can_mcan_stop()
333 struct can_mcan_data *data = dev->data; in can_mcan_stop()
338 if (!data->common.started) { in can_mcan_stop()
339 return -EALREADY; in can_mcan_stop()
342 /* CAN transmissions are automatically stopped when entering init mode */ in can_mcan_stop()
345 LOG_ERR("Failed to enter init mode"); in can_mcan_stop()
346 return -EIO; in can_mcan_stop()
349 if (config->common.phy != NULL) { in can_mcan_stop()
350 err = can_transceiver_disable(config->common.phy); in can_mcan_stop()
359 data->common.started = false; in can_mcan_stop()
361 for (tx_idx = 0U; tx_idx < cbs->num_tx; tx_idx++) { in can_mcan_stop()
362 tx_cb = cbs->tx[tx_idx].function; in can_mcan_stop()
365 cbs->tx[tx_idx].function = NULL; in can_mcan_stop()
366 tx_cb(dev, -ENETDOWN, cbs->tx[tx_idx].user_data); in can_mcan_stop()
367 k_sem_give(&data->tx_sem); in can_mcan_stop()
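
can_mcan_start() enables the transceiver (if fitted) before leaving init mode, and can_mcan_stop() re-enters init mode, completing any queued can_send() callbacks with -ENETDOWN as the loop above shows. A hedged application-side view of that lifecycle, reusing can_dev and err from the earlier sketches:

    err = can_start(can_dev);
    if (err != 0 && err != -EALREADY) {
            /* e.g. -EIO if the controller failed to leave init mode */
    }

    /* ... normal operation ... */

    /* Stopping aborts pending transmissions; their callbacks run with
     * -ENETDOWN before can_stop() returns.
     */
    err = can_stop(can_dev);
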
374 int can_mcan_set_mode(const struct device *dev, can_mode_t mode) in can_mcan_set_mode() argument
377 struct can_mcan_data *data = dev->data; in can_mcan_set_mode()
390 if ((mode & ~(supported)) != 0U) { in can_mcan_set_mode()
391 LOG_ERR("unsupported mode: 0x%08x", mode); in can_mcan_set_mode()
392 return -ENOTSUP; in can_mcan_set_mode()
395 if (data->common.started) { in can_mcan_set_mode()
396 return -EBUSY; in can_mcan_set_mode()
399 k_mutex_lock(&data->lock, K_FOREVER); in can_mcan_set_mode()
411 if ((mode & CAN_MODE_LOOPBACK) != 0) { in can_mcan_set_mode()
412 /* Loopback mode */ in can_mcan_set_mode()
419 if ((mode & CAN_MODE_LISTENONLY) != 0) { in can_mcan_set_mode()
420 /* Bus monitoring mode */ in can_mcan_set_mode()
427 if ((mode & CAN_MODE_FD) != 0) { in can_mcan_set_mode()
444 data->common.mode = mode; in can_mcan_set_mode()
447 k_mutex_unlock(&data->lock); in can_mcan_set_mode()
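
The mode handling above maps the can_mode_t flags (loopback, listen-only, FD) onto the corresponding control and test register bits, rejecting unsupported flags with -ENOTSUP and refusing changes on a started controller with -EBUSY. A hedged example of selecting a mode from application code, reusing can_dev and err; the flag combination is illustrative only:

    /* Must be called while the controller is stopped. */
    err = can_set_mode(can_dev, CAN_MODE_FD | CAN_MODE_LOOPBACK);
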
454 const struct can_mcan_config *config = dev->config; in can_mcan_state_change_handler()
455 struct can_mcan_data *data = dev->data; in can_mcan_state_change_handler()
456 const can_state_change_callback_t state_cb = data->common.state_change_cb; in can_mcan_state_change_handler()
457 void *state_cb_data = data->common.state_change_cb_user_data; in can_mcan_state_change_handler()
458 const struct can_mcan_callbacks *cbs = config->callbacks; in can_mcan_state_change_handler()
482 /* Call all TX queue callbacks with -ENETUNREACH */ in can_mcan_state_change_handler()
483 for (tx_idx = 0U; tx_idx < cbs->num_tx; tx_idx++) { in can_mcan_state_change_handler()
484 tx_cb = cbs->tx[tx_idx].function; in can_mcan_state_change_handler()
487 cbs->tx[tx_idx].function = NULL; in can_mcan_state_change_handler()
488 tx_cb(dev, -ENETUNREACH, cbs->tx[tx_idx].user_data); in can_mcan_state_change_handler()
489 k_sem_give(&data->tx_sem); in can_mcan_state_change_handler()
494 (data->common.mode & CAN_MODE_MANUAL_RECOVERY) == 0U) { in can_mcan_state_change_handler()
496 * Request leaving init mode, but do not take the lock (as we are in ISR in can_mcan_state_change_handler()
516 const struct can_mcan_config *config = dev->config; in can_mcan_tx_event_handler()
517 const struct can_mcan_callbacks *cbs = config->callbacks; in can_mcan_tx_event_handler()
518 struct can_mcan_data *data = dev->data; in can_mcan_tx_event_handler()
535 config->mram_offsets[CAN_MCAN_MRAM_CFG_TX_EVENT_FIFO] + in can_mcan_tx_event_handler()
552 __ASSERT_NO_MSG(tx_idx < cbs->num_tx); in can_mcan_tx_event_handler()
553 tx_cb = cbs->tx[tx_idx].function; in can_mcan_tx_event_handler()
554 user_data = cbs->tx[tx_idx].user_data; in can_mcan_tx_event_handler()
555 cbs->tx[tx_idx].function = NULL; in can_mcan_tx_event_handler()
557 k_sem_give(&data->tx_sem); in can_mcan_tx_event_handler()
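
The TX event FIFO handler looks up the per-buffer callback registered by can_mcan_send(), clears it, invokes it and releases a slot on the TX semaphore. From the application side this corresponds to the asynchronous form of can_send(); a hedged sketch, reusing can_dev and err from above (the callback and frame names are hypothetical):

    /* File scope: completion callback for asynchronous transmissions. */
    static void tx_done_cb(const struct device *dev, int error, void *user_data)
    {
            /* ISR context: error is 0 on successful transmission, or a
             * negative errno such as -ENETDOWN/-ENETUNREACH if the controller
             * was stopped or went bus-off while the frame was queued.
             */
    }

    /* Thread context: queue a frame without blocking; -EAGAIN means no TX
     * buffer element was free within the timeout.
     */
    struct can_frame hb_frame = {
            .id = 0x123,
            .dlc = 0U,      /* no payload in this example */
    };

    err = can_send(can_dev, &hb_frame, K_NO_WAIT, tx_done_cb, NULL);
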
629 struct can_mcan_data *data = dev->data; in can_mcan_line_0_isr()
655 k_sem_give(&data->tx_sem); in can_mcan_line_0_isr()
684 const struct can_mcan_config *config = dev->config; in can_mcan_get_message()
685 const struct can_mcan_callbacks *cbs = config->callbacks; in can_mcan_get_message()
760 filt_idx + cbs->num_std, frame.id); in can_mcan_get_message()
761 __ASSERT_NO_MSG(filt_idx < cbs->num_ext); in can_mcan_get_message()
762 cb = cbs->ext[filt_idx].function; in can_mcan_get_message()
763 user_data = cbs->ext[filt_idx].user_data; in can_mcan_get_message()
766 __ASSERT_NO_MSG(filt_idx < cbs->num_std); in can_mcan_get_message()
767 cb = cbs->std[filt_idx].function; in can_mcan_get_message()
768 user_data = cbs->std[filt_idx].user_data; in can_mcan_get_message()
794 const struct can_mcan_config *config = dev->config; in can_mcan_line_1_isr()
821 can_mcan_get_message(dev, config->mram_offsets[CAN_MCAN_MRAM_CFG_RX_FIFO0], in can_mcan_line_1_isr()
827 can_mcan_get_message(dev, config->mram_offsets[CAN_MCAN_MRAM_CFG_RX_FIFO1], in can_mcan_line_1_isr()
851 struct can_mcan_data *data = dev->data; in can_mcan_get_state()
861 if (!data->common.started) { in can_mcan_get_state()
880 err_cnt->tx_err_cnt = FIELD_GET(CAN_MCAN_ECR_TEC, reg); in can_mcan_get_state()
881 err_cnt->rx_err_cnt = FIELD_GET(CAN_MCAN_ECR_REC, reg); in can_mcan_get_state()
890 struct can_mcan_data *data = dev->data; in can_mcan_recover()
892 if (!data->common.started) { in can_mcan_recover()
893 return -ENETDOWN; in can_mcan_recover()
896 if ((data->common.mode & CAN_MODE_MANUAL_RECOVERY) == 0U) { in can_mcan_recover()
897 return -ENOTSUP; in can_mcan_recover()
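
can_mcan_get_state() special-cases a stopped controller and otherwise derives the bus state plus the TX/RX error counters from the error counter register, while can_mcan_recover() is only honoured when CAN_MODE_MANUAL_RECOVERY was selected. A hedged monitoring sketch, reusing can_dev and err, and assuming manual bus-off recovery support is enabled in the build:

    enum can_state state;
    struct can_bus_err_cnt err_cnt;

    err = can_get_state(can_dev, &state, &err_cnt);
    if (err == 0 && state == CAN_STATE_BUS_OFF) {
            /* Without CAN_MODE_MANUAL_RECOVERY the driver recovers
             * automatically and can_recover() returns -ENOTSUP.
             */
            err = can_recover(can_dev, K_MSEC(100));
    }
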
907 const struct can_mcan_config *config = dev->config; in can_mcan_send()
908 const struct can_mcan_callbacks *cbs = config->callbacks; in can_mcan_send()
909 struct can_mcan_data *data = dev->data; in can_mcan_send()
910 size_t data_length = can_dlc_to_bytes(frame->dlc); in can_mcan_send()
912 .rtr = (frame->flags & CAN_FRAME_RTR) != 0U ? 1U : 0U, in can_mcan_send()
913 .xtd = (frame->flags & CAN_FRAME_IDE) != 0U ? 1U : 0U, in can_mcan_send()
915 .dlc = frame->dlc, in can_mcan_send()
917 .fdf = (frame->flags & CAN_FRAME_FDF) != 0U ? 1U : 0U, in can_mcan_send()
918 .brs = (frame->flags & CAN_FRAME_BRS) != 0U ? 1U : 0U, in can_mcan_send()
925 uint32_t put_idx = -1; in can_mcan_send()
929 LOG_DBG("Sending %zu bytes. Id: 0x%x, ID type: %s %s %s %s", data_length, frame->id, in can_mcan_send()
930 (frame->flags & CAN_FRAME_IDE) != 0U ? "extended" : "standard", in can_mcan_send()
931 (frame->flags & CAN_FRAME_RTR) != 0U ? "RTR" : "", in can_mcan_send()
932 (frame->flags & CAN_FRAME_FDF) != 0U ? "FD frame" : "", in can_mcan_send()
933 (frame->flags & CAN_FRAME_BRS) != 0U ? "BRS" : ""); in can_mcan_send()
936 if ((frame->flags & ~(CAN_FRAME_IDE | CAN_FRAME_RTR | CAN_FRAME_FDF | CAN_FRAME_BRS)) != in can_mcan_send()
938 LOG_ERR("unsupported CAN frame flags 0x%02x", frame->flags); in can_mcan_send()
939 return -ENOTSUP; in can_mcan_send()
942 if ((data->common.mode & CAN_MODE_FD) == 0U && in can_mcan_send()
943 ((frame->flags & (CAN_FRAME_FDF | CAN_FRAME_BRS)) != 0U)) { in can_mcan_send()
944 LOG_ERR("CAN FD format not supported in non-FD mode"); in can_mcan_send()
945 return -ENOTSUP; in can_mcan_send()
948 if ((frame->flags & ~(CAN_FRAME_IDE | CAN_FRAME_RTR)) != 0U) { in can_mcan_send()
949 LOG_ERR("unsupported CAN frame flags 0x%02x", frame->flags); in can_mcan_send()
950 return -ENOTSUP; in can_mcan_send()
954 if (data_length > sizeof(frame->data)) { in can_mcan_send()
956 sizeof(frame->data)); in can_mcan_send()
957 return -EINVAL; in can_mcan_send()
960 if ((frame->flags & CAN_FRAME_FDF) != 0U) { in can_mcan_send()
961 if (frame->dlc > CANFD_MAX_DLC) { in can_mcan_send()
962 LOG_ERR("DLC of %d for CAN FD format frame", frame->dlc); in can_mcan_send()
963 return -EINVAL; in can_mcan_send()
966 if (frame->dlc > CAN_MAX_DLC) { in can_mcan_send()
967 LOG_ERR("DLC of %d for non-FD format frame", frame->dlc); in can_mcan_send()
968 return -EINVAL; in can_mcan_send()
972 if (!data->common.started) { in can_mcan_send()
973 return -ENETDOWN; in can_mcan_send()
982 return -ENETUNREACH; in can_mcan_send()
985 err = k_sem_take(&data->tx_sem, timeout); in can_mcan_send()
987 return -EAGAIN; in can_mcan_send()
990 k_mutex_lock(&data->tx_mtx, K_FOREVER); in can_mcan_send()
993 for (int i = 0; i < cbs->num_tx; i++) { in can_mcan_send()
994 if (cbs->tx[i].function == NULL) { in can_mcan_send()
1002 if ((frame->flags & CAN_FRAME_IDE) != 0U) { in can_mcan_send()
1003 tx_hdr.ext_id = frame->id; in can_mcan_send()
1005 tx_hdr.std_id = frame->id & CAN_STD_ID_MASK; in can_mcan_send()
1008 err = can_mcan_write_mram(dev, config->mram_offsets[CAN_MCAN_MRAM_CFG_TX_BUFFER] + put_idx * in can_mcan_send()
1017 if ((frame->flags & CAN_FRAME_RTR) == 0U && data_length != 0U) { in can_mcan_send()
1018 err = can_mcan_write_mram(dev, config->mram_offsets[CAN_MCAN_MRAM_CFG_TX_BUFFER] + in can_mcan_send()
1021 &frame->data_32, ROUND_UP(data_length, sizeof(uint32_t))); in can_mcan_send()
1028 __ASSERT_NO_MSG(put_idx < cbs->num_tx); in can_mcan_send()
1029 cbs->tx[put_idx].function = callback; in can_mcan_send()
1030 cbs->tx[put_idx].user_data = user_data; in can_mcan_send()
1034 cbs->tx[put_idx].function = NULL; in can_mcan_send()
1038 k_mutex_unlock(&data->tx_mtx); in can_mcan_send()
1042 k_mutex_unlock(&data->tx_mtx); in can_mcan_send()
1043 k_sem_give(&data->tx_sem); in can_mcan_send()
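
can_mcan_send() validates the frame flags and DLC against the selected mode, reserves a TX buffer element through the semaphore, writes the header and payload into message RAM and records the completion callback. A hedged blocking usage sketch, reusing can_dev and err; the identifier and payload are arbitrary:

    struct can_frame frame = {
            .flags = 0U,                 /* standard ID, classic data frame */
            .id = 0x123,
            .dlc = can_bytes_to_dlc(8),
            .data = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08},
    };

    /* With a NULL callback, can_send() blocks until the frame has been
     * transmitted or an error is reported; -EAGAIN means no TX buffer
     * element became free within the timeout.
     */
    err = can_send(can_dev, &frame, K_MSEC(100), NULL, NULL);
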
1050 const struct can_mcan_config *config = dev->config; in can_mcan_get_max_filters()
1051 const struct can_mcan_callbacks *cbs = config->callbacks; in can_mcan_get_max_filters()
1054 return cbs->num_ext; in can_mcan_get_max_filters()
1056 return cbs->num_std; in can_mcan_get_max_filters()
1061 * 28 standard filters, dual mode needs to be implemented.
1062 * Dual mode gets tricky, because we can only activate both filters.
1068 const struct can_mcan_config *config = dev->config; in can_mcan_add_rx_filter_std()
1069 const struct can_mcan_callbacks *cbs = config->callbacks; in can_mcan_add_rx_filter_std()
1070 struct can_mcan_data *data = dev->data; in can_mcan_add_rx_filter_std()
1072 .sfid1 = filter->id, in can_mcan_add_rx_filter_std()
1073 .sfid2 = filter->mask, in can_mcan_add_rx_filter_std()
1076 int filter_id = -ENOSPC; in can_mcan_add_rx_filter_std()
1080 k_mutex_lock(&data->lock, K_FOREVER); in can_mcan_add_rx_filter_std()
1082 for (i = 0; i < cbs->num_std; i++) { in can_mcan_add_rx_filter_std()
1083 if (cbs->std[i].function == NULL) { in can_mcan_add_rx_filter_std()
1089 if (filter_id == -ENOSPC) { in can_mcan_add_rx_filter_std()
1091 k_mutex_unlock(&data->lock); in can_mcan_add_rx_filter_std()
1092 return -ENOSPC; in can_mcan_add_rx_filter_std()
1098 err = can_mcan_write_mram(dev, config->mram_offsets[CAN_MCAN_MRAM_CFG_STD_FILTER] + in can_mcan_add_rx_filter_std()
1106 k_mutex_unlock(&data->lock); in can_mcan_add_rx_filter_std()
1110 __ASSERT_NO_MSG(filter_id < cbs->num_std); in can_mcan_add_rx_filter_std()
1111 cbs->std[filter_id].function = callback; in can_mcan_add_rx_filter_std()
1112 cbs->std[filter_id].user_data = user_data; in can_mcan_add_rx_filter_std()
1120 const struct can_mcan_config *config = dev->config; in can_mcan_add_rx_filter_ext()
1121 const struct can_mcan_callbacks *cbs = config->callbacks; in can_mcan_add_rx_filter_ext()
1122 struct can_mcan_data *data = dev->data; in can_mcan_add_rx_filter_ext()
1124 .efid2 = filter->mask, in can_mcan_add_rx_filter_ext()
1125 .efid1 = filter->id, in can_mcan_add_rx_filter_ext()
1128 int filter_id = -ENOSPC; in can_mcan_add_rx_filter_ext()
1132 k_mutex_lock(&data->lock, K_FOREVER); in can_mcan_add_rx_filter_ext()
1134 for (i = 0; i < cbs->num_ext; i++) { in can_mcan_add_rx_filter_ext()
1135 if (cbs->ext[i].function == NULL) { in can_mcan_add_rx_filter_ext()
1141 if (filter_id == -ENOSPC) { in can_mcan_add_rx_filter_ext()
1143 k_mutex_unlock(&data->lock); in can_mcan_add_rx_filter_ext()
1144 return -ENOSPC; in can_mcan_add_rx_filter_ext()
1150 err = can_mcan_write_mram(dev, config->mram_offsets[CAN_MCAN_MRAM_CFG_EXT_FILTER] + in can_mcan_add_rx_filter_ext()
1158 k_mutex_unlock(&data->lock); in can_mcan_add_rx_filter_ext()
1162 __ASSERT_NO_MSG(filter_id < cbs->num_ext); in can_mcan_add_rx_filter_ext()
1163 cbs->ext[filter_id].function = callback; in can_mcan_add_rx_filter_ext()
1164 cbs->ext[filter_id].user_data = user_data; in can_mcan_add_rx_filter_ext()
1172 const struct can_mcan_config *config = dev->config; in can_mcan_add_rx_filter()
1173 const struct can_mcan_callbacks *cbs = config->callbacks; in can_mcan_add_rx_filter()
1176 if ((filter->flags & ~(CAN_FILTER_IDE)) != 0U) { in can_mcan_add_rx_filter()
1177 LOG_ERR("unsupported CAN filter flags 0x%02x", filter->flags); in can_mcan_add_rx_filter()
1178 return -ENOTSUP; in can_mcan_add_rx_filter()
1181 if ((filter->flags & CAN_FILTER_IDE) != 0U) { in can_mcan_add_rx_filter()
1184 filter_id += cbs->num_std; in can_mcan_add_rx_filter()
1195 const struct can_mcan_config *config = dev->config; in can_mcan_remove_rx_filter()
1196 const struct can_mcan_callbacks *cbs = config->callbacks; in can_mcan_remove_rx_filter()
1197 struct can_mcan_data *data = dev->data; in can_mcan_remove_rx_filter()
1205 k_mutex_lock(&data->lock, K_FOREVER); in can_mcan_remove_rx_filter()
1207 if (filter_id >= cbs->num_std) { in can_mcan_remove_rx_filter()
1208 filter_id -= cbs->num_std; in can_mcan_remove_rx_filter()
1209 if (filter_id >= cbs->num_ext) { in can_mcan_remove_rx_filter()
1211 k_mutex_unlock(&data->lock); in can_mcan_remove_rx_filter()
1215 cbs->ext[filter_id].function = NULL; in can_mcan_remove_rx_filter()
1216 cbs->ext[filter_id].user_data = NULL; in can_mcan_remove_rx_filter()
1218 err = can_mcan_clear_mram(dev, config->mram_offsets[CAN_MCAN_MRAM_CFG_EXT_FILTER] + in can_mcan_remove_rx_filter()
1225 cbs->std[filter_id].function = NULL; in can_mcan_remove_rx_filter()
1226 cbs->std[filter_id].user_data = NULL; in can_mcan_remove_rx_filter()
1228 err = can_mcan_clear_mram(dev, config->mram_offsets[CAN_MCAN_MRAM_CFG_STD_FILTER] + in can_mcan_remove_rx_filter()
1236 k_mutex_unlock(&data->lock); in can_mcan_remove_rx_filter()
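
Filter management stores the callback in the standard or extended callback table and mirrors the filter element into message RAM; can_mcan_add_rx_filter() offsets the returned filter ID by the number of standard filters for extended entries, and can_mcan_remove_rx_filter() clears both again. A hedged usage sketch, reusing can_dev; the callback name and the ID/mask values are illustrative:

    /* File scope: RX match callback. */
    static void rx_cb(const struct device *dev, struct can_frame *frame,
                      void *user_data)
    {
            /* ISR context, called for every frame accepted by the filter. */
    }

    /* Thread context: install and later remove an extended-ID filter. */
    const struct can_filter filter = {
            .flags = CAN_FILTER_IDE,        /* match extended (29-bit) IDs */
            .id = 0x01234567,
            .mask = CAN_EXT_ID_MASK,
    };
    int filter_id;

    filter_id = can_add_rx_filter(can_dev, rx_cb, NULL, &filter);
    if (filter_id >= 0) {
            /* ... */
            can_remove_rx_filter(can_dev, filter_id);
    } else {
            /* -ENOSPC: all matching filter elements are already in use. */
    }
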
1242 struct can_mcan_data *data = dev->data; in can_mcan_set_state_change_callback()
1244 data->common.state_change_cb = callback; in can_mcan_set_state_change_callback()
1245 data->common.state_change_cb_user_data = user_data; in can_mcan_set_state_change_callback()
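
The callback stored here is what can_mcan_state_change_handler() invokes from ISR context on error-state transitions. A hedged registration sketch, reusing can_dev; the handler name is hypothetical:

    static void state_cb(const struct device *dev, enum can_state state,
                         struct can_bus_err_cnt err_cnt, void *user_data)
    {
            /* ISR context: e.g. log transitions to CAN_STATE_BUS_OFF or hand
             * them off to a work queue.
             */
    }

    can_set_state_change_callback(can_dev, state_cb, NULL);
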
1254 struct can_mcan_data *data = dev->data; in can_mcan_enable_configuration_change()
1258 k_mutex_lock(&data->lock, K_FOREVER); in can_mcan_enable_configuration_change()
1273 k_mutex_unlock(&data->lock); in can_mcan_enable_configuration_change()
1278 const struct can_mcan_config *config = dev->config; in can_mcan_configure_mram()
1285 LOG_ERR("Failed to exit sleep mode"); in can_mcan_configure_mram()
1286 return -EIO; in can_mcan_configure_mram()
1291 LOG_ERR("Failed to enter init mode"); in can_mcan_configure_mram()
1292 return -EIO; in can_mcan_configure_mram()
1297 addr = mram - mrba + config->mram_offsets[CAN_MCAN_MRAM_CFG_STD_FILTER]; in can_mcan_configure_mram()
1299 config->mram_elements[CAN_MCAN_MRAM_CFG_STD_FILTER]); in can_mcan_configure_mram()
1305 addr = mram - mrba + config->mram_offsets[CAN_MCAN_MRAM_CFG_EXT_FILTER]; in can_mcan_configure_mram()
1307 config->mram_elements[CAN_MCAN_MRAM_CFG_EXT_FILTER]); in can_mcan_configure_mram()
1313 addr = mram - mrba + config->mram_offsets[CAN_MCAN_MRAM_CFG_RX_FIFO0]; in can_mcan_configure_mram()
1315 config->mram_elements[CAN_MCAN_MRAM_CFG_RX_FIFO0]); in can_mcan_configure_mram()
1321 addr = mram - mrba + config->mram_offsets[CAN_MCAN_MRAM_CFG_RX_FIFO1]; in can_mcan_configure_mram()
1323 config->mram_elements[CAN_MCAN_MRAM_CFG_RX_FIFO1]); in can_mcan_configure_mram()
1329 addr = mram - mrba + config->mram_offsets[CAN_MCAN_MRAM_CFG_RX_BUFFER]; in can_mcan_configure_mram()
1336 addr = mram - mrba + config->mram_offsets[CAN_MCAN_MRAM_CFG_TX_EVENT_FIFO]; in can_mcan_configure_mram()
1338 config->mram_elements[CAN_MCAN_MRAM_CFG_TX_EVENT_FIFO]); in can_mcan_configure_mram()
1344 addr = mram - mrba + config->mram_offsets[CAN_MCAN_MRAM_CFG_TX_BUFFER]; in can_mcan_configure_mram()
1346 config->mram_elements[CAN_MCAN_MRAM_CFG_TX_BUFFER]) | CAN_MCAN_TXBC_TFQM; in can_mcan_configure_mram()
1371 const struct can_mcan_config *config = dev->config; in can_mcan_init()
1372 const struct can_mcan_callbacks *cbs = config->callbacks; in can_mcan_init()
1373 struct can_mcan_data *data = dev->data; in can_mcan_init()
1381 __ASSERT_NO_MSG(config->ops->read_reg != NULL); in can_mcan_init()
1382 __ASSERT_NO_MSG(config->ops->write_reg != NULL); in can_mcan_init()
1383 __ASSERT_NO_MSG(config->ops->read_mram != NULL); in can_mcan_init()
1384 __ASSERT_NO_MSG(config->ops->write_mram != NULL); in can_mcan_init()
1385 __ASSERT_NO_MSG(config->ops->clear_mram != NULL); in can_mcan_init()
1386 __ASSERT_NO_MSG(config->callbacks != NULL); in can_mcan_init()
1388 __ASSERT_NO_MSG(cbs->num_tx <= config->mram_elements[CAN_MCAN_MRAM_CFG_TX_BUFFER]); in can_mcan_init()
1389 __ASSERT_NO_MSG(cbs->num_std <= config->mram_elements[CAN_MCAN_MRAM_CFG_STD_FILTER]); in can_mcan_init()
1390 __ASSERT_NO_MSG(cbs->num_ext <= config->mram_elements[CAN_MCAN_MRAM_CFG_EXT_FILTER]); in can_mcan_init()
1392 k_mutex_init(&data->lock); in can_mcan_init()
1393 k_mutex_init(&data->tx_mtx); in can_mcan_init()
1394 k_sem_init(&data->tx_sem, cbs->num_tx, cbs->num_tx); in can_mcan_init()
1396 if (config->common.phy != NULL) { in can_mcan_init()
1397 if (!device_is_ready(config->common.phy)) { in can_mcan_init()
1399 return -ENODEV; in can_mcan_init()
1405 LOG_ERR("Failed to exit sleep mode"); in can_mcan_init()
1406 return -EIO; in can_mcan_init()
1411 LOG_ERR("Failed to enter init mode"); in can_mcan_init()
1412 return -EIO; in can_mcan_init()
1420 return -EIO; in can_mcan_init()
1469 err = can_calc_timing(dev, &timing, config->common.bitrate, in can_mcan_init()
1470 config->common.sample_point); in can_mcan_init()
1471 if (err == -EINVAL) { in can_mcan_init()
1473 return -EIO; in can_mcan_init()
1478 LOG_DBG("Sample-point err : %d", err); in can_mcan_init()
1480 err = can_calc_timing_data(dev, &timing_data, config->common.bitrate_data, in can_mcan_init()
1481 config->common.sample_point_data); in can_mcan_init()
1482 if (err == -EINVAL) { in can_mcan_init()
1484 return -EIO; in can_mcan_init()
1487 LOG_DBG("Sample-point err data phase: %d", err); in can_mcan_init()
1493 return -ENODEV; in can_mcan_init()
1500 return -ENODEV; in can_mcan_init()
1538 return can_mcan_clear_mram(dev, 0, config->mram_size); in can_mcan_init()
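
Taken together, the init path above (ops and callback table asserts, sleep-mode exit, init-mode entry, timing calculation and message-RAM configuration) leaves the controller in the stopped state, ready to be configured and started. A hedged minimal bring-up from the application side, assuming the board exposes its controller through the zephyr,canbus chosen node:

    #include <zephyr/device.h>
    #include <zephyr/drivers/can.h>

    int can_bringup(void)
    {
            const struct device *dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_canbus));
            int err;

            if (!device_is_ready(dev)) {
                    return -ENODEV;
            }

            err = can_set_mode(dev, CAN_MODE_NORMAL);
            if (err != 0) {
                    return err;
            }

            return can_start(dev);
    }
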