Lines matching "dead-battery"
4 * SPDX-License-Identifier: Apache-2.0
65 * UCPDx_STROBE: UCPDx pull-down configuration strobe:
72 SYSCFG->CFGR1 |= SYSCFG_CFGR1_UCPD1_STROBE_Msk; in update_stm32g0x_cc_line()
74 SYSCFG->CFGR1 |= SYSCFG_CFGR1_UCPD2_STROBE_Msk; in update_stm32g0x_cc_line()
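The two strobe writes above are the STM32G0-specific step that latches a new CC pull-down configuration. A minimal sketch of how such a helper might look, assuming only the instance pointers and mask names visible in the fragments:

/* Sketch only: strobe the SYSCFG pull-down configuration after changing
 * the CC/dead-battery settings of a UCPD instance. UCPD1/UCPD2 and the
 * SYSCFG_CFGR1_UCPDx_STROBE_Msk names come from the lines above; the
 * driver's real function body may differ.
 */
static void update_stm32g0x_cc_line(UCPD_TypeDef *ucpd_port)
{
	if (ucpd_port == UCPD1) {
		SYSCFG->CFGR1 |= SYSCFG_CFGR1_UCPD1_STROBE_Msk;
	} else {
		SYSCFG->CFGR1 |= SYSCFG_CFGR1_UCPD2_STROBE_Msk;
	}
}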
84 struct tcpc_data *data = dev->data; in ucpd_tx_data_byte()
85 const struct tcpc_config *const config = dev->config; in ucpd_tx_data_byte()
86 int index = data->ucpd_tx_active_buffer->msg_index++; in ucpd_tx_data_byte()
88 LL_UCPD_WriteData(config->ucpd_port, in ucpd_tx_data_byte()
89 data->ucpd_tx_active_buffer->data.msg[index]); in ucpd_tx_data_byte()
97 struct tcpc_data *data = dev->data; in ucpd_rx_data_byte()
98 const struct tcpc_config *const config = dev->config; in ucpd_rx_data_byte()
100 if (data->ucpd_rx_byte_count < UCPD_BUF_LEN) { in ucpd_rx_data_byte()
101 data->ucpd_rx_buffer[data->ucpd_rx_byte_count++] = in ucpd_rx_data_byte()
102 LL_UCPD_ReadData(config->ucpd_port); in ucpd_rx_data_byte()
111 const struct tcpc_config *const config = dev->config; in ucpd_tx_interrupts_enable()
114 imr = LL_UCPD_ReadReg(config->ucpd_port, IMR); in ucpd_tx_interrupts_enable()
117 LL_UCPD_WriteReg(config->ucpd_port, ICR, UCPD_ICR_TX_INT_MASK); in ucpd_tx_interrupts_enable()
118 LL_UCPD_WriteReg(config->ucpd_port, IMR, in ucpd_tx_interrupts_enable()
121 LL_UCPD_WriteReg(config->ucpd_port, IMR, in ucpd_tx_interrupts_enable()
131 struct tcpc_data *data = dev->data; in stm32_ucpd_state_init()
134 data->ucpd_tx_request = 0; in stm32_ucpd_state_init()
135 data->tx_retry_count = 0; in stm32_ucpd_state_init()
136 data->ucpd_tx_state = STATE_IDLE; in stm32_ucpd_state_init()
139 data->ucpd_rx_sop_prime_enabled = false; in stm32_ucpd_state_init()
140 data->ucpd_rx_msg_active = false; in stm32_ucpd_state_init()
141 data->ucpd_rx_bist_mode = false; in stm32_ucpd_state_init()
144 data->ucpd_vconn_enable = false; in stm32_ucpd_state_init()
155 struct tcpc_data *data = dev->data; in ucpd_get_cc_enable_mask()
156 const struct tcpc_config *const config = dev->config; in ucpd_get_cc_enable_mask()
163 if (data->ucpd_vconn_enable) { in ucpd_get_cc_enable_mask()
164 uint32_t cr = LL_UCPD_ReadReg(config->ucpd_port, CR); in ucpd_get_cc_enable_mask()
178 * @retval -EIO on failure
184 const struct tcpc_config *const config = dev->config; in ucpd_get_cc()
203 * ------------------ in ucpd_get_cc()
204 * 0 -> 1 in ucpd_get_cc()
205 * 1 -> 2 in ucpd_get_cc()
206 * 2 -> 0 in ucpd_get_cc()
210 sr = LL_UCPD_ReadReg(config->ucpd_port, SR); in ucpd_get_cc()
213 anamode = LL_UCPD_GetRole(config->ucpd_port); in ucpd_get_cc()
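For reference, the status word read above carries a two-bit "vstate" code for each CC line. A hedged sketch of extracting those fields, assuming the STM32G0 CMSIS names UCPD_SR_TYPEC_VSTATE_CC1/CC2 (they do not appear in the fragments):

/* Sketch: decode the per-CC voltage-state fields from a UCPD SR value.
 * The _Msk/_Pos names are assumed from the STM32G0 CMSIS header. How each
 * two-bit code maps to open/Ra/Rd (or an Rp level) depends on ANAMODE,
 * i.e. whether the port is presenting Rp or Rd, which is why the code
 * above also reads LL_UCPD_GetRole().
 */
static void ucpd_decode_vstate(uint32_t sr, uint32_t *cc1, uint32_t *cc2)
{
	*cc1 = (sr & UCPD_SR_TYPEC_VSTATE_CC1_Msk) >> UCPD_SR_TYPEC_VSTATE_CC1_Pos;
	*cc2 = (sr & UCPD_SR_TYPEC_VSTATE_CC2_Msk) >> UCPD_SR_TYPEC_VSTATE_CC2_Pos;
}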
261 * @retval -EIO on failure
262 * @retval -ENOTSUP if not supported
266 struct tcpc_data *data = dev->data; in ucpd_set_vconn()
267 const struct tcpc_config *const config = dev->config; in ucpd_set_vconn()
271 if (data->vconn_cb == NULL) { in ucpd_set_vconn()
272 return -ENOTSUP; in ucpd_set_vconn()
276 data->ucpd_vconn_enable = enable; in ucpd_set_vconn()
278 cr = LL_UCPD_ReadReg(config->ucpd_port, CR); in ucpd_set_vconn()
283 LL_UCPD_WriteReg(config->ucpd_port, CR, cr); in ucpd_set_vconn()
286 update_stm32g0x_cc_line(config->ucpd_port); in ucpd_set_vconn()
290 data->ucpd_vconn_cc = (cr & UCPD_CR_CCENABLE_0) ? in ucpd_set_vconn()
294 ret = data->vconn_cb(dev, data->ucpd_vconn_cc, enable); in ucpd_set_vconn()
303 * @retval -EIO on failure
304 * @retval -ENOTSUP if not supported
308 struct tcpc_data *data = dev->data; in ucpd_vconn_discharge()
309 const struct tcpc_config *const config = dev->config; in ucpd_vconn_discharge()
312 if (data->ucpd_vconn_enable) { in ucpd_vconn_discharge()
313 return -EIO; in ucpd_vconn_discharge()
316 if (data->vconn_discharge_cb) { in ucpd_vconn_discharge()
318 return data->vconn_discharge_cb(dev, data->ucpd_vconn_cc, enable); in ucpd_vconn_discharge()
323 LL_UCPD_VconnDischargeEnable(config->ucpd_port); in ucpd_vconn_discharge()
325 LL_UCPD_VconnDischargeDisable(config->ucpd_port); in ucpd_vconn_discharge()
329 update_stm32g0x_cc_line(config->ucpd_port); in ucpd_vconn_discharge()
342 struct tcpc_data *data = dev->data; in ucpd_select_rp_value()
344 data->rp = rp; in ucpd_select_rp_value()
356 struct tcpc_data *data = dev->data; in ucpd_get_rp_value()
358 *rp = data->rp; in ucpd_get_rp_value()
364 * @brief Enable or disable Dead Battery resistors
368 struct tcpc_data *data = dev->data; in dead_battery()
371 const struct tcpc_config *const config = dev->config; in dead_battery()
374 cr = LL_UCPD_ReadReg(config->ucpd_port, CR); in dead_battery()
382 LL_UCPD_WriteReg(config->ucpd_port, CR, cr); in dead_battery()
383 update_stm32g0x_cc_line(config->ucpd_port); in dead_battery()
386 CLEAR_BIT(PWR->CR3, PWR_CR3_UCPD_DBDIS); in dead_battery()
388 SET_BIT(PWR->CR3, PWR_CR3_UCPD_DBDIS); in dead_battery()
391 data->dead_battery_active = en; in dead_battery()
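Pulling the dead_battery() fragments together: on parts where the dead-battery pull-downs are controlled through the PWR block rather than UCPD_CR, the flow reduces to toggling the DBDIS bit (note it is a disable bit). A hedged sketch of that path, using only the macros visible above:

/* Sketch of the PWR-based dead-battery path seen above. PWR_CR3_UCPD_DBDIS
 * disables the Type-C dead-battery pull-downs, so enabling dead-battery
 * support means clearing the bit. The CR read/modify/write fragments above
 * apply to parts that manage the pull-downs inside UCPD itself.
 */
static void dead_battery_pwr_path(bool en)
{
	if (en) {
		CLEAR_BIT(PWR->CR3, PWR_CR3_UCPD_DBDIS);
	} else {
		SET_BIT(PWR->CR3, PWR_CR3_UCPD_DBDIS);
	}
}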
398 * @retval -EIO on failure
403 const struct tcpc_config *const config = dev->config; in ucpd_set_cc()
404 struct tcpc_data *data = dev->data; in ucpd_set_cc()
407 /* Disable dead battery if it's active */ in ucpd_set_cc()
408 if (data->dead_battery_active) { in ucpd_set_cc()
412 cr = LL_UCPD_ReadReg(config->ucpd_port, CR); in ucpd_set_cc()
420 cr |= STM32_UCPD_CR_ANASUBMODE_VAL(UCPD_RP_TO_ANASUB(data->rp)); in ucpd_set_cc()
434 LL_UCPD_WriteReg(config->ucpd_port, CR, cr); in ucpd_set_cc()
437 update_stm32g0x_cc_line(config->ucpd_port); in ucpd_set_cc()
448 * @retval -EIO on failure
449 * @retval -ENOTSUP if polarity is not supported
454 const struct tcpc_config *const config = dev->config; in ucpd_cc_set_polarity()
457 cr = LL_UCPD_ReadReg(config->ucpd_port, CR); in ucpd_cc_set_polarity()
471 return -ENOTSUP; in ucpd_cc_set_polarity()
475 LL_UCPD_WriteReg(config->ucpd_port, CR, cr); in ucpd_cc_set_polarity()
484 * @retval -EIO on failure
488 const struct tcpc_config *const config = dev->config; in ucpd_set_rx_enable()
492 imr = LL_UCPD_ReadReg(config->ucpd_port, IMR); in ucpd_set_rx_enable()
493 cr = LL_UCPD_ReadReg(config->ucpd_port, CR); in ucpd_set_rx_enable()
501 LL_UCPD_WriteReg(config->ucpd_port, ICR, UCPD_ICR_RX_INT_MASK); in ucpd_set_rx_enable()
504 LL_UCPD_WriteReg(config->ucpd_port, IMR, imr); in ucpd_set_rx_enable()
505 LL_UCPD_WriteReg(config->ucpd_port, CR, cr); in ucpd_set_rx_enable()
509 LL_UCPD_WriteReg(config->ucpd_port, CR, cr); in ucpd_set_rx_enable()
510 LL_UCPD_WriteReg(config->ucpd_port, IMR, imr); in ucpd_set_rx_enable()
520 * @retval -EIO on failure
526 struct tcpc_data *data = dev->data; in ucpd_set_roles()
528 data->msg_header.pr = power_role; in ucpd_set_roles()
529 data->msg_header.dr = data_role; in ucpd_set_roles()
538 * @retval -EIO on failure
542 struct tcpc_data *data = dev->data; in ucpd_sop_prime_enable()
545 data->ucpd_rx_sop_prime_enabled = enable; in ucpd_sop_prime_enable()
556 struct tcpc_data *data = dev->data; in ucpd_start_transmit()
557 const struct tcpc_config *const config = dev->config; in ucpd_start_transmit()
562 cr = LL_UCPD_ReadReg(config->ucpd_port, CR); in ucpd_start_transmit()
565 data->ucpd_tx_active_buffer = &data->ucpd_tx_buffers[msg_type]; in ucpd_start_transmit()
566 type = data->ucpd_tx_active_buffer->type; in ucpd_start_transmit()
575 * respect to an on-going Tx message, which (if still in in ucpd_start_transmit()
577 * sequence and directly appending an EOP K-code sequence. No in ucpd_start_transmit()
587 LL_UCPD_WriteReg(config->ucpd_port, ICR, in ucpd_start_transmit()
590 imr = LL_UCPD_ReadReg(config->ucpd_port, IMR); in ucpd_start_transmit()
592 LL_UCPD_WriteReg(config->ucpd_port, IMR, imr); in ucpd_start_transmit()
596 LL_UCPD_WriteReg(config->ucpd_port, CR, cr); in ucpd_start_transmit()
605 * Normal -> 0 in ucpd_start_transmit()
606 * Cable Reset -> 1 in ucpd_start_transmit()
607 * Bist -> 2 in ucpd_start_transmit()
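The mapping in the comment above selects the hardware TX mode for the message about to be sent. A hedged illustration of that selection as a helper, assuming the PD_PACKET_CABLE_RESET and PD_PACKET_TX_BIST_MODE_2 packet types (only PD_PACKET_TX_HARD_RESET appears in these fragments):

/* Illustration only: return the TXMODE field value listed in the comment
 * above for a given packet type. The enum constants other than
 * PD_PACKET_TX_HARD_RESET are assumed; the driver may encode this
 * selection differently.
 */
static uint32_t ucpd_txmode_for_type(enum pd_packet_type type)
{
	switch (type) {
	case PD_PACKET_CABLE_RESET:
		return 1;	/* Cable Reset -> 1 */
	case PD_PACKET_TX_BIST_MODE_2:
		return 2;	/* Bist -> 2 */
	default:
		return 0;	/* Normal USB-PD message -> 0 */
	}
}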
627 msg_len = data->ucpd_tx_active_buffer->msg_len; in ucpd_start_transmit()
630 LL_UCPD_WriteTxPaySize(config->ucpd_port, msg_len); in ucpd_start_transmit()
635 LL_UCPD_WriteReg(config->ucpd_port, CR, cr); in ucpd_start_transmit()
639 LL_UCPD_WriteTxOrderSet(config->ucpd_port, in ucpd_start_transmit()
644 data->ucpd_tx_active_buffer->msg_index = 0; in ucpd_start_transmit()
650 LL_UCPD_SendMessage(config->ucpd_port); in ucpd_start_transmit()
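Taken together, the ucpd_start_transmit() fragments show the LL-level kick-off sequence for a normal message. A condensed, hedged sketch (error handling and the hard-reset/BIST branches are omitted; the driver also resets the active buffer's msg_index to 0 first, as seen above):

/* Sketch of the normal-message TX kick-off visible above: program the
 * K-code ordered set and the payload size in bytes, then trigger the
 * transmission. TXIS interrupts subsequently call ucpd_tx_data_byte()
 * above to feed each payload byte into the data register.
 */
static void ucpd_tx_kickoff(UCPD_TypeDef *port, uint32_t order_set,
			    uint32_t msg_len)
{
	LL_UCPD_WriteTxOrderSet(port, order_set);
	LL_UCPD_WriteTxPaySize(port, msg_len);

	LL_UCPD_SendMessage(port);
}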
659 struct tcpc_data *data = dev->data; in ucpd_set_tx_state()
661 data->ucpd_tx_state = state; in ucpd_set_tx_state()
669 if (info->handler) { in ucpd_notify_handler()
670 info->handler(info->dev, info->data, alert); in ucpd_notify_handler()
679 struct tcpc_data *data = info->dev->data; in ucpd_manage_tx()
683 if (atomic_test_and_clear_bit(&info->evt, UCPD_EVT_HR_REQ)) { in ucpd_manage_tx()
691 ucpd_set_tx_state(info->dev, STATE_HARD_RESET); in ucpd_manage_tx()
693 data->ucpd_tx_request &= ~BIT(msg_src); in ucpd_manage_tx()
696 switch (data->ucpd_tx_state) { in ucpd_manage_tx()
698 if (data->ucpd_tx_request & MSG_GOOD_CRC_MASK) { in ucpd_manage_tx()
699 ucpd_set_tx_state(info->dev, STATE_ACTIVE_CRC); in ucpd_manage_tx()
701 } else if (data->ucpd_tx_request & MSG_TCPM_MASK) { in ucpd_manage_tx()
702 if (atomic_test_and_clear_bit(&info->evt, UCPD_EVT_RX_MSG)) { in ucpd_manage_tx()
704 * USB-PD Specification rev 3.0, section 6.10 in ucpd_manage_tx()
713 data->ucpd_tx_request &= ~MSG_TCPM_MASK; in ucpd_manage_tx()
714 } else if (!data->ucpd_rx_msg_active) { in ucpd_manage_tx()
715 ucpd_set_tx_state(info->dev, STATE_ACTIVE_TCPM); in ucpd_manage_tx()
719 data->ucpd_tx_buffers[TX_MSG_TCPM].data.header; in ucpd_manage_tx()
720 data->msg_id_match = hdr.message_id; in ucpd_manage_tx()
721 data->tx_retry_max = hdr.specification_revision == PD_REV30 ? in ucpd_manage_tx()
728 if (data->ucpd_tx_state != STATE_IDLE) { in ucpd_manage_tx()
729 data->ucpd_tx_request &= ~BIT(msg_src); in ucpd_manage_tx()
730 data->tx_retry_count = 0; in ucpd_manage_tx()
743 if (atomic_test_and_clear_bit(&info->evt, UCPD_EVT_TX_MSG_SUCCESS)) { in ucpd_manage_tx()
744 ucpd_set_tx_state(info->dev, STATE_WAIT_CRC_ACK); in ucpd_manage_tx()
746 k_timer_start(&data->goodcrc_rx_timer, K_USEC(1000), K_NO_WAIT); in ucpd_manage_tx()
747 } else if (atomic_test_and_clear_bit(&info->evt, UCPD_EVT_TX_MSG_DISC) || in ucpd_manage_tx()
748 atomic_test_and_clear_bit(&info->evt, UCPD_EVT_TX_MSG_FAIL)) { in ucpd_manage_tx()
749 if (data->tx_retry_count < data->tx_retry_max) { in ucpd_manage_tx()
750 if (atomic_test_and_clear_bit(&info->evt, UCPD_EVT_RX_MSG)) { in ucpd_manage_tx()
759 ucpd_set_tx_state(info->dev, in ucpd_manage_tx()
763 ucpd_set_tx_state(info->dev, in ucpd_manage_tx()
771 data->tx_retry_count++; in ucpd_manage_tx()
776 status = (atomic_test_and_clear_bit(&info->evt, in ucpd_manage_tx()
780 ucpd_set_tx_state(info->dev, STATE_IDLE); in ucpd_manage_tx()
787 if (atomic_test_bit(&info->evt, UCPD_EVT_TX_MSG_SUCCESS) || in ucpd_manage_tx()
788 atomic_test_bit(&info->evt, UCPD_EVT_TX_MSG_FAIL) || in ucpd_manage_tx()
789 atomic_test_bit(&info->evt, UCPD_EVT_TX_MSG_DISC)) { in ucpd_manage_tx()
790 atomic_clear_bit(&info->evt, UCPD_EVT_TX_MSG_SUCCESS); in ucpd_manage_tx()
791 atomic_clear_bit(&info->evt, UCPD_EVT_TX_MSG_FAIL); in ucpd_manage_tx()
792 atomic_clear_bit(&info->evt, UCPD_EVT_TX_MSG_DISC); in ucpd_manage_tx()
793 ucpd_set_tx_state(info->dev, STATE_IDLE); in ucpd_manage_tx()
794 if (atomic_test_and_clear_bit(&info->evt, UCPD_EVT_TX_MSG_FAIL)) { in ucpd_manage_tx()
796 } else if (atomic_test_and_clear_bit(&info->evt, UCPD_EVT_TX_MSG_DISC)) { in ucpd_manage_tx()
803 if (atomic_test_and_clear_bit(&info->evt, UCPD_EVT_RX_GOOD_CRC) && in ucpd_manage_tx()
804 data->ucpd_crc_id == data->msg_id_match) { in ucpd_manage_tx()
807 ucpd_set_tx_state(info->dev, STATE_IDLE); in ucpd_manage_tx()
808 } else if (k_timer_status_get(&data->goodcrc_rx_timer)) { in ucpd_manage_tx()
810 k_timer_stop(&data->goodcrc_rx_timer); in ucpd_manage_tx()
813 if (data->tx_retry_count < data->tx_retry_max) { in ucpd_manage_tx()
814 ucpd_set_tx_state(info->dev, STATE_ACTIVE_TCPM); in ucpd_manage_tx()
816 data->tx_retry_count++; in ucpd_manage_tx()
818 ucpd_set_tx_state(info->dev, STATE_IDLE); in ucpd_manage_tx()
821 } else if (atomic_test_and_clear_bit(&info->evt, UCPD_EVT_RX_MSG)) { in ucpd_manage_tx()
830 ucpd_set_tx_state(info->dev, STATE_IDLE); in ucpd_manage_tx()
835 if (atomic_test_bit(&info->evt, UCPD_EVT_HR_DONE) || in ucpd_manage_tx()
836 atomic_test_bit(&info->evt, UCPD_EVT_HR_FAIL)) { in ucpd_manage_tx()
837 atomic_clear_bit(&info->evt, UCPD_EVT_HR_DONE); in ucpd_manage_tx()
838 atomic_clear_bit(&info->evt, UCPD_EVT_HR_FAIL); in ucpd_manage_tx()
840 ucpd_set_tx_state(info->dev, STATE_IDLE); in ucpd_manage_tx()
841 data->ucpd_tx_request = 0; in ucpd_manage_tx()
842 data->tx_retry_count = 0; in ucpd_manage_tx()
854 ucpd_start_transmit(info->dev, msg_src); in ucpd_manage_tx()
864 struct tcpc_data *data = info->dev->data; in ucpd_alert_handler()
866 if (atomic_test_and_clear_bit(&info->evt, UCPD_EVT_EVENT_CC)) { in ucpd_alert_handler()
870 if (atomic_test_and_clear_bit(&info->evt, UCPD_EVT_HARD_RESET_RECEIVED)) { in ucpd_alert_handler()
874 if (atomic_test_and_clear_bit(&info->evt, UCPD_EVT_RX_MSG)) { in ucpd_alert_handler()
879 * USB-PD messages are initiated in TCPM stack (PRL in ucpd_alert_handler()
881 * UCPD driver based on USB-PD rx messages. These 2 types of in ucpd_alert_handler()
885 * they must be sent immediately following a successful USB-PD in ucpd_alert_handler()
897 if (atomic_test_and_clear_bit(&info->evt, UCPD_EVT_GOOD_CRC_REQ)) { in ucpd_alert_handler()
898 data->ucpd_tx_request |= MSG_GOOD_CRC_MASK; in ucpd_alert_handler()
901 if (atomic_test_and_clear_bit(&info->evt, UCPD_EVT_TCPM_MSG_REQ)) { in ucpd_alert_handler()
902 data->ucpd_tx_request |= MSG_TCPM_MASK; in ucpd_alert_handler()
914 } while (data->ucpd_tx_state != STATE_IDLE); in ucpd_alert_handler()
923 struct tcpc_data *data = dev->data; in ucpd_send_good_crc()
924 const struct tcpc_config *const config = dev->config; in ucpd_send_good_crc()
927 struct alert_info *info = &data->alert_info; in ucpd_send_good_crc()
940 * Get the rx ordered set code just detected. SOP -> SOP''_Debug are in in ucpd_send_good_crc()
944 tx_type = LL_UCPD_ReadRxOrderSet(config->ucpd_port); in ucpd_send_good_crc()
948 * Extended b15 -> set to 0 for control messages in ucpd_send_good_crc()
949 * Count b14:12 -> number of 32 bit data objects = 0 for ctrl msg in ucpd_send_good_crc()
950 * MsgID b11:9 -> MessageID counter (extracted from rx msg) in ucpd_send_good_crc()
951 * Power Role b8 -> from data->msg_header, set by ucpd_set_roles() in ucpd_send_good_crc()
952 * Spec Rev b7:b6 -> PD spec revision (extracted from rx msg) in ucpd_send_good_crc()
953 * Data Role b5 -> from data->msg_header, set by ucpd_set_roles() in ucpd_send_good_crc()
954 * Msg Type b4:b0 -> data or ctrl type = PD_CTRL_GOOD_CRC in ucpd_send_good_crc()
959 tx_header.port_power_role = data->msg_header.pr; in ucpd_send_good_crc()
960 tx_header.port_data_role = data->msg_header.dr; in ucpd_send_good_crc()
971 data->ucpd_tx_buffers[TX_MSG_GOOD_CRC].msg_len = MSG_HEADER_SIZE; in ucpd_send_good_crc()
972 data->ucpd_tx_buffers[TX_MSG_GOOD_CRC].data.header = in ucpd_send_good_crc()
974 data->ucpd_tx_buffers[TX_MSG_GOOD_CRC].type = tx_type; in ucpd_send_good_crc()
977 atomic_set_bit(&info->evt, UCPD_EVT_GOOD_CRC_REQ); in ucpd_send_good_crc()
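A hedged sketch of assembling the header described in the comment above, reusing the union pd_header fields already referenced in these fragments (the message_type and extended field names are assumed to match Zephyr's union pd_header; rx_header stands for the header of the message being acknowledged):

/* Sketch only: build a GoodCRC header per the bit layout in the comment
 * above. pr/dr are the stored power/data roles from ucpd_set_roles();
 * message_id and specification_revision are echoed from the rx message.
 */
static uint16_t ucpd_build_good_crc_header(union pd_header rx_header,
					   uint8_t pr, uint8_t dr)
{
	union pd_header tx_header;

	tx_header.raw_value = 0;
	tx_header.message_type = PD_CTRL_GOOD_CRC;		/* b4:b0 */
	tx_header.port_data_role = dr;				/* b5 */
	tx_header.specification_revision =
			rx_header.specification_revision;	/* b7:b6 */
	tx_header.port_power_role = pr;				/* b8 */
	tx_header.message_id = rx_header.message_id;		/* b11:9 */
	tx_header.number_of_data_objects = 0;			/* b14:12 */
	tx_header.extended = 0;					/* b15 */

	return tx_header.raw_value;
}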
987 * @retval -EFAULT on failure
992 struct tcpc_data *data = dev->data; in ucpd_transmit_data()
995 int len = PD_CONVERT_PD_HEADER_COUNT_TO_BYTES(msg->header.number_of_data_objects) + 2; in ucpd_transmit_data()
998 return -EFAULT; in ucpd_transmit_data()
1002 data->ucpd_tx_buffers[TX_MSG_TCPM].msg_len = len; in ucpd_transmit_data()
1003 data->ucpd_tx_buffers[TX_MSG_TCPM].type = msg->type; in ucpd_transmit_data()
1004 data->ucpd_tx_buffers[TX_MSG_TCPM].data.header = msg->header.raw_value; in ucpd_transmit_data()
1007 memcpy(data->ucpd_tx_buffers[TX_MSG_TCPM].data.msg + 2, in ucpd_transmit_data()
1008 (uint8_t *)msg->data, len - 2); in ucpd_transmit_data()
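As a worked example of the length calculation above: assuming PD_CONVERT_PD_HEADER_COUNT_TO_BYTES() multiplies the object count by four (each data object is 32 bits), a Request message carrying one data object gives len = 4 + 2 = 6, i.e. the 2-byte header stored in data.header plus the 4 payload bytes copied by the memcpy above.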
1015 if (msg->type == PD_PACKET_TX_HARD_RESET) { in ucpd_transmit_data()
1016 atomic_set_bit(&data->alert_info.evt, UCPD_EVT_HR_REQ); in ucpd_transmit_data()
1018 atomic_set_bit(&data->alert_info.evt, UCPD_EVT_TCPM_MSG_REQ); in ucpd_transmit_data()
1022 k_work_submit(&data->alert_info.work); in ucpd_transmit_data()
1032 * @retval -ENODATA if there is no pending message
1036 struct tcpc_data *data = dev->data; in ucpd_get_rx_pending_msg()
1040 if (*(uint32_t *)data->ucpd_rx_buffer == 0) { in ucpd_get_rx_pending_msg()
1041 return -ENODATA; in ucpd_get_rx_pending_msg()
1048 msg->type = *(uint16_t *)data->ucpd_rx_buffer; in ucpd_get_rx_pending_msg()
1049 msg->header.raw_value = *((uint16_t *)data->ucpd_rx_buffer + 1); in ucpd_get_rx_pending_msg()
1050 msg->len = PD_CONVERT_PD_HEADER_COUNT_TO_BYTES(msg->header.number_of_data_objects); in ucpd_get_rx_pending_msg()
1051 memcpy(msg->data, (data->ucpd_rx_buffer + in ucpd_get_rx_pending_msg()
1053 MSG_HEADER_SIZE), msg->len); in ucpd_get_rx_pending_msg()
1054 ret = msg->len + MSG_HEADER_SIZE; in ucpd_get_rx_pending_msg()
1057 *(uint32_t *)data->ucpd_rx_buffer = 0; in ucpd_get_rx_pending_msg()
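A hedged usage sketch from the policy/TCPM side, assuming a tcpc_get_rx_pending_msg() API wrapper that forwards to the driver function above and a struct pd_msg matching the fields used in these fragments:

/* Usage sketch only: poll for a received message. A return of -ENODATA
 * means nothing is pending (the driver zeroes the buffer's first word
 * after each successful read, as shown above); a positive return is the
 * header size plus payload length.
 */
static void poll_rx_example(const struct device *tcpc_dev)
{
	struct pd_msg msg;
	int ret = tcpc_get_rx_pending_msg(tcpc_dev, &msg);

	if (ret == -ENODATA) {
		return;
	}

	/* msg.type, msg.header.raw_value, msg.len and msg.data[] are now
	 * valid, filled in as the driver code above shows */
}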
1066 * @retval -EIO on failure
1071 struct tcpc_data *data = dev->data; in ucpd_set_bist_test_mode()
1073 data->ucpd_rx_bist_mode = enable; in ucpd_set_bist_test_mode()
1112 LL_UCPD_ReadReg(((const struct tcpc_config *)dev_inst[0]->config)->ucpd_port, SR); in ucpd_isr()
1114 LL_UCPD_ReadReg(((const struct tcpc_config *)dev_inst[1]->config)->ucpd_port, SR); in ucpd_isr()
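On devices where both UCPD instances share one interrupt line, the two SR reads above let the ISR decide which instance fired. A hedged sketch of that dispatch (the sr0/sr1/dev variable names, holding the two SR values read above, and the exact pending test are assumptions; dev_inst[] is the per-instance device table implied by the casts above):

/* Sketch: pick the instance whose status register has pending bits;
 * ignore a spurious interrupt where neither instance reports anything.
 */
if (sr0 != 0U) {
	dev = dev_inst[0];
} else if (sr1 != 0U) {
	dev = dev_inst[1];
} else {
	return;
}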
1135 config = dev->config; in ucpd_isr()
1136 data = dev->data; in ucpd_isr()
1137 info = &data->alert_info; in ucpd_isr()
1140 sr = LL_UCPD_ReadReg(config->ucpd_port, SR); in ucpd_isr()
1145 atomic_set_bit(&info->evt, UCPD_EVT_EVENT_CC); in ucpd_isr()
1150 * end of a USB-PD tx message. If any of these bits are set, the in ucpd_isr()
1157 atomic_set_bit(&info->evt, UCPD_EVT_TX_MSG_SUCCESS); in ucpd_isr()
1159 atomic_set_bit(&info->evt, UCPD_EVT_TX_MSG_FAIL); in ucpd_isr()
1161 atomic_set_bit(&info->evt, UCPD_EVT_TX_MSG_DISC); in ucpd_isr()
1163 atomic_set_bit(&info->evt, UCPD_EVT_HR_DONE); in ucpd_isr()
1165 atomic_set_bit(&info->evt, UCPD_EVT_HR_FAIL); in ucpd_isr()
1181 *(uint16_t *)data->ucpd_rx_buffer = in ucpd_isr()
1182 LL_UCPD_ReadRxOrderSet(config->ucpd_port); in ucpd_isr()
1184 data->ucpd_rx_byte_count = 2; in ucpd_isr()
1185 data->ucpd_rx_msg_active = true; in ucpd_isr()
1194 data->ucpd_rx_msg_active = false; in ucpd_isr()
1201 type = *(uint16_t *)data->ucpd_rx_buffer; in ucpd_isr()
1203 *((uint16_t *)data->ucpd_rx_buffer + 1); in ucpd_isr()
1214 if (!good_crc && (data->ucpd_rx_sop_prime_enabled || in ucpd_isr()
1222 if (!data->ucpd_rx_bist_mode) { in ucpd_isr()
1223 atomic_set_bit(&info->evt, UCPD_EVT_RX_MSG); in ucpd_isr()
1228 atomic_set_bit(&info->evt, UCPD_EVT_RX_GOOD_CRC); in ucpd_isr()
1229 data->ucpd_crc_id = rx_header.message_id; in ucpd_isr()
1239 atomic_set_bit(&info->evt, UCPD_EVT_HARD_RESET_RECEIVED); in ucpd_isr()
1243 LL_UCPD_WriteReg(config->ucpd_port, ICR, sr & UCPD_ICR_ALL_INT_MASK); in ucpd_isr()
1246 k_work_submit(&info->work); in ucpd_isr()
1253 * @retval -EIO on failure
1257 const struct tcpc_config *const config = dev->config; in ucpd_dump_std_reg()
1259 LOG_INF("CFGR1: %08x", LL_UCPD_ReadReg(config->ucpd_port, CFG1)); in ucpd_dump_std_reg()
1260 LOG_INF("CFGR2: %08x", LL_UCPD_ReadReg(config->ucpd_port, CFG2)); in ucpd_dump_std_reg()
1261 LOG_INF("CR: %08x", LL_UCPD_ReadReg(config->ucpd_port, CR)); in ucpd_dump_std_reg()
1262 LOG_INF("IMR: %08x", LL_UCPD_ReadReg(config->ucpd_port, IMR)); in ucpd_dump_std_reg()
1263 LOG_INF("SR: %08x", LL_UCPD_ReadReg(config->ucpd_port, SR)); in ucpd_dump_std_reg()
1264 LOG_INF("ICR: %08x\n", LL_UCPD_ReadReg(config->ucpd_port, ICR)); in ucpd_dump_std_reg()
1274 * @retval -EINVAL on failure
1279 struct tcpc_data *data = dev->data; in ucpd_set_alert_handler_cb()
1281 data->alert_info.handler = handler; in ucpd_set_alert_handler_cb()
1282 data->alert_info.data = alert_data; in ucpd_set_alert_handler_cb()
1296 struct tcpc_data *data = dev->data; in ucpd_set_vconn_cb()
1298 data->vconn_cb = vconn_cb; in ucpd_set_vconn_cb()
1310 struct tcpc_data *data = dev->data; in ucpd_set_vconn_discharge_cb()
1312 data->vconn_discharge_cb = cb; in ucpd_set_vconn_discharge_cb()
1320 const struct tcpc_config *const config = dev->config; in ucpd_isr_init()
1321 struct tcpc_data *data = dev->data; in ucpd_isr_init()
1322 struct alert_info *info = &data->alert_info; in ucpd_isr_init()
1325 k_timer_init(&data->goodcrc_rx_timer, NULL, NULL); in ucpd_isr_init()
1328 LL_UCPD_WriteReg(config->ucpd_port, IMR, 0); in ucpd_isr_init()
1334 info->dev = dev; in ucpd_isr_init()
1337 k_work_init(&info->work, ucpd_alert_handler); in ucpd_isr_init()
1340 LL_UCPD_WriteReg(config->ucpd_port, IMR, in ucpd_isr_init()
1342 LL_UCPD_WriteReg(config->ucpd_port, ICR, in ucpd_isr_init()
1346 data->ucpd_rx_sop_prime_enabled = false; in ucpd_isr_init()
1358 * @retval -EIO on failure
1362 const struct tcpc_config *const config = dev->config; in ucpd_init()
1363 struct tcpc_data *data = dev->data; in ucpd_init()
1368 ret = pinctrl_apply_state(config->ucpd_pcfg, PINCTRL_STATE_DEFAULT); in ucpd_init()
1382 ret = LL_UCPD_Init(config->ucpd_port, in ucpd_init()
1383 (LL_UCPD_InitTypeDef *)&config->ucpd_params); in ucpd_init()
1387 data->rp = TC_RP_USB; in ucpd_init()
1393 cfg1 = LL_UCPD_ReadReg(config->ucpd_port, CFG1); in ucpd_init()
1396 LL_UCPD_WriteReg(config->ucpd_port, CFG1, cfg1); in ucpd_init()
1399 LL_UCPD_Enable(config->ucpd_port); in ucpd_init()
1401 /* Enable Dead Battery Support */ in ucpd_init()
1402 if (config->ucpd_dead_battery) { in ucpd_init()
1406 * Some devices have dead battery enabled by default in ucpd_init()
1415 return -EIO; in ucpd_init()
1471 .ucpd_params.transwin = DT_INST_PROP(inst, transwin) - 1, \
1472 .ucpd_params.IfrGap = DT_INST_PROP(inst, ifrgap) - 1, \
1473 .ucpd_params.HbitClockDiv = DT_INST_PROP(inst, hbitclkdiv) - 1, \
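Note that the three devicetree properties above are written to the LL init fields minus one, which suggests the hardware fields are zero-based: for example, a hbitclkdiv of 27 in the devicetree would be programmed as 26, assuming the peripheral divides its kernel clock by (field + 1) to derive the half-bit clock.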