Lines Matching +full:transmit +full:- +full:retries

/* SPDX-License-Identifier: Apache-2.0 */

/* in prl_subsys_init() */
struct usbc_port_data *data = dev->data;
struct protocol_layer_tx_t *prl_tx = data->prl_tx;
struct protocol_hard_reset_t *prl_hr = data->prl_hr;

prl_tx->dev = dev;
prl_hr->dev = dev;

/* in prl_is_running() */
struct usbc_port_data *data = dev->data;

return data->prl_sm_state == SM_RUN;

/* in prl_execute_hard_reset() */
struct usbc_port_data *data = dev->data;
struct protocol_hard_reset_t *prl_hr = data->prl_hr;

/* ... */
atomic_set_bit(&prl_hr->flags, PRL_FLAGS_PE_HARD_RESET);

/* in prl_hard_reset_complete() */
struct usbc_port_data *data = dev->data;
struct protocol_hard_reset_t *prl_hr = data->prl_hr;

atomic_set_bit(&prl_hr->flags, PRL_FLAGS_HARD_RESET_COMPLETE);

/*
 * @brief Directs the Protocol Layer to construct and transmit a Power Delivery
 *        Control message
 */
/* in prl_send_ctrl_msg() */
struct usbc_port_data *data = dev->data;
struct protocol_layer_tx_t *prl_tx = data->prl_tx;

/* Set the packet type and message type */
prl_tx->emsg.type = type;
prl_tx->msg_type = msg;
/* Control messages carry no data objects */
prl_tx->emsg.len = 0;

atomic_set_bit(&prl_tx->flags, PRL_FLAGS_MSG_XMIT);

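A minimal usage sketch (hypothetical call site, not from the file): the Policy Engine queues an Accept control message on SOP. prl_send_ctrl_msg() only latches the request via PRL_FLAGS_MSG_XMIT; the PRL_TX state machine performs the actual transmission on its next run.

    /* Hypothetical PE call site: queue an Accept on SOP */
    prl_send_ctrl_msg(dev, PD_PACKET_SOP, PD_CTRL_ACCEPT);
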
/*
 * @brief Directs the Protocol Layer to construct and transmit a Power Delivery
 *        Data message
 *
 * @note Before calling this function prl_tx->emsg.data and prl_tx->emsg.len
 *       must be set
 */
/* in prl_send_data_msg() */
struct usbc_port_data *data = dev->data;
struct protocol_layer_tx_t *prl_tx = data->prl_tx;

/* Set the packet type and message type; emsg.data/len were set by the caller */
prl_tx->emsg.type = type;
prl_tx->msg_type = msg;

atomic_set_bit(&prl_tx->flags, PRL_FLAGS_MSG_XMIT);

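A usage sketch honoring the @note above (hypothetical call site; PD_DATA_REQUEST and the RDO value are illustrative): the caller fills the extended message buffer before asking the PRL to transmit.

    /* Hypothetical PE call site: send a Request data message */
    struct usbc_port_data *data = dev->data;
    struct protocol_layer_tx_t *prl_tx = data->prl_tx;
    uint32_t rdo = 0x1304b12c; /* illustrative Request Data Object */

    memcpy(prl_tx->emsg.data, &rdo, sizeof(rdo)); /* fill emsg.data first */
    prl_tx->emsg.len = sizeof(rdo);               /* then set emsg.len */
    prl_send_data_msg(dev, PD_PACKET_SOP, PD_DATA_REQUEST);
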
/* in prl_set_default_pd_revision() */
struct usbc_port_data *data = dev->data;

data->rev[PD_PACKET_SOP] = PD_REV30;
data->rev[PD_PACKET_SOP_PRIME] = PD_REV30;
data->rev[PD_PACKET_PRIME_PRIME] = PD_REV30;
data->rev[PD_PACKET_DEBUG_PRIME] = PD_REV30;
data->rev[PD_PACKET_DEBUG_PRIME_PRIME] = PD_REV30;

/* in prl_start() */
struct usbc_port_data *data = dev->data;

data->prl_enabled = true;

/* in prl_suspend() */
struct usbc_port_data *data = dev->data;

data->prl_enabled = false;

/* in prl_reset() */
struct usbc_port_data *data = dev->data;

if (data->prl_enabled) {
    data->prl_sm_state = SM_INIT;
}

/* in prl_first_msg_notificaiton() */
struct usbc_port_data *data = dev->data;
struct protocol_layer_tx_t *prl_tx = data->prl_tx;

atomic_set_bit(&prl_tx->flags, PRL_FLAGS_FIRST_MSG_PENDING);

/* in prl_run() — case labels inferred around the matching lines */
struct usbc_port_data *data = dev->data;
struct protocol_layer_tx_t *prl_tx = data->prl_tx;
struct protocol_hard_reset_t *prl_hr = data->prl_hr;

switch (data->prl_sm_state) {
case SM_PAUSED:
    if (data->prl_enabled == false) {
        break;
    }
    /* fall through */
case SM_INIT:
    /* ... */
    data->prl_sm_state = SM_RUN;
    /* fall through */
case SM_RUN:
    if (data->prl_enabled == false) {
        data->prl_sm_state = SM_PAUSED;
        /* Disable RX */
        tcpc_set_rx_enable(data->tcpc, false);
    }
    /* ... run the PRL_TX and PRL_HR state machines ... */
}

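A standalone model (hypothetical names) of the main-loop lifecycle above: SM_PAUSED re-enters initialization when the layer is re-enabled, SM_INIT falls through to SM_RUN, and SM_RUN drops back to SM_PAUSED (disabling RX) when prl_enabled is cleared.

    #include <stdbool.h>

    enum sm_lifecycle { LC_INIT, LC_RUN, LC_PAUSED }; /* hypothetical mirror of SM_* */

    static enum sm_lifecycle lifecycle_step(enum sm_lifecycle s, bool enabled)
    {
        switch (s) {
        case LC_PAUSED:
            return enabled ? LC_INIT : LC_PAUSED; /* re-init on resume */
        case LC_INIT:
            return LC_RUN; /* one-shot initialization */
        case LC_RUN:
        default:
            return enabled ? LC_RUN : LC_PAUSED; /* suspend stops RX */
        }
    }
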
/* in prl_set_rev() */
struct usbc_port_data *data = dev->data;

data->rev[type] = rev;

/* in prl_get_rev() */
struct usbc_port_data *data = dev->data;

return data->rev[type];

/* in alert_handler() */
struct usbc_port_data *data = dev->data;
struct protocol_layer_tx_t *prl_tx = data->prl_tx;
struct protocol_hard_reset_t *prl_hr = data->prl_hr;

/* ... map each TCPC alert onto a PRL flag ... */
atomic_set_bit(&prl_hr->flags, PRL_FLAGS_PORT_PARTNER_HARD_RESET);
/* ... */
atomic_set_bit(&prl_tx->flags, PRL_FLAGS_TX_ERROR);
/* ... */
atomic_set_bit(&prl_tx->flags, PRL_FLAGS_TX_DISCARDED);
/* ... */
atomic_set_bit(&prl_tx->flags, PRL_FLAGS_TX_COMPLETE);
/* ... */

/* Wake the port thread so the state machines can process the event */
k_wakeup(data->port_thread);

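The four flag assignments above are branches of a switch over the TCPC alert type. A sketch of the assumed shape (the alert-to-flag mapping is inferred from the flag names; only the atomic_set_bit lines and the k_wakeup() call are confirmed by the listing):

    /* Assumed shape of alert_handler(): translate TCPC alerts into PRL flags */
    static void alert_handler(const struct device *tcpc, void *port_dev,
                              enum tcpc_alert alert)
    {
        const struct device *dev = (const struct device *)port_dev;
        struct usbc_port_data *data = dev->data;
        struct protocol_layer_tx_t *prl_tx = data->prl_tx;
        struct protocol_hard_reset_t *prl_hr = data->prl_hr;

        switch (alert) {
        case TCPC_ALERT_HARD_RESET_RECEIVED:
            atomic_set_bit(&prl_hr->flags, PRL_FLAGS_PORT_PARTNER_HARD_RESET);
            break;
        case TCPC_ALERT_TRANSMIT_MSG_FAILED:
            atomic_set_bit(&prl_tx->flags, PRL_FLAGS_TX_ERROR);
            break;
        case TCPC_ALERT_TRANSMIT_MSG_DISCARDED:
            atomic_set_bit(&prl_tx->flags, PRL_FLAGS_TX_DISCARDED);
            break;
        case TCPC_ALERT_TRANSMIT_MSG_SUCCESS:
            atomic_set_bit(&prl_tx->flags, PRL_FLAGS_TX_COMPLETE);
            break;
        default:
            break;
        }

        /* Wake the port thread so the PRL/PE state machines run */
        k_wakeup(data->port_thread);
    }
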
/* in prl_tx_set_state() */
struct usbc_port_data *data = dev->data;
struct protocol_layer_tx_t *prl_tx = data->prl_tx;

/* in prl_hr_set_state() */
struct usbc_port_data *data = dev->data;
struct protocol_hard_reset_t *prl_hr = data->prl_hr;

/* in prl_hr_get_state() */
struct usbc_port_data *data = dev->data;
struct protocol_hard_reset_t *prl_hr = data->prl_hr;

return prl_hr->ctx.current - &prl_hr_states[0];

/* in increment_msgid_counter() */
struct usbc_port_data *data = dev->data;
struct protocol_layer_tx_t *prl_tx = data->prl_tx;

/* Only increment and wrap the counter for SOP* packets */
if (prl_tx->last_xmit_type >= NUM_SOP_STAR_TYPES) {
    return;
}

prl_tx->msg_id_counter[prl_tx->last_xmit_type] =
    (prl_tx->msg_id_counter[prl_tx->last_xmit_type] + 1) & PD_MESSAGE_ID_COUNT;

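Worked example of the masking above: the PD MessageID field is only 3 bits wide, so each per-SOP* counter wraps modulo 8 (assuming PD_MESSAGE_ID_COUNT is the 0x7 mask). A standalone demonstration:

    #include <stdint.h>
    #include <stdio.h>

    #define MSG_ID_MASK 0x7 /* 3-bit MessageID, values 0..7 */

    int main(void)
    {
        uint8_t counter = 6;

        for (int i = 0; i < 4; i++) {
            printf("%u ", counter); /* prints: 6 7 0 1 */
            counter = (counter + 1) & MSG_ID_MASK;
        }
        return 0;
    }
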
/* in get_sop_star_header() */
struct usbc_port_data *data = dev->data;
struct protocol_layer_tx_t *prl_tx = data->prl_tx;
const bool is_sop_packet = prl_tx->emsg.type == PD_PACKET_SOP;

/* ... populate the PD message header ... */
header.message_type = prl_tx->msg_type;
header.specification_revision = data->rev[prl_tx->emsg.type];
header.message_id = prl_tx->msg_id_counter[prl_tx->emsg.type];
header.number_of_data_objects = PD_CONVERT_BYTES_TO_PD_HEADER_COUNT(prl_tx->emsg.len);

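For reference, the 16-bit USB PD message header that get_sop_star_header() fills in, as a standalone bitfield sketch (field widths per the USB PD specification; Zephyr's union pd_header exposes the same field names):

    #include <stdint.h>

    union pd_header_sketch {
        struct {
            uint16_t message_type : 5;           /* control/data message type */
            uint16_t port_data_role : 1;         /* UFP or DFP (SOP only) */
            uint16_t specification_revision : 2; /* PD 2.0 or 3.0 */
            uint16_t port_power_role : 1;        /* Sink/Source (Cable Plug on SOP') */
            uint16_t message_id : 3;             /* rolling 0..7 counter */
            uint16_t number_of_data_objects : 3; /* 0 for control messages */
            uint16_t extended : 1;               /* extended message flag */
        };
        uint16_t raw_value;
    };
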
/*
 * @brief Construct and transmit a message
 */
/* in prl_tx_construct_message() */
struct usbc_port_data *data = dev->data;
struct protocol_layer_tx_t *prl_tx = data->prl_tx;
const struct device *tcpc = data->tcpc;

/* Build the PD header for SOP* packets; other packet types carry none */
prl_tx->emsg.header.raw_value =
    prl_tx->emsg.type < NUM_SOP_STAR_TYPES ? get_sop_star_header(dev) : 0;

prl_tx->last_xmit_type = prl_tx->emsg.type;

atomic_clear_bit(&prl_tx->flags, PRL_FLAGS_TX_COMPLETE);
atomic_clear_bit(&prl_tx->flags, PRL_FLAGS_MSG_XMIT);

/*
 * Pass message to PHY Layer. It handles retries in hardware as
 * software cannot handle the required timing ~ 1ms (tReceive + tRetry)
 */
tcpc_transmit_data(tcpc, &prl_tx->emsg);

/*
 * @brief Transmit a Hard Reset Message
 */
/* in prl_hr_send_msg_to_phy() */
struct usbc_port_data *data = dev->data;
struct protocol_layer_tx_t *prl_tx = data->prl_tx;
const struct device *tcpc = data->tcpc;

/* Hard Reset is a special packet type with no PD header */
prl_tx->emsg.header.raw_value = 0;
prl_tx->emsg.type = PD_PACKET_TX_HARD_RESET;

data->prl_tx->flags = ATOMIC_INIT(0);

tcpc_transmit_data(tcpc, &prl_tx->emsg);

/* in prl_init() */
struct usbc_port_data *data = dev->data;
struct protocol_layer_rx_t *prl_rx = data->prl_rx;
struct protocol_layer_tx_t *prl_tx = data->prl_tx;
struct protocol_hard_reset_t *prl_hr = data->prl_hr;

tcpc_set_alert_handler_cb(data->tcpc, alert_handler, (void *)dev);

/* Initialize the hard reset state machine */
prl_hr->flags = ATOMIC_INIT(0);
usbc_timer_init(&prl_hr->pd_t_hard_reset_complete, PD_T_HARD_RESET_COMPLETE_MAX_MS);

/* Initialize the transmit state machine */
prl_tx->flags = ATOMIC_INIT(0);
prl_tx->last_xmit_type = PD_PACKET_SOP;
for (int i = 0; i < NUM_SOP_STAR_TYPES; i++) {
    prl_tx->msg_id_counter[i] = 0;
}
usbc_timer_init(&prl_tx->pd_t_tx_timeout, PD_T_TX_TIMEOUT_MS);
usbc_timer_init(&prl_tx->pd_t_sink_tx, PD_T_SINK_TX_MAX_MS);

/* Initialize the receive state: no message seen yet on any SOP* type */
prl_rx->flags = ATOMIC_INIT(0);
for (int i = 0; i < NUM_SOP_STAR_TYPES; i++) {
    prl_rx->msg_id[i] = -1;
}

/* in prl_tx_phy_layer_reset_entry() */
const struct device *dev = prl_tx->dev;
struct usbc_port_data *data = dev->data;
const struct device *tcpc = data->tcpc;

/* in prl_tx_wait_for_message_request_entry() */
prl_tx->flags = ATOMIC_INIT(0);

/* in prl_tx_wait_for_message_request_run() */
const struct device *dev = prl_tx->dev;
struct usbc_port_data *data = dev->data;

if (atomic_test_and_clear_bit(&prl_tx->flags, PRL_FLAGS_SINK_NG)) {
    /* ... */
}
atomic_clear_bit(&prl_tx->flags, PRL_FLAGS_WAIT_SINK_OK);

/* Rev 3.0 collision avoidance applies only to DPM-initiated AMSs */
if (data->rev[PD_PACKET_SOP] == PD_REV30 && pe_dpm_initiated_ams(dev)) {
    if (atomic_test_bit(&prl_tx->flags, PRL_FLAGS_WAIT_SINK_OK) ||
        atomic_test_bit(&prl_tx->flags, PRL_FLAGS_SINK_NG)) {
        /* ... multi-message AMS to continue ... */
    }
    /* ... */
    atomic_set_bit(&prl_tx->flags, PRL_FLAGS_SINK_NG);
    /* ... */
    atomic_set_bit(&prl_tx->flags, PRL_FLAGS_WAIT_SINK_OK);
    /* ... */
}

if (atomic_test_and_clear_bit(&prl_tx->flags, PRL_FLAGS_MSG_XMIT)) {
    /* Soft Reset is handled specially */
    if ((prl_tx->msg_type == PD_CTRL_SOFT_RESET) && (prl_tx->emsg.len == 0)) {
        /* ... */
    }
}

/* in prl_tx_layer_reset_for_transmit_entry() */
const struct device *dev = prl_tx->dev;
struct usbc_port_data *data = dev->data;
struct protocol_layer_rx_t *prl_rx = data->prl_rx;

if (prl_tx->emsg.type < NUM_SOP_STAR_TYPES) {
    /* Reset the TX MessageIDCounter for this SOP* type */
    prl_tx->msg_id_counter[prl_tx->emsg.type] = 0;
    /* Invalidate the RX stored MessageID for this SOP* type */
    prl_rx->msg_id[prl_tx->emsg.type] = -1;
}

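A compact standalone model (hypothetical names) of the per-SOP* reset above: transmitting a Soft_Reset re-zeroes the TX MessageIDCounter and invalidates the RX stored MessageID for that packet type only, leaving the other SOP* types untouched.

    #include <stdint.h>

    #define NUM_TYPES 5 /* SOP, SOP', SOP'', Debug SOP', Debug SOP'' */

    static uint8_t tx_msg_id_counter[NUM_TYPES];
    static int8_t rx_stored_msg_id[NUM_TYPES];

    static void soft_reset_type(unsigned int type)
    {
        tx_msg_id_counter[type] = 0; /* next TX uses MessageID 0 */
        rx_stored_msg_id[type] = -1; /* next RX is never treated as a duplicate */
    }
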
/* in prl_tx_wait_for_phy_response_entry() */
usbc_timer_start(&prl_tx->pd_t_tx_timeout);

/* in prl_tx_wait_for_phy_response_run() */
const struct device *dev = prl_tx->dev;

if (atomic_test_and_clear_bit(&prl_tx->flags, PRL_FLAGS_TX_DISCARDED)) {
    /* ... */
}

if (atomic_test_bit(&prl_tx->flags, PRL_FLAGS_TX_COMPLETE)) {
    /* ... */
} else if (usbc_timer_expired(&prl_tx->pd_t_tx_timeout) ||
           atomic_test_bit(&prl_tx->flags, PRL_FLAGS_TX_ERROR)) {
    /* ... */
    pe_report_error(dev, ERR_XMIT, prl_tx->last_xmit_type);
}

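A condensed, hypothetical model of the decision order above: a PHY response resolves to exactly one outcome, with discard taking precedence, then completion, then timeout/error (the failure case is what gets reported via pe_report_error()).

    #include <stdbool.h>

    enum tx_outcome {
        TX_OUTCOME_PENDING,   /* keep waiting */
        TX_OUTCOME_DISCARDED, /* an incoming message preempted ours */
        TX_OUTCOME_COMPLETE,  /* GoodCRC received for our transmission */
        TX_OUTCOME_FAILED,    /* TX timeout expired or the PHY reported an error */
    };

    static enum tx_outcome classify_tx(bool discarded, bool complete,
                                       bool timed_out, bool phy_error)
    {
        if (discarded) {
            return TX_OUTCOME_DISCARDED;
        }
        if (complete) {
            return TX_OUTCOME_COMPLETE;
        }
        if (timed_out || phy_error) {
            return TX_OUTCOME_FAILED;
        }
        return TX_OUTCOME_PENDING;
    }
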
/* in prl_tx_wait_for_phy_response_exit() */
const struct device *dev = prl_tx->dev;

usbc_timer_stop(&prl_tx->pd_t_tx_timeout);

/* in prl_tx_src_source_tx_entry() */
const struct device *dev = prl_tx->dev;

/* in prl_tx_src_source_tx_run() */
const struct device *dev = prl_tx->dev;

if (atomic_test_bit(&prl_tx->flags, PRL_FLAGS_MSG_XMIT)) {
    /* ... */
}

/* in prl_tx_snk_start_ams_run() */
const struct device *dev = prl_tx->dev;

if (atomic_test_bit(&prl_tx->flags, PRL_FLAGS_MSG_XMIT)) {
    /* ... */
}

/* in prl_tx_src_pending_entry() */
usbc_timer_start(&prl_tx->pd_t_sink_tx);

/* in prl_tx_src_pending_run() */
const struct device *dev = prl_tx->dev;

if (usbc_timer_expired(&prl_tx->pd_t_sink_tx)) {
    /* ... */
    atomic_clear_bit(&prl_tx->flags, PRL_FLAGS_MSG_XMIT);
    /* ... */
    if ((prl_tx->msg_type == PD_CTRL_SOFT_RESET) && (prl_tx->emsg.len == 0)) {
        /* ... */
    }
    /* ... */
    if (atomic_test_bit(&prl_tx->flags, PRL_FLAGS_FIRST_MSG_PENDING)) {
        atomic_clear_bit(&prl_tx->flags, PRL_FLAGS_FIRST_MSG_PENDING);
        /* ... */
    }
}

/* in prl_tx_src_pending_exit() */
usbc_timer_stop(&prl_tx->pd_t_sink_tx);

/* in prl_tx_snk_pending_run() */
const struct device *dev = prl_tx->dev;
struct usbc_port_data *data = dev->data;
const struct device *tcpc = data->tcpc;

/*
 * Wait until the SRC applies SINK_TX_OK so we can transmit.
 */
/* ... */
atomic_clear_bit(&prl_tx->flags, PRL_FLAGS_MSG_XMIT);
/* ... */
if ((prl_tx->msg_type == PD_CTRL_SOFT_RESET) && (prl_tx->emsg.len == 0)) {
    /* ... */
}
if (atomic_test_bit(&prl_tx->flags, PRL_FLAGS_FIRST_MSG_PENDING)) {
    atomic_clear_bit(&prl_tx->flags, PRL_FLAGS_FIRST_MSG_PENDING);
    /* ... */
}

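Background for the wait above, as a hypothetical standalone helper: in PD Rev 3.0 collision avoidance, the Source signals whether a Sink may start an Atomic Message Sequence through the Rp value it advertises on the CC line. SinkTxNG (1.5 A Rp) means hold off; SinkTxOK (3.0 A Rp) means the Sink may transmit.

    #include <stdbool.h>

    /* Hypothetical mirror of the Rp encodings involved in collision avoidance */
    enum rp_value_sketch { RP_USB_DEFAULT, RP_1A5_SINK_TX_NG, RP_3A0_SINK_TX_OK };

    static bool sink_may_transmit(enum rp_value_sketch rp)
    {
        /* Only SinkTxOK permits the Sink to begin an AMS */
        return rp == RP_3A0_SINK_TX_OK;
    }
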
/*
 * ... Figure 6-66. The PRL_HR state machine waits here until a
 * Hard Reset is requested by the Policy Engine or by the port partner.
 */
/* in prl_hr_wait_for_request_entry() */
prl_hr->flags = ATOMIC_INIT(0);

/* in prl_hr_wait_for_request_run() */
const struct device *dev = prl_hr->dev;

if (atomic_test_bit(&prl_hr->flags, PRL_FLAGS_PE_HARD_RESET) ||
    atomic_test_bit(&prl_hr->flags, PRL_FLAGS_PORT_PARTNER_HARD_RESET)) {
    /* ... */
}

/* in prl_hr_reset_layer_entry() */
const struct device *dev = prl_hr->dev;
struct usbc_port_data *data = dev->data;
struct protocol_layer_rx_t *prl_rx = data->prl_rx;
struct protocol_layer_tx_t *prl_tx = data->prl_tx;
const struct device *tcpc = data->tcpc;

/* Reset the RX and TX state machine flags */
prl_rx->flags = ATOMIC_INIT(0);
prl_tx->flags = ATOMIC_INIT(0);

/* Reset the MessageID counters and stored MessageIDs for all SOP* types */
for (int i = 0; i < NUM_SOP_STAR_TYPES; i++) {
    prl_rx->msg_id[i] = -1;
    prl_tx->msg_id_counter[i] = 0;
}

/*
 * After a physical or logical (USB Type-C Error Recovery) Attach, a
 * ...
 */

if (atomic_test_bit(&prl_hr->flags, PRL_FLAGS_PE_HARD_RESET)) {
    /* ... */
}

/* in prl_hr_wait_for_phy_hard_reset_complete_entry() */
usbc_timer_start(&prl_hr->pd_t_hard_reset_complete);

/* in prl_hr_wait_for_phy_hard_reset_complete_run() */
const struct device *dev = prl_hr->dev;
struct usbc_port_data *data = dev->data;
struct protocol_layer_tx_t *prl_tx = data->prl_tx;

if (atomic_test_bit(&prl_tx->flags, PRL_FLAGS_TX_COMPLETE) ||
    usbc_timer_expired(&prl_hr->pd_t_hard_reset_complete)) {
    /* ... */
}

/* in prl_hr_wait_for_phy_hard_reset_complete_exit() */
usbc_timer_stop(&prl_hr->pd_t_hard_reset_complete);

/* in prl_hr_wait_for_pe_hard_reset_complete_run() */
const struct device *dev = prl_hr->dev;

if (atomic_test_bit(&prl_hr->flags, PRL_FLAGS_HARD_RESET_COMPLETE)) {
    /* ... */
}

/*
 * ... State Machine. See Figure 6-55, Protocol layer Message reception
 */
/* in prl_rx_wait_for_phy_message() */
struct usbc_port_data *data = dev->data;
struct protocol_layer_rx_t *prl_rx = data->prl_rx;
struct protocol_layer_tx_t *prl_tx = data->prl_tx;
struct pd_msg *rx_emsg = &prl_rx->emsg;
const struct device *tcpc = data->tcpc;

/* ... unpack the received PD header ... */
num_data_objs = rx_emsg->header.number_of_data_objects;
msid = rx_emsg->header.message_id;
msg_type = rx_emsg->header.message_type;
ext = rx_emsg->header.extended;
pkt_type = rx_emsg->type;
power_role = rx_emsg->header.port_power_role;

/* ... log the header and any data objects ... */
LOG_INF("RECV %04x/%d ", rx_emsg->header.raw_value, num_data_objs);
for (int p = 0; p < num_data_objs; p++) {
    LOG_INF("\t[%d]%08x ", p, *((uint32_t *)rx_emsg->data + p));
}

/* ... on a received Soft Reset, reset this SOP* type's counters ... */
prl_tx->msg_id_counter[pkt_type] = 0;
prl_rx->msg_id[pkt_type] = -1;

/* ... drop a retransmission whose MessageID matches the stored one ... */
if (prl_rx->msg_id[pkt_type] == msid) {
    /* ... */
}

/* Check if message transmit is pending */
if (atomic_test_bit(&prl_tx->flags, PRL_FLAGS_MSG_XMIT)) {
    /* ... an incoming message discards the pending transmission ... */
    atomic_set_bit(&prl_tx->flags, PRL_FLAGS_TX_DISCARDED);
}

/* Store the MessageID of the newly accepted message */
prl_rx->msg_id[pkt_type] = msid;

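A standalone model (hypothetical names) of the duplicate check above: the receiver keeps the MessageID of the last accepted message per SOP* type, seeded with -1 ("nothing received"), drops retransmissions whose ID matches, and stores the ID of every newly accepted message.

    #include <stdbool.h>
    #include <stdint.h>

    static int8_t stored_msg_id = -1; /* kept per SOP* type in the real code */

    static bool accept_rx(uint8_t msid)
    {
        if (stored_msg_id == msid) {
            return false; /* retransmission: our GoodCRC was missed */
        }
        stored_msg_id = msid; /* remember it for the next comparison */
        return true; /* new message: hand it to the Policy Engine */
    }
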
/*
 * @brief Protocol Layer Transmit State table
 */