Lines Matching +full:transmit +full:- +full:retries
4 * SPDX-License-Identifier: Apache-2.0
27 struct usbc_port_data *data = dev->data; in pe_set_ready_state()
29 if (data->pe->power_role == TC_ROLE_SOURCE) { in pe_set_ready_state()
43 struct usbc_port_data *data = dev->data; in common_dpm_requests()
44 struct policy_engine *pe = data->pe; in common_dpm_requests()
46 if (pe->dpm_request > REQUEST_TC_END) { in common_dpm_requests()
47 atomic_set_bit(pe->flags, PE_FLAGS_DPM_INITIATED_AMS); in common_dpm_requests()
49 if (pe->dpm_request == REQUEST_PE_DR_SWAP) { in common_dpm_requests()
52 } else if (pe->dpm_request == REQUEST_PE_SOFT_RESET_SEND) { in common_dpm_requests()
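The common_dpm_requests() matches above show the dispatch pattern: any DPM request numbered above REQUEST_TC_END is a policy-engine-level request, so the handler first flags the AMS as DPM-initiated and then branches on the specific request. A minimal standalone sketch of that shape, with invented request and flag names (not the Zephyr API):

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical request IDs: anything above REQ_TC_END is a PE-level request. */
    enum dpm_request {
        REQ_NONE,
        REQ_TC_END,
        REQ_PE_DR_SWAP,
        REQ_PE_SOFT_RESET_SEND,
    };

    #define FLAG_DPM_INITIATED_AMS (1u << 0)

    struct sketch_pe {
        uint32_t flags;
        enum dpm_request dpm_request;
    };

    /* Returns true when a PE-level request was accepted and an AMS begins. */
    static bool common_dpm_requests_sketch(struct sketch_pe *pe)
    {
        if (pe->dpm_request > REQ_TC_END) {
            /* Record that this AMS was started by the DPM, not the port partner. */
            pe->flags |= FLAG_DPM_INITIATED_AMS;

            if (pe->dpm_request == REQ_PE_DR_SWAP) {
                /* transition to the Send Swap state */
            } else if (pe->dpm_request == REQ_PE_SOFT_RESET_SEND) {
                /* transition to the Send Soft Reset state */
            }
            return true;
        }
        return false;
    }
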
66 struct usbc_port_data *data = dev->data; in pe_subsys_init()
67 struct policy_engine *pe = data->pe; in pe_subsys_init()
70 pe->dev = dev; in pe_subsys_init()
81 struct usbc_port_data *data = dev->data; in pe_start()
83 data->pe_enabled = true; in pe_start()
91 struct usbc_port_data *data = dev->data; in pe_suspend()
93 data->pe_enabled = false; in pe_suspend()
107 struct usbc_port_data *data = dev->data; in pe_init()
108 struct policy_engine *pe = data->pe; in pe_init()
111 atomic_clear(pe->flags); in pe_init()
114 usbc_timer_init(&pe->pd_t_sender_response, PD_T_NO_RESPONSE_MAX_MS); in pe_init()
115 usbc_timer_init(&pe->pd_t_chunking_not_supported, PD_T_CHUNKING_NOT_SUPPORTED_NOM_MS); in pe_init()
118 pe->hard_reset_counter = 0; in pe_init()
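pe_init() re-arms the engine's timers and zeroes the hard-reset counter each time the PE (re)starts. The usbc_timer_* helpers are internal to the USB-C stack; the sketch below only shows the general init/start/expired pattern they imply, with the current time passed in by the caller (nothing here is the actual Zephyr API):

    #include <stdbool.h>
    #include <stdint.h>

    struct sketch_timer {
        uint32_t timeout_ms;
        uint32_t start_ms;
        bool running;
    };

    static void timer_init(struct sketch_timer *t, uint32_t timeout_ms)
    {
        t->timeout_ms = timeout_ms;
        t->running = false;
    }

    static void timer_start(struct sketch_timer *t, uint32_t now_ms)
    {
        t->start_ms = now_ms;
        t->running = true;
    }

    static bool timer_expired(const struct sketch_timer *t, uint32_t now_ms)
    {
        /* Unsigned subtraction keeps the comparison valid across tick wraparound. */
        return t->running && (uint32_t)(now_ms - t->start_ms) >= t->timeout_ms;
    }
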
132 struct usbc_port_data *data = dev->data; in pe_is_running()
134 return data->pe_sm_state == SM_RUN; in pe_is_running()
142 struct usbc_port_data *data = dev->data; in pe_run()
143 struct policy_engine *pe = data->pe; in pe_run()
145 switch (data->pe_sm_state) { in pe_run()
147 if (data->pe_enabled == false) { in pe_run()
153 data->pe_sm_state = SM_RUN; in pe_run()
156 if (data->pe_enabled == false) { in pe_run()
157 data->pe_sm_state = SM_PAUSED; in pe_run()
166 pe->dpm_request = dpm_request; in pe_run()
172 * - Hard Reset request from Device Policy Manager in pe_run()
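pe_run() gates the whole state machine on data->pe_enabled: while disabled it parks in SM_PAUSED, and once re-enabled it moves back to SM_RUN, latches the pending DPM request and steps the current state. A rough standalone sketch of that gate with hypothetical types; the real function also has an init path and runs the SMF handlers:

    #include <stdbool.h>

    enum sm_state { SM_PAUSED, SM_RUN };

    struct sketch_port {
        bool pe_enabled;
        enum sm_state pe_sm_state;
    };

    /* One iteration of the engine; does nothing while the PE is disabled. */
    static void pe_run_sketch(struct sketch_port *p, int dpm_request)
    {
        switch (p->pe_sm_state) {
        case SM_PAUSED:
            if (!p->pe_enabled) {
                break;
            }
            p->pe_sm_state = SM_RUN;
            /* fall through so the first RUN step happens immediately */
        case SM_RUN:
            if (!p->pe_enabled) {
                p->pe_sm_state = SM_PAUSED;
                break;
            }
            /* latch dpm_request and run the current state handler here */
            (void)dpm_request;
            break;
        }
    }
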
190 struct usbc_port_data *data = dev->data; in pe_set_data_role()
191 struct policy_engine *pe = data->pe; in pe_set_data_role()
194 pe->data_role = dr; in pe_set_data_role()
197 tcpc_set_roles(data->tcpc, pe->power_role, pe->data_role); in pe_set_data_role()
205 struct usbc_port_data *data = dev->data; in pe_get_data_role()
207 return data->pe->data_role; in pe_get_data_role()
215 struct usbc_port_data *data = dev->data; in pe_get_power_role()
217 return data->pe->power_role; in pe_get_power_role()
245 struct usbc_port_data *data = dev->data; in pe_message_sent()
246 struct policy_engine *pe = data->pe; in pe_message_sent()
248 atomic_set_bit(pe->flags, PE_FLAGS_TX_COMPLETE); in pe_message_sent()
257 * Non-Interruptible AMS or
258 * * A message has not been sent after retries or
268 struct usbc_port_data *data = dev->data; in pe_soft_reset_is_required()
269 struct policy_engine *pe = data->pe; in pe_soft_reset_is_required()
276 if (atomic_test_bit(pe->flags, PE_FLAGS_EXPLICIT_CONTRACT)) { in pe_soft_reset_is_required()
283 if (!atomic_test_bit(pe->flags, PE_FLAGS_FIRST_MSG_SENT)) { in pe_soft_reset_is_required()
291 if (atomic_test_bit(pe->flags, PE_FLAGS_INTERRUPTIBLE_AMS)) { in pe_soft_reset_is_required()
305 struct usbc_port_data *data = dev->data; in pe_report_error()
306 struct policy_engine *pe = data->pe; in pe_report_error()
314 atomic_set_bit(pe->flags, PE_FLAGS_PROTOCOL_ERROR); in pe_report_error()
318 /* Transmit error */ in pe_report_error()
320 atomic_set_bit(pe->flags, PE_FLAGS_MSG_XMIT_ERROR); in pe_report_error()
322 /* All error types besides transmit errors are Protocol Errors. */ in pe_report_error()
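pe_report_error() folds every reported error into one of two flags: a failed transmission sets the transmit-error flag, and everything else is treated as a protocol error. A minimal sketch of that split, using invented flag and enum names:

    #include <stdint.h>

    enum sketch_pe_error { ERR_XMIT, ERR_PROTOCOL }; /* hypothetical */

    #define FLAG_MSG_XMIT_ERROR (1u << 0)
    #define FLAG_PROTOCOL_ERROR (1u << 1)

    struct sketch_err_ctx {
        uint32_t flags;
    };

    static void report_error(struct sketch_err_ctx *pe, enum sketch_pe_error e)
    {
        if (e == ERR_XMIT) {
            /* Transmit error: the message never made it onto the wire. */
            pe->flags |= FLAG_MSG_XMIT_ERROR;
        } else {
            /* Every other error type is handled as a protocol error. */
            pe->flags |= FLAG_PROTOCOL_ERROR;
        }
    }
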
341 struct usbc_port_data *data = dev->data; in pe_report_discard()
342 struct policy_engine *pe = data->pe; in pe_report_discard()
349 atomic_set_bit(pe->flags, PE_FLAGS_MSG_DISCARDED); in pe_report_discard()
358 struct usbc_port_data *data = dev->data; in pe_message_received()
359 struct policy_engine *pe = data->pe; in pe_message_received()
361 atomic_set_bit(pe->flags, PE_FLAGS_MSG_RECEIVED); in pe_message_received()
380 struct usbc_port_data *data = dev->data; in pe_hard_reset_sent()
381 struct policy_engine *pe = data->pe; in pe_hard_reset_sent()
383 atomic_clear_bit(pe->flags, PE_FLAGS_HARD_RESET_PENDING); in pe_hard_reset_sent()
391 struct usbc_port_data *data = dev->data; in pe_is_explicit_contract()
392 struct policy_engine *pe = data->pe; in pe_is_explicit_contract()
394 return atomic_test_bit(pe->flags, PE_FLAGS_EXPLICIT_CONTRACT); in pe_is_explicit_contract()
403 struct usbc_port_data *data = dev->data; in pe_dpm_initiated_ams()
404 struct policy_engine *pe = data->pe; in pe_dpm_initiated_ams()
406 return atomic_test_bit(pe->flags, PE_FLAGS_DPM_INITIATED_AMS); in pe_dpm_initiated_ams()
414 struct usbc_port_data *data = dev->data; in pe_dpm_end_ams()
415 struct policy_engine *pe = data->pe; in pe_dpm_end_ams()
417 atomic_clear_bit(pe->flags, PE_FLAGS_DPM_INITIATED_AMS); in pe_dpm_end_ams()
425 struct usbc_port_data *data = dev->data; in pe_first_msg_sent()
426 struct policy_engine *pe = data->pe; in pe_first_msg_sent()
428 atomic_set_bit(pe->flags, PE_FLAGS_FIRST_MSG_SENT); in pe_first_msg_sent()
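The notification hooks in this region (pe_report_discard(), pe_message_received(), pe_hard_reset_sent(), pe_first_msg_sent(), ...) all reduce to setting or clearing one bit in pe->flags from the protocol layer; the state handlers later consume those bits with atomic_test_and_clear_bit() so each event is observed exactly once. A standalone sketch of that producer/consumer pattern using C11 atomics instead of Zephyr's atomic_* API:

    #include <stdatomic.h>
    #include <stdbool.h>

    #define FLAG_TX_COMPLETE   (1u << 0)
    #define FLAG_MSG_RECEIVED  (1u << 1)
    #define FLAG_MSG_DISCARDED (1u << 2)

    static atomic_uint pe_flags;

    /* Producer side: called from the protocol layer when a message arrives. */
    static void notify_msg_received(void)
    {
        atomic_fetch_or(&pe_flags, FLAG_MSG_RECEIVED);
    }

    /* Consumer side: returns true at most once per posted event. */
    static bool test_and_clear(unsigned int flag)
    {
        unsigned int prev = atomic_fetch_and(&pe_flags, ~flag);

        return (prev & flag) != 0u;
    }
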
438 struct usbc_port_data *data = dev->data; in pe_set_state()
441 smf_set_state(SMF_CTX(data->pe), &pe_states[state]); in pe_set_state()
452 struct usbc_port_data *data = dev->data; in pe_get_state()
454 return data->pe->ctx.current - &pe_states[0]; in pe_get_state()
462 struct usbc_port_data *data = dev->data; in pe_get_last_state()
464 return data->pe->ctx.previous - &pe_states[0]; in pe_get_last_state()
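pe_get_state() and pe_get_last_state() recover the numeric state identifier by subtracting the base of the pe_states[] table from the SMF context's current/previous pointers, relying on the table being indexed by the state enum. A small standalone demonstration of that pointer-difference trick, with a made-up state table:

    #include <stddef.h>
    #include <stdio.h>

    struct state {
        void (*run)(void);
    };

    enum { ST_A, ST_B, ST_C, ST_COUNT };

    static const struct state states[ST_COUNT];

    /* The framework tracks the current state as a pointer into the table. */
    static const struct state *current = &states[ST_B];

    static ptrdiff_t state_index(void)
    {
        /* Difference of pointers into the same array yields the element index. */
        return current - &states[0];
    }

    int main(void)
    {
        printf("current state index: %td\n", state_index()); /* prints 1 */
        return 0;
    }
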
472 struct usbc_port_data *data = dev->data; in pe_send_soft_reset()
474 data->pe->soft_reset_sop = type; in pe_send_soft_reset()
484 struct usbc_port_data *data = dev->data; in pe_send_data_msg()
485 struct policy_engine *pe = data->pe; in pe_send_data_msg()
488 atomic_clear_bit(pe->flags, PE_FLAGS_TX_COMPLETE); in pe_send_data_msg()
498 struct usbc_port_data *data = dev->data; in pe_send_ctrl_msg()
499 struct policy_engine *pe = data->pe; in pe_send_ctrl_msg()
502 atomic_clear_bit(pe->flags, PE_FLAGS_TX_COMPLETE); in pe_send_ctrl_msg()
511 struct usbc_port_data *data = dev->data; in pe_send_request_msg()
512 struct protocol_layer_tx_t *prl_tx = data->prl_tx; in pe_send_request_msg()
513 struct pd_msg *msg = &prl_tx->emsg; in pe_send_request_msg()
516 msg->len = sizeof(rdo); in pe_send_request_msg()
518 memcpy(msg->data, rdo_bytes, msg->len); in pe_send_request_msg()
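pe_send_request_msg() serializes the 32-bit Request Data Object into the protocol-layer message buffer and records its length before queueing it for transmission. A sketch of that packing step, assuming the usual little-endian byte order for PD data objects; the structure and helper names here are invented:

    #include <stdint.h>
    #include <string.h>

    struct sketch_pd_msg {
        uint8_t data[28]; /* room for up to 7 data objects */
        size_t len;
    };

    /* Pack a 32-bit data object least-significant byte first. */
    static void put_le32(uint32_t v, uint8_t out[4])
    {
        out[0] = (uint8_t)v;
        out[1] = (uint8_t)(v >> 8);
        out[2] = (uint8_t)(v >> 16);
        out[3] = (uint8_t)(v >> 24);
    }

    static void send_request(struct sketch_pd_msg *msg, uint32_t rdo)
    {
        uint8_t rdo_bytes[sizeof(rdo)];

        put_le32(rdo, rdo_bytes);
        msg->len = sizeof(rdo);
        memcpy(msg->data, rdo_bytes, msg->len);
        /* hand msg to the protocol layer for transmission here */
    }
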
527 struct usbc_port_data *data = dev->data; in extended_message_not_supported()
528 struct protocol_layer_rx_t *prl_rx = data->prl_rx; in extended_message_not_supported()
529 uint32_t *payload = (uint32_t *)prl_rx->emsg.data; in extended_message_not_supported()
547 struct usbc_port_data *data = dev->data; in received_control_message()
548 struct protocol_layer_rx_t *prl_rx = data->prl_rx; in received_control_message()
550 if (prl_rx->emsg.len == 0 && header.message_type == mt && header.extended == 0) { in received_control_message()
563 struct usbc_port_data *data = dev->data; in received_data_message()
564 struct protocol_layer_rx_t *prl_rx = data->prl_rx; in received_data_message()
566 if (prl_rx->emsg.len > 0 && header.message_type == mt && header.extended == 0) { in received_data_message()
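received_control_message() and received_data_message() classify a stored, non-extended message purely from its header and payload length: control messages carry no data objects (len == 0), data messages carry at least one. A minimal sketch of those two checks; the decoded header struct here is hypothetical:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical decoded header; the real header packs these fields into 16 bits. */
    struct sketch_header {
        uint8_t message_type;
        bool extended;
    };

    static bool is_control_msg(const struct sketch_header *h, size_t len, uint8_t want)
    {
        return len == 0 && !h->extended && h->message_type == want;
    }

    static bool is_data_msg(const struct sketch_header *h, size_t len, uint8_t want)
    {
        return len > 0 && !h->extended && h->message_type == want;
    }
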
578 struct usbc_port_data *data = dev->data; in policy_check()
580 if (data->policy_cb_check) { in policy_check()
581 return data->policy_cb_check(dev, pc); in policy_check()
592 struct usbc_port_data *data = dev->data; in policy_notify()
594 if (data->policy_cb_notify) { in policy_notify()
595 data->policy_cb_notify(dev, notify); in policy_notify()
604 struct usbc_port_data *data = dev->data; in policy_wait_notify()
606 if (data->policy_cb_wait_notify) { in policy_wait_notify()
607 return data->policy_cb_wait_notify(dev, notify); in policy_wait_notify()
620 struct usbc_port_data *data = dev->data; in policy_get_request_data_object()
623 __ASSERT(data->policy_cb_get_rdo != NULL, "Callback pointer should not be NULL"); in policy_get_request_data_object()
625 return data->policy_cb_get_rdo(dev); in policy_get_request_data_object()
633 struct usbc_port_data *data = dev->data; in policy_set_src_cap()
635 if (data->policy_cb_set_src_cap) { in policy_set_src_cap()
636 data->policy_cb_set_src_cap(dev, pdos, num_pdos); in policy_set_src_cap()
645 struct usbc_port_data *data = dev->data; in policy_is_snk_at_default()
647 if (data->policy_cb_is_snk_at_default) { in policy_is_snk_at_default()
648 return data->policy_cb_is_snk_at_default(dev); in policy_is_snk_at_default()
659 struct usbc_port_data *data = dev->data; in policy_get_snk_cap()
662 __ASSERT(data->policy_cb_get_snk_cap != NULL, "Callback pointer should not be NULL"); in policy_get_snk_cap()
664 data->policy_cb_get_snk_cap(dev, pdos, num_pdos); in policy_get_snk_cap()
676 struct usbc_port_data *data = dev->data; in policy_set_port_partner_snk_cap()
678 if (data->policy_cb_set_port_partner_snk_cap) { in policy_set_port_partner_snk_cap()
679 data->policy_cb_set_port_partner_snk_cap(dev, pdos, num_pdos); in policy_set_port_partner_snk_cap()
689 struct usbc_port_data *data = dev->data; in policy_check_sink_request()
692 __ASSERT(data->policy_cb_check_sink_request != NULL, in policy_check_sink_request()
695 return data->policy_cb_check_sink_request(dev, request_msg); in policy_check_sink_request()
704 struct usbc_port_data *data = dev->data; in policy_present_contract_is_valid()
707 __ASSERT(data->policy_present_contract_is_valid != NULL, in policy_present_contract_is_valid()
710 return data->policy_present_contract_is_valid(dev, present_contract); in policy_present_contract_is_valid()
718 struct usbc_port_data *data = dev->data; in policy_is_ps_ready()
721 __ASSERT(data->policy_is_ps_ready != NULL, in policy_is_ps_ready()
724 return data->policy_is_ps_ready(dev); in policy_is_ps_ready()
733 struct usbc_port_data *data = dev->data; in policy_change_src_caps()
735 if (data->policy_change_src_caps == NULL) { in policy_change_src_caps()
739 return data->policy_change_src_caps(dev); in policy_change_src_caps()
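The policy_* wrappers above follow two conventions: optional Device Policy Manager callbacks are skipped (or given a default result) when the application has not registered them, while callbacks the engine cannot run without are guarded by an assertion. A sketch of the two flavours using plain assert() in place of Zephyr's __ASSERT, with invented callback names:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    struct sketch_callbacks {
        /* Optional: may be left NULL by the application. */
        void (*notify)(int event);
        /* Required for this port role: must be registered. */
        uint32_t (*get_rdo)(void);
    };

    static void policy_notify_sketch(const struct sketch_callbacks *cb, int event)
    {
        if (cb->notify != NULL) {
            cb->notify(event);
        }
        /* Silently ignored when the application did not register it. */
    }

    static uint32_t policy_get_rdo_sketch(const struct sketch_callbacks *cb)
    {
        assert(cb->get_rdo != NULL && "Callback pointer should not be NULL");
        return cb->get_rdo();
    }
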
750 const struct device *dev = pe->dev; in pe_drs_evaluate_swap_entry()
753 if (policy_check(dev, (pe->data_role == TC_ROLE_UFP) ? CHECK_DATA_ROLE_SWAP_TO_DFP in pe_drs_evaluate_swap_entry()
777 const struct device *dev = pe->dev; in pe_drs_evaluate_swap_run()
778 struct usbc_port_data *data = dev->data; in pe_drs_evaluate_swap_run()
779 struct protocol_layer_tx_t *prl_tx = data->prl_tx; in pe_drs_evaluate_swap_run()
780 struct protocol_layer_rx_t *prl_rx = data->prl_rx; in pe_drs_evaluate_swap_run()
782 if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_TX_COMPLETE)) { in pe_drs_evaluate_swap_run()
784 if (prl_tx->msg_type == PD_CTRL_ACCEPT) { in pe_drs_evaluate_swap_run()
786 pe_set_data_role(dev, (pe->data_role == TC_ROLE_UFP) in pe_drs_evaluate_swap_run()
789 policy_notify(dev, (pe->data_role == TC_ROLE_UFP) ? DATA_ROLE_IS_UFP in pe_drs_evaluate_swap_run()
793 } else if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_DISCARDED)) { in pe_drs_evaluate_swap_run()
799 pe_send_soft_reset(dev, prl_rx->emsg.type); in pe_drs_evaluate_swap_run()
811 const struct device *dev = pe->dev; in pe_drs_send_swap_entry()
824 const struct device *dev = pe->dev; in pe_drs_send_swap_run()
825 struct usbc_port_data *data = dev->data; in pe_drs_send_swap_run()
826 struct protocol_layer_rx_t *prl_rx = data->prl_rx; in pe_drs_send_swap_run()
829 if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_RECEIVED)) { in pe_drs_send_swap_run()
830 header = prl_rx->emsg.header; in pe_drs_send_swap_run()
843 atomic_set_bit(pe->flags, PE_FLAGS_WAIT_DATA_ROLE_SWAP); in pe_drs_send_swap_run()
844 usbc_timer_start(&pe->pd_t_wait_to_resend); in pe_drs_send_swap_run()
848 pe->data_role = (pe->data_role == TC_ROLE_UFP) ? TC_ROLE_DFP : TC_ROLE_UFP; in pe_drs_send_swap_run()
850 tcpc_set_roles(data->tcpc, pe->power_role, pe->data_role); in pe_drs_send_swap_run()
852 policy_notify(dev, (pe->data_role == TC_ROLE_UFP) ? DATA_ROLE_IS_UFP in pe_drs_send_swap_run()
858 * a Type-C Error Recovery. in pe_drs_send_swap_run()
867 } else if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_DISCARDED)) { in pe_drs_send_swap_run()
886 const struct device *dev = pe->dev; in pe_get_sink_cap_entry()
895 pe->submachine = SM_WAIT_FOR_TX; in pe_get_sink_cap_entry()
905 const struct device *dev = pe->dev; in pe_get_sink_cap_run()
906 struct usbc_port_data *data = dev->data; in pe_get_sink_cap_run()
907 struct protocol_layer_rx_t *prl_rx = data->prl_rx; in pe_get_sink_cap_run()
910 switch (pe->submachine) { in pe_get_sink_cap_run()
912 if (!atomic_test_and_clear_bit(pe->flags, PE_FLAGS_TX_COMPLETE)) { in pe_get_sink_cap_run()
915 pe->submachine = SM_WAIT_FOR_RX; in pe_get_sink_cap_run()
918 if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_RECEIVED)) { in pe_get_sink_cap_run()
919 header = prl_rx->emsg.header; in pe_get_sink_cap_run()
921 if (prl_rx->emsg.type == PD_PACKET_SOP) { in pe_get_sink_cap_run()
924 uint32_t *pdos = (uint32_t *)prl_rx->emsg.data; in pe_get_sink_cap_run()
926 PD_CONVERT_BYTES_TO_PD_HEADER_COUNT(prl_rx->emsg.len); in pe_get_sink_cap_run()
947 else if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_DISCARDED)) { in pe_get_sink_cap_run()
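pe_get_sink_cap_run() turns the received Sink_Capabilities payload back into PDOs: the byte count reported by the protocol layer is converted to a data-object count (each PD data object is 4 bytes, which is what PD_CONVERT_BYTES_TO_PD_HEADER_COUNT expresses) and the payload is then read as 32-bit objects. A standalone sketch that unpacks the objects explicitly instead of casting the buffer; names are invented:

    #include <stddef.h>
    #include <stdint.h>

    #define PD_OBJ_SIZE 4u /* every PD data object is 32 bits wide */

    static uint32_t get_le32(const uint8_t *p)
    {
        return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
               ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
    }

    /* Unpack up to max PDOs from a received payload; returns the object count. */
    static size_t unpack_pdos(const uint8_t *payload, size_t len,
                              uint32_t *pdos, size_t max)
    {
        size_t num = len / PD_OBJ_SIZE; /* bytes -> data-object count */

        if (num > max) {
            num = max;
        }
        for (size_t i = 0; i < num; i++) {
            pdos[i] = get_le32(&payload[i * PD_OBJ_SIZE]);
        }
        return num;
    }
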
982 const struct device *dev = pe->dev; in pe_soft_reset_entry()
988 pe->submachine = PE_SOFT_RESET_RUN_SEND_ACCEPT_MSG; in pe_soft_reset_entry()
994 const struct device *dev = pe->dev; in pe_soft_reset_run()
1000 switch (pe->submachine) { in pe_soft_reset_run()
1005 pe->submachine = PE_SOFT_RESET_RUN_SEND_ACCEPT_MSG_COMPLETE; in pe_soft_reset_run()
1013 if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_TX_COMPLETE)) { in pe_soft_reset_run()
1015 } else if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_PROTOCOL_ERROR)) { in pe_soft_reset_run()
1035 const struct device *dev = pe->dev; in pe_send_soft_reset_entry()
1041 atomic_set_bit(pe->flags, PE_FLAGS_SEND_SOFT_RESET); in pe_send_soft_reset_entry()
1050 const struct device *dev = pe->dev; in pe_send_soft_reset_run()
1051 struct usbc_port_data *data = dev->data; in pe_send_soft_reset_run()
1052 struct protocol_layer_rx_t *prl_rx = data->prl_rx; in pe_send_soft_reset_run()
1059 if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_SEND_SOFT_RESET)) { in pe_send_soft_reset_run()
1061 pe_send_ctrl_msg(dev, pe->soft_reset_sop, PD_CTRL_SOFT_RESET); in pe_send_soft_reset_run()
1065 if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_DISCARDED)) { in pe_send_soft_reset_run()
1069 } else if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_RECEIVED)) { in pe_send_soft_reset_run()
1075 header = prl_rx->emsg.header; in pe_send_soft_reset_run()
1080 } else if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_PROTOCOL_ERROR)) { in pe_send_soft_reset_run()
1096 const struct device *dev = pe->dev; in pe_send_not_supported_entry()
1114 const struct device *dev = pe->dev; in pe_send_not_supported_run()
1116 if (atomic_test_bit(pe->flags, PE_FLAGS_TX_COMPLETE) || in pe_send_not_supported_run()
1117 atomic_test_bit(pe->flags, PE_FLAGS_MSG_DISCARDED)) { in pe_send_not_supported_run()
1118 atomic_clear_bit(pe->flags, PE_FLAGS_TX_COMPLETE); in pe_send_not_supported_run()
1119 atomic_clear_bit(pe->flags, PE_FLAGS_MSG_DISCARDED); in pe_send_not_supported_run()
1137 usbc_timer_start(&pe->pd_t_chunking_not_supported); in pe_chunk_received_entry()
1146 const struct device *dev = pe->dev; in pe_chunk_received_run()
1153 if (usbc_timer_expired(&pe->pd_t_chunking_not_supported)) { in pe_chunk_received_run()
1165 const struct device *dev = pe->dev; in pe_sender_response_run()
1169 if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_TX_COMPLETE)) { in pe_sender_response_run()
1171 usbc_timer_start(&pe->pd_t_sender_response); in pe_sender_response_run()
1175 if (usbc_timer_expired(&pe->pd_t_sender_response)) { in pe_sender_response_run()
1197 if ((atomic_test_bit(pe->flags, PE_FLAGS_HAS_BEEN_PD_CONNECTED) == false) in pe_sender_response_run()
1198 && pe->hard_reset_counter > PD_N_HARD_RESET_COUNT) { in pe_sender_response_run()
1209 if (atomic_test_bit(pe->flags, PE_FLAGS_HAS_BEEN_PD_CONNECTED) in pe_sender_response_run()
1210 && pe->hard_reset_counter > PD_N_HARD_RESET_COUNT) { in pe_sender_response_run()
1250 usbc_timer_stop(&pe->pd_t_sender_response); in pe_sender_response_exit()
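The pe_sender_response_* matches show the shared timeout policy: the SenderResponse timer only starts once the request has actually been transmitted (TX_COMPLETE), it is stopped on state exit, and on expiry the outcome depends on whether the port has ever been PD-connected and on how many Hard Resets have already been issued (bounded by nHardResetCount). The sketch below only captures that decision shape with invented names; the concrete target states in the real handler depend on which state armed the timer:

    #include <stdbool.h>
    #include <stdint.h>

    #define N_HARD_RESET_COUNT 2u /* nHardResetCount from the PD specification */

    enum timeout_action { SEND_HARD_RESET, STOP_TRYING, ERROR_RECOVERY };

    struct sketch_sender_ctx {
        bool has_been_pd_connected;
        uint32_t hard_reset_counter;
    };

    /* Decide what to do when the SenderResponse timer expires. */
    static enum timeout_action on_sender_response_timeout(struct sketch_sender_ctx *pe)
    {
        if (pe->hard_reset_counter > N_HARD_RESET_COUNT) {
            /* Retries exhausted: a port that never reached a PD contract
             * stops negotiating, one that did escalates to recovery.
             */
            return pe->has_been_pd_connected ? ERROR_RECOVERY : STOP_TRYING;
        }

        /* Otherwise escalate with a Hard Reset and count the attempt. */
        pe->hard_reset_counter++;
        return SEND_HARD_RESET;
    }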