/*
 * Copyright (c) 2022 The Chromium OS Authors
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/sys/byteorder.h>
#include <zephyr/smf.h>
#include <zephyr/usb_c/usbc.h>
#include <zephyr/drivers/usb_c/usbc_pd.h>
#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(usbc_stack, CONFIG_USBC_STACK_LOG_LEVEL);

#include "usbc_stack.h"
#include "usbc_pe_common_internal.h"
#include "usbc_pe_snk_states_internal.h"
#include "usbc_pe_src_states_internal.h"

static const struct smf_state pe_states[PE_STATE_COUNT];

/**
 * @brief Handle common DPM requests
 *
 * @retval true if the request was handled, else false
 */
bool common_dpm_requests(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

	if (pe->dpm_request > REQUEST_TC_END) {
		atomic_set_bit(pe->flags, PE_FLAGS_DPM_INITIATED_AMS);

		if (pe->dpm_request == REQUEST_PE_DR_SWAP) {
			pe_set_state(dev, PE_DRS_SEND_SWAP);
			return true;
		} else if (pe->dpm_request == REQUEST_PE_SOFT_RESET_SEND) {
			pe_set_state(dev, PE_SEND_SOFT_RESET);
			return true;
		}
	}

	return false;
}
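
/*
 * Note: request values numerically above REQUEST_TC_END belong to the Policy
 * Engine; lower values are Type-C layer requests and are not handled here.
 */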

/**
 * @brief Initializes the PE state machine and enters the PE_SUSPEND state.
 */
void pe_subsys_init(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

	/* Save the port device object so states can access it */
	pe->dev = dev;

	/* Initialize the state machine */
	smf_set_initial(SMF_CTX(pe), &pe_states[PE_SUSPEND]);
}
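
/*
 * Illustrative bring-up order (a sketch, not the only valid sequence): the
 * stack initializes the subsystem once, then enables it. The devicetree node
 * label used here is hypothetical.
 *
 *   const struct device *port = DEVICE_DT_GET(DT_NODELABEL(usbc_port0));
 *
 *   pe_subsys_init(port);  // enter PE_SUSPEND
 *   pe_start(port);        // allow pe_run() to start the state machine
 */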

/**
 * @brief Starts the Policy Engine layer
 */
void pe_start(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;

	data->pe_enabled = true;
}

/**
 * @brief Suspend the Policy Engine layer
 */
void pe_suspend(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;

	data->pe_enabled = false;

	/*
	 * While we are paused, exit all states
	 * and wait until initialized again.
	 */
	pe_set_state(dev, PE_SUSPEND);
}

/**
 * @brief Initialize the Policy Engine layer
 */
static void pe_init(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

	/* Clear all flags */
	atomic_clear(pe->flags);

	/* Initialize common timers */
	usbc_timer_init(&pe->pd_t_sender_response, PD_T_SENDER_RESPONSE_NOM_MS);
	usbc_timer_init(&pe->pd_t_chunking_not_supported, PD_T_CHUNKING_NOT_SUPPORTED_NOM_MS);

	/* Initialize common counters */
	pe->hard_reset_counter = 0;

#ifdef CONFIG_USBC_CSM_SINK_ONLY
	pe_snk_init(dev);
#else
	pe_src_init(dev);
#endif
}

/**
 * @brief Tests if the Policy Engine layer is running
 */
bool pe_is_running(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;

	return data->pe_sm_state == SM_RUN;
}

/**
 * @brief Run the Policy Engine layer
 */
void pe_run(const struct device *dev, const int32_t dpm_request)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

	switch (data->pe_sm_state) {
	case SM_PAUSED:
		if (data->pe_enabled == false) {
			break;
		}
	/* fall through */
	case SM_INIT:
		pe_init(dev);
		data->pe_sm_state = SM_RUN;
	/* fall through */
	case SM_RUN:
		if (data->pe_enabled == false) {
			data->pe_sm_state = SM_PAUSED;
			break;
		}

		if (prl_is_running(dev) == false) {
			break;
		}

		/* Get any DPM Requests */
		pe->dpm_request = dpm_request;

		/*
		 * 8.3.3.3.8 PE_SNK_Hard_Reset State
		 * The Policy Engine Shall transition to the PE_SNK_Hard_Reset
		 * state from any state when:
		 * - Hard Reset request from Device Policy Manager
		 */
		if (dpm_request == REQUEST_PE_HARD_RESET_SEND) {
			pe_set_state(dev, PE_SNK_HARD_RESET);
		}

		/* Run state machine */
		smf_run_state(SMF_CTX(pe));

		break;
	}
}
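
/*
 * Illustrative request path (a sketch of one possible flow): the DPM queues
 * a request with usbc_request(), and the stack's port thread hands it to
 * pe_run() on its next iteration:
 *
 *   usbc_request(dev, REQUEST_PE_DR_SWAP);
 *   ...
 *   pe_run(dev, REQUEST_PE_DR_SWAP);
 */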

/**
 * @brief Sets Data Role
 */
void pe_set_data_role(const struct device *dev, enum tc_data_role dr)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

	/* Update data role */
	pe->data_role = dr;

	/* Notify TCPC of role update */
	tcpc_set_roles(data->tcpc, pe->power_role, pe->data_role);
}

/**
 * @brief Gets the current data role
 */
enum tc_data_role pe_get_data_role(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;

	return data->pe->data_role;
}

/**
 * @brief Gets the current power role
 */
enum tc_power_role pe_get_power_role(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;

	return data->pe->power_role;
}

/**
 * @brief Gets the current cable plug role
 */
enum tc_cable_plug pe_get_cable_plug(const struct device *dev)
{
	return PD_PLUG_FROM_DFP_UFP;
}

/**
 * @brief Informs the Policy Engine that a soft reset was received.
 */
void pe_got_soft_reset(const struct device *dev)
{
	/*
	 * The PE_SRC_Soft_Reset state Shall be entered from any state when a
	 * Soft_Reset Message is received from the Protocol Layer.
	 */
	pe_set_state(dev, PE_SOFT_RESET);
}

/**
 * @brief Informs the Policy Engine that a message was successfully sent
 */
void pe_message_sent(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

	atomic_set_bit(pe->flags, PE_FLAGS_TX_COMPLETE);
}

/**
 * @brief See section 8.3.3.4.1.1 PE_SRC_Send_Soft_Reset State:
 *
 * The PE_Send_Soft_Reset state shall be entered from
 * any state when
 * * A Protocol Error is detected by Protocol Layer during a
 *   Non-Interruptible AMS or
 * * A message has not been sent after retries or
 * * When not in an explicit contract and
 *   * Protocol Errors occurred on SOP during an Interruptible AMS or
 *   * Protocol Errors occurred on SOP during any AMS where the first
 *     Message in the sequence has not yet been sent i.e. an unexpected
 *     Message is received instead of the expected GoodCRC Message
 *     response.
 */
static bool pe_soft_reset_is_required(const struct device *dev, const enum pd_packet_type type)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

	/* Protocol Error not on SOP */
	if (type != PD_PACKET_SOP) {
		return false;
	}

	if (atomic_test_bit(pe->flags, PE_FLAGS_EXPLICIT_CONTRACT)) {
		/*
		 * If the first Message in an AMS has been passed to the
		 * Protocol Layer by the Policy Engine but has not yet been sent
		 * when the Protocol Error occurs, the Policy Engine Shall Not
		 * issue a Soft Reset
		 */
		if (!atomic_test_bit(pe->flags, PE_FLAGS_FIRST_MSG_SENT)) {
			return false;
		}

		/*
		 * If the Protocol Error occurs during an Interruptible AMS then
		 * the Policy Engine Shall Not issue a Soft Reset
		 */
		if (atomic_test_bit(pe->flags, PE_FLAGS_INTERRUPTIBLE_AMS)) {
			return false;
		}
	}

	return true;
}

/**
 * @brief Informs the Policy Engine of an error.
 */
void pe_report_error(const struct device *dev, const enum pe_error e,
		     const enum pd_packet_type type)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

	/*
	 * Generate Hard Reset if Protocol Error occurred
	 * while in PE_Send_Soft_Reset state.
	 */
	if (pe_get_state(dev) == PE_SEND_SOFT_RESET ||
	    pe_get_state(dev) == PE_SOFT_RESET) {
		atomic_set_bit(pe->flags, PE_FLAGS_PROTOCOL_ERROR);
		return;
	}

	/* Transmit error */
	if (e == ERR_XMIT) {
		atomic_set_bit(pe->flags, PE_FLAGS_MSG_XMIT_ERROR);
	}
	/* All error types besides transmit errors are Protocol Errors. */
	else if (pe_soft_reset_is_required(dev, type)) {
		policy_notify(dev, PROTOCOL_ERROR);
		pe_send_soft_reset(dev, type);
	}
	/*
	 * Transition to PE_Snk_Ready by a Protocol
	 * Error during an Interruptible AMS.
	 */
	else {
		pe_set_state(dev, PE_SNK_READY);
	}
}

/**
 * @brief Informs the Policy Engine of a discard.
 */
void pe_report_discard(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

	/*
	 * Clear local AMS indicator as our AMS message was discarded, and flag
	 * the discard for the PE
	 */
	pe_dpm_end_ams(dev);
	atomic_set_bit(pe->flags, PE_FLAGS_MSG_DISCARDED);
}

/**
 * @brief Called by the Protocol Layer to inform the Policy Engine
 *        that a message has been received.
 */
void pe_message_received(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

	atomic_set_bit(pe->flags, PE_FLAGS_MSG_RECEIVED);
}

/**
 * @brief Informs the Policy Engine that a hard reset was received.
 */
void pe_got_hard_reset(const struct device *dev)
{
	pe_set_state(dev, PE_SNK_TRANSITION_TO_DEFAULT);
}

/**
 * @brief Informs the Policy Engine that a hard reset was sent.
 */
void pe_hard_reset_sent(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

	atomic_clear_bit(pe->flags, PE_FLAGS_HARD_RESET_PENDING);
}

/**
 * @brief Indicates if an explicit contract is in place
 */
bool pe_is_explicit_contract(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

	return atomic_test_bit(pe->flags, PE_FLAGS_EXPLICIT_CONTRACT);
}

/**
 * @brief Return true if the PE is within an atomic messaging sequence
 *        that it initiated with a SOP* port partner.
 */
bool pe_dpm_initiated_ams(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

	return atomic_test_bit(pe->flags, PE_FLAGS_DPM_INITIATED_AMS);
}

/**
 * @brief End an atomic messaging sequence
 */
void pe_dpm_end_ams(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

	atomic_clear_bit(pe->flags, PE_FLAGS_DPM_INITIATED_AMS);
}

/**
 * @brief First message in AMS has been sent
 */
void pe_first_msg_sent(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

	atomic_set_bit(pe->flags, PE_FLAGS_FIRST_MSG_SENT);
}

/** Private Policy Engine Layer API below */

/**
 * @brief Sets a Policy Engine state
 */
void pe_set_state(const struct device *dev, const enum usbc_pe_state state)
{
	struct usbc_port_data *data = dev->data;

	__ASSERT(state < ARRAY_SIZE(pe_states), "invalid pe_state %d", state);
	smf_set_state(SMF_CTX(data->pe), &pe_states[state]);
}

/**
 * @brief Get the Policy Engine's current state
 */
enum usbc_pe_state pe_get_state(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;

	return data->pe->ctx.current - &pe_states[0];
}
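
/*
 * Note: the pointer subtraction above recovers the enum index because each
 * state's smf_state object is stored at pe_states[state]; e.g. when
 * ctx.current points at &pe_states[PE_SNK_READY], the difference is
 * PE_SNK_READY.
 */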

/**
 * @brief Get the Policy Engine's previous state
 */
enum usbc_pe_state pe_get_last_state(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;

	return data->pe->ctx.previous - &pe_states[0];
}

/**
 * @brief Send a soft reset message
 */
void pe_send_soft_reset(const struct device *dev, const enum pd_packet_type type)
{
	struct usbc_port_data *data = dev->data;

	data->pe->soft_reset_sop = type;
	pe_set_state(dev, PE_SEND_SOFT_RESET);
}

/**
 * @brief Send a Power Delivery Data Message
 */
void pe_send_data_msg(const struct device *dev, const enum pd_packet_type type,
		      const enum pd_data_msg_type msg)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

	/* Clear any previous TX status before sending a new message */
	atomic_clear_bit(pe->flags, PE_FLAGS_TX_COMPLETE);
	prl_send_data_msg(dev, type, msg);
}

/**
 * @brief Send a Power Delivery Control Message
 */
void pe_send_ctrl_msg(const struct device *dev, const enum pd_packet_type type,
		      const enum pd_ctrl_msg_type msg)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

	/* Clear any previous TX status before sending a new message */
	atomic_clear_bit(pe->flags, PE_FLAGS_TX_COMPLETE);
	prl_send_ctrl_msg(dev, type, msg);
}

/**
 * @brief Request desired voltage from source.
 */
void pe_send_request_msg(const struct device *dev, const uint32_t rdo)
{
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_tx_t *prl_tx = data->prl_tx;
	struct pd_msg *msg = &prl_tx->emsg;
	uint8_t rdo_bytes[4];

	msg->len = sizeof(rdo);
	sys_put_le32(rdo, rdo_bytes);
	memcpy(msg->data, rdo_bytes, msg->len);
	pe_send_data_msg(dev, PD_PACKET_SOP, PD_DATA_REQUEST);
}
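
/*
 * Example with an illustrative value: for rdo == 0x1304b12c, sys_put_le32()
 * stores the bytes 2c b1 04 13 in msg->data, i.e. least-significant byte
 * first, matching the little-endian wire format PD uses for data objects.
 */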

/**
 * @brief Transitions state after receiving an extended message.
 */
void extended_message_not_supported(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_rx_t *prl_rx = data->prl_rx;
	uint32_t *payload = (uint32_t *)prl_rx->emsg.data;
	union pd_ext_header ext_header;

	ext_header.raw_value = *payload;

	if (ext_header.chunked && ext_header.data_size > PD_MAX_EXTENDED_MSG_CHUNK_LEN) {
		pe_set_state(dev, PE_CHUNK_RECEIVED);
	} else {
		pe_set_state(dev, PE_SEND_NOT_SUPPORTED);
	}
}
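
/*
 * For reference (per the PD specification's extended message header layout,
 * which union pd_ext_header is assumed to mirror): bit 15 is Chunked,
 * bits 14..11 are Chunk Number, bit 10 is Request Chunk, and bits 8..0 are
 * Data Size; the Chunked and Data Size fields are the two tested above.
 */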

/**
 * @brief Check if a specific control message was received
 */
bool received_control_message(const struct device *dev, const union pd_header header,
			      const enum pd_ctrl_msg_type mt)
{
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_rx_t *prl_rx = data->prl_rx;

	if (prl_rx->emsg.len == 0 && header.message_type == mt && header.extended == 0) {
		return true;
	}

	return false;
}

/**
 * @brief Check if a specific data message was received
 */
bool received_data_message(const struct device *dev, const union pd_header header,
			   const enum pd_data_msg_type mt)
{
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_rx_t *prl_rx = data->prl_rx;

	if (prl_rx->emsg.len > 0 && header.message_type == mt && header.extended == 0) {
		return true;
	}

	return false;
}
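
/*
 * Example: a received Accept carries no data objects, so emsg.len == 0 and
 * only received_control_message(dev, header, PD_CTRL_ACCEPT) matches; a
 * received Source_Capabilities carries PDOs, so emsg.len > 0 and only the
 * data-message check matches. The payload length disambiguates the two,
 * since control and data message type codes overlap numerically.
 */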

/**
 * @brief Check a DPM policy
 */
bool policy_check(const struct device *dev, const enum usbc_policy_check_t pc)
{
	struct usbc_port_data *data = dev->data;

	if (data->policy_cb_check) {
		return data->policy_cb_check(dev, pc);
	} else {
		return false;
	}
}

/**
 * @brief Notify the DPM of a policy change
 */
void policy_notify(const struct device *dev, const enum usbc_policy_notify_t notify)
{
	struct usbc_port_data *data = dev->data;

	if (data->policy_cb_notify) {
		data->policy_cb_notify(dev, notify);
	}
}

/**
 * @brief Notify the DPM of a WAIT message reception
 */
bool policy_wait_notify(const struct device *dev, const enum usbc_policy_wait_t notify)
{
	struct usbc_port_data *data = dev->data;

	if (data->policy_cb_wait_notify) {
		return data->policy_cb_wait_notify(dev, notify);
	}

	return false;
}

#ifdef CONFIG_USBC_CSM_SINK_ONLY

/**
 * @brief Get a Request Data Object from the DPM
 */
uint32_t policy_get_request_data_object(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;

	/* This callback must be implemented */
	__ASSERT(data->policy_cb_get_rdo != NULL, "Callback pointer should not be NULL");

	return data->policy_cb_get_rdo(dev);
}
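
/*
 * A minimal DPM-side sketch (illustrative only; assumes the application
 * registered the callback via usbc_set_policy_cb_get_rdo(), and app_rdo is
 * a hypothetical RDO value prepared by the application):
 *
 *   static uint32_t app_policy_cb_get_rdo(const struct device *dev)
 *   {
 *           return app_rdo;
 *   }
 */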

/**
 * @brief Send the received source caps to the DPM
 */
void policy_set_src_cap(const struct device *dev, const uint32_t *pdos, const int num_pdos)
{
	struct usbc_port_data *data = dev->data;

	if (data->policy_cb_set_src_cap) {
		data->policy_cb_set_src_cap(dev, pdos, num_pdos);
	}
}

/**
 * @brief Check if the sink is at the default level
 */
bool policy_is_snk_at_default(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;

	if (data->policy_cb_is_snk_at_default) {
		return data->policy_cb_is_snk_at_default(dev);
	}

	return true;
}

/**
 * @brief Get sink caps from the DPM
 */
void policy_get_snk_cap(const struct device *dev, uint32_t **pdos, int *num_pdos)
{
	struct usbc_port_data *data = dev->data;

	/* This callback must be implemented */
	__ASSERT(data->policy_cb_get_snk_cap != NULL, "Callback pointer should not be NULL");

	data->policy_cb_get_snk_cap(dev, pdos, num_pdos);
}

#else /* CONFIG_USBC_CSM_SOURCE_ONLY */

/**
 * @brief Send the received sink caps to the DPM
 */
void policy_set_port_partner_snk_cap(const struct device *dev,
				     const uint32_t *pdos,
				     const int num_pdos)
{
	struct usbc_port_data *data = dev->data;

	if (data->policy_cb_set_port_partner_snk_cap) {
		data->policy_cb_set_port_partner_snk_cap(dev, pdos, num_pdos);
	}
}

/**
 * @brief Check if Sink Request can be met by DPM
 */
enum usbc_snk_req_reply_t policy_check_sink_request(const struct device *dev,
						    const uint32_t request_msg)
{
	struct usbc_port_data *data = dev->data;

	/* This callback must be implemented */
	__ASSERT(data->policy_cb_check_sink_request != NULL,
		 "Callback pointer should not be NULL");

	return data->policy_cb_check_sink_request(dev, request_msg);
}

/**
 * @brief Check if the present contract is still valid
 */
bool policy_present_contract_is_valid(const struct device *dev,
				      const uint32_t present_contract)
{
	struct usbc_port_data *data = dev->data;

	/* This callback must be implemented */
	__ASSERT(data->policy_present_contract_is_valid != NULL,
		 "Callback pointer should not be NULL");

	return data->policy_present_contract_is_valid(dev, present_contract);
}

/**
 * @brief Check if the power supply is ready
 */
bool policy_is_ps_ready(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;

	/* This callback must be implemented */
	__ASSERT(data->policy_is_ps_ready != NULL,
		 "Callback pointer should not be NULL");

	return data->policy_is_ps_ready(dev);
}

/**
 * @brief Ask the DPM to change the Source Caps.
 *	  Returns true if source caps have been updated, else false
 */
bool policy_change_src_caps(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;

	if (data->policy_change_src_caps == NULL) {
		return false;
	}

	return data->policy_change_src_caps(dev);
}

#endif /* CONFIG_USBC_CSM_SINK_ONLY */

/**
 * @brief PE_DRS_Evaluate_Swap Entry state
 */
static void pe_drs_evaluate_swap_entry(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	/* Get evaluation of Data Role Swap request from Device Policy Manager */
	if (policy_check(dev, (pe->data_role == TC_ROLE_UFP) ? CHECK_DATA_ROLE_SWAP_TO_DFP
							     : CHECK_DATA_ROLE_SWAP_TO_UFP)) {
		/*
		 * PE_DRS_DFP_UFP_Accept_Swap and PE_DRS_UFP_DFP_Accept_Swap
		 * State embedded here
		 */
		/* Send Accept message */
		pe_send_ctrl_msg(dev, PD_PACKET_SOP, PD_CTRL_ACCEPT);
	} else {
		/*
		 * PE_DRS_DFP_UFP_Reject_Swap and PE_DRS_UFP_DFP_Reject_Swap
		 * State embedded here
		 */
		/* Send Reject message */
		pe_send_ctrl_msg(dev, PD_PACKET_SOP, PD_CTRL_REJECT);
	}
}

/**
 * @brief PE_DRS_Evaluate_Swap Run state
 */
static void pe_drs_evaluate_swap_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_tx_t *prl_tx = data->prl_tx;
	struct protocol_layer_rx_t *prl_rx = data->prl_rx;

	if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_TX_COMPLETE)) {
		/* Only update data roles if last message sent was Accept */
		if (prl_tx->msg_type == PD_CTRL_ACCEPT) {
			/* Update Data Role */
			pe_set_data_role(dev, (pe->data_role == TC_ROLE_UFP)
					      ? TC_ROLE_DFP : TC_ROLE_UFP);
			/* Inform Device Policy Manager of Data Role Change */
			policy_notify(dev, (pe->data_role == TC_ROLE_UFP) ? DATA_ROLE_IS_UFP
									  : DATA_ROLE_IS_DFP);
		}
		pe_set_state(dev, PE_SNK_READY);
	} else if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_DISCARDED)) {
		/*
		 * Inform Device Policy Manager that the message was
		 * discarded
		 */
		policy_notify(dev, MSG_DISCARDED);
		pe_send_soft_reset(dev, prl_rx->emsg.type);
	}
}

/**
 * @brief PE_DRS_Send_Swap Entry state
 *	  NOTE: 8.3.3.18.1.5 PE_DRS_DFP_UFP_Send_Swap State
 *		8.3.3.18.2.5 PE_DRS_UFP_DFP_Send_Swap State
 */
static void pe_drs_send_swap_entry(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	/* Send Swap DR message */
	pe_send_ctrl_msg(dev, PD_PACKET_SOP, PD_CTRL_DR_SWAP);
}

/**
 * @brief PE_DRS_Send_Swap Run state
 *	  NOTE: Sender Response Timer is handled in super state.
 */
static void pe_drs_send_swap_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_rx_t *prl_rx = data->prl_rx;
	union pd_header header;

	if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_RECEIVED)) {
		header = prl_rx->emsg.header;
		if (received_control_message(dev, header, PD_CTRL_REJECT)) {
			/*
			 * Inform Device Policy Manager that Data Role Swap
			 * was Rejected
			 */
			policy_notify(dev, MSG_REJECTED_RECEIVED);
		} else if (received_control_message(dev, header, PD_CTRL_WAIT)) {
			/*
			 * Inform Device Policy Manager that Data Role Swap
			 * needs to Wait
			 */
			if (policy_wait_notify(dev, WAIT_DATA_ROLE_SWAP)) {
				atomic_set_bit(pe->flags, PE_FLAGS_WAIT_DATA_ROLE_SWAP);
				usbc_timer_start(&pe->pd_t_wait_to_resend);
			}
		} else if (received_control_message(dev, header, PD_CTRL_ACCEPT)) {
			/* Update Data Role */
			pe->data_role = (pe->data_role == TC_ROLE_UFP) ? TC_ROLE_DFP : TC_ROLE_UFP;
			/* Notify TCPC of role update */
			tcpc_set_roles(data->tcpc, pe->power_role, pe->data_role);
			/* Inform Device Policy Manager of Data Role Change */
			policy_notify(dev, (pe->data_role == TC_ROLE_UFP) ? DATA_ROLE_IS_UFP
									  : DATA_ROLE_IS_DFP);
		} else {
			/*
			 * A Protocol Error during a Data Role Swap when the
			 * DFP/UFP roles are changing shall directly trigger
			 * a Type-C Error Recovery.
			 */
			usbc_request(dev, REQUEST_TC_ERROR_RECOVERY);
			return;
		}

		/* return to ready state */
		pe_set_state(dev, PE_SNK_READY);
		return;
	} else if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_DISCARDED)) {
		/*
		 * Inform Device Policy Manager that the message
		 * was discarded
		 */
		policy_notify(dev, MSG_DISCARDED);
		pe_set_state(dev, PE_SNK_READY);
		return;
	}
}

/**
 * @brief PE_Get_Sink_Cap Entry state
 *	  NOTE: 8.3.3.18.7.1 PE_DR_SRC_Get_Source_Cap State
 *		8.3.3.18.9.1 PE_DR_SNK_Get_Sink_Cap State
 */
void pe_get_sink_cap_entry(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	/*
	 * On entry to the PE_DR_SNK_Get_Sink_Cap state the Policy Engine
	 * Shall send a Get_Sink_Cap Message and initialize and run the
	 * SenderResponseTimer.
	 */
	pe_send_ctrl_msg(dev, PD_PACKET_SOP, PD_CTRL_GET_SINK_CAP);
	/* Initialize Submachine */
	pe->submachine = SM_WAIT_FOR_TX;
}

/**
 * @brief PE_Get_Sink_Cap Run state
 *	  NOTE: Sender Response Timer is handled in super state.
 */
void pe_get_sink_cap_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_rx_t *prl_rx = data->prl_rx;
	union pd_header header;

	switch (pe->submachine) {
	case SM_WAIT_FOR_TX:
		if (!atomic_test_and_clear_bit(pe->flags, PE_FLAGS_TX_COMPLETE)) {
			break;
		}
		pe->submachine = SM_WAIT_FOR_RX;
		/* fall through */
	case SM_WAIT_FOR_RX:
		if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_RECEIVED)) {
			header = prl_rx->emsg.header;

			if (prl_rx->emsg.type == PD_PACKET_SOP) {
				if (received_data_message(dev, header, PD_DATA_SINK_CAP)) {
#ifdef CONFIG_USBC_CSM_SOURCE_ONLY
					uint32_t *pdos = (uint32_t *)prl_rx->emsg.data;
					uint32_t num_pdos =
						PD_CONVERT_BYTES_TO_PD_HEADER_COUNT(prl_rx->emsg.len);

					policy_set_port_partner_snk_cap(dev, pdos, num_pdos);
					pe_set_state(dev, PE_SRC_READY);
#else
					pe_set_state(dev, PE_SNK_READY);
#endif
					return;
				} else if (received_control_message(dev, header, PD_CTRL_REJECT) ||
					   received_control_message(dev,
								    header, PD_CTRL_NOT_SUPPORTED)) {
#ifdef CONFIG_USBC_CSM_SOURCE_ONLY
					pe_set_state(dev, PE_SRC_READY);
#else
					pe_set_state(dev, PE_SNK_READY);
#endif
					return;
				}
				/* Unexpected messages fall through to soft reset */
			}
			pe_send_soft_reset(dev, PD_PACKET_SOP);
			return;
		}
		/*
		 * Inform Device Policy Manager that the message
		 * was discarded
		 */
		else if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_DISCARDED)) {
			policy_notify(dev, MSG_DISCARDED);
			pe_set_state(dev, PE_SNK_READY);
			return;
		}
	}
}

static void pe_suspend_entry(void *obj)
{
	LOG_INF("PE_SUSPEND");
}

static void pe_suspend_run(void *obj)
{
	/* DO NOTHING */
}

/**
 * @brief The PE_SOFT_RESET state has two embedded states
 *	  that handle sending an accept message.
 */
enum pe_soft_reset_submachine_states {
	/* Send Accept message sub state */
	PE_SOFT_RESET_RUN_SEND_ACCEPT_MSG,
	/* Wait for Accept message to be sent or an error sub state */
	PE_SOFT_RESET_RUN_SEND_ACCEPT_MSG_COMPLETE
};

/**
 * @brief 8.3.3.4.2.2 PE_SNK_Soft_Reset State
 */
static void pe_soft_reset_entry(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	/* Reset the protocol layer */
	prl_reset(dev);

	/* Initialize PE Submachine */
	pe->submachine = PE_SOFT_RESET_RUN_SEND_ACCEPT_MSG;
}

static void pe_soft_reset_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	if (!prl_is_running(dev)) {
		return;
	}

	switch (pe->submachine) {
	case PE_SOFT_RESET_RUN_SEND_ACCEPT_MSG:
		/* Send Accept message to SOP */
		pe_send_ctrl_msg(dev, PD_PACKET_SOP, PD_CTRL_ACCEPT);
		/* Move to next substate */
		pe->submachine = PE_SOFT_RESET_RUN_SEND_ACCEPT_MSG_COMPLETE;
		break;
	case PE_SOFT_RESET_RUN_SEND_ACCEPT_MSG_COMPLETE:
		/*
		 * The Policy Engine Shall transition to the
		 * PE_SNK_Wait_for_Capabilities state when:
		 * 1: The Accept message has been sent on SOP
		 */
		if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_TX_COMPLETE)) {
			pe_set_state(dev, PE_SNK_WAIT_FOR_CAPABILITIES);
		} else if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_PROTOCOL_ERROR)) {
			/*
			 * The Policy Engine Shall transition to the
			 * PE_SNK_Hard_Reset state when:
			 * 1: The Protocol Layer indicates that a
			 *    transmission error has occurred.
			 */
			pe_set_state(dev, PE_SNK_HARD_RESET);
		}
		break;
	}
}

/**
 * @brief PE_Send_Soft_Reset Entry State
 *	  NOTE: Sender Response Timer is handled in super state.
 */
static void pe_send_soft_reset_entry(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	LOG_INF("PE_SNK_Send_Soft_Reset");

	/* Reset Protocol Layer */
	prl_reset(dev);
	atomic_set_bit(pe->flags, PE_FLAGS_SEND_SOFT_RESET);
}

/**
 * @brief PE_Send_Soft_Reset Run State
 */
static void pe_send_soft_reset_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_rx_t *prl_rx = data->prl_rx;
	union pd_header header;

	if (prl_is_running(dev) == false) {
		return;
	}

	if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_SEND_SOFT_RESET)) {
		/* Send Soft Reset message */
		pe_send_ctrl_msg(dev, pe->soft_reset_sop, PD_CTRL_SOFT_RESET);
		return;
	}

	if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_DISCARDED)) {
		/* Inform Device Policy Manager that the message was discarded */
		policy_notify(dev, MSG_DISCARDED);
		pe_set_state(dev, PE_SNK_READY);
	} else if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_RECEIVED)) {
		/*
		 * The Policy Engine Shall transition to the PE_SNK_Wait_for_Capabilities
		 * state when:
		 * 1: An Accept Message has been received on SOP
		 */
		header = prl_rx->emsg.header;

		if (received_control_message(dev, header, PD_CTRL_ACCEPT)) {
			pe_set_state(dev, PE_SNK_WAIT_FOR_CAPABILITIES);
		}
	} else if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_PROTOCOL_ERROR)) {
		/*
		 * The Policy Engine Shall transition to the PE_SNK_Hard_Reset state when:
		 * 1: A SenderResponseTimer timeout occurs (handled in super state)
		 * 2: Or the Protocol Layer indicates that a transmission error has occurred
		 */
		pe_set_state(dev, PE_SNK_HARD_RESET);
	}
}

/**
 * @brief 8.3.3.6.2.1 PE_SNK_Send_Not_Supported State
 */
static void pe_send_not_supported_entry(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	LOG_INF("PE_Not_Supported");

	/* Notify the Device Policy Manager of unsupported message reception */
	policy_notify(dev, MSG_NOT_SUPPORTED_RECEIVED);

	/* Request the Protocol Layer to send a Not_Supported or Reject Message. */
	if (prl_get_rev(dev, PD_PACKET_SOP) > PD_REV20) {
		pe_send_ctrl_msg(dev, PD_PACKET_SOP, PD_CTRL_NOT_SUPPORTED);
	} else {
		pe_send_ctrl_msg(dev, PD_PACKET_SOP, PD_CTRL_REJECT);
	}
}

static void pe_send_not_supported_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	if (atomic_test_bit(pe->flags, PE_FLAGS_TX_COMPLETE) ||
	    atomic_test_bit(pe->flags, PE_FLAGS_MSG_DISCARDED)) {
		atomic_clear_bit(pe->flags, PE_FLAGS_TX_COMPLETE);
		atomic_clear_bit(pe->flags, PE_FLAGS_MSG_DISCARDED);
		pe_set_state(dev, PE_SNK_READY);
	}
}

/**
 * @brief 8.3.3.6.2.3 PE_SNK_Chunk_Received State
 */
static void pe_chunk_received_entry(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;

	LOG_INF("PE_SNK_Chunk_Received");

	/*
	 * On entry to the PE_SNK_Chunk_Received state, the Policy Engine
	 * Shall initialize and run the ChunkingNotSupportedTimer.
	 */
	usbc_timer_start(&pe->pd_t_chunking_not_supported);
}

/**
 * @brief PE_Chunk_Received Run State
 */
static void pe_chunk_received_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	/*
	 * The Policy Engine Shall transition to PE_SNK_Send_Not_Supported
	 * when:
	 * 1: The ChunkingNotSupportedTimer has timed out.
	 */
	if (usbc_timer_expired(&pe->pd_t_chunking_not_supported)) {
		pe_set_state(dev, PE_SEND_NOT_SUPPORTED);
	}
}

/**
 * @brief Super State for any message that requires
 *	  Sender Response Timer functionality
 */
static void pe_sender_response_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;
	enum usbc_pe_state current_state = pe_get_state(dev);

	/* Start the Sender Response Timer after the message is sent */
	if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_TX_COMPLETE)) {
		/* Start Sender Response Timer */
		usbc_timer_start(&pe->pd_t_sender_response);
	}

	/* Check if the Sender Response Timer has expired */
	if (usbc_timer_expired(&pe->pd_t_sender_response)) {
		/*
		 * Handle Sender Response Timeouts
		 */
		switch (current_state) {
#ifdef CONFIG_USBC_CSM_SINK_ONLY
		/* Sink states */
		case PE_SNK_SELECT_CAPABILITY:
			pe_set_state(dev, PE_SNK_HARD_RESET);
			break;
		case PE_SNK_GET_SOURCE_CAP:
			pe_set_state(dev, PE_SNK_READY);
			break;
#else
		/* Source states */
		case PE_SRC_DISCOVERY:
			/*
			 * The Policy Engine Shall go to the PE_SRC_Disabled state when:
			 * 1) The Port Partners have not been PD Connected
			 * 2) And the NoResponseTimer times out
			 * 3) And the HardResetCounter > nHardResetCount.
			 */
			if ((atomic_test_bit(pe->flags, PE_FLAGS_HAS_BEEN_PD_CONNECTED) == false)
			    && pe->hard_reset_counter > PD_N_HARD_RESET_COUNT) {
				pe_set_state(dev, PE_SUSPEND);
			}
			break;
		case PE_SRC_SEND_CAPABILITIES:
			/*
			 * The Policy Engine Shall go to the ErrorRecovery state when:
			 * 1) The Port Partners have previously been PD Connected
			 * 2) And the NoResponseTimer times out
			 * 3) And the HardResetCounter > nHardResetCount
			 */
			if (atomic_test_bit(pe->flags, PE_FLAGS_HAS_BEEN_PD_CONNECTED)
			    && pe->hard_reset_counter > PD_N_HARD_RESET_COUNT) {
				usbc_request(dev, REQUEST_TC_ERROR_RECOVERY);
			}
			/*
			 * The Policy Engine Shall transition to the PE_SRC_Hard_Reset
			 * state when:
			 * 1) The SenderResponseTimer times out
			 */
			else {
				pe_set_state(dev, PE_SRC_HARD_RESET);
			}
			break;
		case PE_GET_SINK_CAP:
			pe_send_soft_reset(dev, PD_PACKET_SOP);
			break;
#endif
		/*
		 * Common states:
		 * Could transition to Sink or Source states,
		 * depending on the current Data Role
		 */
		case PE_SEND_SOFT_RESET:
			pe_set_state(dev, PE_SNK_HARD_RESET);
			break;
		case PE_DRS_SEND_SWAP:
			pe_set_state(dev, PE_SNK_READY);
			break;

		/* This should not happen. Implementation error */
		default:
			LOG_INF("Unhandled Sender Response Timeout State!");
		}
	}
}

static void pe_sender_response_exit(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;

	/* Stop Sender Response Timer */
	usbc_timer_stop(&pe->pd_t_sender_response);
}
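
/*
 * States that name &pe_states[PE_SENDER_RESPONSE_PARENT] as their parent in
 * the table below inherit this run/exit pair: smf_run_state() executes the
 * child's run function first and then the parent's, so the timer handling
 * above runs for every such child unless the child transitions away first.
 */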

/**
 * @brief Policy engine State table
 */
static const struct smf_state pe_states[PE_STATE_COUNT] = {
	/* PE Super States */
	[PE_SENDER_RESPONSE_PARENT] = SMF_CREATE_STATE(
		NULL,
		pe_sender_response_run,
		pe_sender_response_exit,
		NULL),
#ifdef CONFIG_USBC_CSM_SOURCE_ONLY
	[PE_SRC_HARD_RESET_PARENT] = SMF_CREATE_STATE(
		pe_src_hard_reset_parent_entry,
		pe_src_hard_reset_parent_run,
		pe_src_hard_reset_parent_exit,
		NULL),
#endif
#ifdef CONFIG_USBC_CSM_SINK_ONLY
	[PE_SNK_STARTUP] = SMF_CREATE_STATE(
		pe_snk_startup_entry,
		pe_snk_startup_run,
		NULL,
		NULL),
	[PE_SNK_DISCOVERY] = SMF_CREATE_STATE(
		pe_snk_discovery_entry,
		pe_snk_discovery_run,
		NULL,
		NULL),
	[PE_SNK_WAIT_FOR_CAPABILITIES] = SMF_CREATE_STATE(
		pe_snk_wait_for_capabilities_entry,
		pe_snk_wait_for_capabilities_run,
		pe_snk_wait_for_capabilities_exit,
		NULL),
	[PE_SNK_EVALUATE_CAPABILITY] = SMF_CREATE_STATE(
		pe_snk_evaluate_capability_entry,
		NULL,
		NULL,
		NULL),
	[PE_SNK_SELECT_CAPABILITY] = SMF_CREATE_STATE(
		pe_snk_select_capability_entry,
		pe_snk_select_capability_run,
		NULL,
		&pe_states[PE_SENDER_RESPONSE_PARENT]),
	[PE_SNK_READY] = SMF_CREATE_STATE(
		pe_snk_ready_entry,
		pe_snk_ready_run,
		pe_snk_ready_exit,
		NULL),
	[PE_SNK_HARD_RESET] = SMF_CREATE_STATE(
		pe_snk_hard_reset_entry,
		pe_snk_hard_reset_run,
		NULL,
		NULL),
	[PE_SNK_TRANSITION_TO_DEFAULT] = SMF_CREATE_STATE(
		pe_snk_transition_to_default_entry,
		pe_snk_transition_to_default_run,
		NULL,
		NULL),
	[PE_SNK_GIVE_SINK_CAP] = SMF_CREATE_STATE(
		pe_snk_give_sink_cap_entry,
		pe_snk_give_sink_cap_run,
		NULL,
		NULL),
	[PE_SNK_GET_SOURCE_CAP] = SMF_CREATE_STATE(
		pe_snk_get_source_cap_entry,
		pe_snk_get_source_cap_run,
		NULL,
		&pe_states[PE_SENDER_RESPONSE_PARENT]),
	[PE_SNK_TRANSITION_SINK] = SMF_CREATE_STATE(
		pe_snk_transition_sink_entry,
		pe_snk_transition_sink_run,
		pe_snk_transition_sink_exit,
		NULL),
#else
	[PE_SRC_STARTUP] = SMF_CREATE_STATE(
		pe_src_startup_entry,
		pe_src_startup_run,
		NULL,
		NULL),
	[PE_SRC_DISCOVERY] = SMF_CREATE_STATE(
		pe_src_discovery_entry,
		pe_src_discovery_run,
		pe_src_discovery_exit,
		&pe_states[PE_SENDER_RESPONSE_PARENT]),
	[PE_SRC_SEND_CAPABILITIES] = SMF_CREATE_STATE(
		pe_src_send_capabilities_entry,
		pe_src_send_capabilities_run,
		NULL,
		&pe_states[PE_SENDER_RESPONSE_PARENT]),
	[PE_SRC_NEGOTIATE_CAPABILITY] = SMF_CREATE_STATE(
		pe_src_negotiate_capability_entry,
		NULL,
		NULL,
		NULL),
	[PE_SRC_CAPABILITY_RESPONSE] = SMF_CREATE_STATE(
		pe_src_capability_response_entry,
		pe_src_capability_response_run,
		NULL,
		NULL),
	[PE_SRC_TRANSITION_SUPPLY] = SMF_CREATE_STATE(
		pe_src_transition_supply_entry,
		pe_src_transition_supply_run,
		pe_src_transition_supply_exit,
		NULL),
	[PE_SRC_READY] = SMF_CREATE_STATE(
		pe_src_ready_entry,
		pe_src_ready_run,
		pe_src_ready_exit,
		NULL),
	[PE_SRC_TRANSITION_TO_DEFAULT] = SMF_CREATE_STATE(
		pe_src_transition_to_default_entry,
		pe_src_transition_to_default_run,
		pe_src_transition_to_default_exit,
		NULL),
	[PE_SRC_HARD_RESET_RECEIVED] = SMF_CREATE_STATE(
		NULL,
		NULL,
		NULL,
		&pe_states[PE_SRC_HARD_RESET_PARENT]),
	[PE_SRC_HARD_RESET] = SMF_CREATE_STATE(
		pe_src_hard_reset_entry,
		NULL,
		NULL,
		&pe_states[PE_SRC_HARD_RESET_PARENT]),
#endif
	[PE_GET_SINK_CAP] = SMF_CREATE_STATE(
		pe_get_sink_cap_entry,
		pe_get_sink_cap_run,
		NULL,
		&pe_states[PE_SENDER_RESPONSE_PARENT]),
	[PE_SEND_SOFT_RESET] = SMF_CREATE_STATE(
		pe_send_soft_reset_entry,
		pe_send_soft_reset_run,
		NULL,
		&pe_states[PE_SENDER_RESPONSE_PARENT]),
	[PE_SOFT_RESET] = SMF_CREATE_STATE(
		pe_soft_reset_entry,
		pe_soft_reset_run,
		NULL,
		NULL),
	[PE_SEND_NOT_SUPPORTED] = SMF_CREATE_STATE(
		pe_send_not_supported_entry,
		pe_send_not_supported_run,
		NULL,
		NULL),
	[PE_DRS_EVALUATE_SWAP] = SMF_CREATE_STATE(
		pe_drs_evaluate_swap_entry,
		pe_drs_evaluate_swap_run,
		NULL,
		NULL),
	[PE_DRS_SEND_SWAP] = SMF_CREATE_STATE(
		pe_drs_send_swap_entry,
		pe_drs_send_swap_run,
		NULL,
		&pe_states[PE_SENDER_RESPONSE_PARENT]),
	[PE_CHUNK_RECEIVED] = SMF_CREATE_STATE(
		pe_chunk_received_entry,
		pe_chunk_received_run,
		NULL,
		NULL),
	[PE_SUSPEND] = SMF_CREATE_STATE(
		pe_suspend_entry,
		pe_suspend_run,
		NULL,
		NULL),
};
BUILD_ASSERT(ARRAY_SIZE(pe_states) == PE_STATE_COUNT);