/*
 * Copyright (c) 2022 The Chromium OS Authors
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/sys/byteorder.h>
#include <zephyr/smf.h>
#include <zephyr/usb_c/usbc.h>
#include <zephyr/drivers/usb_c/usbc_pd.h>
#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(usbc_stack, CONFIG_USBC_STACK_LOG_LEVEL);

#include "usbc_stack.h"
#include "usbc_pe_common_internal.h"
#include "usbc_pe_snk_states_internal.h"
#include "usbc_pe_src_states_internal.h"

static const struct smf_state pe_states[PE_STATE_COUNT];

/**
 * @brief Set the ready state for sink or source.
 */
static void pe_set_ready_state(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;

	if (data->pe->power_role == TC_ROLE_SOURCE) {
		pe_set_state(dev, PE_SRC_READY);
	} else {
		pe_set_state(dev, PE_SNK_READY);
	}
}

/**
 * @brief Handle common DPM requests
 *
 * @retval true if request was handled, else false
 */
bool common_dpm_requests(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

	if (pe->dpm_request > REQUEST_TC_END) {
		atomic_set_bit(pe->flags, PE_FLAGS_DPM_INITIATED_AMS);

		if (pe->dpm_request == REQUEST_PE_DR_SWAP) {
			pe_set_state(dev, PE_DRS_SEND_SWAP);
			return true;
		} else if (pe->dpm_request == REQUEST_PE_SOFT_RESET_SEND) {
			pe_set_state(dev, PE_SEND_SOFT_RESET);
			return true;
		}
	}

	return false;
}
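
/*
 * Illustrative example (not part of this file): a Device Policy Manager
 * submits one of the requests handled above through the public request API,
 * e.g.:
 *
 *	usbc_request(dev, REQUEST_PE_DR_SWAP);
 *
 * The request is passed to pe_run() on the next iteration of the port
 * thread and dispatched here.
 */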

/**
 * @brief Initializes the PE state machine and enters the PE_SUSPEND state.
 */
void pe_subsys_init(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

	/* Save the port device object so states can access it */
	pe->dev = dev;

	/* Initialize the state machine */
	smf_set_initial(SMF_CTX(pe), &pe_states[PE_SUSPEND]);
}

/**
 * @brief Starts the Policy Engine layer
 */
void pe_start(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;

	data->pe_enabled = true;
}

/**
 * @brief Suspend the Policy Engine layer
 */
void pe_suspend(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;

	data->pe_enabled = false;

	/*
	 * While we are paused, exit all states
	 * and wait until initialized again.
	 */
	pe_set_state(dev, PE_SUSPEND);
}

/**
 * @brief Initialize the Policy Engine layer
 */
static void pe_init(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

	/* Clear all flags */
	atomic_clear(pe->flags);

	/* Initialize common timers */
	usbc_timer_init(&pe->pd_t_sender_response, PD_T_SENDER_RESPONSE_NOM_MS);
	usbc_timer_init(&pe->pd_t_chunking_not_supported, PD_T_CHUNKING_NOT_SUPPORTED_NOM_MS);

	/* Initialize common counters */
	pe->hard_reset_counter = 0;

#ifdef CONFIG_USBC_CSM_SINK_ONLY
	pe_snk_init(dev);
#else
	pe_src_init(dev);
#endif
}

/**
 * @brief Tests if the Policy Engine layer is running
 */
bool pe_is_running(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;

	return data->pe_sm_state == SM_RUN;
}

/**
 * @brief Run the Policy Engine layer
 */
void pe_run(const struct device *dev, const int32_t dpm_request)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

	switch (data->pe_sm_state) {
	case SM_PAUSED:
		if (data->pe_enabled == false) {
			break;
		}
		/* fall through */
	case SM_INIT:
		pe_init(dev);
		data->pe_sm_state = SM_RUN;
		/* fall through */
	case SM_RUN:
		if (data->pe_enabled == false) {
			data->pe_sm_state = SM_PAUSED;
			break;
		}

		if (prl_is_running(dev) == false) {
			break;
		}

		/* Get any DPM Requests */
		pe->dpm_request = dpm_request;

		/*
		 * 8.3.3.3.8 PE_SNK_Hard_Reset State
		 * The Policy Engine Shall transition to the PE_SNK_Hard_Reset
		 * state from any state when:
		 *	- Hard Reset request from Device Policy Manager
		 */
		if (dpm_request == REQUEST_PE_HARD_RESET_SEND) {
			pe_set_state(dev, PE_SNK_HARD_RESET);
		}

		/* Run state machine */
		smf_run_state(SMF_CTX(pe));

		break;
	}
}

/**
 * @brief Sets Data Role
 */
void pe_set_data_role(const struct device *dev, enum tc_data_role dr)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

	/* Update data role */
	pe->data_role = dr;

	/* Notify TCPC of role update */
	tcpc_set_roles(data->tcpc, pe->power_role, pe->data_role);
}

/**
 * @brief Gets the current data role
 */
enum tc_data_role pe_get_data_role(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;

	return data->pe->data_role;
}

/**
 * @brief Gets the current power role
 */
enum tc_power_role pe_get_power_role(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;

	return data->pe->power_role;
}

/**
 * @brief Gets the current cable plug role
 */
enum tc_cable_plug pe_get_cable_plug(const struct device *dev)
{
	return PD_PLUG_FROM_DFP_UFP;
}

/**
 * @brief Informs the Policy Engine that a soft reset was received.
 */
void pe_got_soft_reset(const struct device *dev)
{
	/*
	 * The PE_SRC_Soft_Reset state Shall be entered from any state when a
	 * Soft_Reset Message is received from the Protocol Layer.
	 */
	pe_set_state(dev, PE_SOFT_RESET);
}

/**
 * @brief Informs the Policy Engine that a message was successfully sent
 */
void pe_message_sent(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

	atomic_set_bit(pe->flags, PE_FLAGS_TX_COMPLETE);
}

/**
 * @brief See section 8.3.3.4.1.1 PE_SRC_Send_Soft_Reset State:
 *
 * The PE_Send_Soft_Reset state shall be entered from
 * any state when
 * * A Protocol Error is detected by Protocol Layer during a
 *   Non-Interruptible AMS or
 * * A message has not been sent after retries or
 * * When not in an explicit contract and
 *   * Protocol Errors occurred on SOP during an Interruptible AMS or
 *   * Protocol Errors occurred on SOP during any AMS where the first
 *     Message in the sequence has not yet been sent i.e. an unexpected
 *     Message is received instead of the expected GoodCRC Message
 *     response.
 */
static bool pe_soft_reset_is_required(const struct device *dev, const enum pd_packet_type type)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

	/* Protocol Error not on SOP */
	if (type != PD_PACKET_SOP) {
		return false;
	}

	if (atomic_test_bit(pe->flags, PE_FLAGS_EXPLICIT_CONTRACT)) {
		/*
		 * If the first Message in an AMS has been passed to the
		 * Protocol Layer by the Policy Engine but has not yet been sent
		 * when the Protocol Error occurs, the Policy Engine Shall Not
		 * issue a Soft Reset
		 */
		if (!atomic_test_bit(pe->flags, PE_FLAGS_FIRST_MSG_SENT)) {
			return false;
		}

		/*
		 * If the Protocol Error occurs during an Interruptible AMS then
		 * the Policy Engine Shall Not issue a Soft Reset
		 */
		if (atomic_test_bit(pe->flags, PE_FLAGS_INTERRUPTIBLE_AMS)) {
			return false;
		}
	}

	return true;
}

/**
 * @brief Informs the Policy Engine of an error.
 */
void pe_report_error(const struct device *dev, const enum pe_error e,
		     const enum pd_packet_type type)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

	/*
	 * Generate Hard Reset if Protocol Error occurred
	 * while in PE_Send_Soft_Reset state.
	 */
	if (pe_get_state(dev) == PE_SEND_SOFT_RESET ||
	    pe_get_state(dev) == PE_SOFT_RESET) {
		atomic_set_bit(pe->flags, PE_FLAGS_PROTOCOL_ERROR);
		return;
	}

	/* Transmit error */
	if (e == ERR_XMIT) {
		atomic_set_bit(pe->flags, PE_FLAGS_MSG_XMIT_ERROR);
	}
	/* All error types besides transmit errors are Protocol Errors. */
	else if (pe_soft_reset_is_required(dev, type)) {
		policy_notify(dev, PROTOCOL_ERROR);
		pe_send_soft_reset(dev, type);
	}
	/*
	 * Transition to PE_SNK_Ready or PE_SRC_Ready on a Protocol
	 * Error during an Interruptible AMS.
	 */
	else {
		pe_set_ready_state(dev);
	}
}

/**
 * @brief Informs the Policy Engine of a discard.
 */
void pe_report_discard(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

	/*
	 * Clear local AMS indicator as our AMS message was discarded, and flag
	 * the discard for the PE
	 */
	pe_dpm_end_ams(dev);
	atomic_set_bit(pe->flags, PE_FLAGS_MSG_DISCARDED);
}

/**
 * @brief Called by the Protocol Layer to inform the Policy Engine
 *	  that a message has been received.
 */
void pe_message_received(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

	atomic_set_bit(pe->flags, PE_FLAGS_MSG_RECEIVED);

	/* Allow the PE to be executed once more and respond faster for the received message */
	usbc_bypass_next_sleep(dev);
}

/**
 * @brief Informs the Policy Engine that a hard reset was received.
 */
void pe_got_hard_reset(const struct device *dev)
{
	pe_set_state(dev, PE_SNK_TRANSITION_TO_DEFAULT);
}

/**
 * @brief Informs the Policy Engine that a hard reset was sent.
 */
void pe_hard_reset_sent(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

	atomic_clear_bit(pe->flags, PE_FLAGS_HARD_RESET_PENDING);
}

/**
 * @brief Indicates if an explicit contract is in place
 */
bool pe_is_explicit_contract(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

	return atomic_test_bit(pe->flags, PE_FLAGS_EXPLICIT_CONTRACT);
}

/**
 * @brief Return true if the PE is within an atomic messaging sequence
 *	  that it initiated with a SOP* port partner.
 */
bool pe_dpm_initiated_ams(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

	return atomic_test_bit(pe->flags, PE_FLAGS_DPM_INITIATED_AMS);
}

/**
 * @brief End an atomic messaging sequence
 */
void pe_dpm_end_ams(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

	atomic_clear_bit(pe->flags, PE_FLAGS_DPM_INITIATED_AMS);
}

/**
 * @brief First message in AMS has been sent
 */
void pe_first_msg_sent(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

	atomic_set_bit(pe->flags, PE_FLAGS_FIRST_MSG_SENT);
}

/** Private Policy Engine Layer API below */

/**
 * @brief Sets a Policy Engine state
 */
void pe_set_state(const struct device *dev, const enum usbc_pe_state state)
{
	struct usbc_port_data *data = dev->data;

	__ASSERT(state < ARRAY_SIZE(pe_states), "invalid pe_state %d", state);
	smf_set_state(SMF_CTX(data->pe), &pe_states[state]);

	/* Allow the PE to execute logic from the new state without additional delay */
	usbc_bypass_next_sleep(dev);
}

/**
 * @brief Get the Policy Engine's current state
 */
enum usbc_pe_state pe_get_state(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
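
	/* The state's enum value is its index in the pe_states table */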
	return data->pe->ctx.current - &pe_states[0];
}

/**
 * @brief Get the Policy Engine's previous state
 */
enum usbc_pe_state pe_get_last_state(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;

	return data->pe->ctx.previous - &pe_states[0];
}

/**
 * @brief Send a soft reset message
 */
void pe_send_soft_reset(const struct device *dev, const enum pd_packet_type type)
{
	struct usbc_port_data *data = dev->data;

	data->pe->soft_reset_sop = type;
	pe_set_state(dev, PE_SEND_SOFT_RESET);
}

/**
 * @brief Send a Power Delivery Data Message
 */
void pe_send_data_msg(const struct device *dev, const enum pd_packet_type type,
		      const enum pd_data_msg_type msg)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

	/* Clear any previous TX status before sending a new message */
	atomic_clear_bit(pe->flags, PE_FLAGS_TX_COMPLETE);
	prl_send_data_msg(dev, type, msg);
}

/**
 * @brief Send a Power Delivery Control Message
 */
void pe_send_ctrl_msg(const struct device *dev, const enum pd_packet_type type,
		      const enum pd_ctrl_msg_type msg)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

	/* Clear any previous TX status before sending a new message */
	atomic_clear_bit(pe->flags, PE_FLAGS_TX_COMPLETE);
	prl_send_ctrl_msg(dev, type, msg);
}

/**
 * @brief Request desired voltage from source.
 */
void pe_send_request_msg(const struct device *dev, const uint32_t rdo)
{
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_tx_t *prl_tx = data->prl_tx;
	struct pd_msg *msg = &prl_tx->emsg;
	uint8_t rdo_bytes[4];

	msg->len = sizeof(rdo);
	sys_put_le32(rdo, rdo_bytes);
	memcpy(msg->data, rdo_bytes, msg->len);
	pe_send_data_msg(dev, PD_PACKET_SOP, PD_DATA_REQUEST);
}
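
/*
 * Illustrative flow (sink only, conceptual sketch): PE_SNK_Select_Capability
 * obtains the Request Data Object from the Device Policy Manager and passes
 * it here:
 *
 *	uint32_t rdo = policy_get_request_data_object(dev);
 *
 *	pe_send_request_msg(dev, rdo);
 *
 * The RDO is serialized little-endian into the message payload above.
 */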

/**
 * @brief Transitions state after receiving an extended message.
 */
void extended_message_not_supported(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_rx_t *prl_rx = data->prl_rx;
	uint32_t *payload = (uint32_t *)prl_rx->emsg.data;
	union pd_ext_header ext_header;

	ext_header.raw_value = *payload;

	if (ext_header.chunked && ext_header.data_size > PD_MAX_EXTENDED_MSG_CHUNK_LEN) {
		pe_set_state(dev, PE_CHUNK_RECEIVED);
	} else {
		pe_set_state(dev, PE_SEND_NOT_SUPPORTED);
	}
}

/**
 * @brief Check if a specific control message was received
 */
bool received_control_message(const struct device *dev, const union pd_header header,
			      const enum pd_ctrl_msg_type mt)
{
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_rx_t *prl_rx = data->prl_rx;

	if (prl_rx->emsg.len == 0 && header.message_type == mt && header.extended == 0) {
		return true;
	}

	return false;
}

/**
 * @brief Check if a specific data message was received
 */
bool received_data_message(const struct device *dev, const union pd_header header,
			   const enum pd_data_msg_type mt)
{
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_rx_t *prl_rx = data->prl_rx;

	if (prl_rx->emsg.len > 0 && header.message_type == mt && header.extended == 0) {
		return true;
	}

	return false;
}

/**
 * @brief Check a DPM policy
 */
bool policy_check(const struct device *dev, const enum usbc_policy_check_t pc)
{
	struct usbc_port_data *data = dev->data;

	if (data->policy_cb_check) {
		return data->policy_cb_check(dev, pc);
	} else {
		return false;
	}
}
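
/*
 * Illustrative example (assumed DPM setup code, not part of this file):
 * the check callback is registered by the Device Policy Manager through
 * the public API, e.g.:
 *
 *	usbc_set_policy_cb_check(dev, dpm_policy_check);
 *
 * When no callback is registered, the check fails safe and returns false.
 */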

/**
 * @brief Notify the DPM of a policy change
 */
void policy_notify(const struct device *dev, const enum usbc_policy_notify_t notify)
{
	struct usbc_port_data *data = dev->data;

	if (data->policy_cb_notify) {
		data->policy_cb_notify(dev, notify);
	}
}

/**
 * @brief Notify the DPM of a WAIT message reception
 */
bool policy_wait_notify(const struct device *dev, const enum usbc_policy_wait_t notify)
{
	struct usbc_port_data *data = dev->data;

	if (data->policy_cb_wait_notify) {
		return data->policy_cb_wait_notify(dev, notify);
	}

	return false;
}

#ifdef CONFIG_USBC_CSM_SINK_ONLY

/**
 * @brief Get a Request Data Object from the DPM
 */
uint32_t policy_get_request_data_object(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;

	/* This callback must be implemented */
	__ASSERT(data->policy_cb_get_rdo != NULL, "Callback pointer should not be NULL");

	return data->policy_cb_get_rdo(dev);
}

/**
 * @brief Send the received source caps to the DPM
 */
void policy_set_src_cap(const struct device *dev, const uint32_t *pdos, const int num_pdos)
{
	struct usbc_port_data *data = dev->data;

	if (data->policy_cb_set_src_cap) {
		data->policy_cb_set_src_cap(dev, pdos, num_pdos);
	}
}

/**
 * @brief Check if the sink is at the default level
 */
bool policy_is_snk_at_default(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;

	if (data->policy_cb_is_snk_at_default) {
		return data->policy_cb_is_snk_at_default(dev);
	}

	return true;
}

/**
 * @brief Get sink caps from the DPM
 */
void policy_get_snk_cap(const struct device *dev, uint32_t **pdos, int *num_pdos)
{
	struct usbc_port_data *data = dev->data;

	/* This callback must be implemented */
	__ASSERT(data->policy_cb_get_snk_cap != NULL, "Callback pointer should not be NULL");

	data->policy_cb_get_snk_cap(dev, pdos, num_pdos);
}

#else /* CONFIG_USBC_CSM_SOURCE_ONLY */

/**
 * @brief Send the received sink caps to the DPM
 */
void policy_set_port_partner_snk_cap(const struct device *dev,
				     const uint32_t *pdos,
				     const int num_pdos)
{
	struct usbc_port_data *data = dev->data;

	if (data->policy_cb_set_port_partner_snk_cap) {
		data->policy_cb_set_port_partner_snk_cap(dev, pdos, num_pdos);
	}
}

/**
 * @brief Check if Sink Request can be met by DPM
 */
enum usbc_snk_req_reply_t policy_check_sink_request(const struct device *dev,
						    const uint32_t request_msg)
{
	struct usbc_port_data *data = dev->data;

	/* This callback must be implemented */
	__ASSERT(data->policy_cb_check_sink_request != NULL,
		 "Callback pointer should not be NULL");

	return data->policy_cb_check_sink_request(dev, request_msg);
}

/**
 * @brief Check if the present contract is still valid
 */
bool policy_present_contract_is_valid(const struct device *dev,
				      const uint32_t present_contract)
{
	struct usbc_port_data *data = dev->data;

	/* This callback must be implemented */
	__ASSERT(data->policy_present_contract_is_valid != NULL,
		 "Callback pointer should not be NULL");

	return data->policy_present_contract_is_valid(dev, present_contract);
}

/**
 * @brief Check if the power supply is ready
 */
bool policy_is_ps_ready(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;

	/* This callback must be implemented */
	__ASSERT(data->policy_is_ps_ready != NULL,
		 "Callback pointer should not be NULL");

	return data->policy_is_ps_ready(dev);
}

/**
 * @brief Ask the DPM to change the Source Caps.
 *	  Returns true if source caps have been updated, else false
 */
bool policy_change_src_caps(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;

	if (data->policy_change_src_caps == NULL) {
		return false;
	}

	return data->policy_change_src_caps(dev);
}

#endif /* CONFIG_USBC_CSM_SINK_ONLY */

/**
 * @brief PE_DRS_Evaluate_Swap Entry state
 */
static void pe_drs_evaluate_swap_entry(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	/* Get evaluation of Data Role Swap request from Device Policy Manager */
	if (policy_check(dev, (pe->data_role == TC_ROLE_UFP) ? CHECK_DATA_ROLE_SWAP_TO_DFP
							     : CHECK_DATA_ROLE_SWAP_TO_UFP)) {
		/*
		 * PE_DRS_DFP_UFP_Accept_Swap and PE_DRS_UFP_DFP_Accept_Swap
		 * State embedded here
		 */
		/* Send Accept message */
		pe_send_ctrl_msg(dev, PD_PACKET_SOP, PD_CTRL_ACCEPT);
	} else {
		/*
		 * PE_DRS_DFP_UFP_Reject_Swap and PE_DRS_UFP_DFP_Reject_Swap
		 * State embedded here
		 */
		/* Send Reject message */
		pe_send_ctrl_msg(dev, PD_PACKET_SOP, PD_CTRL_REJECT);
	}
}

/**
 * @brief PE_DRS_Evaluate_Swap Run state
 */
static void pe_drs_evaluate_swap_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_tx_t *prl_tx = data->prl_tx;
	struct protocol_layer_rx_t *prl_rx = data->prl_rx;

	if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_TX_COMPLETE)) {
		/* Only update data roles if last message sent was Accept */
		if (prl_tx->msg_type == PD_CTRL_ACCEPT) {
			/* Update Data Role */
			pe_set_data_role(dev, (pe->data_role == TC_ROLE_UFP)
					 ? TC_ROLE_DFP : TC_ROLE_UFP);
			/* Inform Device Policy Manager of Data Role Change */
			policy_notify(dev, (pe->data_role == TC_ROLE_UFP) ? DATA_ROLE_IS_UFP
									  : DATA_ROLE_IS_DFP);
		}
		pe_set_ready_state(dev);
	} else if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_DISCARDED)) {
		/*
		 * Inform Device Policy Manager that the message was
		 * discarded
		 */
		policy_notify(dev, MSG_DISCARDED);
		pe_send_soft_reset(dev, prl_rx->emsg.type);
	}
}

/**
 * @brief PE_DRS_Send_Swap Entry state
 *	  NOTE: 8.3.3.18.1.5 PE_DRS_DFP_UFP_Send_Swap State
 *		8.3.3.18.2.5 PE_DRS_UFP_DFP_Send_Swap State
 */
static void pe_drs_send_swap_entry(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	/* Send Swap DR message */
	pe_send_ctrl_msg(dev, PD_PACKET_SOP, PD_CTRL_DR_SWAP);
}

/**
 * @brief PE_DRS_Send_Swap Run state
 *	  NOTE: Sender Response Timer is handled in super state.
 */
static void pe_drs_send_swap_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_rx_t *prl_rx = data->prl_rx;
	union pd_header header;

	if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_RECEIVED)) {
		header = prl_rx->emsg.header;
		if (received_control_message(dev, header, PD_CTRL_REJECT)) {
			/*
			 * Inform Device Policy Manager that Data Role Swap
			 * was Rejected
			 */
			policy_notify(dev, MSG_REJECTED_RECEIVED);
		} else if (received_control_message(dev, header, PD_CTRL_WAIT)) {
			/*
			 * Inform Device Policy Manager that Data Role Swap
			 * needs to Wait
			 */
			if (policy_wait_notify(dev, WAIT_DATA_ROLE_SWAP)) {
				atomic_set_bit(pe->flags, PE_FLAGS_WAIT_DATA_ROLE_SWAP);
				usbc_timer_start(&pe->pd_t_wait_to_resend);
			}
		} else if (received_control_message(dev, header, PD_CTRL_ACCEPT)) {
			/* Update Data Role */
			pe->data_role = (pe->data_role == TC_ROLE_UFP) ? TC_ROLE_DFP : TC_ROLE_UFP;
			/* Notify TCPC of role update */
			tcpc_set_roles(data->tcpc, pe->power_role, pe->data_role);
			/* Inform Device Policy Manager of Data Role Change */
			policy_notify(dev, (pe->data_role == TC_ROLE_UFP) ? DATA_ROLE_IS_UFP
									  : DATA_ROLE_IS_DFP);
		} else {
			/*
			 * A Protocol Error during a Data Role Swap when the
			 * DFP/UFP roles are changing shall directly trigger
			 * a Type-C Error Recovery.
			 */
			usbc_request(dev, REQUEST_TC_ERROR_RECOVERY);
			return;
		}

		/* return to ready state */
		pe_set_ready_state(dev);
		return;
	} else if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_DISCARDED)) {
		/*
		 * Inform Device Policy Manager that the message
		 * was discarded
		 */
		policy_notify(dev, MSG_DISCARDED);
		pe_set_ready_state(dev);
		return;
	}
}

/**
 * @brief PE_Get_Sink_Cap Entry state
 *	  NOTE: 8.3.3.18.7.1 PE_DR_SRC_Get_Source_Cap State
 *		8.3.3.18.9.1 PE_DR_SNK_Get_Sink_Cap State
 */
void pe_get_sink_cap_entry(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	/*
	 * On entry to the PE_DR_SNK_Get_Sink_Cap state the Policy Engine
	 * Shall send a Get_Sink_Cap Message and initialize and run the
	 * SenderResponseTimer.
	 */
	pe_send_ctrl_msg(dev, PD_PACKET_SOP, PD_CTRL_GET_SINK_CAP);
	/* Initialize Submachine */
	pe->submachine = SM_WAIT_FOR_TX;
}

/**
 * @brief PE_Get_Sink_Cap Run state
 *	  NOTE: Sender Response Timer is handled in super state.
 */
void pe_get_sink_cap_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_rx_t *prl_rx = data->prl_rx;
	union pd_header header;

	switch (pe->submachine) {
	case SM_WAIT_FOR_TX:
		if (!atomic_test_and_clear_bit(pe->flags, PE_FLAGS_TX_COMPLETE)) {
			break;
		}
		pe->submachine = SM_WAIT_FOR_RX;
		/* fall through */
	case SM_WAIT_FOR_RX:
		if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_RECEIVED)) {
			header = prl_rx->emsg.header;

			if (prl_rx->emsg.type == PD_PACKET_SOP) {
				if (received_data_message(dev, header, PD_DATA_SINK_CAP)) {
#ifdef CONFIG_USBC_CSM_SOURCE_ONLY
					uint32_t *pdos = (uint32_t *)prl_rx->emsg.data;
					uint32_t num_pdos =
						PD_CONVERT_BYTES_TO_PD_HEADER_COUNT(prl_rx->emsg.len);

					policy_set_port_partner_snk_cap(dev, pdos, num_pdos);
#endif
					pe_set_ready_state(dev);
					return;
				} else if (received_control_message(dev, header, PD_CTRL_REJECT) ||
					   received_control_message(dev, header,
								    PD_CTRL_NOT_SUPPORTED)) {
					pe_set_ready_state(dev);
					return;
				}
				/* Unexpected messages fall through to soft reset */
			}
			pe_send_soft_reset(dev, PD_PACKET_SOP);
			return;
		}
		/*
		 * Inform Device Policy Manager that the message
		 * was discarded
		 */
		else if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_DISCARDED)) {
			policy_notify(dev, MSG_DISCARDED);
			pe_set_ready_state(dev);
			return;
		}
	}
}
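
/**
 * @brief PE_SUSPEND Entry state
 */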
static void pe_suspend_entry(void *obj)
{
	LOG_INF("PE_SUSPEND");
}
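
/**
 * @brief PE_SUSPEND Run state
 */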
static void pe_suspend_run(void *obj)
{
	/* DO NOTHING */
}

/**
 * @brief The PE_SOFT_RESET state has two embedded states
 *	  that handle sending an accept message.
 */
enum pe_soft_reset_submachine_states {
	/* Send Accept message sub state */
	PE_SOFT_RESET_RUN_SEND_ACCEPT_MSG,
	/* Wait for Accept message to be sent or an error sub state */
	PE_SOFT_RESET_RUN_SEND_ACCEPT_MSG_COMPLETE
};

/**
 * @brief 8.3.3.4.2.2 PE_SNK_Soft_Reset State
 */
static void pe_soft_reset_entry(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	/* Reset the protocol layer */
	prl_reset(dev);

	/* Initialize PE Submachine */
	pe->submachine = PE_SOFT_RESET_RUN_SEND_ACCEPT_MSG;
}
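
/**
 * @brief PE_Soft_Reset Run State
 */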
static void pe_soft_reset_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	if (!prl_is_running(dev)) {
		return;
	}

	switch (pe->submachine) {
	case PE_SOFT_RESET_RUN_SEND_ACCEPT_MSG:
		/* Send Accept message to SOP */
		pe_send_ctrl_msg(dev, PD_PACKET_SOP, PD_CTRL_ACCEPT);
		/* Move to next substate */
		pe->submachine = PE_SOFT_RESET_RUN_SEND_ACCEPT_MSG_COMPLETE;
		break;
	case PE_SOFT_RESET_RUN_SEND_ACCEPT_MSG_COMPLETE:
		/*
		 * The Policy Engine Shall transition to the
		 * PE_SNK_Wait_for_Capabilities state when:
		 *	1: Accept message sent to SOP
		 */
		if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_TX_COMPLETE)) {
			pe_set_state(dev, PE_SNK_WAIT_FOR_CAPABILITIES);
		} else if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_PROTOCOL_ERROR)) {
			/*
			 * The Policy Engine Shall transition to the
			 * PE_SNK_Hard_Reset state when:
			 *	1: Protocol Layer indicates that a
			 *	   transmission error has occurred.
			 */
			pe_set_state(dev, PE_SNK_HARD_RESET);
		}
		break;
	}
}

/**
 * @brief PE_Send_Soft_Reset Entry State
 *	  NOTE: Sender Response Timer is handled in super state.
 */
static void pe_send_soft_reset_entry(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	LOG_INF("PE_SNK_Send_Soft_Reset");

	/* Reset Protocol Layer */
	prl_reset(dev);
	atomic_set_bit(pe->flags, PE_FLAGS_SEND_SOFT_RESET);
}

/**
 * @brief PE_Send_Soft_Reset Run State
 */
static void pe_send_soft_reset_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_rx_t *prl_rx = data->prl_rx;
	union pd_header header;

	if (prl_is_running(dev) == false) {
		return;
	}

	if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_SEND_SOFT_RESET)) {
		/* Send Soft Reset message */
		pe_send_ctrl_msg(dev, pe->soft_reset_sop, PD_CTRL_SOFT_RESET);
		return;
	}

	if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_DISCARDED)) {
		/* Inform Device Policy Manager that the message was discarded */
		policy_notify(dev, MSG_DISCARDED);
		pe_set_ready_state(dev);
	} else if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_RECEIVED)) {
		/*
		 * The Policy Engine Shall transition to the PE_SNK_Wait_for_Capabilities
		 * state when:
		 *	1: An Accept Message has been received on SOP
		 */
		header = prl_rx->emsg.header;

		if (received_control_message(dev, header, PD_CTRL_ACCEPT)) {
			pe_set_state(dev, PE_SNK_WAIT_FOR_CAPABILITIES);
		}
	} else if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_PROTOCOL_ERROR)) {
		/*
		 * The Policy Engine Shall transition to the PE_SNK_Hard_Reset state when:
		 *	1: A SenderResponseTimer timeout occurs (Handled in Super State)
		 *	2: Or the Protocol Layer indicates that a transmission error has occurred
		 */
		pe_set_state(dev, PE_SNK_HARD_RESET);
	}
}

/**
 * @brief 8.3.3.6.2.1 PE_SNK_Send_Not_Supported State
 */
static void pe_send_not_supported_entry(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	LOG_INF("PE_Not_Supported");

	/* Notify the Device Policy Manager of unsupported message reception */
	policy_notify(dev, MSG_NOT_SUPPORTED_RECEIVED);

	/* Request the Protocol Layer to send a Not_Supported or Reject Message. */
	if (prl_get_rev(dev, PD_PACKET_SOP) > PD_REV20) {
		pe_send_ctrl_msg(dev, PD_PACKET_SOP, PD_CTRL_NOT_SUPPORTED);
	} else {
		pe_send_ctrl_msg(dev, PD_PACKET_SOP, PD_CTRL_REJECT);
	}
}
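
/**
 * @brief PE_Send_Not_Supported Run State
 */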
static void pe_send_not_supported_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	if (atomic_test_bit(pe->flags, PE_FLAGS_TX_COMPLETE) ||
	    atomic_test_bit(pe->flags, PE_FLAGS_MSG_DISCARDED)) {
		atomic_clear_bit(pe->flags, PE_FLAGS_TX_COMPLETE);
		atomic_clear_bit(pe->flags, PE_FLAGS_MSG_DISCARDED);
		pe_set_ready_state(dev);
	}
}

/**
 * @brief 8.3.3.6.2.3 PE_SNK_Chunk_Received State
 */
static void pe_chunk_received_entry(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;

	LOG_INF("PE_SNK_Chunk_Received");

	/*
	 * On entry to the PE_SNK_Chunk_Received state, the Policy Engine
	 * Shall initialize and run the ChunkingNotSupportedTimer.
	 */
	usbc_timer_start(&pe->pd_t_chunking_not_supported);
}

/**
 * @brief PE_Chunk_Received Run State
 */
static void pe_chunk_received_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	/*
	 * The Policy Engine Shall transition to PE_SNK_Send_Not_Supported
	 * when:
	 *	1: The ChunkingNotSupportedTimer has timed out.
	 */
	if (usbc_timer_expired(&pe->pd_t_chunking_not_supported)) {
		pe_set_state(dev, PE_SEND_NOT_SUPPORTED);
	}
}

/**
 * @brief Super State for any message that requires
 *	  Sender Response Timer functionality
 */
static void pe_sender_response_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;
	enum usbc_pe_state current_state = pe_get_state(dev);

	/* Start the Sender Response Timer after the message is sent */
	if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_TX_COMPLETE)) {
		/* Start Sender Response Timer */
		usbc_timer_start(&pe->pd_t_sender_response);
	}

	/* Check if the Sender Response Timer has expired */
	if (usbc_timer_expired(&pe->pd_t_sender_response)) {
		/*
		 * Handle Sender Response Timeouts
		 */
		switch (current_state) {
#ifdef CONFIG_USBC_CSM_SINK_ONLY
		/* Sink states */
		case PE_SNK_SELECT_CAPABILITY:
			pe_set_state(dev, PE_SNK_HARD_RESET);
			break;
		case PE_SNK_GET_SOURCE_CAP:
			pe_set_state(dev, PE_SNK_READY);
			break;
#else
		/* Source states */
		case PE_SRC_DISCOVERY:
			/*
			 * The Policy Engine Shall go to the PE_SRC_Disabled state when:
			 *	1) The Port Partners have not been PD Connected
			 *	2) And the NoResponseTimer times out
			 *	3) And the HardResetCounter > nHardResetCount.
			 */
			if ((atomic_test_bit(pe->flags, PE_FLAGS_HAS_BEEN_PD_CONNECTED) == false) &&
			    pe->hard_reset_counter > PD_N_HARD_RESET_COUNT) {
				pe_set_state(dev, PE_SUSPEND);
			}
			break;
		case PE_SRC_SEND_CAPABILITIES:
			/*
			 * The Policy Engine Shall go to the ErrorRecovery state when:
			 *	1) The Port Partners have previously been PD Connected
			 *	2) And the NoResponseTimer times out
			 *	3) And the HardResetCounter > nHardResetCount
			 */
			if (atomic_test_bit(pe->flags, PE_FLAGS_HAS_BEEN_PD_CONNECTED) &&
			    pe->hard_reset_counter > PD_N_HARD_RESET_COUNT) {
				usbc_request(dev, REQUEST_TC_ERROR_RECOVERY);
			}
			/*
			 * The Policy Engine Shall transition to the PE_SRC_Hard_Reset
			 * state when:
			 *	1) The SenderResponseTimer times out
			 */
			else {
				pe_set_state(dev, PE_SRC_HARD_RESET);
			}
			break;
		case PE_GET_SINK_CAP:
			pe_send_soft_reset(dev, PD_PACKET_SOP);
			break;
#endif
		/*
		 * Common states:
		 * Could transition to a Sink or Source state,
		 * depending on the current Power Role
		 */
		case PE_SEND_SOFT_RESET:
			pe_set_state(dev, PE_SNK_HARD_RESET);
			break;
		case PE_DRS_SEND_SWAP:
			pe_set_state(dev, PE_SNK_READY);
			break;

		/* This should not happen. Implementation error */
		default:
			LOG_INF("Unhandled Sender Response Timeout State!");
		}
	}
}
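
/**
 * @brief Super State exit for states that use the Sender Response Timer
 */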
static void pe_sender_response_exit(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;

	/* Stop Sender Response Timer */
	usbc_timer_stop(&pe->pd_t_sender_response);
}

/**
 * @brief Policy engine State table
 */
static const struct smf_state pe_states[PE_STATE_COUNT] = {
	/* PE Super States */
	[PE_SENDER_RESPONSE_PARENT] = SMF_CREATE_STATE(
		NULL,
		pe_sender_response_run,
		pe_sender_response_exit,
		NULL,
		NULL),
#ifdef CONFIG_USBC_CSM_SOURCE_ONLY
	[PE_SRC_HARD_RESET_PARENT] = SMF_CREATE_STATE(
		pe_src_hard_reset_parent_entry,
		pe_src_hard_reset_parent_run,
		pe_src_hard_reset_parent_exit,
		NULL,
		NULL),
#endif
#ifdef CONFIG_USBC_CSM_SINK_ONLY
	[PE_SNK_STARTUP] = SMF_CREATE_STATE(
		pe_snk_startup_entry,
		pe_snk_startup_run,
		NULL,
		NULL,
		NULL),
	[PE_SNK_DISCOVERY] = SMF_CREATE_STATE(
		pe_snk_discovery_entry,
		pe_snk_discovery_run,
		NULL,
		NULL,
		NULL),
	[PE_SNK_WAIT_FOR_CAPABILITIES] = SMF_CREATE_STATE(
		pe_snk_wait_for_capabilities_entry,
		pe_snk_wait_for_capabilities_run,
		pe_snk_wait_for_capabilities_exit,
		NULL,
		NULL),
	[PE_SNK_EVALUATE_CAPABILITY] = SMF_CREATE_STATE(
		pe_snk_evaluate_capability_entry,
		NULL,
		NULL,
		NULL,
		NULL),
	[PE_SNK_SELECT_CAPABILITY] = SMF_CREATE_STATE(
		pe_snk_select_capability_entry,
		pe_snk_select_capability_run,
		NULL,
		&pe_states[PE_SENDER_RESPONSE_PARENT],
		NULL),
	[PE_SNK_READY] = SMF_CREATE_STATE(
		pe_snk_ready_entry,
		pe_snk_ready_run,
		pe_snk_ready_exit,
		NULL,
		NULL),
	[PE_SNK_HARD_RESET] = SMF_CREATE_STATE(
		pe_snk_hard_reset_entry,
		pe_snk_hard_reset_run,
		NULL,
		NULL,
		NULL),
	[PE_SNK_TRANSITION_TO_DEFAULT] = SMF_CREATE_STATE(
		pe_snk_transition_to_default_entry,
		pe_snk_transition_to_default_run,
		NULL,
		NULL,
		NULL),
	[PE_SNK_GIVE_SINK_CAP] = SMF_CREATE_STATE(
		pe_snk_give_sink_cap_entry,
		pe_snk_give_sink_cap_run,
		NULL,
		NULL,
		NULL),
	[PE_SNK_GET_SOURCE_CAP] = SMF_CREATE_STATE(
		pe_snk_get_source_cap_entry,
		pe_snk_get_source_cap_run,
		NULL,
		&pe_states[PE_SENDER_RESPONSE_PARENT],
		NULL),
	[PE_SNK_TRANSITION_SINK] = SMF_CREATE_STATE(
		pe_snk_transition_sink_entry,
		pe_snk_transition_sink_run,
		pe_snk_transition_sink_exit,
		NULL,
		NULL),
#else
	[PE_SRC_STARTUP] = SMF_CREATE_STATE(
		pe_src_startup_entry,
		pe_src_startup_run,
		NULL,
		NULL,
		NULL),
	[PE_SRC_DISCOVERY] = SMF_CREATE_STATE(
		pe_src_discovery_entry,
		pe_src_discovery_run,
		pe_src_discovery_exit,
		&pe_states[PE_SENDER_RESPONSE_PARENT],
		NULL),
	[PE_SRC_SEND_CAPABILITIES] = SMF_CREATE_STATE(
		pe_src_send_capabilities_entry,
		pe_src_send_capabilities_run,
		NULL,
		&pe_states[PE_SENDER_RESPONSE_PARENT],
		NULL),
	[PE_SRC_NEGOTIATE_CAPABILITY] = SMF_CREATE_STATE(
		pe_src_negotiate_capability_entry,
		NULL,
		NULL,
		NULL,
		NULL),
	[PE_SRC_CAPABILITY_RESPONSE] = SMF_CREATE_STATE(
		pe_src_capability_response_entry,
		pe_src_capability_response_run,
		NULL,
		NULL,
		NULL),
	[PE_SRC_TRANSITION_SUPPLY] = SMF_CREATE_STATE(
		pe_src_transition_supply_entry,
		pe_src_transition_supply_run,
		pe_src_transition_supply_exit,
		NULL,
		NULL),
	[PE_SRC_READY] = SMF_CREATE_STATE(
		pe_src_ready_entry,
		pe_src_ready_run,
		pe_src_ready_exit,
		NULL,
		NULL),
	[PE_SRC_DISABLED] = SMF_CREATE_STATE(
		pe_src_disabled_entry,
		NULL,
		NULL,
		NULL,
		NULL),
	[PE_SRC_TRANSITION_TO_DEFAULT] = SMF_CREATE_STATE(
		pe_src_transition_to_default_entry,
		pe_src_transition_to_default_run,
		pe_src_transition_to_default_exit,
		NULL,
		NULL),
	[PE_SRC_HARD_RESET_RECEIVED] = SMF_CREATE_STATE(
		NULL,
		NULL,
		NULL,
		&pe_states[PE_SRC_HARD_RESET_PARENT],
		NULL),
	[PE_SRC_HARD_RESET] = SMF_CREATE_STATE(
		pe_src_hard_reset_entry,
		NULL,
		NULL,
		&pe_states[PE_SRC_HARD_RESET_PARENT],
		NULL),
#endif
	[PE_GET_SINK_CAP] = SMF_CREATE_STATE(
		pe_get_sink_cap_entry,
		pe_get_sink_cap_run,
		NULL,
		&pe_states[PE_SENDER_RESPONSE_PARENT],
		NULL),
	[PE_SEND_SOFT_RESET] = SMF_CREATE_STATE(
		pe_send_soft_reset_entry,
		pe_send_soft_reset_run,
		NULL,
		&pe_states[PE_SENDER_RESPONSE_PARENT],
		NULL),
	[PE_SOFT_RESET] = SMF_CREATE_STATE(
		pe_soft_reset_entry,
		pe_soft_reset_run,
		NULL,
		NULL,
		NULL),
	[PE_SEND_NOT_SUPPORTED] = SMF_CREATE_STATE(
		pe_send_not_supported_entry,
		pe_send_not_supported_run,
		NULL,
		NULL,
		NULL),
	[PE_DRS_EVALUATE_SWAP] = SMF_CREATE_STATE(
		pe_drs_evaluate_swap_entry,
		pe_drs_evaluate_swap_run,
		NULL,
		NULL,
		NULL),
	[PE_DRS_SEND_SWAP] = SMF_CREATE_STATE(
		pe_drs_send_swap_entry,
		pe_drs_send_swap_run,
		NULL,
		&pe_states[PE_SENDER_RESPONSE_PARENT],
		NULL),
	[PE_CHUNK_RECEIVED] = SMF_CREATE_STATE(
		pe_chunk_received_entry,
		pe_chunk_received_run,
		NULL,
		NULL,
		NULL),
	[PE_SUSPEND] = SMF_CREATE_STATE(
		pe_suspend_entry,
		pe_suspend_run,
		NULL,
		NULL,
		NULL),
};
BUILD_ASSERT(ARRAY_SIZE(pe_states) == PE_STATE_COUNT);
1457