/*
 * Copyright (c) 2023 The Chromium OS Authors
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief USB-C Power Policy Engine (PE)
 *
 * The information in this file was taken from the USB PD
 * Specification Revision 3.0, Version 2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/sys/byteorder.h>
#include <zephyr/smf.h>
#include <zephyr/usb_c/usbc.h>
#include <zephyr/drivers/usb_c/usbc_pd.h>

#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(usbc_stack, CONFIG_USBC_STACK_LOG_LEVEL);

#include "usbc_stack.h"

/**
 * @brief Initialize the Source Policy Engine layer
 */
void pe_src_init(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

	/* Initial role of source is DFP */
	pe_set_data_role(dev, TC_ROLE_DFP);

	/* Reject Sink Request by default */
	pe->snk_request_reply = SNK_REQUEST_REJECT;

	/* Initialize timers */
	usbc_timer_init(&pe->pd_t_typec_send_source_cap, PD_T_TYPEC_SEND_SOURCE_CAP_MIN_MS);
	usbc_timer_init(&pe->pd_t_ps_hard_reset, PD_T_PS_HARD_RESET_MAX_MS);
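	/*
	 * Spec reference (USB PD r3.0): tTypeCSendSourceCap is
	 * 100-200 ms and tPSHardReset is 25-35 ms, hence the MIN and
	 * MAX bounds, respectively, used above.
	 */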

	/* Go to startup state */
	pe_set_state(dev, PE_SRC_STARTUP);
}

/**
 * @brief Handle source-specific DPM requests
 */
bool source_dpm_requests(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

	if (pe->dpm_request == REQUEST_GET_SNK_CAPS) {
		atomic_set_bit(pe->flags, PE_FLAGS_DPM_INITIATED_AMS);
		pe_set_state(dev, PE_GET_SINK_CAP);
		return true;
	} else if (pe->dpm_request == REQUEST_PE_GOTO_MIN) {
		atomic_set_bit(pe->flags, PE_FLAGS_DPM_INITIATED_AMS);
		pe_set_state(dev, PE_SRC_TRANSITION_SUPPLY);
		return true;
	}

	return false;
}
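
/*
 * A minimal sketch of how an application-level Device Policy Manager
 * might raise one of the requests handled above, assuming the public
 * usbc_request() API from <zephyr/usb_c/usbc.h> (the port device name
 * is illustrative):
 *
 *	// Ask the attached Sink for its capabilities; the PE picks
 *	// this up in source_dpm_requests() and enters PE_GET_SINK_CAP.
 *	usbc_request(usbc_port_dev, REQUEST_GET_SNK_CAPS);
 */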

/**
 * @brief Send Source Caps to Sink
 */
static void send_src_caps(struct policy_engine *pe)
{
	const struct device *dev = pe->dev;
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_tx_t *prl_tx = data->prl_tx;
	struct pd_msg *msg = &prl_tx->emsg;
	const uint32_t *pdos;
	uint32_t num_pdos = 0;

	/* This callback must be implemented */
	__ASSERT(data->policy_cb_get_src_caps != NULL,
		 "Callback pointer should not be NULL");

	data->policy_cb_get_src_caps(dev, &pdos, &num_pdos);

	msg->len = PD_CONVERT_PD_HEADER_COUNT_TO_BYTES(num_pdos);
	memcpy(msg->data, pdos, msg->len);
	pe_send_data_msg(dev, PD_PACKET_SOP, PD_DATA_SOURCE_CAP);
}
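
/*
 * A minimal sketch of the policy_cb_get_src_caps callback asserted
 * above, advertising a single vSafe5V Fixed Supply PDO. The 5 V / 3 A
 * encoding (voltage in 50 mV units at bits 19:10, current in 10 mA
 * units at bits 9:0) follows the USB PD specification; the callback
 * and PDO names are illustrative, not part of this file.
 *
 *	static const uint32_t my_src_pdos[] = {
 *		// Fixed Supply: 5 V (100 * 50 mV), 3 A (300 * 10 mA)
 *		(100 << 10) | 300,
 *	};
 *
 *	static void my_get_src_caps(const struct device *dev,
 *				    const uint32_t **pdos,
 *				    uint32_t *num_pdos)
 *	{
 *		*pdos = my_src_pdos;
 *		*num_pdos = ARRAY_SIZE(my_src_pdos);
 *	}
 */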

/**
 * @brief 8.3.3.2.1 PE_SRC_Startup State
 */
void pe_src_startup_entry(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	LOG_INF("PE_SRC_Startup");

	/* Reset CapsCounter */
	pe->caps_counter = 0;

	/* Reset the protocol layer */
	prl_reset(dev);

	/* Set power role to Source */
	pe->power_role = TC_ROLE_SOURCE;

	/* Invalidate explicit contract */
	atomic_clear_bit(pe->flags, PE_FLAGS_EXPLICIT_CONTRACT);

	policy_notify(dev, NOT_PD_CONNECTED);
}

void pe_src_startup_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	/*
	 * Once the reset process completes, the Policy Engine Shall
	 * transition to the PE_SRC_Send_Capabilities state
	 */
	if (prl_is_running(dev)) {
		pe_set_state(dev, PE_SRC_SEND_CAPABILITIES);
	}
}

/**
 * @brief 8.3.3.2.2 PE_SRC_Discovery State
 */
void pe_src_discovery_entry(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;

	LOG_INF("PE_SRC_Discovery");

	/*
	 * Start the SourceCapabilityTimer in order to trigger sending a
	 * Source_Capabilities message
	 */
	usbc_timer_start(&pe->pd_t_typec_send_source_cap);
}

void pe_src_discovery_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	/*
	 * The Policy Engine Shall transition to the PE_SRC_Send_Capabilities state when:
	 * 1) The SourceCapabilityTimer times out
	 * 2) And CapsCounter ≤ nCapsCount
	 */
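	/* Spec reference: nCapsCount is 50 in the USB PD r3.0 specification */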
	if (usbc_timer_expired(&pe->pd_t_typec_send_source_cap) &&
	    pe->caps_counter <= PD_N_CAPS_COUNT) {
		pe_set_state(dev, PE_SRC_SEND_CAPABILITIES);
	}
}

void pe_src_discovery_exit(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;

	usbc_timer_stop(&pe->pd_t_typec_send_source_cap);
}

/**
 * @brief 8.3.3.2.3 PE_SRC_Send_Capabilities State
 */
void pe_src_send_capabilities_entry(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;

	/* Request present source capabilities from Device Policy Manager */
	send_src_caps(pe);
	/* Increment CapsCounter */
	pe->caps_counter++;
	/* Init submachine */
	pe->submachine = SM_WAIT_FOR_TX;

	LOG_INF("PE_SRC_Send_Capabilities");
}

void pe_src_send_capabilities_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_rx_t *prl_rx = data->prl_rx;

	switch (pe->submachine) {
	case SM_WAIT_FOR_TX:
		/*
		 * When the message is sent, the Policy Engine Shall:
		 * 1) Stop the NoResponseTimer.
		 * 2) Reset the HardResetCounter and CapsCounter to zero.
		 * 3) Initialize and run the SenderResponseTimer.
		 */
		if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_TX_COMPLETE)) {
			usbc_timer_stop(&pe->pd_t_no_response);
			pe->hard_reset_counter = 0;
			pe->caps_counter = 0;
			pe->submachine = SM_WAIT_FOR_RX;
		}
		/*
		 * The Policy Engine Shall transition to the PE_SRC_Discovery
		 * state when:
		 * 1) The Protocol Layer indicates that the Message has
		 *    not been sent
		 * 2) And we are presently not Connected.
		 */
		else if ((atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_XMIT_ERROR) ||
			  atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_DISCARDED)) &&
			 !atomic_test_bit(pe->flags, PE_FLAGS_PD_CONNECTED)) {
			pe_set_state(dev, PE_SRC_DISCOVERY);
		}
		break;
	case SM_WAIT_FOR_RX:
		/*
		 * The Policy Engine Shall transition to the
		 * PE_SRC_Negotiate_Capability state when:
		 * 1) A Request Message is received from the Sink.
		 */
		if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_RECEIVED)) {
			union pd_header header = prl_rx->emsg.header;

			if (received_data_message(dev, header, PD_DATA_REQUEST)) {
				/* Set to highest revision supported by both ports */
				prl_set_rev(dev, PD_PACKET_SOP,
					    MIN(PD_REV30, header.specification_revision));
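				/*
				 * e.g. a PD 2.0 sink reports revision 01b,
				 * so both ports then operate at PD 2.0
				 */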
				pe_set_state(dev, PE_SRC_NEGOTIATE_CAPABILITY);
			}
		}
		/*
		 * The Policy Engine Shall transition to the PE_SRC_Hard_Reset
		 * state when:
		 * 1) The SenderResponseTimer times out.
		 */
		else if (usbc_timer_expired(&pe->pd_t_sender_response)) {
			pe_set_state(dev, PE_SRC_HARD_RESET);
		}
		break;
	}
}
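
/*
 * For orientation, the successful capabilities exchange driven by the
 * two states above proceeds roughly as follows (GoodCRC handshakes are
 * handled by the Protocol Layer, not here):
 *
 *	Source                            Sink
 *	  |----- Source_Capabilities ----->|
 *	  |<----------- Request -----------|
 *	  |   -> PE_SRC_Negotiate_Capability
 */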

/**
 * @brief 8.3.3.2.4 PE_SRC_Negotiate_Capability State
 */
void pe_src_negotiate_capability_entry(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_rx_t *prl_rx = data->prl_rx;

	LOG_INF("PE_SRC_Negotiate_Capability");

	/* Get sink request */
	pe->snk_request = *(uint32_t *)prl_rx->emsg.data;

	/*
	 * Ask the Device Policy Manager to evaluate the Request
	 * from the Attached Sink.
	 */
	pe->snk_request_reply =
		policy_check_sink_request(dev, pe->snk_request);

	/*
	 * The Policy Engine Shall transition to the
	 * PE_SRC_Transition_Supply state when:
	 * 1) The Request can be met.
	 */
	if (pe->snk_request_reply == SNK_REQUEST_VALID) {
		pe_set_state(dev, PE_SRC_TRANSITION_SUPPLY);
	}
	/*
	 * The Policy Engine Shall transition to the
	 * PE_SRC_Capability_Response state when:
	 * 1) The Request cannot be met.
	 * 2) Or the Request can be met later from the Power Reserve.
	 */
	else {
		pe_set_state(dev, PE_SRC_CAPABILITY_RESPONSE);
	}
}
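
/*
 * A minimal sketch of the DPM-side evaluation behind
 * policy_check_sink_request(), assuming the single Fixed Supply PDO
 * from the earlier sketch. The RDO field offsets (Object Position at
 * bits 30:28, Operating Current in 10 mA units at bits 19:10) come
 * from the USB PD r3.0 specification; the helper name is illustrative.
 *
 *	static enum usbc_snk_req_reply_t
 *	my_check_sink_request(const struct device *dev, const uint32_t rdo)
 *	{
 *		uint32_t pos = (rdo >> 28) & 0x7;	// 1-based PDO index
 *		uint32_t op_ma = ((rdo >> 10) & 0x3ff) * 10;
 *
 *		// Only PDO #1 is offered, and it supplies at most 3 A
 *		if (pos != 1 || op_ma > 3000) {
 *			return SNK_REQUEST_REJECT;
 *		}
 *		return SNK_REQUEST_VALID;
 *	}
 */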

/**
 * @brief 8.3.3.2.5 PE_SRC_Transition_Supply State
 */
void pe_src_transition_supply_entry(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	LOG_INF("PE_SRC_Transition_Supply");

	/*
	 * If snk_request_reply is set, this state was entered
	 * from PE_SRC_Negotiate_Capability. So send Accept Message
	 * and inform the Device Policy Manager that it Shall transition
	 * the power supply to the Requested power level.
	 */
	if (pe->snk_request_reply == SNK_REQUEST_VALID) {
		pe_send_ctrl_msg(dev, PD_PACKET_SOP, PD_CTRL_ACCEPT);
		policy_notify(dev, TRANSITION_PS);
	}
	/*
	 * If snk_request_reply is not valid, this state was entered
	 * from PE_SRC_Ready. So send GotoMin Message.
	 */
	else {
		pe_send_ctrl_msg(dev, PD_PACKET_SOP, PD_CTRL_GOTO_MIN);
	}
}

void pe_src_transition_supply_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	/*
	 * The Policy Engine Shall transition to the PE_SRC_Ready state when:
	 * 1) The Device Policy Manager informs the Policy Engine that
	 *    the power supply is ready.
	 */
	if (atomic_test_bit(pe->flags, PE_FLAGS_TX_COMPLETE)) {
		if (policy_is_ps_ready(dev)) {
			pe_set_state(dev, PE_SRC_READY);
		}
	}
	/*
	 * The Policy Engine Shall transition to the PE_SRC_Hard_Reset
	 * state when:
	 * 1) A Protocol Error occurs.
	 */
	else if (atomic_test_bit(pe->flags, PE_FLAGS_PROTOCOL_ERROR)) {
		pe_set_state(dev, PE_SRC_HARD_RESET);
	}
}

void pe_src_transition_supply_exit(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	/* Send PS_RDY message */
	if (pe->snk_request_reply == SNK_REQUEST_VALID) {
		/* Clear request reply and reject by default */
		pe->snk_request_reply = SNK_REQUEST_REJECT;
		/* Send PS Ready */
		pe_send_ctrl_msg(dev, PD_PACKET_SOP, PD_CTRL_PS_RDY);
		/* Explicit Contract is now in place */
		atomic_set_bit(pe->flags, PE_FLAGS_EXPLICIT_CONTRACT);
		/* Update present contract */
		pe->present_contract = pe->snk_request;
	}
}

/**
 * @brief 8.3.3.2.6 PE_SRC_Ready State
 */
void pe_src_ready_entry(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	LOG_INF("PE_SRC_Ready");

	/*
	 * If the transition into PE_SRC_Ready is the result of a Protocol
	 * Error that has not caused a Soft Reset, then the notification to
	 * the Protocol Layer of the end of the AMS Shall Not be sent since
	 * there is a Message to be processed.
	 *
	 * Else, on entry to the PE_SRC_Ready state, the Source Shall notify
	 * the Protocol Layer of the end of the Atomic Message Sequence (AMS).
	 */
	if (atomic_test_and_clear_bit(pe->flags,
				      PE_FLAGS_PROTOCOL_ERROR_NO_SOFT_RESET)) {
		pe_dpm_end_ams(dev);
	}
}

void pe_src_ready_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_rx_t *prl_rx = data->prl_rx;

	/* Handle incoming messages */
	if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_RECEIVED)) {
		union pd_header header = prl_rx->emsg.header;

		/*
		 * Extended Message Requests
		 */
		if (header.extended) {
			extended_message_not_supported(dev);
		}
		/*
		 * Data Message Requests
		 */
		else if (header.number_of_data_objects > 0) {
			switch (header.message_type) {
			case PD_DATA_REQUEST:
				pe_set_state(dev, PE_SRC_NEGOTIATE_CAPABILITY);
				break;
			default:
				pe_set_state(dev, PE_SEND_NOT_SUPPORTED);
				break;
			}
		}
		/*
		 * Control Message Requests
		 */
		else {
			switch (header.message_type) {
			case PD_CTRL_GOOD_CRC:
				/* Do nothing */
				break;
			case PD_CTRL_NOT_SUPPORTED:
				/* Notify DPM */
				policy_notify(dev, MSG_NOT_SUPPORTED_RECEIVED);
				break;
			case PD_CTRL_PING:
				/* Do nothing */
				break;
			case PD_CTRL_GET_SOURCE_CAP:
				pe_set_state(dev, PE_SRC_SEND_CAPABILITIES);
				break;
			case PD_CTRL_DR_SWAP:
				pe_set_state(dev, PE_DRS_EVALUATE_SWAP);
				break;
			/*
			 * USB PD 3.0 6.8.1:
			 * Receiving an unexpected Message Shall be responded
			 * to with a Soft Reset Message.
			 */
			case PD_CTRL_ACCEPT:
			case PD_CTRL_REJECT:
			case PD_CTRL_WAIT:
			case PD_CTRL_PS_RDY:
				pe_send_soft_reset(dev, prl_rx->emsg.type);
				break;
			/*
			 * Receiving an unknown or unsupported Message
			 * Shall be responded to with a Not_Supported
			 * Message.
			 */
			default:
				pe_set_state(dev, PE_SEND_NOT_SUPPORTED);
				break;
			}
		}
	} else {
		/* Handle Source DPM Requests */
		source_dpm_requests(dev);
	}
}

void pe_src_ready_exit(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	/*
	 * If the Source is initiating an AMS, then notify the
	 * PRL that the first message in an AMS will follow.
	 */
	if (pe_dpm_initiated_ams(dev)) {
		prl_first_msg_notificaiton(dev);
	}
}

/**
 * @brief 8.3.3.2.11 PE_SRC_Transition_to_default State
 */
void pe_src_transition_to_default_entry(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	/*
	 * On entry to the PE_SRC_Transition_to_default state the
	 * Policy Engine Shall:
	 * 1: indicate to the Device Policy Manager that the power
	 *    supply Shall Hard Reset
	 * 2: request a reset of the local hardware
	 * 3: request the Device Policy Manager to set the Port
	 *    Data Role to DFP and turn off VCONN.
	 *
	 * NOTE: 1, 2 and VCONN off are done by the Device Policy Manager
	 *	 when it receives the HARD_RESET_RECEIVED notification.
	 */
	policy_notify(dev, HARD_RESET_RECEIVED);
	pe->data_role = TC_ROLE_DFP;
	policy_notify(dev, DATA_ROLE_IS_DFP);
}

void pe_src_transition_to_default_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	/*
	 * The Policy Engine Shall transition to the PE_SRC_Startup
	 * state when:
	 * 1: The Device Policy Manager indicates that the power
	 *    supply has reached the default level.
	 */
	if (policy_check(dev, CHECK_SRC_PS_AT_DEFAULT_LEVEL)) {
		pe_set_state(dev, PE_SRC_STARTUP);
	}
}

void pe_src_transition_to_default_exit(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	/*
	 * On exit from the PE_SRC_Transition_to_default state the
	 * Policy Engine Shall:
	 * 1: request the Device Policy Manager to turn on VCONN
	 * 2: inform the Protocol Layer that the Hard Reset is complete.
	 *
	 * NOTE: The Device Policy Manager turns on VCONN when it notifies
	 *	 the PE that the Power Supply is at the default level.
	 */
	prl_hard_reset_complete(dev);
}

/**
 * @brief 8.3.3.2.8 PE_SRC_Capability_Response State
 */
void pe_src_capability_response_entry(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	/*
	 * On entry to the PE_SRC_Capability_Response state the Policy Engine
	 * Shall request the Protocol Layer to send one of the following:
	 */

	/*
	 * 1: Reject Message – if the request cannot be met or the present
	 *    Contract is Invalid.
	 */
	if (pe->snk_request_reply == SNK_REQUEST_REJECT) {
		pe_send_ctrl_msg(dev, PD_PACKET_SOP, PD_CTRL_REJECT);
	}
	/*
	 * 2: Wait Message – if the request could be met later from the Power
	 *    Reserve. A Wait Message Shall Not be sent if the present Contract
	 *    is Invalid.
	 */
	else {
		pe_send_ctrl_msg(dev, PD_PACKET_SOP, PD_CTRL_WAIT);
	}
}

void pe_src_capability_response_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	/* Wait until the message has been sent */
	if (!atomic_test_and_clear_bit(pe->flags, PE_FLAGS_TX_COMPLETE)) {
		return;
	}

	/*
	 * The Policy Engine Shall transition to the PE_SRC_Ready state when:
	 * 1: There is an Explicit Contract AND
	 * 2: A Reject Message has been sent and the present Contract
	 *    is still Valid OR
	 * 3: A Wait Message has been sent.
	 */
	if (atomic_test_bit(pe->flags, PE_FLAGS_EXPLICIT_CONTRACT) &&
	    ((pe->snk_request_reply == SNK_REQUEST_REJECT &&
	      policy_present_contract_is_valid(dev, pe->present_contract)) ||
	     (pe->snk_request_reply == SNK_REQUEST_WAIT))) {
		pe_set_state(dev, PE_SRC_READY);
	}
	/*
	 * The Policy Engine Shall transition to the PE_SRC_Hard_Reset state
	 * when:
	 * 1: There is an Explicit Contract and
	 * 2: The Reject Message has been sent and the present Contract
	 *    is Invalid.
	 */
	else if (atomic_test_bit(pe->flags, PE_FLAGS_EXPLICIT_CONTRACT) &&
		 !policy_present_contract_is_valid(dev, pe->present_contract)) {
		pe_set_state(dev, PE_SRC_HARD_RESET);
	}
	/*
	 * The Policy Engine Shall transition to the PE_SRC_Wait_New_Capabilities
	 * state when:
	 * 1: There is no Explicit Contract and
	 * 2: A Reject Message has been sent or
	 * 3: A Wait Message has been sent.
	 */
	else {
		/* 8.3.3.2.13 PE_SRC_Wait_New_Capabilities embedded here */

		/*
		 * In the PE_SRC_Wait_New_Capabilities State the Device Policy
		 * Manager Should either decide to send no further Source
		 * Capabilities or Should send a different set of Source
		 * Capabilities. Continuing to send the same set of Source
		 * Capabilities could result in a live lock situation.
		 */

		/* Notify DPM to send a different set of Source Capabilities */
		if (policy_change_src_caps(dev)) {
			/* DPM will send a different set of Source Capabilities */
			pe_set_state(dev, PE_SRC_SEND_CAPABILITIES);
		} else {
			/*
			 * DPM cannot send a different set of Source
			 * Capabilities, so disable the port.
			 */
			pe_set_state(dev, PE_SUSPEND);
		}
	}
}

void pe_src_hard_reset_parent_entry(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;

	pe->submachine = SM_HARD_RESET_START;
}

void pe_src_hard_reset_parent_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	switch (pe->submachine) {
	case SM_HARD_RESET_START:
		/*
		 * Initialize and run the NoResponseTimer.
		 * Note that the NoResponseTimer Shall continue to run
		 * in every state until it is stopped or times out.
		 */
		usbc_timer_start(&pe->pd_t_no_response);

		/* Initialize and run the PSHardResetTimer */
		usbc_timer_start(&pe->pd_t_ps_hard_reset);

		pe->submachine = SM_HARD_RESET_WAIT;
		break;
	case SM_HARD_RESET_WAIT:
		/*
		 * The Policy Engine Shall transition to the
		 * PE_SRC_Transition_to_default state when:
		 * 1) The PSHardResetTimer times out.
		 */
		if (usbc_timer_expired(&pe->pd_t_ps_hard_reset)) {
			pe_set_state(dev, PE_SRC_TRANSITION_TO_DEFAULT);
		}
		break;
	}
}

void pe_src_hard_reset_parent_exit(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;

	/* Stop the PSHardResetTimer */
	usbc_timer_stop(&pe->pd_t_ps_hard_reset);
}

/**
 * @brief 8.3.3.2.9 PE_SRC_Hard_Reset State
 */
void pe_src_hard_reset_entry(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	/*
	 * On entry to the PE_SRC_Hard_Reset state the
	 * Policy Engine Shall:
	 */

	/*
	 * Request the generation of Hard Reset Signaling by
	 * the PHY Layer
	 */
	prl_execute_hard_reset(dev);
}