/*
 * Copyright (c) 2023 The Chromium OS Authors
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief USB-C Power Policy Engine (PE)
 *
 * The information in this file was taken from the USB PD
 * Specification Revision 3.0, Version 2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/sys/byteorder.h>
#include <zephyr/smf.h>
#include <zephyr/usb_c/usbc.h>
#include <zephyr/drivers/usb_c/usbc_pd.h>

#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(usbc_stack, CONFIG_USBC_STACK_LOG_LEVEL);

#include "usbc_stack.h"

/**
 * @brief Initialize the Source Policy Engine layer
 */
void pe_src_init(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

	/* Initial role of source is DFP */
	pe_set_data_role(dev, TC_ROLE_DFP);

	/* Reject Sink Request by default */
	pe->snk_request_reply = SNK_REQUEST_REJECT;

	/* Initialize timers */
	usbc_timer_init(&pe->pd_t_typec_send_source_cap, PD_T_TYPEC_SEND_SOURCE_CAP_MIN_MS);
	usbc_timer_init(&pe->pd_t_ps_hard_reset, PD_T_PS_HARD_RESET_MAX_MS);

	/* Go to the startup state */
	pe_set_state(dev, PE_SRC_STARTUP);
}

/**
 * @brief Handle source-specific DPM requests
 */
bool source_dpm_requests(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

	if (pe->dpm_request == REQUEST_GET_SNK_CAPS) {
		atomic_set_bit(pe->flags, PE_FLAGS_DPM_INITIATED_AMS);
		pe_set_state(dev, PE_GET_SINK_CAP);
		return true;
	} else if (pe->dpm_request == REQUEST_PE_GOTO_MIN) {
		atomic_set_bit(pe->flags, PE_FLAGS_DPM_INITIATED_AMS);
		pe_set_state(dev, PE_SRC_TRANSITION_SUPPLY);
		return true;
	}

	return false;
}

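/*
 * Example: the Device Policy Manager raises these requests from the
 * application side, e.g. through the USB-C connector request API. The
 * sketch below is illustrative only; usbc_request() and the way the
 * request reaches pe->dpm_request are assumptions about the surrounding
 * stack, not something defined in this file.
 */
#if 0
static void app_request_sink_caps(const struct device *usbc_port)
{
	/* Ask the Policy Engine to start a Get_Sink_Cap AMS */
	usbc_request(usbc_port, REQUEST_GET_SNK_CAPS);
}
#endif
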
/**
 * @brief Send Source Caps to Sink
 */
static void send_src_caps(struct policy_engine *pe)
{
	const struct device *dev = pe->dev;
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_tx_t *prl_tx = data->prl_tx;
	struct pd_msg *msg = &prl_tx->emsg;
	const uint32_t *pdos;
	uint32_t num_pdos = 0;

	/* This callback must be implemented */
	__ASSERT(data->policy_cb_get_src_caps != NULL,
		 "Callback pointer should not be NULL");

	data->policy_cb_get_src_caps(dev, &pdos, &num_pdos);

	msg->len = PD_CONVERT_PD_HEADER_COUNT_TO_BYTES(num_pdos);
	memcpy(msg->data, pdos, msg->len);
	pe_send_data_msg(dev, PD_PACKET_SOP, PD_DATA_SOURCE_CAP);
}

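/*
 * Example: a minimal Device Policy Manager implementation of the
 * policy_cb_get_src_caps callback used above, advertising a single
 * 5V/900mA Fixed Supply PDO. Illustrative sketch only; the PDO value,
 * the function name, and how the callback gets registered in the port
 * data are application-specific assumptions.
 */
#if 0
static const uint32_t app_src_pdos[] = {
	0x0001905A, /* Fixed Supply PDO: 5V (100 * 50mV), 900mA (90 * 10mA) */
};

static void app_policy_get_src_caps(const struct device *dev,
				     const uint32_t **pdos, uint32_t *num_pdos)
{
	ARG_UNUSED(dev);

	*pdos = app_src_pdos;
	*num_pdos = ARRAY_SIZE(app_src_pdos);
}
#endif
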
/**
 * @brief 8.3.3.2.1 PE_SRC_Startup State
 */
void pe_src_startup_entry(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	LOG_INF("PE_SRC_Startup");

	/* Reset CapsCounter */
	pe->caps_counter = 0;

	/* Reset the protocol layer */
	prl_reset(dev);

	/* Set power role to Source */
	pe->power_role = TC_ROLE_SOURCE;

	/* Invalidate explicit contract */
	atomic_clear_bit(pe->flags, PE_FLAGS_EXPLICIT_CONTRACT);

	policy_notify(dev, NOT_PD_CONNECTED);
}

void pe_src_startup_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	/*
	 * Once the reset process completes, the Policy Engine Shall
	 * transition to the PE_SRC_Send_Capabilities state
	 */
	if (prl_is_running(dev)) {
		pe_set_state(dev, PE_SRC_SEND_CAPABILITIES);
	}
}

/**
 * @brief 8.3.3.2.2 PE_SRC_Discovery State
 */
void pe_src_discovery_entry(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;

	LOG_INF("PE_SRC_Discovery");

	/*
	 * Start the SourceCapabilityTimer in order to trigger sending a
	 * Source_Capabilities message
	 */
	usbc_timer_start(&pe->pd_t_typec_send_source_cap);
}

void pe_src_discovery_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	/*
	 * The Policy Engine Shall transition to the PE_SRC_Send_Capabilities state when:
	 * 1) The SourceCapabilityTimer times out
	 * 2) And CapsCounter ≤ nCapsCount
	 */
	if (usbc_timer_expired(&pe->pd_t_typec_send_source_cap)) {
		if (pe->caps_counter <= PD_N_CAPS_COUNT) {
			pe_set_state(dev, PE_SRC_SEND_CAPABILITIES);
		} else {
			pe_set_state(dev, PE_SRC_DISABLED);
		}
	}
}

void pe_src_discovery_exit(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;

	usbc_timer_stop(&pe->pd_t_typec_send_source_cap);
}

/**
 * @brief 8.3.3.2.3 PE_SRC_Send_Capabilities State
 */
void pe_src_send_capabilities_entry(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;

	/* Request present source capabilities from Device Policy Manager */
	send_src_caps(pe);
	/* Increment CapsCounter */
	pe->caps_counter++;
	/* Init submachine */
	pe->submachine = SM_WAIT_FOR_TX;

	LOG_INF("PE_SRC_Send_Capabilities");
}

void pe_src_send_capabilities_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_rx_t *prl_rx = data->prl_rx;

	switch (pe->submachine) {
	case SM_WAIT_FOR_TX:
		/*
		 * When message is sent, the Policy Engine Shall:
		 * 1) Stop the NoResponseTimer.
		 * 2) Reset the HardResetCounter and CapsCounter to zero.
		 * 3) Initialize and run the SenderResponseTimer
		 */
		if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_TX_COMPLETE)) {
			usbc_timer_stop(&pe->pd_t_no_response);
			pe->hard_reset_counter = 0;
			pe->caps_counter = 0;
			pe->submachine = SM_WAIT_FOR_RX;
		}
		/*
		 * The Policy Engine Shall transition to the PE_SRC_Discovery
		 * state when:
		 * 1) The Protocol Layer indicates that the Message has
		 *    not been sent
		 * 2) And we are presently not Connected.
		 */
		else if ((atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_XMIT_ERROR) ||
			  atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_DISCARDED)) &&
			 (atomic_test_bit(pe->flags, PE_FLAGS_PD_CONNECTED) == false)) {
			pe_set_state(dev, PE_SRC_DISCOVERY);
		}
		break;
	case SM_WAIT_FOR_RX:
		/*
		 * The Policy Engine Shall transition to the PE_SRC_Negotiate_Capability state when:
		 * 1) A Request Message is received from the Sink.
		 */
		if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_RECEIVED)) {
			union pd_header header = prl_rx->emsg.header;

			if (received_data_message(dev, header, PD_DATA_REQUEST)) {
				/* Set to highest revision supported by both ports */
				prl_set_rev(dev, PD_PACKET_SOP,
					    MIN(PD_REV30, header.specification_revision));
				pe_set_state(dev, PE_SRC_NEGOTIATE_CAPABILITY);
			}
		}
		/*
		 * The Policy Engine Shall transition to the PE_SRC_Hard_Reset
		 * state when:
		 * 1) The SenderResponseTimer times out
		 */
		else if (usbc_timer_expired(&pe->pd_t_sender_response)) {
			pe_set_state(dev, PE_SRC_HARD_RESET);
		}
		break;
	}
}

/**
 * @brief 8.3.3.2.4 PE_SRC_Negotiate_Capability State
 */
void pe_src_negotiate_capability_entry(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_rx_t *prl_rx = data->prl_rx;

	LOG_INF("PE_SRC_Negotiate_Capability");

	/* Get sink request */
	pe->snk_request = *(uint32_t *)prl_rx->emsg.data;

	/*
	 * Ask the Device Policy Manager to evaluate the Request
	 * from the Attached Sink.
	 */
	pe->snk_request_reply =
		policy_check_sink_request(dev, pe->snk_request);

	/*
	 * The Policy Engine Shall transition to the
	 * PE_SRC_Transition_Supply state when:
	 * 1) The Request can be met.
	 */
	if (pe->snk_request_reply == SNK_REQUEST_VALID) {
		pe_set_state(dev, PE_SRC_TRANSITION_SUPPLY);
	}
	/*
	 * The Policy Engine Shall transition to the
	 * PE_SRC_Capability_Response state when:
	 * 1) The Request cannot be met.
	 * 2) Or the Request can be met later from the Power Reserve.
	 */
	else {
		pe_set_state(dev, PE_SRC_CAPABILITY_RESPONSE);
	}
}

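/*
 * Example: the Device Policy Manager evaluation behind
 * policy_check_sink_request() might decode the Fixed Supply Request Data
 * Object (RDO) along these lines. Illustrative sketch only; the function
 * name is hypothetical, and the field offsets follow the USB PD r3.0
 * Fixed/Variable RDO layout (object position in bits 30:28, operating
 * current in 10mA units in bits 19:10).
 */
#if 0
static int app_check_sink_request(const struct device *dev, const uint32_t rdo)
{
	uint32_t obj_pos = (rdo >> 28) & 0x7;
	uint32_t op_current_ma = ((rdo >> 10) & 0x3FF) * 10;

	ARG_UNUSED(dev);

	/* Only PDO #1 is offered and it is limited to 900mA */
	if (obj_pos != 1 || op_current_ma > 900) {
		return SNK_REQUEST_REJECT;
	}

	return SNK_REQUEST_VALID;
}
#endif
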
/**
 * @brief 8.3.3.2.5 PE_SRC_Transition_Supply State
 */
void pe_src_transition_supply_entry(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	LOG_INF("PE_SRC_Transition_Supply");

	/*
	 * If snk_request_reply is set, this state was entered
	 * from PE_SRC_Negotiate_Capability. So send Accept Message
	 * and inform the Device Policy Manager that it Shall transition
	 * the power supply to the Requested power level.
	 */
	if (pe->snk_request_reply == SNK_REQUEST_VALID) {
		pe_send_ctrl_msg(dev, PD_PACKET_SOP, PD_CTRL_ACCEPT);
		policy_notify(dev, TRANSITION_PS);
	}
	/*
	 * If snk_request_reply is not valid, this state was entered
	 * from PE_SRC_Ready. So send GotoMin Message.
	 */
	else {
		pe_send_ctrl_msg(dev, PD_PACKET_SOP, PD_CTRL_GOTO_MIN);
	}
}

void pe_src_transition_supply_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	/*
	 * The Policy Engine Shall transition to the PE_SRC_Ready state when:
	 * 1) The Device Policy Manager informs the Policy Engine that
	 *    the power supply is ready.
	 */
	if (atomic_test_bit(pe->flags, PE_FLAGS_TX_COMPLETE)) {
		if (policy_is_ps_ready(dev)) {
			pe_set_state(dev, PE_SRC_READY);
		}
	}
	/*
	 * The Policy Engine Shall transition to the PE_SRC_Hard_Reset
	 * state when:
	 * 1) A Protocol Error occurs.
	 */
	else if (atomic_test_bit(pe->flags, PE_FLAGS_PROTOCOL_ERROR)) {
		pe_set_state(dev, PE_SRC_HARD_RESET);
	}
}

void pe_src_transition_supply_exit(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	/* Send PS_RDY message */
	if (pe->snk_request_reply == SNK_REQUEST_VALID) {
		/* Clear request reply and reject by default */
		pe->snk_request_reply = SNK_REQUEST_REJECT;
		/* Send PS Ready */
		pe_send_ctrl_msg(dev, PD_PACKET_SOP, PD_CTRL_PS_RDY);
		/* Explicit Contract is now in place */
		atomic_set_bit(pe->flags, PE_FLAGS_EXPLICIT_CONTRACT);
		/* Update present contract */
		pe->present_contract = pe->snk_request;
	}
}

/**
 * @brief 8.3.3.2.6 PE_SRC_Ready State
 */
void pe_src_ready_entry(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	LOG_INF("PE_SRC_Ready");

	/*
	 * If the transition into PE_SRC_Ready is the result of Protocol Error
	 * that has not caused a Soft Reset then the notification to the
	 * Protocol Layer of the end of the AMS Shall Not be sent since there
	 * is a Message to be processed.
	 *
	 * Else on entry to the PE_SRC_Ready state the Source Shall notify the
	 * Protocol Layer of the end of the Atomic Message Sequence (AMS).
	 */
	if (atomic_test_and_clear_bit(pe->flags,
				      PE_FLAGS_PROTOCOL_ERROR_NO_SOFT_RESET)) {
		pe_dpm_end_ams(dev);
	}
}

void pe_src_ready_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_rx_t *prl_rx = data->prl_rx;

	/* Handle incoming messages */
	if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_RECEIVED)) {
		union pd_header header = prl_rx->emsg.header;

		/*
		 * Extended Message Requests
		 */
		if (header.extended) {
			extended_message_not_supported(dev);
		}
		/*
		 * Data Message Requests
		 */
		else if (header.number_of_data_objects > 0) {
			switch (header.message_type) {
			case PD_DATA_REQUEST:
				pe_set_state(dev, PE_SRC_NEGOTIATE_CAPABILITY);
				break;
			case PD_DATA_VENDOR_DEF:
				/*
				 * VDMs are not supported. PD 2.0 ignores them
				 * and PD 3.0 replies with Not_Supported.
				 */
				if (prl_get_rev(dev, PD_PACKET_SOP) > PD_REV20) {
					pe_set_state(dev, PE_SEND_NOT_SUPPORTED);
				}
				break;
			default:
				pe_set_state(dev, PE_SEND_NOT_SUPPORTED);
			}
		}
		/*
		 * Control Message Requests
		 */
		else {
			switch (header.message_type) {
			case PD_CTRL_GOOD_CRC:
				/* Do nothing */
				break;
			case PD_CTRL_NOT_SUPPORTED:
				/* Notify DPM */
				policy_notify(dev, MSG_NOT_SUPPORTED_RECEIVED);
				break;
			case PD_CTRL_PING:
				/* Do nothing */
				break;
			case PD_CTRL_GET_SOURCE_CAP:
				pe_set_state(dev, PE_SRC_SEND_CAPABILITIES);
				break;
			case PD_CTRL_DR_SWAP:
				pe_set_state(dev, PE_DRS_EVALUATE_SWAP);
				break;
			/*
			 * USB PD 3.0 6.8.1:
			 * Receiving an unexpected message shall be responded
			 * to with a soft reset message.
			 */
			case PD_CTRL_ACCEPT:
			case PD_CTRL_REJECT:
			case PD_CTRL_WAIT:
			case PD_CTRL_PS_RDY:
				pe_send_soft_reset(dev, prl_rx->emsg.type);
				break;
			/*
			 * Receiving an unknown or unsupported message
			 * shall be responded to with a not supported
			 * message.
			 */
			default:
				pe_set_state(dev, PE_SEND_NOT_SUPPORTED);
				break;
			}
		}
	} else {
		/* Handle Source Device Policy Manager Requests */
		source_dpm_requests(dev);
	}
}

void pe_src_ready_exit(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	/*
	 * If the Source is initiating an AMS, then notify the
	 * PRL that the first message in an AMS will follow.
	 */
	if (pe_dpm_initiated_ams(dev)) {
		prl_first_msg_notificaiton(dev);
	}
}

/**
 * @brief 8.3.3.2.7 PE_SRC_Disabled State
 */
void pe_src_disabled_entry(void *obj)
{
	LOG_INF("PE_SRC_Disabled");

	/*
	 * Unresponsive to USB Power Delivery messaging, but not to Hard Reset
	 * Signaling. See pe_got_hard_reset
	 */
}

/**
 * @brief 8.3.3.2.11 PE_SRC_Transition_to_default State
 */
void pe_src_transition_to_default_entry(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	/*
	 * On entry to the PE_SRC_Transition_to_default state the
	 * Policy Engine Shall:
	 * 1: indicate to the Device Policy Manager that the power
	 *    supply Shall Hard Reset
	 * 2: request a reset of the local hardware
	 * 3: request the Device Policy Manager to set the Port
	 *    Data Role to DFP and turn off VCONN.
	 *
	 * NOTE: 1, 2 and VCONN off are done by Device Policy Manager when
	 *	 it receives the HARD_RESET_RECEIVED notification.
	 */
	policy_notify(dev, HARD_RESET_RECEIVED);
	pe->data_role = TC_ROLE_DFP;
	policy_notify(dev, DATA_ROLE_IS_DFP);
}

void pe_src_transition_to_default_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	/*
	 * The Policy Engine Shall transition to the PE_SRC_Startup
	 * state when:
	 * 1: The Device Policy Manager indicates that the power
	 *    supply has reached the default level.
	 */
	if (policy_check(dev, CHECK_SRC_PS_AT_DEFAULT_LEVEL)) {
		pe_set_state(dev, PE_SRC_STARTUP);
	}
}

void pe_src_transition_to_default_exit(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	/*
	 * On exit from the PE_SRC_Transition_to_default state the
	 * Policy Engine Shall:
	 * 1: request the Device Policy Manager to turn on VCONN
	 * 2: inform the Protocol Layer that the Hard Reset is complete.
	 *
	 * NOTE: The Device Policy Manager turns on VCONN when it notifies the
	 *	 PE that the Power Supply is at the default level.
	 */
	prl_hard_reset_complete(dev);
}

/**
 * 8.3.3.2.8 PE_SRC_Capability_Response State
 */
void pe_src_capability_response_entry(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	/*
	 * On entry to the PE_SRC_Capability_Response state the Policy Engine
	 * Shall request the Protocol Layer to send one of the following:
	 */

	/*
	 * 1: Reject Message – if the request cannot be met or the present
	 *    Contract is Invalid.
	 */
	if (pe->snk_request_reply == SNK_REQUEST_REJECT) {
		pe_send_ctrl_msg(dev, PD_PACKET_SOP, PD_CTRL_REJECT);
	}
	/*
	 * 2: Wait Message – if the request could be met later from the Power
	 *    Reserve. A Wait Message Shall Not be sent if the present Contract
	 *    is Invalid.
	 */
	else {
		pe_send_ctrl_msg(dev, PD_PACKET_SOP, PD_CTRL_WAIT);
	}
}

void pe_src_capability_response_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	/* Wait until message has been sent */
	if (!atomic_test_and_clear_bit(pe->flags, PE_FLAGS_TX_COMPLETE)) {
		return;
	}

	/*
	 * The Policy Engine Shall transition to the PE_SRC_Ready state when:
	 * 1: There is an Explicit Contract AND
	 * 2: A Reject Message has been sent and the present Contract
	 *    is still Valid OR
	 * 3: A Wait Message has been sent.
	 */
	if (atomic_test_bit(pe->flags, PE_FLAGS_EXPLICIT_CONTRACT) &&
	    ((pe->snk_request_reply == SNK_REQUEST_REJECT &&
	      policy_present_contract_is_valid(dev, pe->present_contract)) ||
	     (pe->snk_request_reply == SNK_REQUEST_WAIT))) {
		pe_set_state(dev, PE_SRC_READY);
	}
	/*
	 * The Policy Engine Shall transition to the PE_SRC_Hard_Reset state
	 * when:
	 * 1: There is an Explicit Contract and
	 * 2: The Reject Message has been sent and the present Contract
	 *    is Invalid
	 */
	else if (atomic_test_bit(pe->flags, PE_FLAGS_EXPLICIT_CONTRACT) &&
		 policy_present_contract_is_valid(dev, pe->present_contract) == false) {
		pe_set_state(dev, PE_SRC_HARD_RESET);
	}
	/*
	 * The Policy Engine Shall transition to the PE_SRC_Wait_New_Capabilities
	 * state when:
	 * 1: There is no Explicit Contract and
	 * 2: A Reject Message has been sent or
	 * 3: A Wait Message has been sent.
	 */
	else {
		/* 8.3.3.2.13 PE_SRC_Wait_New_Capabilities embedded here */

		/*
		 * In the PE_SRC_Wait_New_Capabilities State the Device Policy Manager
		 * Should either decide to send no further Source Capabilities or
		 * Should send a different set of Source Capabilities. Continuing
		 * to send the same set of Source Capabilities could result in a live
		 * lock situation.
		 */

		/* Notify DPM to send a different set of Source Capabilities */
		if (policy_change_src_caps(dev)) {
			/* DPM will send a different set of Source Capabilities */
			pe_set_state(dev, PE_SRC_SEND_CAPABILITIES);
		} else {
			/*
			 * DPM cannot send a different set of Source
			 * Capabilities, so disable the port.
			 */
			pe_set_state(dev, PE_SUSPEND);
		}
	}
}

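/**
 * @brief Parent state for the hard reset states: starts the NoResponseTimer
 *	  and the PSHardResetTimer, then waits for the PSHardResetTimer to
 *	  expire before transitioning to PE_SRC_Transition_to_default.
 */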
void pe_src_hard_reset_parent_entry(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;

	pe->submachine = SM_HARD_RESET_START;
}

void pe_src_hard_reset_parent_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	switch (pe->submachine) {
	case SM_HARD_RESET_START:
		/*
		 * Initialize and run the NoResponseTimer.
		 * Note that the NoResponseTimer Shall continue to run
		 * in every state until it is stopped or times out.
		 */
		usbc_timer_start(&pe->pd_t_no_response);

		/* Initialize and run the PSHardResetTimer */
		usbc_timer_start(&pe->pd_t_ps_hard_reset);

		pe->submachine = SM_HARD_RESET_WAIT;
		break;
	case SM_HARD_RESET_WAIT:
		/*
		 * The Policy Engine Shall transition to the
		 * PE_SRC_Transition_to_default state when:
		 * The PSHardResetTimer times out.
		 */
		if (usbc_timer_expired(&pe->pd_t_ps_hard_reset)) {
			pe_set_state(dev, PE_SRC_TRANSITION_TO_DEFAULT);
		}
		break;
	}
}

void pe_src_hard_reset_parent_exit(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;

	/* Stop the Hard Reset Timer */
	usbc_timer_stop(&pe->pd_t_ps_hard_reset);
}

/**
 * @brief 8.3.3.2.9 PE_SRC_Hard_Reset State
 */
void pe_src_hard_reset_entry(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	/*
	 * On entry to the PE_SRC_Hard_Reset state the
	 * Policy Engine Shall:
	 */

	/*
	 * Request the generation of Hard Reset Signaling by
	 * the PHY Layer
	 */
	prl_execute_hard_reset(dev);
}