/*
 * Copyright (c) 2022 The Chromium OS Authors
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/sys/byteorder.h>
#include <zephyr/smf.h>
#include <zephyr/usb_c/usbc.h>
#include <zephyr/drivers/usb_c/usbc_pd.h>
#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(usbc_stack, CONFIG_USBC_STACK_LOG_LEVEL);

#include "usbc_pe_common_internal.h"
#include "usbc_stack.h"

/**
 * @brief Initialize the Sink Policy Engine layer
 */
void pe_snk_init(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

	/* Initial role of sink is UFP */
	pe_set_data_role(dev, TC_ROLE_UFP);

	/* Initialize timers */
	usbc_timer_init(&pe->pd_t_typec_sink_wait_cap, PD_T_TYPEC_SINK_WAIT_CAP_MAX_MS);
	usbc_timer_init(&pe->pd_t_ps_transition, PD_T_SPR_PS_TRANSITION_NOM_MS);
	usbc_timer_init(&pe->pd_t_wait_to_resend, PD_T_SINK_REQUEST_MIN_MS);
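
	/*
	 * Note: the wait-capabilities timer guards the initial capability
	 * exchange, the PS-transition timer guards the post-Accept power
	 * supply transition, and the wait-to-resend timer paces a repeated
	 * Request after the Source answers Wait (see
	 * PE_SNK_Select_Capability below).
	 */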

	/* Go to the startup state */
	pe_set_state(dev, PE_SNK_STARTUP);
}
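
/*
 * Typical sink flow implemented by the states below:
 * PE_SNK_Startup -> PE_SNK_Discovery -> PE_SNK_Wait_For_Capabilities ->
 * PE_SNK_Evaluate_Capability -> PE_SNK_Select_Capability ->
 * PE_SNK_Transition_Sink -> PE_SNK_Ready, with PE_SNK_Hard_Reset and
 * PE_SNK_Transition_to_default providing error recovery.
 */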

/**
 * @brief Handle sink-specific DPM requests
 */
void sink_dpm_requests(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

	/*
	 * Handle any common DPM Requests
	 */
	if (common_dpm_requests(dev)) {
		return;
	}

	/*
	 * Handle Sink-specific DPM Requests. Request values above
	 * REQUEST_TC_END are Policy Engine requests rather than Type-C
	 * layer requests.
	 */
	if (pe->dpm_request > REQUEST_TC_END) {
		atomic_set_bit(pe->flags, PE_FLAGS_DPM_INITIATED_AMS);

		if (pe->dpm_request == REQUEST_PE_GET_SRC_CAPS) {
			pe_set_state(dev, PE_SNK_GET_SOURCE_CAP);
		}
	}
}

/**
 * @brief PE_SNK_Startup Entry State
 */
void pe_snk_startup_entry(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	LOG_INF("PE_SNK_Startup");

	/* Reset the protocol layer */
	prl_reset(dev);

	/* Set power role to Sink */
	pe->power_role = TC_ROLE_SINK;

	/* Invalidate explicit contract */
	atomic_clear_bit(pe->flags, PE_FLAGS_EXPLICIT_CONTRACT);

	policy_notify(dev, NOT_PD_CONNECTED);
}

/**
 * @brief PE_SNK_Startup Run State
 */
void pe_snk_startup_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	/*
	 * Once the reset process completes, the Policy Engine Shall
	 * transition to the PE_SNK_Discovery state
	 */
	if (prl_is_running(dev)) {
		pe_set_state(dev, PE_SNK_DISCOVERY);
	}
}

/**
 * @brief PE_SNK_Discovery Entry State
 */
void pe_snk_discovery_entry(void *obj)
{
	LOG_INF("PE_SNK_Discovery");
}

/**
 * @brief PE_SNK_Discovery Run State
 */
void pe_snk_discovery_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;
	struct usbc_port_data *data = dev->data;
	const struct device *vbus = data->vbus;

	/*
	 * Transition to the PE_SNK_Wait_for_Capabilities state when
	 * VBUS has been detected
	 */
	if (usbc_vbus_check_level(vbus, TC_VBUS_PRESENT)) {
		pe_set_state(dev, PE_SNK_WAIT_FOR_CAPABILITIES);
	}
}

/**
 * @brief PE_SNK_Wait_For_Capabilities Entry State
 */
void pe_snk_wait_for_capabilities_entry(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;

	LOG_INF("PE_SNK_Wait_For_Capabilities");

	/* Start the SinkWaitCapTimer (initialized in pe_snk_init()) */
	usbc_timer_start(&pe->pd_t_typec_sink_wait_cap);
}

/**
 * @brief PE_SNK_Wait_For_Capabilities Run State
 */
void pe_snk_wait_for_capabilities_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_rx_t *prl_rx = data->prl_rx;
	union pd_header header;

	/*
	 * Transition to the PE_SNK_Evaluate_Capability state when:
	 * 1) A Source_Capabilities Message is received.
	 */
	if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_RECEIVED)) {
		header = prl_rx->emsg.header;
		if (received_data_message(dev, header, PD_DATA_SOURCE_CAP)) {
			pe_set_state(dev, PE_SNK_EVALUATE_CAPABILITY);
			return;
		}
	}

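	/*
	 * PE_FLAGS_SNK_WAIT_CAP_TIMEOUT is later consumed by
	 * PE_SNK_Hard_Reset, where, together with the HardResetCounter,
	 * it distinguishes a non-responsive Source from other hard reset
	 * causes.
	 */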
	/* When the SinkWaitCapTimer times out, perform a Hard Reset. */
	if (usbc_timer_expired(&pe->pd_t_typec_sink_wait_cap)) {
		atomic_set_bit(pe->flags, PE_FLAGS_SNK_WAIT_CAP_TIMEOUT);
		pe_set_state(dev, PE_SNK_HARD_RESET);
	}
}

/**
 * @brief PE_SNK_Wait_For_Capabilities Exit State
 */
void pe_snk_wait_for_capabilities_exit(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;

	/* Stop SinkWaitCapTimer */
	usbc_timer_stop(&pe->pd_t_typec_sink_wait_cap);
}

/**
 * @brief PE_SNK_Evaluate_Capability Entry State
 */
void pe_snk_evaluate_capability_entry(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_rx_t *prl_rx = data->prl_rx;
	union pd_header header;
	uint32_t *pdos = (uint32_t *)prl_rx->emsg.data;
	uint32_t num_pdo_objs = PD_CONVERT_BYTES_TO_PD_HEADER_COUNT(prl_rx->emsg.len);

	LOG_INF("PE_SNK_Evaluate_Capability");

	/* Inform the DPM of the reception of the source capabilities */
	policy_notify(dev, SOURCE_CAPABILITIES_RECEIVED);

	header = prl_rx->emsg.header;

	/* Reset Hard Reset counter to zero */
	pe->hard_reset_counter = 0;

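	/*
	 * header.specification_revision comes from the just-received
	 * Source_Capabilities message, so taking the MIN below settles
	 * on the highest revision supported by both port partners.
	 */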
	/* Set to highest revision supported by both ports */
	prl_set_rev(dev, PD_PACKET_SOP, MIN(PD_REV30, header.specification_revision));

	/* Send source caps to Device Policy Manager for saving */
	policy_set_src_cap(dev, pdos, num_pdo_objs);

	/* Transition to PE_Snk_Select_Capability */
	pe_set_state(dev, PE_SNK_SELECT_CAPABILITY);
}

/**
 * @brief PE_SNK_Select_Capability Entry State
 */
void pe_snk_select_capability_entry(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;
	uint32_t rdo;

	LOG_INF("PE_SNK_Select_Capability");

	/* Get selected source cap from Device Policy Manager */
	rdo = policy_get_request_data_object(dev);
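	/*
	 * The RDO's Object Position field selects one of the source PDOs
	 * saved by policy_set_src_cap(); its remaining fields (e.g. the
	 * operating and maximum currents for a Fixed Supply) describe the
	 * sink's request.
	 */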

	/* Send Request */
	pe_send_request_msg(dev, rdo);
	/* Inform Device Policy Manager that we are PD Connected */
	policy_notify(dev, PD_CONNECTED);
}

/**
 * @brief PE_SNK_Select_Capability Run State
 * NOTE: Sender Response Timer is handled in super state.
 */
void pe_snk_select_capability_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_rx_t *prl_rx = data->prl_rx;
	union pd_header header;

	if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_DISCARDED)) {
		/*
		 * The sent REQUEST message was discarded. This can be at
		 * the start of an AMS or in the middle. Handle what to
		 * do based on where we came from.
		 * 1) PE_SNK_EVALUATE_CAPABILITY: send a Soft Reset
		 * 2) PE_SNK_READY: return to PE_SNK_Ready
		 */
		if (pe_get_last_state(dev) == PE_SNK_EVALUATE_CAPABILITY) {
			pe_send_soft_reset(dev, PD_PACKET_SOP);
		} else {
			pe_set_state(dev, PE_SNK_READY);
		}
	}

	if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_RECEIVED)) {
		header = prl_rx->emsg.header;

		/*
		 * Transition to the PE_SNK_Transition_Sink state when:
		 * 1) An Accept Message is received from the Source.
		 *
		 * Transition to the PE_SNK_Wait_for_Capabilities state when:
		 * 1) There is no Explicit Contract in place and
		 * 2) A Reject Message is received from the Source or
		 * 3) A Wait Message is received from the Source.
		 *
		 * Transition to the PE_SNK_Ready state when:
		 * 1) There is an Explicit Contract in place and
		 * 2) A Reject Message is received from the Source or
		 * 3) A Wait Message is received from the Source.
		 *
		 * Transition to the PE_SNK_Hard_Reset state when:
		 * 1) A SenderResponseTimer timeout occurs.
		 */
		/* Only look at control messages */
		if (received_control_message(dev, header, PD_CTRL_ACCEPT)) {
			/* explicit contract is now in place */
			atomic_set_bit(pe->flags, PE_FLAGS_EXPLICIT_CONTRACT);
			pe_set_state(dev, PE_SNK_TRANSITION_SINK);
		} else if (received_control_message(dev, header, PD_CTRL_REJECT) ||
			   received_control_message(dev, header, PD_CTRL_WAIT)) {
			/*
			 * We had a previous explicit contract, so transition to
			 * PE_SNK_Ready
			 */
			if (atomic_test_bit(pe->flags, PE_FLAGS_EXPLICIT_CONTRACT)) {
				if (received_control_message(dev, header, PD_CTRL_WAIT)) {
					/*
					 * Inform Device Policy Manager that Sink
					 * Request needs to Wait
					 */
					if (policy_wait_notify(dev, WAIT_SINK_REQUEST)) {
						atomic_set_bit(pe->flags,
							       PE_FLAGS_WAIT_SINK_REQUEST);
						usbc_timer_start(&pe->pd_t_wait_to_resend);
					}
				}

				pe_set_state(dev, PE_SNK_READY);
			} else {
				/*
				 * No previous explicit contract, so transition
				 * to PE_SNK_Wait_For_Capabilities
				 */
				pe_set_state(dev, PE_SNK_WAIT_FOR_CAPABILITIES);
			}
		} else {
			pe_send_soft_reset(dev, prl_rx->emsg.type);
		}
		return;
	}
}

/**
 * @brief PE_SNK_Transition_Sink Entry State
 */
void pe_snk_transition_sink_entry(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;

	LOG_INF("PE_SNK_Transition_Sink");

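	/*
	 * PSTransitionTimer bounds how long the Source may take to
	 * transition the power supply after sending Accept; if it expires,
	 * the run state below performs a Hard Reset. The period used is
	 * PD_T_SPR_PS_TRANSITION_NOM_MS (see pe_snk_init()).
	 */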
	/* Start the PSTransitionTimer */
	usbc_timer_start(&pe->pd_t_ps_transition);
}

/**
 * @brief PE_SNK_Transition_Sink Run State
 */
void pe_snk_transition_sink_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_rx_t *prl_rx = data->prl_rx;
	union pd_header header;

	/*
	 * Transition to the PE_SNK_Ready state when:
	 * 1) A PS_RDY Message is received from the Source.
	 *
	 * Transition to the PE_SNK_Hard_Reset state when:
	 * 1) A Protocol Error occurs.
	 */
	if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_RECEIVED)) {
		header = prl_rx->emsg.header;

		/*
		 * PS_RDY message received
		 */
		if (received_control_message(dev, header, PD_CTRL_PS_RDY)) {
			/*
			 * Inform the Device Policy Manager to Transition
			 * the Power Supply
			 */
			policy_notify(dev, TRANSITION_PS);
			pe_set_state(dev, PE_SNK_READY);
		} else {
			/* Protocol Error */
			pe_set_state(dev, PE_SNK_HARD_RESET);
		}
		return;
	}

	/*
	 * Timeout will lead to a Hard Reset
	 */
	if (usbc_timer_expired(&pe->pd_t_ps_transition)) {
		pe_set_state(dev, PE_SNK_HARD_RESET);
	}
}

/**
 * @brief PE_SNK_Transition_Sink Exit State
 */
void pe_snk_transition_sink_exit(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;

	/* Stop the PSTransitionTimer */
	usbc_timer_stop(&pe->pd_t_ps_transition);
}

/**
 * @brief PE_SNK_Ready Entry State
 */
void pe_snk_ready_entry(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;

	LOG_INF("PE_SNK_Ready");

	/* Clear AMS (Atomic Message Sequence) flags */
	atomic_clear_bit(pe->flags, PE_FLAGS_INTERRUPTIBLE_AMS);
	atomic_clear_bit(pe->flags, PE_FLAGS_DPM_INITIATED_AMS);
}

/**
 * @brief PE_SNK_Ready Run State
 */
void pe_snk_ready_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_rx_t *prl_rx = data->prl_rx;

	/*
	 * Process any incoming message before checking the resend timer
	 * and handling DPM requests.
	 */
	if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_RECEIVED)) {
		union pd_header header = prl_rx->emsg.header;

		/* Extended Message Request */
		if (header.extended) {
			extended_message_not_supported(dev);
			return;
		} else if (header.number_of_data_objects > 0) {
			/* Data Messages */
			switch (header.message_type) {
			case PD_DATA_SOURCE_CAP:
				pe_set_state(dev, PE_SNK_EVALUATE_CAPABILITY);
				break;
			case PD_DATA_VENDOR_DEF:
				/*
				 * VDMs are unsupported: PD 2.0 ignores them,
				 * while PD 3.0 replies with Not_Supported.
				 */
				if (prl_get_rev(dev, PD_PACKET_SOP) > PD_REV20) {
					pe_set_state(dev, PE_SEND_NOT_SUPPORTED);
				}
				break;
			default:
				pe_set_state(dev, PE_SEND_NOT_SUPPORTED);
			}
			return;
		} else {
			/* Control Messages */
			switch (header.message_type) {
			case PD_CTRL_GOOD_CRC:
				/* Do nothing */
				break;
			case PD_CTRL_PING:
				/* Do nothing */
				break;
			case PD_CTRL_GET_SINK_CAP:
				pe_set_state(dev, PE_SNK_GIVE_SINK_CAP);
				return;
			case PD_CTRL_DR_SWAP:
				pe_set_state(dev, PE_DRS_EVALUATE_SWAP);
				return;
			case PD_CTRL_NOT_SUPPORTED:
				/* Do nothing */
				break;
			/*
			 * USB PD 3.0 6.8.1:
			 * Receiving an unexpected message shall be responded
			 * to with a soft reset message.
			 */
			case PD_CTRL_ACCEPT:
			case PD_CTRL_REJECT:
			case PD_CTRL_WAIT:
			case PD_CTRL_PS_RDY:
				pe_send_soft_reset(dev, prl_rx->emsg.type);
				return;
			/*
			 * Receiving an unknown or unsupported message
			 * shall be responded to with a not supported message.
			 */
			default:
				pe_set_state(dev, PE_SEND_NOT_SUPPORTED);
				return;
			}
		}
	}

	/*
	 * Check if we are waiting to resend any messages:
	 * pd_t_wait_to_resend is started when the Source answers a Request
	 * (or Data Role Swap) with Wait, and its expiry triggers the
	 * resend below.
	 */
	if (usbc_timer_expired(&pe->pd_t_wait_to_resend)) {
		if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_WAIT_SINK_REQUEST)) {
			pe_set_state(dev, PE_SNK_SELECT_CAPABILITY);
			return;
		} else if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_WAIT_DATA_ROLE_SWAP)) {
			pe_set_state(dev, PE_DRS_SEND_SWAP);
			return;
		}
	}

	/*
	 * Handle Device Policy Manager Requests
	 */
	sink_dpm_requests(dev);
}

/**
 * @brief PE_SNK_Ready Exit State
 */
void pe_snk_ready_exit(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	/*
	 * If the DPM initiated an AMS, then notify the
	 * PRL that the first message in the AMS will follow.
	 */
	if (pe_dpm_initiated_ams(dev)) {
		prl_first_msg_notificaiton(dev);
	}
}

/**
 * @brief PE_SNK_Hard_Reset Entry State
 */
void pe_snk_hard_reset_entry(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;
	struct usbc_port_data *data = dev->data;

	LOG_INF("PE_SNK_Hard_Reset");

	/*
	 * Note: If the SinkWaitCapTimer times out and the HardResetCounter is
	 * greater than nHardResetCount the Sink Shall assume that the
	 * Source is non-responsive.
	 */
	if (atomic_test_bit(pe->flags, PE_FLAGS_SNK_WAIT_CAP_TIMEOUT) &&
	    pe->hard_reset_counter > PD_N_HARD_RESET_COUNT) {
		/* Inform the DPM that the port partner is not responsive */
		policy_notify(dev, PORT_PARTNER_NOT_RESPONSIVE);

		/* Pause the Policy Engine */
		data->pe_enabled = false;
		return;
	}

	/* Set Hard Reset Pending Flag */
	atomic_set_bit(pe->flags, PE_FLAGS_HARD_RESET_PENDING);

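	/*
	 * Clear the wait-capabilities timeout marker so that only a fresh
	 * SinkWaitCapTimer timeout can count toward the non-responsive
	 * check above on a subsequent entry to this state.
	 */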
	atomic_clear_bit(pe->flags, PE_FLAGS_SNK_WAIT_CAP_TIMEOUT);

	/* Request the generation of Hard Reset Signaling by the PHY Layer */
	prl_execute_hard_reset(dev);
	/* Increment the HardResetCounter */
	pe->hard_reset_counter++;
}

/**
 * @brief PE_SNK_Hard_Reset Run State
 */
void pe_snk_hard_reset_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	/*
	 * Transition to the PE_SNK_Transition_to_default state when:
	 * 1) The Hard Reset is complete.
	 */
	if (atomic_test_bit(pe->flags, PE_FLAGS_HARD_RESET_PENDING)) {
		return;
	}

	pe_set_state(dev, PE_SNK_TRANSITION_TO_DEFAULT);
}

/**
 * @brief PE_SNK_Transition_to_default Entry State
 */
void pe_snk_transition_to_default_entry(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	LOG_INF("PE_SNK_Transition_to_default");

	/* Reset flags */
	atomic_clear(pe->flags);
	pe->data_role = TC_ROLE_UFP;

	/*
	 * Indicate to the Device Policy Manager that the Sink Shall
	 * transition to default
	 */
	policy_notify(dev, SNK_TRANSITION_TO_DEFAULT);
	/*
	 * Request that the Device Policy Manager set the Port Data Role
	 * to UFP
	 */
	policy_notify(dev, DATA_ROLE_IS_UFP);
}

/**
 * @brief PE_SNK_Transition_to_default Run State
 */
void pe_snk_transition_to_default_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	/*
	 * Wait until the Device Policy Manager has transitioned the sink
	 * to the default level
	 */
	if (policy_is_snk_at_default(dev)) {
		/* Inform the Protocol Layer that the Hard Reset is complete */
		prl_hard_reset_complete(dev);
		pe_set_state(dev, PE_SNK_STARTUP);
	}
}

/**
 * @brief PE_SNK_Get_Source_Cap Entry State
 */
void pe_snk_get_source_cap_entry(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	LOG_INF("PE_SNK_Get_Source_Cap");

	/*
	 * On entry to the PE_SNK_Get_Source_Cap state the Policy Engine
	 * Shall request the Protocol Layer to send a Get_Source_Cap
	 * message in order to retrieve the Source's capabilities.
	 */
	pe_send_ctrl_msg(dev, PD_PACKET_SOP, PD_CTRL_GET_SOURCE_CAP);
}

/**
 * @brief PE_SNK_Get_Source_Cap Run State
 * NOTE: Sender Response Timer is handled in super state.
 */
void pe_snk_get_source_cap_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_rx_t *prl_rx = data->prl_rx;
	union pd_header header;

	/* Wait for a response from the Source */
	if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_RECEIVED)) {
		/*
		 * The Policy Engine Shall transition to the PE_SNK_Evaluate_Capability
		 * State when:
		 * 1: In SPR Mode and SPR Source Capabilities were requested and
		 *    a Source_Capabilities Message is received
		 */
		header = prl_rx->emsg.header;

		/* Source_Capabilities is a data message, not a control message */
		if (received_data_message(dev, header, PD_DATA_SOURCE_CAP)) {
			pe_set_state(dev, PE_SNK_EVALUATE_CAPABILITY);
		}
	}
}

/**
 * @brief PE_SNK_Give_Sink_Cap Entry state
 */
void pe_snk_give_sink_cap_entry(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_tx_t *prl_tx = data->prl_tx;
	struct pd_msg *msg = &prl_tx->emsg;
	uint32_t *pdos;
	uint32_t num_pdos;

	/* Get present sink capabilities from Device Policy Manager */
	policy_get_snk_cap(dev, &pdos, &num_pdos);

	msg->len = PD_CONVERT_PD_HEADER_COUNT_TO_BYTES(num_pdos);
	memcpy(msg->data, (uint8_t *)pdos, msg->len);
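	/*
	 * Only the raw PDO payload is copied above; the protocol layer
	 * constructs the message header, deriving its object count from
	 * msg->len.
	 */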
	pe_send_data_msg(dev, PD_PACKET_SOP, PD_DATA_SINK_CAP);
}

/**
 * @brief PE_SNK_Give_Sink_Cap Run state
 */
void pe_snk_give_sink_cap_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_rx_t *prl_rx = data->prl_rx;

	/* Wait until message is sent or discarded */
	if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_TX_COMPLETE)) {
		pe_set_state(dev, PE_SNK_READY);
	} else if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_DISCARDED)) {
		pe_send_soft_reset(dev, prl_rx->emsg.type);
	}
}