/*
 * Copyright (c) 2022 The Chromium OS Authors
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/sys/byteorder.h>
#include <zephyr/smf.h>
#include <zephyr/usb_c/usbc.h>
#include <zephyr/drivers/usb_c/usbc_pd.h>
#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(usbc_stack, CONFIG_USBC_STACK_LOG_LEVEL);

#include "usbc_stack.h"
#include "usbc_pe_common_internal.h"
#include "usbc_pe_snk_states_internal.h"
#include "usbc_pe_src_states_internal.h"

static const struct smf_state pe_states[PE_STATE_COUNT];

/**
 * @brief Handle common DPM requests
 *
 * @retval true if request was handled, else false
 */
bool common_dpm_requests(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

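	/* Requests above REQUEST_TC_END are Policy Engine level requests */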
	if (pe->dpm_request > REQUEST_TC_END) {
		atomic_set_bit(pe->flags, PE_FLAGS_DPM_INITIATED_AMS);

		if (pe->dpm_request == REQUEST_PE_DR_SWAP) {
			pe_set_state(dev, PE_DRS_SEND_SWAP);
			return true;
		} else if (pe->dpm_request == REQUEST_PE_SOFT_RESET_SEND) {
			pe_set_state(dev, PE_SEND_SOFT_RESET);
			return true;
		}
	}

	return false;
}
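
/*
 * Example (illustrative sketch): a Device Policy Manager raises one of the
 * requests handled above through the public request API, e.g.
 *
 *	usbc_request(dev, REQUEST_PE_DR_SWAP);
 *
 * and the request reaches this routine on the next pe_run() pass.
 */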

/**
 * @brief Initializes the PE state machine and enters the PE_SUSPEND state.
 */
void pe_subsys_init(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

	/* Save the port device object so states can access it */
	pe->dev = dev;

	/* Initialize the state machine */
	smf_set_initial(SMF_CTX(pe), &pe_states[PE_SUSPEND]);
}

/**
 * @brief Starts the Policy Engine layer
 */
void pe_start(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;

	data->pe_enabled = true;
}

/**
 * @brief Suspend the Policy Engine layer
 */
void pe_suspend(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;

	data->pe_enabled = false;

	/*
	 * While we are paused, exit all states
	 * and wait until initialized again.
	 */
	pe_set_state(dev, PE_SUSPEND);
}

/**
 * @brief Initialize the Policy Engine layer
 */
static void pe_init(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

	/* Clear all flags */
	atomic_clear(pe->flags);

	/* Initialize common timers */
	usbc_timer_init(&pe->pd_t_sender_response, PD_T_SENDER_RESPONSE_MAX_MS);
	usbc_timer_init(&pe->pd_t_chunking_not_supported, PD_T_CHUNKING_NOT_SUPPORTED_NOM_MS);

	/* Initialize common counters */
	pe->hard_reset_counter = 0;

#ifdef CONFIG_USBC_CSM_SINK_ONLY
	pe_snk_init(dev);
#else
	pe_src_init(dev);
#endif
}

/**
 * @brief Tests if the Policy Engine layer is running
 */
bool pe_is_running(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;

	return data->pe_sm_state == SM_RUN;
}

/**
 * @brief Run the Policy Engine layer
 */
void pe_run(const struct device *dev, const int32_t dpm_request)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

	switch (data->pe_sm_state) {
	case SM_PAUSED:
		if (data->pe_enabled == false) {
			break;
		}
	/* fall through */
	case SM_INIT:
		pe_init(dev);
		data->pe_sm_state = SM_RUN;
	/* fall through */
	case SM_RUN:
		if (data->pe_enabled == false) {
			data->pe_sm_state = SM_PAUSED;
			break;
		}

		if (prl_is_running(dev) == false) {
			break;
		}

		/* Get any DPM Requests */
		pe->dpm_request = dpm_request;

		/*
		 * 8.3.3.3.8 PE_SNK_Hard_Reset State
		 * The Policy Engine Shall transition to the PE_SNK_Hard_Reset
		 * state from any state when:
		 * - Hard Reset request from Device Policy Manager
		 */
		if (dpm_request == REQUEST_PE_HARD_RESET_SEND) {
			pe_set_state(dev, PE_SNK_HARD_RESET);
		}

		/* Run state machine */
		smf_run_state(SMF_CTX(pe));

		break;
	}
}
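
/*
 * Illustrative call pattern (a sketch; in this stack the actual caller is
 * the USB-C connection layer, not application code):
 *
 *	pe_start(dev);
 *	while (connected) {
 *		pe_run(dev, dpm_request);
 *	}
 *	pe_suspend(dev);
 *
 * where dpm_request carries any pending request raised via usbc_request().
 */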

/**
 * @brief Sets Data Role
 */
void pe_set_data_role(const struct device *dev, enum tc_data_role dr)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

	/* Update data role */
	pe->data_role = dr;

	/* Notify TCPC of role update */
	tcpc_set_roles(data->tcpc, pe->power_role, pe->data_role);
}

/**
 * @brief Gets the current data role
 */
enum tc_data_role pe_get_data_role(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;

	return data->pe->data_role;
}

/**
 * @brief Gets the current power role
 */
enum tc_power_role pe_get_power_role(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;

	return data->pe->power_role;
}

/**
 * @brief Gets the current cable plug role
 */
enum tc_cable_plug pe_get_cable_plug(const struct device *dev)
{
	return PD_PLUG_FROM_DFP_UFP;
}

/**
 * @brief Informs the Policy Engine that a soft reset was received.
 */
void pe_got_soft_reset(const struct device *dev)
{
	/*
	 * The PE_SRC_Soft_Reset state Shall be entered from any state when a
	 * Soft_Reset Message is received from the Protocol Layer.
	 */
	pe_set_state(dev, PE_SOFT_RESET);
}

/**
 * @brief Informs the Policy Engine that a message was successfully sent
 */
void pe_message_sent(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

	atomic_set_bit(pe->flags, PE_FLAGS_TX_COMPLETE);
}

/**
 * @brief See section 8.3.3.4.1.1 PE_SRC_Send_Soft_Reset State:
 *
 * The PE_Send_Soft_Reset state shall be entered from
 * any state when
 * * A Protocol Error is detected by Protocol Layer during a
 *   Non-Interruptible AMS or
 * * A message has not been sent after retries or
 * * When not in an explicit contract and
 *   * Protocol Errors occurred on SOP during an Interruptible AMS or
 *   * Protocol Errors occurred on SOP during any AMS where the first
 *     Message in the sequence has not yet been sent i.e. an unexpected
 *     Message is received instead of the expected GoodCRC Message
 *     response.
 */
static bool pe_soft_reset_is_required(const struct device *dev, const enum pd_packet_type type)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

	/* Protocol Error not on SOP */
	if (type != PD_PACKET_SOP) {
		return false;
	}

	if (atomic_test_bit(pe->flags, PE_FLAGS_EXPLICIT_CONTRACT)) {
		/*
		 * If the first Message in an AMS has been passed to the
		 * Protocol Layer by the Policy Engine but has not yet been sent
		 * when the Protocol Error occurs, the Policy Engine Shall Not
		 * issue a Soft Reset
		 */
		if (!atomic_test_bit(pe->flags, PE_FLAGS_FIRST_MSG_SENT)) {
			return false;
		}

		/*
		 * If the Protocol Error occurs during an Interruptible AMS then
		 * the Policy Engine Shall Not issue a Soft Reset
		 */
		if (atomic_test_bit(pe->flags, PE_FLAGS_INTERRUPTIBLE_AMS)) {
			return false;
		}
	}

	return true;
}

/**
 * @brief Informs the Policy Engine of an error.
 */
void pe_report_error(const struct device *dev, const enum pe_error e,
		     const enum pd_packet_type type)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

	/*
	 * Generate Hard Reset if Protocol Error occurred
	 * while in PE_Send_Soft_Reset state.
	 */
	if (pe_get_state(dev) == PE_SEND_SOFT_RESET ||
	    pe_get_state(dev) == PE_SOFT_RESET) {
		atomic_set_bit(pe->flags, PE_FLAGS_PROTOCOL_ERROR);
		return;
	}

	/* Transmit error */
	if (e == ERR_XMIT) {
		atomic_set_bit(pe->flags, PE_FLAGS_MSG_XMIT_ERROR);
	}
	/* All error types besides transmit errors are Protocol Errors. */
	else if (pe_soft_reset_is_required(dev, type)) {
		policy_notify(dev, PROTOCOL_ERROR);
		pe_send_soft_reset(dev, type);
	}
	/*
	 * Transition to PE_SNK_Ready on a Protocol
	 * Error during an Interruptible AMS.
	 */
	else {
		pe_set_state(dev, PE_SNK_READY);
	}
}

/**
 * @brief Informs the Policy Engine of a discard.
 */
void pe_report_discard(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

	/*
	 * Clear local AMS indicator as our AMS message was discarded, and flag
	 * the discard for the PE
	 */
	pe_dpm_end_ams(dev);
	atomic_set_bit(pe->flags, PE_FLAGS_MSG_DISCARDED);
}

/**
 * @brief Called by the Protocol Layer to inform the Policy Engine
 *	  that a message has been received.
 */
void pe_message_received(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

	atomic_set_bit(pe->flags, PE_FLAGS_MSG_RECEIVED);

	/* Allow the PE to be executed once more and respond faster for the received message */
	usbc_bypass_next_sleep(dev);
}

/**
 * @brief Informs the Policy Engine that a hard reset was received.
 */
void pe_got_hard_reset(const struct device *dev)
{
	pe_set_state(dev, PE_SNK_TRANSITION_TO_DEFAULT);
}

/**
 * @brief Informs the Policy Engine that a hard reset was sent.
 */
void pe_hard_reset_sent(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

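	/* Hard Reset signaling has completed, so clear the pending flag */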
	atomic_clear_bit(pe->flags, PE_FLAGS_HARD_RESET_PENDING);
}

/**
 * @brief Indicates if an explicit contract is in place
 */
bool pe_is_explicit_contract(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

	return atomic_test_bit(pe->flags, PE_FLAGS_EXPLICIT_CONTRACT);
}

/**
 * @brief Return true if the PE is within an atomic messaging sequence
 *	  that it initiated with a SOP* port partner.
 */
bool pe_dpm_initiated_ams(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

	return atomic_test_bit(pe->flags, PE_FLAGS_DPM_INITIATED_AMS);
}

/**
 * @brief End an atomic messaging sequence
 */
void pe_dpm_end_ams(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

	atomic_clear_bit(pe->flags, PE_FLAGS_DPM_INITIATED_AMS);
}

/**
 * @brief First message in AMS has been sent
 */
void pe_first_msg_sent(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

	atomic_set_bit(pe->flags, PE_FLAGS_FIRST_MSG_SENT);
}

/** Private Policy Engine Layer API below */

/**
 * @brief Sets a Policy Engine state
 */
void pe_set_state(const struct device *dev, const enum usbc_pe_state state)
{
	struct usbc_port_data *data = dev->data;

	__ASSERT(state < ARRAY_SIZE(pe_states), "invalid pe_state %d", state);
	smf_set_state(SMF_CTX(data->pe), &pe_states[state]);

	/* Allow the PE to execute logic from the new state without additional delay */
	usbc_bypass_next_sleep(dev);
}

/**
 * @brief Get the Policy Engine's current state
 */
enum usbc_pe_state pe_get_state(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;

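	/* ctx.current points into pe_states[], so the offset is the enum value */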
	return data->pe->ctx.current - &pe_states[0];
}

/**
 * @brief Get the Policy Engine's previous state
 */
enum usbc_pe_state pe_get_last_state(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;

	return data->pe->ctx.previous - &pe_states[0];
}

/**
 * @brief Send a soft reset message
 */
void pe_send_soft_reset(const struct device *dev, const enum pd_packet_type type)
{
	struct usbc_port_data *data = dev->data;

	data->pe->soft_reset_sop = type;
	pe_set_state(dev, PE_SEND_SOFT_RESET);
}

/**
 * @brief Send a Power Delivery Data Message
 */
void pe_send_data_msg(const struct device *dev, const enum pd_packet_type type,
			const enum pd_data_msg_type msg)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

	/* Clear any previous TX status before sending a new message */
	atomic_clear_bit(pe->flags, PE_FLAGS_TX_COMPLETE);
	prl_send_data_msg(dev, type, msg);
}

/**
 * @brief Send a Power Delivery Control Message
 */
void pe_send_ctrl_msg(const struct device *dev, const enum pd_packet_type type,
			const enum pd_ctrl_msg_type msg)
{
	struct usbc_port_data *data = dev->data;
	struct policy_engine *pe = data->pe;

	/* Clear any previous TX status before sending a new message */
	atomic_clear_bit(pe->flags, PE_FLAGS_TX_COMPLETE);
	prl_send_ctrl_msg(dev, type, msg);
}

/**
 * @brief Request desired voltage from source.
 */
void pe_send_request_msg(const struct device *dev, const uint32_t rdo)
{
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_tx_t *prl_tx = data->prl_tx;
	struct pd_msg *msg = &prl_tx->emsg;
	uint8_t rdo_bytes[4];

	msg->len = sizeof(rdo);
	sys_put_le32(rdo, rdo_bytes);
	memcpy(msg->data, rdo_bytes, msg->len);
	pe_send_data_msg(dev, PD_PACKET_SOP, PD_DATA_REQUEST);
}
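
/*
 * Typical usage (sketch): a sink state forwards the Request Data Object
 * chosen by the Device Policy Manager, e.g.
 *
 *	pe_send_request_msg(dev, policy_get_request_data_object(dev));
 */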

/**
 * @brief Transitions state after receiving an extended message.
 */
void extended_message_not_supported(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_rx_t *prl_rx = data->prl_rx;
	uint32_t *payload = (uint32_t *)prl_rx->emsg.data;
	union pd_ext_header ext_header;

	ext_header.raw_value = *payload;

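	/*
	 * A chunked extended message larger than a single chunk is parked in
	 * PE_Chunk_Received until the ChunkingNotSupportedTimer expires;
	 * anything else is answered with Not_Supported immediately.
	 */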
	if (ext_header.chunked && ext_header.data_size > PD_MAX_EXTENDED_MSG_CHUNK_LEN) {
		pe_set_state(dev, PE_CHUNK_RECEIVED);
	} else {
		pe_set_state(dev, PE_SEND_NOT_SUPPORTED);
	}
}

/**
 * @brief Check if a specific control message was received
 */
bool received_control_message(const struct device *dev, const union pd_header header,
			      const enum pd_ctrl_msg_type mt)
{
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_rx_t *prl_rx = data->prl_rx;

	if (prl_rx->emsg.len == 0 && header.message_type == mt && header.extended == 0) {
		return true;
	}

	return false;
}

/**
 * @brief Check if a specific data message was received
 */
bool received_data_message(const struct device *dev, const union pd_header header,
			   const enum pd_data_msg_type mt)
{
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_rx_t *prl_rx = data->prl_rx;

	if (prl_rx->emsg.len > 0 && header.message_type == mt && header.extended == 0) {
		return true;
	}

	return false;
}

/**
 * @brief Check a DPM policy
 */
bool policy_check(const struct device *dev, const enum usbc_policy_check_t pc)
{
	struct usbc_port_data *data = dev->data;

	if (data->policy_cb_check) {
		return data->policy_cb_check(dev, pc);
	} else {
		return false;
	}
}

/**
 * @brief Notify the DPM of a policy change
 */
void policy_notify(const struct device *dev, const enum usbc_policy_notify_t notify)
{
	struct usbc_port_data *data = dev->data;

	if (data->policy_cb_notify) {
		data->policy_cb_notify(dev, notify);
	}
}

/**
 * @brief Notify the DPM of a WAIT message reception
 */
bool policy_wait_notify(const struct device *dev, const enum usbc_policy_wait_t notify)
{
	struct usbc_port_data *data = dev->data;

	if (data->policy_cb_wait_notify) {
		return data->policy_cb_wait_notify(dev, notify);
	}

	return false;
}

#ifdef CONFIG_USBC_CSM_SINK_ONLY

/**
 * @brief Get a Request Data Object from the DPM
 */
uint32_t policy_get_request_data_object(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;

	/* This callback must be implemented */
	__ASSERT(data->policy_cb_get_rdo != NULL, "Callback pointer should not be NULL");

	return data->policy_cb_get_rdo(dev);
}

/**
 * @brief Send the received source caps to the DPM
 */
void policy_set_src_cap(const struct device *dev, const uint32_t *pdos, const int num_pdos)
{
	struct usbc_port_data *data = dev->data;

	if (data->policy_cb_set_src_cap) {
		data->policy_cb_set_src_cap(dev, pdos, num_pdos);
	}
}

/**
 * @brief Check if the sink is at the default level
 */
bool policy_is_snk_at_default(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;

	if (data->policy_cb_is_snk_at_default) {
		return data->policy_cb_is_snk_at_default(dev);
	}

	return true;
}

/**
 * @brief Get sink caps from the DPM
 */
void policy_get_snk_cap(const struct device *dev, uint32_t **pdos, int *num_pdos)
{
	struct usbc_port_data *data = dev->data;

	/* This callback must be implemented */
	__ASSERT(data->policy_cb_get_snk_cap != NULL, "Callback pointer should not be NULL");

	data->policy_cb_get_snk_cap(dev, pdos, num_pdos);
}

#else /* CONFIG_USBC_CSM_SOURCE_ONLY */

/**
 * @brief Send the received sink caps to the DPM
 */
void policy_set_port_partner_snk_cap(const struct device *dev,
			const uint32_t *pdos,
			const int num_pdos)
{
	struct usbc_port_data *data = dev->data;

	if (data->policy_cb_set_port_partner_snk_cap) {
		data->policy_cb_set_port_partner_snk_cap(dev, pdos, num_pdos);
	}
}

/**
 * @brief Check if Sink Request can be met by DPM
 */
enum usbc_snk_req_reply_t policy_check_sink_request(const struct device *dev,
				const uint32_t request_msg)
{
	struct usbc_port_data *data = dev->data;

	/* This callback must be implemented */
	__ASSERT(data->policy_cb_check_sink_request != NULL,
		"Callback pointer should not be NULL");

	return data->policy_cb_check_sink_request(dev, request_msg);
}

/**
 * @brief Check if the present contract is still valid
 */
bool policy_present_contract_is_valid(const struct device *dev,
				const uint32_t present_contract)
{
	struct usbc_port_data *data = dev->data;

	/* This callback must be implemented */
	__ASSERT(data->policy_present_contract_is_valid != NULL,
		"Callback pointer should not be NULL");

	return data->policy_present_contract_is_valid(dev, present_contract);
}

/**
 * @brief Check if the power supply is ready
 */
bool policy_is_ps_ready(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;

	/* This callback must be implemented */
	__ASSERT(data->policy_is_ps_ready != NULL,
		"Callback pointer should not be NULL");

	return data->policy_is_ps_ready(dev);
}

/**
 * @brief Ask the DPM to change the Source Caps.
 *	  Returns true if source caps have been updated, else false
 */
bool policy_change_src_caps(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;

	if (data->policy_change_src_caps == NULL) {
		return false;
	}

	return data->policy_change_src_caps(dev);
}

#endif /* CONFIG_USBC_CSM_SINK_ONLY */

/**
 * @brief PE_DRS_Evaluate_Swap Entry state
 */
static void pe_drs_evaluate_swap_entry(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	/* Get evaluation of Data Role Swap request from Device Policy Manager */
	if (policy_check(dev, (pe->data_role == TC_ROLE_UFP) ? CHECK_DATA_ROLE_SWAP_TO_DFP
							     : CHECK_DATA_ROLE_SWAP_TO_UFP)) {
		/*
		 * PE_DRS_DFP_UFP_Accept_Swap and PE_DRS_UFP_DFP_Accept_Swap
		 * State embedded here
		 */
		/* Send Accept message */
		pe_send_ctrl_msg(dev, PD_PACKET_SOP, PD_CTRL_ACCEPT);
	} else {
		/*
		 * PE_DRS_DFP_UFP_Reject_Swap and PE_DRS_UFP_DFP_Reject_Swap
		 * State embedded here
		 */
		/* Send Reject message */
		pe_send_ctrl_msg(dev, PD_PACKET_SOP, PD_CTRL_REJECT);
	}
}

/**
 * @brief PE_DRS_Evaluate_Swap Run state
 */
static void pe_drs_evaluate_swap_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_tx_t *prl_tx = data->prl_tx;
	struct protocol_layer_rx_t *prl_rx = data->prl_rx;

	if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_TX_COMPLETE)) {
		/* Only update data roles if last message sent was Accept */
		if (prl_tx->msg_type == PD_CTRL_ACCEPT) {
			/* Update Data Role */
			pe_set_data_role(dev, (pe->data_role == TC_ROLE_UFP)
						? TC_ROLE_DFP : TC_ROLE_UFP);
			/* Inform Device Policy Manager of Data Role Change */
			policy_notify(dev, (pe->data_role == TC_ROLE_UFP) ? DATA_ROLE_IS_UFP
									  : DATA_ROLE_IS_DFP);
		}
		pe_set_state(dev, PE_SNK_READY);
	} else if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_DISCARDED)) {
		/*
		 * Inform Device Policy Manager that the message was
		 * discarded
		 */
		policy_notify(dev, MSG_DISCARDED);
		pe_send_soft_reset(dev, prl_rx->emsg.type);
	}
}

/**
 * @brief PE_DRS_Send_Swap Entry state
 *	  NOTE: 8.3.3.18.1.5 PE_DRS_DFP_UFP_Send_Swap State
 *		8.3.3.18.2.5 PE_DRS_UFP_DFP_Send_Swap State
 */
static void pe_drs_send_swap_entry(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	/* Send Swap DR message */
	pe_send_ctrl_msg(dev, PD_PACKET_SOP, PD_CTRL_DR_SWAP);
}

/**
 * @brief PE_DRS_Send_Swap Run state
 *	  NOTE: Sender Response Timer is handled in super state.
 */
static void pe_drs_send_swap_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_rx_t *prl_rx = data->prl_rx;
	union pd_header header;

	if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_RECEIVED)) {
		header = prl_rx->emsg.header;
		if (received_control_message(dev, header, PD_CTRL_REJECT)) {
			/*
			 * Inform Device Policy Manager that Data Role Swap
			 * was Rejected
			 */
			policy_notify(dev, MSG_REJECTED_RECEIVED);
		} else if (received_control_message(dev, header, PD_CTRL_WAIT)) {
			/*
			 * Inform Device Policy Manager that Data Role Swap
			 * needs to Wait
			 */
			if (policy_wait_notify(dev, WAIT_DATA_ROLE_SWAP)) {
				atomic_set_bit(pe->flags, PE_FLAGS_WAIT_DATA_ROLE_SWAP);
				usbc_timer_start(&pe->pd_t_wait_to_resend);
			}
		} else if (received_control_message(dev, header, PD_CTRL_ACCEPT)) {
			/* Update Data Role */
			pe->data_role = (pe->data_role == TC_ROLE_UFP) ? TC_ROLE_DFP : TC_ROLE_UFP;
			/* Notify TCPC of role update */
			tcpc_set_roles(data->tcpc, pe->power_role, pe->data_role);
			/* Inform Device Policy Manager of Data Role Change */
			policy_notify(dev, (pe->data_role == TC_ROLE_UFP) ? DATA_ROLE_IS_UFP
									  : DATA_ROLE_IS_DFP);
		} else {
			/*
			 * A Protocol Error during a Data Role Swap when the
			 * DFP/UFP roles are changing shall directly trigger
			 * a Type-C Error Recovery.
			 */
			usbc_request(dev, REQUEST_TC_ERROR_RECOVERY);
			return;
		}

		/* return to ready state */
		pe_set_state(dev, PE_SNK_READY);
		return;
	} else if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_DISCARDED)) {
		/*
		 * Inform Device Policy Manager that the message
		 * was discarded
		 */
		policy_notify(dev, MSG_DISCARDED);
		pe_set_state(dev, PE_SNK_READY);
		return;
	}
}

/**
 * @brief PE_Get_Sink_Cap Entry state
 *	  NOTE: 8.3.3.18.7.1 PE_DR_SRC_Get_Source_Cap State
 *		8.3.3.18.9.1 PE_DR_SNK_Get_Sink_Cap State
 */
void pe_get_sink_cap_entry(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	/*
	 * On entry to the PE_DR_SNK_Get_Sink_Cap state the Policy Engine
	 * Shall send a Get_Sink_Cap Message and initialize and run the
	 * SenderResponseTimer.
	 */
	pe_send_ctrl_msg(dev, PD_PACKET_SOP, PD_CTRL_GET_SINK_CAP);
	/* Initialize Submachine */
	pe->submachine = SM_WAIT_FOR_TX;
}

/**
 * @brief PE_Get_Sink_Cap Run state
 *        NOTE: Sender Response Timer is handled in super state.
 */
void pe_get_sink_cap_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_rx_t *prl_rx = data->prl_rx;
	union pd_header header;

	switch (pe->submachine) {
	case SM_WAIT_FOR_TX:
		if (!atomic_test_and_clear_bit(pe->flags, PE_FLAGS_TX_COMPLETE)) {
			break;
		}
		pe->submachine = SM_WAIT_FOR_RX;
		/* fall through */
	case SM_WAIT_FOR_RX:
		if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_RECEIVED)) {
			header = prl_rx->emsg.header;

			if (prl_rx->emsg.type == PD_PACKET_SOP) {
				if (received_data_message(dev, header, PD_DATA_SINK_CAP)) {
#ifdef CONFIG_USBC_CSM_SOURCE_ONLY
					uint32_t *pdos = (uint32_t *)prl_rx->emsg.data;
					uint32_t num_pdos =
					PD_CONVERT_BYTES_TO_PD_HEADER_COUNT(prl_rx->emsg.len);

					policy_set_port_partner_snk_cap(dev, pdos, num_pdos);
					pe_set_state(dev, PE_SRC_READY);
#else
					pe_set_state(dev, PE_SNK_READY);
#endif
					return;
				} else if (received_control_message(dev, header, PD_CTRL_REJECT) ||
					received_control_message(dev,
							header, PD_CTRL_NOT_SUPPORTED)) {
#ifdef CONFIG_USBC_CSM_SOURCE_ONLY
					pe_set_state(dev, PE_SRC_READY);
#else
					pe_set_state(dev, PE_SNK_READY);
#endif
					return;
				}
				/* Unexpected messages fall through to soft reset */
			}
			pe_send_soft_reset(dev, PD_PACKET_SOP);
			return;
		}
		/*
		 * Inform Device Policy Manager that the message
		 * was discarded
		 */
		else if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_DISCARDED)) {
			policy_notify(dev, MSG_DISCARDED);
			pe_set_state(dev, PE_SNK_READY);
			return;
		}
	}
}

static void pe_suspend_entry(void *obj)
{
	LOG_INF("PE_SUSPEND");
}

static void pe_suspend_run(void *obj)
{
	/* DO NOTHING */
}

/**
 * @brief The PE_SOFT_RESET state has two embedded states
 *	  that handle sending an accept message.
 */
enum pe_soft_reset_submachine_states {
	/* Send Accept message sub state */
	PE_SOFT_RESET_RUN_SEND_ACCEPT_MSG,
	/* Wait for Accept message to be sent or an error sub state */
	PE_SOFT_RESET_RUN_SEND_ACCEPT_MSG_COMPLETE
};

/**
 * @brief 8.3.3.4.2.2 PE_SNK_Soft_Reset State
 */
static void pe_soft_reset_entry(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	/* Reset the protocol layer */
	prl_reset(dev);

	/* Initialize PE Submachine */
	pe->submachine = PE_SOFT_RESET_RUN_SEND_ACCEPT_MSG;
}

static void pe_soft_reset_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	if (!prl_is_running(dev)) {
		return;
	}

	switch (pe->submachine) {
	case PE_SOFT_RESET_RUN_SEND_ACCEPT_MSG:
		/* Send Accept message to SOP */
		pe_send_ctrl_msg(dev, PD_PACKET_SOP, PD_CTRL_ACCEPT);
		/* Move to next substate */
		pe->submachine = PE_SOFT_RESET_RUN_SEND_ACCEPT_MSG_COMPLETE;
		break;
	case PE_SOFT_RESET_RUN_SEND_ACCEPT_MSG_COMPLETE:
		/*
		 * The Policy Engine Shall transition to the
		 * PE_SRC_Send_Capabilities state when:
		 *      1: Accept message sent to SOP
		 */
		if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_TX_COMPLETE)) {
			pe_set_state(dev, PE_SNK_WAIT_FOR_CAPABILITIES);
		} else if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_PROTOCOL_ERROR)) {
			/*
			 * The Policy Engine Shall transition to the
			 * PE_SRC_Hard_Reset state when:
			 *      1: Protocol Layer indicates that a
			 *         transmission error has occurred.
			 */
			pe_set_state(dev, PE_SNK_HARD_RESET);
		}
		break;
	}
}

/**
 * @brief PE_Send_Soft_Reset Entry State
 *	  NOTE: Sender Response Timer is handled in super state.
 */
static void pe_send_soft_reset_entry(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	LOG_INF("PE_SNK_Send_Soft_Reset");

	/* Reset Protocol Layer */
	prl_reset(dev);
	atomic_set_bit(pe->flags, PE_FLAGS_SEND_SOFT_RESET);
}

/**
 * @brief PE_Send_Soft_Reset Run State
 */
static void pe_send_soft_reset_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_rx_t *prl_rx = data->prl_rx;
	union pd_header header;

	if (prl_is_running(dev) == false) {
		return;
	}

	if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_SEND_SOFT_RESET)) {
		/* Send Soft Reset message */
		pe_send_ctrl_msg(dev, pe->soft_reset_sop, PD_CTRL_SOFT_RESET);
		return;
	}

	if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_DISCARDED)) {
		/* Inform Device Policy Manager that the message was discarded */
		policy_notify(dev, MSG_DISCARDED);
		pe_set_state(dev, PE_SNK_READY);
	} else if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_MSG_RECEIVED)) {
		/*
		 * The Policy Engine Shall transition to the PE_SNK_Wait_for_Capabilities
		 * state when:
		 *      1: An Accept Message has been received on SOP
		 */
		header = prl_rx->emsg.header;

		if (received_control_message(dev, header, PD_CTRL_ACCEPT)) {
			pe_set_state(dev, PE_SNK_WAIT_FOR_CAPABILITIES);
		}
	} else if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_PROTOCOL_ERROR)) {
		/*
		 * The Policy Engine Shall transition to the PE_SNK_Hard_Reset state when:
		 *      1: A SenderResponseTimer timeout occurs (Handled in Super State)
		 *      2: Or the Protocol Layer indicates that a transmission error has occurred
		 */
		pe_set_state(dev, PE_SNK_HARD_RESET);
	}
}

/**
 * @brief 8.3.3.6.2.1 PE_SNK_Send_Not_Supported State
 */
static void pe_send_not_supported_entry(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	LOG_INF("PE_Not_Supported");

	/* Notify the Device Policy Manager of unsupported message reception */
	policy_notify(dev, MSG_NOT_SUPPORTED_RECEIVED);

	/* Request the Protocol Layer to send a Not_Supported or Reject Message. */
	if (prl_get_rev(dev, PD_PACKET_SOP) > PD_REV20) {
		pe_send_ctrl_msg(dev, PD_PACKET_SOP, PD_CTRL_NOT_SUPPORTED);
	} else {
		pe_send_ctrl_msg(dev, PD_PACKET_SOP, PD_CTRL_REJECT);
	}
}

static void pe_send_not_supported_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	if (atomic_test_bit(pe->flags, PE_FLAGS_TX_COMPLETE) ||
			atomic_test_bit(pe->flags, PE_FLAGS_MSG_DISCARDED)) {
		atomic_clear_bit(pe->flags, PE_FLAGS_TX_COMPLETE);
		atomic_clear_bit(pe->flags, PE_FLAGS_MSG_DISCARDED);
		pe_set_state(dev, PE_SNK_READY);
	}
}

/**
 * @brief 8.3.3.6.2.3 PE_SNK_Chunk_Received State
 */
static void pe_chunk_received_entry(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;

	LOG_INF("PE_SNK_Chunk_Received");

	/*
	 * On entry to the PE_SNK_Chunk_Received state, the Policy Engine
	 * Shall initialize and run the ChunkingNotSupportedTimer.
	 */
	usbc_timer_start(&pe->pd_t_chunking_not_supported);
}

/**
 * @brief PE_Chunk_Received Run State
 */
static void pe_chunk_received_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;

	/*
	 * The Policy Engine Shall transition to PE_SNK_Send_Not_Supported
	 * when:
	 *	1: The ChunkingNotSupportedTimer has timed out.
	 */
	if (usbc_timer_expired(&pe->pd_t_chunking_not_supported)) {
		pe_set_state(dev, PE_SEND_NOT_SUPPORTED);
	}
}

/**
 * @brief Super State for any message that requires
 *	  Sender Response Timer functionality
 */
static void pe_sender_response_run(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;
	const struct device *dev = pe->dev;
	enum usbc_pe_state current_state = pe_get_state(dev);

	/* Start the Sender Response Timer after the message is sent */
	if (atomic_test_and_clear_bit(pe->flags, PE_FLAGS_TX_COMPLETE)) {
		/* Start Sender Response Timer */
		usbc_timer_start(&pe->pd_t_sender_response);
	}

	/* Check if the Sender Response Timer has expired */
	if (usbc_timer_expired(&pe->pd_t_sender_response)) {
		/*
		 * Handle Sender Response Timeouts
		 */
		switch (current_state) {
#ifdef CONFIG_USBC_CSM_SINK_ONLY
		/* Sink states */
		case PE_SNK_SELECT_CAPABILITY:
			pe_set_state(dev, PE_SNK_HARD_RESET);
			break;
		case PE_SNK_GET_SOURCE_CAP:
			pe_set_state(dev, PE_SNK_READY);
			break;
#else
		/* Source states */
		case PE_SRC_DISCOVERY:
			/*
			 * The Policy Engine Shall go to the PE_SRC_Disabled state when:
			 *      1) The Port Partners have not been PD Connected
			 *      2) And the NoResponseTimer times out
			 *      3) And the HardResetCounter > nHardResetCount.
			 */
			if ((atomic_test_bit(pe->flags, PE_FLAGS_HAS_BEEN_PD_CONNECTED) == false)
					&& pe->hard_reset_counter > PD_N_HARD_RESET_COUNT) {
				pe_set_state(dev, PE_SUSPEND);
			}
			break;
		case PE_SRC_SEND_CAPABILITIES:
			/*
			 * The Policy Engine Shall go to the ErrorRecovery state when:
			 *      1) The Port Partners have previously been PD Connected
			 *      2) And the NoResponseTimer times out
			 *      3) And the HardResetCounter > nHardResetCount
			 */
			if (atomic_test_bit(pe->flags, PE_FLAGS_HAS_BEEN_PD_CONNECTED)
					&& pe->hard_reset_counter > PD_N_HARD_RESET_COUNT) {
				usbc_request(dev, REQUEST_TC_ERROR_RECOVERY);
			}
			/*
			 * The Policy Engine Shall transition to the PE_SRC_Hard_Reset
			 * state when:
			 *      1) The SenderResponseTimer times out
			 */
			else {
				pe_set_state(dev, PE_SRC_HARD_RESET);
			}
			break;
		case PE_GET_SINK_CAP:
			pe_send_soft_reset(dev, PD_PACKET_SOP);
			break;
#endif
		/*
		 * Common states:
		 * Could transition to Sink or Source states,
		 * depending on the current Data Role
		 */
		case PE_SEND_SOFT_RESET:
			pe_set_state(dev, PE_SNK_HARD_RESET);
			break;
		case PE_DRS_SEND_SWAP:
			pe_set_state(dev, PE_SNK_READY);
			break;

		/* This should not happen. Implementation error */
		default:
			LOG_INF("Unhandled Sender Response Timeout State!");
		}
	}
}

static void pe_sender_response_exit(void *obj)
{
	struct policy_engine *pe = (struct policy_engine *)obj;

	/* Stop Sender Response Timer */
	usbc_timer_stop(&pe->pd_t_sender_response);
}
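
/*
 * Note: states created below with a parent state (e.g. PE_DRS_SEND_SWAP,
 * PE_GET_SINK_CAP, PE_SEND_SOFT_RESET) share PE_SENDER_RESPONSE_PARENT's
 * run/exit handlers, so the SenderResponseTimer is armed on TX completion
 * and stopped on state exit without per-state code.
 */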

/**
 * @brief Policy engine State table
 */
static const struct smf_state pe_states[PE_STATE_COUNT] = {
	/* PE Super States */
	[PE_SENDER_RESPONSE_PARENT] = SMF_CREATE_STATE(
		NULL,
		pe_sender_response_run,
		pe_sender_response_exit,
		NULL,
		NULL),
#ifdef CONFIG_USBC_CSM_SOURCE_ONLY
	[PE_SRC_HARD_RESET_PARENT] = SMF_CREATE_STATE(
		pe_src_hard_reset_parent_entry,
		pe_src_hard_reset_parent_run,
		pe_src_hard_reset_parent_exit,
		NULL,
		NULL),
#endif
#ifdef CONFIG_USBC_CSM_SINK_ONLY
	[PE_SNK_STARTUP] = SMF_CREATE_STATE(
		pe_snk_startup_entry,
		pe_snk_startup_run,
		NULL,
		NULL,
		NULL),
	[PE_SNK_DISCOVERY] = SMF_CREATE_STATE(
		pe_snk_discovery_entry,
		pe_snk_discovery_run,
		NULL,
		NULL,
		NULL),
	[PE_SNK_WAIT_FOR_CAPABILITIES] = SMF_CREATE_STATE(
		pe_snk_wait_for_capabilities_entry,
		pe_snk_wait_for_capabilities_run,
		pe_snk_wait_for_capabilities_exit,
		NULL,
		NULL),
	[PE_SNK_EVALUATE_CAPABILITY] = SMF_CREATE_STATE(
		pe_snk_evaluate_capability_entry,
		NULL,
		NULL,
		NULL,
		NULL),
	[PE_SNK_SELECT_CAPABILITY] = SMF_CREATE_STATE(
		pe_snk_select_capability_entry,
		pe_snk_select_capability_run,
		NULL,
		&pe_states[PE_SENDER_RESPONSE_PARENT],
		NULL),
	[PE_SNK_READY] = SMF_CREATE_STATE(
		pe_snk_ready_entry,
		pe_snk_ready_run,
		pe_snk_ready_exit,
		NULL,
		NULL),
	[PE_SNK_HARD_RESET] = SMF_CREATE_STATE(
		pe_snk_hard_reset_entry,
		pe_snk_hard_reset_run,
		NULL,
		NULL,
		NULL),
	[PE_SNK_TRANSITION_TO_DEFAULT] = SMF_CREATE_STATE(
		pe_snk_transition_to_default_entry,
		pe_snk_transition_to_default_run,
		NULL,
		NULL,
		NULL),
	[PE_SNK_GIVE_SINK_CAP] = SMF_CREATE_STATE(
		pe_snk_give_sink_cap_entry,
		pe_snk_give_sink_cap_run,
		NULL,
		NULL,
		NULL),
	[PE_SNK_GET_SOURCE_CAP] = SMF_CREATE_STATE(
		pe_snk_get_source_cap_entry,
		pe_snk_get_source_cap_run,
		NULL,
		&pe_states[PE_SENDER_RESPONSE_PARENT],
		NULL),
	[PE_SNK_TRANSITION_SINK] = SMF_CREATE_STATE(
		pe_snk_transition_sink_entry,
		pe_snk_transition_sink_run,
		pe_snk_transition_sink_exit,
		NULL,
		NULL),
#else
	[PE_SRC_STARTUP] = SMF_CREATE_STATE(
		pe_src_startup_entry,
		pe_src_startup_run,
		NULL,
		NULL,
		NULL),
	[PE_SRC_DISCOVERY] = SMF_CREATE_STATE(
		pe_src_discovery_entry,
		pe_src_discovery_run,
		pe_src_discovery_exit,
		&pe_states[PE_SENDER_RESPONSE_PARENT],
		NULL),
	[PE_SRC_SEND_CAPABILITIES] = SMF_CREATE_STATE(
		pe_src_send_capabilities_entry,
		pe_src_send_capabilities_run,
		NULL,
		&pe_states[PE_SENDER_RESPONSE_PARENT],
		NULL),
	[PE_SRC_NEGOTIATE_CAPABILITY] = SMF_CREATE_STATE(
		pe_src_negotiate_capability_entry,
		NULL,
		NULL,
		NULL,
		NULL),
	[PE_SRC_CAPABILITY_RESPONSE] = SMF_CREATE_STATE(
		pe_src_capability_response_entry,
		pe_src_capability_response_run,
		NULL,
		NULL,
		NULL),
	[PE_SRC_TRANSITION_SUPPLY] = SMF_CREATE_STATE(
		pe_src_transition_supply_entry,
		pe_src_transition_supply_run,
		pe_src_transition_supply_exit,
		NULL,
		NULL),
	[PE_SRC_READY] = SMF_CREATE_STATE(
		pe_src_ready_entry,
		pe_src_ready_run,
		pe_src_ready_exit,
		NULL,
		NULL),
	[PE_SRC_TRANSITION_TO_DEFAULT] = SMF_CREATE_STATE(
		pe_src_transition_to_default_entry,
		pe_src_transition_to_default_run,
		pe_src_transition_to_default_exit,
		NULL,
		NULL),
	[PE_SRC_HARD_RESET_RECEIVED] = SMF_CREATE_STATE(
		NULL,
		NULL,
		NULL,
		&pe_states[PE_SRC_HARD_RESET_PARENT],
		NULL),
	[PE_SRC_HARD_RESET] = SMF_CREATE_STATE(
		pe_src_hard_reset_entry,
		NULL,
		NULL,
		&pe_states[PE_SRC_HARD_RESET_PARENT],
		NULL),
#endif
	[PE_GET_SINK_CAP] = SMF_CREATE_STATE(
		pe_get_sink_cap_entry,
		pe_get_sink_cap_run,
		NULL,
		&pe_states[PE_SENDER_RESPONSE_PARENT],
		NULL),
	[PE_SEND_SOFT_RESET] = SMF_CREATE_STATE(
		pe_send_soft_reset_entry,
		pe_send_soft_reset_run,
		NULL,
		&pe_states[PE_SENDER_RESPONSE_PARENT],
		NULL),
	[PE_SOFT_RESET] = SMF_CREATE_STATE(
		pe_soft_reset_entry,
		pe_soft_reset_run,
		NULL,
		NULL,
		NULL),
	[PE_SEND_NOT_SUPPORTED] = SMF_CREATE_STATE(
		pe_send_not_supported_entry,
		pe_send_not_supported_run,
		NULL,
		NULL,
		NULL),
	[PE_DRS_EVALUATE_SWAP] = SMF_CREATE_STATE(
		pe_drs_evaluate_swap_entry,
		pe_drs_evaluate_swap_run,
		NULL,
		NULL,
		NULL),
	[PE_DRS_SEND_SWAP] = SMF_CREATE_STATE(
		pe_drs_send_swap_entry,
		pe_drs_send_swap_run,
		NULL,
		&pe_states[PE_SENDER_RESPONSE_PARENT],
		NULL),
	[PE_CHUNK_RECEIVED] = SMF_CREATE_STATE(
		pe_chunk_received_entry,
		pe_chunk_received_run,
		NULL,
		NULL,
		NULL),
	[PE_SUSPEND] = SMF_CREATE_STATE(
		pe_suspend_entry,
		pe_suspend_run,
		NULL,
		NULL,
		NULL),
};
BUILD_ASSERT(ARRAY_SIZE(pe_states) == PE_STATE_COUNT);