/*
 * Copyright (c) 2022 The Chromium OS Authors
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/smf.h>
#include <zephyr/usb_c/usbc.h>
#include <zephyr/drivers/usb_c/usbc_pd.h>
#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(usbc_stack, CONFIG_USBC_STACK_LOG_LEVEL);

#include "usbc_stack.h"
/**
 * @file
 * @brief USB Power Delivery Protocol Layer (PRL)
 *
 * The PRL implementation in this file is based on the USB Power Delivery
 * Specification, Revision 3.1, Version 1.3.
 */

/**
 * @brief Protocol Layer Flags
 *
 * @note These flags are used in multiple state machines and could have
 * different meanings in each state machine.
 */
enum prl_flags {
	/** Flag to note message transmission completed */
	PRL_FLAGS_TX_COMPLETE = 0,
	/** Flag to note message was discarded */
	PRL_FLAGS_TX_DISCARDED = 1,
	/** Flag to note PRL waited for SINK_OK CC state before transmitting */
	PRL_FLAGS_WAIT_SINK_OK = 2,
	/** Flag to note transmission error occurred */
	PRL_FLAGS_TX_ERROR = 3,
	/** Flag to note PE triggered a hard reset */
	PRL_FLAGS_PE_HARD_RESET = 4,
	/** Flag to note hard reset has completed */
	PRL_FLAGS_HARD_RESET_COMPLETE = 5,
	/** Flag to note port partner sent a hard reset */
	PRL_FLAGS_PORT_PARTNER_HARD_RESET = 6,
	/**
	 * Flag to note a message transmission has been requested. It is only
	 * cleared when the message is sent to the TCPC layer.
	 */
	PRL_FLAGS_MSG_XMIT = 7,
	/** Flag to track if the first message in an AMS is pending */
	PRL_FLAGS_FIRST_MSG_PENDING = 8,
	/** Flag to note that the PRL requested to set the SINK_NG CC state */
	PRL_FLAGS_SINK_NG = 9,
};
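
/*
 * The flag bits above are indices into an atomic_t word and are manipulated
 * with Zephyr's atomic bit helpers, so the TCPC alert handler and the port
 * thread never race on them. A minimal sketch of the pattern used throughout
 * this file; the local `flags` word stands in for prl_tx->flags:
 *
 * @code{.c}
 * #include <zephyr/sys/atomic.h>
 *
 * atomic_t flags = ATOMIC_INIT(0);
 *
 * // Producer context (e.g. the TCPC alert handler) posts an event
 * atomic_set_bit(&flags, PRL_FLAGS_TX_COMPLETE);
 *
 * // Consumer context (the port thread) consumes the event exactly once
 * if (atomic_test_and_clear_bit(&flags, PRL_FLAGS_TX_COMPLETE)) {
 *	// handle the completed transmission
 * }
 * @endcode
 */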

/**
 * @brief Protocol Layer Transmission States
 */
enum usbc_prl_tx_state_t {
	/** PRL_Tx_PHY_Layer_Reset */
	PRL_TX_PHY_LAYER_RESET,
	/** PRL_Tx_Wait_for_Message_Request */
	PRL_TX_WAIT_FOR_MESSAGE_REQUEST,
	/** PRL_Tx_Layer_Reset_for_Transmit */
	PRL_TX_LAYER_RESET_FOR_TRANSMIT,
	/** PRL_Tx_Wait_for_PHY_response */
	PRL_TX_WAIT_FOR_PHY_RESPONSE,
	/** PRL_Tx_Snk_Start_of_AMS */
	PRL_TX_SNK_START_AMS,
	/** PRL_Tx_Snk_Pending */
	PRL_TX_SNK_PENDING,
	/** PRL_Tx_Discard_Message */
	PRL_TX_DISCARD_MESSAGE,
	/** PRL_Tx_Src_Source_Tx */
	PRL_TX_SRC_SOURCE_TX,
	/** PRL_Tx_Src_Pending */
	PRL_TX_SRC_PENDING,

	/** PRL_Tx_Suspend. Not part of the PD specification. */
	PRL_TX_SUSPEND,

	/** Number of PRL_TX States */
	PRL_TX_STATE_COUNT
};

/**
 * @brief Protocol Layer Hard Reset States
 */
enum usbc_prl_hr_state_t {
	/** PRL_HR_Wait_For_Request */
	PRL_HR_WAIT_FOR_REQUEST,
	/** PRL_HR_Reset_Layer */
	PRL_HR_RESET_LAYER,
	/** PRL_HR_Wait_For_PHY_Hard_Reset_Complete */
	PRL_HR_WAIT_FOR_PHY_HARD_RESET_COMPLETE,
	/** PRL_HR_Wait_For_PE_Hard_Reset_Complete */
	PRL_HR_WAIT_FOR_PE_HARD_RESET_COMPLETE,

	/** PRL_HR_Suspend. Not part of the PD specification. */
	PRL_HR_SUSPEND,

	/** Number of PRL_HR States */
	PRL_HR_STATE_COUNT
};

static const struct smf_state prl_tx_states[PRL_TX_STATE_COUNT];
static const struct smf_state prl_hr_states[PRL_HR_STATE_COUNT];

static void prl_tx_construct_message(const struct device *dev);
static void prl_rx_wait_for_phy_message(const struct device *dev);
static void prl_hr_set_state(const struct device *dev, const enum usbc_prl_hr_state_t state);
static void prl_tx_set_state(const struct device *dev, const enum usbc_prl_tx_state_t state);
static void prl_init(const struct device *dev);
static enum usbc_prl_hr_state_t prl_hr_get_state(const struct device *dev);

/**
 * @brief Initializes the TX and HR state machines and enters the
 * PRL_TX_SUSPEND and PRL_HR_SUSPEND states respectively.
 */
void prl_subsys_init(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_tx_t *prl_tx = data->prl_tx;
	struct protocol_hard_reset_t *prl_hr = data->prl_hr;

	/* Save the port device object so the states can access it */
	prl_tx->dev = dev;
	prl_hr->dev = dev;

	/* Initialize the state machines */
	smf_set_initial(SMF_CTX(prl_hr), &prl_hr_states[PRL_HR_SUSPEND]);
	smf_set_initial(SMF_CTX(prl_tx), &prl_tx_states[PRL_TX_SUSPEND]);
}

/**
 * @brief Test if the Protocol Layer State Machines are running
 *
 * @retval TRUE if the state machines are running
 * @retval FALSE if the state machines are paused
 */
bool prl_is_running(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;

	return data->prl_sm_state == SM_RUN;
}

/**
 * @brief Directs the Protocol Layer to perform a hard reset. This function
 * is called from the Policy Engine.
 */
void prl_execute_hard_reset(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct protocol_hard_reset_t *prl_hr = data->prl_hr;

	/* Only allow async. function calls when state machine is running */
	if (prl_is_running(dev) == false) {
		return;
	}

	atomic_set_bit(&prl_hr->flags, PRL_FLAGS_PE_HARD_RESET);
	prl_hr_set_state(dev, PRL_HR_RESET_LAYER);
}

/**
 * @brief Instructs the Protocol Layer that a hard reset is complete.
 * This function is called from the Policy Engine.
 */
void prl_hard_reset_complete(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct protocol_hard_reset_t *prl_hr = data->prl_hr;

	atomic_set_bit(&prl_hr->flags, PRL_FLAGS_HARD_RESET_COMPLETE);
}

/**
 * @brief Directs the Protocol Layer to construct and transmit a Power Delivery
 * Control message.
 */
void prl_send_ctrl_msg(const struct device *dev, const enum pd_packet_type type,
		       const enum pd_ctrl_msg_type msg)
{
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_tx_t *prl_tx = data->prl_tx;

	/* set packet type */
	prl_tx->emsg.type = type;
	/* set message type */
	prl_tx->msg_type = msg;
	/* control message: set data len to zero */
	prl_tx->emsg.len = 0;

	atomic_set_bit(&prl_tx->flags, PRL_FLAGS_MSG_XMIT);
}

/**
 * @brief Directs the Protocol Layer to construct and transmit a Power Delivery
 * Data message.
 *
 * @note Before calling this function prl_tx->emsg.data and prl_tx->emsg.len
 * must be set.
 */
void prl_send_data_msg(const struct device *dev, const enum pd_packet_type type,
		       const enum pd_data_msg_type msg)
{
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_tx_t *prl_tx = data->prl_tx;

	/* set packet type */
	prl_tx->emsg.type = type;
	/* set message type */
	prl_tx->msg_type = msg;

	atomic_set_bit(&prl_tx->flags, PRL_FLAGS_MSG_XMIT);
}
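
/*
 * A minimal usage sketch from the Policy Engine side, assuming `dev` is the
 * USB-C port device. The Request Data Object value is illustrative only:
 *
 * @code{.c}
 * // Control message: no payload, prl_send_ctrl_msg() zeroes emsg.len itself
 * prl_send_ctrl_msg(dev, PD_PACKET_SOP, PD_CTRL_ACCEPT);
 *
 * // Data message: emsg.data and emsg.len must be set before the call
 * struct usbc_port_data *data = dev->data;
 * uint32_t rdo = 0x1304b12c; // hypothetical Request Data Object
 *
 * memcpy(data->prl_tx->emsg.data, &rdo, sizeof(rdo));
 * data->prl_tx->emsg.len = sizeof(rdo);
 * prl_send_data_msg(dev, PD_PACKET_SOP, PD_DATA_REQUEST);
 * @endcode
 */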

/**
 * @brief Directs the Protocol Layer to reset the revision of each packet type
 * to its default value.
 */
void prl_set_default_pd_revision(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;

	/*
	 * Initialize to the highest revision supported. If the port or cable
	 * partner doesn't support this revision, the Policy Engine will
	 * lower this value to the revision supported by the partner.
	 */
	data->rev[PD_PACKET_SOP] = PD_REV30;
	data->rev[PD_PACKET_SOP_PRIME] = PD_REV30;
	data->rev[PD_PACKET_PRIME_PRIME] = PD_REV30;
	data->rev[PD_PACKET_DEBUG_PRIME] = PD_REV30;
	data->rev[PD_PACKET_DEBUG_PRIME_PRIME] = PD_REV30;
}

/**
 * @brief Start the Protocol Layer state machines
 */
void prl_start(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;

	data->prl_enabled = true;
}

/**
 * @brief Pause the Protocol Layer state machines
 */
void prl_suspend(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;

	data->prl_enabled = false;

	/*
	 * While we are paused, exit all states
	 * and wait until initialized again.
	 */
	prl_tx_set_state(dev, PRL_TX_SUSPEND);
	prl_hr_set_state(dev, PRL_HR_SUSPEND);
}

/**
 * @brief Reset the Protocol Layer state machines
 */
void prl_reset(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;

	if (data->prl_enabled) {
		data->prl_sm_state = SM_INIT;
	}
}

/**
 * @brief Inform the PRL that the first message in an AMS is being sent
 */
void prl_first_msg_notificaiton(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_tx_t *prl_tx = data->prl_tx;

	atomic_set_bit(&prl_tx->flags, PRL_FLAGS_FIRST_MSG_PENDING);
}

/**
 * @brief Run the Protocol Layer state machines
 */
void prl_run(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_tx_t *prl_tx = data->prl_tx;
	struct protocol_hard_reset_t *prl_hr = data->prl_hr;

	switch (data->prl_sm_state) {
	case SM_PAUSED:
		if (data->prl_enabled == false) {
			break;
		}
		/* fall through */
	case SM_INIT:
		prl_init(dev);
		data->prl_sm_state = SM_RUN;
		/* fall through */
	case SM_RUN:
		if (data->prl_enabled == false) {
			data->prl_sm_state = SM_PAUSED;
			/* Disable RX */
			tcpc_set_rx_enable(data->tcpc, false);
			break;
		}

		/* Run Protocol Layer Hard Reset state machine */
		smf_run_state(SMF_CTX(prl_hr));

		/*
		 * During Hard Reset no USB Power Delivery Protocol Messages
		 * are sent or received; only Hard Reset Signaling is present
		 * after which the communication channel is assumed to have
		 * been disabled by the Physical Layer until completion of
		 * the Hard Reset.
		 */
		if (prl_hr_get_state(dev) == PRL_HR_WAIT_FOR_REQUEST) {
			/* Run Protocol Layer Message Reception */
			prl_rx_wait_for_phy_message(dev);

			/* Run Protocol Layer Message Tx state machine */
			smf_run_state(SMF_CTX(prl_tx));
		}
		break;
	}
}
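
/*
 * prl_run() is driven periodically from the port thread together with the
 * other layer state machines. A simplified sketch of such a loop; the
 * tc_run()/pe_run() calls and the sleep period are assumptions standing in
 * for the actual usbc_stack port thread:
 *
 * @code{.c}
 * while (true) {
 *	tc_run(dev);   // Type-C layer
 *	pe_run(dev);   // Policy Engine
 *	prl_run(dev);  // Protocol Layer (this file)
 *	k_msleep(5);   // nominal state machine cycle time
 * }
 * @endcode
 *
 * alert_handler() below calls k_wakeup() on this thread, so the loop reacts
 * to TCPC events faster than its nominal period.
 */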

/**
 * @brief Set the revision for the given packet type. This function is called
 * from the Policy Engine.
 */
void prl_set_rev(const struct device *dev, const enum pd_packet_type type,
		 const enum pd_rev_type rev)
{
	struct usbc_port_data *data = dev->data;

	data->rev[type] = rev;
}

/**
 * @brief Get the revision for the given packet type.
 * This function is called from the Policy Engine.
 */
enum pd_rev_type prl_get_rev(const struct device *dev, const enum pd_packet_type type)
{
	struct usbc_port_data *data = dev->data;

	return data->rev[type];
}

/** Private Protocol Layer API below */

/**
 * @brief Alert Handler called by the TCPC driver
 */
static void alert_handler(const struct device *tcpc, void *port_dev, enum tcpc_alert alert)
{
	const struct device *dev = (const struct device *)port_dev;
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_tx_t *prl_tx = data->prl_tx;
	struct protocol_hard_reset_t *prl_hr = data->prl_hr;

	switch (alert) {
	case TCPC_ALERT_HARD_RESET_RECEIVED:
		atomic_set_bit(&prl_hr->flags, PRL_FLAGS_PORT_PARTNER_HARD_RESET);
		break;
	case TCPC_ALERT_TRANSMIT_MSG_FAILED:
		atomic_set_bit(&prl_tx->flags, PRL_FLAGS_TX_ERROR);
		break;
	case TCPC_ALERT_TRANSMIT_MSG_DISCARDED:
		atomic_set_bit(&prl_tx->flags, PRL_FLAGS_TX_DISCARDED);
		break;
	case TCPC_ALERT_TRANSMIT_MSG_SUCCESS:
		atomic_set_bit(&prl_tx->flags, PRL_FLAGS_TX_COMPLETE);
		break;
	/* These alerts are ignored and will just wake the thread. */
	default:
		break;
	}

	/* Wake the thread if it's sleeping */
	k_wakeup(data->port_thread);
}

/**
 * @brief Set the Protocol Layer Message Transmission state
 */
static void prl_tx_set_state(const struct device *dev, const enum usbc_prl_tx_state_t state)
{
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_tx_t *prl_tx = data->prl_tx;

	__ASSERT(state < ARRAY_SIZE(prl_tx_states), "invalid prl_tx_state %d", state);
	smf_set_state(SMF_CTX(prl_tx), &prl_tx_states[state]);
}

/**
 * @brief Set the Protocol Layer Hard Reset state
 */
static void prl_hr_set_state(const struct device *dev, const enum usbc_prl_hr_state_t state)
{
	struct usbc_port_data *data = dev->data;
	struct protocol_hard_reset_t *prl_hr = data->prl_hr;

	__ASSERT(state < ARRAY_SIZE(prl_hr_states), "invalid prl_hr_state %d", state);
	smf_set_state(SMF_CTX(prl_hr), &prl_hr_states[state]);
}

/**
 * @brief Get the Protocol Layer Hard Reset state
 */
static enum usbc_prl_hr_state_t prl_hr_get_state(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct protocol_hard_reset_t *prl_hr = data->prl_hr;

	return prl_hr->ctx.current - &prl_hr_states[0];
}

/**
 * @brief Increment the message ID counter for the last transmitted packet type
 */
static void increment_msgid_counter(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_tx_t *prl_tx = data->prl_tx;

	/* If the last message wasn't an SOP* message, no need to increment */
	if (prl_tx->last_xmit_type >= NUM_SOP_STAR_TYPES) {
		return;
	}

	prl_tx->msg_id_counter[prl_tx->last_xmit_type] =
		(prl_tx->msg_id_counter[prl_tx->last_xmit_type] + 1) & PD_MESSAGE_ID_COUNT;
}
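
/*
 * Masking with PD_MESSAGE_ID_COUNT implements the 3-bit MessageID rollover
 * required by the spec. A worked example, assuming the mask value is 7:
 *
 * @code{.c}
 * uint8_t id = 7;                      // counter at its maximum value
 *
 * id = (id + 1) & PD_MESSAGE_ID_COUNT; // (7 + 1) & 7 == 0, wraps to zero
 * @endcode
 */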

/**
 * @brief Get the SOP* header for the message being transmitted
 */
static uint16_t get_sop_star_header(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_tx_t *prl_tx = data->prl_tx;
	const bool is_sop_packet = prl_tx->emsg.type == PD_PACKET_SOP;
	union pd_header header;

	/* SOP vs SOP'/SOP'' headers are different. Replace fields as needed */
	header.message_type = prl_tx->msg_type;
	header.port_data_role = is_sop_packet ? pe_get_data_role(dev) : 0;
	header.specification_revision = data->rev[prl_tx->emsg.type];
	header.port_power_role = is_sop_packet ? pe_get_power_role(dev) : pe_get_cable_plug(dev);
	header.message_id = prl_tx->msg_id_counter[prl_tx->emsg.type];
	header.number_of_data_objects = PD_CONVERT_BYTES_TO_PD_HEADER_COUNT(prl_tx->emsg.len);
	header.extended = false;

	return header.raw_value;
}
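
/*
 * union pd_header overlays the bitfields assigned above onto a single 16-bit
 * raw_value, so writing the fields and reading raw_value performs the on-wire
 * packing. A minimal illustration, assuming the union layout from
 * <zephyr/drivers/usb_c/usbc_pd.h>:
 *
 * @code{.c}
 * union pd_header header = {0};
 * uint16_t wire;
 *
 * header.message_type = PD_CTRL_GOOD_CRC;
 * header.message_id = 3;
 * wire = header.raw_value; // 16-bit header as transmitted by the PHY
 * @endcode
 */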

/**
 * @brief Construct and transmit a message
 */
static void prl_tx_construct_message(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_tx_t *prl_tx = data->prl_tx;
	const struct device *tcpc = data->tcpc;

	/* The header is unused for hard reset, etc. */
	prl_tx->emsg.header.raw_value =
		prl_tx->emsg.type < NUM_SOP_STAR_TYPES ? get_sop_star_header(dev) : 0;

	/* Save SOP* so the correct msg_id_counter can be incremented */
	prl_tx->last_xmit_type = prl_tx->emsg.type;

	/*
	 * PRL_FLAGS_TX_COMPLETE could be set if this function is called before
	 * the Policy Engine is informed of the previous transmission. Clear
	 * the flag so that this message can be sent.
	 */
	atomic_clear_bit(&prl_tx->flags, PRL_FLAGS_TX_COMPLETE);

	/* Clear PRL_FLAGS_MSG_XMIT flag */
	atomic_clear_bit(&prl_tx->flags, PRL_FLAGS_MSG_XMIT);

	/*
	 * Pass message to PHY Layer. It handles retries in hardware as
	 * software cannot handle the required timing ~ 1ms (tReceive + tRetry)
	 */
	tcpc_transmit_data(tcpc, &prl_tx->emsg);
}

/**
 * @brief Transmit a Hard Reset Message
 */
static void prl_hr_send_msg_to_phy(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_tx_t *prl_tx = data->prl_tx;
	const struct device *tcpc = data->tcpc;

	/* Header is not used for hard reset */
	prl_tx->emsg.header.raw_value = 0;
	prl_tx->emsg.type = PD_PACKET_TX_HARD_RESET;

	/*
	 * These flags could be set if this function is called before the
	 * Policy Engine is informed of the previous transmission. Clear the
	 * flags so that this message can be sent.
	 */
	data->prl_tx->flags = ATOMIC_INIT(0);

	/* Pass message to PHY Layer */
	tcpc_transmit_data(tcpc, &prl_tx->emsg);
}

/**
 * @brief Initialize the Protocol Layer State Machines
 */
static void prl_init(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_rx_t *prl_rx = data->prl_rx;
	struct protocol_layer_tx_t *prl_tx = data->prl_tx;
	struct protocol_hard_reset_t *prl_hr = data->prl_hr;
	int i;

	LOG_INF("PRL_INIT");

	/* Set all packet types to default revision */
	prl_set_default_pd_revision(dev);

	/*
	 * Set TCPC alert handler so we are notified when messages
	 * are received, transmitted, etc.
	 */
	tcpc_set_alert_handler_cb(data->tcpc, alert_handler, (void *)dev);

	/* Initialize the PRL_HR state machine */
	prl_hr->flags = ATOMIC_INIT(0);
	usbc_timer_init(&prl_hr->pd_t_hard_reset_complete, PD_T_HARD_RESET_COMPLETE_MAX_MS);
	prl_hr_set_state(dev, PRL_HR_WAIT_FOR_REQUEST);

	/* Initialize the PRL_TX state machine */
	prl_tx->flags = ATOMIC_INIT(0);
	prl_tx->last_xmit_type = PD_PACKET_SOP;
	for (i = 0; i < NUM_SOP_STAR_TYPES; i++) {
		prl_tx->msg_id_counter[i] = 0;
	}
	usbc_timer_init(&prl_tx->pd_t_tx_timeout, PD_T_TX_TIMEOUT_MS);
	usbc_timer_init(&prl_tx->pd_t_sink_tx, PD_T_SINK_TX_MAX_MS);
	prl_tx_set_state(dev, PRL_TX_PHY_LAYER_RESET);

	/* Initialize the PRL_RX state machine */
	prl_rx->flags = ATOMIC_INIT(0);
	for (i = 0; i < NUM_SOP_STAR_TYPES; i++) {
		prl_rx->msg_id[i] = -1;
	}
}

/**
 * @brief PRL_Tx_PHY_Layer_Reset State
 */
static void prl_tx_phy_layer_reset_entry(void *obj)
{
	struct protocol_layer_tx_t *prl_tx = (struct protocol_layer_tx_t *)obj;
	const struct device *dev = prl_tx->dev;
	struct usbc_port_data *data = dev->data;
	const struct device *tcpc = data->tcpc;

	LOG_INF("PRL_Tx_PHY_Layer_Reset");

	/* Enable communications */
	tcpc_set_rx_enable(tcpc, tc_is_in_attached_state(dev));

	/* Reset complete */
	prl_tx_set_state(dev, PRL_TX_WAIT_FOR_MESSAGE_REQUEST);
}

/**
 * @brief PRL_Tx_Wait_for_Message_Request Entry State
 */
static void prl_tx_wait_for_message_request_entry(void *obj)
{
	struct protocol_layer_tx_t *prl_tx = (struct protocol_layer_tx_t *)obj;

	LOG_INF("PRL_Tx_Wait_for_Message_Request");

	/* Clear outstanding messages */
	prl_tx->flags = ATOMIC_INIT(0);
}

/**
 * @brief PRL_Tx_Wait_for_Message_Request Run State
 */
static void prl_tx_wait_for_message_request_run(void *obj)
{
	struct protocol_layer_tx_t *prl_tx = (struct protocol_layer_tx_t *)obj;
	const struct device *dev = prl_tx->dev;
	struct usbc_port_data *data = dev->data;

	/* Clear any AMS flags and state if we are no longer in an AMS */
	if (pe_dpm_initiated_ams(dev) == false) {
#ifdef CONFIG_USBC_CSM_SOURCE_ONLY
		/* Note: PRL_Tx_Src_Sink_Tx is embedded here. */
		if (atomic_test_and_clear_bit(&prl_tx->flags, PRL_FLAGS_SINK_NG)) {
			tc_select_src_collision_rp(dev, SINK_TX_OK);
		}
#endif
		atomic_clear_bit(&prl_tx->flags, PRL_FLAGS_WAIT_SINK_OK);
	}

	/*
	 * Check if we are starting an AMS and need to wait and/or set the CC
	 * lines appropriately.
	 */
	if (data->rev[PD_PACKET_SOP] == PD_REV30 && pe_dpm_initiated_ams(dev)) {
		if (atomic_test_bit(&prl_tx->flags, PRL_FLAGS_WAIT_SINK_OK) ||
		    atomic_test_bit(&prl_tx->flags, PRL_FLAGS_SINK_NG)) {
			/*
			 * If we are already in an AMS then allow the
			 * multi-message AMS to continue.
			 *
			 * Fall through using the current AMS.
			 */
		} else {
			/*
			 * Start of AMS notification received from
			 * Policy Engine
			 */
			if (IS_ENABLED(CONFIG_USBC_CSM_SOURCE_ONLY) &&
			    pe_get_power_role(dev) == TC_ROLE_SOURCE) {
				atomic_set_bit(&prl_tx->flags, PRL_FLAGS_SINK_NG);
				prl_tx_set_state(dev, PRL_TX_SRC_SOURCE_TX);
			} else {
				atomic_set_bit(&prl_tx->flags, PRL_FLAGS_WAIT_SINK_OK);
				prl_tx_set_state(dev, PRL_TX_SNK_START_AMS);
			}
			return;
		}
	}

	/* Handle non-Rev 3.0 messages, or subsequent messages in an AMS sequence */
	if (atomic_test_and_clear_bit(&prl_tx->flags, PRL_FLAGS_MSG_XMIT)) {
		/*
		 * Soft Reset Message pending
		 */
		if ((prl_tx->msg_type == PD_CTRL_SOFT_RESET) && (prl_tx->emsg.len == 0)) {
			prl_tx_set_state(dev, PRL_TX_LAYER_RESET_FOR_TRANSMIT);
		} else {
			/* Message pending (except Soft Reset) */

			/* NOTE: PRL_Tx_Construct_Message State embedded here */
			prl_tx_construct_message(dev);
			prl_tx_set_state(dev, PRL_TX_WAIT_FOR_PHY_RESPONSE);
		}
		return;
	}
}

/**
 * @brief PRL_Tx_Layer_Reset_for_Transmit Entry State
 */
static void prl_tx_layer_reset_for_transmit_entry(void *obj)
{
	struct protocol_layer_tx_t *prl_tx = (struct protocol_layer_tx_t *)obj;
	const struct device *dev = prl_tx->dev;
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_rx_t *prl_rx = data->prl_rx;

	LOG_INF("PRL_Tx_Layer_Reset_for_Transmit");

	if (prl_tx->emsg.type < NUM_SOP_STAR_TYPES) {
		/*
		 * This state is only used during soft resets. Reset only the
		 * matching message type.
		 *
		 * From section 6.3.13 Soft Reset Message in the USB PD 3.0
		 * v2.0 spec, Soft_Reset Message Shall be targeted at a
		 * specific entity depending on the type of SOP* Packet used.
		 */
		prl_tx->msg_id_counter[prl_tx->emsg.type] = 0;
		/*
		 * From section 6.11.2.3.2, the MessageID should be cleared
		 * from the PRL_Rx_Layer_Reset_for_Receive state. However, we
		 * don't implement a full state machine for PRL RX states so
		 * clear the MessageID here.
		 */
		prl_rx->msg_id[prl_tx->emsg.type] = -1;
	}

	/* NOTE: PRL_Tx_Construct_Message State embedded here */
	prl_tx_construct_message(dev);
	prl_tx_set_state(dev, PRL_TX_WAIT_FOR_PHY_RESPONSE);
}

/**
 * @brief PRL_Tx_Wait_for_PHY_response Entry State
 */
static void prl_tx_wait_for_phy_response_entry(void *obj)
{
	struct protocol_layer_tx_t *prl_tx = (struct protocol_layer_tx_t *)obj;

	LOG_INF("PRL_Tx_Wait_for_PHY_response");
	usbc_timer_start(&prl_tx->pd_t_tx_timeout);
}

/**
 * @brief PRL_Tx_Wait_for_PHY_response Run State
 */
static void prl_tx_wait_for_phy_response_run(void *obj)
{
	struct protocol_layer_tx_t *prl_tx = (struct protocol_layer_tx_t *)obj;
	const struct device *dev = prl_tx->dev;

	/* Wait until TX is complete */
	if (atomic_test_and_clear_bit(&prl_tx->flags, PRL_FLAGS_TX_DISCARDED)) {
		/* NOTE: PRL_Tx_Discard_Message State embedded here. */
		/* Inform Policy Engine Message was discarded */
		pe_report_discard(dev);
		prl_tx_set_state(dev, PRL_TX_PHY_LAYER_RESET);
		return;
	}
	if (atomic_test_bit(&prl_tx->flags, PRL_FLAGS_TX_COMPLETE)) {
		/* NOTE: PRL_Tx_Message_Sent State embedded here. */
		/* Inform Policy Engine Message was sent */
		pe_message_sent(dev);
		/*
		 * This event reduces the time of informing the policy engine
		 * of the transmission by one state machine cycle
		 */
		prl_tx_set_state(dev, PRL_TX_WAIT_FOR_MESSAGE_REQUEST);
		return;
	} else if (usbc_timer_expired(&prl_tx->pd_t_tx_timeout) ||
		   atomic_test_bit(&prl_tx->flags, PRL_FLAGS_TX_ERROR)) {
		/*
		 * NOTE: PRL_Tx_Transmission_Error State embedded
		 * here.
		 */
		/* Report Error To Policy Engine */
		pe_report_error(dev, ERR_XMIT, prl_tx->last_xmit_type);
		prl_tx_set_state(dev, PRL_TX_WAIT_FOR_MESSAGE_REQUEST);
		return;
	}
}

/**
 * @brief PRL_Tx_Wait_for_PHY_response Exit State
 */
static void prl_tx_wait_for_phy_response_exit(void *obj)
{
	struct protocol_layer_tx_t *prl_tx = (struct protocol_layer_tx_t *)obj;
	const struct device *dev = prl_tx->dev;

	usbc_timer_stop(&prl_tx->pd_t_tx_timeout);

	/* Increment messageId counter */
	increment_msgid_counter(dev);
}

#ifdef CONFIG_USBC_CSM_SOURCE_ONLY
/**
 * @brief 6.11.2.2.2.1 PRL_Tx_Src_Source_Tx
 */
static void prl_tx_src_source_tx_entry(void *obj)
{
	struct protocol_layer_tx_t *prl_tx = (struct protocol_layer_tx_t *)obj;
	const struct device *dev = prl_tx->dev;

	LOG_INF("PRL_Tx_Src_Tx");

	/* Set Rp = SinkTxNG */
	tc_select_src_collision_rp(dev, SINK_TX_NG);
}

static void prl_tx_src_source_tx_run(void *obj)
{
	struct protocol_layer_tx_t *prl_tx = (struct protocol_layer_tx_t *)obj;
	const struct device *dev = prl_tx->dev;

	if (atomic_test_bit(&prl_tx->flags, PRL_FLAGS_MSG_XMIT)) {
		/*
		 * Don't clear pending XMIT flag here. Wait until we send so
		 * we can detect if we dropped this message or not.
		 */
		prl_tx_set_state(dev, PRL_TX_SRC_PENDING);
	}
}
#endif
#ifdef CONFIG_USBC_CSM_SINK_ONLY
/**
 * @brief PRL_Tx_Snk_Start_of_AMS Entry State
 */
static void prl_tx_snk_start_ams_entry(void *obj)
{
	LOG_INF("PRL_Tx_Snk_Start_of_AMS");
}

/**
 * @brief PRL_Tx_Snk_Start_of_AMS Run State
 */
static void prl_tx_snk_start_ams_run(void *obj)
{
	struct protocol_layer_tx_t *prl_tx = (struct protocol_layer_tx_t *)obj;
	const struct device *dev = prl_tx->dev;

	if (atomic_test_bit(&prl_tx->flags, PRL_FLAGS_MSG_XMIT)) {
		/*
		 * Don't clear pending XMIT flag here. Wait until we send so
		 * we can detect if we dropped this message or not.
		 */
		prl_tx_set_state(dev, PRL_TX_SNK_PENDING);
	}
}
#endif
#ifdef CONFIG_USBC_CSM_SOURCE_ONLY
/**
 * @brief PRL_Tx_Src_Pending Entry State
 */
static void prl_tx_src_pending_entry(void *obj)
{
	struct protocol_layer_tx_t *prl_tx = (struct protocol_layer_tx_t *)obj;

	LOG_INF("PRL_Tx_Src_Pending");

	/* Start SinkTxTimer */
	usbc_timer_start(&prl_tx->pd_t_sink_tx);
}

/**
 * @brief PRL_Tx_Src_Pending Run State
 */
static void prl_tx_src_pending_run(void *obj)
{
	struct protocol_layer_tx_t *prl_tx = (struct protocol_layer_tx_t *)obj;
	const struct device *dev = prl_tx->dev;

	if (usbc_timer_expired(&prl_tx->pd_t_sink_tx)) {
		/*
		 * We clear the pending XMIT flag here right before we send so
		 * we can detect if we discarded this message or not
		 */
		atomic_clear_bit(&prl_tx->flags, PRL_FLAGS_MSG_XMIT);

		/* Soft Reset Message pending & SinkTxTimer timeout */
		if ((prl_tx->msg_type == PD_CTRL_SOFT_RESET) && (prl_tx->emsg.len == 0)) {
			prl_tx_set_state(dev, PRL_TX_LAYER_RESET_FOR_TRANSMIT);
		}
		/* Message pending (except Soft Reset) & SinkTxTimer timeout */
		else {
			/* If this is the first AMS message, inform the PE that it's been sent */
			if (atomic_test_bit(&prl_tx->flags, PRL_FLAGS_FIRST_MSG_PENDING)) {
				atomic_clear_bit(&prl_tx->flags, PRL_FLAGS_FIRST_MSG_PENDING);
				pe_first_msg_sent(dev);
			}

			prl_tx_construct_message(dev);
			prl_tx_set_state(dev, PRL_TX_WAIT_FOR_PHY_RESPONSE);
		}
	}
}

/**
 * @brief PRL_Tx_Src_Pending Exit State
 */
static void prl_tx_src_pending_exit(void *obj)
{
	struct protocol_layer_tx_t *prl_tx = (struct protocol_layer_tx_t *)obj;

	/* Stop SinkTxTimer */
	usbc_timer_stop(&prl_tx->pd_t_sink_tx);
}
#endif

#ifdef CONFIG_USBC_CSM_SINK_ONLY
/**
 * @brief PRL_Tx_Snk_Pending Entry State
 */
static void prl_tx_snk_pending_entry(void *obj)
{
	LOG_INF("PRL_Tx_Snk_Pending");
}

/**
 * @brief PRL_Tx_Snk_Pending Run State
 */
static void prl_tx_snk_pending_run(void *obj)
{
	struct protocol_layer_tx_t *prl_tx = (struct protocol_layer_tx_t *)obj;
	const struct device *dev = prl_tx->dev;
	struct usbc_port_data *data = dev->data;
	const struct device *tcpc = data->tcpc;
	enum tc_cc_voltage_state cc1;
	enum tc_cc_voltage_state cc2;

	/*
	 * Wait until the SRC applies SINK_TX_OK so we can transmit.
	 */
	tcpc_get_cc(tcpc, &cc1, &cc2);

	/*
	 * We clear the pending XMIT flag here right before we send so
	 * we can detect if we discarded this message or not
	 */
	atomic_clear_bit(&prl_tx->flags, PRL_FLAGS_MSG_XMIT);

	/*
	 * The Protocol Layer Shall transition to the
	 * PRL_Tx_Layer_Reset_for_Transmit state when a Soft_Reset
	 * Message is pending.
	 */
	if ((prl_tx->msg_type == PD_CTRL_SOFT_RESET) && (prl_tx->emsg.len == 0)) {
		prl_tx_set_state(dev, PRL_TX_LAYER_RESET_FOR_TRANSMIT);
	} else if (cc1 == TC_CC_VOLT_RP_3A0 || cc2 == TC_CC_VOLT_RP_3A0) {
		/* If this is the first AMS message, inform the PE that it's been sent */
		if (atomic_test_bit(&prl_tx->flags, PRL_FLAGS_FIRST_MSG_PENDING)) {
			atomic_clear_bit(&prl_tx->flags, PRL_FLAGS_FIRST_MSG_PENDING);
			pe_first_msg_sent(dev);
		}

		/*
		 * The Protocol Layer Shall transition to the PRL_Tx_Construct_Message
		 * state when Rp is set to SinkTxOk and a Soft_Reset Message is not
		 * pending.
		 */

		/*
		 * Message pending (except Soft Reset) &
		 * Rp = SinkTxOk
		 */
		prl_tx_construct_message(dev);
		prl_tx_set_state(dev, PRL_TX_WAIT_FOR_PHY_RESPONSE);
	}
}
#endif

static void prl_tx_suspend_entry(void *obj)
{
	LOG_INF("PRL_TX_SUSPEND");
}

static void prl_tx_suspend_run(void *obj)
{
	/* Do nothing */
}

/**
 * All necessary Protocol Hard Reset States (Section 6.12.2.4)
 */

/**
 * @brief PRL_HR_Wait_for_Request Entry State
 *
 * @note This state is not part of the PRL_HR State Diagram found in
 * Figure 6-66. The PRL_HR state machine waits here until a
 * Hard Reset is requested by either the Policy Engine or the
 * PHY Layer.
 */
static void prl_hr_wait_for_request_entry(void *obj)
{
	struct protocol_hard_reset_t *prl_hr = (struct protocol_hard_reset_t *)obj;

	LOG_INF("PRL_HR_Wait_for_Request");

	/* Reset all Protocol Layer Hard Reset flags */
	prl_hr->flags = ATOMIC_INIT(0);
}

/**
 * @brief PRL_HR_Wait_for_Request Run State
 */
static void prl_hr_wait_for_request_run(void *obj)
{
	struct protocol_hard_reset_t *prl_hr = (struct protocol_hard_reset_t *)obj;
	const struct device *dev = prl_hr->dev;

	/*
	 * The PRL_FLAGS_PE_HARD_RESET flag is set when a Hard Reset request is
	 * received from the Policy Engine.
	 *
	 * The PRL_FLAGS_PORT_PARTNER_HARD_RESET flag is set when Hard Reset
	 * signaling is received by the PHY Layer.
	 */
	if (atomic_test_bit(&prl_hr->flags, PRL_FLAGS_PE_HARD_RESET) ||
	    atomic_test_bit(&prl_hr->flags, PRL_FLAGS_PORT_PARTNER_HARD_RESET)) {
		/* Start Hard Reset */
		prl_hr_set_state(dev, PRL_HR_RESET_LAYER);
	}
}

/**
 * @brief PRL_HR_Reset_Layer Entry State
 */
static void prl_hr_reset_layer_entry(void *obj)
{
	struct protocol_hard_reset_t *prl_hr = (struct protocol_hard_reset_t *)obj;
	const struct device *dev = prl_hr->dev;
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_rx_t *prl_rx = data->prl_rx;
	struct protocol_layer_tx_t *prl_tx = data->prl_tx;
	const struct device *tcpc = data->tcpc;
	int i;

	LOG_INF("PRL_HR_Reset_Layer");

	/* Reset all Protocol Layer message reception flags */
	prl_rx->flags = ATOMIC_INIT(0);
	/* Reset all Protocol Layer message transmission flags */
	prl_tx->flags = ATOMIC_INIT(0);

	/* Hard reset resets messageIDCounters for all TX types */
	for (i = 0; i < NUM_SOP_STAR_TYPES; i++) {
		prl_rx->msg_id[i] = -1;
		prl_tx->msg_id_counter[i] = 0;
	}

	/* Disable RX */
	tcpc_set_rx_enable(tcpc, false);

	/*
	 * PD r3.0 v2.0, ss6.2.1.1.5:
	 * After a physical or logical (USB Type-C Error Recovery) Attach, a
	 * Port discovers the common Specification Revision level between
	 * itself and its Port Partner and/or the Cable Plug(s), and uses this
	 * Specification Revision level until a Detach, Hard Reset or Error
	 * Recovery happens.
	 *
	 * This covers the Hard Reset case.
	 */
	prl_set_default_pd_revision(dev);

	/*
	 * Protocol Layer message transmission transitions to
	 * PRL_Tx_Wait_For_Message_Request state.
	 */
	prl_tx_set_state(dev, PRL_TX_PHY_LAYER_RESET);

	/*
	 * Protocol Layer message reception transitions to
	 * PRL_Rx_Wait_for_PHY_Message state.
	 *
	 * Note: The PRL_Rx_Wait_for_PHY_Message state is implemented
	 * as a single function, named prl_rx_wait_for_phy_message.
	 */

	/*
	 * Protocol Layer reset complete &
	 * Hard Reset was initiated by Policy Engine
	 */
	if (atomic_test_bit(&prl_hr->flags, PRL_FLAGS_PE_HARD_RESET)) {
		/*
		 * Request PHY to perform a Hard Reset. Note:
		 * PRL_HR_Request_Reset state is embedded here.
		 */
		prl_hr_send_msg_to_phy(dev);
		prl_hr_set_state(dev, PRL_HR_WAIT_FOR_PHY_HARD_RESET_COMPLETE);
	} else {
		/*
		 * Protocol Layer reset complete &
		 * Hard Reset was initiated by Port Partner
		 */

		/* Inform Policy Engine of the Hard Reset */
		pe_got_hard_reset(dev);
		prl_hr_set_state(dev, PRL_HR_WAIT_FOR_PE_HARD_RESET_COMPLETE);
	}
}

/**
 * @brief PRL_HR_Wait_for_PHY_Hard_Reset_Complete Entry State
 */
static void prl_hr_wait_for_phy_hard_reset_complete_entry(void *obj)
{
	struct protocol_hard_reset_t *prl_hr = (struct protocol_hard_reset_t *)obj;

	LOG_INF("PRL_HR_Wait_for_PHY_Hard_Reset_Complete");

	/*
	 * Start the HardResetCompleteTimer and wait for the PHY Layer to
	 * indicate that the Hard Reset completed.
	 */
	usbc_timer_start(&prl_hr->pd_t_hard_reset_complete);
}

/**
 * @brief PRL_HR_Wait_for_PHY_Hard_Reset_Complete Run State
 */
static void prl_hr_wait_for_phy_hard_reset_complete_run(void *obj)
{
	struct protocol_hard_reset_t *prl_hr = (struct protocol_hard_reset_t *)obj;
	const struct device *dev = prl_hr->dev;
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_tx_t *prl_tx = data->prl_tx;

	/*
	 * Wait for the PHY to confirm that the Hard Reset was sent, or for
	 * the HardResetCompleteTimer to expire
	 */
	if (atomic_test_bit(&prl_tx->flags, PRL_FLAGS_TX_COMPLETE) ||
	    usbc_timer_expired(&prl_hr->pd_t_hard_reset_complete)) {
		/* PRL_HR_PHY_Hard_Reset_Requested */
		/* Inform Policy Engine Hard Reset was sent */
		pe_hard_reset_sent(dev);
		prl_hr_set_state(dev, PRL_HR_WAIT_FOR_PE_HARD_RESET_COMPLETE);
	}
}

/**
 * @brief PRL_HR_Wait_for_PHY_Hard_Reset_Complete Exit State
 */
static void prl_hr_wait_for_phy_hard_reset_complete_exit(void *obj)
{
	struct protocol_hard_reset_t *prl_hr = (struct protocol_hard_reset_t *)obj;

	/* Stop the HardResetCompleteTimer */
	usbc_timer_stop(&prl_hr->pd_t_hard_reset_complete);
}

/**
 * @brief PRL_HR_Wait_For_PE_Hard_Reset_Complete Entry State
 */
static void prl_hr_wait_for_pe_hard_reset_complete_entry(void *obj)
{
	LOG_INF("PRL_HR_Wait_For_PE_Hard_Reset_Complete");
}

/**
 * @brief PRL_HR_Wait_For_PE_Hard_Reset_Complete Run State
 */
static void prl_hr_wait_for_pe_hard_reset_complete_run(void *obj)
{
	struct protocol_hard_reset_t *prl_hr = (struct protocol_hard_reset_t *)obj;
	const struct device *dev = prl_hr->dev;

	/* Wait for Hard Reset complete indication from Policy Engine */
	if (atomic_test_bit(&prl_hr->flags, PRL_FLAGS_HARD_RESET_COMPLETE)) {
		prl_hr_set_state(dev, PRL_HR_WAIT_FOR_REQUEST);
	}
}

static void prl_hr_suspend_entry(void *obj)
{
	LOG_INF("PRL_HR_SUSPEND");
}

static void prl_hr_suspend_run(void *obj)
{
	/* Do nothing */
}

/**
 * @brief This function implements the Protocol Layer Message Reception
 * State Machine. See Figure 6-55 Protocol layer Message reception.
 *
 * The embedded states of the state machine are identified by
 * comments of the form NOTE: <state name>
 */
static void prl_rx_wait_for_phy_message(const struct device *dev)
{
	struct usbc_port_data *data = dev->data;
	struct protocol_layer_rx_t *prl_rx = data->prl_rx;
	struct protocol_layer_tx_t *prl_tx = data->prl_tx;
	struct pd_msg *rx_emsg = &prl_rx->emsg;
	const struct device *tcpc = data->tcpc;
	uint8_t msg_type;
	uint8_t pkt_type;
	uint8_t ext;
	int8_t msid;
	uint8_t num_data_objs;
	uint8_t power_role;

	/* Get the message */
	if (tcpc_get_rx_pending_msg(tcpc, rx_emsg) <= 0) {
		/* No pending message or problem getting the message */
		return;
	}

	num_data_objs = rx_emsg->header.number_of_data_objects;
	msid = rx_emsg->header.message_id;
	msg_type = rx_emsg->header.message_type;
	ext = rx_emsg->header.extended;
	pkt_type = rx_emsg->type;
	power_role = rx_emsg->header.port_power_role;

	/* Dump the received packet content, except for Pings */
	if (msg_type != PD_CTRL_PING) {
		int p;

		LOG_INF("RECV %04x/%d ", rx_emsg->header.raw_value, num_data_objs);
		for (p = 0; p < num_data_objs; p++) {
			LOG_INF("\t[%d]%08x ", p, *((uint32_t *)rx_emsg->data + p));
		}
	}

	/* Ignore messages sent to the cable from our port partner */
	if (pkt_type != PD_PACKET_SOP && power_role == PD_PLUG_FROM_DFP_UFP) {
		return;
	}

	/* Soft Reset Message received from PHY */
	if (num_data_objs == 0 && msg_type == PD_CTRL_SOFT_RESET) {
		/* NOTE: PRL_Rx_Layer_Reset_for_Receive State embedded here */

		/* Reset MessageIdCounter */
		prl_tx->msg_id_counter[pkt_type] = 0;

		/* Clear stored MessageID value */
		prl_rx->msg_id[pkt_type] = -1;

		/*
		 * Protocol Layer message transmission transitions to
		 * PRL_Tx_PHY_Layer_Reset state
		 */
		prl_tx_set_state(dev, PRL_TX_PHY_LAYER_RESET);

		/*
		 * Inform Policy Engine of Soft Reset. Note: perform this after
		 * performing the protocol layer reset, otherwise we will lose
		 * the PE's outgoing ACCEPT message to the soft reset.
		 */
		pe_got_soft_reset(dev);
		return;
	}

	/* Ignore if this is a duplicate message. Stop processing */
	if (prl_rx->msg_id[pkt_type] == msid) {
		return;
	}

	/*
	 * Discard any pending TX message if this RX message is from SOP,
	 * except for ping messages.
	 */

	/* Check if message transmit is pending */
	if (atomic_test_bit(&prl_tx->flags, PRL_FLAGS_MSG_XMIT)) {
		/* Don't discard message if a PING was received */
		if ((num_data_objs > 0) || (msg_type != PD_CTRL_PING)) {
			/* Only discard message if received from SOP */
			if (pkt_type == PD_PACKET_SOP) {
				atomic_set_bit(&prl_tx->flags, PRL_FLAGS_TX_DISCARDED);
			}
		}
	}

	/* Store Message Id */
	prl_rx->msg_id[pkt_type] = msid;

	/* Pass message to Policy Engine */
	pe_message_received(dev);
}
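
/*
 * Note on the duplicate check above: the prl_rx->msg_id[] entries are
 * initialized to -1 (see prl_init), which never matches a received 3-bit
 * MessageID in the range 0..7, so the first message after a reset is always
 * accepted. A minimal illustration of the comparison:
 *
 * @code{.c}
 * int8_t stored = -1;  // value after prl_init() or a Soft Reset
 * uint8_t msid = 0;    // first MessageID from the port partner
 *
 * bool duplicate = (stored == msid); // false: the message is processed
 * @endcode
 */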

/**
 * @brief Protocol Layer Transmit State table
 */
static const struct smf_state prl_tx_states[PRL_TX_STATE_COUNT] = {
	[PRL_TX_PHY_LAYER_RESET] = SMF_CREATE_STATE(
		prl_tx_phy_layer_reset_entry,
		NULL,
		NULL,
		NULL,
		NULL),
	[PRL_TX_WAIT_FOR_MESSAGE_REQUEST] = SMF_CREATE_STATE(
		prl_tx_wait_for_message_request_entry,
		prl_tx_wait_for_message_request_run,
		NULL,
		NULL,
		NULL),
	[PRL_TX_LAYER_RESET_FOR_TRANSMIT] = SMF_CREATE_STATE(
		prl_tx_layer_reset_for_transmit_entry,
		NULL,
		NULL,
		NULL,
		NULL),
	[PRL_TX_WAIT_FOR_PHY_RESPONSE] = SMF_CREATE_STATE(
		prl_tx_wait_for_phy_response_entry,
		prl_tx_wait_for_phy_response_run,
		prl_tx_wait_for_phy_response_exit,
		NULL,
		NULL),
	[PRL_TX_SUSPEND] = SMF_CREATE_STATE(
		prl_tx_suspend_entry,
		prl_tx_suspend_run,
		NULL,
		NULL,
		NULL),
#ifdef CONFIG_USBC_CSM_SINK_ONLY
	[PRL_TX_SNK_START_AMS] = SMF_CREATE_STATE(
		prl_tx_snk_start_ams_entry,
		prl_tx_snk_start_ams_run,
		NULL,
		NULL,
		NULL),
	[PRL_TX_SNK_PENDING] = SMF_CREATE_STATE(
		prl_tx_snk_pending_entry,
		prl_tx_snk_pending_run,
		NULL,
		NULL,
		NULL),
#endif
#ifdef CONFIG_USBC_CSM_SOURCE_ONLY
	[PRL_TX_SRC_SOURCE_TX] = SMF_CREATE_STATE(
		prl_tx_src_source_tx_entry,
		prl_tx_src_source_tx_run,
		NULL,
		NULL,
		NULL),
	[PRL_TX_SRC_PENDING] = SMF_CREATE_STATE(
		prl_tx_src_pending_entry,
		prl_tx_src_pending_run,
		prl_tx_src_pending_exit,
		NULL,
		NULL),
#endif
};
BUILD_ASSERT(ARRAY_SIZE(prl_tx_states) == PRL_TX_STATE_COUNT);

/**
 * @brief Protocol Layer Hard Reset State table
 */
static const struct smf_state prl_hr_states[PRL_HR_STATE_COUNT] = {
	[PRL_HR_WAIT_FOR_REQUEST] = SMF_CREATE_STATE(
		prl_hr_wait_for_request_entry,
		prl_hr_wait_for_request_run,
		NULL,
		NULL,
		NULL),
	[PRL_HR_RESET_LAYER] = SMF_CREATE_STATE(
		prl_hr_reset_layer_entry,
		NULL,
		NULL,
		NULL,
		NULL),
	[PRL_HR_WAIT_FOR_PHY_HARD_RESET_COMPLETE] = SMF_CREATE_STATE(
		prl_hr_wait_for_phy_hard_reset_complete_entry,
		prl_hr_wait_for_phy_hard_reset_complete_run,
		prl_hr_wait_for_phy_hard_reset_complete_exit,
		NULL,
		NULL),
	[PRL_HR_WAIT_FOR_PE_HARD_RESET_COMPLETE] = SMF_CREATE_STATE(
		prl_hr_wait_for_pe_hard_reset_complete_entry,
		prl_hr_wait_for_pe_hard_reset_complete_run,
		NULL,
		NULL,
		NULL),
	[PRL_HR_SUSPEND] = SMF_CREATE_STATE(
		prl_hr_suspend_entry,
		prl_hr_suspend_run,
		NULL,
		NULL,
		NULL),
};
BUILD_ASSERT(ARRAY_SIZE(prl_hr_states) == PRL_HR_STATE_COUNT);