1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright 2015-2017 Google, Inc
4  *
5  * USB Power Delivery protocol stack.
6  */
7 
8 #include <linux/completion.h>
9 #include <linux/debugfs.h>
10 #include <linux/device.h>
11 #include <linux/hrtimer.h>
12 #include <linux/jiffies.h>
13 #include <linux/kernel.h>
14 #include <linux/kthread.h>
15 #include <linux/module.h>
16 #include <linux/mutex.h>
17 #include <linux/power_supply.h>
18 #include <linux/proc_fs.h>
19 #include <linux/property.h>
20 #include <linux/sched/clock.h>
21 #include <linux/seq_file.h>
22 #include <linux/slab.h>
23 #include <linux/spinlock.h>
24 #include <linux/usb.h>
25 #include <linux/usb/pd.h>
26 #include <linux/usb/pd_ado.h>
27 #include <linux/usb/pd_bdo.h>
28 #include <linux/usb/pd_ext_sdb.h>
29 #include <linux/usb/pd_vdo.h>
30 #include <linux/usb/role.h>
31 #include <linux/usb/tcpm.h>
32 #include <linux/usb/typec_altmode.h>
33 
34 #include <uapi/linux/sched/types.h>
35 
36 #define FOREACH_STATE(S)			\
37 	S(INVALID_STATE),			\
38 	S(TOGGLING),			\
39 	S(SRC_UNATTACHED),			\
40 	S(SRC_ATTACH_WAIT),			\
41 	S(SRC_ATTACHED),			\
42 	S(SRC_STARTUP),				\
43 	S(SRC_SEND_CAPABILITIES),		\
44 	S(SRC_SEND_CAPABILITIES_TIMEOUT),	\
45 	S(SRC_NEGOTIATE_CAPABILITIES),		\
46 	S(SRC_TRANSITION_SUPPLY),		\
47 	S(SRC_READY),				\
48 	S(SRC_WAIT_NEW_CAPABILITIES),		\
49 						\
50 	S(SNK_UNATTACHED),			\
51 	S(SNK_ATTACH_WAIT),			\
52 	S(SNK_DEBOUNCED),			\
53 	S(SNK_ATTACHED),			\
54 	S(SNK_STARTUP),				\
55 	S(SNK_DISCOVERY),			\
56 	S(SNK_DISCOVERY_DEBOUNCE),		\
57 	S(SNK_DISCOVERY_DEBOUNCE_DONE),		\
58 	S(SNK_WAIT_CAPABILITIES),		\
59 	S(SNK_NEGOTIATE_CAPABILITIES),		\
60 	S(SNK_NEGOTIATE_PPS_CAPABILITIES),	\
61 	S(SNK_TRANSITION_SINK),			\
62 	S(SNK_TRANSITION_SINK_VBUS),		\
63 	S(SNK_READY),				\
64 						\
65 	S(ACC_UNATTACHED),			\
66 	S(DEBUG_ACC_ATTACHED),			\
67 	S(AUDIO_ACC_ATTACHED),			\
68 	S(AUDIO_ACC_DEBOUNCE),			\
69 						\
70 	S(HARD_RESET_SEND),			\
71 	S(HARD_RESET_START),			\
72 	S(SRC_HARD_RESET_VBUS_OFF),		\
73 	S(SRC_HARD_RESET_VBUS_ON),		\
74 	S(SNK_HARD_RESET_SINK_OFF),		\
75 	S(SNK_HARD_RESET_WAIT_VBUS),		\
76 	S(SNK_HARD_RESET_SINK_ON),		\
77 						\
78 	S(SOFT_RESET),				\
79 	S(SOFT_RESET_SEND),			\
80 						\
81 	S(DR_SWAP_ACCEPT),			\
82 	S(DR_SWAP_SEND),			\
83 	S(DR_SWAP_SEND_TIMEOUT),		\
84 	S(DR_SWAP_CANCEL),			\
85 	S(DR_SWAP_CHANGE_DR),			\
86 						\
87 	S(PR_SWAP_ACCEPT),			\
88 	S(PR_SWAP_SEND),			\
89 	S(PR_SWAP_SEND_TIMEOUT),		\
90 	S(PR_SWAP_CANCEL),			\
91 	S(PR_SWAP_START),			\
92 	S(PR_SWAP_SRC_SNK_TRANSITION_OFF),	\
93 	S(PR_SWAP_SRC_SNK_SOURCE_OFF),		\
94 	S(PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED), \
95 	S(PR_SWAP_SRC_SNK_SINK_ON),		\
96 	S(PR_SWAP_SNK_SRC_SINK_OFF),		\
97 	S(PR_SWAP_SNK_SRC_SOURCE_ON),		\
98 	S(PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP),    \
99 						\
100 	S(VCONN_SWAP_ACCEPT),			\
101 	S(VCONN_SWAP_SEND),			\
102 	S(VCONN_SWAP_SEND_TIMEOUT),		\
103 	S(VCONN_SWAP_CANCEL),			\
104 	S(VCONN_SWAP_START),			\
105 	S(VCONN_SWAP_WAIT_FOR_VCONN),		\
106 	S(VCONN_SWAP_TURN_ON_VCONN),		\
107 	S(VCONN_SWAP_TURN_OFF_VCONN),		\
108 						\
109 	S(FR_SWAP_SEND),			\
110 	S(FR_SWAP_SEND_TIMEOUT),		\
111 	S(FR_SWAP_SNK_SRC_TRANSITION_TO_OFF),			\
112 	S(FR_SWAP_SNK_SRC_NEW_SINK_READY),		\
113 	S(FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED),	\
114 	S(FR_SWAP_CANCEL),			\
115 						\
116 	S(SNK_TRY),				\
117 	S(SNK_TRY_WAIT),			\
118 	S(SNK_TRY_WAIT_DEBOUNCE),               \
119 	S(SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS),    \
120 	S(SRC_TRYWAIT),				\
121 	S(SRC_TRYWAIT_DEBOUNCE),		\
122 	S(SRC_TRYWAIT_UNATTACHED),		\
123 						\
124 	S(SRC_TRY),				\
125 	S(SRC_TRY_WAIT),                        \
126 	S(SRC_TRY_DEBOUNCE),			\
127 	S(SNK_TRYWAIT),				\
128 	S(SNK_TRYWAIT_DEBOUNCE),		\
129 	S(SNK_TRYWAIT_VBUS),			\
130 	S(BIST_RX),				\
131 						\
132 	S(GET_STATUS_SEND),			\
133 	S(GET_STATUS_SEND_TIMEOUT),		\
134 	S(GET_PPS_STATUS_SEND),			\
135 	S(GET_PPS_STATUS_SEND_TIMEOUT),		\
136 						\
137 	S(GET_SINK_CAP),			\
138 	S(GET_SINK_CAP_TIMEOUT),		\
139 						\
140 	S(ERROR_RECOVERY),			\
141 	S(PORT_RESET),				\
142 	S(PORT_RESET_WAIT_OFF)
143 
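/*
 * FOREACH_STATE() is expanded twice: once to generate enum tcpm_state and
 * once to generate the matching tcpm_states[] name table used for logging,
 * keeping the two in sync automatically.
 */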
144 #define GENERATE_ENUM(e)	e
145 #define GENERATE_STRING(s)	#s
146 
147 enum tcpm_state {
148 	FOREACH_STATE(GENERATE_ENUM)
149 };
150 
151 static const char * const tcpm_states[] = {
152 	FOREACH_STATE(GENERATE_STRING)
153 };
154 
155 enum vdm_states {
156 	VDM_STATE_ERR_BUSY = -3,
157 	VDM_STATE_ERR_SEND = -2,
158 	VDM_STATE_ERR_TMOUT = -1,
159 	VDM_STATE_DONE = 0,
160 	/* Anything >0 represents an active state */
161 	VDM_STATE_READY = 1,
162 	VDM_STATE_BUSY = 2,
163 	VDM_STATE_WAIT_RSP_BUSY = 3,
164 };
165 
166 enum pd_msg_request {
167 	PD_MSG_NONE = 0,
168 	PD_MSG_CTRL_REJECT,
169 	PD_MSG_CTRL_WAIT,
170 	PD_MSG_CTRL_NOT_SUPP,
171 	PD_MSG_DATA_SINK_CAP,
172 	PD_MSG_DATA_SOURCE_CAP,
173 };
174 
175 enum adev_actions {
176 	ADEV_NONE = 0,
177 	ADEV_NOTIFY_USB_AND_QUEUE_VDM,
178 	ADEV_QUEUE_VDM,
179 	ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL,
180 	ADEV_ATTENTION,
181 };
182 
183 /*
184  * Initial current capability of the new source when vSafe5V is applied during PD3.0 Fast Role Swap.
185  * Based on "Table 6-14 Fixed Supply PDO - Sink" of "USB Power Delivery Specification Revision 3.0,
186  * Version 1.2"
187  */
188 enum frs_typec_current {
189 	FRS_NOT_SUPPORTED,
190 	FRS_DEFAULT_POWER,
191 	FRS_5V_1P5A,
192 	FRS_5V_3A,
193 };
194 
195 /* Events from low level driver */
196 
197 #define TCPM_CC_EVENT		BIT(0)
198 #define TCPM_VBUS_EVENT		BIT(1)
199 #define TCPM_RESET_EVENT	BIT(2)
200 #define TCPM_FRS_EVENT		BIT(3)
201 #define TCPM_SOURCING_VBUS	BIT(4)
202 
203 #define LOG_BUFFER_ENTRIES	1024
204 #define LOG_BUFFER_ENTRY_SIZE	128
205 
206 /* Alternate mode support */
207 
208 #define SVID_DISCOVERY_MAX	16
209 #define ALTMODE_DISCOVERY_MAX	(SVID_DISCOVERY_MAX * MODE_DISCOVERY_MAX)
210 
211 #define GET_SINK_CAP_RETRY_MS	100
212 
213 struct pd_mode_data {
214 	int svid_index;		/* current SVID index		*/
215 	int nsvids;
216 	u16 svids[SVID_DISCOVERY_MAX];
217 	int altmodes;		/* number of alternate modes	*/
218 	struct typec_altmode_desc altmode_desc[ALTMODE_DISCOVERY_MAX];
219 };
220 
221 struct pd_pps_data {
222 	u32 min_volt;
223 	u32 max_volt;
224 	u32 max_curr;
225 	u32 out_volt;
226 	u32 op_curr;
227 	bool supported;
228 	bool active;
229 };
230 
231 struct tcpm_port {
232 	struct device *dev;
233 
234 	struct mutex lock;		/* tcpm state machine lock */
235 	struct kthread_worker *wq;
236 
237 	struct typec_capability typec_caps;
238 	struct typec_port *typec_port;
239 
240 	struct tcpc_dev	*tcpc;
241 	struct usb_role_switch *role_sw;
242 
243 	enum typec_role vconn_role;
244 	enum typec_role pwr_role;
245 	enum typec_data_role data_role;
246 	enum typec_pwr_opmode pwr_opmode;
247 
248 	struct usb_pd_identity partner_ident;
249 	struct typec_partner_desc partner_desc;
250 	struct typec_partner *partner;
251 
252 	enum typec_cc_status cc_req;
253 
254 	enum typec_cc_status cc1;
255 	enum typec_cc_status cc2;
256 	enum typec_cc_polarity polarity;
257 
258 	bool attached;
259 	bool connected;
260 	enum typec_port_type port_type;
261 	bool vbus_present;
262 	bool vbus_never_low;
263 	bool vbus_source;
264 	bool vbus_charge;
265 
266 	bool send_discover;
267 	bool op_vsafe5v;
268 
269 	int try_role;
270 	int try_snk_count;
271 	int try_src_count;
272 
273 	enum pd_msg_request queued_message;
274 
275 	enum tcpm_state enter_state;
276 	enum tcpm_state prev_state;
277 	enum tcpm_state state;
278 	enum tcpm_state delayed_state;
279 	ktime_t delayed_runtime;
280 	unsigned long delay_ms;
281 
282 	spinlock_t pd_event_lock;
283 	u32 pd_events;
284 
285 	struct kthread_work event_work;
286 	struct hrtimer state_machine_timer;
287 	struct kthread_work state_machine;
288 	struct hrtimer vdm_state_machine_timer;
289 	struct kthread_work vdm_state_machine;
290 	struct hrtimer enable_frs_timer;
291 	struct kthread_work enable_frs;
292 	bool state_machine_running;
293 
294 	struct completion tx_complete;
295 	enum tcpm_transmit_status tx_status;
296 
297 	struct mutex swap_lock;		/* swap command lock */
298 	bool swap_pending;
299 	bool non_pd_role_swap;
300 	struct completion swap_complete;
301 	int swap_status;
302 
303 	unsigned int negotiated_rev;
304 	unsigned int message_id;
305 	unsigned int caps_count;
306 	unsigned int hard_reset_count;
307 	bool pd_capable;
308 	bool explicit_contract;
309 	unsigned int rx_msgid;
310 
311 	/* Partner capabilities/requests */
312 	u32 sink_request;
313 	u32 source_caps[PDO_MAX_OBJECTS];
314 	unsigned int nr_source_caps;
315 	u32 sink_caps[PDO_MAX_OBJECTS];
316 	unsigned int nr_sink_caps;
317 
318 	/* Local capabilities */
319 	u32 src_pdo[PDO_MAX_OBJECTS];
320 	unsigned int nr_src_pdo;
321 	u32 snk_pdo[PDO_MAX_OBJECTS];
322 	unsigned int nr_snk_pdo;
323 	u32 snk_vdo[VDO_MAX_OBJECTS];
324 	unsigned int nr_snk_vdo;
325 
326 	unsigned int operating_snk_mw;
327 	bool update_sink_caps;
328 
329 	/* Requested current / voltage */
330 	u32 current_limit;
331 	u32 supply_voltage;
332 
333 	/* Used to export TA voltage and current */
334 	struct power_supply *psy;
335 	struct power_supply_desc psy_desc;
336 	enum power_supply_usb_type usb_type;
337 
338 	u32 bist_request;
339 
340 	/* PD state for Vendor Defined Messages */
341 	enum vdm_states vdm_state;
342 	u32 vdm_retries;
343 	/* next Vendor Defined Message to send */
344 	u32 vdo_data[VDO_MAX_SIZE];
345 	u8 vdo_count;
346 	/* VDO to retry if UFP responder replied busy */
347 	u32 vdo_retry;
348 
349 	/* PPS */
350 	struct pd_pps_data pps_data;
351 	struct completion pps_complete;
352 	bool pps_pending;
353 	int pps_status;
354 
355 	/* Alternate mode data */
356 	struct pd_mode_data mode_data;
357 	struct typec_altmode *partner_altmode[ALTMODE_DISCOVERY_MAX];
358 	struct typec_altmode *port_altmode[ALTMODE_DISCOVERY_MAX];
359 
360 	/* Deadline in jiffies to exit src_try_wait state */
361 	unsigned long max_wait;
362 
363 	/* port belongs to a self powered device */
364 	bool self_powered;
365 
366 	/* FRS */
367 	enum frs_typec_current frs_current;
368 
369 	/* Sink caps have been queried */
370 	bool sink_cap_done;
371 
372 #ifdef CONFIG_DEBUG_FS
373 	struct dentry *dentry;
374 	struct mutex logbuffer_lock;	/* log buffer access lock */
375 	int logbuffer_head;
376 	int logbuffer_tail;
377 	u8 *logbuffer[LOG_BUFFER_ENTRIES];
378 #endif
379 };
380 
381 struct pd_rx_event {
382 	struct kthread_work work;
383 	struct tcpm_port *port;
384 	struct pd_message msg;
385 };
386 
387 #define tcpm_cc_is_sink(cc) \
388 	((cc) == TYPEC_CC_RP_DEF || (cc) == TYPEC_CC_RP_1_5 || \
389 	 (cc) == TYPEC_CC_RP_3_0)
390 
391 #define tcpm_port_is_sink(port) \
392 	((tcpm_cc_is_sink((port)->cc1) && !tcpm_cc_is_sink((port)->cc2)) || \
393 	 (tcpm_cc_is_sink((port)->cc2) && !tcpm_cc_is_sink((port)->cc1)))
394 
395 #define tcpm_cc_is_source(cc) ((cc) == TYPEC_CC_RD)
396 #define tcpm_cc_is_audio(cc) ((cc) == TYPEC_CC_RA)
397 #define tcpm_cc_is_open(cc) ((cc) == TYPEC_CC_OPEN)
398 
399 #define tcpm_port_is_source(port) \
400 	((tcpm_cc_is_source((port)->cc1) && \
401 	 !tcpm_cc_is_source((port)->cc2)) || \
402 	 (tcpm_cc_is_source((port)->cc2) && \
403 	  !tcpm_cc_is_source((port)->cc1)))
404 
405 #define tcpm_port_is_debug(port) \
406 	(tcpm_cc_is_source((port)->cc1) && tcpm_cc_is_source((port)->cc2))
407 
408 #define tcpm_port_is_audio(port) \
409 	(tcpm_cc_is_audio((port)->cc1) && tcpm_cc_is_audio((port)->cc2))
410 
411 #define tcpm_port_is_audio_detached(port) \
412 	((tcpm_cc_is_audio((port)->cc1) && tcpm_cc_is_open((port)->cc2)) || \
413 	 (tcpm_cc_is_audio((port)->cc2) && tcpm_cc_is_open((port)->cc1)))
414 
415 #define tcpm_try_snk(port) \
416 	((port)->try_snk_count == 0 && (port)->try_role == TYPEC_SINK && \
417 	(port)->port_type == TYPEC_PORT_DRP)
418 
419 #define tcpm_try_src(port) \
420 	((port)->try_src_count == 0 && (port)->try_role == TYPEC_SOURCE && \
421 	(port)->port_type == TYPEC_PORT_DRP)
422 
423 #define tcpm_data_role_for_source(port) \
424 	((port)->typec_caps.data == TYPEC_PORT_UFP ? \
425 	TYPEC_DEVICE : TYPEC_HOST)
426 
427 #define tcpm_data_role_for_sink(port) \
428 	((port)->typec_caps.data == TYPEC_PORT_DFP ? \
429 	TYPEC_HOST : TYPEC_DEVICE)
430 
431 static enum tcpm_state tcpm_default_state(struct tcpm_port *port)
432 {
433 	if (port->port_type == TYPEC_PORT_DRP) {
434 		if (port->try_role == TYPEC_SINK)
435 			return SNK_UNATTACHED;
436 		else if (port->try_role == TYPEC_SOURCE)
437 			return SRC_UNATTACHED;
438 		/* Fall through to return SRC_UNATTACHED */
439 	} else if (port->port_type == TYPEC_PORT_SNK) {
440 		return SNK_UNATTACHED;
441 	}
442 	return SRC_UNATTACHED;
443 }
444 
445 static bool tcpm_port_is_disconnected(struct tcpm_port *port)
446 {
447 	return (!port->attached && port->cc1 == TYPEC_CC_OPEN &&
448 		port->cc2 == TYPEC_CC_OPEN) ||
449 	       (port->attached && ((port->polarity == TYPEC_POLARITY_CC1 &&
450 				    port->cc1 == TYPEC_CC_OPEN) ||
451 				   (port->polarity == TYPEC_POLARITY_CC2 &&
452 				    port->cc2 == TYPEC_CC_OPEN)));
453 }
454 
455 /*
456  * Logging
457  */
458 
459 #ifdef CONFIG_DEBUG_FS
460 
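/* The log ring buffer is full when advancing the head would collide with the tail */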
461 static bool tcpm_log_full(struct tcpm_port *port)
462 {
463 	return port->logbuffer_tail ==
464 		(port->logbuffer_head + 1) % LOG_BUFFER_ENTRIES;
465 }
466 
467 __printf(2, 0)
468 static void _tcpm_log(struct tcpm_port *port, const char *fmt, va_list args)
469 {
470 	char tmpbuffer[LOG_BUFFER_ENTRY_SIZE];
471 	u64 ts_nsec = local_clock();
472 	unsigned long rem_nsec;
473 
474 	mutex_lock(&port->logbuffer_lock);
475 	if (!port->logbuffer[port->logbuffer_head]) {
476 		port->logbuffer[port->logbuffer_head] =
477 				kzalloc(LOG_BUFFER_ENTRY_SIZE, GFP_KERNEL);
478 		if (!port->logbuffer[port->logbuffer_head]) {
479 			mutex_unlock(&port->logbuffer_lock);
480 			return;
481 		}
482 	}
483 
484 	vsnprintf(tmpbuffer, sizeof(tmpbuffer), fmt, args);
485 
486 	if (tcpm_log_full(port)) {
487 		port->logbuffer_head = max(port->logbuffer_head - 1, 0);
488 		strcpy(tmpbuffer, "overflow");
489 	}
490 
491 	if (port->logbuffer_head < 0 ||
492 	    port->logbuffer_head >= LOG_BUFFER_ENTRIES) {
493 		dev_warn(port->dev,
494 			 "Bad log buffer index %d\n", port->logbuffer_head);
495 		goto abort;
496 	}
497 
498 	if (!port->logbuffer[port->logbuffer_head]) {
499 		dev_warn(port->dev,
500 			 "Log buffer index %d is NULL\n", port->logbuffer_head);
501 		goto abort;
502 	}
503 
504 	rem_nsec = do_div(ts_nsec, 1000000000);
505 	scnprintf(port->logbuffer[port->logbuffer_head],
506 		  LOG_BUFFER_ENTRY_SIZE, "[%5lu.%06lu] %s",
507 		  (unsigned long)ts_nsec, rem_nsec / 1000,
508 		  tmpbuffer);
509 	port->logbuffer_head = (port->logbuffer_head + 1) % LOG_BUFFER_ENTRIES;
510 
511 abort:
512 	mutex_unlock(&port->logbuffer_lock);
513 }
514 
515 __printf(2, 3)
516 static void tcpm_log(struct tcpm_port *port, const char *fmt, ...)
517 {
518 	va_list args;
519 
520 	/* Do not log while disconnected and unattached */
521 	if (tcpm_port_is_disconnected(port) &&
522 	    (port->state == SRC_UNATTACHED || port->state == SNK_UNATTACHED ||
523 	     port->state == TOGGLING))
524 		return;
525 
526 	va_start(args, fmt);
527 	_tcpm_log(port, fmt, args);
528 	va_end(args);
529 }
530 
531 __printf(2, 3)
532 static void tcpm_log_force(struct tcpm_port *port, const char *fmt, ...)
533 {
534 	va_list args;
535 
536 	va_start(args, fmt);
537 	_tcpm_log(port, fmt, args);
538 	va_end(args);
539 }
540 
541 static void tcpm_log_source_caps(struct tcpm_port *port)
542 {
543 	int i;
544 
545 	for (i = 0; i < port->nr_source_caps; i++) {
546 		u32 pdo = port->source_caps[i];
547 		enum pd_pdo_type type = pdo_type(pdo);
548 		char msg[64];
549 
550 		switch (type) {
551 		case PDO_TYPE_FIXED:
552 			scnprintf(msg, sizeof(msg),
553 				  "%u mV, %u mA [%s%s%s%s%s%s]",
554 				  pdo_fixed_voltage(pdo),
555 				  pdo_max_current(pdo),
556 				  (pdo & PDO_FIXED_DUAL_ROLE) ?
557 							"R" : "",
558 				  (pdo & PDO_FIXED_SUSPEND) ?
559 							"S" : "",
560 				  (pdo & PDO_FIXED_HIGHER_CAP) ?
561 							"H" : "",
562 				  (pdo & PDO_FIXED_USB_COMM) ?
563 							"U" : "",
564 				  (pdo & PDO_FIXED_DATA_SWAP) ?
565 							"D" : "",
566 				  (pdo & PDO_FIXED_EXTPOWER) ?
567 							"E" : "");
568 			break;
569 		case PDO_TYPE_VAR:
570 			scnprintf(msg, sizeof(msg),
571 				  "%u-%u mV, %u mA",
572 				  pdo_min_voltage(pdo),
573 				  pdo_max_voltage(pdo),
574 				  pdo_max_current(pdo));
575 			break;
576 		case PDO_TYPE_BATT:
577 			scnprintf(msg, sizeof(msg),
578 				  "%u-%u mV, %u mW",
579 				  pdo_min_voltage(pdo),
580 				  pdo_max_voltage(pdo),
581 				  pdo_max_power(pdo));
582 			break;
583 		case PDO_TYPE_APDO:
584 			if (pdo_apdo_type(pdo) == APDO_TYPE_PPS)
585 				scnprintf(msg, sizeof(msg),
586 					  "%u-%u mV, %u mA",
587 					  pdo_pps_apdo_min_voltage(pdo),
588 					  pdo_pps_apdo_max_voltage(pdo),
589 					  pdo_pps_apdo_max_current(pdo));
590 			else
591 				strcpy(msg, "undefined APDO");
592 			break;
593 		default:
594 			strcpy(msg, "undefined");
595 			break;
596 		}
597 		tcpm_log(port, " PDO %d: type %d, %s",
598 			 i, type, msg);
599 	}
600 }
601 
602 static int tcpm_debug_show(struct seq_file *s, void *v)
603 {
604 	struct tcpm_port *port = (struct tcpm_port *)s->private;
605 	int tail;
606 
607 	mutex_lock(&port->logbuffer_lock);
608 	tail = port->logbuffer_tail;
609 	while (tail != port->logbuffer_head) {
610 		seq_printf(s, "%s\n", port->logbuffer[tail]);
611 		tail = (tail + 1) % LOG_BUFFER_ENTRIES;
612 	}
613 	if (!seq_has_overflowed(s))
614 		port->logbuffer_tail = tail;
615 	mutex_unlock(&port->logbuffer_lock);
616 
617 	return 0;
618 }
619 DEFINE_SHOW_ATTRIBUTE(tcpm_debug);
620 
621 static void tcpm_debugfs_init(struct tcpm_port *port)
622 {
623 	char name[NAME_MAX];
624 
625 	mutex_init(&port->logbuffer_lock);
626 	snprintf(name, NAME_MAX, "tcpm-%s", dev_name(port->dev));
627 	port->dentry = debugfs_create_file(name, S_IFREG | 0444, usb_debug_root,
628 					   port, &tcpm_debug_fops);
629 }
630 
631 static void tcpm_debugfs_exit(struct tcpm_port *port)
632 {
633 	int i;
634 
635 	mutex_lock(&port->logbuffer_lock);
636 	for (i = 0; i < LOG_BUFFER_ENTRIES; i++) {
637 		kfree(port->logbuffer[i]);
638 		port->logbuffer[i] = NULL;
639 	}
640 	mutex_unlock(&port->logbuffer_lock);
641 
642 	debugfs_remove(port->dentry);
643 }
644 
645 #else
646 
647 __printf(2, 3)
648 static void tcpm_log(const struct tcpm_port *port, const char *fmt, ...) { }
649 __printf(2, 3)
650 static void tcpm_log_force(struct tcpm_port *port, const char *fmt, ...) { }
651 static void tcpm_log_source_caps(struct tcpm_port *port) { }
652 static void tcpm_debugfs_init(const struct tcpm_port *port) { }
653 static void tcpm_debugfs_exit(const struct tcpm_port *port) { }
654 
655 #endif
656 
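/*
 * Hand a PD message to the low level driver for transmission and wait for
 * the result. The port lock is released while waiting so that the driver's
 * tcpm_pd_transmit_complete() callback can run.
 */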
657 static int tcpm_pd_transmit(struct tcpm_port *port,
658 			    enum tcpm_transmit_type type,
659 			    const struct pd_message *msg)
660 {
661 	unsigned long timeout;
662 	int ret;
663 
664 	if (msg)
665 		tcpm_log(port, "PD TX, header: %#x", le16_to_cpu(msg->header));
666 	else
667 		tcpm_log(port, "PD TX, type: %#x", type);
668 
669 	reinit_completion(&port->tx_complete);
670 	ret = port->tcpc->pd_transmit(port->tcpc, type, msg);
671 	if (ret < 0)
672 		return ret;
673 
674 	mutex_unlock(&port->lock);
675 	timeout = wait_for_completion_timeout(&port->tx_complete,
676 				msecs_to_jiffies(PD_T_TCPC_TX_TIMEOUT));
677 	mutex_lock(&port->lock);
678 	if (!timeout)
679 		return -ETIMEDOUT;
680 
681 	switch (port->tx_status) {
682 	case TCPC_TX_SUCCESS:
683 		port->message_id = (port->message_id + 1) & PD_HEADER_ID_MASK;
684 		return 0;
685 	case TCPC_TX_DISCARDED:
686 		return -EAGAIN;
687 	case TCPC_TX_FAILED:
688 	default:
689 		return -EIO;
690 	}
691 }
692 
693 void tcpm_pd_transmit_complete(struct tcpm_port *port,
694 			       enum tcpm_transmit_status status)
695 {
696 	tcpm_log(port, "PD TX complete, status: %u", status);
697 	port->tx_status = status;
698 	complete(&port->tx_complete);
699 }
700 EXPORT_SYMBOL_GPL(tcpm_pd_transmit_complete);
701 
702 static int tcpm_mux_set(struct tcpm_port *port, int state,
703 			enum usb_role usb_role,
704 			enum typec_orientation orientation)
705 {
706 	int ret;
707 
708 	tcpm_log(port, "Requesting mux state %d, usb-role %d, orientation %d",
709 		 state, usb_role, orientation);
710 
711 	ret = typec_set_orientation(port->typec_port, orientation);
712 	if (ret)
713 		return ret;
714 
715 	if (port->role_sw) {
716 		ret = usb_role_switch_set_role(port->role_sw, usb_role);
717 		if (ret)
718 			return ret;
719 	}
720 
721 	return typec_set_mode(port->typec_port, state);
722 }
723 
724 static int tcpm_set_polarity(struct tcpm_port *port,
725 			     enum typec_cc_polarity polarity)
726 {
727 	int ret;
728 
729 	tcpm_log(port, "polarity %d", polarity);
730 
731 	ret = port->tcpc->set_polarity(port->tcpc, polarity);
732 	if (ret < 0)
733 		return ret;
734 
735 	port->polarity = polarity;
736 
737 	return 0;
738 }
739 
740 static int tcpm_set_vconn(struct tcpm_port *port, bool enable)
741 {
742 	int ret;
743 
744 	tcpm_log(port, "vconn:=%d", enable);
745 
746 	ret = port->tcpc->set_vconn(port->tcpc, enable);
747 	if (!ret) {
748 		port->vconn_role = enable ? TYPEC_SOURCE : TYPEC_SINK;
749 		typec_set_vconn_role(port->typec_port, port->vconn_role);
750 	}
751 
752 	return ret;
753 }
754 
755 static u32 tcpm_get_current_limit(struct tcpm_port *port)
756 {
757 	enum typec_cc_status cc;
758 	u32 limit;
759 
760 	cc = port->polarity ? port->cc2 : port->cc1;
761 	switch (cc) {
762 	case TYPEC_CC_RP_1_5:
763 		limit = 1500;
764 		break;
765 	case TYPEC_CC_RP_3_0:
766 		limit = 3000;
767 		break;
768 	case TYPEC_CC_RP_DEF:
769 	default:
770 		if (port->tcpc->get_current_limit)
771 			limit = port->tcpc->get_current_limit(port->tcpc);
772 		else
773 			limit = 0;
774 		break;
775 	}
776 
777 	return limit;
778 }
779 
780 static int tcpm_set_current_limit(struct tcpm_port *port, u32 max_ma, u32 mv)
781 {
782 	int ret = -EOPNOTSUPP;
783 
784 	tcpm_log(port, "Setting voltage/current limit %u mV %u mA", mv, max_ma);
785 
786 	port->supply_voltage = mv;
787 	port->current_limit = max_ma;
788 
789 	if (port->tcpc->set_current_limit)
790 		ret = port->tcpc->set_current_limit(port->tcpc, max_ma, mv);
791 
792 	return ret;
793 }
794 
795 /*
796  * Determine RP value to set based on maximum current supported
797  * by a port if configured as source.
798  * Returns CC value to report to link partner.
799  */
800 static enum typec_cc_status tcpm_rp_cc(struct tcpm_port *port)
801 {
802 	const u32 *src_pdo = port->src_pdo;
803 	int nr_pdo = port->nr_src_pdo;
804 	int i;
805 
806 	/*
807 	 * Search for first entry with matching voltage.
808 	 * It should report the maximum supported current.
809 	 */
810 	for (i = 0; i < nr_pdo; i++) {
811 		const u32 pdo = src_pdo[i];
812 
813 		if (pdo_type(pdo) == PDO_TYPE_FIXED &&
814 		    pdo_fixed_voltage(pdo) == 5000) {
815 			unsigned int curr = pdo_max_current(pdo);
816 
817 			if (curr >= 3000)
818 				return TYPEC_CC_RP_3_0;
819 			else if (curr >= 1500)
820 				return TYPEC_CC_RP_1_5;
821 			return TYPEC_CC_RP_DEF;
822 		}
823 	}
824 
825 	return TYPEC_CC_RP_DEF;
826 }
827 
828 static int tcpm_set_attached_state(struct tcpm_port *port, bool attached)
829 {
830 	return port->tcpc->set_roles(port->tcpc, attached, port->pwr_role,
831 				     port->data_role);
832 }
833 
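/*
 * Apply a new power/data role combination: program the orientation, USB
 * role switch and mux, inform the low level driver, and update the typec
 * class state.
 */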
834 static int tcpm_set_roles(struct tcpm_port *port, bool attached,
835 			  enum typec_role role, enum typec_data_role data)
836 {
837 	enum typec_orientation orientation;
838 	enum usb_role usb_role;
839 	int ret;
840 
841 	if (port->polarity == TYPEC_POLARITY_CC1)
842 		orientation = TYPEC_ORIENTATION_NORMAL;
843 	else
844 		orientation = TYPEC_ORIENTATION_REVERSE;
845 
846 	if (port->typec_caps.data == TYPEC_PORT_DRD) {
847 		if (data == TYPEC_HOST)
848 			usb_role = USB_ROLE_HOST;
849 		else
850 			usb_role = USB_ROLE_DEVICE;
851 	} else if (port->typec_caps.data == TYPEC_PORT_DFP) {
852 		if (data == TYPEC_HOST) {
853 			if (role == TYPEC_SOURCE)
854 				usb_role = USB_ROLE_HOST;
855 			else
856 				usb_role = USB_ROLE_NONE;
857 		} else {
858 			return -ENOTSUPP;
859 		}
860 	} else {
861 		if (data == TYPEC_DEVICE) {
862 			if (role == TYPEC_SINK)
863 				usb_role = USB_ROLE_DEVICE;
864 			else
865 				usb_role = USB_ROLE_NONE;
866 		} else {
867 			return -ENOTSUPP;
868 		}
869 	}
870 
871 	ret = tcpm_mux_set(port, TYPEC_STATE_USB, usb_role, orientation);
872 	if (ret < 0)
873 		return ret;
874 
875 	ret = port->tcpc->set_roles(port->tcpc, attached, role, data);
876 	if (ret < 0)
877 		return ret;
878 
879 	port->pwr_role = role;
880 	port->data_role = data;
881 	typec_set_data_role(port->typec_port, data);
882 	typec_set_pwr_role(port->typec_port, role);
883 
884 	return 0;
885 }
886 
887 static int tcpm_set_pwr_role(struct tcpm_port *port, enum typec_role role)
888 {
889 	int ret;
890 
891 	ret = port->tcpc->set_roles(port->tcpc, true, role,
892 				    port->data_role);
893 	if (ret < 0)
894 		return ret;
895 
896 	port->pwr_role = role;
897 	typec_set_pwr_role(port->typec_port, role);
898 
899 	return 0;
900 }
901 
902 static int tcpm_pd_send_source_caps(struct tcpm_port *port)
903 {
904 	struct pd_message msg;
905 	int i;
906 
907 	memset(&msg, 0, sizeof(msg));
908 	if (!port->nr_src_pdo) {
909 		/* No source capabilities defined, sink only */
910 		msg.header = PD_HEADER_LE(PD_CTRL_REJECT,
911 					  port->pwr_role,
912 					  port->data_role,
913 					  port->negotiated_rev,
914 					  port->message_id, 0);
915 	} else {
916 		msg.header = PD_HEADER_LE(PD_DATA_SOURCE_CAP,
917 					  port->pwr_role,
918 					  port->data_role,
919 					  port->negotiated_rev,
920 					  port->message_id,
921 					  port->nr_src_pdo);
922 	}
923 	for (i = 0; i < port->nr_src_pdo; i++)
924 		msg.payload[i] = cpu_to_le32(port->src_pdo[i]);
925 
926 	return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
927 }
928 
929 static int tcpm_pd_send_sink_caps(struct tcpm_port *port)
930 {
931 	struct pd_message msg;
932 	int i;
933 
934 	memset(&msg, 0, sizeof(msg));
935 	if (!port->nr_snk_pdo) {
936 		/* No sink capabilities defined, source only */
937 		msg.header = PD_HEADER_LE(PD_CTRL_REJECT,
938 					  port->pwr_role,
939 					  port->data_role,
940 					  port->negotiated_rev,
941 					  port->message_id, 0);
942 	} else {
943 		msg.header = PD_HEADER_LE(PD_DATA_SINK_CAP,
944 					  port->pwr_role,
945 					  port->data_role,
946 					  port->negotiated_rev,
947 					  port->message_id,
948 					  port->nr_snk_pdo);
949 	}
950 	for (i = 0; i < port->nr_snk_pdo; i++)
951 		msg.payload[i] = cpu_to_le32(port->snk_pdo[i]);
952 
953 	return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
954 }
955 
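/*
 * (Re)arm the state machine timer, or queue the state machine work
 * immediately when no delay is requested.
 */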
956 static void mod_tcpm_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
957 {
958 	if (delay_ms) {
959 		hrtimer_start(&port->state_machine_timer, ms_to_ktime(delay_ms), HRTIMER_MODE_REL);
960 	} else {
961 		hrtimer_cancel(&port->state_machine_timer);
962 		kthread_queue_work(port->wq, &port->state_machine);
963 	}
964 }
965 
966 static void mod_vdm_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
967 {
968 	if (delay_ms) {
969 		hrtimer_start(&port->vdm_state_machine_timer, ms_to_ktime(delay_ms),
970 			      HRTIMER_MODE_REL);
971 	} else {
972 		hrtimer_cancel(&port->vdm_state_machine_timer);
973 		kthread_queue_work(port->wq, &port->vdm_state_machine);
974 	}
975 }
976 
977 static void mod_enable_frs_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
978 {
979 	if (delay_ms) {
980 		hrtimer_start(&port->enable_frs_timer, ms_to_ktime(delay_ms), HRTIMER_MODE_REL);
981 	} else {
982 		hrtimer_cancel(&port->enable_frs_timer);
983 		kthread_queue_work(port->wq, &port->enable_frs);
984 	}
985 }
986 
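/*
 * Request a state change. A non-zero delay arms the state machine timer;
 * otherwise the state is switched immediately and the state machine work
 * is queued unless it is already running.
 */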
987 static void tcpm_set_state(struct tcpm_port *port, enum tcpm_state state,
988 			   unsigned int delay_ms)
989 {
990 	if (delay_ms) {
991 		tcpm_log(port, "pending state change %s -> %s @ %u ms",
992 			 tcpm_states[port->state], tcpm_states[state],
993 			 delay_ms);
994 		port->delayed_state = state;
995 		mod_tcpm_delayed_work(port, delay_ms);
996 		port->delayed_runtime = ktime_add(ktime_get(), ms_to_ktime(delay_ms));
997 		port->delay_ms = delay_ms;
998 	} else {
999 		tcpm_log(port, "state change %s -> %s",
1000 			 tcpm_states[port->state], tcpm_states[state]);
1001 		port->delayed_state = INVALID_STATE;
1002 		port->prev_state = port->state;
1003 		port->state = state;
1004 		/*
1005 		 * Don't re-queue the state machine work item if we're currently
1006 		 * in the state machine and we're immediately changing states.
1007 		 * tcpm_state_machine_work() will continue running the state
1008 		 * machine.
1009 		 */
1010 		if (!port->state_machine_running)
1011 			mod_tcpm_delayed_work(port, 0);
1012 	}
1013 }
1014 
1015 static void tcpm_set_state_cond(struct tcpm_port *port, enum tcpm_state state,
1016 				unsigned int delay_ms)
1017 {
1018 	if (port->enter_state == port->state)
1019 		tcpm_set_state(port, state, delay_ms);
1020 	else
1021 		tcpm_log(port,
1022 			 "skipped %sstate change %s -> %s [%u ms], context state %s",
1023 			 delay_ms ? "delayed " : "",
1024 			 tcpm_states[port->state], tcpm_states[state],
1025 			 delay_ms, tcpm_states[port->enter_state]);
1026 }
1027 
1028 static void tcpm_queue_message(struct tcpm_port *port,
1029 			       enum pd_msg_request message)
1030 {
1031 	port->queued_message = message;
1032 	mod_tcpm_delayed_work(port, 0);
1033 }
1034 
1035 /*
1036  * VDM/VDO handling functions
1037  */
1038 static void tcpm_queue_vdm(struct tcpm_port *port, const u32 header,
1039 			   const u32 *data, int cnt)
1040 {
1041 	WARN_ON(!mutex_is_locked(&port->lock));
1042 
1043 	/* Make sure we are not still processing a previous VDM packet */
1044 	WARN_ON(port->vdm_state > VDM_STATE_DONE);
1045 
1046 	port->vdo_count = cnt + 1;
1047 	port->vdo_data[0] = header;
1048 	memcpy(&port->vdo_data[1], data, sizeof(u32) * cnt);
1049 	/* Set ready, vdm state machine will actually send */
1050 	port->vdm_retries = 0;
1051 	port->vdm_state = VDM_STATE_READY;
1052 
1053 	mod_vdm_delayed_work(port, 0);
1054 }
1055 
1056 static void tcpm_queue_vdm_unlocked(struct tcpm_port *port, const u32 header,
1057 				    const u32 *data, int cnt)
1058 {
1059 	mutex_lock(&port->lock);
1060 	tcpm_queue_vdm(port, header, data, cnt);
1061 	mutex_unlock(&port->lock);
1062 }
1063 
1064 static void svdm_consume_identity(struct tcpm_port *port, const u32 *p, int cnt)
1065 {
1066 	u32 vdo = p[VDO_INDEX_IDH];
1067 	u32 product = p[VDO_INDEX_PRODUCT];
1068 
1069 	memset(&port->mode_data, 0, sizeof(port->mode_data));
1070 
1071 	port->partner_ident.id_header = vdo;
1072 	port->partner_ident.cert_stat = p[VDO_INDEX_CSTAT];
1073 	port->partner_ident.product = product;
1074 
1075 	typec_partner_set_identity(port->partner);
1076 
1077 	tcpm_log(port, "Identity: %04x:%04x.%04x",
1078 		 PD_IDH_VID(vdo),
1079 		 PD_PRODUCT_PID(product), product & 0xffff);
1080 }
1081 
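/*
 * Each Discover SVIDs response VDO carries two 16-bit SVIDs; an SVID of
 * zero terminates the list. Returns true if the list may continue and
 * another Discover SVIDs request should be sent.
 */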
1082 static bool svdm_consume_svids(struct tcpm_port *port, const u32 *p, int cnt)
1083 {
1084 	struct pd_mode_data *pmdata = &port->mode_data;
1085 	int i;
1086 
1087 	for (i = 1; i < cnt; i++) {
1088 		u16 svid;
1089 
1090 		svid = (p[i] >> 16) & 0xffff;
1091 		if (!svid)
1092 			return false;
1093 
1094 		if (pmdata->nsvids >= SVID_DISCOVERY_MAX)
1095 			goto abort;
1096 
1097 		pmdata->svids[pmdata->nsvids++] = svid;
1098 		tcpm_log(port, "SVID %d: 0x%x", pmdata->nsvids, svid);
1099 
1100 		svid = p[i] & 0xffff;
1101 		if (!svid)
1102 			return false;
1103 
1104 		if (pmdata->nsvids >= SVID_DISCOVERY_MAX)
1105 			goto abort;
1106 
1107 		pmdata->svids[pmdata->nsvids++] = svid;
1108 		tcpm_log(port, "SVID %d: 0x%x", pmdata->nsvids, svid);
1109 	}
1110 	return true;
1111 abort:
1112 	tcpm_log(port, "SVID_DISCOVERY_MAX(%d) too low!", SVID_DISCOVERY_MAX);
1113 	return false;
1114 }
1115 
1116 static void svdm_consume_modes(struct tcpm_port *port, const u32 *p, int cnt)
1117 {
1118 	struct pd_mode_data *pmdata = &port->mode_data;
1119 	struct typec_altmode_desc *paltmode;
1120 	int i;
1121 
1122 	if (pmdata->altmodes >= ARRAY_SIZE(port->partner_altmode)) {
1123 		/* Already logged in svdm_consume_svids() */
1124 		return;
1125 	}
1126 
1127 	for (i = 1; i < cnt; i++) {
1128 		paltmode = &pmdata->altmode_desc[pmdata->altmodes];
1129 		memset(paltmode, 0, sizeof(*paltmode));
1130 
1131 		paltmode->svid = pmdata->svids[pmdata->svid_index];
1132 		paltmode->mode = i;
1133 		paltmode->vdo = p[i];
1134 
1135 		tcpm_log(port, " Alternate mode %d: SVID 0x%04x, VDO %d: 0x%08x",
1136 			 pmdata->altmodes, paltmode->svid,
1137 			 paltmode->mode, paltmode->vdo);
1138 
1139 		pmdata->altmodes++;
1140 	}
1141 }
1142 
1143 static void tcpm_register_partner_altmodes(struct tcpm_port *port)
1144 {
1145 	struct pd_mode_data *modep = &port->mode_data;
1146 	struct typec_altmode *altmode;
1147 	int i;
1148 
1149 	for (i = 0; i < modep->altmodes; i++) {
1150 		altmode = typec_partner_register_altmode(port->partner,
1151 						&modep->altmode_desc[i]);
1152 		if (IS_ERR(altmode)) {
1153 			tcpm_log(port, "Failed to register partner SVID 0x%04x",
1154 				 modep->altmode_desc[i].svid);
1155 			altmode = NULL;
1156 		}
1157 		port->partner_altmode[i] = altmode;
1158 	}
1159 }
1160 
1161 #define supports_modal(port)	PD_IDH_MODAL_SUPP((port)->partner_ident.id_header)
1162 
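/*
 * Handle a received Structured VDM. Builds a response in *response (if any)
 * and reports via *adev_action what should be forwarded to the matching
 * alternate mode device. Returns the length of the response in 32-bit
 * words (0 for no response).
 */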
1163 static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
1164 			const u32 *p, int cnt, u32 *response,
1165 			enum adev_actions *adev_action)
1166 {
1167 	struct typec_altmode *pdev;
1168 	struct pd_mode_data *modep;
1169 	int rlen = 0;
1170 	int cmd_type;
1171 	int cmd;
1172 	int i;
1173 
1174 	cmd_type = PD_VDO_CMDT(p[0]);
1175 	cmd = PD_VDO_CMD(p[0]);
1176 
1177 	tcpm_log(port, "Rx VDM cmd 0x%x type %d cmd %d len %d",
1178 		 p[0], cmd_type, cmd, cnt);
1179 
1180 	modep = &port->mode_data;
1181 
1182 	pdev = typec_match_altmode(port->partner_altmode, ALTMODE_DISCOVERY_MAX,
1183 				   PD_VDO_VID(p[0]), PD_VDO_OPOS(p[0]));
1184 
1185 	switch (cmd_type) {
1186 	case CMDT_INIT:
1187 		switch (cmd) {
1188 		case CMD_DISCOVER_IDENT:
1189 			/* 6.4.4.3.1: Only respond as UFP (device) */
1190 			if (port->data_role == TYPEC_DEVICE &&
1191 			    port->nr_snk_vdo) {
1192 				for (i = 0; i <  port->nr_snk_vdo; i++)
1193 					response[i + 1] = port->snk_vdo[i];
1194 				rlen = port->nr_snk_vdo + 1;
1195 			}
1196 			break;
1197 		case CMD_DISCOVER_SVID:
1198 			break;
1199 		case CMD_DISCOVER_MODES:
1200 			break;
1201 		case CMD_ENTER_MODE:
1202 			break;
1203 		case CMD_EXIT_MODE:
1204 			break;
1205 		case CMD_ATTENTION:
1206 			/* Attention command does not have response */
1207 			*adev_action = ADEV_ATTENTION;
1208 			return 0;
1209 		default:
1210 			break;
1211 		}
1212 		if (rlen >= 1) {
1213 			response[0] = p[0] | VDO_CMDT(CMDT_RSP_ACK);
1214 		} else if (rlen == 0) {
1215 			response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
1216 			rlen = 1;
1217 		} else {
1218 			response[0] = p[0] | VDO_CMDT(CMDT_RSP_BUSY);
1219 			rlen = 1;
1220 		}
1221 		break;
1222 	case CMDT_RSP_ACK:
1223 		/* silently drop message if we are not connected */
1224 		if (IS_ERR_OR_NULL(port->partner))
1225 			break;
1226 
1227 		switch (cmd) {
1228 		case CMD_DISCOVER_IDENT:
1229 			/* 6.4.4.3.1 */
1230 			svdm_consume_identity(port, p, cnt);
1231 			response[0] = VDO(USB_SID_PD, 1, CMD_DISCOVER_SVID);
1232 			rlen = 1;
1233 			break;
1234 		case CMD_DISCOVER_SVID:
1235 			/* 6.4.4.3.2 */
1236 			if (svdm_consume_svids(port, p, cnt)) {
1237 				response[0] = VDO(USB_SID_PD, 1,
1238 						  CMD_DISCOVER_SVID);
1239 				rlen = 1;
1240 			} else if (modep->nsvids && supports_modal(port)) {
1241 				response[0] = VDO(modep->svids[0], 1,
1242 						  CMD_DISCOVER_MODES);
1243 				rlen = 1;
1244 			}
1245 			break;
1246 		case CMD_DISCOVER_MODES:
1247 			/* 6.4.4.3.3 */
1248 			svdm_consume_modes(port, p, cnt);
1249 			modep->svid_index++;
1250 			if (modep->svid_index < modep->nsvids) {
1251 				u16 svid = modep->svids[modep->svid_index];
1252 				response[0] = VDO(svid, 1, CMD_DISCOVER_MODES);
1253 				rlen = 1;
1254 			} else {
1255 				tcpm_register_partner_altmodes(port);
1256 			}
1257 			break;
1258 		case CMD_ENTER_MODE:
1259 			if (adev && pdev) {
1260 				typec_altmode_update_active(pdev, true);
1261 				*adev_action = ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL;
1262 			}
1263 			return 0;
1264 		case CMD_EXIT_MODE:
1265 			if (adev && pdev) {
1266 				typec_altmode_update_active(pdev, false);
1267 				/* Back to USB Operation */
1268 				*adev_action = ADEV_NOTIFY_USB_AND_QUEUE_VDM;
1269 				return 0;
1270 			}
1271 			break;
1272 		default:
1273 			break;
1274 		}
1275 		break;
1276 	case CMDT_RSP_NAK:
1277 		switch (cmd) {
1278 		case CMD_ENTER_MODE:
1279 			/* Back to USB Operation */
1280 			*adev_action = ADEV_NOTIFY_USB_AND_QUEUE_VDM;
1281 			return 0;
1282 		default:
1283 			break;
1284 		}
1285 		break;
1286 	default:
1287 		break;
1288 	}
1289 
1290 	/* Informing the alternate mode drivers about everything */
1291 	*adev_action = ADEV_QUEUE_VDM;
1292 	return rlen;
1293 }
1294 
1295 static void tcpm_handle_vdm_request(struct tcpm_port *port,
1296 				    const __le32 *payload, int cnt)
1297 {
1298 	enum adev_actions adev_action = ADEV_NONE;
1299 	struct typec_altmode *adev;
1300 	u32 p[PD_MAX_PAYLOAD];
1301 	u32 response[8] = { };
1302 	int i, rlen = 0;
1303 
1304 	for (i = 0; i < cnt; i++)
1305 		p[i] = le32_to_cpu(payload[i]);
1306 
1307 	adev = typec_match_altmode(port->port_altmode, ALTMODE_DISCOVERY_MAX,
1308 				   PD_VDO_VID(p[0]), PD_VDO_OPOS(p[0]));
1309 
1310 	if (port->vdm_state == VDM_STATE_BUSY) {
1311 		/* If UFP responded busy retry after timeout */
1312 		if (PD_VDO_CMDT(p[0]) == CMDT_RSP_BUSY) {
1313 			port->vdm_state = VDM_STATE_WAIT_RSP_BUSY;
1314 			port->vdo_retry = (p[0] & ~VDO_CMDT_MASK) |
1315 				CMDT_INIT;
1316 			mod_vdm_delayed_work(port, PD_T_VDM_BUSY);
1317 			return;
1318 		}
1319 		port->vdm_state = VDM_STATE_DONE;
1320 	}
1321 
1322 	if (PD_VDO_SVDM(p[0]))
1323 		rlen = tcpm_pd_svdm(port, adev, p, cnt, response, &adev_action);
1324 
1325 	/*
1326 	 * We are done with any state stored in the port struct now, except
1327 	 * for any port struct changes done by the tcpm_queue_vdm() call
1328 	 * below, which is a separate operation.
1329 	 *
1330 	 * So we can safely release the lock here; and we MUST release the
1331 	 * lock here to avoid an AB BA lock inversion:
1332 	 *
1333 	 * If we keep the lock here then the lock ordering in this path is:
1334 	 * 1. tcpm_pd_rx_handler take the tcpm port lock
1335 	 * 2. One of the typec_altmode_* calls below takes the alt-mode's lock
1336 	 *
1337 	 * And we also have this ordering:
1338 	 * 1. alt-mode driver takes the alt-mode's lock
1339 	 * 2. alt-mode driver calls tcpm_altmode_enter which takes the
1340 	 *    tcpm port lock
1341 	 *
1342 	 * Dropping our lock here avoids this.
1343 	 */
1344 	mutex_unlock(&port->lock);
1345 
1346 	if (adev) {
1347 		switch (adev_action) {
1348 		case ADEV_NONE:
1349 			break;
1350 		case ADEV_NOTIFY_USB_AND_QUEUE_VDM:
1351 			WARN_ON(typec_altmode_notify(adev, TYPEC_STATE_USB, NULL));
1352 			typec_altmode_vdm(adev, p[0], &p[1], cnt);
1353 			break;
1354 		case ADEV_QUEUE_VDM:
1355 			typec_altmode_vdm(adev, p[0], &p[1], cnt);
1356 			break;
1357 		case ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL:
1358 			if (typec_altmode_vdm(adev, p[0], &p[1], cnt)) {
1359 				response[0] = VDO(adev->svid, 1, CMD_EXIT_MODE);
1360 				response[0] |= VDO_OPOS(adev->mode);
1361 				rlen = 1;
1362 			}
1363 			break;
1364 		case ADEV_ATTENTION:
1365 			typec_altmode_attention(adev, p[1]);
1366 			break;
1367 		}
1368 	}
1369 
1370 	/*
1371 	 * We must re-take the lock here to balance the unlock in
1372 	 * tcpm_pd_rx_handler, note that no changes, other than the
1373 	 * tcpm_queue_vdm call, are made while the lock is held again.
1374 	 * All that is done after the call is unwinding the call stack until
1375 	 * we return to tcpm_pd_rx_handler and do the unlock there.
1376 	 */
1377 	mutex_lock(&port->lock);
1378 
1379 	if (rlen > 0)
1380 		tcpm_queue_vdm(port, response[0], &response[1], rlen - 1);
1381 }
1382 
1383 static void tcpm_send_vdm(struct tcpm_port *port, u32 vid, int cmd,
1384 			  const u32 *data, int count)
1385 {
1386 	u32 header;
1387 
1388 	if (WARN_ON(count > VDO_MAX_SIZE - 1))
1389 		count = VDO_MAX_SIZE - 1;
1390 
1391 	/* set VDM header with VID & CMD */
1392 	header = VDO(vid, ((vid & USB_SID_PD) == USB_SID_PD) ?
1393 			1 : (PD_VDO_CMD(cmd) <= CMD_ATTENTION), cmd);
1394 	tcpm_queue_vdm(port, header, data, count);
1395 }
1396 
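/*
 * Select the response timeout for the VDM just sent, based on whether it
 * is structured and on the command/command type it carries.
 */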
1397 static unsigned int vdm_ready_timeout(u32 vdm_hdr)
1398 {
1399 	unsigned int timeout;
1400 	int cmd = PD_VDO_CMD(vdm_hdr);
1401 
1402 	/* it's not a structured VDM command */
1403 	if (!PD_VDO_SVDM(vdm_hdr))
1404 		return PD_T_VDM_UNSTRUCTURED;
1405 
1406 	switch (PD_VDO_CMDT(vdm_hdr)) {
1407 	case CMDT_INIT:
1408 		if (cmd == CMD_ENTER_MODE || cmd == CMD_EXIT_MODE)
1409 			timeout = PD_T_VDM_WAIT_MODE_E;
1410 		else
1411 			timeout = PD_T_VDM_SNDR_RSP;
1412 		break;
1413 	default:
1414 		if (cmd == CMD_ENTER_MODE || cmd == CMD_EXIT_MODE)
1415 			timeout = PD_T_VDM_E_MODE;
1416 		else
1417 			timeout = PD_T_VDM_RCVR_RSP;
1418 		break;
1419 	}
1420 	return timeout;
1421 }
1422 
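/*
 * Run one step of the VDM state machine: transmit a queued VDM once the
 * port is attached and in a ready state, and handle busy responses,
 * timeouts and transmit errors.
 */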
1423 static void vdm_run_state_machine(struct tcpm_port *port)
1424 {
1425 	struct pd_message msg;
1426 	int i, res;
1427 
1428 	switch (port->vdm_state) {
1429 	case VDM_STATE_READY:
1430 		/* Only transmit VDM if attached */
1431 		if (!port->attached) {
1432 			port->vdm_state = VDM_STATE_ERR_BUSY;
1433 			break;
1434 		}
1435 
1436 		/*
1437 		 * if there's traffic or we're not in PDO ready state don't send
1438 		 * a VDM.
1439 		 */
1440 		if (port->state != SRC_READY && port->state != SNK_READY)
1441 			break;
1442 
1443 		/* Prepare and send VDM */
1444 		memset(&msg, 0, sizeof(msg));
1445 		msg.header = PD_HEADER_LE(PD_DATA_VENDOR_DEF,
1446 					  port->pwr_role,
1447 					  port->data_role,
1448 					  port->negotiated_rev,
1449 					  port->message_id, port->vdo_count);
1450 		for (i = 0; i < port->vdo_count; i++)
1451 			msg.payload[i] = cpu_to_le32(port->vdo_data[i]);
1452 		res = tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
1453 		if (res < 0) {
1454 			port->vdm_state = VDM_STATE_ERR_SEND;
1455 		} else {
1456 			unsigned long timeout;
1457 
1458 			port->vdm_retries = 0;
1459 			port->vdm_state = VDM_STATE_BUSY;
1460 			timeout = vdm_ready_timeout(port->vdo_data[0]);
1461 			mod_vdm_delayed_work(port, timeout);
1462 		}
1463 		break;
1464 	case VDM_STATE_WAIT_RSP_BUSY:
1465 		port->vdo_data[0] = port->vdo_retry;
1466 		port->vdo_count = 1;
1467 		port->vdm_state = VDM_STATE_READY;
1468 		break;
1469 	case VDM_STATE_BUSY:
1470 		port->vdm_state = VDM_STATE_ERR_TMOUT;
1471 		break;
1472 	case VDM_STATE_ERR_SEND:
1473 		/*
1474 		 * A partner which does not support USB PD will not reply,
1475 		 * so this is not a fatal error. At the same time, some
1476 		 * devices may not return GoodCRC under some circumstances,
1477 		 * so we need to retry.
1478 		 */
1479 		if (port->vdm_retries < 3) {
1480 			tcpm_log(port, "VDM Tx error, retry");
1481 			port->vdm_retries++;
1482 			port->vdm_state = VDM_STATE_READY;
1483 		}
1484 		break;
1485 	default:
1486 		break;
1487 	}
1488 }
1489 
1490 static void vdm_state_machine_work(struct kthread_work *work)
1491 {
1492 	struct tcpm_port *port = container_of(work, struct tcpm_port, vdm_state_machine);
1493 	enum vdm_states prev_state;
1494 
1495 	mutex_lock(&port->lock);
1496 
1497 	/*
1498 	 * Continue running as long as the port is not busy and there was
1499 	 * a state change.
1500 	 */
1501 	do {
1502 		prev_state = port->vdm_state;
1503 		vdm_run_state_machine(port);
1504 	} while (port->vdm_state != prev_state &&
1505 		 port->vdm_state != VDM_STATE_BUSY);
1506 
1507 	mutex_unlock(&port->lock);
1508 }
1509 
1510 enum pdo_err {
1511 	PDO_NO_ERR,
1512 	PDO_ERR_NO_VSAFE5V,
1513 	PDO_ERR_VSAFE5V_NOT_FIRST,
1514 	PDO_ERR_PDO_TYPE_NOT_IN_ORDER,
1515 	PDO_ERR_FIXED_NOT_SORTED,
1516 	PDO_ERR_VARIABLE_BATT_NOT_SORTED,
1517 	PDO_ERR_DUPE_PDO,
1518 	PDO_ERR_PPS_APDO_NOT_SORTED,
1519 	PDO_ERR_DUPE_PPS_APDO,
1520 };
1521 
1522 static const char * const pdo_err_msg[] = {
1523 	[PDO_ERR_NO_VSAFE5V] =
1524 	" err: source/sink caps should at least have vSafe5V",
1525 	[PDO_ERR_VSAFE5V_NOT_FIRST] =
1526 	" err: vSafe5V Fixed Supply Object Shall always be the first object",
1527 	[PDO_ERR_PDO_TYPE_NOT_IN_ORDER] =
1528 	" err: PDOs should be in the following order: Fixed; Battery; Variable",
1529 	[PDO_ERR_FIXED_NOT_SORTED] =
1530 	" err: Fixed supply pdos should be in increasing order of their fixed voltage",
1531 	[PDO_ERR_VARIABLE_BATT_NOT_SORTED] =
1532 	" err: Variable/Battery supply pdos should be in increasing order of their minimum voltage",
1533 	[PDO_ERR_DUPE_PDO] =
1534 	" err: Variable/Batt supply pdos cannot have same min/max voltage",
1535 	[PDO_ERR_PPS_APDO_NOT_SORTED] =
1536 	" err: Programmable power supply apdos should be in increasing order of their maximum voltage",
1537 	[PDO_ERR_DUPE_PPS_APDO] =
1538 	" err: Programmable power supply apdos cannot have same min/max voltage and max current",
1539 };
1540 
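/*
 * Check a set of source/sink PDOs against the ordering rules of the USB PD
 * specification: vSafe5V fixed supply first, PDO types grouped in order,
 * and voltages sorted from lowest to highest within each type.
 */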
1541 static enum pdo_err tcpm_caps_err(struct tcpm_port *port, const u32 *pdo,
1542 				  unsigned int nr_pdo)
1543 {
1544 	unsigned int i;
1545 
1546 	/* Should at least contain vSafe5v */
1547 	if (nr_pdo < 1)
1548 		return PDO_ERR_NO_VSAFE5V;
1549 
1550 	/* The vSafe5V Fixed Supply Object Shall always be the first object */
1551 	if (pdo_type(pdo[0]) != PDO_TYPE_FIXED ||
1552 	    pdo_fixed_voltage(pdo[0]) != VSAFE5V)
1553 		return PDO_ERR_VSAFE5V_NOT_FIRST;
1554 
1555 	for (i = 1; i < nr_pdo; i++) {
1556 		if (pdo_type(pdo[i]) < pdo_type(pdo[i - 1])) {
1557 			return PDO_ERR_PDO_TYPE_NOT_IN_ORDER;
1558 		} else if (pdo_type(pdo[i]) == pdo_type(pdo[i - 1])) {
1559 			enum pd_pdo_type type = pdo_type(pdo[i]);
1560 
1561 			switch (type) {
1562 			/*
1563 			 * The remaining Fixed Supply Objects, if
1564 			 * present, shall be sent in voltage order;
1565 			 * lowest to highest.
1566 			 */
1567 			case PDO_TYPE_FIXED:
1568 				if (pdo_fixed_voltage(pdo[i]) <=
1569 				    pdo_fixed_voltage(pdo[i - 1]))
1570 					return PDO_ERR_FIXED_NOT_SORTED;
1571 				break;
1572 			/*
1573 			 * The Battery Supply Objects and Variable
1574 			 * supply, if present shall be sent in Minimum
1575 			 * Voltage order; lowest to highest.
1576 			 */
1577 			case PDO_TYPE_VAR:
1578 			case PDO_TYPE_BATT:
1579 				if (pdo_min_voltage(pdo[i]) <
1580 				    pdo_min_voltage(pdo[i - 1]))
1581 					return PDO_ERR_VARIABLE_BATT_NOT_SORTED;
1582 				else if ((pdo_min_voltage(pdo[i]) ==
1583 					  pdo_min_voltage(pdo[i - 1])) &&
1584 					 (pdo_max_voltage(pdo[i]) ==
1585 					  pdo_max_voltage(pdo[i - 1])))
1586 					return PDO_ERR_DUPE_PDO;
1587 				break;
1588 			/*
1589 			 * The Programmable Power Supply APDOs, if present,
1590 			 * shall be sent in Maximum Voltage order;
1591 			 * lowest to highest.
1592 			 */
1593 			case PDO_TYPE_APDO:
1594 				if (pdo_apdo_type(pdo[i]) != APDO_TYPE_PPS)
1595 					break;
1596 
1597 				if (pdo_pps_apdo_max_voltage(pdo[i]) <
1598 				    pdo_pps_apdo_max_voltage(pdo[i - 1]))
1599 					return PDO_ERR_PPS_APDO_NOT_SORTED;
1600 				else if (pdo_pps_apdo_min_voltage(pdo[i]) ==
1601 					  pdo_pps_apdo_min_voltage(pdo[i - 1]) &&
1602 					 pdo_pps_apdo_max_voltage(pdo[i]) ==
1603 					  pdo_pps_apdo_max_voltage(pdo[i - 1]) &&
1604 					 pdo_pps_apdo_max_current(pdo[i]) ==
1605 					  pdo_pps_apdo_max_current(pdo[i - 1]))
1606 					return PDO_ERR_DUPE_PPS_APDO;
1607 				break;
1608 			default:
1609 				tcpm_log_force(port, " Unknown pdo type");
1610 			}
1611 		}
1612 	}
1613 
1614 	return PDO_NO_ERR;
1615 }
1616 
1617 static int tcpm_validate_caps(struct tcpm_port *port, const u32 *pdo,
1618 			      unsigned int nr_pdo)
1619 {
1620 	enum pdo_err err_index = tcpm_caps_err(port, pdo, nr_pdo);
1621 
1622 	if (err_index != PDO_NO_ERR) {
1623 		tcpm_log_force(port, " %s", pdo_err_msg[err_index]);
1624 		return -EINVAL;
1625 	}
1626 
1627 	return 0;
1628 }
1629 
1630 static int tcpm_altmode_enter(struct typec_altmode *altmode, u32 *vdo)
1631 {
1632 	struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
1633 	u32 header;
1634 
1635 	header = VDO(altmode->svid, vdo ? 2 : 1, CMD_ENTER_MODE);
1636 	header |= VDO_OPOS(altmode->mode);
1637 
1638 	tcpm_queue_vdm_unlocked(port, header, vdo, vdo ? 1 : 0);
1639 	return 0;
1640 }
1641 
1642 static int tcpm_altmode_exit(struct typec_altmode *altmode)
1643 {
1644 	struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
1645 	u32 header;
1646 
1647 	header = VDO(altmode->svid, 1, CMD_EXIT_MODE);
1648 	header |= VDO_OPOS(altmode->mode);
1649 
1650 	tcpm_queue_vdm_unlocked(port, header, NULL, 0);
1651 	return 0;
1652 }
1653 
1654 static int tcpm_altmode_vdm(struct typec_altmode *altmode,
1655 			    u32 header, const u32 *data, int count)
1656 {
1657 	struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
1658 
1659 	tcpm_queue_vdm_unlocked(port, header, data, count - 1);
1660 
1661 	return 0;
1662 }
1663 
1664 static const struct typec_altmode_ops tcpm_altmode_ops = {
1665 	.enter = tcpm_altmode_enter,
1666 	.exit = tcpm_altmode_exit,
1667 	.vdm = tcpm_altmode_vdm,
1668 };
1669 
1670 /*
1671  * PD (data, control) command handling functions
1672  */
1673 static inline enum tcpm_state ready_state(struct tcpm_port *port)
1674 {
1675 	if (port->pwr_role == TYPEC_SOURCE)
1676 		return SRC_READY;
1677 	else
1678 		return SNK_READY;
1679 }
1680 
1681 static int tcpm_pd_send_control(struct tcpm_port *port,
1682 				enum pd_ctrl_msg_type type);
1683 
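/*
 * Non-battery Alert messages trigger a Get_Status request; battery status
 * changes are ignored for now.
 */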
1684 static void tcpm_handle_alert(struct tcpm_port *port, const __le32 *payload,
1685 			      int cnt)
1686 {
1687 	u32 p0 = le32_to_cpu(payload[0]);
1688 	unsigned int type = usb_pd_ado_type(p0);
1689 
1690 	if (!type) {
1691 		tcpm_log(port, "Alert message received with no type");
1692 		return;
1693 	}
1694 
1695 	/* Just handling non-battery alerts for now */
1696 	if (!(type & USB_PD_ADO_TYPE_BATT_STATUS_CHANGE)) {
1697 		switch (port->state) {
1698 		case SRC_READY:
1699 		case SNK_READY:
1700 			tcpm_set_state(port, GET_STATUS_SEND, 0);
1701 			break;
1702 		default:
1703 			tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
1704 			break;
1705 		}
1706 	}
1707 }
1708 
1709 static void tcpm_pd_data_request(struct tcpm_port *port,
1710 				 const struct pd_message *msg)
1711 {
1712 	enum pd_data_msg_type type = pd_header_type_le(msg->header);
1713 	unsigned int cnt = pd_header_cnt_le(msg->header);
1714 	unsigned int rev = pd_header_rev_le(msg->header);
1715 	unsigned int i;
1716 	enum frs_typec_current frs_current;
1717 	bool frs_enable;
1718 	int ret;
1719 
1720 	switch (type) {
1721 	case PD_DATA_SOURCE_CAP:
1722 		if (port->pwr_role != TYPEC_SINK)
1723 			break;
1724 
1725 		for (i = 0; i < cnt; i++)
1726 			port->source_caps[i] = le32_to_cpu(msg->payload[i]);
1727 
1728 		port->nr_source_caps = cnt;
1729 
1730 		tcpm_log_source_caps(port);
1731 
1732 		tcpm_validate_caps(port, port->source_caps,
1733 				   port->nr_source_caps);
1734 
1735 		/*
1736 		 * Adjust revision in subsequent message headers, as required,
1737 		 * to comply with 6.2.1.1.5 of the USB PD 3.0 spec. We don't
1738 		 * support Rev 1.0 so just do nothing in that scenario.
1739 		 */
1740 		if (rev == PD_REV10)
1741 			break;
1742 
1743 		if (rev < PD_MAX_REV)
1744 			port->negotiated_rev = rev;
1745 
1746 		/*
1747 		 * This message may be received even if VBUS is not
1748 		 * present. This is quite unexpected; see USB PD
1749 		 * specification, sections 8.3.3.6.3.1 and 8.3.3.6.3.2.
1750 		 * However, at the same time, we must be ready to
1751 		 * receive this message and respond to it 15ms after
1752 		 * receiving PS_RDY during power swap operations, no matter
1753 		 * if VBUS is available or not (USB PD specification,
1754 		 * section 6.5.9.2).
1755 		 * So we need to accept the message either way,
1756 		 * but be prepared to keep waiting for VBUS after it was
1757 		 * handled.
1758 		 */
1759 		tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0);
1760 		break;
1761 	case PD_DATA_REQUEST:
1762 		if (port->pwr_role != TYPEC_SOURCE ||
1763 		    cnt != 1) {
1764 			tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
1765 			break;
1766 		}
1767 
1768 		/*
1769 		 * Adjust revision in subsequent message headers, as required,
1770 		 * to comply with 6.2.1.1.5 of the USB PD 3.0 spec. We don't
1771 		 * support Rev 1.0 so just reject in that scenario.
1772 		 */
1773 		if (rev == PD_REV10) {
1774 			tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
1775 			break;
1776 		}
1777 
1778 		if (rev < PD_MAX_REV)
1779 			port->negotiated_rev = rev;
1780 
1781 		port->sink_request = le32_to_cpu(msg->payload[0]);
1782 		tcpm_set_state(port, SRC_NEGOTIATE_CAPABILITIES, 0);
1783 		break;
1784 	case PD_DATA_SINK_CAP:
1785 		/* We don't do anything with this at the moment... */
1786 		for (i = 0; i < cnt; i++)
1787 			port->sink_caps[i] = le32_to_cpu(msg->payload[i]);
1788 
1789 		frs_current = (port->sink_caps[0] & PDO_FIXED_FRS_CURR_MASK) >>
1790 			PDO_FIXED_FRS_CURR_SHIFT;
1791 		frs_enable = frs_current && (frs_current <= port->frs_current);
1792 		tcpm_log(port,
1793 			 "Port partner FRS capable partner_frs_current:%u port_frs_current:%u enable:%c",
1794 			 frs_current, port->frs_current, frs_enable ? 'y' : 'n');
1795 		if (frs_enable) {
1796 			ret  = port->tcpc->enable_frs(port->tcpc, true);
1797 			tcpm_log(port, "Enable FRS %s, ret:%d\n", ret ? "fail" : "success", ret);
1798 		}
1799 
1800 		port->nr_sink_caps = cnt;
1801 		port->sink_cap_done = true;
1802 		tcpm_set_state(port, SNK_READY, 0);
1803 		break;
1804 	case PD_DATA_VENDOR_DEF:
1805 		tcpm_handle_vdm_request(port, msg->payload, cnt);
1806 		break;
1807 	case PD_DATA_BIST:
1808 		if (port->state == SRC_READY || port->state == SNK_READY) {
1809 			port->bist_request = le32_to_cpu(msg->payload[0]);
1810 			tcpm_set_state(port, BIST_RX, 0);
1811 		}
1812 		break;
1813 	case PD_DATA_ALERT:
1814 		tcpm_handle_alert(port, msg->payload, cnt);
1815 		break;
1816 	case PD_DATA_BATT_STATUS:
1817 	case PD_DATA_GET_COUNTRY_INFO:
1818 		/* Currently unsupported */
1819 		tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP);
1820 		break;
1821 	default:
1822 		tcpm_log(port, "Unhandled data message type %#x", type);
1823 		break;
1824 	}
1825 }
1826 
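/* Complete a pending PPS request and wake up any waiter. */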
1827 static void tcpm_pps_complete(struct tcpm_port *port, int result)
1828 {
1829 	if (port->pps_pending) {
1830 		port->pps_status = result;
1831 		port->pps_pending = false;
1832 		complete(&port->pps_complete);
1833 	}
1834 }
1835 
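/*
 * Handle an incoming PD control message (Accept, Reject, PS_RDY, swap
 * requests, ...) based on the current port state.
 */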
1836 static void tcpm_pd_ctrl_request(struct tcpm_port *port,
1837 				 const struct pd_message *msg)
1838 {
1839 	enum pd_ctrl_msg_type type = pd_header_type_le(msg->header);
1840 	enum tcpm_state next_state;
1841 
1842 	switch (type) {
1843 	case PD_CTRL_GOOD_CRC:
1844 	case PD_CTRL_PING:
1845 		break;
1846 	case PD_CTRL_GET_SOURCE_CAP:
1847 		switch (port->state) {
1848 		case SRC_READY:
1849 		case SNK_READY:
1850 			tcpm_queue_message(port, PD_MSG_DATA_SOURCE_CAP);
1851 			break;
1852 		default:
1853 			tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
1854 			break;
1855 		}
1856 		break;
1857 	case PD_CTRL_GET_SINK_CAP:
1858 		switch (port->state) {
1859 		case SRC_READY:
1860 		case SNK_READY:
1861 			tcpm_queue_message(port, PD_MSG_DATA_SINK_CAP);
1862 			break;
1863 		default:
1864 			tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
1865 			break;
1866 		}
1867 		break;
1868 	case PD_CTRL_GOTO_MIN:
1869 		break;
1870 	case PD_CTRL_PS_RDY:
1871 		switch (port->state) {
1872 		case SNK_TRANSITION_SINK:
1873 			if (port->vbus_present) {
1874 				tcpm_set_current_limit(port,
1875 						       port->current_limit,
1876 						       port->supply_voltage);
1877 				port->explicit_contract = true;
1878 				tcpm_set_state(port, SNK_READY, 0);
1879 			} else {
1880 				/*
1881 				 * Seen after power swap. Keep waiting for VBUS
1882 				 * in a transitional state.
1883 				 */
1884 				tcpm_set_state(port,
1885 					       SNK_TRANSITION_SINK_VBUS, 0);
1886 			}
1887 			break;
1888 		case PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED:
1889 			tcpm_set_state(port, PR_SWAP_SRC_SNK_SINK_ON, 0);
1890 			break;
1891 		case PR_SWAP_SNK_SRC_SINK_OFF:
1892 			tcpm_set_state(port, PR_SWAP_SNK_SRC_SOURCE_ON, 0);
1893 			break;
1894 		case VCONN_SWAP_WAIT_FOR_VCONN:
1895 			tcpm_set_state(port, VCONN_SWAP_TURN_OFF_VCONN, 0);
1896 			break;
1897 		case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
1898 			tcpm_set_state(port, FR_SWAP_SNK_SRC_NEW_SINK_READY, 0);
1899 			break;
1900 		default:
1901 			break;
1902 		}
1903 		break;
1904 	case PD_CTRL_REJECT:
1905 	case PD_CTRL_WAIT:
1906 	case PD_CTRL_NOT_SUPP:
1907 		switch (port->state) {
1908 		case SNK_NEGOTIATE_CAPABILITIES:
1909 			/* USB PD specification, Figure 8-43 */
1910 			if (port->explicit_contract)
1911 				next_state = SNK_READY;
1912 			else
1913 				next_state = SNK_WAIT_CAPABILITIES;
1914 			tcpm_set_state(port, next_state, 0);
1915 			break;
1916 		case SNK_NEGOTIATE_PPS_CAPABILITIES:
1917 			/* Revert data back from any requested PPS updates */
1918 			port->pps_data.out_volt = port->supply_voltage;
1919 			port->pps_data.op_curr = port->current_limit;
1920 			port->pps_status = (type == PD_CTRL_WAIT ?
1921 					    -EAGAIN : -EOPNOTSUPP);
1922 			tcpm_set_state(port, SNK_READY, 0);
1923 			break;
1924 		case DR_SWAP_SEND:
1925 			port->swap_status = (type == PD_CTRL_WAIT ?
1926 					     -EAGAIN : -EOPNOTSUPP);
1927 			tcpm_set_state(port, DR_SWAP_CANCEL, 0);
1928 			break;
1929 		case PR_SWAP_SEND:
1930 			port->swap_status = (type == PD_CTRL_WAIT ?
1931 					     -EAGAIN : -EOPNOTSUPP);
1932 			tcpm_set_state(port, PR_SWAP_CANCEL, 0);
1933 			break;
1934 		case VCONN_SWAP_SEND:
1935 			port->swap_status = (type == PD_CTRL_WAIT ?
1936 					     -EAGAIN : -EOPNOTSUPP);
1937 			tcpm_set_state(port, VCONN_SWAP_CANCEL, 0);
1938 			break;
1939 		case FR_SWAP_SEND:
1940 			tcpm_set_state(port, FR_SWAP_CANCEL, 0);
1941 			break;
1942 		case GET_SINK_CAP:
1943 			port->sink_cap_done = true;
1944 			tcpm_set_state(port, ready_state(port), 0);
1945 			break;
1946 		default:
1947 			break;
1948 		}
1949 		break;
1950 	case PD_CTRL_ACCEPT:
1951 		switch (port->state) {
1952 		case SNK_NEGOTIATE_CAPABILITIES:
1953 			port->pps_data.active = false;
1954 			tcpm_set_state(port, SNK_TRANSITION_SINK, 0);
1955 			break;
1956 		case SNK_NEGOTIATE_PPS_CAPABILITIES:
1957 			port->pps_data.active = true;
1958 			port->supply_voltage = port->pps_data.out_volt;
1959 			port->current_limit = port->pps_data.op_curr;
1960 			tcpm_set_state(port, SNK_TRANSITION_SINK, 0);
1961 			break;
1962 		case SOFT_RESET_SEND:
1963 			port->message_id = 0;
1964 			port->rx_msgid = -1;
1965 			if (port->pwr_role == TYPEC_SOURCE)
1966 				next_state = SRC_SEND_CAPABILITIES;
1967 			else
1968 				next_state = SNK_WAIT_CAPABILITIES;
1969 			tcpm_set_state(port, next_state, 0);
1970 			break;
1971 		case DR_SWAP_SEND:
1972 			tcpm_set_state(port, DR_SWAP_CHANGE_DR, 0);
1973 			break;
1974 		case PR_SWAP_SEND:
1975 			tcpm_set_state(port, PR_SWAP_START, 0);
1976 			break;
1977 		case VCONN_SWAP_SEND:
1978 			tcpm_set_state(port, VCONN_SWAP_START, 0);
1979 			break;
1980 		case FR_SWAP_SEND:
1981 			tcpm_set_state(port, FR_SWAP_SNK_SRC_TRANSITION_TO_OFF, 0);
1982 			break;
1983 		default:
1984 			break;
1985 		}
1986 		break;
1987 	case PD_CTRL_SOFT_RESET:
1988 		tcpm_set_state(port, SOFT_RESET, 0);
1989 		break;
1990 	case PD_CTRL_DR_SWAP:
1991 		if (port->typec_caps.data != TYPEC_PORT_DRD) {
1992 			tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
1993 			break;
1994 		}
1995 		/*
1996 		 * XXX
1997 		 * 6.3.9: If an alternate mode is active, a request to swap
1998 		 * alternate modes shall trigger a port reset.
1999 		 */
2000 		switch (port->state) {
2001 		case SRC_READY:
2002 		case SNK_READY:
2003 			tcpm_set_state(port, DR_SWAP_ACCEPT, 0);
2004 			break;
2005 		default:
2006 			tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
2007 			break;
2008 		}
2009 		break;
2010 	case PD_CTRL_PR_SWAP:
2011 		if (port->port_type != TYPEC_PORT_DRP) {
2012 			tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
2013 			break;
2014 		}
2015 		switch (port->state) {
2016 		case SRC_READY:
2017 		case SNK_READY:
2018 			tcpm_set_state(port, PR_SWAP_ACCEPT, 0);
2019 			break;
2020 		default:
2021 			tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
2022 			break;
2023 		}
2024 		break;
2025 	case PD_CTRL_VCONN_SWAP:
2026 		switch (port->state) {
2027 		case SRC_READY:
2028 		case SNK_READY:
2029 			tcpm_set_state(port, VCONN_SWAP_ACCEPT, 0);
2030 			break;
2031 		default:
2032 			tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
2033 			break;
2034 		}
2035 		break;
2036 	case PD_CTRL_GET_SOURCE_CAP_EXT:
2037 	case PD_CTRL_GET_STATUS:
2038 	case PD_CTRL_FR_SWAP:
2039 	case PD_CTRL_GET_PPS_STATUS:
2040 	case PD_CTRL_GET_COUNTRY_CODES:
2041 		/* Currently not supported */
2042 		tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP);
2043 		break;
2044 	default:
2045 		tcpm_log(port, "Unhandled ctrl message type %#x", type);
2046 		break;
2047 	}
2048 }
2049 
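/*
 * Handle an incoming extended message. Only chunked messages that fit
 * into a single chunk are handled; Status and PPS_Status are consumed,
 * the remaining known types are answered with Not_Supported.
 */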
2050 static void tcpm_pd_ext_msg_request(struct tcpm_port *port,
2051 				    const struct pd_message *msg)
2052 {
2053 	enum pd_ext_msg_type type = pd_header_type_le(msg->header);
2054 	unsigned int data_size = pd_ext_header_data_size_le(msg->ext_msg.header);
2055 
2056 	if (!(msg->ext_msg.header & PD_EXT_HDR_CHUNKED)) {
2057 		tcpm_log(port, "Unchunked extended messages unsupported");
2058 		return;
2059 	}
2060 
2061 	if (data_size > PD_EXT_MAX_CHUNK_DATA) {
2062 		tcpm_log(port, "Chunk handling not yet supported");
2063 		return;
2064 	}
2065 
2066 	switch (type) {
2067 	case PD_EXT_STATUS:
2068 		/*
2069 		 * If PPS related events were raised, get the PPS status to
2070 		 * clear them (see USB PD 3.0 Spec, 6.5.2.4).
2071 		 */
2072 		if (msg->ext_msg.data[USB_PD_EXT_SDB_EVENT_FLAGS] &
2073 		    USB_PD_EXT_SDB_PPS_EVENTS)
2074 			tcpm_set_state(port, GET_PPS_STATUS_SEND, 0);
2075 		else
2076 			tcpm_set_state(port, ready_state(port), 0);
2077 		break;
2078 	case PD_EXT_PPS_STATUS:
2079 		/*
2080 		 * For now the PPS status message is used to clear events
2081 		 * and nothing more.
2082 		 */
2083 		tcpm_set_state(port, ready_state(port), 0);
2084 		break;
2085 	case PD_EXT_SOURCE_CAP_EXT:
2086 	case PD_EXT_GET_BATT_CAP:
2087 	case PD_EXT_GET_BATT_STATUS:
2088 	case PD_EXT_BATT_CAP:
2089 	case PD_EXT_GET_MANUFACTURER_INFO:
2090 	case PD_EXT_MANUFACTURER_INFO:
2091 	case PD_EXT_SECURITY_REQUEST:
2092 	case PD_EXT_SECURITY_RESPONSE:
2093 	case PD_EXT_FW_UPDATE_REQUEST:
2094 	case PD_EXT_FW_UPDATE_RESPONSE:
2095 	case PD_EXT_COUNTRY_INFO:
2096 	case PD_EXT_COUNTRY_CODES:
2097 		tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP);
2098 		break;
2099 	default:
2100 		tcpm_log(port, "Unhandled extended message type %#x", type);
2101 		break;
2102 	}
2103 }
2104 
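/*
 * Work item handler for received PD messages: drop retransmissions
 * based on the stored MessageID, check for a data role mismatch, then
 * dispatch to the control/data/extended message handlers.
 */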
2105 static void tcpm_pd_rx_handler(struct kthread_work *work)
2106 {
2107 	struct pd_rx_event *event = container_of(work,
2108 						 struct pd_rx_event, work);
2109 	const struct pd_message *msg = &event->msg;
2110 	unsigned int cnt = pd_header_cnt_le(msg->header);
2111 	struct tcpm_port *port = event->port;
2112 
2113 	mutex_lock(&port->lock);
2114 
2115 	tcpm_log(port, "PD RX, header: %#x [%d]", le16_to_cpu(msg->header),
2116 		 port->attached);
2117 
2118 	if (port->attached) {
2119 		enum pd_ctrl_msg_type type = pd_header_type_le(msg->header);
2120 		unsigned int msgid = pd_header_msgid_le(msg->header);
2121 
2122 		/*
2123 		 * USB PD standard, 6.6.1.2:
2124 		 * "... if MessageID value in a received Message is the
2125 		 * same as the stored value, the receiver shall return a
2126 		 * GoodCRC Message with that MessageID value and drop
2127 		 * the Message (this is a retry of an already received
2128 		 * Message). Note: this shall not apply to the Soft_Reset
2129 		 * Message which always has a MessageID value of zero."
2130 		 */
2131 		if (msgid == port->rx_msgid && type != PD_CTRL_SOFT_RESET)
2132 			goto done;
2133 		port->rx_msgid = msgid;
2134 
2135 		/*
2136 		 * If both ends believe themselves to be DFP/host, we have a
2137 		 * data role mismatch.
2138 		 */
2139 		if (!!(le16_to_cpu(msg->header) & PD_HEADER_DATA_ROLE) ==
2140 		    (port->data_role == TYPEC_HOST)) {
2141 			tcpm_log(port,
2142 				 "Data role mismatch, initiating error recovery");
2143 			tcpm_set_state(port, ERROR_RECOVERY, 0);
2144 		} else {
2145 			if (msg->header & PD_HEADER_EXT_HDR)
2146 				tcpm_pd_ext_msg_request(port, msg);
2147 			else if (cnt)
2148 				tcpm_pd_data_request(port, msg);
2149 			else
2150 				tcpm_pd_ctrl_request(port, msg);
2151 		}
2152 	}
2153 
2154 done:
2155 	mutex_unlock(&port->lock);
2156 	kfree(event);
2157 }
2158 
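/*
 * Entry point for TCPC drivers to hand over a received PD message.
 * The message is copied and queued to the port worker; the allocation
 * uses GFP_ATOMIC, so this may be called from atomic context.
 */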
2159 void tcpm_pd_receive(struct tcpm_port *port, const struct pd_message *msg)
2160 {
2161 	struct pd_rx_event *event;
2162 
2163 	event = kzalloc(sizeof(*event), GFP_ATOMIC);
2164 	if (!event)
2165 		return;
2166 
2167 	kthread_init_work(&event->work, tcpm_pd_rx_handler);
2168 	event->port = port;
2169 	memcpy(&event->msg, msg, sizeof(*msg));
2170 	kthread_queue_work(port->wq, &event->work);
2171 }
2172 EXPORT_SYMBOL_GPL(tcpm_pd_receive);
2173 
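/* Build and transmit a PD control message of the given type on SOP. */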
2174 static int tcpm_pd_send_control(struct tcpm_port *port,
2175 				enum pd_ctrl_msg_type type)
2176 {
2177 	struct pd_message msg;
2178 
2179 	memset(&msg, 0, sizeof(msg));
2180 	msg.header = PD_HEADER_LE(type, port->pwr_role,
2181 				  port->data_role,
2182 				  port->negotiated_rev,
2183 				  port->message_id, 0);
2184 
2185 	return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
2186 }
2187 
2188 /*
2189  * Send queued message without affecting state.
2190  * Return true if state machine should go back to sleep,
2191  * false otherwise.
2192  */
2193 static bool tcpm_send_queued_message(struct tcpm_port *port)
2194 {
2195 	enum pd_msg_request queued_message;
2196 
2197 	do {
2198 		queued_message = port->queued_message;
2199 		port->queued_message = PD_MSG_NONE;
2200 
2201 		switch (queued_message) {
2202 		case PD_MSG_CTRL_WAIT:
2203 			tcpm_pd_send_control(port, PD_CTRL_WAIT);
2204 			break;
2205 		case PD_MSG_CTRL_REJECT:
2206 			tcpm_pd_send_control(port, PD_CTRL_REJECT);
2207 			break;
2208 		case PD_MSG_CTRL_NOT_SUPP:
2209 			tcpm_pd_send_control(port, PD_CTRL_NOT_SUPP);
2210 			break;
2211 		case PD_MSG_DATA_SINK_CAP:
2212 			tcpm_pd_send_sink_caps(port);
2213 			break;
2214 		case PD_MSG_DATA_SOURCE_CAP:
2215 			tcpm_pd_send_source_caps(port);
2216 			break;
2217 		default:
2218 			break;
2219 		}
2220 	} while (port->queued_message != PD_MSG_NONE);
2221 
2222 	if (port->delayed_state != INVALID_STATE) {
2223 		if (ktime_after(port->delayed_runtime, ktime_get())) {
2224 			mod_tcpm_delayed_work(port, ktime_to_ms(ktime_sub(port->delayed_runtime,
2225 									  ktime_get())));
2226 			return true;
2227 		}
2228 		port->delayed_state = INVALID_STATE;
2229 	}
2230 	return false;
2231 }
2232 
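/*
 * Validate the RDO received from the sink against the source PDO it
 * references: reject out-of-range indices and requests that exceed the
 * offered current or power.
 */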
2233 static int tcpm_pd_check_request(struct tcpm_port *port)
2234 {
2235 	u32 pdo, rdo = port->sink_request;
2236 	unsigned int max, op, pdo_max, index;
2237 	enum pd_pdo_type type;
2238 
2239 	index = rdo_index(rdo);
2240 	if (!index || index > port->nr_src_pdo)
2241 		return -EINVAL;
2242 
2243 	pdo = port->src_pdo[index - 1];
2244 	type = pdo_type(pdo);
2245 	switch (type) {
2246 	case PDO_TYPE_FIXED:
2247 	case PDO_TYPE_VAR:
2248 		max = rdo_max_current(rdo);
2249 		op = rdo_op_current(rdo);
2250 		pdo_max = pdo_max_current(pdo);
2251 
2252 		if (op > pdo_max)
2253 			return -EINVAL;
2254 		if (max > pdo_max && !(rdo & RDO_CAP_MISMATCH))
2255 			return -EINVAL;
2256 
2257 		if (type == PDO_TYPE_FIXED)
2258 			tcpm_log(port,
2259 				 "Requested %u mV, %u mA for %u / %u mA",
2260 				 pdo_fixed_voltage(pdo), pdo_max, op, max);
2261 		else
2262 			tcpm_log(port,
2263 				 "Requested %u -> %u mV, %u mA for %u / %u mA",
2264 				 pdo_min_voltage(pdo), pdo_max_voltage(pdo),
2265 				 pdo_max, op, max);
2266 		break;
2267 	case PDO_TYPE_BATT:
2268 		max = rdo_max_power(rdo);
2269 		op = rdo_op_power(rdo);
2270 		pdo_max = pdo_max_power(pdo);
2271 
2272 		if (op > pdo_max)
2273 			return -EINVAL;
2274 		if (max > pdo_max && !(rdo & RDO_CAP_MISMATCH))
2275 			return -EINVAL;
2276 		tcpm_log(port,
2277 			 "Requested %u -> %u mV, %u mW for %u / %u mW",
2278 			 pdo_min_voltage(pdo), pdo_max_voltage(pdo),
2279 			 pdo_max, op, max);
2280 		break;
2281 	default:
2282 		return -EINVAL;
2283 	}
2284 
2285 	port->op_vsafe5v = index == 1;
2286 
2287 	return 0;
2288 }
2289 
2290 #define min_power(x, y) min(pdo_max_power(x), pdo_max_power(y))
2291 #define min_current(x, y) min(pdo_max_current(x), pdo_max_current(y))
2292 
2293 static int tcpm_pd_select_pdo(struct tcpm_port *port, int *sink_pdo,
2294 			      int *src_pdo)
2295 {
2296 	unsigned int i, j, max_src_mv = 0, min_src_mv = 0, max_mw = 0,
2297 		     max_mv = 0, src_mw = 0, src_ma = 0, max_snk_mv = 0,
2298 		     min_snk_mv = 0;
2299 	int ret = -EINVAL;
2300 
2301 	port->pps_data.supported = false;
2302 	port->usb_type = POWER_SUPPLY_USB_TYPE_PD;
2303 
2304 	/*
2305 	 * Select the source PDO providing the most power which has a
2306 	 * matching sink cap.
2307 	 */
2308 	for (i = 0; i < port->nr_source_caps; i++) {
2309 		u32 pdo = port->source_caps[i];
2310 		enum pd_pdo_type type = pdo_type(pdo);
2311 
2312 		switch (type) {
2313 		case PDO_TYPE_FIXED:
2314 			max_src_mv = pdo_fixed_voltage(pdo);
2315 			min_src_mv = max_src_mv;
2316 			break;
2317 		case PDO_TYPE_BATT:
2318 		case PDO_TYPE_VAR:
2319 			max_src_mv = pdo_max_voltage(pdo);
2320 			min_src_mv = pdo_min_voltage(pdo);
2321 			break;
2322 		case PDO_TYPE_APDO:
2323 			if (pdo_apdo_type(pdo) == APDO_TYPE_PPS) {
2324 				port->pps_data.supported = true;
2325 				port->usb_type =
2326 					POWER_SUPPLY_USB_TYPE_PD_PPS;
2327 			}
2328 			continue;
2329 		default:
2330 			tcpm_log(port, "Invalid source PDO type, ignoring");
2331 			continue;
2332 		}
2333 
2334 		switch (type) {
2335 		case PDO_TYPE_FIXED:
2336 		case PDO_TYPE_VAR:
2337 			src_ma = pdo_max_current(pdo);
2338 			src_mw = src_ma * min_src_mv / 1000;
2339 			break;
2340 		case PDO_TYPE_BATT:
2341 			src_mw = pdo_max_power(pdo);
2342 			break;
2343 		case PDO_TYPE_APDO:
2344 			continue;
2345 		default:
2346 			tcpm_log(port, "Invalid source PDO type, ignoring");
2347 			continue;
2348 		}
2349 
2350 		for (j = 0; j < port->nr_snk_pdo; j++) {
2351 			pdo = port->snk_pdo[j];
2352 
2353 			switch (pdo_type(pdo)) {
2354 			case PDO_TYPE_FIXED:
2355 				max_snk_mv = pdo_fixed_voltage(pdo);
2356 				min_snk_mv = max_snk_mv;
2357 				break;
2358 			case PDO_TYPE_BATT:
2359 			case PDO_TYPE_VAR:
2360 				max_snk_mv = pdo_max_voltage(pdo);
2361 				min_snk_mv = pdo_min_voltage(pdo);
2362 				break;
2363 			case PDO_TYPE_APDO:
2364 				continue;
2365 			default:
2366 				tcpm_log(port, "Invalid sink PDO type, ignoring");
2367 				continue;
2368 			}
2369 
2370 			if (max_src_mv <= max_snk_mv &&
2371 				min_src_mv >= min_snk_mv) {
2372 				/* Prefer higher voltages if available */
2373 				if ((src_mw == max_mw && min_src_mv > max_mv) ||
2374 							src_mw > max_mw) {
2375 					*src_pdo = i;
2376 					*sink_pdo = j;
2377 					max_mw = src_mw;
2378 					max_mv = min_src_mv;
2379 					ret = 0;
2380 				}
2381 			}
2382 		}
2383 	}
2384 
2385 	return ret;
2386 }
2387 
2388 #define min_pps_apdo_current(x, y)	\
2389 	min(pdo_pps_apdo_max_current(x), pdo_pps_apdo_max_current(y))
2390 
2391 static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port)
2392 {
2393 	unsigned int i, j, max_mw = 0, max_mv = 0;
2394 	unsigned int min_src_mv, max_src_mv, src_ma, src_mw;
2395 	unsigned int min_snk_mv, max_snk_mv;
2396 	unsigned int max_op_mv;
2397 	u32 pdo, src, snk;
2398 	unsigned int src_pdo = 0, snk_pdo = 0;
2399 
2400 	/*
2401 	 * Select the source PPS APDO providing the most power while staying
2402 	 * within the board's limits. We skip the first PDO as this is always
2403 	 * 5V 3A.
2404 	 */
2405 	for (i = 1; i < port->nr_source_caps; ++i) {
2406 		pdo = port->source_caps[i];
2407 
2408 		switch (pdo_type(pdo)) {
2409 		case PDO_TYPE_APDO:
2410 			if (pdo_apdo_type(pdo) != APDO_TYPE_PPS) {
2411 				tcpm_log(port, "Not PPS APDO (source), ignoring");
2412 				continue;
2413 			}
2414 
2415 			min_src_mv = pdo_pps_apdo_min_voltage(pdo);
2416 			max_src_mv = pdo_pps_apdo_max_voltage(pdo);
2417 			src_ma = pdo_pps_apdo_max_current(pdo);
2418 			src_mw = (src_ma * max_src_mv) / 1000;
2419 
2420 			/*
2421 			 * Now search through the sink PDOs to find a matching
2422 			 * PPS APDO. Again skip the first sink PDO as this will
2423 			 * always be 5V 3A.
2424 			 */
2425 			for (j = 1; j < port->nr_snk_pdo; j++) {
2426 				pdo = port->snk_pdo[j];
2427 
2428 				switch (pdo_type(pdo)) {
2429 				case PDO_TYPE_APDO:
2430 					if (pdo_apdo_type(pdo) != APDO_TYPE_PPS) {
2431 						tcpm_log(port,
2432 							 "Not PPS APDO (sink), ignoring");
2433 						continue;
2434 					}
2435 
2436 					min_snk_mv =
2437 						pdo_pps_apdo_min_voltage(pdo);
2438 					max_snk_mv =
2439 						pdo_pps_apdo_max_voltage(pdo);
2440 					break;
2441 				default:
2442 					tcpm_log(port,
2443 						 "Not APDO type (sink), ignoring");
2444 					continue;
2445 				}
2446 
2447 				if (min_src_mv <= max_snk_mv &&
2448 				    max_src_mv >= min_snk_mv) {
2449 					max_op_mv = min(max_src_mv, max_snk_mv);
2450 					src_mw = (max_op_mv * src_ma) / 1000;
2451 					/* Prefer higher voltages if available */
2452 					if ((src_mw == max_mw &&
2453 					     max_op_mv > max_mv) ||
2454 					    src_mw > max_mw) {
2455 						src_pdo = i;
2456 						snk_pdo = j;
2457 						max_mw = src_mw;
2458 						max_mv = max_op_mv;
2459 					}
2460 				}
2461 			}
2462 
2463 			break;
2464 		default:
2465 			tcpm_log(port, "Not APDO type (source), ignoring");
2466 			continue;
2467 		}
2468 	}
2469 
2470 	if (src_pdo) {
2471 		src = port->source_caps[src_pdo];
2472 		snk = port->snk_pdo[snk_pdo];
2473 
2474 		port->pps_data.min_volt = max(pdo_pps_apdo_min_voltage(src),
2475 					      pdo_pps_apdo_min_voltage(snk));
2476 		port->pps_data.max_volt = min(pdo_pps_apdo_max_voltage(src),
2477 					      pdo_pps_apdo_max_voltage(snk));
2478 		port->pps_data.max_curr = min_pps_apdo_current(src, snk);
2479 		port->pps_data.out_volt = min(port->pps_data.max_volt,
2480 					      max(port->pps_data.min_volt,
2481 						  port->pps_data.out_volt));
2482 		port->pps_data.op_curr = min(port->pps_data.max_curr,
2483 					     port->pps_data.op_curr);
2484 	}
2485 
2486 	return src_pdo;
2487 }
2488 
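/*
 * Build the RDO for a fixed/variable/battery request based on the
 * selected source and sink PDO pair, setting the capability mismatch
 * flag if the offered power is below the operating power of the board.
 */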
2489 static int tcpm_pd_build_request(struct tcpm_port *port, u32 *rdo)
2490 {
2491 	unsigned int mv, ma, mw, flags;
2492 	unsigned int max_ma, max_mw;
2493 	enum pd_pdo_type type;
2494 	u32 pdo, matching_snk_pdo;
2495 	int src_pdo_index = 0;
2496 	int snk_pdo_index = 0;
2497 	int ret;
2498 
2499 	ret = tcpm_pd_select_pdo(port, &snk_pdo_index, &src_pdo_index);
2500 	if (ret < 0)
2501 		return ret;
2502 
2503 	pdo = port->source_caps[src_pdo_index];
2504 	matching_snk_pdo = port->snk_pdo[snk_pdo_index];
2505 	type = pdo_type(pdo);
2506 
2507 	switch (type) {
2508 	case PDO_TYPE_FIXED:
2509 		mv = pdo_fixed_voltage(pdo);
2510 		break;
2511 	case PDO_TYPE_BATT:
2512 	case PDO_TYPE_VAR:
2513 		mv = pdo_min_voltage(pdo);
2514 		break;
2515 	default:
2516 		tcpm_log(port, "Invalid PDO selected!");
2517 		return -EINVAL;
2518 	}
2519 
2520 	/* Select maximum available current within the sink pdo's limit */
2521 	if (type == PDO_TYPE_BATT) {
2522 		mw = min_power(pdo, matching_snk_pdo);
2523 		ma = 1000 * mw / mv;
2524 	} else {
2525 		ma = min_current(pdo, matching_snk_pdo);
2526 		mw = ma * mv / 1000;
2527 	}
2528 
2529 	flags = RDO_USB_COMM | RDO_NO_SUSPEND;
2530 
2531 	/* Set mismatch bit if offered power is less than operating power */
2532 	max_ma = ma;
2533 	max_mw = mw;
2534 	if (mw < port->operating_snk_mw) {
2535 		flags |= RDO_CAP_MISMATCH;
2536 		if (type == PDO_TYPE_BATT &&
2537 		    (pdo_max_power(matching_snk_pdo) > pdo_max_power(pdo)))
2538 			max_mw = pdo_max_power(matching_snk_pdo);
2539 		else if (pdo_max_current(matching_snk_pdo) >
2540 			 pdo_max_current(pdo))
2541 			max_ma = pdo_max_current(matching_snk_pdo);
2542 	}
2543 
2544 	tcpm_log(port, "cc=%d cc1=%d cc2=%d vbus=%d vconn=%s polarity=%d",
2545 		 port->cc_req, port->cc1, port->cc2, port->vbus_source,
2546 		 port->vconn_role == TYPEC_SOURCE ? "source" : "sink",
2547 		 port->polarity);
2548 
2549 	if (type == PDO_TYPE_BATT) {
2550 		*rdo = RDO_BATT(src_pdo_index + 1, mw, max_mw, flags);
2551 
2552 		tcpm_log(port, "Requesting PDO %d: %u mV, %u mW%s",
2553 			 src_pdo_index, mv, mw,
2554 			 flags & RDO_CAP_MISMATCH ? " [mismatch]" : "");
2555 	} else {
2556 		*rdo = RDO_FIXED(src_pdo_index + 1, ma, max_ma, flags);
2557 
2558 		tcpm_log(port, "Requesting PDO %d: %u mV, %u mA%s",
2559 			 src_pdo_index, mv, ma,
2560 			 flags & RDO_CAP_MISMATCH ? " [mismatch]" : "");
2561 	}
2562 
2563 	port->current_limit = ma;
2564 	port->supply_voltage = mv;
2565 
2566 	return 0;
2567 }
2568 
2569 static int tcpm_pd_send_request(struct tcpm_port *port)
2570 {
2571 	struct pd_message msg;
2572 	int ret;
2573 	u32 rdo;
2574 
2575 	ret = tcpm_pd_build_request(port, &rdo);
2576 	if (ret < 0)
2577 		return ret;
2578 
2579 	memset(&msg, 0, sizeof(msg));
2580 	msg.header = PD_HEADER_LE(PD_DATA_REQUEST,
2581 				  port->pwr_role,
2582 				  port->data_role,
2583 				  port->negotiated_rev,
2584 				  port->message_id, 1);
2585 	msg.payload[0] = cpu_to_le32(rdo);
2586 
2587 	return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
2588 }
2589 
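/*
 * Build the RDO for a PPS (programmable) request, raising the requested
 * current and then the voltage as needed to reach the board's operating
 * power while staying within the limits of the selected APDO.
 */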
2590 static int tcpm_pd_build_pps_request(struct tcpm_port *port, u32 *rdo)
2591 {
2592 	unsigned int out_mv, op_ma, op_mw, max_mv, max_ma, flags;
2593 	enum pd_pdo_type type;
2594 	unsigned int src_pdo_index;
2595 	u32 pdo;
2596 
2597 	src_pdo_index = tcpm_pd_select_pps_apdo(port);
2598 	if (!src_pdo_index)
2599 		return -EOPNOTSUPP;
2600 
2601 	pdo = port->source_caps[src_pdo_index];
2602 	type = pdo_type(pdo);
2603 
2604 	switch (type) {
2605 	case PDO_TYPE_APDO:
2606 		if (pdo_apdo_type(pdo) != APDO_TYPE_PPS) {
2607 			tcpm_log(port, "Invalid APDO selected!");
2608 			return -EINVAL;
2609 		}
2610 		max_mv = port->pps_data.max_volt;
2611 		max_ma = port->pps_data.max_curr;
2612 		out_mv = port->pps_data.out_volt;
2613 		op_ma = port->pps_data.op_curr;
2614 		break;
2615 	default:
2616 		tcpm_log(port, "Invalid PDO selected!");
2617 		return -EINVAL;
2618 	}
2619 
2620 	flags = RDO_USB_COMM | RDO_NO_SUSPEND;
2621 
2622 	op_mw = (op_ma * out_mv) / 1000;
2623 	if (op_mw < port->operating_snk_mw) {
2624 		/*
2625 		 * Try raising current to meet power needs. If that's not enough
2626 		 * then try upping the voltage. If that's still not enough
2627 		 * then we've obviously chosen a PPS APDO which really isn't
2628 		 * suitable so abandon ship.
2629 		 */
2630 		op_ma = (port->operating_snk_mw * 1000) / out_mv;
2631 		if ((port->operating_snk_mw * 1000) % out_mv)
2632 			++op_ma;
2633 		op_ma += RDO_PROG_CURR_MA_STEP - (op_ma % RDO_PROG_CURR_MA_STEP);
2634 
2635 		if (op_ma > max_ma) {
2636 			op_ma = max_ma;
2637 			out_mv = (port->operating_snk_mw * 1000) / op_ma;
2638 			if ((port->operating_snk_mw * 1000) % op_ma)
2639 				++out_mv;
2640 			out_mv += RDO_PROG_VOLT_MV_STEP -
2641 				  (out_mv % RDO_PROG_VOLT_MV_STEP);
2642 
2643 			if (out_mv > max_mv) {
2644 				tcpm_log(port, "Invalid PPS APDO selected!");
2645 				return -EINVAL;
2646 			}
2647 		}
2648 	}
2649 
2650 	tcpm_log(port, "cc=%d cc1=%d cc2=%d vbus=%d vconn=%s polarity=%d",
2651 		 port->cc_req, port->cc1, port->cc2, port->vbus_source,
2652 		 port->vconn_role == TYPEC_SOURCE ? "source" : "sink",
2653 		 port->polarity);
2654 
2655 	*rdo = RDO_PROG(src_pdo_index + 1, out_mv, op_ma, flags);
2656 
2657 	tcpm_log(port, "Requesting APDO %d: %u mV, %u mA",
2658 		 src_pdo_index, out_mv, op_ma);
2659 
2660 	port->pps_data.op_curr = op_ma;
2661 	port->pps_data.out_volt = out_mv;
2662 
2663 	return 0;
2664 }
2665 
2666 static int tcpm_pd_send_pps_request(struct tcpm_port *port)
2667 {
2668 	struct pd_message msg;
2669 	int ret;
2670 	u32 rdo;
2671 
2672 	ret = tcpm_pd_build_pps_request(port, &rdo);
2673 	if (ret < 0)
2674 		return ret;
2675 
2676 	memset(&msg, 0, sizeof(msg));
2677 	msg.header = PD_HEADER_LE(PD_DATA_REQUEST,
2678 				  port->pwr_role,
2679 				  port->data_role,
2680 				  port->negotiated_rev,
2681 				  port->message_id, 1);
2682 	msg.payload[0] = cpu_to_le32(rdo);
2683 
2684 	return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
2685 }
2686 
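/* Enable or disable sourcing VBUS; refused while VBUS charging is on. */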
2687 static int tcpm_set_vbus(struct tcpm_port *port, bool enable)
2688 {
2689 	int ret;
2690 
2691 	if (enable && port->vbus_charge)
2692 		return -EINVAL;
2693 
2694 	tcpm_log(port, "vbus:=%d charge=%d", enable, port->vbus_charge);
2695 
2696 	ret = port->tcpc->set_vbus(port->tcpc, enable, port->vbus_charge);
2697 	if (ret < 0)
2698 		return ret;
2699 
2700 	port->vbus_source = enable;
2701 	return 0;
2702 }
2703 
2704 static int tcpm_set_charge(struct tcpm_port *port, bool charge)
2705 {
2706 	int ret;
2707 
2708 	if (charge && port->vbus_source)
2709 		return -EINVAL;
2710 
2711 	if (charge != port->vbus_charge) {
2712 		tcpm_log(port, "vbus=%d charge:=%d", port->vbus_source, charge);
2713 		ret = port->tcpc->set_vbus(port->tcpc, port->vbus_source,
2714 					   charge);
2715 		if (ret < 0)
2716 			return ret;
2717 	}
2718 	port->vbus_charge = charge;
2719 	return 0;
2720 }
2721 
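/*
 * Ask the TCPC to perform autonomous DRP toggling with the given CC
 * pull; returns true if the TCPC supports it and accepted the request.
 */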
2722 static bool tcpm_start_toggling(struct tcpm_port *port, enum typec_cc_status cc)
2723 {
2724 	int ret;
2725 
2726 	if (!port->tcpc->start_toggling)
2727 		return false;
2728 
2729 	tcpm_log_force(port, "Start toggling");
2730 	ret = port->tcpc->start_toggling(port->tcpc, port->port_type, cc);
2731 	return ret == 0;
2732 }
2733 
2734 static void tcpm_set_cc(struct tcpm_port *port, enum typec_cc_status cc)
2735 {
2736 	tcpm_log(port, "cc:=%d", cc);
2737 	port->cc_req = cc;
2738 	port->tcpc->set_cc(port->tcpc, cc);
2739 }
2740 
2741 static int tcpm_init_vbus(struct tcpm_port *port)
2742 {
2743 	int ret;
2744 
2745 	ret = port->tcpc->set_vbus(port->tcpc, false, false);
2746 	port->vbus_source = false;
2747 	port->vbus_charge = false;
2748 	return ret;
2749 }
2750 
2751 static int tcpm_init_vconn(struct tcpm_port *port)
2752 {
2753 	int ret;
2754 
2755 	ret = port->tcpc->set_vconn(port->tcpc, false);
2756 	port->vconn_role = TYPEC_SINK;
2757 	return ret;
2758 }
2759 
2760 static void tcpm_typec_connect(struct tcpm_port *port)
2761 {
2762 	if (!port->connected) {
2763 		/* Make sure we don't report stale identity information */
2764 		memset(&port->partner_ident, 0, sizeof(port->partner_ident));
2765 		port->partner_desc.usb_pd = port->pd_capable;
2766 		if (tcpm_port_is_debug(port))
2767 			port->partner_desc.accessory = TYPEC_ACCESSORY_DEBUG;
2768 		else if (tcpm_port_is_audio(port))
2769 			port->partner_desc.accessory = TYPEC_ACCESSORY_AUDIO;
2770 		else
2771 			port->partner_desc.accessory = TYPEC_ACCESSORY_NONE;
2772 		port->partner = typec_register_partner(port->typec_port,
2773 						       &port->partner_desc);
2774 		port->connected = true;
2775 	}
2776 }
2777 
2778 static int tcpm_src_attach(struct tcpm_port *port)
2779 {
2780 	enum typec_cc_polarity polarity =
2781 				port->cc2 == TYPEC_CC_RD ? TYPEC_POLARITY_CC2
2782 							 : TYPEC_POLARITY_CC1;
2783 	int ret;
2784 
2785 	if (port->attached)
2786 		return 0;
2787 
2788 	ret = tcpm_set_polarity(port, polarity);
2789 	if (ret < 0)
2790 		return ret;
2791 
2792 	ret = tcpm_set_roles(port, true, TYPEC_SOURCE,
2793 			     tcpm_data_role_for_source(port));
2794 	if (ret < 0)
2795 		return ret;
2796 
2797 	ret = port->tcpc->set_pd_rx(port->tcpc, true);
2798 	if (ret < 0)
2799 		goto out_disable_mux;
2800 
2801 	/*
2802 	 * USB Type-C specification, version 1.2,
2803 	 * chapter 4.5.2.2.8.1 (Attached.SRC Requirements)
2804 	 * Enable VCONN only if the non-RD port is set to RA.
2805 	 */
2806 	if ((polarity == TYPEC_POLARITY_CC1 && port->cc2 == TYPEC_CC_RA) ||
2807 	    (polarity == TYPEC_POLARITY_CC2 && port->cc1 == TYPEC_CC_RA)) {
2808 		ret = tcpm_set_vconn(port, true);
2809 		if (ret < 0)
2810 			goto out_disable_pd;
2811 	}
2812 
2813 	ret = tcpm_set_vbus(port, true);
2814 	if (ret < 0)
2815 		goto out_disable_vconn;
2816 
2817 	port->pd_capable = false;
2818 
2819 	port->partner = NULL;
2820 
2821 	port->attached = true;
2822 	port->send_discover = true;
2823 
2824 	return 0;
2825 
2826 out_disable_vconn:
2827 	tcpm_set_vconn(port, false);
2828 out_disable_pd:
2829 	port->tcpc->set_pd_rx(port->tcpc, false);
2830 out_disable_mux:
2831 	tcpm_mux_set(port, TYPEC_STATE_SAFE, USB_ROLE_NONE,
2832 		     TYPEC_ORIENTATION_NONE);
2833 	return ret;
2834 }
2835 
2836 static void tcpm_typec_disconnect(struct tcpm_port *port)
2837 {
2838 	if (port->connected) {
2839 		typec_unregister_partner(port->partner);
2840 		port->partner = NULL;
2841 		port->connected = false;
2842 	}
2843 }
2844 
2845 static void tcpm_unregister_altmodes(struct tcpm_port *port)
2846 {
2847 	struct pd_mode_data *modep = &port->mode_data;
2848 	int i;
2849 
2850 	for (i = 0; i < modep->altmodes; i++) {
2851 		typec_unregister_altmode(port->partner_altmode[i]);
2852 		port->partner_altmode[i] = NULL;
2853 	}
2854 
2855 	memset(modep, 0, sizeof(*modep));
2856 }
2857 
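/*
 * Return the port to its disconnected default state: unregister the
 * partner and alternate modes, drop VBUS/VCONN and reset the PD
 * message bookkeeping.
 */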
2858 static void tcpm_reset_port(struct tcpm_port *port)
2859 {
2860 	tcpm_unregister_altmodes(port);
2861 	tcpm_typec_disconnect(port);
2862 	port->attached = false;
2863 	port->pd_capable = false;
2864 	port->pps_data.supported = false;
2865 
2866 	/*
2867 	 * First Rx ID should be 0; set this to a sentinel of -1 so that
2868 	 * tcpm_pd_rx_handler() can check whether we have seen it before.
2869 	 */
2870 	port->rx_msgid = -1;
2871 
2872 	port->tcpc->set_pd_rx(port->tcpc, false);
2873 	tcpm_init_vbus(port);	/* also disables charging */
2874 	tcpm_init_vconn(port);
2875 	tcpm_set_current_limit(port, 0, 0);
2876 	tcpm_set_polarity(port, TYPEC_POLARITY_CC1);
2877 	tcpm_mux_set(port, TYPEC_STATE_SAFE, USB_ROLE_NONE,
2878 		     TYPEC_ORIENTATION_NONE);
2879 	tcpm_set_attached_state(port, false);
2880 	port->try_src_count = 0;
2881 	port->try_snk_count = 0;
2882 	port->usb_type = POWER_SUPPLY_USB_TYPE_C;
2883 	port->nr_sink_caps = 0;
2884 	port->sink_cap_done = false;
2885 	if (port->tcpc->enable_frs)
2886 		port->tcpc->enable_frs(port->tcpc, false);
2887 
2888 	power_supply_changed(port->psy);
2889 }
2890 
2891 static void tcpm_detach(struct tcpm_port *port)
2892 {
2893 	if (tcpm_port_is_disconnected(port))
2894 		port->hard_reset_count = 0;
2895 
2896 	if (!port->attached)
2897 		return;
2898 
2899 	if (port->tcpc->set_bist_data) {
2900 		tcpm_log(port, "disable BIST MODE TESTDATA");
2901 		port->tcpc->set_bist_data(port->tcpc, false);
2902 	}
2903 
2904 	tcpm_reset_port(port);
2905 }
2906 
2907 static void tcpm_src_detach(struct tcpm_port *port)
2908 {
2909 	tcpm_detach(port);
2910 }
2911 
2912 static int tcpm_snk_attach(struct tcpm_port *port)
2913 {
2914 	int ret;
2915 
2916 	if (port->attached)
2917 		return 0;
2918 
2919 	ret = tcpm_set_polarity(port, port->cc2 != TYPEC_CC_OPEN ?
2920 				TYPEC_POLARITY_CC2 : TYPEC_POLARITY_CC1);
2921 	if (ret < 0)
2922 		return ret;
2923 
2924 	ret = tcpm_set_roles(port, true, TYPEC_SINK,
2925 			     tcpm_data_role_for_sink(port));
2926 	if (ret < 0)
2927 		return ret;
2928 
2929 	port->pd_capable = false;
2930 
2931 	port->partner = NULL;
2932 
2933 	port->attached = true;
2934 	port->send_discover = true;
2935 
2936 	return 0;
2937 }
2938 
2939 static void tcpm_snk_detach(struct tcpm_port *port)
2940 {
2941 	tcpm_detach(port);
2942 }
2943 
2944 static int tcpm_acc_attach(struct tcpm_port *port)
2945 {
2946 	int ret;
2947 
2948 	if (port->attached)
2949 		return 0;
2950 
2951 	ret = tcpm_set_roles(port, true, TYPEC_SOURCE,
2952 			     tcpm_data_role_for_source(port));
2953 	if (ret < 0)
2954 		return ret;
2955 
2956 	port->partner = NULL;
2957 
2958 	tcpm_typec_connect(port);
2959 
2960 	port->attached = true;
2961 
2962 	return 0;
2963 }
2964 
2965 static void tcpm_acc_detach(struct tcpm_port *port)
2966 {
2967 	tcpm_detach(port);
2968 }
2969 
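/*
 * Pick the state to enter after PD communication has failed: keep
 * sending hard resets until PD_N_HARD_RESET_COUNT is reached, then
 * fall back to error recovery or an unattached/ready state.
 */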
2970 static inline enum tcpm_state hard_reset_state(struct tcpm_port *port)
2971 {
2972 	if (port->hard_reset_count < PD_N_HARD_RESET_COUNT)
2973 		return HARD_RESET_SEND;
2974 	if (port->pd_capable)
2975 		return ERROR_RECOVERY;
2976 	if (port->pwr_role == TYPEC_SOURCE)
2977 		return SRC_UNATTACHED;
2978 	if (port->state == SNK_WAIT_CAPABILITIES)
2979 		return SNK_READY;
2980 	return SNK_UNATTACHED;
2981 }
2982 
2983 static inline enum tcpm_state unattached_state(struct tcpm_port *port)
2984 {
2985 	if (port->port_type == TYPEC_PORT_DRP) {
2986 		if (port->pwr_role == TYPEC_SOURCE)
2987 			return SRC_UNATTACHED;
2988 		else
2989 			return SNK_UNATTACHED;
2990 	} else if (port->port_type == TYPEC_PORT_SRC) {
2991 		return SRC_UNATTACHED;
2992 	}
2993 
2994 	return SNK_UNATTACHED;
2995 }
2996 
2997 static void tcpm_check_send_discover(struct tcpm_port *port)
2998 {
2999 	if (port->data_role == TYPEC_HOST && port->send_discover &&
3000 	    port->pd_capable) {
3001 		tcpm_send_vdm(port, USB_SID_PD, CMD_DISCOVER_IDENT, NULL, 0);
3002 		port->send_discover = false;
3003 	}
3004 }
3005 
3006 static void tcpm_swap_complete(struct tcpm_port *port, int result)
3007 {
3008 	if (port->swap_pending) {
3009 		port->swap_status = result;
3010 		port->swap_pending = false;
3011 		port->non_pd_role_swap = false;
3012 		complete(&port->swap_complete);
3013 	}
3014 }
3015 
3016 static enum typec_pwr_opmode tcpm_get_pwr_opmode(enum typec_cc_status cc)
3017 {
3018 	switch (cc) {
3019 	case TYPEC_CC_RP_1_5:
3020 		return TYPEC_PWR_MODE_1_5A;
3021 	case TYPEC_CC_RP_3_0:
3022 		return TYPEC_PWR_MODE_3_0A;
3023 	case TYPEC_CC_RP_DEF:
3024 	default:
3025 		return TYPEC_PWR_MODE_USB;
3026 	}
3027 }
3028 
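/*
 * Core TCPM state machine: perform the entry actions for the current
 * port state and schedule the next state transition.
 */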
3029 static void run_state_machine(struct tcpm_port *port)
3030 {
3031 	int ret;
3032 	enum typec_pwr_opmode opmode;
3033 	unsigned int msecs;
3034 
3035 	port->enter_state = port->state;
3036 	switch (port->state) {
3037 	case TOGGLING:
3038 		break;
3039 	/* SRC states */
3040 	case SRC_UNATTACHED:
3041 		if (!port->non_pd_role_swap)
3042 			tcpm_swap_complete(port, -ENOTCONN);
3043 		tcpm_src_detach(port);
3044 		if (tcpm_start_toggling(port, tcpm_rp_cc(port))) {
3045 			tcpm_set_state(port, TOGGLING, 0);
3046 			break;
3047 		}
3048 		tcpm_set_cc(port, tcpm_rp_cc(port));
3049 		if (port->port_type == TYPEC_PORT_DRP)
3050 			tcpm_set_state(port, SNK_UNATTACHED, PD_T_DRP_SNK);
3051 		break;
3052 	case SRC_ATTACH_WAIT:
3053 		if (tcpm_port_is_debug(port))
3054 			tcpm_set_state(port, DEBUG_ACC_ATTACHED,
3055 				       PD_T_CC_DEBOUNCE);
3056 		else if (tcpm_port_is_audio(port))
3057 			tcpm_set_state(port, AUDIO_ACC_ATTACHED,
3058 				       PD_T_CC_DEBOUNCE);
3059 		else if (tcpm_port_is_source(port))
3060 			tcpm_set_state(port,
3061 				       tcpm_try_snk(port) ? SNK_TRY
3062 							  : SRC_ATTACHED,
3063 				       PD_T_CC_DEBOUNCE);
3064 		break;
3065 
3066 	case SNK_TRY:
3067 		port->try_snk_count++;
3068 		/*
3069 		 * Requirements:
3070 		 * - Do not drive vconn or vbus
3071 		 * - Terminate CC pins (both) to Rd
3072 		 * Action:
3073 		 * - Wait for tDRPTry (PD_T_DRP_TRY).
3074 		 *   Until then, ignore any state changes.
3075 		 */
3076 		tcpm_set_cc(port, TYPEC_CC_RD);
3077 		tcpm_set_state(port, SNK_TRY_WAIT, PD_T_DRP_TRY);
3078 		break;
3079 	case SNK_TRY_WAIT:
3080 		if (tcpm_port_is_sink(port)) {
3081 			tcpm_set_state(port, SNK_TRY_WAIT_DEBOUNCE, 0);
3082 		} else {
3083 			tcpm_set_state(port, SRC_TRYWAIT, 0);
3084 			port->max_wait = 0;
3085 		}
3086 		break;
3087 	case SNK_TRY_WAIT_DEBOUNCE:
3088 		tcpm_set_state(port, SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS,
3089 			       PD_T_PD_DEBOUNCE);
3090 		break;
3091 	case SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS:
3092 		if (port->vbus_present && tcpm_port_is_sink(port)) {
3093 			tcpm_set_state(port, SNK_ATTACHED, 0);
3094 		} else {
3095 			tcpm_set_state(port, SRC_TRYWAIT, 0);
3096 			port->max_wait = 0;
3097 		}
3098 		break;
3099 	case SRC_TRYWAIT:
3100 		tcpm_set_cc(port, tcpm_rp_cc(port));
3101 		if (port->max_wait == 0) {
3102 			port->max_wait = jiffies +
3103 					 msecs_to_jiffies(PD_T_DRP_TRY);
3104 			tcpm_set_state(port, SRC_TRYWAIT_UNATTACHED,
3105 				       PD_T_DRP_TRY);
3106 		} else {
3107 			if (time_is_after_jiffies(port->max_wait))
3108 				tcpm_set_state(port, SRC_TRYWAIT_UNATTACHED,
3109 					       jiffies_to_msecs(port->max_wait -
3110 								jiffies));
3111 			else
3112 				tcpm_set_state(port, SNK_UNATTACHED, 0);
3113 		}
3114 		break;
3115 	case SRC_TRYWAIT_DEBOUNCE:
3116 		tcpm_set_state(port, SRC_ATTACHED, PD_T_CC_DEBOUNCE);
3117 		break;
3118 	case SRC_TRYWAIT_UNATTACHED:
3119 		tcpm_set_state(port, SNK_UNATTACHED, 0);
3120 		break;
3121 
3122 	case SRC_ATTACHED:
3123 		ret = tcpm_src_attach(port);
3124 		tcpm_set_state(port, SRC_UNATTACHED,
3125 			       ret < 0 ? 0 : PD_T_PS_SOURCE_ON);
3126 		break;
3127 	case SRC_STARTUP:
3128 		opmode =  tcpm_get_pwr_opmode(tcpm_rp_cc(port));
3129 		typec_set_pwr_opmode(port->typec_port, opmode);
3130 		port->pwr_opmode = TYPEC_PWR_MODE_USB;
3131 		port->caps_count = 0;
3132 		port->negotiated_rev = PD_MAX_REV;
3133 		port->message_id = 0;
3134 		port->rx_msgid = -1;
3135 		port->explicit_contract = false;
3136 		tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
3137 		break;
3138 	case SRC_SEND_CAPABILITIES:
3139 		port->caps_count++;
3140 		if (port->caps_count > PD_N_CAPS_COUNT) {
3141 			tcpm_set_state(port, SRC_READY, 0);
3142 			break;
3143 		}
3144 		ret = tcpm_pd_send_source_caps(port);
3145 		if (ret < 0) {
3146 			tcpm_set_state(port, SRC_SEND_CAPABILITIES,
3147 				       PD_T_SEND_SOURCE_CAP);
3148 		} else {
3149 			/*
3150 			 * Per standard, we should clear the reset counter here.
3151 			 * However, that can result in state machine hang-ups.
3152 			 * Reset it only in READY state to improve stability.
3153 			 */
3154 			/* port->hard_reset_count = 0; */
3155 			port->caps_count = 0;
3156 			port->pd_capable = true;
3157 			tcpm_set_state_cond(port, SRC_SEND_CAPABILITIES_TIMEOUT,
3158 					    PD_T_SEND_SOURCE_CAP);
3159 		}
3160 		break;
3161 	case SRC_SEND_CAPABILITIES_TIMEOUT:
3162 		/*
3163 		 * Error recovery for a PD_DATA_SOURCE_CAP reply timeout.
3164 		 *
3165 		 * PD 2.0 sinks are supposed to accept src-capabilities with a
3166 		 * 3.0 header and simply ignore any src PDOs which the sink does
3167 		 * not understand such as PPS but some 2.0 sinks instead ignore
3168 		 * the entire PD_DATA_SOURCE_CAP message, causing contract
3169 		 * negotiation to fail.
3170 		 *
3171 		 * After PD_N_HARD_RESET_COUNT hard-reset attempts, we try
3172 		 * sending src-capabilities with a lower PD revision to
3173 		 * make these broken sinks work.
3174 		 */
3175 		if (port->hard_reset_count < PD_N_HARD_RESET_COUNT) {
3176 			tcpm_set_state(port, HARD_RESET_SEND, 0);
3177 		} else if (port->negotiated_rev > PD_REV20) {
3178 			port->negotiated_rev--;
3179 			port->hard_reset_count = 0;
3180 			tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
3181 		} else {
3182 			tcpm_set_state(port, hard_reset_state(port), 0);
3183 		}
3184 		break;
3185 	case SRC_NEGOTIATE_CAPABILITIES:
3186 		ret = tcpm_pd_check_request(port);
3187 		if (ret < 0) {
3188 			tcpm_pd_send_control(port, PD_CTRL_REJECT);
3189 			if (!port->explicit_contract) {
3190 				tcpm_set_state(port,
3191 					       SRC_WAIT_NEW_CAPABILITIES, 0);
3192 			} else {
3193 				tcpm_set_state(port, SRC_READY, 0);
3194 			}
3195 		} else {
3196 			tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
3197 			tcpm_set_state(port, SRC_TRANSITION_SUPPLY,
3198 				       PD_T_SRC_TRANSITION);
3199 		}
3200 		break;
3201 	case SRC_TRANSITION_SUPPLY:
3202 		/* XXX: regulator_set_voltage(vbus, ...) */
3203 		tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
3204 		port->explicit_contract = true;
3205 		typec_set_pwr_opmode(port->typec_port, TYPEC_PWR_MODE_PD);
3206 		port->pwr_opmode = TYPEC_PWR_MODE_PD;
3207 		tcpm_set_state_cond(port, SRC_READY, 0);
3208 		break;
3209 	case SRC_READY:
3210 #if 1
3211 		port->hard_reset_count = 0;
3212 #endif
3213 		port->try_src_count = 0;
3214 
3215 		tcpm_swap_complete(port, 0);
3216 		tcpm_typec_connect(port);
3217 
3218 		tcpm_check_send_discover(port);
3219 		/*
3220 		 * 6.3.5
3221 		 * Sending ping messages is not necessary if
3222 		 * - the source operates at vSafe5V
3223 		 * or
3224 		 * - The system is not operating in PD mode
3225 		 * or
3226 		 * - Both partners are connected using a Type-C connector
3227 		 *
3228 		 * There is no actual need to send PD messages since the local
3229 		 * port is Type-C, and the spec does not clearly say whether PD
3230 		 * is possible when Type-C is connected to Type-A/B.
3231 		 */
3232 		break;
3233 	case SRC_WAIT_NEW_CAPABILITIES:
3234 		/* Nothing to do... */
3235 		break;
3236 
3237 	/* SNK states */
3238 	case SNK_UNATTACHED:
3239 		if (!port->non_pd_role_swap)
3240 			tcpm_swap_complete(port, -ENOTCONN);
3241 		tcpm_pps_complete(port, -ENOTCONN);
3242 		tcpm_snk_detach(port);
3243 		if (tcpm_start_toggling(port, TYPEC_CC_RD)) {
3244 			tcpm_set_state(port, TOGGLING, 0);
3245 			break;
3246 		}
3247 		tcpm_set_cc(port, TYPEC_CC_RD);
3248 		if (port->port_type == TYPEC_PORT_DRP)
3249 			tcpm_set_state(port, SRC_UNATTACHED, PD_T_DRP_SRC);
3250 		break;
3251 	case SNK_ATTACH_WAIT:
3252 		if ((port->cc1 == TYPEC_CC_OPEN &&
3253 		     port->cc2 != TYPEC_CC_OPEN) ||
3254 		    (port->cc1 != TYPEC_CC_OPEN &&
3255 		     port->cc2 == TYPEC_CC_OPEN))
3256 			tcpm_set_state(port, SNK_DEBOUNCED,
3257 				       PD_T_CC_DEBOUNCE);
3258 		else if (tcpm_port_is_disconnected(port))
3259 			tcpm_set_state(port, SNK_UNATTACHED,
3260 				       PD_T_PD_DEBOUNCE);
3261 		break;
3262 	case SNK_DEBOUNCED:
3263 		if (tcpm_port_is_disconnected(port))
3264 			tcpm_set_state(port, SNK_UNATTACHED,
3265 				       PD_T_PD_DEBOUNCE);
3266 		else if (port->vbus_present)
3267 			tcpm_set_state(port,
3268 				       tcpm_try_src(port) ? SRC_TRY
3269 							  : SNK_ATTACHED,
3270 				       0);
3271 		else
3272 			/* Wait for VBUS, but not forever */
3273 			tcpm_set_state(port, PORT_RESET, PD_T_PS_SOURCE_ON);
3274 		break;
3275 
3276 	case SRC_TRY:
3277 		port->try_src_count++;
3278 		tcpm_set_cc(port, tcpm_rp_cc(port));
3279 		port->max_wait = 0;
3280 		tcpm_set_state(port, SRC_TRY_WAIT, 0);
3281 		break;
3282 	case SRC_TRY_WAIT:
3283 		if (port->max_wait == 0) {
3284 			port->max_wait = jiffies +
3285 					 msecs_to_jiffies(PD_T_DRP_TRY);
3286 			msecs = PD_T_DRP_TRY;
3287 		} else {
3288 			if (time_is_after_jiffies(port->max_wait))
3289 				msecs = jiffies_to_msecs(port->max_wait -
3290 							 jiffies);
3291 			else
3292 				msecs = 0;
3293 		}
3294 		tcpm_set_state(port, SNK_TRYWAIT, msecs);
3295 		break;
3296 	case SRC_TRY_DEBOUNCE:
3297 		tcpm_set_state(port, SRC_ATTACHED, PD_T_PD_DEBOUNCE);
3298 		break;
3299 	case SNK_TRYWAIT:
3300 		tcpm_set_cc(port, TYPEC_CC_RD);
3301 		tcpm_set_state(port, SNK_TRYWAIT_VBUS, PD_T_CC_DEBOUNCE);
3302 		break;
3303 	case SNK_TRYWAIT_VBUS:
3304 		/*
3305 		 * TCPM stays in this state indefinitely waiting for VBUS,
3306 		 * as long as Rp remains detected; if Rp is absent for more
3307 		 * than tPDDebounce, the port goes back to unattached.
3308 		 */
3309 		if (port->vbus_present && tcpm_port_is_sink(port)) {
3310 			tcpm_set_state(port, SNK_ATTACHED, 0);
3311 			break;
3312 		}
3313 		if (!tcpm_port_is_sink(port))
3314 			tcpm_set_state(port, SNK_TRYWAIT_DEBOUNCE, 0);
3315 		break;
3316 	case SNK_TRYWAIT_DEBOUNCE:
3317 		tcpm_set_state(port, SNK_UNATTACHED, PD_T_PD_DEBOUNCE);
3318 		break;
3319 	case SNK_ATTACHED:
3320 		ret = tcpm_snk_attach(port);
3321 		if (ret < 0)
3322 			tcpm_set_state(port, SNK_UNATTACHED, 0);
3323 		else
3324 			tcpm_set_state(port, SNK_STARTUP, 0);
3325 		break;
3326 	case SNK_STARTUP:
3327 		opmode =  tcpm_get_pwr_opmode(port->polarity ?
3328 					      port->cc2 : port->cc1);
3329 		typec_set_pwr_opmode(port->typec_port, opmode);
3330 		port->pwr_opmode = TYPEC_PWR_MODE_USB;
3331 		port->negotiated_rev = PD_MAX_REV;
3332 		port->message_id = 0;
3333 		port->rx_msgid = -1;
3334 		port->explicit_contract = false;
3335 		tcpm_set_state(port, SNK_DISCOVERY, 0);
3336 		break;
3337 	case SNK_DISCOVERY:
3338 		if (port->vbus_present) {
3339 			tcpm_set_current_limit(port,
3340 					       tcpm_get_current_limit(port),
3341 					       5000);
3342 			tcpm_set_charge(port, true);
3343 			tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
3344 			break;
3345 		}
3346 		/*
3347 		 * For DRP, timeouts differ. Also, handling is supposed to be
3348 		 * different and much more complex (dead battery detection;
3349 		 * see USB power delivery specification, section 8.3.3.6.1.5.1).
3350 		 */
3351 		tcpm_set_state(port, hard_reset_state(port),
3352 			       port->port_type == TYPEC_PORT_DRP ?
3353 					PD_T_DB_DETECT : PD_T_NO_RESPONSE);
3354 		break;
3355 	case SNK_DISCOVERY_DEBOUNCE:
3356 		tcpm_set_state(port, SNK_DISCOVERY_DEBOUNCE_DONE,
3357 			       PD_T_CC_DEBOUNCE);
3358 		break;
3359 	case SNK_DISCOVERY_DEBOUNCE_DONE:
3360 		if (!tcpm_port_is_disconnected(port) &&
3361 		    tcpm_port_is_sink(port) &&
3362 		    ktime_after(port->delayed_runtime, ktime_get())) {
3363 			tcpm_set_state(port, SNK_DISCOVERY,
3364 				       ktime_to_ms(ktime_sub(port->delayed_runtime, ktime_get())));
3365 			break;
3366 		}
3367 		tcpm_set_state(port, unattached_state(port), 0);
3368 		break;
3369 	case SNK_WAIT_CAPABILITIES:
3370 		ret = port->tcpc->set_pd_rx(port->tcpc, true);
3371 		if (ret < 0) {
3372 			tcpm_set_state(port, SNK_READY, 0);
3373 			break;
3374 		}
3375 		/*
3376 		 * If VBUS has never been low, and we time out waiting
3377 		 * for source cap, try a soft reset first, in case we
3378 		 * were already in a stable contract before this boot.
3379 		 * Do this only once.
3380 		 */
3381 		if (port->vbus_never_low) {
3382 			port->vbus_never_low = false;
3383 			tcpm_set_state(port, SOFT_RESET_SEND,
3384 				       PD_T_SINK_WAIT_CAP);
3385 		} else {
3386 			tcpm_set_state(port, hard_reset_state(port),
3387 				       PD_T_SINK_WAIT_CAP);
3388 		}
3389 		break;
3390 	case SNK_NEGOTIATE_CAPABILITIES:
3391 		port->pd_capable = true;
3392 		port->hard_reset_count = 0;
3393 		ret = tcpm_pd_send_request(port);
3394 		if (ret < 0) {
3395 			/* Let the Source send capabilities again. */
3396 			tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
3397 		} else {
3398 			tcpm_set_state_cond(port, hard_reset_state(port),
3399 					    PD_T_SENDER_RESPONSE);
3400 		}
3401 		break;
3402 	case SNK_NEGOTIATE_PPS_CAPABILITIES:
3403 		ret = tcpm_pd_send_pps_request(port);
3404 		if (ret < 0) {
3405 			port->pps_status = ret;
3406 			/*
3407 			 * If this was called due to updates to sink
3408 			 * capabilities, and pps is no longer valid, we should
3409 			 * safely fall back to a standard PDO.
3410 			 */
3411 			if (port->update_sink_caps)
3412 				tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0);
3413 			else
3414 				tcpm_set_state(port, SNK_READY, 0);
3415 		} else {
3416 			tcpm_set_state_cond(port, hard_reset_state(port),
3417 					    PD_T_SENDER_RESPONSE);
3418 		}
3419 		break;
3420 	case SNK_TRANSITION_SINK:
3421 	case SNK_TRANSITION_SINK_VBUS:
3422 		tcpm_set_state(port, hard_reset_state(port),
3423 			       PD_T_PS_TRANSITION);
3424 		break;
3425 	case SNK_READY:
3426 		port->try_snk_count = 0;
3427 		port->update_sink_caps = false;
3428 		if (port->explicit_contract) {
3429 			typec_set_pwr_opmode(port->typec_port,
3430 					     TYPEC_PWR_MODE_PD);
3431 			port->pwr_opmode = TYPEC_PWR_MODE_PD;
3432 		}
3433 
3434 		tcpm_swap_complete(port, 0);
3435 		tcpm_typec_connect(port);
3436 		tcpm_check_send_discover(port);
3437 		mod_enable_frs_delayed_work(port, 0);
3438 		tcpm_pps_complete(port, port->pps_status);
3439 		power_supply_changed(port->psy);
3440 		break;
3441 
3442 	/* Accessory states */
3443 	case ACC_UNATTACHED:
3444 		tcpm_acc_detach(port);
3445 		tcpm_set_state(port, SRC_UNATTACHED, 0);
3446 		break;
3447 	case DEBUG_ACC_ATTACHED:
3448 	case AUDIO_ACC_ATTACHED:
3449 		ret = tcpm_acc_attach(port);
3450 		if (ret < 0)
3451 			tcpm_set_state(port, ACC_UNATTACHED, 0);
3452 		break;
3453 	case AUDIO_ACC_DEBOUNCE:
3454 		tcpm_set_state(port, ACC_UNATTACHED, PD_T_CC_DEBOUNCE);
3455 		break;
3456 
3457 	/* Hard_Reset states */
3458 	case HARD_RESET_SEND:
3459 		tcpm_pd_transmit(port, TCPC_TX_HARD_RESET, NULL);
3460 		tcpm_set_state(port, HARD_RESET_START, 0);
3461 		break;
3462 	case HARD_RESET_START:
3463 		port->sink_cap_done = false;
3464 		if (port->tcpc->enable_frs)
3465 			port->tcpc->enable_frs(port->tcpc, false);
3466 		port->hard_reset_count++;
3467 		port->tcpc->set_pd_rx(port->tcpc, false);
3468 		tcpm_unregister_altmodes(port);
3469 		port->nr_sink_caps = 0;
3470 		port->send_discover = true;
3471 		if (port->pwr_role == TYPEC_SOURCE)
3472 			tcpm_set_state(port, SRC_HARD_RESET_VBUS_OFF,
3473 				       PD_T_PS_HARD_RESET);
3474 		else
3475 			tcpm_set_state(port, SNK_HARD_RESET_SINK_OFF, 0);
3476 		break;
3477 	case SRC_HARD_RESET_VBUS_OFF:
3478 		/*
3479 		 * 7.1.5 Response to Hard Resets
3480 		 * Hard Reset Signaling indicates a communication failure has occurred and the
3481 		 * Source Shall stop driving VCONN, Shall remove Rp from the VCONN pin and Shall
3482 		 * drive VBUS to vSafe0V as shown in Figure 7-9.
3483 		 */
3484 		tcpm_set_vconn(port, false);
3485 		tcpm_set_vbus(port, false);
3486 		tcpm_set_roles(port, port->self_powered, TYPEC_SOURCE,
3487 			       tcpm_data_role_for_source(port));
3488 		/*
3489 		 * If tcpc fails to notify vbus off, TCPM will wait for PD_T_SAFE_0V +
3490 		 * PD_T_SRC_RECOVER before turning vbus back on.
3491 		 * From Table 7-12 Sequence Description for a Source Initiated Hard Reset:
3492 		 * 4. Policy Engine waits tPSHardReset after sending Hard Reset Signaling and then
3493 		 * tells the Device Policy Manager to instruct the power supply to perform a
3494 		 * Hard Reset. The transition to vSafe0V Shall occur within tSafe0V (t2).
3495 		 * 5. After tSrcRecover the Source applies power to VBUS in an attempt to
3496 		 * re-establish communication with the Sink and resume USB Default Operation.
3497 		 * The transition to vSafe5V Shall occur within tSrcTurnOn(t4).
3498 		 */
3499 		tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, PD_T_SAFE_0V + PD_T_SRC_RECOVER);
3500 		break;
3501 	case SRC_HARD_RESET_VBUS_ON:
3502 		tcpm_set_vconn(port, true);
3503 		tcpm_set_vbus(port, true);
3504 		port->tcpc->set_pd_rx(port->tcpc, true);
3505 		tcpm_set_attached_state(port, true);
3506 		tcpm_set_state(port, SRC_UNATTACHED, PD_T_PS_SOURCE_ON);
3507 		break;
3508 	case SNK_HARD_RESET_SINK_OFF:
3509 		memset(&port->pps_data, 0, sizeof(port->pps_data));
3510 		tcpm_set_vconn(port, false);
3511 		if (port->pd_capable)
3512 			tcpm_set_charge(port, false);
3513 		tcpm_set_roles(port, port->self_powered, TYPEC_SINK,
3514 			       tcpm_data_role_for_sink(port));
3515 		/*
3516 		 * VBUS may or may not toggle, depending on the adapter.
3517 		 * If it doesn't toggle, transition to SNK_HARD_RESET_SINK_ON
3518 		 * directly after timeout.
3519 		 */
3520 		tcpm_set_state(port, SNK_HARD_RESET_SINK_ON, PD_T_SAFE_0V);
3521 		break;
3522 	case SNK_HARD_RESET_WAIT_VBUS:
3523 		/* Assume we're disconnected if VBUS doesn't come back. */
3524 		tcpm_set_state(port, SNK_UNATTACHED,
3525 			       PD_T_SRC_RECOVER_MAX + PD_T_SRC_TURN_ON);
3526 		break;
3527 	case SNK_HARD_RESET_SINK_ON:
3528 		/* Note: There is no guarantee that VBUS is on in this state */
3529 		/*
3530 		 * XXX:
3531 		 * The specification suggests that dual mode ports in sink
3532 		 * mode should transition to state PE_SRC_Transition_to_default.
3533 		 * See USB power delivery specification chapter 8.3.3.6.1.3.
3534 		 * This would mean to
3535 		 * - turn off VCONN, reset power supply
3536 		 * - request hardware reset
3537 		 * - turn on VCONN
3538 		 * - Transition to state PE_Src_Startup
3539 		 * SNK only ports shall transition to state Snk_Startup
3540 		 * (see chapter 8.3.3.3.8).
3541 		 * Similarly, dual-mode ports in source mode should transition
3542 		 * to PE_SNK_Transition_to_default.
3543 		 */
3544 		if (port->pd_capable) {
3545 			tcpm_set_current_limit(port,
3546 					       tcpm_get_current_limit(port),
3547 					       5000);
3548 			tcpm_set_charge(port, true);
3549 		}
3550 		tcpm_set_attached_state(port, true);
3551 		tcpm_set_state(port, SNK_STARTUP, 0);
3552 		break;
3553 
3554 	/* Soft_Reset states */
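	/*
	 * Soft Reset restarts the protocol layer without cycling VBUS:
	 * both MessageID counters are cleared and capabilities are
	 * re-exchanged so a new explicit contract can be negotiated.
	 */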
3555 	case SOFT_RESET:
3556 		port->message_id = 0;
3557 		port->rx_msgid = -1;
3558 		tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
3559 		if (port->pwr_role == TYPEC_SOURCE)
3560 			tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
3561 		else
3562 			tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
3563 		break;
3564 	case SOFT_RESET_SEND:
3565 		port->message_id = 0;
3566 		port->rx_msgid = -1;
3567 		if (tcpm_pd_send_control(port, PD_CTRL_SOFT_RESET))
3568 			tcpm_set_state_cond(port, hard_reset_state(port), 0);
3569 		else
3570 			tcpm_set_state_cond(port, hard_reset_state(port),
3571 					    PD_T_SENDER_RESPONSE);
3572 		break;
3573 
3574 	/* DR_Swap states */
3575 	case DR_SWAP_SEND:
3576 		tcpm_pd_send_control(port, PD_CTRL_DR_SWAP);
3577 		tcpm_set_state_cond(port, DR_SWAP_SEND_TIMEOUT,
3578 				    PD_T_SENDER_RESPONSE);
3579 		break;
3580 	case DR_SWAP_ACCEPT:
3581 		tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
3582 		tcpm_set_state_cond(port, DR_SWAP_CHANGE_DR, 0);
3583 		break;
3584 	case DR_SWAP_SEND_TIMEOUT:
3585 		tcpm_swap_complete(port, -ETIMEDOUT);
3586 		tcpm_set_state(port, ready_state(port), 0);
3587 		break;
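	/*
	 * Apply the new data role: becoming UFP (TYPEC_DEVICE) drops any
	 * registered alternate modes, while becoming DFP (TYPEC_HOST)
	 * schedules a fresh discovery via send_discover.
	 */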
3588 	case DR_SWAP_CHANGE_DR:
3589 		if (port->data_role == TYPEC_HOST) {
3590 			tcpm_unregister_altmodes(port);
3591 			tcpm_set_roles(port, true, port->pwr_role,
3592 				       TYPEC_DEVICE);
3593 		} else {
3594 			tcpm_set_roles(port, true, port->pwr_role,
3595 				       TYPEC_HOST);
3596 			port->send_discover = true;
3597 		}
3598 		tcpm_set_state(port, ready_state(port), 0);
3599 		break;
3600 
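	/* FR_Swap states */
	/*
	 * Fast Role Swap: the sink takes over sourcing VBUS when the
	 * partner signals that its supply is going away. Failures here
	 * escalate to ERROR_RECOVERY rather than returning to a ready
	 * state.
	 */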
3601 	case FR_SWAP_SEND:
3602 		if (tcpm_pd_send_control(port, PD_CTRL_FR_SWAP)) {
3603 			tcpm_set_state(port, ERROR_RECOVERY, 0);
3604 			break;
3605 		}
3606 		tcpm_set_state_cond(port, FR_SWAP_SEND_TIMEOUT, PD_T_SENDER_RESPONSE);
3607 		break;
3608 	case FR_SWAP_SEND_TIMEOUT:
3609 		tcpm_set_state(port, ERROR_RECOVERY, 0);
3610 		break;
3611 	case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
3612 		tcpm_set_state(port, ERROR_RECOVERY, PD_T_PS_SOURCE_OFF);
3613 		break;
3614 	case FR_SWAP_SNK_SRC_NEW_SINK_READY:
3615 		if (port->vbus_source)
3616 			tcpm_set_state(port, FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED, 0);
3617 		else
3618 			tcpm_set_state(port, ERROR_RECOVERY, PD_T_RECEIVER_RESPONSE);
3619 		break;
3620 	case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED:
3621 		tcpm_set_pwr_role(port, TYPEC_SOURCE);
3622 		if (tcpm_pd_send_control(port, PD_CTRL_PS_RDY)) {
3623 			tcpm_set_state(port, ERROR_RECOVERY, 0);
3624 			break;
3625 		}
3626 		tcpm_set_cc(port, tcpm_rp_cc(port));
3627 		tcpm_set_state(port, SRC_STARTUP, PD_T_SWAP_SRC_START);
3628 		break;
3629 
3630 	/* PR_Swap states */
3631 	case PR_SWAP_ACCEPT:
3632 		tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
3633 		tcpm_set_state(port, PR_SWAP_START, 0);
3634 		break;
3635 	case PR_SWAP_SEND:
3636 		tcpm_pd_send_control(port, PD_CTRL_PR_SWAP);
3637 		tcpm_set_state_cond(port, PR_SWAP_SEND_TIMEOUT,
3638 				    PD_T_SENDER_RESPONSE);
3639 		break;
3640 	case PR_SWAP_SEND_TIMEOUT:
3641 		tcpm_swap_complete(port, -ETIMEDOUT);
3642 		tcpm_set_state(port, ready_state(port), 0);
3643 		break;
3644 	case PR_SWAP_START:
3645 		if (port->pwr_role == TYPEC_SOURCE)
3646 			tcpm_set_state(port, PR_SWAP_SRC_SNK_TRANSITION_OFF,
3647 				       PD_T_SRC_TRANSITION);
3648 		else
3649 			tcpm_set_state(port, PR_SWAP_SNK_SRC_SINK_OFF, 0);
3650 		break;
3651 	case PR_SWAP_SRC_SNK_TRANSITION_OFF:
3652 		tcpm_set_vbus(port, false);
3653 		port->explicit_contract = false;
3654 		/* allow time for Vbus discharge, must be < tSrcSwapStdby */
3655 		tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF,
3656 			       PD_T_SRCSWAPSTDBY);
3657 		break;
3658 	case PR_SWAP_SRC_SNK_SOURCE_OFF:
3659 		tcpm_set_cc(port, TYPEC_CC_RD);
3660 		/* allow CC debounce */
3661 		tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED,
3662 			       PD_T_CC_DEBOUNCE);
3663 		break;
3664 	case PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED:
3665 		/*
3666 		 * USB-PD standard, 6.2.1.4, Port Power Role:
3667 		 * "During the Power Role Swap Sequence, for the initial Source
3668 		 * Port, the Port Power Role field shall be set to Sink in the
3669 		 * PS_RDY Message indicating that the initial Source’s power
3670 		 * supply is turned off"
3671 		 */
3672 		tcpm_set_pwr_role(port, TYPEC_SINK);
3673 		if (tcpm_pd_send_control(port, PD_CTRL_PS_RDY)) {
3674 			tcpm_set_state(port, ERROR_RECOVERY, 0);
3675 			break;
3676 		}
3677 		tcpm_set_state_cond(port, SNK_UNATTACHED, PD_T_PS_SOURCE_ON);
3678 		break;
3679 	case PR_SWAP_SRC_SNK_SINK_ON:
3680 		tcpm_set_state(port, SNK_STARTUP, 0);
3681 		break;
3682 	case PR_SWAP_SNK_SRC_SINK_OFF:
3683 		tcpm_set_charge(port, false);
3684 		tcpm_set_state(port, hard_reset_state(port),
3685 			       PD_T_PS_SOURCE_OFF);
3686 		break;
3687 	case PR_SWAP_SNK_SRC_SOURCE_ON:
3688 		tcpm_set_cc(port, tcpm_rp_cc(port));
3689 		tcpm_set_vbus(port, true);
3690 		/*
3691 		 * Allow time for VBUS ramp-up; must be < tNewSrc.
3692 		 * This window also overlaps with CC debounce, so wait for
3693 		 * the larger of the two, which is PD_T_NEWSRC.
3694 		 */
3695 		tcpm_set_state(port, PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP,
3696 			       PD_T_NEWSRC);
3697 		break;
3698 	case PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP:
3699 		/*
3700 		 * USB PD standard, 6.2.1.4:
3701 		 * "Subsequent Messages initiated by the Policy Engine,
3702 		 * such as the PS_RDY Message sent to indicate that Vbus
3703 		 * is ready, will have the Port Power Role field set to
3704 		 * Source."
3705 		 */
3706 		tcpm_set_pwr_role(port, TYPEC_SOURCE);
3707 		tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
3708 		tcpm_set_state(port, SRC_STARTUP, PD_T_SWAP_SRC_START);
3709 		break;
3710 
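	/* VCONN_Swap states */
	/*
	 * The port not currently supplying VCONN turns it on and then
	 * sends PS_RDY; the current supplier waits up to tVCONNSourceOn
	 * for the partner's PS_RDY before turning VCONN off.
	 */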
3711 	case VCONN_SWAP_ACCEPT:
3712 		tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
3713 		tcpm_set_state(port, VCONN_SWAP_START, 0);
3714 		break;
3715 	case VCONN_SWAP_SEND:
3716 		tcpm_pd_send_control(port, PD_CTRL_VCONN_SWAP);
3717 		tcpm_set_state(port, VCONN_SWAP_SEND_TIMEOUT,
3718 			       PD_T_SENDER_RESPONSE);
3719 		break;
3720 	case VCONN_SWAP_SEND_TIMEOUT:
3721 		tcpm_swap_complete(port, -ETIMEDOUT);
3722 		tcpm_set_state(port, ready_state(port), 0);
3723 		break;
3724 	case VCONN_SWAP_START:
3725 		if (port->vconn_role == TYPEC_SOURCE)
3726 			tcpm_set_state(port, VCONN_SWAP_WAIT_FOR_VCONN, 0);
3727 		else
3728 			tcpm_set_state(port, VCONN_SWAP_TURN_ON_VCONN, 0);
3729 		break;
3730 	case VCONN_SWAP_WAIT_FOR_VCONN:
3731 		tcpm_set_state(port, hard_reset_state(port),
3732 			       PD_T_VCONN_SOURCE_ON);
3733 		break;
3734 	case VCONN_SWAP_TURN_ON_VCONN:
3735 		tcpm_set_vconn(port, true);
3736 		tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
3737 		tcpm_set_state(port, ready_state(port), 0);
3738 		break;
3739 	case VCONN_SWAP_TURN_OFF_VCONN:
3740 		tcpm_set_vconn(port, false);
3741 		tcpm_set_state(port, ready_state(port), 0);
3742 		break;
3743 
3744 	case DR_SWAP_CANCEL:
3745 	case PR_SWAP_CANCEL:
3746 	case VCONN_SWAP_CANCEL:
3747 		tcpm_swap_complete(port, port->swap_status);
3748 		if (port->pwr_role == TYPEC_SOURCE)
3749 			tcpm_set_state(port, SRC_READY, 0);
3750 		else
3751 			tcpm_set_state(port, SNK_READY, 0);
3752 		break;
3753 	case FR_SWAP_CANCEL:
3754 		if (port->pwr_role == TYPEC_SOURCE)
3755 			tcpm_set_state(port, SRC_READY, 0);
3756 		else
3757 			tcpm_set_state(port, SNK_READY, 0);
3758 		break;
3759 
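	/*
	 * BIST: Carrier Mode 2 transmits the test carrier for
	 * tBISTContMode and then returns to the unattached state, while
	 * Test Data mode leaves the receiver in BIST mode until a Hard
	 * Reset clears it.
	 */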
3760 	case BIST_RX:
3761 		switch (BDO_MODE_MASK(port->bist_request)) {
3762 		case BDO_MODE_CARRIER2:
3763 			tcpm_pd_transmit(port, TCPC_TX_BIST_MODE_2, NULL);
3764 			tcpm_set_state(port, unattached_state(port),
3765 				       PD_T_BIST_CONT_MODE);
3766 			break;
3767 		case BDO_MODE_TESTDATA:
3768 			if (port->tcpc->set_bist_data) {
3769 				tcpm_log(port, "Enable BIST MODE TESTDATA");
3770 				port->tcpc->set_bist_data(port->tcpc, true);
3771 			}
3772 			break;
3773 		default:
3774 			break;
3775 		}
3776 		break;
3777 	case GET_STATUS_SEND:
3778 		tcpm_pd_send_control(port, PD_CTRL_GET_STATUS);
3779 		tcpm_set_state(port, GET_STATUS_SEND_TIMEOUT,
3780 			       PD_T_SENDER_RESPONSE);
3781 		break;
3782 	case GET_STATUS_SEND_TIMEOUT:
3783 		tcpm_set_state(port, ready_state(port), 0);
3784 		break;
3785 	case GET_PPS_STATUS_SEND:
3786 		tcpm_pd_send_control(port, PD_CTRL_GET_PPS_STATUS);
3787 		tcpm_set_state(port, GET_PPS_STATUS_SEND_TIMEOUT,
3788 			       PD_T_SENDER_RESPONSE);
3789 		break;
3790 	case GET_PPS_STATUS_SEND_TIMEOUT:
3791 		tcpm_set_state(port, ready_state(port), 0);
3792 		break;
3793 	case GET_SINK_CAP:
3794 		tcpm_pd_send_control(port, PD_CTRL_GET_SINK_CAP);
3795 		tcpm_set_state(port, GET_SINK_CAP_TIMEOUT, PD_T_SENDER_RESPONSE);
3796 		break;
3797 	case GET_SINK_CAP_TIMEOUT:
3798 		port->sink_cap_done = true;
3799 		tcpm_set_state(port, ready_state(port), 0);
3800 		break;
3801 	case ERROR_RECOVERY:
3802 		tcpm_swap_complete(port, -EPROTO);
3803 		tcpm_pps_complete(port, -EPROTO);
3804 		tcpm_set_state(port, PORT_RESET, 0);
3805 		break;
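	/*
	 * Error recovery: drive both CC lines open for tErrorRecovery,
	 * then wait for VBUS to discharge before returning to the
	 * default unattached state.
	 */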
3806 	case PORT_RESET:
3807 		tcpm_reset_port(port);
3808 		tcpm_set_cc(port, TYPEC_CC_OPEN);
3809 		tcpm_set_state(port, PORT_RESET_WAIT_OFF,
3810 			       PD_T_ERROR_RECOVERY);
3811 		break;
3812 	case PORT_RESET_WAIT_OFF:
3813 		tcpm_set_state(port,
3814 			       tcpm_default_state(port),
3815 			       port->vbus_present ? PD_T_PS_SOURCE_OFF : 0);
3816 		break;
3817 	default:
3818 		WARN(1, "Unexpected port state %d\n", port->state);
3819 		break;
3820 	}
3821 }
3822 
3823 static void tcpm_state_machine_work(struct kthread_work *work)
3824 {
3825 	struct tcpm_port *port = container_of(work, struct tcpm_port, state_machine);
3826 	enum tcpm_state prev_state;
3827 
3828 	mutex_lock(&port->lock);
3829 	port->state_machine_running = true;
3830 
3831 	if (port->queued_message && tcpm_send_queued_message(port))
3832 		goto done;
3833 
3834 	/* If we were queued due to a delayed state change, update it now */
3835 	if (port->delayed_state) {
3836 		tcpm_log(port, "state change %s -> %s [delayed %ld ms]",
3837 			 tcpm_states[port->state],
3838 			 tcpm_states[port->delayed_state], port->delay_ms);
3839 		port->prev_state = port->state;
3840 		port->state = port->delayed_state;
3841 		port->delayed_state = INVALID_STATE;
3842 	}
3843 
3844 	/*
3845 	 * Continue running as long as we have (non-delayed) state changes
3846 	 * to make.
3847 	 */
3848 	do {
3849 		prev_state = port->state;
3850 		run_state_machine(port);
3851 		if (port->queued_message)
3852 			tcpm_send_queued_message(port);
3853 	} while (port->state != prev_state && !port->delayed_state);
3854 
3855 done:
3856 	port->state_machine_running = false;
3857 	mutex_unlock(&port->lock);
3858 }
3859 
3860 static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1,
3861 			    enum typec_cc_status cc2)
3862 {
3863 	enum typec_cc_status old_cc1, old_cc2;
3864 	enum tcpm_state new_state;
3865 
3866 	old_cc1 = port->cc1;
3867 	old_cc2 = port->cc2;
3868 	port->cc1 = cc1;
3869 	port->cc2 = cc2;
3870 
3871 	tcpm_log_force(port,
3872 		       "CC1: %u -> %u, CC2: %u -> %u [state %s, polarity %d, %s]",
3873 		       old_cc1, cc1, old_cc2, cc2, tcpm_states[port->state],
3874 		       port->polarity,
3875 		       tcpm_port_is_disconnected(port) ? "disconnected"
3876 						       : "connected");
3877 
3878 	switch (port->state) {
3879 	case TOGGLING:
3880 		if (tcpm_port_is_debug(port) || tcpm_port_is_audio(port) ||
3881 		    tcpm_port_is_source(port))
3882 			tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
3883 		else if (tcpm_port_is_sink(port))
3884 			tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
3885 		break;
3886 	case SRC_UNATTACHED:
3887 	case ACC_UNATTACHED:
3888 		if (tcpm_port_is_debug(port) || tcpm_port_is_audio(port) ||
3889 		    tcpm_port_is_source(port))
3890 			tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
3891 		break;
3892 	case SRC_ATTACH_WAIT:
3893 		if (tcpm_port_is_disconnected(port) ||
3894 		    tcpm_port_is_audio_detached(port))
3895 			tcpm_set_state(port, SRC_UNATTACHED, 0);
3896 		else if (cc1 != old_cc1 || cc2 != old_cc2)
3897 			tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
3898 		break;
3899 	case SRC_ATTACHED:
3900 	case SRC_SEND_CAPABILITIES:
3901 	case SRC_READY:
3902 		if (tcpm_port_is_disconnected(port) ||
3903 		    !tcpm_port_is_source(port)) {
3904 			if (port->port_type == TYPEC_PORT_SRC)
3905 				tcpm_set_state(port, SRC_UNATTACHED, 0);
3906 			else
3907 				tcpm_set_state(port, SNK_UNATTACHED, 0);
3908 		}
3909 		break;
3910 	case SNK_UNATTACHED:
3911 		if (tcpm_port_is_sink(port))
3912 			tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
3913 		break;
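	/*
	 * Re-entering the current state restarts its debounce timer, so
	 * only do that when the CC change points at a different target
	 * than the transition already pending in delayed_state.
	 */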
3914 	case SNK_ATTACH_WAIT:
3915 		if ((port->cc1 == TYPEC_CC_OPEN &&
3916 		     port->cc2 != TYPEC_CC_OPEN) ||
3917 		    (port->cc1 != TYPEC_CC_OPEN &&
3918 		     port->cc2 == TYPEC_CC_OPEN))
3919 			new_state = SNK_DEBOUNCED;
3920 		else if (tcpm_port_is_disconnected(port))
3921 			new_state = SNK_UNATTACHED;
3922 		else
3923 			break;
3924 		if (new_state != port->delayed_state)
3925 			tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
3926 		break;
3927 	case SNK_DEBOUNCED:
3928 		if (tcpm_port_is_disconnected(port))
3929 			new_state = SNK_UNATTACHED;
3930 		else if (port->vbus_present)
3931 			new_state = tcpm_try_src(port) ? SRC_TRY : SNK_ATTACHED;
3932 		else
3933 			new_state = SNK_UNATTACHED;
3934 		if (new_state != port->delayed_state)
3935 			tcpm_set_state(port, SNK_DEBOUNCED, 0);
3936 		break;
3937 	case SNK_READY:
3938 		if (tcpm_port_is_disconnected(port))
3939 			tcpm_set_state(port, unattached_state(port), 0);
3940 		else if (!port->pd_capable &&
3941 			 (cc1 != old_cc1 || cc2 != old_cc2))
3942 			tcpm_set_current_limit(port,
3943 					       tcpm_get_current_limit(port),
3944 					       5000);
3945 		break;
3946 
3947 	case AUDIO_ACC_ATTACHED:
3948 		if (cc1 == TYPEC_CC_OPEN || cc2 == TYPEC_CC_OPEN)
3949 			tcpm_set_state(port, AUDIO_ACC_DEBOUNCE, 0);
3950 		break;
3951 	case AUDIO_ACC_DEBOUNCE:
3952 		if (tcpm_port_is_audio(port))
3953 			tcpm_set_state(port, AUDIO_ACC_ATTACHED, 0);
3954 		break;
3955 
3956 	case DEBUG_ACC_ATTACHED:
3957 		if (cc1 == TYPEC_CC_OPEN || cc2 == TYPEC_CC_OPEN)
3958 			tcpm_set_state(port, ACC_UNATTACHED, 0);
3959 		break;
3960 
3961 	case SNK_TRY:
3962 		/* Do nothing, waiting for timeout */
3963 		break;
3964 
3965 	case SNK_DISCOVERY:
3966 		/* CC line is unstable, wait for debounce */
3967 		if (tcpm_port_is_disconnected(port))
3968 			tcpm_set_state(port, SNK_DISCOVERY_DEBOUNCE, 0);
3969 		break;
3970 	case SNK_DISCOVERY_DEBOUNCE:
3971 		break;
3972 
3973 	case SRC_TRYWAIT:
3974 		/* Hand over to state machine if needed */
3975 		if (!port->vbus_present && tcpm_port_is_source(port))
3976 			tcpm_set_state(port, SRC_TRYWAIT_DEBOUNCE, 0);
3977 		break;
3978 	case SRC_TRYWAIT_DEBOUNCE:
3979 		if (port->vbus_present || !tcpm_port_is_source(port))
3980 			tcpm_set_state(port, SRC_TRYWAIT, 0);
3981 		break;
3982 	case SNK_TRY_WAIT_DEBOUNCE:
3983 		if (!tcpm_port_is_sink(port)) {
3984 			port->max_wait = 0;
3985 			tcpm_set_state(port, SRC_TRYWAIT, 0);
3986 		}
3987 		break;
3988 	case SRC_TRY_WAIT:
3989 		if (tcpm_port_is_source(port))
3990 			tcpm_set_state(port, SRC_TRY_DEBOUNCE, 0);
3991 		break;
3992 	case SRC_TRY_DEBOUNCE:
3993 		tcpm_set_state(port, SRC_TRY_WAIT, 0);
3994 		break;
3995 	case SNK_TRYWAIT_DEBOUNCE:
3996 		if (tcpm_port_is_sink(port))
3997 			tcpm_set_state(port, SNK_TRYWAIT_VBUS, 0);
3998 		break;
3999 	case SNK_TRYWAIT_VBUS:
4000 		if (!tcpm_port_is_sink(port))
4001 			tcpm_set_state(port, SNK_TRYWAIT_DEBOUNCE, 0);
4002 		break;
4003 	case SNK_TRYWAIT:
4004 		/* Do nothing, waiting for tCCDebounce */
4005 		break;
4006 	case PR_SWAP_SNK_SRC_SINK_OFF:
4007 	case PR_SWAP_SRC_SNK_TRANSITION_OFF:
4008 	case PR_SWAP_SRC_SNK_SOURCE_OFF:
4009 	case PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED:
4010 	case PR_SWAP_SNK_SRC_SOURCE_ON:
4011 		/*
4012 		 * CC state change is expected in PR_SWAP
4013 		 * Ignore it.
4014 		 */
4015 		break;
4016 	case FR_SWAP_SEND:
4017 	case FR_SWAP_SEND_TIMEOUT:
4018 	case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
4019 	case FR_SWAP_SNK_SRC_NEW_SINK_READY:
4020 	case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED:
4021 		/* Do nothing, CC change expected */
4022 		break;
4023 
4024 	case PORT_RESET:
4025 	case PORT_RESET_WAIT_OFF:
4026 		/*
4027 		 * State set back to default mode once the timer completes.
4028 		 * Ignore CC changes here.
4029 		 */
4030 		break;
4031 
4032 	default:
4033 		if (tcpm_port_is_disconnected(port))
4034 			tcpm_set_state(port, unattached_state(port), 0);
4035 		break;
4036 	}
4037 }
4038 
4039 static void _tcpm_pd_vbus_on(struct tcpm_port *port)
4040 {
4041 	tcpm_log_force(port, "VBUS on");
4042 	port->vbus_present = true;
4043 	switch (port->state) {
4044 	case SNK_TRANSITION_SINK_VBUS:
4045 		port->explicit_contract = true;
4046 		tcpm_set_state(port, SNK_READY, 0);
4047 		break;
4048 	case SNK_DISCOVERY:
4049 		tcpm_set_state(port, SNK_DISCOVERY, 0);
4050 		break;
4051 
4052 	case SNK_DEBOUNCED:
4053 		tcpm_set_state(port, tcpm_try_src(port) ? SRC_TRY
4054 							: SNK_ATTACHED,
4055 				       0);
4056 		break;
4057 	case SNK_HARD_RESET_WAIT_VBUS:
4058 		tcpm_set_state(port, SNK_HARD_RESET_SINK_ON, 0);
4059 		break;
4060 	case SRC_ATTACHED:
4061 		tcpm_set_state(port, SRC_STARTUP, 0);
4062 		break;
4063 	case SRC_HARD_RESET_VBUS_ON:
4064 		tcpm_set_state(port, SRC_STARTUP, 0);
4065 		break;
4066 
4067 	case SNK_TRY:
4068 		/* Do nothing, waiting for timeout */
4069 		break;
4070 	case SRC_TRYWAIT:
4071 		/* Do nothing, waiting for Rd to be detected */
4072 		break;
4073 	case SRC_TRYWAIT_DEBOUNCE:
4074 		tcpm_set_state(port, SRC_TRYWAIT, 0);
4075 		break;
4076 	case SNK_TRY_WAIT_DEBOUNCE:
4077 		/* Do nothing, waiting for PD_DEBOUNCE to be done */
4078 		break;
4079 	case SNK_TRYWAIT:
4080 		/* Do nothing, waiting for tCCDebounce */
4081 		break;
4082 	case SNK_TRYWAIT_VBUS:
4083 		if (tcpm_port_is_sink(port))
4084 			tcpm_set_state(port, SNK_ATTACHED, 0);
4085 		break;
4086 	case SNK_TRYWAIT_DEBOUNCE:
4087 		/* Do nothing, waiting for Rp */
4088 		break;
4089 	case SRC_TRY_WAIT:
4090 	case SRC_TRY_DEBOUNCE:
4091 		/* Do nothing, waiting for sink detection */
4092 		break;
4093 	case FR_SWAP_SNK_SRC_NEW_SINK_READY:
4094 		tcpm_set_state(port, FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED, 0);
4095 		break;
4096 
4097 	case PORT_RESET:
4098 	case PORT_RESET_WAIT_OFF:
4099 		/*
4100 		 * State set back to default mode once the timer completes.
4101 		 * Ignore vbus changes here.
4102 		 */
4103 		break;
4104 
4105 	default:
4106 		break;
4107 	}
4108 }
4109 
4110 static void _tcpm_pd_vbus_off(struct tcpm_port *port)
4111 {
4112 	tcpm_log_force(port, "VBUS off");
4113 	port->vbus_present = false;
4114 	port->vbus_never_low = false;
4115 	switch (port->state) {
4116 	case SNK_HARD_RESET_SINK_OFF:
4117 		tcpm_set_state(port, SNK_HARD_RESET_WAIT_VBUS, 0);
4118 		break;
4119 	case SRC_HARD_RESET_VBUS_OFF:
4120 		/*
4121 		 * After establishing the vSafe0V voltage condition on VBUS, the Source Shall wait
4122 		 * tSrcRecover before re-applying VCONN and restoring VBUS to vSafe5V.
4123 		 */
4124 		tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, PD_T_SRC_RECOVER);
4125 		break;
4126 	case HARD_RESET_SEND:
4127 		break;
4128 
4129 	case SNK_TRY:
4130 		/* Do nothing, waiting for timeout */
4131 		break;
4132 	case SRC_TRYWAIT:
4133 		/* Hand over to state machine if needed */
4134 		if (tcpm_port_is_source(port))
4135 			tcpm_set_state(port, SRC_TRYWAIT_DEBOUNCE, 0);
4136 		break;
4137 	case SNK_TRY_WAIT_DEBOUNCE:
4138 		/* Do nothing, waiting for PD_DEBOUNCE to be done */
4139 		break;
4140 	case SNK_TRYWAIT:
4141 	case SNK_TRYWAIT_VBUS:
4142 	case SNK_TRYWAIT_DEBOUNCE:
4143 		break;
4144 	case SNK_ATTACH_WAIT:
4145 		tcpm_set_state(port, SNK_UNATTACHED, 0);
4146 		break;
4147 
4148 	case SNK_NEGOTIATE_CAPABILITIES:
4149 		break;
4150 
4151 	case PR_SWAP_SRC_SNK_TRANSITION_OFF:
4152 		tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF, 0);
4153 		break;
4154 
4155 	case PR_SWAP_SNK_SRC_SINK_OFF:
4156 		/* Do nothing, expected */
4157 		break;
4158 
4159 	case PORT_RESET_WAIT_OFF:
4160 		tcpm_set_state(port, tcpm_default_state(port), 0);
4161 		break;
4162 
4163 	case SRC_TRY_WAIT:
4164 	case SRC_TRY_DEBOUNCE:
4165 		/* Do nothing, waiting for sink detection */
4166 		break;
4167 
4168 	case PORT_RESET:
4169 		/*
4170 		 * State set back to default mode once the timer completes.
4171 		 * Ignore vbus changes here.
4172 		 */
4173 		break;
4174 
4175 	case FR_SWAP_SEND:
4176 	case FR_SWAP_SEND_TIMEOUT:
4177 	case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
4178 	case FR_SWAP_SNK_SRC_NEW_SINK_READY:
4179 	case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED:
4180 		/* Do nothing, vbus drop expected */
4181 		break;
4182 
4183 	default:
4184 		if (port->pwr_role == TYPEC_SINK &&
4185 		    port->attached)
4186 			tcpm_set_state(port, SNK_UNATTACHED, 0);
4187 		break;
4188 	}
4189 }
4190 
4191 static void _tcpm_pd_hard_reset(struct tcpm_port *port)
4192 {
4193 	tcpm_log_force(port, "Received hard reset");
4194 	if (port->bist_request == BDO_MODE_TESTDATA && port->tcpc->set_bist_data)
4195 		port->tcpc->set_bist_data(port->tcpc, false);
4196 
4197 	/*
4198 	 * If we keep receiving hard reset requests, executing the hard reset
4199 	 * must have failed. Revert to error recovery if that happens.
4200 	 */
4201 	tcpm_set_state(port,
4202 		       port->hard_reset_count < PD_N_HARD_RESET_COUNT ?
4203 				HARD_RESET_START : ERROR_RECOVERY,
4204 		       0);
4205 }
4206 
4207 static void tcpm_pd_event_handler(struct kthread_work *work)
4208 {
4209 	struct tcpm_port *port = container_of(work, struct tcpm_port,
4210 					      event_work);
4211 	u32 events;
4212 
4213 	mutex_lock(&port->lock);
4214 
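	/*
	 * Snapshot and clear pd_events under the spinlock, then drop the
	 * lock while handling them so the low-level driver can post new
	 * events in the meantime.
	 */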
4215 	spin_lock(&port->pd_event_lock);
4216 	while (port->pd_events) {
4217 		events = port->pd_events;
4218 		port->pd_events = 0;
4219 		spin_unlock(&port->pd_event_lock);
4220 		if (events & TCPM_RESET_EVENT)
4221 			_tcpm_pd_hard_reset(port);
4222 		if (events & TCPM_VBUS_EVENT) {
4223 			bool vbus;
4224 
4225 			vbus = port->tcpc->get_vbus(port->tcpc);
4226 			if (vbus)
4227 				_tcpm_pd_vbus_on(port);
4228 			else
4229 				_tcpm_pd_vbus_off(port);
4230 		}
4231 		if (events & TCPM_CC_EVENT) {
4232 			enum typec_cc_status cc1, cc2;
4233 
4234 			if (port->tcpc->get_cc(port->tcpc, &cc1, &cc2) == 0)
4235 				_tcpm_cc_change(port, cc1, cc2);
4236 		}
4237 		if (events & TCPM_FRS_EVENT) {
4238 			if (port->state == SNK_READY)
4239 				tcpm_set_state(port, FR_SWAP_SEND, 0);
4240 			else
4241 				tcpm_log(port, "Discarding FRS_SIGNAL! Not in sink ready");
4242 		}
4243 		if (events & TCPM_SOURCING_VBUS) {
4244 			tcpm_log(port, "sourcing vbus");
4245 			/*
4246 			 * In the fast role swap case the TCPC autonomously sources vbus, so set
4247 			 * vbus_source to true here as TCPM wouldn't have called tcpm_set_vbus.
4248 			 *
4249 			 * When vbus is sourced on command from TCPM, i.e. TCPM called
4250 			 * tcpm_set_vbus to source vbus, vbus_source is already true.
4251 			 */
4252 			port->vbus_source = true;
4253 			_tcpm_pd_vbus_on(port);
4254 		}
4255 
4256 		spin_lock(&port->pd_event_lock);
4257 	}
4258 	spin_unlock(&port->pd_event_lock);
4259 	mutex_unlock(&port->lock);
4260 }
4261 
4262 void tcpm_cc_change(struct tcpm_port *port)
4263 {
4264 	spin_lock(&port->pd_event_lock);
4265 	port->pd_events |= TCPM_CC_EVENT;
4266 	spin_unlock(&port->pd_event_lock);
4267 	kthread_queue_work(port->wq, &port->event_work);
4268 }
4269 EXPORT_SYMBOL_GPL(tcpm_cc_change);
4270 
4271 void tcpm_vbus_change(struct tcpm_port *port)
4272 {
4273 	spin_lock(&port->pd_event_lock);
4274 	port->pd_events |= TCPM_VBUS_EVENT;
4275 	spin_unlock(&port->pd_event_lock);
4276 	kthread_queue_work(port->wq, &port->event_work);
4277 }
4278 EXPORT_SYMBOL_GPL(tcpm_vbus_change);
4279 
4280 void tcpm_pd_hard_reset(struct tcpm_port *port)
4281 {
4282 	spin_lock(&port->pd_event_lock);
4283 	port->pd_events = TCPM_RESET_EVENT;
4284 	spin_unlock(&port->pd_event_lock);
4285 	kthread_queue_work(port->wq, &port->event_work);
4286 }
4287 EXPORT_SYMBOL_GPL(tcpm_pd_hard_reset);
4288 
4289 void tcpm_sink_frs(struct tcpm_port *port)
4290 {
4291 	spin_lock(&port->pd_event_lock);
4292 	port->pd_events |= TCPM_FRS_EVENT;
4293 	spin_unlock(&port->pd_event_lock);
4294 	kthread_queue_work(port->wq, &port->event_work);
4295 }
4296 EXPORT_SYMBOL_GPL(tcpm_sink_frs);
4297 
4298 void tcpm_sourcing_vbus(struct tcpm_port *port)
4299 {
4300 	spin_lock(&port->pd_event_lock);
4301 	port->pd_events |= TCPM_SOURCING_VBUS;
4302 	spin_unlock(&port->pd_event_lock);
4303 	kthread_queue_work(port->wq, &port->event_work);
4304 }
4305 EXPORT_SYMBOL_GPL(tcpm_sourcing_vbus);
4306 
4307 static void tcpm_enable_frs_work(struct kthread_work *work)
4308 {
4309 	struct tcpm_port *port = container_of(work, struct tcpm_port, enable_frs);
4310 
4311 	mutex_lock(&port->lock);
4312 	/* Not FRS capable */
4313 	if (!port->connected || port->port_type != TYPEC_PORT_DRP ||
4314 	    port->pwr_opmode != TYPEC_PWR_MODE_PD ||
4315 	    !port->tcpc->enable_frs ||
4316 	    /* Sink caps queried */
4317 	    port->sink_cap_done || port->negotiated_rev < PD_REV30)
4318 		goto unlock;
4319 
4320 	/* Send when the state machine is idle */
4321 	if (port->state != SNK_READY || port->vdm_state != VDM_STATE_DONE || port->send_discover)
4322 		goto resched;
4323 
4324 	tcpm_set_state(port, GET_SINK_CAP, 0);
4325 	port->sink_cap_done = true;
4326 
4327 resched:
4328 	mod_enable_frs_delayed_work(port, GET_SINK_CAP_RETRY_MS);
4329 unlock:
4330 	mutex_unlock(&port->lock);
4331 }
4332 
4333 static int tcpm_dr_set(struct typec_port *p, enum typec_data_role data)
4334 {
4335 	struct tcpm_port *port = typec_get_drvdata(p);
4336 	int ret;
4337 
4338 	mutex_lock(&port->swap_lock);
4339 	mutex_lock(&port->lock);
4340 
4341 	if (port->typec_caps.data != TYPEC_PORT_DRD) {
4342 		ret = -EINVAL;
4343 		goto port_unlock;
4344 	}
4345 	if (port->state != SRC_READY && port->state != SNK_READY) {
4346 		ret = -EAGAIN;
4347 		goto port_unlock;
4348 	}
4349 
4350 	if (port->data_role == data) {
4351 		ret = 0;
4352 		goto port_unlock;
4353 	}
4354 
4355 	/*
4356 	 * XXX
4357 	 * 6.3.9: If an alternate mode is active, a request to swap
4358 	 * alternate modes shall trigger a port reset.
4359 	 * Reject data role swap request in this case.
4360 	 */
4361 
4362 	if (!port->pd_capable) {
4363 		/*
4364 		 * If the partner is not PD capable, reset the port to
4365 		 * trigger a role change. This can only work if a preferred
4366 		 * role is configured, and if it matches the requested role.
4367 		 */
4368 		if (port->try_role == TYPEC_NO_PREFERRED_ROLE ||
4369 		    port->try_role == port->pwr_role) {
4370 			ret = -EINVAL;
4371 			goto port_unlock;
4372 		}
4373 		port->non_pd_role_swap = true;
4374 		tcpm_set_state(port, PORT_RESET, 0);
4375 	} else {
4376 		tcpm_set_state(port, DR_SWAP_SEND, 0);
4377 	}
4378 
4379 	port->swap_status = 0;
4380 	port->swap_pending = true;
4381 	reinit_completion(&port->swap_complete);
4382 	mutex_unlock(&port->lock);
4383 
4384 	if (!wait_for_completion_timeout(&port->swap_complete,
4385 				msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT)))
4386 		ret = -ETIMEDOUT;
4387 	else
4388 		ret = port->swap_status;
4389 
4390 	port->non_pd_role_swap = false;
4391 	goto swap_unlock;
4392 
4393 port_unlock:
4394 	mutex_unlock(&port->lock);
4395 swap_unlock:
4396 	mutex_unlock(&port->swap_lock);
4397 	return ret;
4398 }
4399 
4400 static int tcpm_pr_set(struct typec_port *p, enum typec_role role)
4401 {
4402 	struct tcpm_port *port = typec_get_drvdata(p);
4403 	int ret;
4404 
4405 	mutex_lock(&port->swap_lock);
4406 	mutex_lock(&port->lock);
4407 
4408 	if (port->port_type != TYPEC_PORT_DRP) {
4409 		ret = -EINVAL;
4410 		goto port_unlock;
4411 	}
4412 	if (port->state != SRC_READY && port->state != SNK_READY) {
4413 		ret = -EAGAIN;
4414 		goto port_unlock;
4415 	}
4416 
4417 	if (role == port->pwr_role) {
4418 		ret = 0;
4419 		goto port_unlock;
4420 	}
4421 
4422 	port->swap_status = 0;
4423 	port->swap_pending = true;
4424 	reinit_completion(&port->swap_complete);
4425 	tcpm_set_state(port, PR_SWAP_SEND, 0);
4426 	mutex_unlock(&port->lock);
4427 
4428 	if (!wait_for_completion_timeout(&port->swap_complete,
4429 				msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT)))
4430 		ret = -ETIMEDOUT;
4431 	else
4432 		ret = port->swap_status;
4433 
4434 	goto swap_unlock;
4435 
4436 port_unlock:
4437 	mutex_unlock(&port->lock);
4438 swap_unlock:
4439 	mutex_unlock(&port->swap_lock);
4440 	return ret;
4441 }
4442 
4443 static int tcpm_vconn_set(struct typec_port *p, enum typec_role role)
4444 {
4445 	struct tcpm_port *port = typec_get_drvdata(p);
4446 	int ret;
4447 
4448 	mutex_lock(&port->swap_lock);
4449 	mutex_lock(&port->lock);
4450 
4451 	if (port->state != SRC_READY && port->state != SNK_READY) {
4452 		ret = -EAGAIN;
4453 		goto port_unlock;
4454 	}
4455 
4456 	if (role == port->vconn_role) {
4457 		ret = 0;
4458 		goto port_unlock;
4459 	}
4460 
4461 	port->swap_status = 0;
4462 	port->swap_pending = true;
4463 	reinit_completion(&port->swap_complete);
4464 	tcpm_set_state(port, VCONN_SWAP_SEND, 0);
4465 	mutex_unlock(&port->lock);
4466 
4467 	if (!wait_for_completion_timeout(&port->swap_complete,
4468 				msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT)))
4469 		ret = -ETIMEDOUT;
4470 	else
4471 		ret = port->swap_status;
4472 
4473 	goto swap_unlock;
4474 
4475 port_unlock:
4476 	mutex_unlock(&port->lock);
4477 swap_unlock:
4478 	mutex_unlock(&port->swap_lock);
4479 	return ret;
4480 }
4481 
4482 static int tcpm_try_role(struct typec_port *p, int role)
4483 {
4484 	struct tcpm_port *port = typec_get_drvdata(p);
4485 	struct tcpc_dev	*tcpc = port->tcpc;
4486 	int ret = 0;
4487 
4488 	mutex_lock(&port->lock);
4489 	if (tcpc->try_role)
4490 		ret = tcpc->try_role(tcpc, role);
4491 	if (!ret)
4492 		port->try_role = role;
4493 	port->try_src_count = 0;
4494 	port->try_snk_count = 0;
4495 	mutex_unlock(&port->lock);
4496 
4497 	return ret;
4498 }
4499 
4500 static int tcpm_pps_set_op_curr(struct tcpm_port *port, u16 op_curr)
4501 {
4502 	unsigned int target_mw;
4503 	int ret;
4504 
4505 	mutex_lock(&port->swap_lock);
4506 	mutex_lock(&port->lock);
4507 
4508 	if (!port->pps_data.active) {
4509 		ret = -EOPNOTSUPP;
4510 		goto port_unlock;
4511 	}
4512 
4513 	if (port->state != SNK_READY) {
4514 		ret = -EAGAIN;
4515 		goto port_unlock;
4516 	}
4517 
4518 	if (op_curr > port->pps_data.max_curr) {
4519 		ret = -EINVAL;
4520 		goto port_unlock;
4521 	}
4522 
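	/*
	 * Reject operating points whose power would fall below the
	 * sink's minimum operating power (operating_snk_mw, taken from
	 * the op-sink-microwatt firmware property).
	 */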
4523 	target_mw = (op_curr * port->pps_data.out_volt) / 1000;
4524 	if (target_mw < port->operating_snk_mw) {
4525 		ret = -EINVAL;
4526 		goto port_unlock;
4527 	}
4528 
4529 	/* Round down operating current to align with PPS valid steps */
4530 	op_curr = op_curr - (op_curr % RDO_PROG_CURR_MA_STEP);
4531 
4532 	reinit_completion(&port->pps_complete);
4533 	port->pps_data.op_curr = op_curr;
4534 	port->pps_status = 0;
4535 	port->pps_pending = true;
4536 	tcpm_set_state(port, SNK_NEGOTIATE_PPS_CAPABILITIES, 0);
4537 	mutex_unlock(&port->lock);
4538 
4539 	if (!wait_for_completion_timeout(&port->pps_complete,
4540 				msecs_to_jiffies(PD_PPS_CTRL_TIMEOUT)))
4541 		ret = -ETIMEDOUT;
4542 	else
4543 		ret = port->pps_status;
4544 
4545 	goto swap_unlock;
4546 
4547 port_unlock:
4548 	mutex_unlock(&port->lock);
4549 swap_unlock:
4550 	mutex_unlock(&port->swap_lock);
4551 
4552 	return ret;
4553 }
4554 
4555 static int tcpm_pps_set_out_volt(struct tcpm_port *port, u16 out_volt)
4556 {
4557 	unsigned int target_mw;
4558 	int ret;
4559 
4560 	mutex_lock(&port->swap_lock);
4561 	mutex_lock(&port->lock);
4562 
4563 	if (!port->pps_data.active) {
4564 		ret = -EOPNOTSUPP;
4565 		goto port_unlock;
4566 	}
4567 
4568 	if (port->state != SNK_READY) {
4569 		ret = -EAGAIN;
4570 		goto port_unlock;
4571 	}
4572 
4573 	if (out_volt < port->pps_data.min_volt ||
4574 	    out_volt > port->pps_data.max_volt) {
4575 		ret = -EINVAL;
4576 		goto port_unlock;
4577 	}
4578 
4579 	target_mw = (port->pps_data.op_curr * out_volt) / 1000;
4580 	if (target_mw < port->operating_snk_mw) {
4581 		ret = -EINVAL;
4582 		goto port_unlock;
4583 	}
4584 
4585 	/* Round down output voltage to align with PPS valid steps */
4586 	out_volt = out_volt - (out_volt % RDO_PROG_VOLT_MV_STEP);
4587 
4588 	reinit_completion(&port->pps_complete);
4589 	port->pps_data.out_volt = out_volt;
4590 	port->pps_status = 0;
4591 	port->pps_pending = true;
4592 	tcpm_set_state(port, SNK_NEGOTIATE_PPS_CAPABILITIES, 0);
4593 	mutex_unlock(&port->lock);
4594 
4595 	if (!wait_for_completion_timeout(&port->pps_complete,
4596 				msecs_to_jiffies(PD_PPS_CTRL_TIMEOUT)))
4597 		ret = -ETIMEDOUT;
4598 	else
4599 		ret = port->pps_status;
4600 
4601 	goto swap_unlock;
4602 
4603 port_unlock:
4604 	mutex_unlock(&port->lock);
4605 swap_unlock:
4606 	mutex_unlock(&port->swap_lock);
4607 
4608 	return ret;
4609 }
4610 
4611 static int tcpm_pps_activate(struct tcpm_port *port, bool activate)
4612 {
4613 	int ret = 0;
4614 
4615 	mutex_lock(&port->swap_lock);
4616 	mutex_lock(&port->lock);
4617 
4618 	if (!port->pps_data.supported) {
4619 		ret = -EOPNOTSUPP;
4620 		goto port_unlock;
4621 	}
4622 
4623 	/* Trying to deactivate PPS when already deactivated so just bail */
4624 	if (!port->pps_data.active && !activate)
4625 		goto port_unlock;
4626 
4627 	if (port->state != SNK_READY) {
4628 		ret = -EAGAIN;
4629 		goto port_unlock;
4630 	}
4631 
4632 	reinit_completion(&port->pps_complete);
4633 	port->pps_status = 0;
4634 	port->pps_pending = true;
4635 
4636 	/* Trigger PPS request or move back to standard PDO contract */
4637 	if (activate) {
4638 		port->pps_data.out_volt = port->supply_voltage;
4639 		port->pps_data.op_curr = port->current_limit;
4640 		tcpm_set_state(port, SNK_NEGOTIATE_PPS_CAPABILITIES, 0);
4641 	} else {
4642 		tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0);
4643 	}
4644 	mutex_unlock(&port->lock);
4645 
4646 	if (!wait_for_completion_timeout(&port->pps_complete,
4647 				msecs_to_jiffies(PD_PPS_CTRL_TIMEOUT)))
4648 		ret = -ETIMEDOUT;
4649 	else
4650 		ret = port->pps_status;
4651 
4652 	goto swap_unlock;
4653 
4654 port_unlock:
4655 	mutex_unlock(&port->lock);
4656 swap_unlock:
4657 	mutex_unlock(&port->swap_lock);
4658 
4659 	return ret;
4660 }
4661 
4662 static void tcpm_init(struct tcpm_port *port)
4663 {
4664 	enum typec_cc_status cc1, cc2;
4665 
4666 	port->tcpc->init(port->tcpc);
4667 
4668 	tcpm_reset_port(port);
4669 
4670 	/*
4671 	 * XXX
4672 	 * Should possibly wait for VBUS to settle if it was enabled locally
4673 	 * since tcpm_reset_port() will disable VBUS.
4674 	 */
4675 	port->vbus_present = port->tcpc->get_vbus(port->tcpc);
4676 	if (port->vbus_present)
4677 		port->vbus_never_low = true;
4678 
4679 	tcpm_set_state(port, tcpm_default_state(port), 0);
4680 
4681 	if (port->tcpc->get_cc(port->tcpc, &cc1, &cc2) == 0)
4682 		_tcpm_cc_change(port, cc1, cc2);
4683 
4684 	/*
4685 	 * Some adapters need a clean slate at startup, and won't recover
4686 	 * otherwise. So do not try to be fancy; just force a clean disconnect.
4687 	 */
4688 	tcpm_set_state(port, PORT_RESET, 0);
4689 }
4690 
4691 static int tcpm_port_type_set(struct typec_port *p, enum typec_port_type type)
4692 {
4693 	struct tcpm_port *port = typec_get_drvdata(p);
4694 
4695 	mutex_lock(&port->lock);
4696 	if (type == port->port_type)
4697 		goto port_unlock;
4698 
4699 	port->port_type = type;
4700 
4701 	if (!port->connected) {
4702 		tcpm_set_state(port, PORT_RESET, 0);
4703 	} else if (type == TYPEC_PORT_SNK) {
4704 		if (!(port->pwr_role == TYPEC_SINK &&
4705 		      port->data_role == TYPEC_DEVICE))
4706 			tcpm_set_state(port, PORT_RESET, 0);
4707 	} else if (type == TYPEC_PORT_SRC) {
4708 		if (!(port->pwr_role == TYPEC_SOURCE &&
4709 		      port->data_role == TYPEC_HOST))
4710 			tcpm_set_state(port, PORT_RESET, 0);
4711 	}
4712 
4713 port_unlock:
4714 	mutex_unlock(&port->lock);
4715 	return 0;
4716 }
4717 
4718 static const struct typec_operations tcpm_ops = {
4719 	.try_role = tcpm_try_role,
4720 	.dr_set = tcpm_dr_set,
4721 	.pr_set = tcpm_pr_set,
4722 	.vconn_set = tcpm_vconn_set,
4723 	.port_type_set = tcpm_port_type_set
4724 };
4725 
4726 void tcpm_tcpc_reset(struct tcpm_port *port)
4727 {
4728 	mutex_lock(&port->lock);
4729 	/* XXX: Maintain PD connection if possible? */
4730 	tcpm_init(port);
4731 	mutex_unlock(&port->lock);
4732 }
4733 EXPORT_SYMBOL_GPL(tcpm_tcpc_reset);
4734 
4735 static int tcpm_fw_get_caps(struct tcpm_port *port,
4736 			    struct fwnode_handle *fwnode)
4737 {
4738 	const char *cap_str;
4739 	int ret;
4740 	u32 mw, frs_current;
4741 
4742 	if (!fwnode)
4743 		return -EINVAL;
4744 
4745 	/* USB data support is optional */
4746 	ret = fwnode_property_read_string(fwnode, "data-role", &cap_str);
4747 	if (ret == 0) {
4748 		ret = typec_find_port_data_role(cap_str);
4749 		if (ret < 0)
4750 			return ret;
4751 		port->typec_caps.data = ret;
4752 	}
4753 
4754 	ret = fwnode_property_read_string(fwnode, "power-role", &cap_str);
4755 	if (ret < 0)
4756 		return ret;
4757 
4758 	ret = typec_find_port_power_role(cap_str);
4759 	if (ret < 0)
4760 		return ret;
4761 	port->typec_caps.type = ret;
4762 	port->port_type = port->typec_caps.type;
4763 
4764 	if (port->port_type == TYPEC_PORT_SNK)
4765 		goto sink;
4766 
4767 	/* Get source pdos */
4768 	ret = fwnode_property_count_u32(fwnode, "source-pdos");
4769 	if (ret <= 0)
4770 		return -EINVAL;
4771 
4772 	port->nr_src_pdo = min(ret, PDO_MAX_OBJECTS);
4773 	ret = fwnode_property_read_u32_array(fwnode, "source-pdos",
4774 					     port->src_pdo, port->nr_src_pdo);
4775 	if ((ret < 0) || tcpm_validate_caps(port, port->src_pdo,
4776 					    port->nr_src_pdo))
4777 		return -EINVAL;
4778 
4779 	if (port->port_type == TYPEC_PORT_SRC)
4780 		return 0;
4781 
4782 	/* Get the preferred power role for DRP */
4783 	ret = fwnode_property_read_string(fwnode, "try-power-role", &cap_str);
4784 	if (ret < 0)
4785 		return ret;
4786 
4787 	port->typec_caps.prefer_role = typec_find_power_role(cap_str);
4788 	if (port->typec_caps.prefer_role < 0)
4789 		return -EINVAL;
4790 sink:
4791 	/* Get sink pdos */
4792 	ret = fwnode_property_count_u32(fwnode, "sink-pdos");
4793 	if (ret <= 0)
4794 		return -EINVAL;
4795 
4796 	port->nr_snk_pdo = min(ret, PDO_MAX_OBJECTS);
4797 	ret = fwnode_property_read_u32_array(fwnode, "sink-pdos",
4798 					     port->snk_pdo, port->nr_snk_pdo);
4799 	if ((ret < 0) || tcpm_validate_caps(port, port->snk_pdo,
4800 					    port->nr_snk_pdo))
4801 		return -EINVAL;
4802 
4803 	if (fwnode_property_read_u32(fwnode, "op-sink-microwatt", &mw) < 0)
4804 		return -EINVAL;
4805 	port->operating_snk_mw = mw / 1000;
4806 
4807 	port->self_powered = fwnode_property_read_bool(fwnode, "self-powered");
4808 
4809 	/* FRS can only be supported by DRP ports */
4810 	if (port->port_type == TYPEC_PORT_DRP) {
4811 		ret = fwnode_property_read_u32(fwnode, "frs-typec-current", &frs_current);
4812 		if (ret >= 0 && frs_current <= FRS_5V_3A)
4813 			port->frs_current = frs_current;
4814 	}
4815 
4816 	return 0;
4817 }
4818 
4819 /* Power Supply access to expose source power information */
4820 enum tcpm_psy_online_states {
4821 	TCPM_PSY_OFFLINE = 0,
4822 	TCPM_PSY_FIXED_ONLINE,
4823 	TCPM_PSY_PROG_ONLINE,
4824 };
4825 
4826 static enum power_supply_property tcpm_psy_props[] = {
4827 	POWER_SUPPLY_PROP_USB_TYPE,
4828 	POWER_SUPPLY_PROP_ONLINE,
4829 	POWER_SUPPLY_PROP_VOLTAGE_MIN,
4830 	POWER_SUPPLY_PROP_VOLTAGE_MAX,
4831 	POWER_SUPPLY_PROP_VOLTAGE_NOW,
4832 	POWER_SUPPLY_PROP_CURRENT_MAX,
4833 	POWER_SUPPLY_PROP_CURRENT_NOW,
4834 };
4835 
4836 static int tcpm_psy_get_online(struct tcpm_port *port,
4837 			       union power_supply_propval *val)
4838 {
4839 	if (port->vbus_charge) {
4840 		if (port->pps_data.active)
4841 			val->intval = TCPM_PSY_PROG_ONLINE;
4842 		else
4843 			val->intval = TCPM_PSY_FIXED_ONLINE;
4844 	} else {
4845 		val->intval = TCPM_PSY_OFFLINE;
4846 	}
4847 
4848 	return 0;
4849 }
4850 
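/*
 * TCPM tracks voltages in mV and currents in mA, while the power_supply
 * framework reports microvolts and microamps, hence the *1000 scaling in
 * the getters below and the /1000 in tcpm_psy_set_prop().
 */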
4851 static int tcpm_psy_get_voltage_min(struct tcpm_port *port,
4852 				    union power_supply_propval *val)
4853 {
4854 	if (port->pps_data.active)
4855 		val->intval = port->pps_data.min_volt * 1000;
4856 	else
4857 		val->intval = port->supply_voltage * 1000;
4858 
4859 	return 0;
4860 }
4861 
4862 static int tcpm_psy_get_voltage_max(struct tcpm_port *port,
4863 				    union power_supply_propval *val)
4864 {
4865 	if (port->pps_data.active)
4866 		val->intval = port->pps_data.max_volt * 1000;
4867 	else
4868 		val->intval = port->supply_voltage * 1000;
4869 
4870 	return 0;
4871 }
4872 
4873 static int tcpm_psy_get_voltage_now(struct tcpm_port *port,
4874 				    union power_supply_propval *val)
4875 {
4876 	val->intval = port->supply_voltage * 1000;
4877 
4878 	return 0;
4879 }
4880 
4881 static int tcpm_psy_get_current_max(struct tcpm_port *port,
4882 				    union power_supply_propval *val)
4883 {
4884 	if (port->pps_data.active)
4885 		val->intval = port->pps_data.max_curr * 1000;
4886 	else
4887 		val->intval = port->current_limit * 1000;
4888 
4889 	return 0;
4890 }
4891 
4892 static int tcpm_psy_get_current_now(struct tcpm_port *port,
4893 				    union power_supply_propval *val)
4894 {
4895 	val->intval = port->current_limit * 1000;
4896 
4897 	return 0;
4898 }
4899 
4900 static int tcpm_psy_get_prop(struct power_supply *psy,
4901 			     enum power_supply_property psp,
4902 			     union power_supply_propval *val)
4903 {
4904 	struct tcpm_port *port = power_supply_get_drvdata(psy);
4905 	int ret = 0;
4906 
4907 	switch (psp) {
4908 	case POWER_SUPPLY_PROP_USB_TYPE:
4909 		val->intval = port->usb_type;
4910 		break;
4911 	case POWER_SUPPLY_PROP_ONLINE:
4912 		ret = tcpm_psy_get_online(port, val);
4913 		break;
4914 	case POWER_SUPPLY_PROP_VOLTAGE_MIN:
4915 		ret = tcpm_psy_get_voltage_min(port, val);
4916 		break;
4917 	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
4918 		ret = tcpm_psy_get_voltage_max(port, val);
4919 		break;
4920 	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
4921 		ret = tcpm_psy_get_voltage_now(port, val);
4922 		break;
4923 	case POWER_SUPPLY_PROP_CURRENT_MAX:
4924 		ret = tcpm_psy_get_current_max(port, val);
4925 		break;
4926 	case POWER_SUPPLY_PROP_CURRENT_NOW:
4927 		ret = tcpm_psy_get_current_now(port, val);
4928 		break;
4929 	default:
4930 		ret = -EINVAL;
4931 		break;
4932 	}
4933 
4934 	return ret;
4935 }
4936 
4937 static int tcpm_psy_set_online(struct tcpm_port *port,
4938 			       const union power_supply_propval *val)
4939 {
4940 	int ret;
4941 
4942 	switch (val->intval) {
4943 	case TCPM_PSY_FIXED_ONLINE:
4944 		ret = tcpm_pps_activate(port, false);
4945 		break;
4946 	case TCPM_PSY_PROG_ONLINE:
4947 		ret = tcpm_pps_activate(port, true);
4948 		break;
4949 	default:
4950 		ret = -EINVAL;
4951 		break;
4952 	}
4953 
4954 	return ret;
4955 }
4956 
4957 static int tcpm_psy_set_prop(struct power_supply *psy,
4958 			     enum power_supply_property psp,
4959 			     const union power_supply_propval *val)
4960 {
4961 	struct tcpm_port *port = power_supply_get_drvdata(psy);
4962 	int ret;
4963 
4964 	switch (psp) {
4965 	case POWER_SUPPLY_PROP_ONLINE:
4966 		ret = tcpm_psy_set_online(port, val);
4967 		break;
4968 	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
4969 		if (val->intval < port->pps_data.min_volt * 1000 ||
4970 		    val->intval > port->pps_data.max_volt * 1000)
4971 			ret = -EINVAL;
4972 		else
4973 			ret = tcpm_pps_set_out_volt(port, val->intval / 1000);
4974 		break;
4975 	case POWER_SUPPLY_PROP_CURRENT_NOW:
4976 		if (val->intval > port->pps_data.max_curr * 1000)
4977 			ret = -EINVAL;
4978 		else
4979 			ret = tcpm_pps_set_op_curr(port, val->intval / 1000);
4980 		break;
4981 	default:
4982 		ret = -EINVAL;
4983 		break;
4984 	}
4985 
4986 	return ret;
4987 }
4988 
4989 static int tcpm_psy_prop_writeable(struct power_supply *psy,
4990 				   enum power_supply_property psp)
4991 {
4992 	switch (psp) {
4993 	case POWER_SUPPLY_PROP_ONLINE:
4994 	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
4995 	case POWER_SUPPLY_PROP_CURRENT_NOW:
4996 		return 1;
4997 	default:
4998 		return 0;
4999 	}
5000 }
5001 
5002 static enum power_supply_usb_type tcpm_psy_usb_types[] = {
5003 	POWER_SUPPLY_USB_TYPE_C,
5004 	POWER_SUPPLY_USB_TYPE_PD,
5005 	POWER_SUPPLY_USB_TYPE_PD_PPS,
5006 };
5007 
5008 static const char *tcpm_psy_name_prefix = "tcpm-source-psy-";
5009 
5010 static int devm_tcpm_psy_register(struct tcpm_port *port)
5011 {
5012 	struct power_supply_config psy_cfg = {};
5013 	const char *port_dev_name = dev_name(port->dev);
5014 	size_t psy_name_len = strlen(tcpm_psy_name_prefix) +
5015 				     strlen(port_dev_name) + 1;
5016 	char *psy_name;
5017 
5018 	psy_cfg.drv_data = port;
5019 	psy_cfg.fwnode = dev_fwnode(port->dev);
5020 	psy_name = devm_kzalloc(port->dev, psy_name_len, GFP_KERNEL);
5021 	if (!psy_name)
5022 		return -ENOMEM;
5023 
5024 	snprintf(psy_name, psy_name_len, "%s%s", tcpm_psy_name_prefix,
5025 		 port_dev_name);
5026 	port->psy_desc.name = psy_name;
5027 	port->psy_desc.type = POWER_SUPPLY_TYPE_USB;
5028 	port->psy_desc.usb_types = tcpm_psy_usb_types;
5029 	port->psy_desc.num_usb_types = ARRAY_SIZE(tcpm_psy_usb_types);
5030 	port->psy_desc.properties = tcpm_psy_props;
5031 	port->psy_desc.num_properties = ARRAY_SIZE(tcpm_psy_props);
5032 	port->psy_desc.get_property = tcpm_psy_get_prop;
5033 	port->psy_desc.set_property = tcpm_psy_set_prop;
5034 	port->psy_desc.property_is_writeable = tcpm_psy_prop_writeable;
5035 
5036 	port->usb_type = POWER_SUPPLY_USB_TYPE_C;
5037 
5038 	port->psy = devm_power_supply_register(port->dev, &port->psy_desc,
5039 					       &psy_cfg);
5040 
5041 	return PTR_ERR_OR_ZERO(port->psy);
5042 }
5043 
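/*
 * The hrtimer callbacks below only queue kthread work; the state machine
 * itself always runs in process context with port->lock held.
 */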
5044 static enum hrtimer_restart state_machine_timer_handler(struct hrtimer *timer)
5045 {
5046 	struct tcpm_port *port = container_of(timer, struct tcpm_port, state_machine_timer);
5047 
5048 	kthread_queue_work(port->wq, &port->state_machine);
5049 	return HRTIMER_NORESTART;
5050 }
5051 
5052 static enum hrtimer_restart vdm_state_machine_timer_handler(struct hrtimer *timer)
5053 {
5054 	struct tcpm_port *port = container_of(timer, struct tcpm_port, vdm_state_machine_timer);
5055 
5056 	kthread_queue_work(port->wq, &port->vdm_state_machine);
5057 	return HRTIMER_NORESTART;
5058 }
5059 
5060 static enum hrtimer_restart enable_frs_timer_handler(struct hrtimer *timer)
5061 {
5062 	struct tcpm_port *port = container_of(timer, struct tcpm_port, enable_frs_timer);
5063 
5064 	kthread_queue_work(port->wq, &port->enable_frs);
5065 	return HRTIMER_NORESTART;
5066 }
5067 
5068 struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
5069 {
5070 	struct tcpm_port *port;
5071 	int err;
5072 
5073 	if (!dev || !tcpc ||
5074 	    !tcpc->get_vbus || !tcpc->set_cc || !tcpc->get_cc ||
5075 	    !tcpc->set_polarity || !tcpc->set_vconn || !tcpc->set_vbus ||
5076 	    !tcpc->set_pd_rx || !tcpc->set_roles || !tcpc->pd_transmit)
5077 		return ERR_PTR(-EINVAL);
5078 
5079 	port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
5080 	if (!port)
5081 		return ERR_PTR(-ENOMEM);
5082 
5083 	port->dev = dev;
5084 	port->tcpc = tcpc;
5085 
5086 	mutex_init(&port->lock);
5087 	mutex_init(&port->swap_lock);
5088 
5089 	port->wq = kthread_create_worker(0, dev_name(dev));
5090 	if (IS_ERR(port->wq))
5091 		return ERR_CAST(port->wq);
5092 	sched_set_fifo(port->wq->task);
5093 
5094 	kthread_init_work(&port->state_machine, tcpm_state_machine_work);
5095 	kthread_init_work(&port->vdm_state_machine, vdm_state_machine_work);
5096 	kthread_init_work(&port->event_work, tcpm_pd_event_handler);
5097 	kthread_init_work(&port->enable_frs, tcpm_enable_frs_work);
5098 	hrtimer_init(&port->state_machine_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
5099 	port->state_machine_timer.function = state_machine_timer_handler;
5100 	hrtimer_init(&port->vdm_state_machine_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
5101 	port->vdm_state_machine_timer.function = vdm_state_machine_timer_handler;
5102 	hrtimer_init(&port->enable_frs_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
5103 	port->enable_frs_timer.function = enable_frs_timer_handler;
5104 
5105 	spin_lock_init(&port->pd_event_lock);
5106 
5107 	init_completion(&port->tx_complete);
5108 	init_completion(&port->swap_complete);
5109 	init_completion(&port->pps_complete);
5110 	tcpm_debugfs_init(port);
5111 
5112 	err = tcpm_fw_get_caps(port, tcpc->fwnode);
5113 	if (err < 0)
5114 		goto out_destroy_wq;
5115 
5116 	port->try_role = port->typec_caps.prefer_role;
5117 
5118 	port->typec_caps.fwnode = tcpc->fwnode;
5119 	port->typec_caps.revision = 0x0120;	/* Type-C spec release 1.2 */
5120 	port->typec_caps.pd_revision = 0x0300;	/* USB-PD spec release 3.0 */
5121 	port->typec_caps.driver_data = port;
5122 	port->typec_caps.ops = &tcpm_ops;
5123 	port->typec_caps.orientation_aware = 1;
5124 
5125 	port->partner_desc.identity = &port->partner_ident;
5126 	port->port_type = port->typec_caps.type;
5127 
5128 	port->role_sw = usb_role_switch_get(port->dev);
5129 	if (IS_ERR(port->role_sw)) {
5130 		err = PTR_ERR(port->role_sw);
5131 		goto out_destroy_wq;
5132 	}
5133 
5134 	err = devm_tcpm_psy_register(port);
5135 	if (err)
5136 		goto out_role_sw_put;
5137 
5138 	port->typec_port = typec_register_port(port->dev, &port->typec_caps);
5139 	if (IS_ERR(port->typec_port)) {
5140 		err = PTR_ERR(port->typec_port);
5141 		goto out_role_sw_put;
5142 	}
5143 
5144 	mutex_lock(&port->lock);
5145 	tcpm_init(port);
5146 	mutex_unlock(&port->lock);
5147 
5148 	tcpm_log(port, "%s: registered", dev_name(dev));
5149 	return port;
5150 
5151 out_role_sw_put:
5152 	usb_role_switch_put(port->role_sw);
5153 out_destroy_wq:
5154 	tcpm_debugfs_exit(port);
5155 	kthread_destroy_worker(port->wq);
5156 	return ERR_PTR(err);
5157 }
5158 EXPORT_SYMBOL_GPL(tcpm_register_port);
5159 
5160 void tcpm_unregister_port(struct tcpm_port *port)
5161 {
5162 	int i;
5163 
5164 	tcpm_reset_port(port);
5165 	for (i = 0; i < ARRAY_SIZE(port->port_altmode); i++)
5166 		typec_unregister_altmode(port->port_altmode[i]);
5167 	typec_unregister_port(port->typec_port);
5168 	usb_role_switch_put(port->role_sw);
5169 	tcpm_debugfs_exit(port);
5170 	kthread_destroy_worker(port->wq);
5171 }
5172 EXPORT_SYMBOL_GPL(tcpm_unregister_port);
5173 
5174 MODULE_AUTHOR("Guenter Roeck <groeck@chromium.org>");
5175 MODULE_DESCRIPTION("USB Type-C Port Manager");
5176 MODULE_LICENSE("GPL");
5177