drivers/usb/typec/tcpm.c
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * Copyright 2015-2017 Google, Inc
4 *
5 * USB Power Delivery protocol stack.
6 */
7
8 #include <linux/completion.h>
9 #include <linux/debugfs.h>
10 #include <linux/device.h>
11 #include <linux/jiffies.h>
12 #include <linux/kernel.h>
13 #include <linux/module.h>
14 #include <linux/mutex.h>
15 #include <linux/proc_fs.h>
16 #include <linux/sched/clock.h>
17 #include <linux/seq_file.h>
18 #include <linux/slab.h>
19 #include <linux/spinlock.h>
20 #include <linux/usb/pd.h>
21 #include <linux/usb/pd_bdo.h>
22 #include <linux/usb/pd_vdo.h>
23 #include <linux/usb/tcpm.h>
24 #include <linux/usb/typec.h>
25 #include <linux/workqueue.h>
26
27 #define FOREACH_STATE(S) \
28 S(INVALID_STATE), \
29 S(DRP_TOGGLING), \
30 S(SRC_UNATTACHED), \
31 S(SRC_ATTACH_WAIT), \
32 S(SRC_ATTACHED), \
33 S(SRC_STARTUP), \
34 S(SRC_SEND_CAPABILITIES), \
35 S(SRC_NEGOTIATE_CAPABILITIES), \
36 S(SRC_TRANSITION_SUPPLY), \
37 S(SRC_READY), \
38 S(SRC_WAIT_NEW_CAPABILITIES), \
39 \
40 S(SNK_UNATTACHED), \
41 S(SNK_ATTACH_WAIT), \
42 S(SNK_DEBOUNCED), \
43 S(SNK_ATTACHED), \
44 S(SNK_STARTUP), \
45 S(SNK_DISCOVERY), \
46 S(SNK_DISCOVERY_DEBOUNCE), \
47 S(SNK_DISCOVERY_DEBOUNCE_DONE), \
48 S(SNK_WAIT_CAPABILITIES), \
49 S(SNK_NEGOTIATE_CAPABILITIES), \
50 S(SNK_TRANSITION_SINK), \
51 S(SNK_TRANSITION_SINK_VBUS), \
52 S(SNK_READY), \
53 \
54 S(ACC_UNATTACHED), \
55 S(DEBUG_ACC_ATTACHED), \
56 S(AUDIO_ACC_ATTACHED), \
57 S(AUDIO_ACC_DEBOUNCE), \
58 \
59 S(HARD_RESET_SEND), \
60 S(HARD_RESET_START), \
61 S(SRC_HARD_RESET_VBUS_OFF), \
62 S(SRC_HARD_RESET_VBUS_ON), \
63 S(SNK_HARD_RESET_SINK_OFF), \
64 S(SNK_HARD_RESET_WAIT_VBUS), \
65 S(SNK_HARD_RESET_SINK_ON), \
66 \
67 S(SOFT_RESET), \
68 S(SOFT_RESET_SEND), \
69 \
70 S(DR_SWAP_ACCEPT), \
71 S(DR_SWAP_SEND), \
72 S(DR_SWAP_SEND_TIMEOUT), \
73 S(DR_SWAP_CANCEL), \
74 S(DR_SWAP_CHANGE_DR), \
75 \
76 S(PR_SWAP_ACCEPT), \
77 S(PR_SWAP_SEND), \
78 S(PR_SWAP_SEND_TIMEOUT), \
79 S(PR_SWAP_CANCEL), \
80 S(PR_SWAP_START), \
81 S(PR_SWAP_SRC_SNK_TRANSITION_OFF), \
82 S(PR_SWAP_SRC_SNK_SOURCE_OFF), \
83 S(PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED), \
84 S(PR_SWAP_SRC_SNK_SINK_ON), \
85 S(PR_SWAP_SNK_SRC_SINK_OFF), \
86 S(PR_SWAP_SNK_SRC_SOURCE_ON), \
87 S(PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP), \
88 \
89 S(VCONN_SWAP_ACCEPT), \
90 S(VCONN_SWAP_SEND), \
91 S(VCONN_SWAP_SEND_TIMEOUT), \
92 S(VCONN_SWAP_CANCEL), \
93 S(VCONN_SWAP_START), \
94 S(VCONN_SWAP_WAIT_FOR_VCONN), \
95 S(VCONN_SWAP_TURN_ON_VCONN), \
96 S(VCONN_SWAP_TURN_OFF_VCONN), \
97 \
98 S(SNK_TRY), \
99 S(SNK_TRY_WAIT), \
100 S(SNK_TRY_WAIT_DEBOUNCE), \
101 S(SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS), \
102 S(SRC_TRYWAIT), \
103 S(SRC_TRYWAIT_DEBOUNCE), \
104 S(SRC_TRYWAIT_UNATTACHED), \
105 \
106 S(SRC_TRY), \
107 S(SRC_TRY_WAIT), \
108 S(SRC_TRY_DEBOUNCE), \
109 S(SNK_TRYWAIT), \
110 S(SNK_TRYWAIT_DEBOUNCE), \
111 S(SNK_TRYWAIT_VBUS), \
112 S(BIST_RX), \
113 \
114 S(ERROR_RECOVERY), \
115 S(PORT_RESET), \
116 S(PORT_RESET_WAIT_OFF)
117
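/*
 * FOREACH_STATE() is expanded twice below (X-macro pattern): once with
 * GENERATE_ENUM to build enum tcpm_state and once with GENERATE_STRING to
 * build the matching tcpm_states[] name table, keeping the two in sync.
 */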
118 #define GENERATE_ENUM(e) e
119 #define GENERATE_STRING(s) #s
120
121 enum tcpm_state {
122 FOREACH_STATE(GENERATE_ENUM)
123 };
124
125 static const char * const tcpm_states[] = {
126 FOREACH_STATE(GENERATE_STRING)
127 };
128
129 enum vdm_states {
130 VDM_STATE_ERR_BUSY = -3,
131 VDM_STATE_ERR_SEND = -2,
132 VDM_STATE_ERR_TMOUT = -1,
133 VDM_STATE_DONE = 0,
134 /* Anything >0 represents an active state */
135 VDM_STATE_READY = 1,
136 VDM_STATE_BUSY = 2,
137 VDM_STATE_WAIT_RSP_BUSY = 3,
138 };
139
140 enum pd_msg_request {
141 PD_MSG_NONE = 0,
142 PD_MSG_CTRL_REJECT,
143 PD_MSG_CTRL_WAIT,
144 PD_MSG_DATA_SINK_CAP,
145 PD_MSG_DATA_SOURCE_CAP,
146 };
147
148 /* Events from low level driver */
149
150 #define TCPM_CC_EVENT BIT(0)
151 #define TCPM_VBUS_EVENT BIT(1)
152 #define TCPM_RESET_EVENT BIT(2)
153
154 #define LOG_BUFFER_ENTRIES 1024
155 #define LOG_BUFFER_ENTRY_SIZE 128
156
157 /* Alternate mode support */
158
159 #define SVID_DISCOVERY_MAX 16
160
161 struct pd_mode_data {
162 int svid_index; /* current SVID index */
163 int nsvids;
164 u16 svids[SVID_DISCOVERY_MAX];
165 int altmodes; /* number of alternate modes */
166 struct typec_altmode_desc altmode_desc[SVID_DISCOVERY_MAX];
167 };
168
169 struct tcpm_port {
170 struct device *dev;
171
172 struct mutex lock; /* tcpm state machine lock */
173 struct workqueue_struct *wq;
174
175 struct typec_capability typec_caps;
176 struct typec_port *typec_port;
177
178 struct tcpc_dev *tcpc;
179
180 enum typec_role vconn_role;
181 enum typec_role pwr_role;
182 enum typec_data_role data_role;
183 enum typec_pwr_opmode pwr_opmode;
184
185 struct usb_pd_identity partner_ident;
186 struct typec_partner_desc partner_desc;
187 struct typec_partner *partner;
188
189 enum typec_cc_status cc_req;
190
191 enum typec_cc_status cc1;
192 enum typec_cc_status cc2;
193 enum typec_cc_polarity polarity;
194
195 bool attached;
196 bool connected;
197 enum typec_port_type port_type;
198 bool vbus_present;
199 bool vbus_never_low;
200 bool vbus_source;
201 bool vbus_charge;
202
203 bool send_discover;
204 bool op_vsafe5v;
205
206 int try_role;
207 int try_snk_count;
208 int try_src_count;
209
210 enum pd_msg_request queued_message;
211
212 enum tcpm_state enter_state;
213 enum tcpm_state prev_state;
214 enum tcpm_state state;
215 enum tcpm_state delayed_state;
216 unsigned long delayed_runtime;
217 unsigned long delay_ms;
218
219 spinlock_t pd_event_lock;
220 u32 pd_events;
221
222 struct work_struct event_work;
223 struct delayed_work state_machine;
224 struct delayed_work vdm_state_machine;
225 bool state_machine_running;
226
227 struct completion tx_complete;
228 enum tcpm_transmit_status tx_status;
229
230 struct mutex swap_lock; /* swap command lock */
231 bool swap_pending;
232 bool non_pd_role_swap;
233 struct completion swap_complete;
234 int swap_status;
235
236 unsigned int message_id;
237 unsigned int caps_count;
238 unsigned int hard_reset_count;
239 bool pd_capable;
240 bool explicit_contract;
241 unsigned int rx_msgid;
242
243 /* Partner capabilities/requests */
244 u32 sink_request;
245 u32 source_caps[PDO_MAX_OBJECTS];
246 unsigned int nr_source_caps;
247 u32 sink_caps[PDO_MAX_OBJECTS];
248 unsigned int nr_sink_caps;
249
250 /* Local capabilities */
251 u32 src_pdo[PDO_MAX_OBJECTS];
252 unsigned int nr_src_pdo;
253 u32 snk_pdo[PDO_MAX_OBJECTS];
254 unsigned int nr_snk_pdo;
255 u32 snk_vdo[VDO_MAX_OBJECTS];
256 unsigned int nr_snk_vdo;
257
258 unsigned int max_snk_mv;
259 unsigned int max_snk_ma;
260 unsigned int max_snk_mw;
261 unsigned int operating_snk_mw;
262
263 /* Requested current / voltage */
264 u32 current_limit;
265 u32 supply_voltage;
266
267 u32 bist_request;
268
269 /* PD state for Vendor Defined Messages */
270 enum vdm_states vdm_state;
271 u32 vdm_retries;
272 /* next Vendor Defined Message to send */
273 u32 vdo_data[VDO_MAX_SIZE];
274 u8 vdo_count;
275 /* VDO to retry if UFP responder replied busy */
276 u32 vdo_retry;
277
278 /* Alternate mode data */
279
280 struct pd_mode_data mode_data;
281 struct typec_altmode *partner_altmode[SVID_DISCOVERY_MAX];
282 struct typec_altmode *port_altmode[SVID_DISCOVERY_MAX];
283
284 /* Deadline in jiffies to exit src_try_wait state */
285 unsigned long max_wait;
286
287 #ifdef CONFIG_DEBUG_FS
288 struct dentry *dentry;
289 struct mutex logbuffer_lock; /* log buffer access lock */
290 int logbuffer_head;
291 int logbuffer_tail;
292 u8 *logbuffer[LOG_BUFFER_ENTRIES];
293 #endif
294 };
295
296 struct pd_rx_event {
297 struct work_struct work;
298 struct tcpm_port *port;
299 struct pd_message msg;
300 };
301
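/*
 * CC line helpers: the port presents as a sink when exactly one CC pin sees
 * the partner's pull-up (Rp), and as a source when exactly one CC pin sees
 * the pull-down (Rd). Rd on both pins indicates a debug accessory, Ra on
 * both pins an audio accessory.
 */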
302 #define tcpm_cc_is_sink(cc) \
303 ((cc) == TYPEC_CC_RP_DEF || (cc) == TYPEC_CC_RP_1_5 || \
304 (cc) == TYPEC_CC_RP_3_0)
305
306 #define tcpm_port_is_sink(port) \
307 ((tcpm_cc_is_sink((port)->cc1) && !tcpm_cc_is_sink((port)->cc2)) || \
308 (tcpm_cc_is_sink((port)->cc2) && !tcpm_cc_is_sink((port)->cc1)))
309
310 #define tcpm_cc_is_source(cc) ((cc) == TYPEC_CC_RD)
311 #define tcpm_cc_is_audio(cc) ((cc) == TYPEC_CC_RA)
312 #define tcpm_cc_is_open(cc) ((cc) == TYPEC_CC_OPEN)
313
314 #define tcpm_port_is_source(port) \
315 ((tcpm_cc_is_source((port)->cc1) && \
316 !tcpm_cc_is_source((port)->cc2)) || \
317 (tcpm_cc_is_source((port)->cc2) && \
318 !tcpm_cc_is_source((port)->cc1)))
319
320 #define tcpm_port_is_debug(port) \
321 (tcpm_cc_is_source((port)->cc1) && tcpm_cc_is_source((port)->cc2))
322
323 #define tcpm_port_is_audio(port) \
324 (tcpm_cc_is_audio((port)->cc1) && tcpm_cc_is_audio((port)->cc2))
325
326 #define tcpm_port_is_audio_detached(port) \
327 ((tcpm_cc_is_audio((port)->cc1) && tcpm_cc_is_open((port)->cc2)) || \
328 (tcpm_cc_is_audio((port)->cc2) && tcpm_cc_is_open((port)->cc1)))
329
330 #define tcpm_try_snk(port) \
331 ((port)->try_snk_count == 0 && (port)->try_role == TYPEC_SINK && \
332 (port)->port_type == TYPEC_PORT_DRP)
333
334 #define tcpm_try_src(port) \
335 ((port)->try_src_count == 0 && (port)->try_role == TYPEC_SOURCE && \
336 (port)->port_type == TYPEC_PORT_DRP)
337
338 static enum tcpm_state tcpm_default_state(struct tcpm_port *port)
339 {
340 if (port->port_type == TYPEC_PORT_DRP) {
341 if (port->try_role == TYPEC_SINK)
342 return SNK_UNATTACHED;
343 else if (port->try_role == TYPEC_SOURCE)
344 return SRC_UNATTACHED;
345 else if (port->tcpc->config->default_role == TYPEC_SINK)
346 return SNK_UNATTACHED;
347 /* Fall through to return SRC_UNATTACHED */
348 } else if (port->port_type == TYPEC_PORT_SNK) {
349 return SNK_UNATTACHED;
350 }
351 return SRC_UNATTACHED;
352 }
353
354 static inline
355 struct tcpm_port *typec_cap_to_tcpm(const struct typec_capability *cap)
356 {
357 return container_of(cap, struct tcpm_port, typec_caps);
358 }
359
360 static bool tcpm_port_is_disconnected(struct tcpm_port *port)
361 {
362 return (!port->attached && port->cc1 == TYPEC_CC_OPEN &&
363 port->cc2 == TYPEC_CC_OPEN) ||
364 (port->attached && ((port->polarity == TYPEC_POLARITY_CC1 &&
365 port->cc1 == TYPEC_CC_OPEN) ||
366 (port->polarity == TYPEC_POLARITY_CC2 &&
367 port->cc2 == TYPEC_CC_OPEN)));
368 }
369
370 /*
371 * Logging
372 */
373
374 #ifdef CONFIG_DEBUG_FS
375
376 static bool tcpm_log_full(struct tcpm_port *port)
377 {
378 return port->logbuffer_tail ==
379 (port->logbuffer_head + 1) % LOG_BUFFER_ENTRIES;
380 }
381
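/*
 * Format one entry into the lazily allocated ring buffer. When the buffer
 * is full, the most recent slot is reused and its text is replaced with an
 * "overflow" marker.
 */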
382 __printf(2, 0)
383 static void _tcpm_log(struct tcpm_port *port, const char *fmt, va_list args)
384 {
385 char tmpbuffer[LOG_BUFFER_ENTRY_SIZE];
386 u64 ts_nsec = local_clock();
387 unsigned long rem_nsec;
388
389 if (!port->logbuffer[port->logbuffer_head]) {
390 port->logbuffer[port->logbuffer_head] =
391 kzalloc(LOG_BUFFER_ENTRY_SIZE, GFP_KERNEL);
392 if (!port->logbuffer[port->logbuffer_head])
393 return;
394 }
395
396 vsnprintf(tmpbuffer, sizeof(tmpbuffer), fmt, args);
397
398 mutex_lock(&port->logbuffer_lock);
399
400 if (tcpm_log_full(port)) {
401 port->logbuffer_head = max(port->logbuffer_head - 1, 0);
402 strcpy(tmpbuffer, "overflow");
403 }
404
405 if (port->logbuffer_head < 0 ||
406 port->logbuffer_head >= LOG_BUFFER_ENTRIES) {
407 dev_warn(port->dev,
408 "Bad log buffer index %d\n", port->logbuffer_head);
409 goto abort;
410 }
411
412 if (!port->logbuffer[port->logbuffer_head]) {
413 dev_warn(port->dev,
414 "Log buffer index %d is NULL\n", port->logbuffer_head);
415 goto abort;
416 }
417
418 rem_nsec = do_div(ts_nsec, 1000000000);
419 scnprintf(port->logbuffer[port->logbuffer_head],
420 LOG_BUFFER_ENTRY_SIZE, "[%5lu.%06lu] %s",
421 (unsigned long)ts_nsec, rem_nsec / 1000,
422 tmpbuffer);
423 port->logbuffer_head = (port->logbuffer_head + 1) % LOG_BUFFER_ENTRIES;
424
425 abort:
426 mutex_unlock(&port->logbuffer_lock);
427 }
428
429 __printf(2, 3)
430 static void tcpm_log(struct tcpm_port *port, const char *fmt, ...)
431 {
432 va_list args;
433
434 /* Do not log while disconnected and unattached */
435 if (tcpm_port_is_disconnected(port) &&
436 (port->state == SRC_UNATTACHED || port->state == SNK_UNATTACHED ||
437 port->state == DRP_TOGGLING))
438 return;
439
440 va_start(args, fmt);
441 _tcpm_log(port, fmt, args);
442 va_end(args);
443 }
444
445 __printf(2, 3)
446 static void tcpm_log_force(struct tcpm_port *port, const char *fmt, ...)
447 {
448 va_list args;
449
450 va_start(args, fmt);
451 _tcpm_log(port, fmt, args);
452 va_end(args);
453 }
454
455 static void tcpm_log_source_caps(struct tcpm_port *port)
456 {
457 int i;
458
459 for (i = 0; i < port->nr_source_caps; i++) {
460 u32 pdo = port->source_caps[i];
461 enum pd_pdo_type type = pdo_type(pdo);
462 char msg[64];
463
464 switch (type) {
465 case PDO_TYPE_FIXED:
466 scnprintf(msg, sizeof(msg),
467 "%u mV, %u mA [%s%s%s%s%s%s]",
468 pdo_fixed_voltage(pdo),
469 pdo_max_current(pdo),
470 (pdo & PDO_FIXED_DUAL_ROLE) ?
471 "R" : "",
472 (pdo & PDO_FIXED_SUSPEND) ?
473 "S" : "",
474 (pdo & PDO_FIXED_HIGHER_CAP) ?
475 "H" : "",
476 (pdo & PDO_FIXED_USB_COMM) ?
477 "U" : "",
478 (pdo & PDO_FIXED_DATA_SWAP) ?
479 "D" : "",
480 (pdo & PDO_FIXED_EXTPOWER) ?
481 "E" : "");
482 break;
483 case PDO_TYPE_VAR:
484 scnprintf(msg, sizeof(msg),
485 "%u-%u mV, %u mA",
486 pdo_min_voltage(pdo),
487 pdo_max_voltage(pdo),
488 pdo_max_current(pdo));
489 break;
490 case PDO_TYPE_BATT:
491 scnprintf(msg, sizeof(msg),
492 "%u-%u mV, %u mW",
493 pdo_min_voltage(pdo),
494 pdo_max_voltage(pdo),
495 pdo_max_power(pdo));
496 break;
497 default:
498 strcpy(msg, "undefined");
499 break;
500 }
501 tcpm_log(port, " PDO %d: type %d, %s",
502 i, type, msg);
503 }
504 }
505
506 static int tcpm_debug_show(struct seq_file *s, void *v)
507 {
508 struct tcpm_port *port = (struct tcpm_port *)s->private;
509 int tail;
510
511 mutex_lock(&port->logbuffer_lock);
512 tail = port->logbuffer_tail;
513 while (tail != port->logbuffer_head) {
514 seq_printf(s, "%s\n", port->logbuffer[tail]);
515 tail = (tail + 1) % LOG_BUFFER_ENTRIES;
516 }
517 if (!seq_has_overflowed(s))
518 port->logbuffer_tail = tail;
519 mutex_unlock(&port->logbuffer_lock);
520
521 return 0;
522 }
523 DEFINE_SHOW_ATTRIBUTE(tcpm_debug);
524
525 static struct dentry *rootdir;
526
527 static int tcpm_debugfs_init(struct tcpm_port *port)
528 {
529 mutex_init(&port->logbuffer_lock);
530 /* /sys/kernel/debug/tcpm/usbcX */
531 if (!rootdir) {
532 rootdir = debugfs_create_dir("tcpm", NULL);
533 if (!rootdir)
534 return -ENOMEM;
535 }
536
537 port->dentry = debugfs_create_file(dev_name(port->dev),
538 S_IFREG | 0444, rootdir,
539 port, &tcpm_debug_fops);
540
541 return 0;
542 }
543
544 static void tcpm_debugfs_exit(struct tcpm_port *port)
545 {
546 debugfs_remove(port->dentry);
547 }
548
549 #else
550
551 __printf(2, 3)
552 static void tcpm_log(const struct tcpm_port *port, const char *fmt, ...) { }
553 __printf(2, 3)
554 static void tcpm_log_force(struct tcpm_port *port, const char *fmt, ...) { }
555 static void tcpm_log_source_caps(struct tcpm_port *port) { }
556 static int tcpm_debugfs_init(const struct tcpm_port *port) { return 0; }
557 static void tcpm_debugfs_exit(const struct tcpm_port *port) { }
558
559 #endif
560
561 static int tcpm_pd_transmit(struct tcpm_port *port,
562 enum tcpm_transmit_type type,
563 const struct pd_message *msg)
564 {
565 unsigned long timeout;
566 int ret;
567
568 if (msg)
569 tcpm_log(port, "PD TX, header: %#x", le16_to_cpu(msg->header));
570 else
571 tcpm_log(port, "PD TX, type: %#x", type);
572
573 reinit_completion(&port->tx_complete);
574 ret = port->tcpc->pd_transmit(port->tcpc, type, msg);
575 if (ret < 0)
576 return ret;
577
578 mutex_unlock(&port->lock);
579 timeout = wait_for_completion_timeout(&port->tx_complete,
580 msecs_to_jiffies(PD_T_TCPC_TX_TIMEOUT));
581 mutex_lock(&port->lock);
582 if (!timeout)
583 return -ETIMEDOUT;
584
585 switch (port->tx_status) {
586 case TCPC_TX_SUCCESS:
587 port->message_id = (port->message_id + 1) & PD_HEADER_ID_MASK;
588 return 0;
589 case TCPC_TX_DISCARDED:
590 return -EAGAIN;
591 case TCPC_TX_FAILED:
592 default:
593 return -EIO;
594 }
595 }
596
597 void tcpm_pd_transmit_complete(struct tcpm_port *port,
598 enum tcpm_transmit_status status)
599 {
600 tcpm_log(port, "PD TX complete, status: %u", status);
601 port->tx_status = status;
602 complete(&port->tx_complete);
603 }
604 EXPORT_SYMBOL_GPL(tcpm_pd_transmit_complete);
605
606 static int tcpm_mux_set(struct tcpm_port *port, enum tcpc_mux_mode mode,
607 enum usb_role usb_role)
608 {
609 int ret = 0;
610
611 tcpm_log(port, "Requesting mux mode %d, usb-role %d, polarity %d",
612 mode, usb_role, port->polarity);
613
614 if (port->tcpc->mux)
615 ret = port->tcpc->mux->set(port->tcpc->mux, mode, usb_role,
616 port->polarity);
617
618 return ret;
619 }
620
621 static int tcpm_set_polarity(struct tcpm_port *port,
622 enum typec_cc_polarity polarity)
623 {
624 int ret;
625
626 tcpm_log(port, "polarity %d", polarity);
627
628 ret = port->tcpc->set_polarity(port->tcpc, polarity);
629 if (ret < 0)
630 return ret;
631
632 port->polarity = polarity;
633
634 return 0;
635 }
636
637 static int tcpm_set_vconn(struct tcpm_port *port, bool enable)
638 {
639 int ret;
640
641 tcpm_log(port, "vconn:=%d", enable);
642
643 ret = port->tcpc->set_vconn(port->tcpc, enable);
644 if (!ret) {
645 port->vconn_role = enable ? TYPEC_SOURCE : TYPEC_SINK;
646 typec_set_vconn_role(port->typec_port, port->vconn_role);
647 }
648
649 return ret;
650 }
651
652 static u32 tcpm_get_current_limit(struct tcpm_port *port)
653 {
654 enum typec_cc_status cc;
655 u32 limit;
656
657 cc = port->polarity ? port->cc2 : port->cc1;
658 switch (cc) {
659 case TYPEC_CC_RP_1_5:
660 limit = 1500;
661 break;
662 case TYPEC_CC_RP_3_0:
663 limit = 3000;
664 break;
665 case TYPEC_CC_RP_DEF:
666 default:
667 if (port->tcpc->get_current_limit)
668 limit = port->tcpc->get_current_limit(port->tcpc);
669 else
670 limit = 0;
671 break;
672 }
673
674 return limit;
675 }
676
677 static int tcpm_set_current_limit(struct tcpm_port *port, u32 max_ma, u32 mv)
678 {
679 int ret = -EOPNOTSUPP;
680
681 tcpm_log(port, "Setting voltage/current limit %u mV %u mA", mv, max_ma);
682
683 if (port->tcpc->set_current_limit)
684 ret = port->tcpc->set_current_limit(port->tcpc, max_ma, mv);
685
686 return ret;
687 }
688
689 /*
690 * Determine RP value to set based on maximum current supported
691 * by a port if configured as source.
692 * Returns CC value to report to link partner.
693 */
694 static enum typec_cc_status tcpm_rp_cc(struct tcpm_port *port)
695 {
696 const u32 *src_pdo = port->src_pdo;
697 int nr_pdo = port->nr_src_pdo;
698 int i;
699
700 /*
701 * Search for the first fixed-supply entry at vSafe5V (5000 mV);
702 * it is expected to report the maximum supported current.
703 */
704 for (i = 0; i < nr_pdo; i++) {
705 const u32 pdo = src_pdo[i];
706
707 if (pdo_type(pdo) == PDO_TYPE_FIXED &&
708 pdo_fixed_voltage(pdo) == 5000) {
709 unsigned int curr = pdo_max_current(pdo);
710
711 if (curr >= 3000)
712 return TYPEC_CC_RP_3_0;
713 else if (curr >= 1500)
714 return TYPEC_CC_RP_1_5;
715 return TYPEC_CC_RP_DEF;
716 }
717 }
718
719 return TYPEC_CC_RP_DEF;
720 }
721
722 static int tcpm_set_attached_state(struct tcpm_port *port, bool attached)
723 {
724 return port->tcpc->set_roles(port->tcpc, attached, port->pwr_role,
725 port->data_role);
726 }
727
728 static int tcpm_set_roles(struct tcpm_port *port, bool attached,
729 enum typec_role role, enum typec_data_role data)
730 {
731 enum usb_role usb_role;
732 int ret;
733
734 if (data == TYPEC_HOST)
735 usb_role = USB_ROLE_HOST;
736 else
737 usb_role = USB_ROLE_DEVICE;
738
739 ret = tcpm_mux_set(port, TYPEC_MUX_USB, usb_role);
740 if (ret < 0)
741 return ret;
742
743 ret = port->tcpc->set_roles(port->tcpc, attached, role, data);
744 if (ret < 0)
745 return ret;
746
747 port->pwr_role = role;
748 port->data_role = data;
749 typec_set_data_role(port->typec_port, data);
750 typec_set_pwr_role(port->typec_port, role);
751
752 return 0;
753 }
754
755 static int tcpm_set_pwr_role(struct tcpm_port *port, enum typec_role role)
756 {
757 int ret;
758
759 ret = port->tcpc->set_roles(port->tcpc, true, role,
760 port->data_role);
761 if (ret < 0)
762 return ret;
763
764 port->pwr_role = role;
765 typec_set_pwr_role(port->typec_port, role);
766
767 return 0;
768 }
769
770 static int tcpm_pd_send_source_caps(struct tcpm_port *port)
771 {
772 struct pd_message msg;
773 int i;
774
775 memset(&msg, 0, sizeof(msg));
776 if (!port->nr_src_pdo) {
777 /* No source capabilities defined, sink only */
778 msg.header = PD_HEADER_LE(PD_CTRL_REJECT,
779 port->pwr_role,
780 port->data_role,
781 port->message_id, 0);
782 } else {
783 msg.header = PD_HEADER_LE(PD_DATA_SOURCE_CAP,
784 port->pwr_role,
785 port->data_role,
786 port->message_id,
787 port->nr_src_pdo);
788 }
789 for (i = 0; i < port->nr_src_pdo; i++)
790 msg.payload[i] = cpu_to_le32(port->src_pdo[i]);
791
792 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
793 }
794
795 static int tcpm_pd_send_sink_caps(struct tcpm_port *port)
796 {
797 struct pd_message msg;
798 int i;
799
800 memset(&msg, 0, sizeof(msg));
801 if (!port->nr_snk_pdo) {
802 /* No sink capabilities defined, source only */
803 msg.header = PD_HEADER_LE(PD_CTRL_REJECT,
804 port->pwr_role,
805 port->data_role,
806 port->message_id, 0);
807 } else {
808 msg.header = PD_HEADER_LE(PD_DATA_SINK_CAP,
809 port->pwr_role,
810 port->data_role,
811 port->message_id,
812 port->nr_snk_pdo);
813 }
814 for (i = 0; i < port->nr_snk_pdo; i++)
815 msg.payload[i] = cpu_to_le32(port->snk_pdo[i]);
816
817 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
818 }
819
820 static void tcpm_set_state(struct tcpm_port *port, enum tcpm_state state,
821 unsigned int delay_ms)
822 {
823 if (delay_ms) {
824 tcpm_log(port, "pending state change %s -> %s @ %u ms",
825 tcpm_states[port->state], tcpm_states[state],
826 delay_ms);
827 port->delayed_state = state;
828 mod_delayed_work(port->wq, &port->state_machine,
829 msecs_to_jiffies(delay_ms));
830 port->delayed_runtime = jiffies + msecs_to_jiffies(delay_ms);
831 port->delay_ms = delay_ms;
832 } else {
833 tcpm_log(port, "state change %s -> %s",
834 tcpm_states[port->state], tcpm_states[state]);
835 port->delayed_state = INVALID_STATE;
836 port->prev_state = port->state;
837 port->state = state;
838 /*
839 * Don't re-queue the state machine work item if we're currently
840 * in the state machine and we're immediately changing states.
841 * tcpm_state_machine_work() will continue running the state
842 * machine.
843 */
844 if (!port->state_machine_running)
845 mod_delayed_work(port->wq, &port->state_machine, 0);
846 }
847 }
848
849 static void tcpm_set_state_cond(struct tcpm_port *port, enum tcpm_state state,
850 unsigned int delay_ms)
851 {
852 if (port->enter_state == port->state)
853 tcpm_set_state(port, state, delay_ms);
854 else
855 tcpm_log(port,
856 "skipped %sstate change %s -> %s [%u ms], context state %s",
857 delay_ms ? "delayed " : "",
858 tcpm_states[port->state], tcpm_states[state],
859 delay_ms, tcpm_states[port->enter_state]);
860 }
861
862 static void tcpm_queue_message(struct tcpm_port *port,
863 enum pd_msg_request message)
864 {
865 port->queued_message = message;
866 mod_delayed_work(port->wq, &port->state_machine, 0);
867 }
868
869 /*
870 * VDM/VDO handling functions
871 */
872 static void tcpm_queue_vdm(struct tcpm_port *port, const u32 header,
873 const u32 *data, int cnt)
874 {
875 port->vdo_count = cnt + 1;
876 port->vdo_data[0] = header;
877 memcpy(&port->vdo_data[1], data, sizeof(u32) * cnt);
878 /* Set ready, vdm state machine will actually send */
879 port->vdm_retries = 0;
880 port->vdm_state = VDM_STATE_READY;
881 }
882
883 static void svdm_consume_identity(struct tcpm_port *port, const __le32 *payload,
884 int cnt)
885 {
886 u32 vdo = le32_to_cpu(payload[VDO_INDEX_IDH]);
887 u32 product = le32_to_cpu(payload[VDO_INDEX_PRODUCT]);
888
889 memset(&port->mode_data, 0, sizeof(port->mode_data));
890
891 port->partner_ident.id_header = vdo;
892 port->partner_ident.cert_stat = le32_to_cpu(payload[VDO_INDEX_CSTAT]);
893 port->partner_ident.product = product;
894
895 typec_partner_set_identity(port->partner);
896
897 tcpm_log(port, "Identity: %04x:%04x.%04x",
898 PD_IDH_VID(vdo),
899 PD_PRODUCT_PID(product), product & 0xffff);
900 }
901
902 static bool svdm_consume_svids(struct tcpm_port *port, const __le32 *payload,
903 int cnt)
904 {
905 struct pd_mode_data *pmdata = &port->mode_data;
906 int i;
907
908 for (i = 1; i < cnt; i++) {
909 u32 p = le32_to_cpu(payload[i]);
910 u16 svid;
911
912 svid = (p >> 16) & 0xffff;
913 if (!svid)
914 return false;
915
916 if (pmdata->nsvids >= SVID_DISCOVERY_MAX)
917 goto abort;
918
919 pmdata->svids[pmdata->nsvids++] = svid;
920 tcpm_log(port, "SVID %d: 0x%x", pmdata->nsvids, svid);
921
922 svid = p & 0xffff;
923 if (!svid)
924 return false;
925
926 if (pmdata->nsvids >= SVID_DISCOVERY_MAX)
927 goto abort;
928
929 pmdata->svids[pmdata->nsvids++] = svid;
930 tcpm_log(port, "SVID %d: 0x%x", pmdata->nsvids, svid);
931 }
932 return true;
933 abort:
934 tcpm_log(port, "SVID_DISCOVERY_MAX(%d) too low!", SVID_DISCOVERY_MAX);
935 return false;
936 }
937
938 static void svdm_consume_modes(struct tcpm_port *port, const __le32 *payload,
939 int cnt)
940 {
941 struct pd_mode_data *pmdata = &port->mode_data;
942 struct typec_altmode_desc *paltmode;
943 struct typec_mode_desc *pmode;
944 int i;
945
946 if (pmdata->altmodes >= ARRAY_SIZE(port->partner_altmode)) {
947 /* Already logged in svdm_consume_svids() */
948 return;
949 }
950
951 paltmode = &pmdata->altmode_desc[pmdata->altmodes];
952 memset(paltmode, 0, sizeof(*paltmode));
953
954 paltmode->svid = pmdata->svids[pmdata->svid_index];
955
956 tcpm_log(port, " Alternate mode %d: SVID 0x%04x",
957 pmdata->altmodes, paltmode->svid);
958
959 for (i = 1; i < cnt && paltmode->n_modes < ALTMODE_MAX_MODES; i++) {
960 pmode = &paltmode->modes[paltmode->n_modes];
961 memset(pmode, 0, sizeof(*pmode));
962 pmode->vdo = le32_to_cpu(payload[i]);
963 pmode->index = i - 1;
964 paltmode->n_modes++;
965 tcpm_log(port, " VDO %d: 0x%08x",
966 pmode->index, pmode->vdo);
967 }
968 port->partner_altmode[pmdata->altmodes] =
969 typec_partner_register_altmode(port->partner, paltmode);
970 if (!port->partner_altmode[pmdata->altmodes]) {
971 tcpm_log(port,
972 "Failed to register alternate modes for SVID 0x%04x",
973 paltmode->svid);
974 return;
975 }
976 pmdata->altmodes++;
977 }
978
979 #define supports_modal(port) PD_IDH_MODAL_SUPP((port)->partner_ident.id_header)
980
981 static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
982 u32 *response)
983 {
984 u32 p0 = le32_to_cpu(payload[0]);
985 int cmd_type = PD_VDO_CMDT(p0);
986 int cmd = PD_VDO_CMD(p0);
987 struct pd_mode_data *modep;
988 int rlen = 0;
989 u16 svid;
990 int i;
991
992 tcpm_log(port, "Rx VDM cmd 0x%x type %d cmd %d len %d",
993 p0, cmd_type, cmd, cnt);
994
995 modep = &port->mode_data;
996
997 switch (cmd_type) {
998 case CMDT_INIT:
999 switch (cmd) {
1000 case CMD_DISCOVER_IDENT:
1001 /* 6.4.4.3.1: Only respond as UFP (device) */
1002 if (port->data_role == TYPEC_DEVICE &&
1003 port->nr_snk_vdo) {
1004 for (i = 0; i < port->nr_snk_vdo; i++)
1005 response[i + 1] = port->snk_vdo[i];
1006 rlen = port->nr_snk_vdo + 1;
1007 }
1008 break;
1009 case CMD_DISCOVER_SVID:
1010 break;
1011 case CMD_DISCOVER_MODES:
1012 break;
1013 case CMD_ENTER_MODE:
1014 break;
1015 case CMD_EXIT_MODE:
1016 break;
1017 case CMD_ATTENTION:
1018 break;
1019 default:
1020 break;
1021 }
1022 if (rlen >= 1) {
1023 response[0] = p0 | VDO_CMDT(CMDT_RSP_ACK);
1024 } else if (rlen == 0) {
1025 response[0] = p0 | VDO_CMDT(CMDT_RSP_NAK);
1026 rlen = 1;
1027 } else {
1028 response[0] = p0 | VDO_CMDT(CMDT_RSP_BUSY);
1029 rlen = 1;
1030 }
1031 break;
1032 case CMDT_RSP_ACK:
1033 /* silently drop message if we are not connected */
1034 if (IS_ERR_OR_NULL(port->partner))
1035 break;
1036
1037 switch (cmd) {
1038 case CMD_DISCOVER_IDENT:
1039 /* 6.4.4.3.1 */
1040 svdm_consume_identity(port, payload, cnt);
1041 response[0] = VDO(USB_SID_PD, 1, CMD_DISCOVER_SVID);
1042 rlen = 1;
1043 break;
1044 case CMD_DISCOVER_SVID:
1045 /* 6.4.4.3.2 */
1046 if (svdm_consume_svids(port, payload, cnt)) {
1047 response[0] = VDO(USB_SID_PD, 1,
1048 CMD_DISCOVER_SVID);
1049 rlen = 1;
1050 } else if (modep->nsvids && supports_modal(port)) {
1051 response[0] = VDO(modep->svids[0], 1,
1052 CMD_DISCOVER_MODES);
1053 rlen = 1;
1054 }
1055 break;
1056 case CMD_DISCOVER_MODES:
1057 /* 6.4.4.3.3 */
1058 svdm_consume_modes(port, payload, cnt);
1059 modep->svid_index++;
1060 if (modep->svid_index < modep->nsvids) {
1061 svid = modep->svids[modep->svid_index];
1062 response[0] = VDO(svid, 1, CMD_DISCOVER_MODES);
1063 rlen = 1;
1064 } else {
1065 /* enter alternate mode if/when implemented */
1066 }
1067 break;
1068 case CMD_ENTER_MODE:
1069 break;
1070 default:
1071 break;
1072 }
1073 break;
1074 default:
1075 break;
1076 }
1077
1078 return rlen;
1079 }
1080
1081 static void tcpm_handle_vdm_request(struct tcpm_port *port,
1082 const __le32 *payload, int cnt)
1083 {
1084 int rlen = 0;
1085 u32 response[8] = { };
1086 u32 p0 = le32_to_cpu(payload[0]);
1087
1088 if (port->vdm_state == VDM_STATE_BUSY) {
1089 /* If UFP responded busy retry after timeout */
1090 if (PD_VDO_CMDT(p0) == CMDT_RSP_BUSY) {
1091 port->vdm_state = VDM_STATE_WAIT_RSP_BUSY;
1092 port->vdo_retry = (p0 & ~VDO_CMDT_MASK) |
1093 CMDT_INIT;
1094 mod_delayed_work(port->wq, &port->vdm_state_machine,
1095 msecs_to_jiffies(PD_T_VDM_BUSY));
1096 return;
1097 }
1098 port->vdm_state = VDM_STATE_DONE;
1099 }
1100
1101 if (PD_VDO_SVDM(p0))
1102 rlen = tcpm_pd_svdm(port, payload, cnt, response);
1103
1104 if (rlen > 0) {
1105 tcpm_queue_vdm(port, response[0], &response[1], rlen - 1);
1106 mod_delayed_work(port->wq, &port->vdm_state_machine, 0);
1107 }
1108 }
1109
1110 static void tcpm_send_vdm(struct tcpm_port *port, u32 vid, int cmd,
1111 const u32 *data, int count)
1112 {
1113 u32 header;
1114
1115 if (WARN_ON(count > VDO_MAX_SIZE - 1))
1116 count = VDO_MAX_SIZE - 1;
1117
1118 /* set VDM header with VID & CMD */
1119 header = VDO(vid, ((vid & USB_SID_PD) == USB_SID_PD) ?
1120 1 : (PD_VDO_CMD(cmd) <= CMD_ATTENTION), cmd);
1121 tcpm_queue_vdm(port, header, data, count);
1122
1123 mod_delayed_work(port->wq, &port->vdm_state_machine, 0);
1124 }
1125
1126 static unsigned int vdm_ready_timeout(u32 vdm_hdr)
1127 {
1128 unsigned int timeout;
1129 int cmd = PD_VDO_CMD(vdm_hdr);
1130
1131 /* it's not a structured VDM command */
1132 if (!PD_VDO_SVDM(vdm_hdr))
1133 return PD_T_VDM_UNSTRUCTURED;
1134
1135 switch (PD_VDO_CMDT(vdm_hdr)) {
1136 case CMDT_INIT:
1137 if (cmd == CMD_ENTER_MODE || cmd == CMD_EXIT_MODE)
1138 timeout = PD_T_VDM_WAIT_MODE_E;
1139 else
1140 timeout = PD_T_VDM_SNDR_RSP;
1141 break;
1142 default:
1143 if (cmd == CMD_ENTER_MODE || cmd == CMD_EXIT_MODE)
1144 timeout = PD_T_VDM_E_MODE;
1145 else
1146 timeout = PD_T_VDM_RCVR_RSP;
1147 break;
1148 }
1149 return timeout;
1150 }
1151
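/*
 * One step of the VDM sender state machine: READY transmits the queued VDO
 * buffer (only while attached and in SRC_READY/SNK_READY) and arms a
 * response timeout, BUSY escalates to a timeout error, WAIT_RSP_BUSY
 * re-queues the saved command, and ERR_SEND retries up to three times
 * since a non-PD partner may legitimately never reply.
 */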
1152 static void vdm_run_state_machine(struct tcpm_port *port)
1153 {
1154 struct pd_message msg;
1155 int i, res;
1156
1157 switch (port->vdm_state) {
1158 case VDM_STATE_READY:
1159 /* Only transmit VDM if attached */
1160 if (!port->attached) {
1161 port->vdm_state = VDM_STATE_ERR_BUSY;
1162 break;
1163 }
1164
1165 /*
1166 * If there is traffic, or we are not in a PD ready state
1167 * (SRC_READY/SNK_READY), don't send a VDM.
1168 */
1169 if (port->state != SRC_READY && port->state != SNK_READY)
1170 break;
1171
1172 /* Prepare and send VDM */
1173 memset(&msg, 0, sizeof(msg));
1174 msg.header = PD_HEADER_LE(PD_DATA_VENDOR_DEF,
1175 port->pwr_role,
1176 port->data_role,
1177 port->message_id, port->vdo_count);
1178 for (i = 0; i < port->vdo_count; i++)
1179 msg.payload[i] = cpu_to_le32(port->vdo_data[i]);
1180 res = tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
1181 if (res < 0) {
1182 port->vdm_state = VDM_STATE_ERR_SEND;
1183 } else {
1184 unsigned long timeout;
1185
1186 port->vdm_retries = 0;
1187 port->vdm_state = VDM_STATE_BUSY;
1188 timeout = vdm_ready_timeout(port->vdo_data[0]);
1189 mod_delayed_work(port->wq, &port->vdm_state_machine,
1190 timeout);
1191 }
1192 break;
1193 case VDM_STATE_WAIT_RSP_BUSY:
1194 port->vdo_data[0] = port->vdo_retry;
1195 port->vdo_count = 1;
1196 port->vdm_state = VDM_STATE_READY;
1197 break;
1198 case VDM_STATE_BUSY:
1199 port->vdm_state = VDM_STATE_ERR_TMOUT;
1200 break;
1201 case VDM_STATE_ERR_SEND:
1202 /*
1203 * A partner which does not support USB PD will not reply,
1204 * so this is not a fatal error. At the same time, some
1205 * devices may not return GoodCRC under some circumstances,
1206 * so we need to retry.
1207 */
1208 if (port->vdm_retries < 3) {
1209 tcpm_log(port, "VDM Tx error, retry");
1210 port->vdm_retries++;
1211 port->vdm_state = VDM_STATE_READY;
1212 }
1213 break;
1214 default:
1215 break;
1216 }
1217 }
1218
1219 static void vdm_state_machine_work(struct work_struct *work)
1220 {
1221 struct tcpm_port *port = container_of(work, struct tcpm_port,
1222 vdm_state_machine.work);
1223 enum vdm_states prev_state;
1224
1225 mutex_lock(&port->lock);
1226
1227 /*
1228 * Continue running as long as the port is not busy and there was
1229 * a state change.
1230 */
1231 do {
1232 prev_state = port->vdm_state;
1233 vdm_run_state_machine(port);
1234 } while (port->vdm_state != prev_state &&
1235 port->vdm_state != VDM_STATE_BUSY);
1236
1237 mutex_unlock(&port->lock);
1238 }
1239
1240 enum pdo_err {
1241 PDO_NO_ERR,
1242 PDO_ERR_NO_VSAFE5V,
1243 PDO_ERR_VSAFE5V_NOT_FIRST,
1244 PDO_ERR_PDO_TYPE_NOT_IN_ORDER,
1245 PDO_ERR_FIXED_NOT_SORTED,
1246 PDO_ERR_VARIABLE_BATT_NOT_SORTED,
1247 PDO_ERR_DUPE_PDO,
1248 };
1249
1250 static const char * const pdo_err_msg[] = {
1251 [PDO_ERR_NO_VSAFE5V] =
1252 " err: source/sink caps should atleast have vSafe5V",
1253 [PDO_ERR_VSAFE5V_NOT_FIRST] =
1254 " err: vSafe5V Fixed Supply Object Shall always be the first object",
1255 [PDO_ERR_PDO_TYPE_NOT_IN_ORDER] =
1256 " err: PDOs should be in the following order: Fixed; Battery; Variable",
1257 [PDO_ERR_FIXED_NOT_SORTED] =
1258 " err: Fixed supply pdos should be in increasing order of their fixed voltage",
1259 [PDO_ERR_VARIABLE_BATT_NOT_SORTED] =
1260 " err: Variable/Battery supply pdos should be in increasing order of their minimum voltage",
1261 [PDO_ERR_DUPE_PDO] =
1262 " err: Variable/Batt supply pdos cannot have same min/max voltage",
1263 };
1264
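/*
 * Sanity-check a local PDO list against the ordering rules in the USB PD
 * specification: the vSafe5V fixed supply comes first, remaining fixed
 * supplies follow in increasing voltage order, then battery and variable
 * supplies each in increasing minimum-voltage order, with no duplicate
 * battery/variable entries.
 */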
1265 static enum pdo_err tcpm_caps_err(struct tcpm_port *port, const u32 *pdo,
1266 unsigned int nr_pdo)
1267 {
1268 unsigned int i;
1269
1270 /* Should at least contain vSafe5v */
1271 if (nr_pdo < 1)
1272 return PDO_ERR_NO_VSAFE5V;
1273
1274 /* The vSafe5V Fixed Supply Object Shall always be the first object */
1275 if (pdo_type(pdo[0]) != PDO_TYPE_FIXED ||
1276 pdo_fixed_voltage(pdo[0]) != VSAFE5V)
1277 return PDO_ERR_VSAFE5V_NOT_FIRST;
1278
1279 for (i = 1; i < nr_pdo; i++) {
1280 if (pdo_type(pdo[i]) < pdo_type(pdo[i - 1])) {
1281 return PDO_ERR_PDO_TYPE_NOT_IN_ORDER;
1282 } else if (pdo_type(pdo[i]) == pdo_type(pdo[i - 1])) {
1283 enum pd_pdo_type type = pdo_type(pdo[i]);
1284
1285 switch (type) {
1286 /*
1287 * The remaining Fixed Supply Objects, if
1288 * present, shall be sent in voltage order;
1289 * lowest to highest.
1290 */
1291 case PDO_TYPE_FIXED:
1292 if (pdo_fixed_voltage(pdo[i]) <=
1293 pdo_fixed_voltage(pdo[i - 1]))
1294 return PDO_ERR_FIXED_NOT_SORTED;
1295 break;
1296 /*
1297 * The Battery Supply Objects and Variable
1298 * supply, if present shall be sent in Minimum
1299 * Voltage order; lowest to highest.
1300 */
1301 case PDO_TYPE_VAR:
1302 case PDO_TYPE_BATT:
1303 if (pdo_min_voltage(pdo[i]) <
1304 pdo_min_voltage(pdo[i - 1]))
1305 return PDO_ERR_VARIABLE_BATT_NOT_SORTED;
1306 else if ((pdo_min_voltage(pdo[i]) ==
1307 pdo_min_voltage(pdo[i - 1])) &&
1308 (pdo_max_voltage(pdo[i]) ==
1309 pdo_max_voltage(pdo[i - 1])))
1310 return PDO_ERR_DUPE_PDO;
1311 break;
1312 default:
1313 tcpm_log_force(port, " Unknown pdo type");
1314 }
1315 }
1316 }
1317
1318 return PDO_NO_ERR;
1319 }
1320
1321 static int tcpm_validate_caps(struct tcpm_port *port, const u32 *pdo,
1322 unsigned int nr_pdo)
1323 {
1324 enum pdo_err err_index = tcpm_caps_err(port, pdo, nr_pdo);
1325
1326 if (err_index != PDO_NO_ERR) {
1327 tcpm_log_force(port, " %s", pdo_err_msg[err_index]);
1328 return -EINVAL;
1329 }
1330
1331 return 0;
1332 }
1333
1334 /*
1335 * PD (data, control) command handling functions
1336 */
1337 static void tcpm_pd_data_request(struct tcpm_port *port,
1338 const struct pd_message *msg)
1339 {
1340 enum pd_data_msg_type type = pd_header_type_le(msg->header);
1341 unsigned int cnt = pd_header_cnt_le(msg->header);
1342 unsigned int i;
1343
1344 switch (type) {
1345 case PD_DATA_SOURCE_CAP:
1346 if (port->pwr_role != TYPEC_SINK)
1347 break;
1348
1349 for (i = 0; i < cnt; i++)
1350 port->source_caps[i] = le32_to_cpu(msg->payload[i]);
1351
1352 port->nr_source_caps = cnt;
1353
1354 tcpm_log_source_caps(port);
1355
1356 tcpm_validate_caps(port, port->source_caps,
1357 port->nr_source_caps);
1358
1359 /*
1360 * This message may be received even if VBUS is not
1361 * present. This is quite unexpected; see USB PD
1362 * specification, sections 8.3.3.6.3.1 and 8.3.3.6.3.2.
1363 * However, at the same time, we must be ready to
1364 * receive this message and respond to it 15ms after
1365 * receiving PS_RDY during power swap operations, no matter
1366 * if VBUS is available or not (USB PD specification,
1367 * section 6.5.9.2).
1368 * So we need to accept the message either way,
1369 * but be prepared to keep waiting for VBUS after it was
1370 * handled.
1371 */
1372 tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0);
1373 break;
1374 case PD_DATA_REQUEST:
1375 if (port->pwr_role != TYPEC_SOURCE ||
1376 cnt != 1) {
1377 tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
1378 break;
1379 }
1380 port->sink_request = le32_to_cpu(msg->payload[0]);
1381 tcpm_set_state(port, SRC_NEGOTIATE_CAPABILITIES, 0);
1382 break;
1383 case PD_DATA_SINK_CAP:
1384 /* We don't do anything with this at the moment... */
1385 for (i = 0; i < cnt; i++)
1386 port->sink_caps[i] = le32_to_cpu(msg->payload[i]);
1387 port->nr_sink_caps = cnt;
1388 break;
1389 case PD_DATA_VENDOR_DEF:
1390 tcpm_handle_vdm_request(port, msg->payload, cnt);
1391 break;
1392 case PD_DATA_BIST:
1393 if (port->state == SRC_READY || port->state == SNK_READY) {
1394 port->bist_request = le32_to_cpu(msg->payload[0]);
1395 tcpm_set_state(port, BIST_RX, 0);
1396 }
1397 break;
1398 default:
1399 tcpm_log(port, "Unhandled data message type %#x", type);
1400 break;
1401 }
1402 }
1403
1404 static void tcpm_pd_ctrl_request(struct tcpm_port *port,
1405 const struct pd_message *msg)
1406 {
1407 enum pd_ctrl_msg_type type = pd_header_type_le(msg->header);
1408 enum tcpm_state next_state;
1409
1410 switch (type) {
1411 case PD_CTRL_GOOD_CRC:
1412 case PD_CTRL_PING:
1413 break;
1414 case PD_CTRL_GET_SOURCE_CAP:
1415 switch (port->state) {
1416 case SRC_READY:
1417 case SNK_READY:
1418 tcpm_queue_message(port, PD_MSG_DATA_SOURCE_CAP);
1419 break;
1420 default:
1421 tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
1422 break;
1423 }
1424 break;
1425 case PD_CTRL_GET_SINK_CAP:
1426 switch (port->state) {
1427 case SRC_READY:
1428 case SNK_READY:
1429 tcpm_queue_message(port, PD_MSG_DATA_SINK_CAP);
1430 break;
1431 default:
1432 tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
1433 break;
1434 }
1435 break;
1436 case PD_CTRL_GOTO_MIN:
1437 break;
1438 case PD_CTRL_PS_RDY:
1439 switch (port->state) {
1440 case SNK_TRANSITION_SINK:
1441 if (port->vbus_present) {
1442 tcpm_set_current_limit(port,
1443 port->current_limit,
1444 port->supply_voltage);
1445 port->explicit_contract = true;
1446 tcpm_set_state(port, SNK_READY, 0);
1447 } else {
1448 /*
1449 * Seen after power swap. Keep waiting for VBUS
1450 * in a transitional state.
1451 */
1452 tcpm_set_state(port,
1453 SNK_TRANSITION_SINK_VBUS, 0);
1454 }
1455 break;
1456 case PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED:
1457 tcpm_set_state(port, PR_SWAP_SRC_SNK_SINK_ON, 0);
1458 break;
1459 case PR_SWAP_SNK_SRC_SINK_OFF:
1460 tcpm_set_state(port, PR_SWAP_SNK_SRC_SOURCE_ON, 0);
1461 break;
1462 case VCONN_SWAP_WAIT_FOR_VCONN:
1463 tcpm_set_state(port, VCONN_SWAP_TURN_OFF_VCONN, 0);
1464 break;
1465 default:
1466 break;
1467 }
1468 break;
1469 case PD_CTRL_REJECT:
1470 case PD_CTRL_WAIT:
1471 switch (port->state) {
1472 case SNK_NEGOTIATE_CAPABILITIES:
1473 /* USB PD specification, Figure 8-43 */
1474 if (port->explicit_contract)
1475 next_state = SNK_READY;
1476 else
1477 next_state = SNK_WAIT_CAPABILITIES;
1478 tcpm_set_state(port, next_state, 0);
1479 break;
1480 case DR_SWAP_SEND:
1481 port->swap_status = (type == PD_CTRL_WAIT ?
1482 -EAGAIN : -EOPNOTSUPP);
1483 tcpm_set_state(port, DR_SWAP_CANCEL, 0);
1484 break;
1485 case PR_SWAP_SEND:
1486 port->swap_status = (type == PD_CTRL_WAIT ?
1487 -EAGAIN : -EOPNOTSUPP);
1488 tcpm_set_state(port, PR_SWAP_CANCEL, 0);
1489 break;
1490 case VCONN_SWAP_SEND:
1491 port->swap_status = (type == PD_CTRL_WAIT ?
1492 -EAGAIN : -EOPNOTSUPP);
1493 tcpm_set_state(port, VCONN_SWAP_CANCEL, 0);
1494 break;
1495 default:
1496 break;
1497 }
1498 break;
1499 case PD_CTRL_ACCEPT:
1500 switch (port->state) {
1501 case SNK_NEGOTIATE_CAPABILITIES:
1502 tcpm_set_state(port, SNK_TRANSITION_SINK, 0);
1503 break;
1504 case SOFT_RESET_SEND:
1505 port->message_id = 0;
1506 port->rx_msgid = -1;
1507 if (port->pwr_role == TYPEC_SOURCE)
1508 next_state = SRC_SEND_CAPABILITIES;
1509 else
1510 next_state = SNK_WAIT_CAPABILITIES;
1511 tcpm_set_state(port, next_state, 0);
1512 break;
1513 case DR_SWAP_SEND:
1514 tcpm_set_state(port, DR_SWAP_CHANGE_DR, 0);
1515 break;
1516 case PR_SWAP_SEND:
1517 tcpm_set_state(port, PR_SWAP_START, 0);
1518 break;
1519 case VCONN_SWAP_SEND:
1520 tcpm_set_state(port, VCONN_SWAP_START, 0);
1521 break;
1522 default:
1523 break;
1524 }
1525 break;
1526 case PD_CTRL_SOFT_RESET:
1527 tcpm_set_state(port, SOFT_RESET, 0);
1528 break;
1529 case PD_CTRL_DR_SWAP:
1530 if (port->port_type != TYPEC_PORT_DRP) {
1531 tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
1532 break;
1533 }
1534 /*
1535 * XXX
1536 * 6.3.9: If an alternate mode is active, a request to swap
1537 * alternate modes shall trigger a port reset.
1538 */
1539 switch (port->state) {
1540 case SRC_READY:
1541 case SNK_READY:
1542 tcpm_set_state(port, DR_SWAP_ACCEPT, 0);
1543 break;
1544 default:
1545 tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
1546 break;
1547 }
1548 break;
1549 case PD_CTRL_PR_SWAP:
1550 if (port->port_type != TYPEC_PORT_DRP) {
1551 tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
1552 break;
1553 }
1554 switch (port->state) {
1555 case SRC_READY:
1556 case SNK_READY:
1557 tcpm_set_state(port, PR_SWAP_ACCEPT, 0);
1558 break;
1559 default:
1560 tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
1561 break;
1562 }
1563 break;
1564 case PD_CTRL_VCONN_SWAP:
1565 switch (port->state) {
1566 case SRC_READY:
1567 case SNK_READY:
1568 tcpm_set_state(port, VCONN_SWAP_ACCEPT, 0);
1569 break;
1570 default:
1571 tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
1572 break;
1573 }
1574 break;
1575 default:
1576 tcpm_log(port, "Unhandled ctrl message type %#x", type);
1577 break;
1578 }
1579 }
1580
1581 static void tcpm_pd_rx_handler(struct work_struct *work)
1582 {
1583 struct pd_rx_event *event = container_of(work,
1584 struct pd_rx_event, work);
1585 const struct pd_message *msg = &event->msg;
1586 unsigned int cnt = pd_header_cnt_le(msg->header);
1587 struct tcpm_port *port = event->port;
1588
1589 mutex_lock(&port->lock);
1590
1591 tcpm_log(port, "PD RX, header: %#x [%d]", le16_to_cpu(msg->header),
1592 port->attached);
1593
1594 if (port->attached) {
1595 enum pd_ctrl_msg_type type = pd_header_type_le(msg->header);
1596 unsigned int msgid = pd_header_msgid_le(msg->header);
1597
1598 /*
1599 * USB PD standard, 6.6.1.2:
1600 * "... if MessageID value in a received Message is the
1601 * same as the stored value, the receiver shall return a
1602 * GoodCRC Message with that MessageID value and drop
1603 * the Message (this is a retry of an already received
1604 * Message). Note: this shall not apply to the Soft_Reset
1605 * Message which always has a MessageID value of zero."
1606 */
1607 if (msgid == port->rx_msgid && type != PD_CTRL_SOFT_RESET)
1608 goto done;
1609 port->rx_msgid = msgid;
1610
1611 /*
1612 * If both ends believe they are DFP/host, we have a data role
1613 * mismatch.
1614 */
1615 if (!!(le16_to_cpu(msg->header) & PD_HEADER_DATA_ROLE) ==
1616 (port->data_role == TYPEC_HOST)) {
1617 tcpm_log(port,
1618 "Data role mismatch, initiating error recovery");
1619 tcpm_set_state(port, ERROR_RECOVERY, 0);
1620 } else {
1621 if (cnt)
1622 tcpm_pd_data_request(port, msg);
1623 else
1624 tcpm_pd_ctrl_request(port, msg);
1625 }
1626 }
1627
1628 done:
1629 mutex_unlock(&port->lock);
1630 kfree(event);
1631 }
1632
1633 void tcpm_pd_receive(struct tcpm_port *port, const struct pd_message *msg)
1634 {
1635 struct pd_rx_event *event;
1636
1637 event = kzalloc(sizeof(*event), GFP_ATOMIC);
1638 if (!event)
1639 return;
1640
1641 INIT_WORK(&event->work, tcpm_pd_rx_handler);
1642 event->port = port;
1643 memcpy(&event->msg, msg, sizeof(*msg));
1644 queue_work(port->wq, &event->work);
1645 }
1646 EXPORT_SYMBOL_GPL(tcpm_pd_receive);
1647
1648 static int tcpm_pd_send_control(struct tcpm_port *port,
1649 enum pd_ctrl_msg_type type)
1650 {
1651 struct pd_message msg;
1652
1653 memset(&msg, 0, sizeof(msg));
1654 msg.header = PD_HEADER_LE(type, port->pwr_role,
1655 port->data_role,
1656 port->message_id, 0);
1657
1658 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
1659 }
1660
1661 /*
1662 * Send queued message without affecting state.
1663 * Return true if state machine should go back to sleep,
1664 * false otherwise.
1665 */
1666 static bool tcpm_send_queued_message(struct tcpm_port *port)
1667 {
1668 enum pd_msg_request queued_message;
1669
1670 do {
1671 queued_message = port->queued_message;
1672 port->queued_message = PD_MSG_NONE;
1673
1674 switch (queued_message) {
1675 case PD_MSG_CTRL_WAIT:
1676 tcpm_pd_send_control(port, PD_CTRL_WAIT);
1677 break;
1678 case PD_MSG_CTRL_REJECT:
1679 tcpm_pd_send_control(port, PD_CTRL_REJECT);
1680 break;
1681 case PD_MSG_DATA_SINK_CAP:
1682 tcpm_pd_send_sink_caps(port);
1683 break;
1684 case PD_MSG_DATA_SOURCE_CAP:
1685 tcpm_pd_send_source_caps(port);
1686 break;
1687 default:
1688 break;
1689 }
1690 } while (port->queued_message != PD_MSG_NONE);
1691
1692 if (port->delayed_state != INVALID_STATE) {
1693 if (time_is_after_jiffies(port->delayed_runtime)) {
1694 mod_delayed_work(port->wq, &port->state_machine,
1695 port->delayed_runtime - jiffies);
1696 return true;
1697 }
1698 port->delayed_state = INVALID_STATE;
1699 }
1700 return false;
1701 }
1702
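/*
 * Validate the sink's Request against the source PDO it references: the
 * operating current/power must fit within the PDO, and the maximum may
 * only exceed it when the Capability Mismatch flag is set.
 */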
1703 static int tcpm_pd_check_request(struct tcpm_port *port)
1704 {
1705 u32 pdo, rdo = port->sink_request;
1706 unsigned int max, op, pdo_max, index;
1707 enum pd_pdo_type type;
1708
1709 index = rdo_index(rdo);
1710 if (!index || index > port->nr_src_pdo)
1711 return -EINVAL;
1712
1713 pdo = port->src_pdo[index - 1];
1714 type = pdo_type(pdo);
1715 switch (type) {
1716 case PDO_TYPE_FIXED:
1717 case PDO_TYPE_VAR:
1718 max = rdo_max_current(rdo);
1719 op = rdo_op_current(rdo);
1720 pdo_max = pdo_max_current(pdo);
1721
1722 if (op > pdo_max)
1723 return -EINVAL;
1724 if (max > pdo_max && !(rdo & RDO_CAP_MISMATCH))
1725 return -EINVAL;
1726
1727 if (type == PDO_TYPE_FIXED)
1728 tcpm_log(port,
1729 "Requested %u mV, %u mA for %u / %u mA",
1730 pdo_fixed_voltage(pdo), pdo_max, op, max);
1731 else
1732 tcpm_log(port,
1733 "Requested %u -> %u mV, %u mA for %u / %u mA",
1734 pdo_min_voltage(pdo), pdo_max_voltage(pdo),
1735 pdo_max, op, max);
1736 break;
1737 case PDO_TYPE_BATT:
1738 max = rdo_max_power(rdo);
1739 op = rdo_op_power(rdo);
1740 pdo_max = pdo_max_power(pdo);
1741
1742 if (op > pdo_max)
1743 return -EINVAL;
1744 if (max > pdo_max && !(rdo & RDO_CAP_MISMATCH))
1745 return -EINVAL;
1746 tcpm_log(port,
1747 "Requested %u -> %u mV, %u mW for %u / %u mW",
1748 pdo_min_voltage(pdo), pdo_max_voltage(pdo),
1749 pdo_max, op, max);
1750 break;
1751 default:
1752 return -EINVAL;
1753 }
1754
1755 port->op_vsafe5v = index == 1;
1756
1757 return 0;
1758 }
1759
1760 static int tcpm_pd_select_pdo(struct tcpm_port *port)
1761 {
1762 unsigned int i, max_mw = 0, max_mv = 0;
1763 int ret = -EINVAL;
1764
1765 /*
1766 * Select the source PDO providing the most power while staying within
1767 * the board's voltage limits. At equal power, prefer the higher voltage.
1768 */
1769 for (i = 0; i < port->nr_source_caps; i++) {
1770 u32 pdo = port->source_caps[i];
1771 enum pd_pdo_type type = pdo_type(pdo);
1772 unsigned int mv, ma, mw;
1773
1774 if (type == PDO_TYPE_FIXED)
1775 mv = pdo_fixed_voltage(pdo);
1776 else
1777 mv = pdo_min_voltage(pdo);
1778
1779 if (type == PDO_TYPE_BATT) {
1780 mw = pdo_max_power(pdo);
1781 } else {
1782 ma = min(pdo_max_current(pdo),
1783 port->max_snk_ma);
1784 mw = ma * mv / 1000;
1785 }
1786
1787 /* Prefer higher voltages if available */
1788 if ((mw > max_mw || (mw == max_mw && mv > max_mv)) &&
1789 mv <= port->max_snk_mv) {
1790 ret = i;
1791 max_mw = mw;
1792 max_mv = mv;
1793 }
1794 }
1795
1796 return ret;
1797 }
1798
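/*
 * Build the Request Data Object (RDO) for the PDO picked by
 * tcpm_pd_select_pdo(): clamp the requested current/power to the board's
 * sink limits and set RDO_CAP_MISMATCH if the offer cannot cover
 * operating_snk_mw. The chosen current/voltage are saved so they can be
 * applied once PS_RDY is received.
 */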
1799 static int tcpm_pd_build_request(struct tcpm_port *port, u32 *rdo)
1800 {
1801 unsigned int mv, ma, mw, flags;
1802 unsigned int max_ma, max_mw;
1803 enum pd_pdo_type type;
1804 int index;
1805 u32 pdo;
1806
1807 index = tcpm_pd_select_pdo(port);
1808 if (index < 0)
1809 return -EINVAL;
1810 pdo = port->source_caps[index];
1811 type = pdo_type(pdo);
1812
1813 if (type == PDO_TYPE_FIXED)
1814 mv = pdo_fixed_voltage(pdo);
1815 else
1816 mv = pdo_min_voltage(pdo);
1817
1818 /* Select maximum available current within the board's power limit */
1819 if (type == PDO_TYPE_BATT) {
1820 mw = pdo_max_power(pdo);
1821 ma = 1000 * min(mw, port->max_snk_mw) / mv;
1822 } else {
1823 ma = min(pdo_max_current(pdo),
1824 1000 * port->max_snk_mw / mv);
1825 }
1826 ma = min(ma, port->max_snk_ma);
1827
1828 flags = RDO_USB_COMM | RDO_NO_SUSPEND;
1829
1830 /* Set mismatch bit if offered power is less than operating power */
1831 mw = ma * mv / 1000;
1832 max_ma = ma;
1833 max_mw = mw;
1834 if (mw < port->operating_snk_mw) {
1835 flags |= RDO_CAP_MISMATCH;
1836 max_mw = port->operating_snk_mw;
1837 max_ma = max_mw * 1000 / mv;
1838 }
1839
1840 tcpm_log(port, "cc=%d cc1=%d cc2=%d vbus=%d vconn=%s polarity=%d",
1841 port->cc_req, port->cc1, port->cc2, port->vbus_source,
1842 port->vconn_role == TYPEC_SOURCE ? "source" : "sink",
1843 port->polarity);
1844
1845 if (type == PDO_TYPE_BATT) {
1846 *rdo = RDO_BATT(index + 1, mw, max_mw, flags);
1847
1848 tcpm_log(port, "Requesting PDO %d: %u mV, %u mW%s",
1849 index, mv, mw,
1850 flags & RDO_CAP_MISMATCH ? " [mismatch]" : "");
1851 } else {
1852 *rdo = RDO_FIXED(index + 1, ma, max_ma, flags);
1853
1854 tcpm_log(port, "Requesting PDO %d: %u mV, %u mA%s",
1855 index, mv, ma,
1856 flags & RDO_CAP_MISMATCH ? " [mismatch]" : "");
1857 }
1858
1859 port->current_limit = ma;
1860 port->supply_voltage = mv;
1861
1862 return 0;
1863 }
1864
1865 static int tcpm_pd_send_request(struct tcpm_port *port)
1866 {
1867 struct pd_message msg;
1868 int ret;
1869 u32 rdo;
1870
1871 ret = tcpm_pd_build_request(port, &rdo);
1872 if (ret < 0)
1873 return ret;
1874
1875 memset(&msg, 0, sizeof(msg));
1876 msg.header = PD_HEADER_LE(PD_DATA_REQUEST,
1877 port->pwr_role,
1878 port->data_role,
1879 port->message_id, 1);
1880 msg.payload[0] = cpu_to_le32(rdo);
1881
1882 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
1883 }
1884
1885 static int tcpm_set_vbus(struct tcpm_port *port, bool enable)
1886 {
1887 int ret;
1888
1889 if (enable && port->vbus_charge)
1890 return -EINVAL;
1891
1892 tcpm_log(port, "vbus:=%d charge=%d", enable, port->vbus_charge);
1893
1894 ret = port->tcpc->set_vbus(port->tcpc, enable, port->vbus_charge);
1895 if (ret < 0)
1896 return ret;
1897
1898 port->vbus_source = enable;
1899 return 0;
1900 }
1901
1902 static int tcpm_set_charge(struct tcpm_port *port, bool charge)
1903 {
1904 int ret;
1905
1906 if (charge && port->vbus_source)
1907 return -EINVAL;
1908
1909 if (charge != port->vbus_charge) {
1910 tcpm_log(port, "vbus=%d charge:=%d", port->vbus_source, charge);
1911 ret = port->tcpc->set_vbus(port->tcpc, port->vbus_source,
1912 charge);
1913 if (ret < 0)
1914 return ret;
1915 }
1916 port->vbus_charge = charge;
1917 return 0;
1918 }
1919
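/*
 * Hand connection detection off to the TCPC when the low-level driver
 * implements start_drp_toggling(); returns true if hardware DRP toggling
 * was started.
 */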
1920 static bool tcpm_start_drp_toggling(struct tcpm_port *port)
1921 {
1922 int ret;
1923
1924 if (port->tcpc->start_drp_toggling &&
1925 port->port_type == TYPEC_PORT_DRP) {
1926 tcpm_log_force(port, "Start DRP toggling");
1927 ret = port->tcpc->start_drp_toggling(port->tcpc,
1928 tcpm_rp_cc(port));
1929 if (!ret)
1930 return true;
1931 }
1932
1933 return false;
1934 }
1935
1936 static void tcpm_set_cc(struct tcpm_port *port, enum typec_cc_status cc)
1937 {
1938 tcpm_log(port, "cc:=%d", cc);
1939 port->cc_req = cc;
1940 port->tcpc->set_cc(port->tcpc, cc);
1941 }
1942
1943 static int tcpm_init_vbus(struct tcpm_port *port)
1944 {
1945 int ret;
1946
1947 ret = port->tcpc->set_vbus(port->tcpc, false, false);
1948 port->vbus_source = false;
1949 port->vbus_charge = false;
1950 return ret;
1951 }
1952
1953 static int tcpm_init_vconn(struct tcpm_port *port)
1954 {
1955 int ret;
1956
1957 ret = port->tcpc->set_vconn(port->tcpc, false);
1958 port->vconn_role = TYPEC_SINK;
1959 return ret;
1960 }
1961
1962 static void tcpm_typec_connect(struct tcpm_port *port)
1963 {
1964 if (!port->connected) {
1965 /* Make sure we don't report stale identity information */
1966 memset(&port->partner_ident, 0, sizeof(port->partner_ident));
1967 port->partner_desc.usb_pd = port->pd_capable;
1968 if (tcpm_port_is_debug(port))
1969 port->partner_desc.accessory = TYPEC_ACCESSORY_DEBUG;
1970 else if (tcpm_port_is_audio(port))
1971 port->partner_desc.accessory = TYPEC_ACCESSORY_AUDIO;
1972 else
1973 port->partner_desc.accessory = TYPEC_ACCESSORY_NONE;
1974 port->partner = typec_register_partner(port->typec_port,
1975 &port->partner_desc);
1976 port->connected = true;
1977 }
1978 }
1979
1980 static int tcpm_src_attach(struct tcpm_port *port)
1981 {
1982 enum typec_cc_polarity polarity =
1983 port->cc2 == TYPEC_CC_RD ? TYPEC_POLARITY_CC2
1984 : TYPEC_POLARITY_CC1;
1985 int ret;
1986
1987 if (port->attached)
1988 return 0;
1989
1990 ret = tcpm_set_polarity(port, polarity);
1991 if (ret < 0)
1992 return ret;
1993
1994 ret = tcpm_set_roles(port, true, TYPEC_SOURCE, TYPEC_HOST);
1995 if (ret < 0)
1996 return ret;
1997
1998 ret = port->tcpc->set_pd_rx(port->tcpc, true);
1999 if (ret < 0)
2000 goto out_disable_mux;
2001
2002 /*
2003 * USB Type-C specification, version 1.2,
2004 * chapter 4.5.2.2.8.1 (Attached.SRC Requirements)
2005 * Enable VCONN only if the non-RD port is set to RA.
2006 */
2007 if ((polarity == TYPEC_POLARITY_CC1 && port->cc2 == TYPEC_CC_RA) ||
2008 (polarity == TYPEC_POLARITY_CC2 && port->cc1 == TYPEC_CC_RA)) {
2009 ret = tcpm_set_vconn(port, true);
2010 if (ret < 0)
2011 goto out_disable_pd;
2012 }
2013
2014 ret = tcpm_set_vbus(port, true);
2015 if (ret < 0)
2016 goto out_disable_vconn;
2017
2018 port->pd_capable = false;
2019
2020 port->partner = NULL;
2021
2022 port->attached = true;
2023 port->send_discover = true;
2024
2025 return 0;
2026
2027 out_disable_vconn:
2028 tcpm_set_vconn(port, false);
2029 out_disable_pd:
2030 port->tcpc->set_pd_rx(port->tcpc, false);
2031 out_disable_mux:
2032 tcpm_mux_set(port, TYPEC_MUX_NONE, USB_ROLE_NONE);
2033 return ret;
2034 }
2035
2036 static void tcpm_typec_disconnect(struct tcpm_port *port)
2037 {
2038 if (port->connected) {
2039 typec_unregister_partner(port->partner);
2040 port->partner = NULL;
2041 port->connected = false;
2042 }
2043 }
2044
2045 static void tcpm_unregister_altmodes(struct tcpm_port *port)
2046 {
2047 struct pd_mode_data *modep = &port->mode_data;
2048 int i;
2049
2050 for (i = 0; i < modep->altmodes; i++) {
2051 typec_unregister_altmode(port->partner_altmode[i]);
2052 port->partner_altmode[i] = NULL;
2053 }
2054
2055 memset(modep, 0, sizeof(*modep));
2056 }
2057
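/*
 * Return the port to a clean detached state: unregister alternate modes and
 * the partner, disable PD receive, VBUS sourcing/charging and VCONN, and
 * reset polarity, mux state and the try-role counters.
 */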
2058 static void tcpm_reset_port(struct tcpm_port *port)
2059 {
2060 tcpm_unregister_altmodes(port);
2061 tcpm_typec_disconnect(port);
2062 port->attached = false;
2063 port->pd_capable = false;
2064
2065 /*
2066 * First Rx ID should be 0; set this to a sentinel of -1 so that
2067 * tcpm_pd_rx_handler() can tell whether a message was seen before.
2068 */
2069 port->rx_msgid = -1;
2070
2071 port->tcpc->set_pd_rx(port->tcpc, false);
2072 tcpm_init_vbus(port); /* also disables charging */
2073 tcpm_init_vconn(port);
2074 tcpm_set_current_limit(port, 0, 0);
2075 tcpm_set_polarity(port, TYPEC_POLARITY_CC1);
2076 tcpm_mux_set(port, TYPEC_MUX_NONE, USB_ROLE_NONE);
2077 tcpm_set_attached_state(port, false);
2078 port->try_src_count = 0;
2079 port->try_snk_count = 0;
2080 }
2081
2082 static void tcpm_detach(struct tcpm_port *port)
2083 {
2084 if (!port->attached)
2085 return;
2086
2087 if (tcpm_port_is_disconnected(port))
2088 port->hard_reset_count = 0;
2089
2090 tcpm_reset_port(port);
2091 }
2092
2093 static void tcpm_src_detach(struct tcpm_port *port)
2094 {
2095 tcpm_detach(port);
2096 }
2097
2098 static int tcpm_snk_attach(struct tcpm_port *port)
2099 {
2100 int ret;
2101
2102 if (port->attached)
2103 return 0;
2104
2105 ret = tcpm_set_polarity(port, port->cc2 != TYPEC_CC_OPEN ?
2106 TYPEC_POLARITY_CC2 : TYPEC_POLARITY_CC1);
2107 if (ret < 0)
2108 return ret;
2109
2110 ret = tcpm_set_roles(port, true, TYPEC_SINK, TYPEC_DEVICE);
2111 if (ret < 0)
2112 return ret;
2113
2114 port->pd_capable = false;
2115
2116 port->partner = NULL;
2117
2118 port->attached = true;
2119 port->send_discover = true;
2120
2121 return 0;
2122 }
2123
2124 static void tcpm_snk_detach(struct tcpm_port *port)
2125 {
2126 tcpm_detach(port);
2127 }
2128
2129 static int tcpm_acc_attach(struct tcpm_port *port)
2130 {
2131 int ret;
2132
2133 if (port->attached)
2134 return 0;
2135
2136 ret = tcpm_set_roles(port, true, TYPEC_SOURCE, TYPEC_HOST);
2137 if (ret < 0)
2138 return ret;
2139
2140 port->partner = NULL;
2141
2142 tcpm_typec_connect(port);
2143
2144 port->attached = true;
2145
2146 return 0;
2147 }
2148
2149 static void tcpm_acc_detach(struct tcpm_port *port)
2150 {
2151 tcpm_detach(port);
2152 }
2153
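/*
 * Note: helper that picks the fallback state when a hard reset is called
 * for. While retries remain (port->hard_reset_count < PD_N_HARD_RESET_COUNT)
 * another Hard Reset is sent; once they are exhausted, a PD-capable port
 * goes through error recovery, while a non-PD-capable port falls back to
 * the unattached state for its current power role, except that a sink
 * still waiting for source capabilities is treated as attached to a
 * non-PD source and moves to SNK_READY instead.
 */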
2154 static inline enum tcpm_state hard_reset_state(struct tcpm_port *port)
2155 {
2156 if (port->hard_reset_count < PD_N_HARD_RESET_COUNT)
2157 return HARD_RESET_SEND;
2158 if (port->pd_capable)
2159 return ERROR_RECOVERY;
2160 if (port->pwr_role == TYPEC_SOURCE)
2161 return SRC_UNATTACHED;
2162 if (port->state == SNK_WAIT_CAPABILITIES)
2163 return SNK_READY;
2164 return SNK_UNATTACHED;
2165 }
2166
2167 static inline enum tcpm_state ready_state(struct tcpm_port *port)
2168 {
2169 if (port->pwr_role == TYPEC_SOURCE)
2170 return SRC_READY;
2171 else
2172 return SNK_READY;
2173 }
2174
2175 static inline enum tcpm_state unattached_state(struct tcpm_port *port)
2176 {
2177 if (port->port_type == TYPEC_PORT_DRP) {
2178 if (port->pwr_role == TYPEC_SOURCE)
2179 return SRC_UNATTACHED;
2180 else
2181 return SNK_UNATTACHED;
2182 } else if (port->port_type == TYPEC_PORT_SRC) {
2183 return SRC_UNATTACHED;
2184 }
2185
2186 return SNK_UNATTACHED;
2187 }
2188
2189 static void tcpm_check_send_discover(struct tcpm_port *port)
2190 {
2191 if (port->data_role == TYPEC_HOST && port->send_discover &&
2192 port->pd_capable) {
2193 tcpm_send_vdm(port, USB_SID_PD, CMD_DISCOVER_IDENT, NULL, 0);
2194 port->send_discover = false;
2195 }
2196 }
2197
2198 static void tcpm_swap_complete(struct tcpm_port *port, int result)
2199 {
2200 if (port->swap_pending) {
2201 port->swap_status = result;
2202 port->swap_pending = false;
2203 port->non_pd_role_swap = false;
2204 complete(&port->swap_complete);
2205 }
2206 }
2207
2208 static enum typec_pwr_opmode tcpm_get_pwr_opmode(enum typec_cc_status cc)
2209 {
2210 switch (cc) {
2211 case TYPEC_CC_RP_1_5:
2212 return TYPEC_PWR_MODE_1_5A;
2213 case TYPEC_CC_RP_3_0:
2214 return TYPEC_PWR_MODE_3_0A;
2215 case TYPEC_CC_RP_DEF:
2216 default:
2217 return TYPEC_PWR_MODE_USB;
2218 }
2219 }
2220
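/*
 * Note: run_state_machine() performs the entry actions for the state
 * currently stored in port->state. It never loops by itself; any follow-up
 * transition is requested via tcpm_set_state()/tcpm_set_state_cond(), and
 * tcpm_state_machine_work() below keeps calling this function until no
 * further immediate (non-delayed) state change is pending.
 */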
2221 static void run_state_machine(struct tcpm_port *port)
2222 {
2223 int ret;
2224 enum typec_pwr_opmode opmode;
2225 unsigned int msecs;
2226
2227 port->enter_state = port->state;
2228 switch (port->state) {
2229 case DRP_TOGGLING:
2230 break;
2231 /* SRC states */
2232 case SRC_UNATTACHED:
2233 if (!port->non_pd_role_swap)
2234 tcpm_swap_complete(port, -ENOTCONN);
2235 tcpm_src_detach(port);
2236 if (tcpm_start_drp_toggling(port)) {
2237 tcpm_set_state(port, DRP_TOGGLING, 0);
2238 break;
2239 }
2240 tcpm_set_cc(port, tcpm_rp_cc(port));
2241 if (port->port_type == TYPEC_PORT_DRP)
2242 tcpm_set_state(port, SNK_UNATTACHED, PD_T_DRP_SNK);
2243 break;
2244 case SRC_ATTACH_WAIT:
2245 if (tcpm_port_is_debug(port))
2246 tcpm_set_state(port, DEBUG_ACC_ATTACHED,
2247 PD_T_CC_DEBOUNCE);
2248 else if (tcpm_port_is_audio(port))
2249 tcpm_set_state(port, AUDIO_ACC_ATTACHED,
2250 PD_T_CC_DEBOUNCE);
2251 else if (tcpm_port_is_source(port))
2252 tcpm_set_state(port,
2253 tcpm_try_snk(port) ? SNK_TRY
2254 : SRC_ATTACHED,
2255 PD_T_CC_DEBOUNCE);
2256 break;
2257
2258 case SNK_TRY:
2259 port->try_snk_count++;
2260 /*
2261 * Requirements:
2262 * - Do not drive vconn or vbus
2263 * - Terminate CC pins (both) to Rd
2264 * Action:
2265 * - Wait for tDRPTry (PD_T_DRP_TRY).
2266 * Until then, ignore any state changes.
2267 */
2268 tcpm_set_cc(port, TYPEC_CC_RD);
2269 tcpm_set_state(port, SNK_TRY_WAIT, PD_T_DRP_TRY);
2270 break;
2271 case SNK_TRY_WAIT:
2272 if (tcpm_port_is_sink(port)) {
2273 tcpm_set_state(port, SNK_TRY_WAIT_DEBOUNCE, 0);
2274 } else {
2275 tcpm_set_state(port, SRC_TRYWAIT, 0);
2276 port->max_wait = 0;
2277 }
2278 break;
2279 case SNK_TRY_WAIT_DEBOUNCE:
2280 tcpm_set_state(port, SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS,
2281 PD_T_PD_DEBOUNCE);
2282 break;
2283 case SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS:
2284 if (port->vbus_present && tcpm_port_is_sink(port)) {
2285 tcpm_set_state(port, SNK_ATTACHED, 0);
2286 } else {
2287 tcpm_set_state(port, SRC_TRYWAIT, 0);
2288 port->max_wait = 0;
2289 }
2290 break;
2291 case SRC_TRYWAIT:
2292 tcpm_set_cc(port, tcpm_rp_cc(port));
2293 if (port->max_wait == 0) {
2294 port->max_wait = jiffies +
2295 msecs_to_jiffies(PD_T_DRP_TRY);
2296 tcpm_set_state(port, SRC_TRYWAIT_UNATTACHED,
2297 PD_T_DRP_TRY);
2298 } else {
2299 if (time_is_after_jiffies(port->max_wait))
2300 tcpm_set_state(port, SRC_TRYWAIT_UNATTACHED,
2301 jiffies_to_msecs(port->max_wait -
2302 jiffies));
2303 else
2304 tcpm_set_state(port, SNK_UNATTACHED, 0);
2305 }
2306 break;
2307 case SRC_TRYWAIT_DEBOUNCE:
2308 tcpm_set_state(port, SRC_ATTACHED, PD_T_CC_DEBOUNCE);
2309 break;
2310 case SRC_TRYWAIT_UNATTACHED:
2311 tcpm_set_state(port, SNK_UNATTACHED, 0);
2312 break;
2313
2314 case SRC_ATTACHED:
2315 ret = tcpm_src_attach(port);
2316 tcpm_set_state(port, SRC_UNATTACHED,
2317 ret < 0 ? 0 : PD_T_PS_SOURCE_ON);
2318 break;
2319 case SRC_STARTUP:
2320 opmode = tcpm_get_pwr_opmode(tcpm_rp_cc(port));
2321 typec_set_pwr_opmode(port->typec_port, opmode);
2322 port->pwr_opmode = TYPEC_PWR_MODE_USB;
2323 port->caps_count = 0;
2324 port->message_id = 0;
2325 port->rx_msgid = -1;
2326 port->explicit_contract = false;
2327 tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
2328 break;
2329 case SRC_SEND_CAPABILITIES:
2330 port->caps_count++;
2331 if (port->caps_count > PD_N_CAPS_COUNT) {
2332 tcpm_set_state(port, SRC_READY, 0);
2333 break;
2334 }
2335 ret = tcpm_pd_send_source_caps(port);
2336 if (ret < 0) {
2337 tcpm_set_state(port, SRC_SEND_CAPABILITIES,
2338 PD_T_SEND_SOURCE_CAP);
2339 } else {
2340 /*
2341 * Per standard, we should clear the reset counter here.
2342 * However, that can result in state machine hang-ups.
2343 * Reset it only in READY state to improve stability.
2344 */
2345 /* port->hard_reset_count = 0; */
2346 port->caps_count = 0;
2347 port->pd_capable = true;
2348 tcpm_set_state_cond(port, hard_reset_state(port),
2349 PD_T_SEND_SOURCE_CAP);
2350 }
2351 break;
2352 case SRC_NEGOTIATE_CAPABILITIES:
2353 ret = tcpm_pd_check_request(port);
2354 if (ret < 0) {
2355 tcpm_pd_send_control(port, PD_CTRL_REJECT);
2356 if (!port->explicit_contract) {
2357 tcpm_set_state(port,
2358 SRC_WAIT_NEW_CAPABILITIES, 0);
2359 } else {
2360 tcpm_set_state(port, SRC_READY, 0);
2361 }
2362 } else {
2363 tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
2364 tcpm_set_state(port, SRC_TRANSITION_SUPPLY,
2365 PD_T_SRC_TRANSITION);
2366 }
2367 break;
2368 case SRC_TRANSITION_SUPPLY:
2369 /* XXX: regulator_set_voltage(vbus, ...) */
2370 tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
2371 port->explicit_contract = true;
2372 typec_set_pwr_opmode(port->typec_port, TYPEC_PWR_MODE_PD);
2373 port->pwr_opmode = TYPEC_PWR_MODE_PD;
2374 tcpm_set_state_cond(port, SRC_READY, 0);
2375 break;
2376 case SRC_READY:
2377 #if 1
2378 port->hard_reset_count = 0;
2379 #endif
2380 port->try_src_count = 0;
2381
2382 tcpm_swap_complete(port, 0);
2383 tcpm_typec_connect(port);
2384 tcpm_check_send_discover(port);
2385 /*
2386 * 6.3.5
2387 * Sending ping messages is not necessary if
2388 * - the source operates at vSafe5V
2389 * or
2390 * - The system is not operating in PD mode
2391 * or
2392 * - Both partners are connected using a Type-C connector
2393 *
2394 * There is no actual need to send PD messages here, since the
2395 * local port is Type-C, and the spec does not clearly say whether
2396 * PD is even possible when Type-C is connected to Type-A/B.
2397 */
2398 break;
2399 case SRC_WAIT_NEW_CAPABILITIES:
2400 /* Nothing to do... */
2401 break;
2402
2403 /* SNK states */
2404 case SNK_UNATTACHED:
2405 if (!port->non_pd_role_swap)
2406 tcpm_swap_complete(port, -ENOTCONN);
2407 tcpm_snk_detach(port);
2408 if (tcpm_start_drp_toggling(port)) {
2409 tcpm_set_state(port, DRP_TOGGLING, 0);
2410 break;
2411 }
2412 tcpm_set_cc(port, TYPEC_CC_RD);
2413 if (port->port_type == TYPEC_PORT_DRP)
2414 tcpm_set_state(port, SRC_UNATTACHED, PD_T_DRP_SRC);
2415 break;
2416 case SNK_ATTACH_WAIT:
2417 if ((port->cc1 == TYPEC_CC_OPEN &&
2418 port->cc2 != TYPEC_CC_OPEN) ||
2419 (port->cc1 != TYPEC_CC_OPEN &&
2420 port->cc2 == TYPEC_CC_OPEN))
2421 tcpm_set_state(port, SNK_DEBOUNCED,
2422 PD_T_CC_DEBOUNCE);
2423 else if (tcpm_port_is_disconnected(port))
2424 tcpm_set_state(port, SNK_UNATTACHED,
2425 PD_T_PD_DEBOUNCE);
2426 break;
2427 case SNK_DEBOUNCED:
2428 if (tcpm_port_is_disconnected(port))
2429 tcpm_set_state(port, SNK_UNATTACHED,
2430 PD_T_PD_DEBOUNCE);
2431 else if (port->vbus_present)
2432 tcpm_set_state(port,
2433 tcpm_try_src(port) ? SRC_TRY
2434 : SNK_ATTACHED,
2435 0);
2436 else
2437 /* Wait for VBUS, but not forever */
2438 tcpm_set_state(port, PORT_RESET, PD_T_PS_SOURCE_ON);
2439 break;
2440
2441 case SRC_TRY:
2442 port->try_src_count++;
2443 tcpm_set_cc(port, tcpm_rp_cc(port));
2444 port->max_wait = 0;
2445 tcpm_set_state(port, SRC_TRY_WAIT, 0);
2446 break;
2447 case SRC_TRY_WAIT:
2448 if (port->max_wait == 0) {
2449 port->max_wait = jiffies +
2450 msecs_to_jiffies(PD_T_DRP_TRY);
2451 msecs = PD_T_DRP_TRY;
2452 } else {
2453 if (time_is_after_jiffies(port->max_wait))
2454 msecs = jiffies_to_msecs(port->max_wait -
2455 jiffies);
2456 else
2457 msecs = 0;
2458 }
2459 tcpm_set_state(port, SNK_TRYWAIT, msecs);
2460 break;
2461 case SRC_TRY_DEBOUNCE:
2462 tcpm_set_state(port, SRC_ATTACHED, PD_T_PD_DEBOUNCE);
2463 break;
2464 case SNK_TRYWAIT:
2465 tcpm_set_cc(port, TYPEC_CC_RD);
2466 tcpm_set_state(port, SNK_TRYWAIT_VBUS, PD_T_CC_DEBOUNCE);
2467 break;
2468 case SNK_TRYWAIT_VBUS:
2469 /*
2470 * TCPM stays in this state indefinitely, waiting for VBUS to be
2471 * detected, as long as Rp does not remain absent for longer than
2472 * tPDDebounce.
2473 */
2474 if (port->vbus_present && tcpm_port_is_sink(port)) {
2475 tcpm_set_state(port, SNK_ATTACHED, 0);
2476 break;
2477 }
2478 if (!tcpm_port_is_sink(port))
2479 tcpm_set_state(port, SNK_TRYWAIT_DEBOUNCE, 0);
2480 break;
2481 case SNK_TRYWAIT_DEBOUNCE:
2482 tcpm_set_state(port, SNK_UNATTACHED, PD_T_PD_DEBOUNCE);
2483 break;
2484 case SNK_ATTACHED:
2485 ret = tcpm_snk_attach(port);
2486 if (ret < 0)
2487 tcpm_set_state(port, SNK_UNATTACHED, 0);
2488 else
2489 tcpm_set_state(port, SNK_STARTUP, 0);
2490 break;
2491 case SNK_STARTUP:
2492 opmode = tcpm_get_pwr_opmode(port->polarity ?
2493 port->cc2 : port->cc1);
2494 typec_set_pwr_opmode(port->typec_port, opmode);
2495 port->pwr_opmode = TYPEC_PWR_MODE_USB;
2496 port->message_id = 0;
2497 port->rx_msgid = -1;
2498 port->explicit_contract = false;
2499 tcpm_set_state(port, SNK_DISCOVERY, 0);
2500 break;
2501 case SNK_DISCOVERY:
2502 if (port->vbus_present) {
2503 tcpm_set_current_limit(port,
2504 tcpm_get_current_limit(port),
2505 5000);
2506 tcpm_set_charge(port, true);
2507 tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
2508 break;
2509 }
2510 /*
2511 * For DRP, timeouts differ. Also, handling is supposed to be
2512 * different and much more complex (dead battery detection;
2513 * see USB power delivery specification, section 8.3.3.6.1.5.1).
2514 */
2515 tcpm_set_state(port, hard_reset_state(port),
2516 port->port_type == TYPEC_PORT_DRP ?
2517 PD_T_DB_DETECT : PD_T_NO_RESPONSE);
2518 break;
2519 case SNK_DISCOVERY_DEBOUNCE:
2520 tcpm_set_state(port, SNK_DISCOVERY_DEBOUNCE_DONE,
2521 PD_T_CC_DEBOUNCE);
2522 break;
2523 case SNK_DISCOVERY_DEBOUNCE_DONE:
2524 if (!tcpm_port_is_disconnected(port) &&
2525 tcpm_port_is_sink(port) &&
2526 time_is_after_jiffies(port->delayed_runtime)) {
2527 tcpm_set_state(port, SNK_DISCOVERY,
2528 port->delayed_runtime - jiffies);
2529 break;
2530 }
2531 tcpm_set_state(port, unattached_state(port), 0);
2532 break;
2533 case SNK_WAIT_CAPABILITIES:
2534 ret = port->tcpc->set_pd_rx(port->tcpc, true);
2535 if (ret < 0) {
2536 tcpm_set_state(port, SNK_READY, 0);
2537 break;
2538 }
2539 /*
2540 * If VBUS has never been low, and we time out waiting
2541 * for source cap, try a soft reset first, in case we
2542 * were already in a stable contract before this boot.
2543 * Do this only once.
2544 */
2545 if (port->vbus_never_low) {
2546 port->vbus_never_low = false;
2547 tcpm_set_state(port, SOFT_RESET_SEND,
2548 PD_T_SINK_WAIT_CAP);
2549 } else {
2550 tcpm_set_state(port, hard_reset_state(port),
2551 PD_T_SINK_WAIT_CAP);
2552 }
2553 break;
2554 case SNK_NEGOTIATE_CAPABILITIES:
2555 port->pd_capable = true;
2556 port->hard_reset_count = 0;
2557 ret = tcpm_pd_send_request(port);
2558 if (ret < 0) {
2559 /* Let the Source send capabilities again. */
2560 tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
2561 } else {
2562 tcpm_set_state_cond(port, hard_reset_state(port),
2563 PD_T_SENDER_RESPONSE);
2564 }
2565 break;
2566 case SNK_TRANSITION_SINK:
2567 case SNK_TRANSITION_SINK_VBUS:
2568 tcpm_set_state(port, hard_reset_state(port),
2569 PD_T_PS_TRANSITION);
2570 break;
2571 case SNK_READY:
2572 port->try_snk_count = 0;
2573 if (port->explicit_contract) {
2574 typec_set_pwr_opmode(port->typec_port,
2575 TYPEC_PWR_MODE_PD);
2576 port->pwr_opmode = TYPEC_PWR_MODE_PD;
2577 }
2578
2579 tcpm_swap_complete(port, 0);
2580 tcpm_typec_connect(port);
2581 tcpm_check_send_discover(port);
2582 break;
2583
2584 /* Accessory states */
2585 case ACC_UNATTACHED:
2586 tcpm_acc_detach(port);
2587 tcpm_set_state(port, SRC_UNATTACHED, 0);
2588 break;
2589 case DEBUG_ACC_ATTACHED:
2590 case AUDIO_ACC_ATTACHED:
2591 ret = tcpm_acc_attach(port);
2592 if (ret < 0)
2593 tcpm_set_state(port, ACC_UNATTACHED, 0);
2594 break;
2595 case AUDIO_ACC_DEBOUNCE:
2596 tcpm_set_state(port, ACC_UNATTACHED, PD_T_CC_DEBOUNCE);
2597 break;
2598
2599 /* Hard_Reset states */
2600 case HARD_RESET_SEND:
2601 tcpm_pd_transmit(port, TCPC_TX_HARD_RESET, NULL);
2602 tcpm_set_state(port, HARD_RESET_START, 0);
2603 break;
2604 case HARD_RESET_START:
2605 port->hard_reset_count++;
2606 port->tcpc->set_pd_rx(port->tcpc, false);
2607 tcpm_unregister_altmodes(port);
2608 port->send_discover = true;
2609 if (port->pwr_role == TYPEC_SOURCE)
2610 tcpm_set_state(port, SRC_HARD_RESET_VBUS_OFF,
2611 PD_T_PS_HARD_RESET);
2612 else
2613 tcpm_set_state(port, SNK_HARD_RESET_SINK_OFF, 0);
2614 break;
2615 case SRC_HARD_RESET_VBUS_OFF:
2616 tcpm_set_vconn(port, true);
2617 tcpm_set_vbus(port, false);
2618 tcpm_set_roles(port, false, TYPEC_SOURCE, TYPEC_HOST);
2619 tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, PD_T_SRC_RECOVER);
2620 break;
2621 case SRC_HARD_RESET_VBUS_ON:
2622 tcpm_set_vbus(port, true);
2623 port->tcpc->set_pd_rx(port->tcpc, true);
2624 tcpm_set_attached_state(port, true);
2625 tcpm_set_state(port, SRC_UNATTACHED, PD_T_PS_SOURCE_ON);
2626 break;
2627 case SNK_HARD_RESET_SINK_OFF:
2628 tcpm_set_vconn(port, false);
2629 tcpm_set_charge(port, false);
2630 tcpm_set_roles(port, false, TYPEC_SINK, TYPEC_DEVICE);
2631 /*
2632 * VBUS may or may not toggle, depending on the adapter.
2633 * If it doesn't toggle, transition to SNK_HARD_RESET_SINK_ON
2634 * directly after timeout.
2635 */
2636 tcpm_set_state(port, SNK_HARD_RESET_SINK_ON, PD_T_SAFE_0V);
2637 break;
2638 case SNK_HARD_RESET_WAIT_VBUS:
2639 /* Assume we're disconnected if VBUS doesn't come back. */
2640 tcpm_set_state(port, SNK_UNATTACHED,
2641 PD_T_SRC_RECOVER_MAX + PD_T_SRC_TURN_ON);
2642 break;
2643 case SNK_HARD_RESET_SINK_ON:
2644 /* Note: There is no guarantee that VBUS is on in this state */
2645 /*
2646 * XXX:
2647 * The specification suggests that dual mode ports in sink
2648 * mode should transition to state PE_SRC_Transition_to_default.
2649 * See USB power delivery specification chapter 8.3.3.6.1.3.
2650 * This would mean to
2651 * - turn off VCONN, reset power supply
2652 * - request hardware reset
2653 * - turn on VCONN
2654 * - Transition to state PE_Src_Startup
2655 * SNK only ports shall transition to state Snk_Startup
2656 * (see chapter 8.3.3.3.8).
2657 * Similarly, dual-mode ports in source mode should transition
2658 * to PE_SNK_Transition_to_default.
2659 */
2660 tcpm_set_attached_state(port, true);
2661 tcpm_set_state(port, SNK_STARTUP, 0);
2662 break;
2663
2664 /* Soft_Reset states */
2665 case SOFT_RESET:
2666 port->message_id = 0;
2667 port->rx_msgid = -1;
2668 tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
2669 if (port->pwr_role == TYPEC_SOURCE)
2670 tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
2671 else
2672 tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
2673 break;
2674 case SOFT_RESET_SEND:
2675 port->message_id = 0;
2676 port->rx_msgid = -1;
2677 if (tcpm_pd_send_control(port, PD_CTRL_SOFT_RESET))
2678 tcpm_set_state_cond(port, hard_reset_state(port), 0);
2679 else
2680 tcpm_set_state_cond(port, hard_reset_state(port),
2681 PD_T_SENDER_RESPONSE);
2682 break;
2683
2684 /* DR_Swap states */
2685 case DR_SWAP_SEND:
2686 tcpm_pd_send_control(port, PD_CTRL_DR_SWAP);
2687 tcpm_set_state_cond(port, DR_SWAP_SEND_TIMEOUT,
2688 PD_T_SENDER_RESPONSE);
2689 break;
2690 case DR_SWAP_ACCEPT:
2691 tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
2692 tcpm_set_state_cond(port, DR_SWAP_CHANGE_DR, 0);
2693 break;
2694 case DR_SWAP_SEND_TIMEOUT:
2695 tcpm_swap_complete(port, -ETIMEDOUT);
2696 tcpm_set_state(port, ready_state(port), 0);
2697 break;
2698 case DR_SWAP_CHANGE_DR:
2699 if (port->data_role == TYPEC_HOST) {
2700 tcpm_unregister_altmodes(port);
2701 tcpm_set_roles(port, true, port->pwr_role,
2702 TYPEC_DEVICE);
2703 } else {
2704 tcpm_set_roles(port, true, port->pwr_role,
2705 TYPEC_HOST);
2706 port->send_discover = true;
2707 }
2708 tcpm_set_state(port, ready_state(port), 0);
2709 break;
2710
2711 /* PR_Swap states */
2712 case PR_SWAP_ACCEPT:
2713 tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
2714 tcpm_set_state(port, PR_SWAP_START, 0);
2715 break;
2716 case PR_SWAP_SEND:
2717 tcpm_pd_send_control(port, PD_CTRL_PR_SWAP);
2718 tcpm_set_state_cond(port, PR_SWAP_SEND_TIMEOUT,
2719 PD_T_SENDER_RESPONSE);
2720 break;
2721 case PR_SWAP_SEND_TIMEOUT:
2722 tcpm_swap_complete(port, -ETIMEDOUT);
2723 tcpm_set_state(port, ready_state(port), 0);
2724 break;
2725 case PR_SWAP_START:
2726 if (port->pwr_role == TYPEC_SOURCE)
2727 tcpm_set_state(port, PR_SWAP_SRC_SNK_TRANSITION_OFF,
2728 PD_T_SRC_TRANSITION);
2729 else
2730 tcpm_set_state(port, PR_SWAP_SNK_SRC_SINK_OFF, 0);
2731 break;
2732 case PR_SWAP_SRC_SNK_TRANSITION_OFF:
2733 tcpm_set_vbus(port, false);
2734 port->explicit_contract = false;
2735 /* allow time for Vbus discharge, must be < tSrcSwapStdby */
2736 tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF,
2737 PD_T_SRCSWAPSTDBY);
2738 break;
2739 case PR_SWAP_SRC_SNK_SOURCE_OFF:
2740 tcpm_set_cc(port, TYPEC_CC_RD);
2741 /* allow CC debounce */
2742 tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED,
2743 PD_T_CC_DEBOUNCE);
2744 break;
2745 case PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED:
2746 /*
2747 * USB-PD standard, 6.2.1.4, Port Power Role:
2748 * "During the Power Role Swap Sequence, for the initial Source
2749 * Port, the Port Power Role field shall be set to Sink in the
2750 * PS_RDY Message indicating that the initial Source’s power
2751 * supply is turned off"
2752 */
2753 tcpm_set_pwr_role(port, TYPEC_SINK);
2754 if (tcpm_pd_send_control(port, PD_CTRL_PS_RDY)) {
2755 tcpm_set_state(port, ERROR_RECOVERY, 0);
2756 break;
2757 }
2758 tcpm_set_state_cond(port, SNK_UNATTACHED, PD_T_PS_SOURCE_ON);
2759 break;
2760 case PR_SWAP_SRC_SNK_SINK_ON:
2761 tcpm_set_state(port, SNK_STARTUP, 0);
2762 break;
2763 case PR_SWAP_SNK_SRC_SINK_OFF:
2764 tcpm_set_charge(port, false);
2765 tcpm_set_state(port, hard_reset_state(port),
2766 PD_T_PS_SOURCE_OFF);
2767 break;
2768 case PR_SWAP_SNK_SRC_SOURCE_ON:
2769 tcpm_set_cc(port, tcpm_rp_cc(port));
2770 tcpm_set_vbus(port, true);
2771 /*
2772 * Allow time for VBUS to ramp up; must be < tNewSrc.
2773 * This window also overlaps with CC debounce, so wait for
2774 * the larger of the two, which is PD_T_NEWSRC.
2775 */
2776 tcpm_set_state(port, PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP,
2777 PD_T_NEWSRC);
2778 break;
2779 case PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP:
2780 /*
2781 * USB PD standard, 6.2.1.4:
2782 * "Subsequent Messages initiated by the Policy Engine,
2783 * such as the PS_RDY Message sent to indicate that Vbus
2784 * is ready, will have the Port Power Role field set to
2785 * Source."
2786 */
2787 tcpm_set_pwr_role(port, TYPEC_SOURCE);
2788 tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
2789 tcpm_set_state(port, SRC_STARTUP, 0);
2790 break;
2791
2792 case VCONN_SWAP_ACCEPT:
2793 tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
2794 tcpm_set_state(port, VCONN_SWAP_START, 0);
2795 break;
2796 case VCONN_SWAP_SEND:
2797 tcpm_pd_send_control(port, PD_CTRL_VCONN_SWAP);
2798 tcpm_set_state(port, VCONN_SWAP_SEND_TIMEOUT,
2799 PD_T_SENDER_RESPONSE);
2800 break;
2801 case VCONN_SWAP_SEND_TIMEOUT:
2802 tcpm_swap_complete(port, -ETIMEDOUT);
2803 tcpm_set_state(port, ready_state(port), 0);
2804 break;
2805 case VCONN_SWAP_START:
2806 if (port->vconn_role == TYPEC_SOURCE)
2807 tcpm_set_state(port, VCONN_SWAP_WAIT_FOR_VCONN, 0);
2808 else
2809 tcpm_set_state(port, VCONN_SWAP_TURN_ON_VCONN, 0);
2810 break;
2811 case VCONN_SWAP_WAIT_FOR_VCONN:
2812 tcpm_set_state(port, hard_reset_state(port),
2813 PD_T_VCONN_SOURCE_ON);
2814 break;
2815 case VCONN_SWAP_TURN_ON_VCONN:
2816 tcpm_set_vconn(port, true);
2817 tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
2818 tcpm_set_state(port, ready_state(port), 0);
2819 break;
2820 case VCONN_SWAP_TURN_OFF_VCONN:
2821 tcpm_set_vconn(port, false);
2822 tcpm_set_state(port, ready_state(port), 0);
2823 break;
2824
2825 case DR_SWAP_CANCEL:
2826 case PR_SWAP_CANCEL:
2827 case VCONN_SWAP_CANCEL:
2828 tcpm_swap_complete(port, port->swap_status);
2829 if (port->pwr_role == TYPEC_SOURCE)
2830 tcpm_set_state(port, SRC_READY, 0);
2831 else
2832 tcpm_set_state(port, SNK_READY, 0);
2833 break;
2834
2835 case BIST_RX:
2836 switch (BDO_MODE_MASK(port->bist_request)) {
2837 case BDO_MODE_CARRIER2:
2838 tcpm_pd_transmit(port, TCPC_TX_BIST_MODE_2, NULL);
2839 break;
2840 default:
2841 break;
2842 }
2843 /* Always switch to unattached state */
2844 tcpm_set_state(port, unattached_state(port), 0);
2845 break;
2846 case ERROR_RECOVERY:
2847 tcpm_swap_complete(port, -EPROTO);
2848 tcpm_set_state(port, PORT_RESET, 0);
2849 break;
2850 case PORT_RESET:
2851 tcpm_reset_port(port);
2852 tcpm_set_cc(port, TYPEC_CC_OPEN);
2853 tcpm_set_state(port, PORT_RESET_WAIT_OFF,
2854 PD_T_ERROR_RECOVERY);
2855 break;
2856 case PORT_RESET_WAIT_OFF:
2857 tcpm_set_state(port,
2858 tcpm_default_state(port),
2859 port->vbus_present ? PD_T_PS_SOURCE_OFF : 0);
2860 break;
2861 default:
2862 WARN(1, "Unexpected port state %d\n", port->state);
2863 break;
2864 }
2865 }
2866
2867 static void tcpm_state_machine_work(struct work_struct *work)
2868 {
2869 struct tcpm_port *port = container_of(work, struct tcpm_port,
2870 state_machine.work);
2871 enum tcpm_state prev_state;
2872
2873 mutex_lock(&port->lock);
2874 port->state_machine_running = true;
2875
2876 if (port->queued_message && tcpm_send_queued_message(port))
2877 goto done;
2878
2879 /* If we were queued due to a delayed state change, update it now */
2880 if (port->delayed_state) {
2881 tcpm_log(port, "state change %s -> %s [delayed %ld ms]",
2882 tcpm_states[port->state],
2883 tcpm_states[port->delayed_state], port->delay_ms);
2884 port->prev_state = port->state;
2885 port->state = port->delayed_state;
2886 port->delayed_state = INVALID_STATE;
2887 }
2888
2889 /*
2890 * Continue running as long as we have (non-delayed) state changes
2891 * to make.
2892 */
2893 do {
2894 prev_state = port->state;
2895 run_state_machine(port);
2896 if (port->queued_message)
2897 tcpm_send_queued_message(port);
2898 } while (port->state != prev_state && !port->delayed_state);
2899
2900 done:
2901 port->state_machine_running = false;
2902 mutex_unlock(&port->lock);
2903 }
2904
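/*
 * Note: the _tcpm_*() event handlers below run from tcpm_pd_event_handler()
 * (and, for CC changes, from tcpm_init()) with port->lock held. They record
 * the CC/VBUS status reported by the low-level TCPC driver and turn the
 * event into the appropriate state machine transition via tcpm_set_state().
 */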
2905 static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1,
2906 enum typec_cc_status cc2)
2907 {
2908 enum typec_cc_status old_cc1, old_cc2;
2909 enum tcpm_state new_state;
2910
2911 old_cc1 = port->cc1;
2912 old_cc2 = port->cc2;
2913 port->cc1 = cc1;
2914 port->cc2 = cc2;
2915
2916 tcpm_log_force(port,
2917 "CC1: %u -> %u, CC2: %u -> %u [state %s, polarity %d, %s]",
2918 old_cc1, cc1, old_cc2, cc2, tcpm_states[port->state],
2919 port->polarity,
2920 tcpm_port_is_disconnected(port) ? "disconnected"
2921 : "connected");
2922
2923 switch (port->state) {
2924 case DRP_TOGGLING:
2925 if (tcpm_port_is_debug(port) || tcpm_port_is_audio(port) ||
2926 tcpm_port_is_source(port))
2927 tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
2928 else if (tcpm_port_is_sink(port))
2929 tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
2930 break;
2931 case SRC_UNATTACHED:
2932 case ACC_UNATTACHED:
2933 if (tcpm_port_is_debug(port) || tcpm_port_is_audio(port) ||
2934 tcpm_port_is_source(port))
2935 tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
2936 break;
2937 case SRC_ATTACH_WAIT:
2938 if (tcpm_port_is_disconnected(port) ||
2939 tcpm_port_is_audio_detached(port))
2940 tcpm_set_state(port, SRC_UNATTACHED, 0);
2941 else if (cc1 != old_cc1 || cc2 != old_cc2)
2942 tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
2943 break;
2944 case SRC_ATTACHED:
2945 case SRC_SEND_CAPABILITIES:
2946 case SRC_READY:
2947 if (tcpm_port_is_disconnected(port) ||
2948 !tcpm_port_is_source(port))
2949 tcpm_set_state(port, SRC_UNATTACHED, 0);
2950 break;
2951 case SNK_UNATTACHED:
2952 if (tcpm_port_is_sink(port))
2953 tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
2954 break;
2955 case SNK_ATTACH_WAIT:
2956 if ((port->cc1 == TYPEC_CC_OPEN &&
2957 port->cc2 != TYPEC_CC_OPEN) ||
2958 (port->cc1 != TYPEC_CC_OPEN &&
2959 port->cc2 == TYPEC_CC_OPEN))
2960 new_state = SNK_DEBOUNCED;
2961 else if (tcpm_port_is_disconnected(port))
2962 new_state = SNK_UNATTACHED;
2963 else
2964 break;
2965 if (new_state != port->delayed_state)
2966 tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
2967 break;
2968 case SNK_DEBOUNCED:
2969 if (tcpm_port_is_disconnected(port))
2970 new_state = SNK_UNATTACHED;
2971 else if (port->vbus_present)
2972 new_state = tcpm_try_src(port) ? SRC_TRY : SNK_ATTACHED;
2973 else
2974 new_state = SNK_UNATTACHED;
2975 if (new_state != port->delayed_state)
2976 tcpm_set_state(port, SNK_DEBOUNCED, 0);
2977 break;
2978 case SNK_READY:
2979 if (tcpm_port_is_disconnected(port))
2980 tcpm_set_state(port, unattached_state(port), 0);
2981 else if (!port->pd_capable &&
2982 (cc1 != old_cc1 || cc2 != old_cc2))
2983 tcpm_set_current_limit(port,
2984 tcpm_get_current_limit(port),
2985 5000);
2986 break;
2987
2988 case AUDIO_ACC_ATTACHED:
2989 if (cc1 == TYPEC_CC_OPEN || cc2 == TYPEC_CC_OPEN)
2990 tcpm_set_state(port, AUDIO_ACC_DEBOUNCE, 0);
2991 break;
2992 case AUDIO_ACC_DEBOUNCE:
2993 if (tcpm_port_is_audio(port))
2994 tcpm_set_state(port, AUDIO_ACC_ATTACHED, 0);
2995 break;
2996
2997 case DEBUG_ACC_ATTACHED:
2998 if (cc1 == TYPEC_CC_OPEN || cc2 == TYPEC_CC_OPEN)
2999 tcpm_set_state(port, ACC_UNATTACHED, 0);
3000 break;
3001
3002 case SNK_TRY:
3003 /* Do nothing, waiting for timeout */
3004 break;
3005
3006 case SNK_DISCOVERY:
3007 /* CC line is unstable, wait for debounce */
3008 if (tcpm_port_is_disconnected(port))
3009 tcpm_set_state(port, SNK_DISCOVERY_DEBOUNCE, 0);
3010 break;
3011 case SNK_DISCOVERY_DEBOUNCE:
3012 break;
3013
3014 case SRC_TRYWAIT:
3015 /* Hand over to state machine if needed */
3016 if (!port->vbus_present && tcpm_port_is_source(port))
3017 tcpm_set_state(port, SRC_TRYWAIT_DEBOUNCE, 0);
3018 break;
3019 case SRC_TRYWAIT_DEBOUNCE:
3020 if (port->vbus_present || !tcpm_port_is_source(port))
3021 tcpm_set_state(port, SRC_TRYWAIT, 0);
3022 break;
3023 case SNK_TRY_WAIT_DEBOUNCE:
3024 if (!tcpm_port_is_sink(port)) {
3025 port->max_wait = 0;
3026 tcpm_set_state(port, SRC_TRYWAIT, 0);
3027 }
3028 break;
3029 case SRC_TRY_WAIT:
3030 if (tcpm_port_is_source(port))
3031 tcpm_set_state(port, SRC_TRY_DEBOUNCE, 0);
3032 break;
3033 case SRC_TRY_DEBOUNCE:
3034 tcpm_set_state(port, SRC_TRY_WAIT, 0);
3035 break;
3036 case SNK_TRYWAIT_DEBOUNCE:
3037 if (tcpm_port_is_sink(port))
3038 tcpm_set_state(port, SNK_TRYWAIT_VBUS, 0);
3039 break;
3040 case SNK_TRYWAIT_VBUS:
3041 if (!tcpm_port_is_sink(port))
3042 tcpm_set_state(port, SNK_TRYWAIT_DEBOUNCE, 0);
3043 break;
3044 case SNK_TRYWAIT:
3045 /* Do nothing, waiting for tCCDebounce */
3046 break;
3047 case PR_SWAP_SNK_SRC_SINK_OFF:
3048 case PR_SWAP_SRC_SNK_TRANSITION_OFF:
3049 case PR_SWAP_SRC_SNK_SOURCE_OFF:
3050 case PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED:
3051 case PR_SWAP_SNK_SRC_SOURCE_ON:
3052 /*
3053 * A CC state change is expected during PR_SWAP;
3054 * ignore it.
3055 */
3056 break;
3057
3058 default:
3059 if (tcpm_port_is_disconnected(port))
3060 tcpm_set_state(port, unattached_state(port), 0);
3061 break;
3062 }
3063 }
3064
3065 static void _tcpm_pd_vbus_on(struct tcpm_port *port)
3066 {
3067 tcpm_log_force(port, "VBUS on");
3068 port->vbus_present = true;
3069 switch (port->state) {
3070 case SNK_TRANSITION_SINK_VBUS:
3071 port->explicit_contract = true;
3072 tcpm_set_state(port, SNK_READY, 0);
3073 break;
3074 case SNK_DISCOVERY:
3075 tcpm_set_state(port, SNK_DISCOVERY, 0);
3076 break;
3077
3078 case SNK_DEBOUNCED:
3079 tcpm_set_state(port, tcpm_try_src(port) ? SRC_TRY
3080 : SNK_ATTACHED,
3081 0);
3082 break;
3083 case SNK_HARD_RESET_WAIT_VBUS:
3084 tcpm_set_state(port, SNK_HARD_RESET_SINK_ON, 0);
3085 break;
3086 case SRC_ATTACHED:
3087 tcpm_set_state(port, SRC_STARTUP, 0);
3088 break;
3089 case SRC_HARD_RESET_VBUS_ON:
3090 tcpm_set_state(port, SRC_STARTUP, 0);
3091 break;
3092
3093 case SNK_TRY:
3094 /* Do nothing, waiting for timeout */
3095 break;
3096 case SRC_TRYWAIT:
3097 /* Do nothing, waiting for Rd to be detected */
3098 break;
3099 case SRC_TRYWAIT_DEBOUNCE:
3100 tcpm_set_state(port, SRC_TRYWAIT, 0);
3101 break;
3102 case SNK_TRY_WAIT_DEBOUNCE:
3103 /* Do nothing, waiting for PD_DEBOUNCE to be done */
3104 break;
3105 case SNK_TRYWAIT:
3106 /* Do nothing, waiting for tCCDebounce */
3107 break;
3108 case SNK_TRYWAIT_VBUS:
3109 if (tcpm_port_is_sink(port))
3110 tcpm_set_state(port, SNK_ATTACHED, 0);
3111 break;
3112 case SNK_TRYWAIT_DEBOUNCE:
3113 /* Do nothing, waiting for Rp */
3114 break;
3115 case SRC_TRY_WAIT:
3116 case SRC_TRY_DEBOUNCE:
3117 /* Do nothing, waiting for sink detection */
3118 break;
3119 default:
3120 break;
3121 }
3122 }
3123
3124 static void _tcpm_pd_vbus_off(struct tcpm_port *port)
3125 {
3126 tcpm_log_force(port, "VBUS off");
3127 port->vbus_present = false;
3128 port->vbus_never_low = false;
3129 switch (port->state) {
3130 case SNK_HARD_RESET_SINK_OFF:
3131 tcpm_set_state(port, SNK_HARD_RESET_WAIT_VBUS, 0);
3132 break;
3133 case SRC_HARD_RESET_VBUS_OFF:
3134 tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, 0);
3135 break;
3136 case HARD_RESET_SEND:
3137 break;
3138
3139 case SNK_TRY:
3140 /* Do nothing, waiting for timeout */
3141 break;
3142 case SRC_TRYWAIT:
3143 /* Hand over to state machine if needed */
3144 if (tcpm_port_is_source(port))
3145 tcpm_set_state(port, SRC_TRYWAIT_DEBOUNCE, 0);
3146 break;
3147 case SNK_TRY_WAIT_DEBOUNCE:
3148 /* Do nothing, waiting for PD_DEBOUNCE to be done */
3149 break;
3150 case SNK_TRYWAIT:
3151 case SNK_TRYWAIT_VBUS:
3152 case SNK_TRYWAIT_DEBOUNCE:
3153 break;
3154 case SNK_ATTACH_WAIT:
3155 tcpm_set_state(port, SNK_UNATTACHED, 0);
3156 break;
3157
3158 case SNK_NEGOTIATE_CAPABILITIES:
3159 break;
3160
3161 case PR_SWAP_SRC_SNK_TRANSITION_OFF:
3162 tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF, 0);
3163 break;
3164
3165 case PR_SWAP_SNK_SRC_SINK_OFF:
3166 /* Do nothing, expected */
3167 break;
3168
3169 case PORT_RESET_WAIT_OFF:
3170 tcpm_set_state(port, tcpm_default_state(port), 0);
3171 break;
3172 case SRC_TRY_WAIT:
3173 case SRC_TRY_DEBOUNCE:
3174 /* Do nothing, waiting for sink detection */
3175 break;
3176 default:
3177 if (port->pwr_role == TYPEC_SINK &&
3178 port->attached)
3179 tcpm_set_state(port, SNK_UNATTACHED, 0);
3180 break;
3181 }
3182 }
3183
3184 static void _tcpm_pd_hard_reset(struct tcpm_port *port)
3185 {
3186 tcpm_log_force(port, "Received hard reset");
3187 /*
3188 * If we keep receiving hard reset requests, executing the hard reset
3189 * must have failed. Revert to error recovery if that happens.
3190 */
3191 tcpm_set_state(port,
3192 port->hard_reset_count < PD_N_HARD_RESET_COUNT ?
3193 HARD_RESET_START : ERROR_RECOVERY,
3194 0);
3195 }
3196
3197 static void tcpm_pd_event_handler(struct work_struct *work)
3198 {
3199 struct tcpm_port *port = container_of(work, struct tcpm_port,
3200 event_work);
3201 u32 events;
3202
3203 mutex_lock(&port->lock);
3204
3205 spin_lock(&port->pd_event_lock);
3206 while (port->pd_events) {
3207 events = port->pd_events;
3208 port->pd_events = 0;
3209 spin_unlock(&port->pd_event_lock);
3210 if (events & TCPM_RESET_EVENT)
3211 _tcpm_pd_hard_reset(port);
3212 if (events & TCPM_VBUS_EVENT) {
3213 bool vbus;
3214
3215 vbus = port->tcpc->get_vbus(port->tcpc);
3216 if (vbus)
3217 _tcpm_pd_vbus_on(port);
3218 else
3219 _tcpm_pd_vbus_off(port);
3220 }
3221 if (events & TCPM_CC_EVENT) {
3222 enum typec_cc_status cc1, cc2;
3223
3224 if (port->tcpc->get_cc(port->tcpc, &cc1, &cc2) == 0)
3225 _tcpm_cc_change(port, cc1, cc2);
3226 }
3227 spin_lock(&port->pd_event_lock);
3228 }
3229 spin_unlock(&port->pd_event_lock);
3230 mutex_unlock(&port->lock);
3231 }
3232
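/*
 * Note: tcpm_cc_change(), tcpm_vbus_change() and tcpm_pd_hard_reset() are
 * the notification entry points for low-level TCPC drivers. They only mark
 * the event and queue event_work; the actual CC/VBUS status is read back
 * through the tcpc_dev callbacks in tcpm_pd_event_handler(), so they are
 * typically called from the driver's threaded interrupt handler.
 *
 * A minimal sketch of such a handler; the my_tcpc structure,
 * chip->tcpm_port and the MY_TCPC_* alert accessors are hypothetical and
 * only illustrate the calling convention:
 *
 *	static irqreturn_t my_tcpc_irq_thread(int irq, void *dev_id)
 *	{
 *		struct my_tcpc *chip = dev_id;
 *		u32 alert = my_tcpc_read_alert(chip);
 *
 *		if (alert & MY_TCPC_ALERT_CC_STATUS)
 *			tcpm_cc_change(chip->tcpm_port);
 *		if (alert & MY_TCPC_ALERT_VBUS)
 *			tcpm_vbus_change(chip->tcpm_port);
 *		if (alert & MY_TCPC_ALERT_HARD_RESET)
 *			tcpm_pd_hard_reset(chip->tcpm_port);
 *
 *		return IRQ_HANDLED;
 *	}
 */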
3233 void tcpm_cc_change(struct tcpm_port *port)
3234 {
3235 spin_lock(&port->pd_event_lock);
3236 port->pd_events |= TCPM_CC_EVENT;
3237 spin_unlock(&port->pd_event_lock);
3238 queue_work(port->wq, &port->event_work);
3239 }
3240 EXPORT_SYMBOL_GPL(tcpm_cc_change);
3241
3242 void tcpm_vbus_change(struct tcpm_port *port)
3243 {
3244 spin_lock(&port->pd_event_lock);
3245 port->pd_events |= TCPM_VBUS_EVENT;
3246 spin_unlock(&port->pd_event_lock);
3247 queue_work(port->wq, &port->event_work);
3248 }
3249 EXPORT_SYMBOL_GPL(tcpm_vbus_change);
3250
3251 void tcpm_pd_hard_reset(struct tcpm_port *port)
3252 {
3253 spin_lock(&port->pd_event_lock);
3254 port->pd_events = TCPM_RESET_EVENT;
3255 spin_unlock(&port->pd_event_lock);
3256 queue_work(port->wq, &port->event_work);
3257 }
3258 EXPORT_SYMBOL_GPL(tcpm_pd_hard_reset);
3259
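/*
 * Note: tcpm_dr_set(), tcpm_pr_set() and tcpm_vconn_set() are the typec
 * class callbacks wired up in tcpm_register_port(). Each one kicks off the
 * corresponding swap state machine and then blocks the caller on
 * swap_complete until tcpm_swap_complete() reports the result or
 * PD_ROLE_SWAP_TIMEOUT expires.
 */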
3260 static int tcpm_dr_set(const struct typec_capability *cap,
3261 enum typec_data_role data)
3262 {
3263 struct tcpm_port *port = typec_cap_to_tcpm(cap);
3264 int ret;
3265
3266 mutex_lock(&port->swap_lock);
3267 mutex_lock(&port->lock);
3268
3269 if (port->port_type != TYPEC_PORT_DRP) {
3270 ret = -EINVAL;
3271 goto port_unlock;
3272 }
3273 if (port->state != SRC_READY && port->state != SNK_READY) {
3274 ret = -EAGAIN;
3275 goto port_unlock;
3276 }
3277
3278 if (port->data_role == data) {
3279 ret = 0;
3280 goto port_unlock;
3281 }
3282
3283 /*
3284 * XXX
3285 * 6.3.9: If an alternate mode is active, a request to swap
3286 * alternate modes shall trigger a port reset.
3287 * Reject data role swap request in this case.
3288 */
3289
3290 if (!port->pd_capable) {
3291 /*
3292 * If the partner is not PD capable, reset the port to
3293 * trigger a role change. This can only work if a preferred
3294 * role is configured, and if it matches the requested role.
3295 */
3296 if (port->try_role == TYPEC_NO_PREFERRED_ROLE ||
3297 port->try_role == port->pwr_role) {
3298 ret = -EINVAL;
3299 goto port_unlock;
3300 }
3301 port->non_pd_role_swap = true;
3302 tcpm_set_state(port, PORT_RESET, 0);
3303 } else {
3304 tcpm_set_state(port, DR_SWAP_SEND, 0);
3305 }
3306
3307 port->swap_status = 0;
3308 port->swap_pending = true;
3309 reinit_completion(&port->swap_complete);
3310 mutex_unlock(&port->lock);
3311
3312 if (!wait_for_completion_timeout(&port->swap_complete,
3313 msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT)))
3314 ret = -ETIMEDOUT;
3315 else
3316 ret = port->swap_status;
3317
3318 port->non_pd_role_swap = false;
3319 goto swap_unlock;
3320
3321 port_unlock:
3322 mutex_unlock(&port->lock);
3323 swap_unlock:
3324 mutex_unlock(&port->swap_lock);
3325 return ret;
3326 }
3327
3328 static int tcpm_pr_set(const struct typec_capability *cap,
3329 enum typec_role role)
3330 {
3331 struct tcpm_port *port = typec_cap_to_tcpm(cap);
3332 int ret;
3333
3334 mutex_lock(&port->swap_lock);
3335 mutex_lock(&port->lock);
3336
3337 if (port->port_type != TYPEC_PORT_DRP) {
3338 ret = -EINVAL;
3339 goto port_unlock;
3340 }
3341 if (port->state != SRC_READY && port->state != SNK_READY) {
3342 ret = -EAGAIN;
3343 goto port_unlock;
3344 }
3345
3346 if (role == port->pwr_role) {
3347 ret = 0;
3348 goto port_unlock;
3349 }
3350
3351 port->swap_status = 0;
3352 port->swap_pending = true;
3353 reinit_completion(&port->swap_complete);
3354 tcpm_set_state(port, PR_SWAP_SEND, 0);
3355 mutex_unlock(&port->lock);
3356
3357 if (!wait_for_completion_timeout(&port->swap_complete,
3358 msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT)))
3359 ret = -ETIMEDOUT;
3360 else
3361 ret = port->swap_status;
3362
3363 goto swap_unlock;
3364
3365 port_unlock:
3366 mutex_unlock(&port->lock);
3367 swap_unlock:
3368 mutex_unlock(&port->swap_lock);
3369 return ret;
3370 }
3371
3372 static int tcpm_vconn_set(const struct typec_capability *cap,
3373 enum typec_role role)
3374 {
3375 struct tcpm_port *port = typec_cap_to_tcpm(cap);
3376 int ret;
3377
3378 mutex_lock(&port->swap_lock);
3379 mutex_lock(&port->lock);
3380
3381 if (port->state != SRC_READY && port->state != SNK_READY) {
3382 ret = -EAGAIN;
3383 goto port_unlock;
3384 }
3385
3386 if (role == port->vconn_role) {
3387 ret = 0;
3388 goto port_unlock;
3389 }
3390
3391 port->swap_status = 0;
3392 port->swap_pending = true;
3393 reinit_completion(&port->swap_complete);
3394 tcpm_set_state(port, VCONN_SWAP_SEND, 0);
3395 mutex_unlock(&port->lock);
3396
3397 if (!wait_for_completion_timeout(&port->swap_complete,
3398 msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT)))
3399 ret = -ETIMEDOUT;
3400 else
3401 ret = port->swap_status;
3402
3403 goto swap_unlock;
3404
3405 port_unlock:
3406 mutex_unlock(&port->lock);
3407 swap_unlock:
3408 mutex_unlock(&port->swap_lock);
3409 return ret;
3410 }
3411
3412 static int tcpm_try_role(const struct typec_capability *cap, int role)
3413 {
3414 struct tcpm_port *port = typec_cap_to_tcpm(cap);
3415 struct tcpc_dev *tcpc = port->tcpc;
3416 int ret = 0;
3417
3418 mutex_lock(&port->lock);
3419 if (tcpc->try_role)
3420 ret = tcpc->try_role(tcpc, role);
3421 if (!ret && !tcpc->config->try_role_hw)
3422 port->try_role = role;
3423 port->try_src_count = 0;
3424 port->try_snk_count = 0;
3425 mutex_unlock(&port->lock);
3426
3427 return ret;
3428 }
3429
3430 static void tcpm_init(struct tcpm_port *port)
3431 {
3432 enum typec_cc_status cc1, cc2;
3433
3434 port->tcpc->init(port->tcpc);
3435
3436 tcpm_reset_port(port);
3437
3438 /*
3439 * XXX
3440 * Should possibly wait for VBUS to settle if it was enabled locally
3441 * since tcpm_reset_port() will disable VBUS.
3442 */
3443 port->vbus_present = port->tcpc->get_vbus(port->tcpc);
3444 if (port->vbus_present)
3445 port->vbus_never_low = true;
3446
3447 tcpm_set_state(port, tcpm_default_state(port), 0);
3448
3449 if (port->tcpc->get_cc(port->tcpc, &cc1, &cc2) == 0)
3450 _tcpm_cc_change(port, cc1, cc2);
3451
3452 /*
3453 * Some adapters need a clean slate at startup, and won't recover
3454 * otherwise. So do not try to be fancy and force a clean disconnect.
3455 */
3456 tcpm_set_state(port, PORT_RESET, 0);
3457 }
3458
3459 static int tcpm_port_type_set(const struct typec_capability *cap,
3460 enum typec_port_type type)
3461 {
3462 struct tcpm_port *port = typec_cap_to_tcpm(cap);
3463
3464 mutex_lock(&port->lock);
3465 if (type == port->port_type)
3466 goto port_unlock;
3467
3468 port->port_type = type;
3469
3470 if (!port->connected) {
3471 tcpm_set_state(port, PORT_RESET, 0);
3472 } else if (type == TYPEC_PORT_SNK) {
3473 if (!(port->pwr_role == TYPEC_SINK &&
3474 port->data_role == TYPEC_DEVICE))
3475 tcpm_set_state(port, PORT_RESET, 0);
3476 } else if (type == TYPEC_PORT_SRC) {
3477 if (!(port->pwr_role == TYPEC_SOURCE &&
3478 port->data_role == TYPEC_HOST))
3479 tcpm_set_state(port, PORT_RESET, 0);
3480 }
3481
3482 port_unlock:
3483 mutex_unlock(&port->lock);
3484 return 0;
3485 }
3486
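/*
 * Note: a low-level driver calls tcpm_tcpc_reset() after it has reset or
 * re-initialized its TCPC hardware; TCPM then re-runs tcpm_init() under
 * port->lock, bringing the port back to a clean default state (an existing
 * PD connection is not preserved, see the XXX note below).
 */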
3487 void tcpm_tcpc_reset(struct tcpm_port *port)
3488 {
3489 mutex_lock(&port->lock);
3490 /* XXX: Maintain PD connection if possible? */
3491 tcpm_init(port);
3492 mutex_unlock(&port->lock);
3493 }
3494 EXPORT_SYMBOL_GPL(tcpm_tcpc_reset);
3495
3496 static int tcpm_copy_pdos(u32 *dest_pdo, const u32 *src_pdo,
3497 unsigned int nr_pdo)
3498 {
3499 unsigned int i;
3500
3501 if (nr_pdo > PDO_MAX_OBJECTS)
3502 nr_pdo = PDO_MAX_OBJECTS;
3503
3504 for (i = 0; i < nr_pdo; i++)
3505 dest_pdo[i] = src_pdo[i];
3506
3507 return nr_pdo;
3508 }
3509
3510 static int tcpm_copy_vdos(u32 *dest_vdo, const u32 *src_vdo,
3511 unsigned int nr_vdo)
3512 {
3513 unsigned int i;
3514
3515 if (nr_vdo > VDO_MAX_OBJECTS)
3516 nr_vdo = VDO_MAX_OBJECTS;
3517
3518 for (i = 0; i < nr_vdo; i++)
3519 dest_vdo[i] = src_vdo[i];
3520
3521 return nr_vdo;
3522 }
3523
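/*
 * Note: tcpm_update_source_capabilities() lets a low-level driver replace
 * the advertised source PDOs at runtime. It updates the Rp advertisement in
 * the unattached/wait states and re-sends Source_Capabilities if a source
 * contract is being negotiated or is already in place.
 *
 * A minimal sketch, assuming a driver that wants to offer a single fixed
 * 5V/3A PDO (the variable names and chip->tcpm_port are illustrative only):
 *
 *	static const u32 new_src_pdo[] = {
 *		PDO_FIXED(5000, 3000,
 *			  PDO_FIXED_DUAL_ROLE | PDO_FIXED_DATA_SWAP),
 *	};
 *
 *	ret = tcpm_update_source_capabilities(chip->tcpm_port, new_src_pdo,
 *					      ARRAY_SIZE(new_src_pdo));
 */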
3524 int tcpm_update_source_capabilities(struct tcpm_port *port, const u32 *pdo,
3525 unsigned int nr_pdo)
3526 {
3527 if (tcpm_validate_caps(port, pdo, nr_pdo))
3528 return -EINVAL;
3529
3530 mutex_lock(&port->lock);
3531 port->nr_src_pdo = tcpm_copy_pdos(port->src_pdo, pdo, nr_pdo);
3532 switch (port->state) {
3533 case SRC_UNATTACHED:
3534 case SRC_ATTACH_WAIT:
3535 case SRC_TRYWAIT:
3536 tcpm_set_cc(port, tcpm_rp_cc(port));
3537 break;
3538 case SRC_SEND_CAPABILITIES:
3539 case SRC_NEGOTIATE_CAPABILITIES:
3540 case SRC_READY:
3541 case SRC_WAIT_NEW_CAPABILITIES:
3542 tcpm_set_cc(port, tcpm_rp_cc(port));
3543 tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
3544 break;
3545 default:
3546 break;
3547 }
3548 mutex_unlock(&port->lock);
3549 return 0;
3550 }
3551 EXPORT_SYMBOL_GPL(tcpm_update_source_capabilities);
3552
3553 int tcpm_update_sink_capabilities(struct tcpm_port *port, const u32 *pdo,
3554 unsigned int nr_pdo,
3555 unsigned int max_snk_mv,
3556 unsigned int max_snk_ma,
3557 unsigned int max_snk_mw,
3558 unsigned int operating_snk_mw)
3559 {
3560 if (tcpm_validate_caps(port, pdo, nr_pdo))
3561 return -EINVAL;
3562
3563 mutex_lock(&port->lock);
3564 port->nr_snk_pdo = tcpm_copy_pdos(port->snk_pdo, pdo, nr_pdo);
3565 port->max_snk_mv = max_snk_mv;
3566 port->max_snk_ma = max_snk_ma;
3567 port->max_snk_mw = max_snk_mw;
3568 port->operating_snk_mw = operating_snk_mw;
3569
3570 switch (port->state) {
3571 case SNK_NEGOTIATE_CAPABILITIES:
3572 case SNK_READY:
3573 case SNK_TRANSITION_SINK:
3574 case SNK_TRANSITION_SINK_VBUS:
3575 tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0);
3576 break;
3577 default:
3578 break;
3579 }
3580 mutex_unlock(&port->lock);
3581 return 0;
3582 }
3583 EXPORT_SYMBOL_GPL(tcpm_update_sink_capabilities);
3584
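/*
 * Note: a low-level TCPC driver registers with TCPM by filling in a struct
 * tcpc_dev (config, get_vbus, set_cc, get_cc, set_polarity, set_vconn,
 * set_vbus, set_pd_rx, set_roles and pd_transmit are mandatory, as checked
 * below) and calling tcpm_register_port() from its probe path, with
 * tcpm_unregister_port() as the matching teardown.
 *
 * A minimal sketch; my_tcpc, the my_tcpc_* callbacks and my_tcpc_config are
 * hypothetical placeholders for the driver's own implementation:
 *
 *	static int my_tcpc_register(struct device *dev, struct my_tcpc *chip)
 *	{
 *		chip->tcpc.config = &my_tcpc_config;
 *		chip->tcpc.init = my_tcpc_init;
 *		chip->tcpc.get_vbus = my_tcpc_get_vbus;
 *		chip->tcpc.set_cc = my_tcpc_set_cc;
 *		chip->tcpc.get_cc = my_tcpc_get_cc;
 *		chip->tcpc.set_polarity = my_tcpc_set_polarity;
 *		chip->tcpc.set_vconn = my_tcpc_set_vconn;
 *		chip->tcpc.set_vbus = my_tcpc_set_vbus;
 *		chip->tcpc.set_pd_rx = my_tcpc_set_pd_rx;
 *		chip->tcpc.set_roles = my_tcpc_set_roles;
 *		chip->tcpc.pd_transmit = my_tcpc_pd_transmit;
 *
 *		chip->tcpm_port = tcpm_register_port(dev, &chip->tcpc);
 *		return PTR_ERR_OR_ZERO(chip->tcpm_port);
 *	}
 */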
3585 struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
3586 {
3587 struct tcpm_port *port;
3588 int i, err;
3589
3590 if (!dev || !tcpc || !tcpc->config ||
3591 !tcpc->get_vbus || !tcpc->set_cc || !tcpc->get_cc ||
3592 !tcpc->set_polarity || !tcpc->set_vconn || !tcpc->set_vbus ||
3593 !tcpc->set_pd_rx || !tcpc->set_roles || !tcpc->pd_transmit)
3594 return ERR_PTR(-EINVAL);
3595
3596 port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
3597 if (!port)
3598 return ERR_PTR(-ENOMEM);
3599
3600 port->dev = dev;
3601 port->tcpc = tcpc;
3602
3603 mutex_init(&port->lock);
3604 mutex_init(&port->swap_lock);
3605
3606 port->wq = create_singlethread_workqueue(dev_name(dev));
3607 if (!port->wq)
3608 return ERR_PTR(-ENOMEM);
3609 INIT_DELAYED_WORK(&port->state_machine, tcpm_state_machine_work);
3610 INIT_DELAYED_WORK(&port->vdm_state_machine, vdm_state_machine_work);
3611 INIT_WORK(&port->event_work, tcpm_pd_event_handler);
3612
3613 spin_lock_init(&port->pd_event_lock);
3614
3615 init_completion(&port->tx_complete);
3616 init_completion(&port->swap_complete);
3617 tcpm_debugfs_init(port);
3618
3619 if (tcpm_validate_caps(port, tcpc->config->src_pdo,
3620 tcpc->config->nr_src_pdo) ||
3621 tcpm_validate_caps(port, tcpc->config->snk_pdo,
3622 tcpc->config->nr_snk_pdo)) {
3623 err = -EINVAL;
3624 goto out_destroy_wq;
3625 }
3626 port->nr_src_pdo = tcpm_copy_pdos(port->src_pdo, tcpc->config->src_pdo,
3627 tcpc->config->nr_src_pdo);
3628 port->nr_snk_pdo = tcpm_copy_pdos(port->snk_pdo, tcpc->config->snk_pdo,
3629 tcpc->config->nr_snk_pdo);
3630 port->nr_snk_vdo = tcpm_copy_vdos(port->snk_vdo, tcpc->config->snk_vdo,
3631 tcpc->config->nr_snk_vdo);
3632
3633 port->max_snk_mv = tcpc->config->max_snk_mv;
3634 port->max_snk_ma = tcpc->config->max_snk_ma;
3635 port->max_snk_mw = tcpc->config->max_snk_mw;
3636 port->operating_snk_mw = tcpc->config->operating_snk_mw;
3637 if (!tcpc->config->try_role_hw)
3638 port->try_role = tcpc->config->default_role;
3639 else
3640 port->try_role = TYPEC_NO_PREFERRED_ROLE;
3641
3642 port->typec_caps.prefer_role = tcpc->config->default_role;
3643 port->typec_caps.type = tcpc->config->type;
3644 port->typec_caps.data = tcpc->config->data;
3645 port->typec_caps.revision = 0x0120; /* Type-C spec release 1.2 */
3646 port->typec_caps.pd_revision = 0x0200; /* USB-PD spec release 2.0 */
3647 port->typec_caps.dr_set = tcpm_dr_set;
3648 port->typec_caps.pr_set = tcpm_pr_set;
3649 port->typec_caps.vconn_set = tcpm_vconn_set;
3650 port->typec_caps.try_role = tcpm_try_role;
3651 port->typec_caps.port_type_set = tcpm_port_type_set;
3652
3653 port->partner_desc.identity = &port->partner_ident;
3654 port->port_type = tcpc->config->type;
3655
3656 port->typec_port = typec_register_port(port->dev, &port->typec_caps);
3657 if (IS_ERR(port->typec_port)) {
3658 err = PTR_ERR(port->typec_port);
3659 goto out_destroy_wq;
3660 }
3661
3662 if (tcpc->config->alt_modes) {
3663 const struct typec_altmode_desc *paltmode = tcpc->config->alt_modes;
3664
3665 i = 0;
3666 while (paltmode->svid && i < ARRAY_SIZE(port->port_altmode)) {
3667 struct typec_altmode *alt;
3668
3669 alt = typec_port_register_altmode(port->typec_port,
3670 paltmode);
3671 if (IS_ERR(alt)) {
3672 tcpm_log(port,
3673 "%s: failed to register port alternate mode 0x%x",
3674 dev_name(dev), paltmode->svid);
3675 break;
3676 }
3677 port->port_altmode[i] = alt;
3678 i++;
3679 paltmode++;
3680 }
3681 }
3682
3683 mutex_lock(&port->lock);
3684 tcpm_init(port);
3685 mutex_unlock(&port->lock);
3686
3687 tcpm_log(port, "%s: registered", dev_name(dev));
3688 return port;
3689
3690 out_destroy_wq:
3691 destroy_workqueue(port->wq);
3692 return ERR_PTR(err);
3693 }
3694 EXPORT_SYMBOL_GPL(tcpm_register_port);
3695
3696 void tcpm_unregister_port(struct tcpm_port *port)
3697 {
3698 int i;
3699
3700 tcpm_reset_port(port);
3701 for (i = 0; i < ARRAY_SIZE(port->port_altmode); i++)
3702 typec_unregister_altmode(port->port_altmode[i]);
3703 typec_unregister_port(port->typec_port);
3704 tcpm_debugfs_exit(port);
3705 destroy_workqueue(port->wq);
3706 }
3707 EXPORT_SYMBOL_GPL(tcpm_unregister_port);
3708
3709 MODULE_AUTHOR("Guenter Roeck <groeck@chromium.org>");
3710 MODULE_DESCRIPTION("USB Type-C Port Manager");
3711 MODULE_LICENSE("GPL");