]> git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/blame - drivers/usb/typec/tcpm.c
Merge branches 'x86/amd', 'x86/vt-d', 'arm/rockchip', 'arm/omap', 'arm/mediatek'...
[mirror_ubuntu-hirsute-kernel.git] / drivers / usb / typec / tcpm.c
CommitLineData
956c36c2 1// SPDX-License-Identifier: GPL-2.0+
f0690a25
GR
2/*
3 * Copyright 2015-2017 Google, Inc
4 *
f0690a25
GR
5 * USB Power Delivery protocol stack.
6 */
7
8#include <linux/completion.h>
9#include <linux/debugfs.h>
10#include <linux/device.h>
02d5be46 11#include <linux/jiffies.h>
f0690a25
GR
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/mutex.h>
15#include <linux/proc_fs.h>
16#include <linux/sched/clock.h>
17#include <linux/seq_file.h>
18#include <linux/slab.h>
19#include <linux/spinlock.h>
4b4e02c8
GR
20#include <linux/usb/pd.h>
21#include <linux/usb/pd_bdo.h>
22#include <linux/usb/pd_vdo.h>
23#include <linux/usb/tcpm.h>
f0690a25
GR
24#include <linux/usb/typec.h>
25#include <linux/workqueue.h>
26
f0690a25
GR
27#define FOREACH_STATE(S) \
28 S(INVALID_STATE), \
29 S(DRP_TOGGLING), \
30 S(SRC_UNATTACHED), \
31 S(SRC_ATTACH_WAIT), \
32 S(SRC_ATTACHED), \
33 S(SRC_STARTUP), \
34 S(SRC_SEND_CAPABILITIES), \
35 S(SRC_NEGOTIATE_CAPABILITIES), \
36 S(SRC_TRANSITION_SUPPLY), \
37 S(SRC_READY), \
38 S(SRC_WAIT_NEW_CAPABILITIES), \
39 \
40 S(SNK_UNATTACHED), \
41 S(SNK_ATTACH_WAIT), \
42 S(SNK_DEBOUNCED), \
43 S(SNK_ATTACHED), \
44 S(SNK_STARTUP), \
45 S(SNK_DISCOVERY), \
46 S(SNK_DISCOVERY_DEBOUNCE), \
47 S(SNK_DISCOVERY_DEBOUNCE_DONE), \
48 S(SNK_WAIT_CAPABILITIES), \
49 S(SNK_NEGOTIATE_CAPABILITIES), \
50 S(SNK_TRANSITION_SINK), \
51 S(SNK_TRANSITION_SINK_VBUS), \
52 S(SNK_READY), \
53 \
54 S(ACC_UNATTACHED), \
55 S(DEBUG_ACC_ATTACHED), \
56 S(AUDIO_ACC_ATTACHED), \
57 S(AUDIO_ACC_DEBOUNCE), \
58 \
59 S(HARD_RESET_SEND), \
60 S(HARD_RESET_START), \
61 S(SRC_HARD_RESET_VBUS_OFF), \
62 S(SRC_HARD_RESET_VBUS_ON), \
63 S(SNK_HARD_RESET_SINK_OFF), \
64 S(SNK_HARD_RESET_WAIT_VBUS), \
65 S(SNK_HARD_RESET_SINK_ON), \
66 \
67 S(SOFT_RESET), \
68 S(SOFT_RESET_SEND), \
69 \
70 S(DR_SWAP_ACCEPT), \
71 S(DR_SWAP_SEND), \
72 S(DR_SWAP_SEND_TIMEOUT), \
73 S(DR_SWAP_CANCEL), \
74 S(DR_SWAP_CHANGE_DR), \
75 \
76 S(PR_SWAP_ACCEPT), \
77 S(PR_SWAP_SEND), \
78 S(PR_SWAP_SEND_TIMEOUT), \
79 S(PR_SWAP_CANCEL), \
80 S(PR_SWAP_START), \
81 S(PR_SWAP_SRC_SNK_TRANSITION_OFF), \
82 S(PR_SWAP_SRC_SNK_SOURCE_OFF), \
b965b631 83 S(PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED), \
f0690a25
GR
84 S(PR_SWAP_SRC_SNK_SINK_ON), \
85 S(PR_SWAP_SNK_SRC_SINK_OFF), \
86 S(PR_SWAP_SNK_SRC_SOURCE_ON), \
b965b631 87 S(PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP), \
f0690a25
GR
88 \
89 S(VCONN_SWAP_ACCEPT), \
90 S(VCONN_SWAP_SEND), \
91 S(VCONN_SWAP_SEND_TIMEOUT), \
92 S(VCONN_SWAP_CANCEL), \
93 S(VCONN_SWAP_START), \
94 S(VCONN_SWAP_WAIT_FOR_VCONN), \
95 S(VCONN_SWAP_TURN_ON_VCONN), \
96 S(VCONN_SWAP_TURN_OFF_VCONN), \
97 \
98 S(SNK_TRY), \
99 S(SNK_TRY_WAIT), \
a0a3e04e
BJS
100 S(SNK_TRY_WAIT_DEBOUNCE), \
101 S(SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS), \
f0690a25 102 S(SRC_TRYWAIT), \
02d5be46 103 S(SRC_TRYWAIT_DEBOUNCE), \
f0690a25
GR
104 S(SRC_TRYWAIT_UNATTACHED), \
105 \
106 S(SRC_TRY), \
131c7d12 107 S(SRC_TRY_WAIT), \
f0690a25
GR
108 S(SRC_TRY_DEBOUNCE), \
109 S(SNK_TRYWAIT), \
110 S(SNK_TRYWAIT_DEBOUNCE), \
111 S(SNK_TRYWAIT_VBUS), \
112 S(BIST_RX), \
113 \
114 S(ERROR_RECOVERY), \
b17dd571
GR
115 S(PORT_RESET), \
116 S(PORT_RESET_WAIT_OFF)
f0690a25
GR
117
118#define GENERATE_ENUM(e) e
119#define GENERATE_STRING(s) #s
120
121enum tcpm_state {
122 FOREACH_STATE(GENERATE_ENUM)
123};
124
125static const char * const tcpm_states[] = {
126 FOREACH_STATE(GENERATE_STRING)
127};
128
129enum vdm_states {
130 VDM_STATE_ERR_BUSY = -3,
131 VDM_STATE_ERR_SEND = -2,
132 VDM_STATE_ERR_TMOUT = -1,
133 VDM_STATE_DONE = 0,
134 /* Anything >0 represents an active state */
135 VDM_STATE_READY = 1,
136 VDM_STATE_BUSY = 2,
137 VDM_STATE_WAIT_RSP_BUSY = 3,
138};
139
140enum pd_msg_request {
141 PD_MSG_NONE = 0,
142 PD_MSG_CTRL_REJECT,
143 PD_MSG_CTRL_WAIT,
144 PD_MSG_DATA_SINK_CAP,
145 PD_MSG_DATA_SOURCE_CAP,
146};
147
148/* Events from low level driver */
149
150#define TCPM_CC_EVENT BIT(0)
151#define TCPM_VBUS_EVENT BIT(1)
152#define TCPM_RESET_EVENT BIT(2)
153
154#define LOG_BUFFER_ENTRIES 1024
155#define LOG_BUFFER_ENTRY_SIZE 128
156
157/* Alternate mode support */
158
159#define SVID_DISCOVERY_MAX 16
160
161struct pd_mode_data {
162 int svid_index; /* current SVID index */
163 int nsvids;
164 u16 svids[SVID_DISCOVERY_MAX];
165 int altmodes; /* number of alternate modes */
166 struct typec_altmode_desc altmode_desc[SVID_DISCOVERY_MAX];
167};
168
169struct tcpm_port {
170 struct device *dev;
171
172 struct mutex lock; /* tcpm state machine lock */
173 struct workqueue_struct *wq;
174
175 struct typec_capability typec_caps;
176 struct typec_port *typec_port;
177
178 struct tcpc_dev *tcpc;
179
180 enum typec_role vconn_role;
181 enum typec_role pwr_role;
182 enum typec_data_role data_role;
183 enum typec_pwr_opmode pwr_opmode;
184
185 struct usb_pd_identity partner_ident;
186 struct typec_partner_desc partner_desc;
187 struct typec_partner *partner;
188
189 enum typec_cc_status cc_req;
190
191 enum typec_cc_status cc1;
192 enum typec_cc_status cc2;
193 enum typec_cc_polarity polarity;
194
195 bool attached;
196 bool connected;
9b0ae699 197 enum typec_port_type port_type;
f0690a25
GR
198 bool vbus_present;
199 bool vbus_never_low;
200 bool vbus_source;
201 bool vbus_charge;
202
203 bool send_discover;
204 bool op_vsafe5v;
205
206 int try_role;
207 int try_snk_count;
208 int try_src_count;
209
210 enum pd_msg_request queued_message;
211
212 enum tcpm_state enter_state;
213 enum tcpm_state prev_state;
214 enum tcpm_state state;
215 enum tcpm_state delayed_state;
216 unsigned long delayed_runtime;
217 unsigned long delay_ms;
218
219 spinlock_t pd_event_lock;
220 u32 pd_events;
221
222 struct work_struct event_work;
223 struct delayed_work state_machine;
224 struct delayed_work vdm_state_machine;
225 bool state_machine_running;
226
227 struct completion tx_complete;
228 enum tcpm_transmit_status tx_status;
229
230 struct mutex swap_lock; /* swap command lock */
231 bool swap_pending;
b17dd571 232 bool non_pd_role_swap;
f0690a25
GR
233 struct completion swap_complete;
234 int swap_status;
235
236 unsigned int message_id;
237 unsigned int caps_count;
238 unsigned int hard_reset_count;
239 bool pd_capable;
240 bool explicit_contract;
5fec4b54 241 unsigned int rx_msgid;
f0690a25
GR
242
243 /* Partner capabilities/requests */
244 u32 sink_request;
245 u32 source_caps[PDO_MAX_OBJECTS];
246 unsigned int nr_source_caps;
247 u32 sink_caps[PDO_MAX_OBJECTS];
248 unsigned int nr_sink_caps;
249
250 /* Local capabilities */
251 u32 src_pdo[PDO_MAX_OBJECTS];
252 unsigned int nr_src_pdo;
253 u32 snk_pdo[PDO_MAX_OBJECTS];
254 unsigned int nr_snk_pdo;
193a6801
GR
255 u32 snk_vdo[VDO_MAX_OBJECTS];
256 unsigned int nr_snk_vdo;
f0690a25
GR
257
258 unsigned int max_snk_mv;
259 unsigned int max_snk_ma;
260 unsigned int max_snk_mw;
261 unsigned int operating_snk_mw;
262
263 /* Requested current / voltage */
264 u32 current_limit;
265 u32 supply_voltage;
266
267 u32 bist_request;
268
269 /* PD state for Vendor Defined Messages */
270 enum vdm_states vdm_state;
271 u32 vdm_retries;
272 /* next Vendor Defined Message to send */
273 u32 vdo_data[VDO_MAX_SIZE];
274 u8 vdo_count;
275 /* VDO to retry if UFP responder replied busy */
276 u32 vdo_retry;
277
278 /* Alternate mode data */
279
280 struct pd_mode_data mode_data;
281 struct typec_altmode *partner_altmode[SVID_DISCOVERY_MAX];
282 struct typec_altmode *port_altmode[SVID_DISCOVERY_MAX];
283
02d5be46
BJS
284 /* Deadline in jiffies to exit src_try_wait state */
285 unsigned long max_wait;
286
f0690a25
GR
287#ifdef CONFIG_DEBUG_FS
288 struct dentry *dentry;
289 struct mutex logbuffer_lock; /* log buffer access lock */
290 int logbuffer_head;
291 int logbuffer_tail;
292 u8 *logbuffer[LOG_BUFFER_ENTRIES];
293#endif
294};
295
296struct pd_rx_event {
297 struct work_struct work;
298 struct tcpm_port *port;
299 struct pd_message msg;
300};
301
302#define tcpm_cc_is_sink(cc) \
303 ((cc) == TYPEC_CC_RP_DEF || (cc) == TYPEC_CC_RP_1_5 || \
304 (cc) == TYPEC_CC_RP_3_0)
305
306#define tcpm_port_is_sink(port) \
307 ((tcpm_cc_is_sink((port)->cc1) && !tcpm_cc_is_sink((port)->cc2)) || \
308 (tcpm_cc_is_sink((port)->cc2) && !tcpm_cc_is_sink((port)->cc1)))
309
310#define tcpm_cc_is_source(cc) ((cc) == TYPEC_CC_RD)
311#define tcpm_cc_is_audio(cc) ((cc) == TYPEC_CC_RA)
312#define tcpm_cc_is_open(cc) ((cc) == TYPEC_CC_OPEN)
313
314#define tcpm_port_is_source(port) \
315 ((tcpm_cc_is_source((port)->cc1) && \
316 !tcpm_cc_is_source((port)->cc2)) || \
317 (tcpm_cc_is_source((port)->cc2) && \
318 !tcpm_cc_is_source((port)->cc1)))
319
320#define tcpm_port_is_debug(port) \
321 (tcpm_cc_is_source((port)->cc1) && tcpm_cc_is_source((port)->cc2))
322
323#define tcpm_port_is_audio(port) \
324 (tcpm_cc_is_audio((port)->cc1) && tcpm_cc_is_audio((port)->cc2))
325
326#define tcpm_port_is_audio_detached(port) \
327 ((tcpm_cc_is_audio((port)->cc1) && tcpm_cc_is_open((port)->cc2)) || \
328 (tcpm_cc_is_audio((port)->cc2) && tcpm_cc_is_open((port)->cc1)))
329
330#define tcpm_try_snk(port) \
ff6c8cb1
BJS
331 ((port)->try_snk_count == 0 && (port)->try_role == TYPEC_SINK && \
332 (port)->port_type == TYPEC_PORT_DRP)
f0690a25
GR
333
334#define tcpm_try_src(port) \
ff6c8cb1
BJS
335 ((port)->try_src_count == 0 && (port)->try_role == TYPEC_SOURCE && \
336 (port)->port_type == TYPEC_PORT_DRP)
f0690a25
GR
337
338static enum tcpm_state tcpm_default_state(struct tcpm_port *port)
339{
9b0ae699 340 if (port->port_type == TYPEC_PORT_DRP) {
b46a9c90
BJS
341 if (port->try_role == TYPEC_SINK)
342 return SNK_UNATTACHED;
343 else if (port->try_role == TYPEC_SOURCE)
344 return SRC_UNATTACHED;
345 else if (port->tcpc->config->default_role == TYPEC_SINK)
346 return SNK_UNATTACHED;
347 /* Fall through to return SRC_UNATTACHED */
9b0ae699 348 } else if (port->port_type == TYPEC_PORT_UFP) {
f0690a25 349 return SNK_UNATTACHED;
b46a9c90 350 }
f0690a25
GR
351 return SRC_UNATTACHED;
352}
353
354static inline
355struct tcpm_port *typec_cap_to_tcpm(const struct typec_capability *cap)
356{
357 return container_of(cap, struct tcpm_port, typec_caps);
358}
359
360static bool tcpm_port_is_disconnected(struct tcpm_port *port)
361{
362 return (!port->attached && port->cc1 == TYPEC_CC_OPEN &&
363 port->cc2 == TYPEC_CC_OPEN) ||
364 (port->attached && ((port->polarity == TYPEC_POLARITY_CC1 &&
365 port->cc1 == TYPEC_CC_OPEN) ||
366 (port->polarity == TYPEC_POLARITY_CC2 &&
367 port->cc2 == TYPEC_CC_OPEN)));
368}
369
370/*
371 * Logging
372 */
373
374#ifdef CONFIG_DEBUG_FS
375
376static bool tcpm_log_full(struct tcpm_port *port)
377{
378 return port->logbuffer_tail ==
379 (port->logbuffer_head + 1) % LOG_BUFFER_ENTRIES;
380}
381
e79e0125 382__printf(2, 0)
f0690a25
GR
383static void _tcpm_log(struct tcpm_port *port, const char *fmt, va_list args)
384{
385 char tmpbuffer[LOG_BUFFER_ENTRY_SIZE];
386 u64 ts_nsec = local_clock();
387 unsigned long rem_nsec;
388
389 if (!port->logbuffer[port->logbuffer_head]) {
390 port->logbuffer[port->logbuffer_head] =
391 kzalloc(LOG_BUFFER_ENTRY_SIZE, GFP_KERNEL);
392 if (!port->logbuffer[port->logbuffer_head])
393 return;
394 }
395
396 vsnprintf(tmpbuffer, sizeof(tmpbuffer), fmt, args);
397
398 mutex_lock(&port->logbuffer_lock);
399
400 if (tcpm_log_full(port)) {
401 port->logbuffer_head = max(port->logbuffer_head - 1, 0);
402 strcpy(tmpbuffer, "overflow");
403 }
404
405 if (port->logbuffer_head < 0 ||
406 port->logbuffer_head >= LOG_BUFFER_ENTRIES) {
407 dev_warn(port->dev,
408 "Bad log buffer index %d\n", port->logbuffer_head);
409 goto abort;
410 }
411
412 if (!port->logbuffer[port->logbuffer_head]) {
413 dev_warn(port->dev,
414 "Log buffer index %d is NULL\n", port->logbuffer_head);
415 goto abort;
416 }
417
418 rem_nsec = do_div(ts_nsec, 1000000000);
419 scnprintf(port->logbuffer[port->logbuffer_head],
420 LOG_BUFFER_ENTRY_SIZE, "[%5lu.%06lu] %s",
421 (unsigned long)ts_nsec, rem_nsec / 1000,
422 tmpbuffer);
423 port->logbuffer_head = (port->logbuffer_head + 1) % LOG_BUFFER_ENTRIES;
424
425abort:
426 mutex_unlock(&port->logbuffer_lock);
427}
428
e79e0125 429__printf(2, 3)
f0690a25
GR
430static void tcpm_log(struct tcpm_port *port, const char *fmt, ...)
431{
432 va_list args;
433
434 /* Do not log while disconnected and unattached */
435 if (tcpm_port_is_disconnected(port) &&
436 (port->state == SRC_UNATTACHED || port->state == SNK_UNATTACHED ||
437 port->state == DRP_TOGGLING))
438 return;
439
440 va_start(args, fmt);
441 _tcpm_log(port, fmt, args);
442 va_end(args);
443}
444
e79e0125 445__printf(2, 3)
f0690a25
GR
446static void tcpm_log_force(struct tcpm_port *port, const char *fmt, ...)
447{
448 va_list args;
449
450 va_start(args, fmt);
451 _tcpm_log(port, fmt, args);
452 va_end(args);
453}
454
455static void tcpm_log_source_caps(struct tcpm_port *port)
456{
457 int i;
458
459 for (i = 0; i < port->nr_source_caps; i++) {
460 u32 pdo = port->source_caps[i];
461 enum pd_pdo_type type = pdo_type(pdo);
462 char msg[64];
463
464 switch (type) {
465 case PDO_TYPE_FIXED:
466 scnprintf(msg, sizeof(msg),
467 "%u mV, %u mA [%s%s%s%s%s%s]",
468 pdo_fixed_voltage(pdo),
469 pdo_max_current(pdo),
470 (pdo & PDO_FIXED_DUAL_ROLE) ?
471 "R" : "",
472 (pdo & PDO_FIXED_SUSPEND) ?
473 "S" : "",
474 (pdo & PDO_FIXED_HIGHER_CAP) ?
475 "H" : "",
476 (pdo & PDO_FIXED_USB_COMM) ?
477 "U" : "",
478 (pdo & PDO_FIXED_DATA_SWAP) ?
479 "D" : "",
480 (pdo & PDO_FIXED_EXTPOWER) ?
481 "E" : "");
482 break;
483 case PDO_TYPE_VAR:
484 scnprintf(msg, sizeof(msg),
485 "%u-%u mV, %u mA",
486 pdo_min_voltage(pdo),
487 pdo_max_voltage(pdo),
488 pdo_max_current(pdo));
489 break;
490 case PDO_TYPE_BATT:
491 scnprintf(msg, sizeof(msg),
492 "%u-%u mV, %u mW",
493 pdo_min_voltage(pdo),
494 pdo_max_voltage(pdo),
495 pdo_max_power(pdo));
496 break;
497 default:
498 strcpy(msg, "undefined");
499 break;
500 }
501 tcpm_log(port, " PDO %d: type %d, %s",
502 i, type, msg);
503 }
504}
505
506static int tcpm_seq_show(struct seq_file *s, void *v)
507{
508 struct tcpm_port *port = (struct tcpm_port *)s->private;
509 int tail;
510
511 mutex_lock(&port->logbuffer_lock);
512 tail = port->logbuffer_tail;
513 while (tail != port->logbuffer_head) {
514 seq_printf(s, "%s\n", port->logbuffer[tail]);
515 tail = (tail + 1) % LOG_BUFFER_ENTRIES;
516 }
517 if (!seq_has_overflowed(s))
518 port->logbuffer_tail = tail;
519 mutex_unlock(&port->logbuffer_lock);
520
521 return 0;
522}
523
524static int tcpm_debug_open(struct inode *inode, struct file *file)
525{
526 return single_open(file, tcpm_seq_show, inode->i_private);
527}
528
529static const struct file_operations tcpm_debug_operations = {
530 .open = tcpm_debug_open,
531 .llseek = seq_lseek,
532 .read = seq_read,
533 .release = single_release,
534};
535
536static struct dentry *rootdir;
537
538static int tcpm_debugfs_init(struct tcpm_port *port)
539{
540 mutex_init(&port->logbuffer_lock);
541 /* /sys/kernel/debug/tcpm/usbcX */
542 if (!rootdir) {
543 rootdir = debugfs_create_dir("tcpm", NULL);
544 if (!rootdir)
545 return -ENOMEM;
546 }
547
548 port->dentry = debugfs_create_file(dev_name(port->dev),
549 S_IFREG | 0444, rootdir,
550 port, &tcpm_debug_operations);
551
552 return 0;
553}
554
555static void tcpm_debugfs_exit(struct tcpm_port *port)
556{
557 debugfs_remove(port->dentry);
558}
559
560#else
561
e79e0125 562__printf(2, 3)
f0690a25 563static void tcpm_log(const struct tcpm_port *port, const char *fmt, ...) { }
e79e0125 564__printf(2, 3)
f0690a25
GR
565static void tcpm_log_force(struct tcpm_port *port, const char *fmt, ...) { }
566static void tcpm_log_source_caps(struct tcpm_port *port) { }
567static int tcpm_debugfs_init(const struct tcpm_port *port) { return 0; }
568static void tcpm_debugfs_exit(const struct tcpm_port *port) { }
569
570#endif
571
572static int tcpm_pd_transmit(struct tcpm_port *port,
573 enum tcpm_transmit_type type,
574 const struct pd_message *msg)
575{
576 unsigned long timeout;
577 int ret;
578
579 if (msg)
580 tcpm_log(port, "PD TX, header: %#x", le16_to_cpu(msg->header));
581 else
582 tcpm_log(port, "PD TX, type: %#x", type);
583
584 reinit_completion(&port->tx_complete);
585 ret = port->tcpc->pd_transmit(port->tcpc, type, msg);
586 if (ret < 0)
587 return ret;
588
589 mutex_unlock(&port->lock);
590 timeout = wait_for_completion_timeout(&port->tx_complete,
591 msecs_to_jiffies(PD_T_TCPC_TX_TIMEOUT));
592 mutex_lock(&port->lock);
593 if (!timeout)
594 return -ETIMEDOUT;
595
596 switch (port->tx_status) {
597 case TCPC_TX_SUCCESS:
598 port->message_id = (port->message_id + 1) & PD_HEADER_ID_MASK;
599 return 0;
600 case TCPC_TX_DISCARDED:
601 return -EAGAIN;
602 case TCPC_TX_FAILED:
603 default:
604 return -EIO;
605 }
606}
607
608void tcpm_pd_transmit_complete(struct tcpm_port *port,
609 enum tcpm_transmit_status status)
610{
611 tcpm_log(port, "PD TX complete, status: %u", status);
612 port->tx_status = status;
613 complete(&port->tx_complete);
614}
615EXPORT_SYMBOL_GPL(tcpm_pd_transmit_complete);
616
617static int tcpm_mux_set(struct tcpm_port *port, enum tcpc_mux_mode mode,
618 enum tcpc_usb_switch config)
619{
620 int ret = 0;
621
622 tcpm_log(port, "Requesting mux mode %d, config %d, polarity %d",
623 mode, config, port->polarity);
624
625 if (port->tcpc->mux)
626 ret = port->tcpc->mux->set(port->tcpc->mux, mode, config,
627 port->polarity);
628
629 return ret;
630}
631
632static int tcpm_set_polarity(struct tcpm_port *port,
633 enum typec_cc_polarity polarity)
634{
635 int ret;
636
637 tcpm_log(port, "polarity %d", polarity);
638
639 ret = port->tcpc->set_polarity(port->tcpc, polarity);
640 if (ret < 0)
641 return ret;
642
643 port->polarity = polarity;
644
645 return 0;
646}
647
648static int tcpm_set_vconn(struct tcpm_port *port, bool enable)
649{
650 int ret;
651
652 tcpm_log(port, "vconn:=%d", enable);
653
654 ret = port->tcpc->set_vconn(port->tcpc, enable);
655 if (!ret) {
656 port->vconn_role = enable ? TYPEC_SOURCE : TYPEC_SINK;
657 typec_set_vconn_role(port->typec_port, port->vconn_role);
658 }
659
660 return ret;
661}
662
663static u32 tcpm_get_current_limit(struct tcpm_port *port)
664{
665 enum typec_cc_status cc;
666 u32 limit;
667
668 cc = port->polarity ? port->cc2 : port->cc1;
669 switch (cc) {
670 case TYPEC_CC_RP_1_5:
671 limit = 1500;
672 break;
673 case TYPEC_CC_RP_3_0:
674 limit = 3000;
675 break;
676 case TYPEC_CC_RP_DEF:
677 default:
ea62cfc7
HG
678 if (port->tcpc->get_current_limit)
679 limit = port->tcpc->get_current_limit(port->tcpc);
680 else
681 limit = 0;
f0690a25
GR
682 break;
683 }
684
685 return limit;
686}
687
688static int tcpm_set_current_limit(struct tcpm_port *port, u32 max_ma, u32 mv)
689{
690 int ret = -EOPNOTSUPP;
691
692 tcpm_log(port, "Setting voltage/current limit %u mV %u mA", mv, max_ma);
693
694 if (port->tcpc->set_current_limit)
695 ret = port->tcpc->set_current_limit(port->tcpc, max_ma, mv);
696
697 return ret;
698}
699
700/*
701 * Determine RP value to set based on maximum current supported
702 * by a port if configured as source.
703 * Returns CC value to report to link partner.
704 */
705static enum typec_cc_status tcpm_rp_cc(struct tcpm_port *port)
706{
707 const u32 *src_pdo = port->src_pdo;
708 int nr_pdo = port->nr_src_pdo;
709 int i;
710
711 /*
712 * Search for first entry with matching voltage.
713 * It should report the maximum supported current.
714 */
715 for (i = 0; i < nr_pdo; i++) {
716 const u32 pdo = src_pdo[i];
717
718 if (pdo_type(pdo) == PDO_TYPE_FIXED &&
719 pdo_fixed_voltage(pdo) == 5000) {
720 unsigned int curr = pdo_max_current(pdo);
721
722 if (curr >= 3000)
723 return TYPEC_CC_RP_3_0;
724 else if (curr >= 1500)
725 return TYPEC_CC_RP_1_5;
726 return TYPEC_CC_RP_DEF;
727 }
728 }
729
730 return TYPEC_CC_RP_DEF;
731}
732
733static int tcpm_set_attached_state(struct tcpm_port *port, bool attached)
734{
735 return port->tcpc->set_roles(port->tcpc, attached, port->pwr_role,
736 port->data_role);
737}
738
739static int tcpm_set_roles(struct tcpm_port *port, bool attached,
740 enum typec_role role, enum typec_data_role data)
741{
742 int ret;
743
744 if (data == TYPEC_HOST)
745 ret = tcpm_mux_set(port, TYPEC_MUX_USB,
746 TCPC_USB_SWITCH_CONNECT);
747 else
748 ret = tcpm_mux_set(port, TYPEC_MUX_NONE,
749 TCPC_USB_SWITCH_DISCONNECT);
750 if (ret < 0)
751 return ret;
752
753 ret = port->tcpc->set_roles(port->tcpc, attached, role, data);
754 if (ret < 0)
755 return ret;
756
757 port->pwr_role = role;
758 port->data_role = data;
759 typec_set_data_role(port->typec_port, data);
760 typec_set_pwr_role(port->typec_port, role);
761
762 return 0;
763}
764
765static int tcpm_set_pwr_role(struct tcpm_port *port, enum typec_role role)
766{
767 int ret;
768
769 ret = port->tcpc->set_roles(port->tcpc, true, role,
770 port->data_role);
771 if (ret < 0)
772 return ret;
773
774 port->pwr_role = role;
775 typec_set_pwr_role(port->typec_port, role);
776
777 return 0;
778}
779
780static int tcpm_pd_send_source_caps(struct tcpm_port *port)
781{
782 struct pd_message msg;
783 int i;
784
785 memset(&msg, 0, sizeof(msg));
786 if (!port->nr_src_pdo) {
787 /* No source capabilities defined, sink only */
788 msg.header = PD_HEADER_LE(PD_CTRL_REJECT,
789 port->pwr_role,
790 port->data_role,
791 port->message_id, 0);
792 } else {
793 msg.header = PD_HEADER_LE(PD_DATA_SOURCE_CAP,
794 port->pwr_role,
795 port->data_role,
796 port->message_id,
797 port->nr_src_pdo);
798 }
799 for (i = 0; i < port->nr_src_pdo; i++)
800 msg.payload[i] = cpu_to_le32(port->src_pdo[i]);
801
802 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
803}
804
805static int tcpm_pd_send_sink_caps(struct tcpm_port *port)
806{
807 struct pd_message msg;
808 int i;
809
810 memset(&msg, 0, sizeof(msg));
811 if (!port->nr_snk_pdo) {
812 /* No sink capabilities defined, source only */
813 msg.header = PD_HEADER_LE(PD_CTRL_REJECT,
814 port->pwr_role,
815 port->data_role,
816 port->message_id, 0);
817 } else {
818 msg.header = PD_HEADER_LE(PD_DATA_SINK_CAP,
819 port->pwr_role,
820 port->data_role,
821 port->message_id,
822 port->nr_snk_pdo);
823 }
824 for (i = 0; i < port->nr_snk_pdo; i++)
825 msg.payload[i] = cpu_to_le32(port->snk_pdo[i]);
826
827 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
828}
829
830static void tcpm_set_state(struct tcpm_port *port, enum tcpm_state state,
831 unsigned int delay_ms)
832{
833 if (delay_ms) {
834 tcpm_log(port, "pending state change %s -> %s @ %u ms",
835 tcpm_states[port->state], tcpm_states[state],
836 delay_ms);
837 port->delayed_state = state;
838 mod_delayed_work(port->wq, &port->state_machine,
839 msecs_to_jiffies(delay_ms));
840 port->delayed_runtime = jiffies + msecs_to_jiffies(delay_ms);
841 port->delay_ms = delay_ms;
842 } else {
843 tcpm_log(port, "state change %s -> %s",
844 tcpm_states[port->state], tcpm_states[state]);
845 port->delayed_state = INVALID_STATE;
846 port->prev_state = port->state;
847 port->state = state;
848 /*
849 * Don't re-queue the state machine work item if we're currently
850 * in the state machine and we're immediately changing states.
851 * tcpm_state_machine_work() will continue running the state
852 * machine.
853 */
854 if (!port->state_machine_running)
855 mod_delayed_work(port->wq, &port->state_machine, 0);
856 }
857}
858
859static void tcpm_set_state_cond(struct tcpm_port *port, enum tcpm_state state,
860 unsigned int delay_ms)
861{
862 if (port->enter_state == port->state)
863 tcpm_set_state(port, state, delay_ms);
864 else
865 tcpm_log(port,
866 "skipped %sstate change %s -> %s [%u ms], context state %s",
867 delay_ms ? "delayed " : "",
868 tcpm_states[port->state], tcpm_states[state],
869 delay_ms, tcpm_states[port->enter_state]);
870}
871
872static void tcpm_queue_message(struct tcpm_port *port,
873 enum pd_msg_request message)
874{
875 port->queued_message = message;
876 mod_delayed_work(port->wq, &port->state_machine, 0);
877}
878
879/*
880 * VDM/VDO handling functions
881 */
882static void tcpm_queue_vdm(struct tcpm_port *port, const u32 header,
883 const u32 *data, int cnt)
884{
885 port->vdo_count = cnt + 1;
886 port->vdo_data[0] = header;
887 memcpy(&port->vdo_data[1], data, sizeof(u32) * cnt);
888 /* Set ready, vdm state machine will actually send */
889 port->vdm_retries = 0;
890 port->vdm_state = VDM_STATE_READY;
891}
892
893static void svdm_consume_identity(struct tcpm_port *port, const __le32 *payload,
894 int cnt)
895{
896 u32 vdo = le32_to_cpu(payload[VDO_INDEX_IDH]);
897 u32 product = le32_to_cpu(payload[VDO_INDEX_PRODUCT]);
898
899 memset(&port->mode_data, 0, sizeof(port->mode_data));
900
f0690a25
GR
901 port->partner_ident.id_header = vdo;
902 port->partner_ident.cert_stat = le32_to_cpu(payload[VDO_INDEX_CSTAT]);
903 port->partner_ident.product = product;
904
905 typec_partner_set_identity(port->partner);
906
907 tcpm_log(port, "Identity: %04x:%04x.%04x",
908 PD_IDH_VID(vdo),
909 PD_PRODUCT_PID(product), product & 0xffff);
910}
911
912static bool svdm_consume_svids(struct tcpm_port *port, const __le32 *payload,
913 int cnt)
914{
915 struct pd_mode_data *pmdata = &port->mode_data;
916 int i;
917
918 for (i = 1; i < cnt; i++) {
919 u32 p = le32_to_cpu(payload[i]);
920 u16 svid;
921
922 svid = (p >> 16) & 0xffff;
923 if (!svid)
924 return false;
925
926 if (pmdata->nsvids >= SVID_DISCOVERY_MAX)
927 goto abort;
928
929 pmdata->svids[pmdata->nsvids++] = svid;
930 tcpm_log(port, "SVID %d: 0x%x", pmdata->nsvids, svid);
931
932 svid = p & 0xffff;
933 if (!svid)
934 return false;
935
936 if (pmdata->nsvids >= SVID_DISCOVERY_MAX)
937 goto abort;
938
939 pmdata->svids[pmdata->nsvids++] = svid;
940 tcpm_log(port, "SVID %d: 0x%x", pmdata->nsvids, svid);
941 }
942 return true;
943abort:
944 tcpm_log(port, "SVID_DISCOVERY_MAX(%d) too low!", SVID_DISCOVERY_MAX);
945 return false;
946}
947
948static void svdm_consume_modes(struct tcpm_port *port, const __le32 *payload,
949 int cnt)
950{
951 struct pd_mode_data *pmdata = &port->mode_data;
952 struct typec_altmode_desc *paltmode;
953 struct typec_mode_desc *pmode;
954 int i;
955
956 if (pmdata->altmodes >= ARRAY_SIZE(port->partner_altmode)) {
957 /* Already logged in svdm_consume_svids() */
958 return;
959 }
960
961 paltmode = &pmdata->altmode_desc[pmdata->altmodes];
962 memset(paltmode, 0, sizeof(*paltmode));
963
964 paltmode->svid = pmdata->svids[pmdata->svid_index];
965
966 tcpm_log(port, " Alternate mode %d: SVID 0x%04x",
967 pmdata->altmodes, paltmode->svid);
968
969 for (i = 1; i < cnt && paltmode->n_modes < ALTMODE_MAX_MODES; i++) {
970 pmode = &paltmode->modes[paltmode->n_modes];
971 memset(pmode, 0, sizeof(*pmode));
972 pmode->vdo = le32_to_cpu(payload[i]);
973 pmode->index = i - 1;
974 paltmode->n_modes++;
975 tcpm_log(port, " VDO %d: 0x%08x",
976 pmode->index, pmode->vdo);
977 }
978 port->partner_altmode[pmdata->altmodes] =
979 typec_partner_register_altmode(port->partner, paltmode);
4c87b3e5 980 if (!port->partner_altmode[pmdata->altmodes]) {
f0690a25
GR
981 tcpm_log(port,
982 "Failed to register alternate modes for SVID 0x%04x",
983 paltmode->svid);
984 return;
985 }
986 pmdata->altmodes++;
987}
988
989#define supports_modal(port) PD_IDH_MODAL_SUPP((port)->partner_ident.id_header)
990
991static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
992 u32 *response)
993{
994 u32 p0 = le32_to_cpu(payload[0]);
995 int cmd_type = PD_VDO_CMDT(p0);
996 int cmd = PD_VDO_CMD(p0);
997 struct pd_mode_data *modep;
998 int rlen = 0;
999 u16 svid;
193a6801 1000 int i;
f0690a25
GR
1001
1002 tcpm_log(port, "Rx VDM cmd 0x%x type %d cmd %d len %d",
1003 p0, cmd_type, cmd, cnt);
1004
1005 modep = &port->mode_data;
1006
1007 switch (cmd_type) {
1008 case CMDT_INIT:
1009 switch (cmd) {
1010 case CMD_DISCOVER_IDENT:
193a6801
GR
1011 /* 6.4.4.3.1: Only respond as UFP (device) */
1012 if (port->data_role == TYPEC_DEVICE &&
1013 port->nr_snk_vdo) {
1014 for (i = 0; i < port->nr_snk_vdo; i++)
cbe5843e 1015 response[i + 1] = port->snk_vdo[i];
193a6801
GR
1016 rlen = port->nr_snk_vdo + 1;
1017 }
f0690a25
GR
1018 break;
1019 case CMD_DISCOVER_SVID:
1020 break;
1021 case CMD_DISCOVER_MODES:
1022 break;
1023 case CMD_ENTER_MODE:
1024 break;
1025 case CMD_EXIT_MODE:
1026 break;
1027 case CMD_ATTENTION:
1028 break;
1029 default:
1030 break;
1031 }
1032 if (rlen >= 1) {
1033 response[0] = p0 | VDO_CMDT(CMDT_RSP_ACK);
1034 } else if (rlen == 0) {
1035 response[0] = p0 | VDO_CMDT(CMDT_RSP_NAK);
1036 rlen = 1;
1037 } else {
1038 response[0] = p0 | VDO_CMDT(CMDT_RSP_BUSY);
1039 rlen = 1;
1040 }
1041 break;
1042 case CMDT_RSP_ACK:
1043 /* silently drop message if we are not connected */
1044 if (!port->partner)
1045 break;
1046
1047 switch (cmd) {
1048 case CMD_DISCOVER_IDENT:
1049 /* 6.4.4.3.1 */
1050 svdm_consume_identity(port, payload, cnt);
1051 response[0] = VDO(USB_SID_PD, 1, CMD_DISCOVER_SVID);
1052 rlen = 1;
1053 break;
1054 case CMD_DISCOVER_SVID:
1055 /* 6.4.4.3.2 */
1056 if (svdm_consume_svids(port, payload, cnt)) {
1057 response[0] = VDO(USB_SID_PD, 1,
1058 CMD_DISCOVER_SVID);
1059 rlen = 1;
1060 } else if (modep->nsvids && supports_modal(port)) {
1061 response[0] = VDO(modep->svids[0], 1,
1062 CMD_DISCOVER_MODES);
1063 rlen = 1;
1064 }
1065 break;
1066 case CMD_DISCOVER_MODES:
1067 /* 6.4.4.3.3 */
1068 svdm_consume_modes(port, payload, cnt);
1069 modep->svid_index++;
1070 if (modep->svid_index < modep->nsvids) {
1071 svid = modep->svids[modep->svid_index];
1072 response[0] = VDO(svid, 1, CMD_DISCOVER_MODES);
1073 rlen = 1;
1074 } else {
44262fad 1075 /* enter alternate mode if/when implemented */
f0690a25
GR
1076 }
1077 break;
1078 case CMD_ENTER_MODE:
1079 break;
1080 default:
1081 break;
1082 }
1083 break;
1084 default:
1085 break;
1086 }
1087
1088 return rlen;
1089}
1090
1091static void tcpm_handle_vdm_request(struct tcpm_port *port,
1092 const __le32 *payload, int cnt)
1093{
1094 int rlen = 0;
1095 u32 response[8] = { };
1096 u32 p0 = le32_to_cpu(payload[0]);
1097
1098 if (port->vdm_state == VDM_STATE_BUSY) {
1099 /* If UFP responded busy retry after timeout */
1100 if (PD_VDO_CMDT(p0) == CMDT_RSP_BUSY) {
1101 port->vdm_state = VDM_STATE_WAIT_RSP_BUSY;
1102 port->vdo_retry = (p0 & ~VDO_CMDT_MASK) |
1103 CMDT_INIT;
1104 mod_delayed_work(port->wq, &port->vdm_state_machine,
1105 msecs_to_jiffies(PD_T_VDM_BUSY));
1106 return;
1107 }
1108 port->vdm_state = VDM_STATE_DONE;
1109 }
1110
1111 if (PD_VDO_SVDM(p0))
1112 rlen = tcpm_pd_svdm(port, payload, cnt, response);
f0690a25
GR
1113
1114 if (rlen > 0) {
1115 tcpm_queue_vdm(port, response[0], &response[1], rlen - 1);
1116 mod_delayed_work(port->wq, &port->vdm_state_machine, 0);
1117 }
1118}
1119
1120static void tcpm_send_vdm(struct tcpm_port *port, u32 vid, int cmd,
1121 const u32 *data, int count)
1122{
1123 u32 header;
1124
1125 if (WARN_ON(count > VDO_MAX_SIZE - 1))
1126 count = VDO_MAX_SIZE - 1;
1127
1128 /* set VDM header with VID & CMD */
1129 header = VDO(vid, ((vid & USB_SID_PD) == USB_SID_PD) ?
1130 1 : (PD_VDO_CMD(cmd) <= CMD_ATTENTION), cmd);
1131 tcpm_queue_vdm(port, header, data, count);
1132
1133 mod_delayed_work(port->wq, &port->vdm_state_machine, 0);
1134}
1135
1136static unsigned int vdm_ready_timeout(u32 vdm_hdr)
1137{
1138 unsigned int timeout;
1139 int cmd = PD_VDO_CMD(vdm_hdr);
1140
1141 /* its not a structured VDM command */
1142 if (!PD_VDO_SVDM(vdm_hdr))
1143 return PD_T_VDM_UNSTRUCTURED;
1144
1145 switch (PD_VDO_CMDT(vdm_hdr)) {
1146 case CMDT_INIT:
1147 if (cmd == CMD_ENTER_MODE || cmd == CMD_EXIT_MODE)
1148 timeout = PD_T_VDM_WAIT_MODE_E;
1149 else
1150 timeout = PD_T_VDM_SNDR_RSP;
1151 break;
1152 default:
1153 if (cmd == CMD_ENTER_MODE || cmd == CMD_EXIT_MODE)
1154 timeout = PD_T_VDM_E_MODE;
1155 else
1156 timeout = PD_T_VDM_RCVR_RSP;
1157 break;
1158 }
1159 return timeout;
1160}
1161
1162static void vdm_run_state_machine(struct tcpm_port *port)
1163{
1164 struct pd_message msg;
1165 int i, res;
1166
1167 switch (port->vdm_state) {
1168 case VDM_STATE_READY:
1169 /* Only transmit VDM if attached */
1170 if (!port->attached) {
1171 port->vdm_state = VDM_STATE_ERR_BUSY;
1172 break;
1173 }
1174
1175 /*
1176 * if there's traffic or we're not in PDO ready state don't send
1177 * a VDM.
1178 */
1179 if (port->state != SRC_READY && port->state != SNK_READY)
1180 break;
1181
1182 /* Prepare and send VDM */
1183 memset(&msg, 0, sizeof(msg));
1184 msg.header = PD_HEADER_LE(PD_DATA_VENDOR_DEF,
1185 port->pwr_role,
1186 port->data_role,
1187 port->message_id, port->vdo_count);
1188 for (i = 0; i < port->vdo_count; i++)
1189 msg.payload[i] = cpu_to_le32(port->vdo_data[i]);
1190 res = tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
1191 if (res < 0) {
1192 port->vdm_state = VDM_STATE_ERR_SEND;
1193 } else {
1194 unsigned long timeout;
1195
1196 port->vdm_retries = 0;
1197 port->vdm_state = VDM_STATE_BUSY;
1198 timeout = vdm_ready_timeout(port->vdo_data[0]);
1199 mod_delayed_work(port->wq, &port->vdm_state_machine,
1200 timeout);
1201 }
1202 break;
1203 case VDM_STATE_WAIT_RSP_BUSY:
1204 port->vdo_data[0] = port->vdo_retry;
1205 port->vdo_count = 1;
1206 port->vdm_state = VDM_STATE_READY;
1207 break;
1208 case VDM_STATE_BUSY:
1209 port->vdm_state = VDM_STATE_ERR_TMOUT;
1210 break;
1211 case VDM_STATE_ERR_SEND:
1212 /*
1213 * A partner which does not support USB PD will not reply,
1214 * so this is not a fatal error. At the same time, some
1215 * devices may not return GoodCRC under some circumstances,
1216 * so we need to retry.
1217 */
1218 if (port->vdm_retries < 3) {
1219 tcpm_log(port, "VDM Tx error, retry");
1220 port->vdm_retries++;
1221 port->vdm_state = VDM_STATE_READY;
1222 }
1223 break;
1224 default:
1225 break;
1226 }
1227}
1228
1229static void vdm_state_machine_work(struct work_struct *work)
1230{
1231 struct tcpm_port *port = container_of(work, struct tcpm_port,
1232 vdm_state_machine.work);
1233 enum vdm_states prev_state;
1234
1235 mutex_lock(&port->lock);
1236
1237 /*
1238 * Continue running as long as the port is not busy and there was
1239 * a state change.
1240 */
1241 do {
1242 prev_state = port->vdm_state;
1243 vdm_run_state_machine(port);
1244 } while (port->vdm_state != prev_state &&
1245 port->vdm_state != VDM_STATE_BUSY);
1246
1247 mutex_unlock(&port->lock);
1248}
1249
5007e1b5
BJS
1250enum pdo_err {
1251 PDO_NO_ERR,
1252 PDO_ERR_NO_VSAFE5V,
1253 PDO_ERR_VSAFE5V_NOT_FIRST,
1254 PDO_ERR_PDO_TYPE_NOT_IN_ORDER,
1255 PDO_ERR_FIXED_NOT_SORTED,
1256 PDO_ERR_VARIABLE_BATT_NOT_SORTED,
1257 PDO_ERR_DUPE_PDO,
1258};
1259
1260static const char * const pdo_err_msg[] = {
1261 [PDO_ERR_NO_VSAFE5V] =
1262 " err: source/sink caps should atleast have vSafe5V",
1263 [PDO_ERR_VSAFE5V_NOT_FIRST] =
1264 " err: vSafe5V Fixed Supply Object Shall always be the first object",
1265 [PDO_ERR_PDO_TYPE_NOT_IN_ORDER] =
1266 " err: PDOs should be in the following order: Fixed; Battery; Variable",
1267 [PDO_ERR_FIXED_NOT_SORTED] =
1268 " err: Fixed supply pdos should be in increasing order of their fixed voltage",
1269 [PDO_ERR_VARIABLE_BATT_NOT_SORTED] =
1270 " err: Variable/Battery supply pdos should be in increasing order of their minimum voltage",
1271 [PDO_ERR_DUPE_PDO] =
1272 " err: Variable/Batt supply pdos cannot have same min/max voltage",
1273};
1274
1275static enum pdo_err tcpm_caps_err(struct tcpm_port *port, const u32 *pdo,
1276 unsigned int nr_pdo)
1277{
1278 unsigned int i;
1279
1280 /* Should at least contain vSafe5v */
1281 if (nr_pdo < 1)
1282 return PDO_ERR_NO_VSAFE5V;
1283
1284 /* The vSafe5V Fixed Supply Object Shall always be the first object */
1285 if (pdo_type(pdo[0]) != PDO_TYPE_FIXED ||
1286 pdo_fixed_voltage(pdo[0]) != VSAFE5V)
1287 return PDO_ERR_VSAFE5V_NOT_FIRST;
1288
1289 for (i = 1; i < nr_pdo; i++) {
1290 if (pdo_type(pdo[i]) < pdo_type(pdo[i - 1])) {
1291 return PDO_ERR_PDO_TYPE_NOT_IN_ORDER;
1292 } else if (pdo_type(pdo[i]) == pdo_type(pdo[i - 1])) {
1293 enum pd_pdo_type type = pdo_type(pdo[i]);
1294
1295 switch (type) {
1296 /*
1297 * The remaining Fixed Supply Objects, if
1298 * present, shall be sent in voltage order;
1299 * lowest to highest.
1300 */
1301 case PDO_TYPE_FIXED:
1302 if (pdo_fixed_voltage(pdo[i]) <=
1303 pdo_fixed_voltage(pdo[i - 1]))
1304 return PDO_ERR_FIXED_NOT_SORTED;
1305 break;
1306 /*
1307 * The Battery Supply Objects and Variable
1308 * supply, if present shall be sent in Minimum
1309 * Voltage order; lowest to highest.
1310 */
1311 case PDO_TYPE_VAR:
1312 case PDO_TYPE_BATT:
1313 if (pdo_min_voltage(pdo[i]) <
1314 pdo_min_voltage(pdo[i - 1]))
1315 return PDO_ERR_VARIABLE_BATT_NOT_SORTED;
1316 else if ((pdo_min_voltage(pdo[i]) ==
1317 pdo_min_voltage(pdo[i - 1])) &&
1318 (pdo_max_voltage(pdo[i]) ==
1319 pdo_min_voltage(pdo[i - 1])))
1320 return PDO_ERR_DUPE_PDO;
1321 break;
1322 default:
1323 tcpm_log_force(port, " Unknown pdo type");
1324 }
1325 }
1326 }
1327
1328 return PDO_NO_ERR;
1329}
1330
1331static int tcpm_validate_caps(struct tcpm_port *port, const u32 *pdo,
1332 unsigned int nr_pdo)
1333{
1334 enum pdo_err err_index = tcpm_caps_err(port, pdo, nr_pdo);
1335
1336 if (err_index != PDO_NO_ERR) {
1337 tcpm_log_force(port, " %s", pdo_err_msg[err_index]);
1338 return -EINVAL;
1339 }
1340
1341 return 0;
1342}
1343
f0690a25
GR
1344/*
1345 * PD (data, control) command handling functions
1346 */
1347static void tcpm_pd_data_request(struct tcpm_port *port,
1348 const struct pd_message *msg)
1349{
1350 enum pd_data_msg_type type = pd_header_type_le(msg->header);
1351 unsigned int cnt = pd_header_cnt_le(msg->header);
1352 unsigned int i;
1353
1354 switch (type) {
1355 case PD_DATA_SOURCE_CAP:
1356 if (port->pwr_role != TYPEC_SINK)
1357 break;
1358
1359 for (i = 0; i < cnt; i++)
1360 port->source_caps[i] = le32_to_cpu(msg->payload[i]);
1361
1362 port->nr_source_caps = cnt;
1363
1364 tcpm_log_source_caps(port);
1365
5007e1b5
BJS
1366 tcpm_validate_caps(port, port->source_caps,
1367 port->nr_source_caps);
1368
f0690a25
GR
1369 /*
1370 * This message may be received even if VBUS is not
1371 * present. This is quite unexpected; see USB PD
1372 * specification, sections 8.3.3.6.3.1 and 8.3.3.6.3.2.
1373 * However, at the same time, we must be ready to
1374 * receive this message and respond to it 15ms after
1375 * receiving PS_RDY during power swap operations, no matter
1376 * if VBUS is available or not (USB PD specification,
1377 * section 6.5.9.2).
1378 * So we need to accept the message either way,
1379 * but be prepared to keep waiting for VBUS after it was
1380 * handled.
1381 */
1382 tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0);
1383 break;
1384 case PD_DATA_REQUEST:
1385 if (port->pwr_role != TYPEC_SOURCE ||
1386 cnt != 1) {
1387 tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
1388 break;
1389 }
1390 port->sink_request = le32_to_cpu(msg->payload[0]);
1391 tcpm_set_state(port, SRC_NEGOTIATE_CAPABILITIES, 0);
1392 break;
1393 case PD_DATA_SINK_CAP:
1394 /* We don't do anything with this at the moment... */
1395 for (i = 0; i < cnt; i++)
1396 port->sink_caps[i] = le32_to_cpu(msg->payload[i]);
1397 port->nr_sink_caps = cnt;
1398 break;
1399 case PD_DATA_VENDOR_DEF:
1400 tcpm_handle_vdm_request(port, msg->payload, cnt);
1401 break;
1402 case PD_DATA_BIST:
1403 if (port->state == SRC_READY || port->state == SNK_READY) {
1404 port->bist_request = le32_to_cpu(msg->payload[0]);
1405 tcpm_set_state(port, BIST_RX, 0);
1406 }
1407 break;
1408 default:
1409 tcpm_log(port, "Unhandled data message type %#x", type);
1410 break;
1411 }
1412}
1413
1414static void tcpm_pd_ctrl_request(struct tcpm_port *port,
1415 const struct pd_message *msg)
1416{
1417 enum pd_ctrl_msg_type type = pd_header_type_le(msg->header);
1418 enum tcpm_state next_state;
1419
1420 switch (type) {
1421 case PD_CTRL_GOOD_CRC:
1422 case PD_CTRL_PING:
1423 break;
1424 case PD_CTRL_GET_SOURCE_CAP:
1425 switch (port->state) {
1426 case SRC_READY:
1427 case SNK_READY:
1428 tcpm_queue_message(port, PD_MSG_DATA_SOURCE_CAP);
1429 break;
1430 default:
1431 tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
1432 break;
1433 }
1434 break;
1435 case PD_CTRL_GET_SINK_CAP:
1436 switch (port->state) {
1437 case SRC_READY:
1438 case SNK_READY:
1439 tcpm_queue_message(port, PD_MSG_DATA_SINK_CAP);
1440 break;
1441 default:
1442 tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
1443 break;
1444 }
1445 break;
1446 case PD_CTRL_GOTO_MIN:
1447 break;
1448 case PD_CTRL_PS_RDY:
1449 switch (port->state) {
1450 case SNK_TRANSITION_SINK:
1451 if (port->vbus_present) {
1452 tcpm_set_current_limit(port,
1453 port->current_limit,
1454 port->supply_voltage);
8bf05746 1455 port->explicit_contract = true;
f0690a25
GR
1456 tcpm_set_state(port, SNK_READY, 0);
1457 } else {
1458 /*
1459 * Seen after power swap. Keep waiting for VBUS
1460 * in a transitional state.
1461 */
1462 tcpm_set_state(port,
1463 SNK_TRANSITION_SINK_VBUS, 0);
1464 }
1465 break;
b965b631 1466 case PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED:
f0690a25
GR
1467 tcpm_set_state(port, PR_SWAP_SRC_SNK_SINK_ON, 0);
1468 break;
1469 case PR_SWAP_SNK_SRC_SINK_OFF:
1470 tcpm_set_state(port, PR_SWAP_SNK_SRC_SOURCE_ON, 0);
1471 break;
1472 case VCONN_SWAP_WAIT_FOR_VCONN:
1473 tcpm_set_state(port, VCONN_SWAP_TURN_OFF_VCONN, 0);
1474 break;
1475 default:
1476 break;
1477 }
1478 break;
1479 case PD_CTRL_REJECT:
1480 case PD_CTRL_WAIT:
1481 switch (port->state) {
1482 case SNK_NEGOTIATE_CAPABILITIES:
1483 /* USB PD specification, Figure 8-43 */
1484 if (port->explicit_contract)
1485 next_state = SNK_READY;
1486 else
1487 next_state = SNK_WAIT_CAPABILITIES;
1488 tcpm_set_state(port, next_state, 0);
1489 break;
1490 case DR_SWAP_SEND:
1491 port->swap_status = (type == PD_CTRL_WAIT ?
1492 -EAGAIN : -EOPNOTSUPP);
1493 tcpm_set_state(port, DR_SWAP_CANCEL, 0);
1494 break;
1495 case PR_SWAP_SEND:
1496 port->swap_status = (type == PD_CTRL_WAIT ?
1497 -EAGAIN : -EOPNOTSUPP);
1498 tcpm_set_state(port, PR_SWAP_CANCEL, 0);
1499 break;
1500 case VCONN_SWAP_SEND:
1501 port->swap_status = (type == PD_CTRL_WAIT ?
1502 -EAGAIN : -EOPNOTSUPP);
1503 tcpm_set_state(port, VCONN_SWAP_CANCEL, 0);
1504 break;
1505 default:
1506 break;
1507 }
1508 break;
1509 case PD_CTRL_ACCEPT:
1510 switch (port->state) {
1511 case SNK_NEGOTIATE_CAPABILITIES:
1512 tcpm_set_state(port, SNK_TRANSITION_SINK, 0);
1513 break;
1514 case SOFT_RESET_SEND:
1515 port->message_id = 0;
5fec4b54 1516 port->rx_msgid = -1;
f0690a25
GR
1517 if (port->pwr_role == TYPEC_SOURCE)
1518 next_state = SRC_SEND_CAPABILITIES;
1519 else
1520 next_state = SNK_WAIT_CAPABILITIES;
1521 tcpm_set_state(port, next_state, 0);
1522 break;
1523 case DR_SWAP_SEND:
1524 tcpm_set_state(port, DR_SWAP_CHANGE_DR, 0);
1525 break;
1526 case PR_SWAP_SEND:
1527 tcpm_set_state(port, PR_SWAP_START, 0);
1528 break;
1529 case VCONN_SWAP_SEND:
1530 tcpm_set_state(port, VCONN_SWAP_START, 0);
1531 break;
1532 default:
1533 break;
1534 }
1535 break;
1536 case PD_CTRL_SOFT_RESET:
1537 tcpm_set_state(port, SOFT_RESET, 0);
1538 break;
1539 case PD_CTRL_DR_SWAP:
9b0ae699 1540 if (port->port_type != TYPEC_PORT_DRP) {
f0690a25
GR
1541 tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
1542 break;
1543 }
1544 /*
1545 * XXX
1546 * 6.3.9: If an alternate mode is active, a request to swap
1547 * alternate modes shall trigger a port reset.
1548 */
1549 switch (port->state) {
1550 case SRC_READY:
1551 case SNK_READY:
1552 tcpm_set_state(port, DR_SWAP_ACCEPT, 0);
1553 break;
1554 default:
1555 tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
1556 break;
1557 }
1558 break;
1559 case PD_CTRL_PR_SWAP:
9b0ae699 1560 if (port->port_type != TYPEC_PORT_DRP) {
f0690a25
GR
1561 tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
1562 break;
1563 }
1564 switch (port->state) {
1565 case SRC_READY:
1566 case SNK_READY:
1567 tcpm_set_state(port, PR_SWAP_ACCEPT, 0);
1568 break;
1569 default:
1570 tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
1571 break;
1572 }
1573 break;
1574 case PD_CTRL_VCONN_SWAP:
1575 switch (port->state) {
1576 case SRC_READY:
1577 case SNK_READY:
1578 tcpm_set_state(port, VCONN_SWAP_ACCEPT, 0);
1579 break;
1580 default:
1581 tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
1582 break;
1583 }
1584 break;
1585 default:
1586 tcpm_log(port, "Unhandled ctrl message type %#x", type);
1587 break;
1588 }
1589}
1590
1591static void tcpm_pd_rx_handler(struct work_struct *work)
1592{
1593 struct pd_rx_event *event = container_of(work,
1594 struct pd_rx_event, work);
1595 const struct pd_message *msg = &event->msg;
1596 unsigned int cnt = pd_header_cnt_le(msg->header);
1597 struct tcpm_port *port = event->port;
1598
1599 mutex_lock(&port->lock);
1600
1601 tcpm_log(port, "PD RX, header: %#x [%d]", le16_to_cpu(msg->header),
1602 port->attached);
1603
1604 if (port->attached) {
5fec4b54
GR
1605 enum pd_ctrl_msg_type type = pd_header_type_le(msg->header);
1606 unsigned int msgid = pd_header_msgid_le(msg->header);
1607
1608 /*
1609 * USB PD standard, 6.6.1.2:
1610 * "... if MessageID value in a received Message is the
1611 * same as the stored value, the receiver shall return a
1612 * GoodCRC Message with that MessageID value and drop
1613 * the Message (this is a retry of an already received
1614 * Message). Note: this shall not apply to the Soft_Reset
1615 * Message which always has a MessageID value of zero."
1616 */
1617 if (msgid == port->rx_msgid && type != PD_CTRL_SOFT_RESET)
1618 goto done;
1619 port->rx_msgid = msgid;
1620
f0690a25
GR
1621 /*
1622 * If both ends believe to be DFP/host, we have a data role
1623 * mismatch.
1624 */
1625 if (!!(le16_to_cpu(msg->header) & PD_HEADER_DATA_ROLE) ==
1626 (port->data_role == TYPEC_HOST)) {
1627 tcpm_log(port,
1628 "Data role mismatch, initiating error recovery");
1629 tcpm_set_state(port, ERROR_RECOVERY, 0);
1630 } else {
1631 if (cnt)
1632 tcpm_pd_data_request(port, msg);
1633 else
1634 tcpm_pd_ctrl_request(port, msg);
1635 }
1636 }
1637
5fec4b54 1638done:
f0690a25
GR
1639 mutex_unlock(&port->lock);
1640 kfree(event);
1641}
1642
1643void tcpm_pd_receive(struct tcpm_port *port, const struct pd_message *msg)
1644{
1645 struct pd_rx_event *event;
1646
1647 event = kzalloc(sizeof(*event), GFP_ATOMIC);
1648 if (!event)
1649 return;
1650
1651 INIT_WORK(&event->work, tcpm_pd_rx_handler);
1652 event->port = port;
1653 memcpy(&event->msg, msg, sizeof(*msg));
1654 queue_work(port->wq, &event->work);
1655}
1656EXPORT_SYMBOL_GPL(tcpm_pd_receive);
1657
1658static int tcpm_pd_send_control(struct tcpm_port *port,
1659 enum pd_ctrl_msg_type type)
1660{
1661 struct pd_message msg;
1662
1663 memset(&msg, 0, sizeof(msg));
1664 msg.header = PD_HEADER_LE(type, port->pwr_role,
1665 port->data_role,
1666 port->message_id, 0);
1667
1668 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
1669}
1670
1671/*
1672 * Send queued message without affecting state.
1673 * Return true if state machine should go back to sleep,
1674 * false otherwise.
1675 */
1676static bool tcpm_send_queued_message(struct tcpm_port *port)
1677{
1678 enum pd_msg_request queued_message;
1679
1680 do {
1681 queued_message = port->queued_message;
1682 port->queued_message = PD_MSG_NONE;
1683
1684 switch (queued_message) {
1685 case PD_MSG_CTRL_WAIT:
1686 tcpm_pd_send_control(port, PD_CTRL_WAIT);
1687 break;
1688 case PD_MSG_CTRL_REJECT:
1689 tcpm_pd_send_control(port, PD_CTRL_REJECT);
1690 break;
1691 case PD_MSG_DATA_SINK_CAP:
1692 tcpm_pd_send_sink_caps(port);
1693 break;
1694 case PD_MSG_DATA_SOURCE_CAP:
1695 tcpm_pd_send_source_caps(port);
1696 break;
1697 default:
1698 break;
1699 }
1700 } while (port->queued_message != PD_MSG_NONE);
1701
1702 if (port->delayed_state != INVALID_STATE) {
1703 if (time_is_after_jiffies(port->delayed_runtime)) {
1704 mod_delayed_work(port->wq, &port->state_machine,
1705 port->delayed_runtime - jiffies);
1706 return true;
1707 }
1708 port->delayed_state = INVALID_STATE;
1709 }
1710 return false;
1711}
1712
1713static int tcpm_pd_check_request(struct tcpm_port *port)
1714{
1715 u32 pdo, rdo = port->sink_request;
1716 unsigned int max, op, pdo_max, index;
1717 enum pd_pdo_type type;
1718
1719 index = rdo_index(rdo);
1720 if (!index || index > port->nr_src_pdo)
1721 return -EINVAL;
1722
1723 pdo = port->src_pdo[index - 1];
1724 type = pdo_type(pdo);
1725 switch (type) {
1726 case PDO_TYPE_FIXED:
1727 case PDO_TYPE_VAR:
1728 max = rdo_max_current(rdo);
1729 op = rdo_op_current(rdo);
1730 pdo_max = pdo_max_current(pdo);
1731
1732 if (op > pdo_max)
1733 return -EINVAL;
1734 if (max > pdo_max && !(rdo & RDO_CAP_MISMATCH))
1735 return -EINVAL;
1736
1737 if (type == PDO_TYPE_FIXED)
1738 tcpm_log(port,
1739 "Requested %u mV, %u mA for %u / %u mA",
1740 pdo_fixed_voltage(pdo), pdo_max, op, max);
1741 else
1742 tcpm_log(port,
1743 "Requested %u -> %u mV, %u mA for %u / %u mA",
1744 pdo_min_voltage(pdo), pdo_max_voltage(pdo),
1745 pdo_max, op, max);
1746 break;
1747 case PDO_TYPE_BATT:
1748 max = rdo_max_power(rdo);
1749 op = rdo_op_power(rdo);
1750 pdo_max = pdo_max_power(pdo);
1751
1752 if (op > pdo_max)
1753 return -EINVAL;
1754 if (max > pdo_max && !(rdo & RDO_CAP_MISMATCH))
1755 return -EINVAL;
1756 tcpm_log(port,
1757 "Requested %u -> %u mV, %u mW for %u / %u mW",
1758 pdo_min_voltage(pdo), pdo_max_voltage(pdo),
1759 pdo_max, op, max);
1760 break;
1761 default:
1762 return -EINVAL;
1763 }
1764
1765 port->op_vsafe5v = index == 1;
1766
1767 return 0;
1768}
1769
6f566af3 1770static int tcpm_pd_select_pdo(struct tcpm_port *port)
f0690a25 1771{
6f566af3 1772 unsigned int i, max_mw = 0, max_mv = 0;
f0690a25
GR
1773 int ret = -EINVAL;
1774
1775 /*
6f566af3
HG
1776 * Select the source PDO providing the most power while staying within
1777 * the board's voltage limits. Prefer PDO providing exp
f0690a25
GR
1778 */
1779 for (i = 0; i < port->nr_source_caps; i++) {
1780 u32 pdo = port->source_caps[i];
1781 enum pd_pdo_type type = pdo_type(pdo);
6f566af3 1782 unsigned int mv, ma, mw;
f0690a25 1783
6f566af3
HG
1784 if (type == PDO_TYPE_FIXED)
1785 mv = pdo_fixed_voltage(pdo);
1786 else
1787 mv = pdo_min_voltage(pdo);
1788
1789 if (type == PDO_TYPE_BATT) {
1790 mw = pdo_max_power(pdo);
1791 } else {
1792 ma = min(pdo_max_current(pdo),
1793 port->max_snk_ma);
1794 mw = ma * mv / 1000;
1795 }
1796
1797 /* Perfer higher voltages if available */
1798 if ((mw > max_mw || (mw == max_mw && mv > max_mv)) &&
1799 mv <= port->max_snk_mv) {
1800 ret = i;
1801 max_mw = mw;
1802 max_mv = mv;
f0690a25
GR
1803 }
1804 }
1805
1806 return ret;
1807}
1808
1809static int tcpm_pd_build_request(struct tcpm_port *port, u32 *rdo)
1810{
1811 unsigned int mv, ma, mw, flags;
1812 unsigned int max_ma, max_mw;
1813 enum pd_pdo_type type;
6f566af3
HG
1814 int index;
1815 u32 pdo;
f0690a25 1816
6f566af3
HG
1817 index = tcpm_pd_select_pdo(port);
1818 if (index < 0)
f0690a25 1819 return -EINVAL;
6f566af3 1820 pdo = port->source_caps[index];
f0690a25
GR
1821 type = pdo_type(pdo);
1822
1823 if (type == PDO_TYPE_FIXED)
1824 mv = pdo_fixed_voltage(pdo);
1825 else
1826 mv = pdo_min_voltage(pdo);
1827
6f566af3 1828 /* Select maximum available current within the board's power limit */
f0690a25 1829 if (type == PDO_TYPE_BATT) {
6f566af3
HG
1830 mw = pdo_max_power(pdo);
1831 ma = 1000 * min(mw, port->max_snk_mw) / mv;
f0690a25 1832 } else {
6f566af3
HG
1833 ma = min(pdo_max_current(pdo),
1834 1000 * port->max_snk_mw / mv);
f0690a25 1835 }
6f566af3 1836 ma = min(ma, port->max_snk_ma);
f0690a25 1837
931693f9 1838 flags = RDO_USB_COMM | RDO_NO_SUSPEND;
f0690a25
GR
1839
1840 /* Set mismatch bit if offered power is less than operating power */
6f566af3 1841 mw = ma * mv / 1000;
f0690a25
GR
1842 max_ma = ma;
1843 max_mw = mw;
1844 if (mw < port->operating_snk_mw) {
1845 flags |= RDO_CAP_MISMATCH;
6f566af3
HG
1846 max_mw = port->operating_snk_mw;
1847 max_ma = max_mw * 1000 / mv;
f0690a25
GR
1848 }
1849
1850 tcpm_log(port, "cc=%d cc1=%d cc2=%d vbus=%d vconn=%s polarity=%d",
1851 port->cc_req, port->cc1, port->cc2, port->vbus_source,
1852 port->vconn_role == TYPEC_SOURCE ? "source" : "sink",
1853 port->polarity);
1854
1855 if (type == PDO_TYPE_BATT) {
6f566af3 1856 *rdo = RDO_BATT(index + 1, mw, max_mw, flags);
f0690a25
GR
1857
1858 tcpm_log(port, "Requesting PDO %d: %u mV, %u mW%s",
6f566af3 1859 index, mv, mw,
f0690a25
GR
1860 flags & RDO_CAP_MISMATCH ? " [mismatch]" : "");
1861 } else {
6f566af3 1862 *rdo = RDO_FIXED(index + 1, ma, max_ma, flags);
f0690a25
GR
1863
1864 tcpm_log(port, "Requesting PDO %d: %u mV, %u mA%s",
6f566af3 1865 index, mv, ma,
f0690a25
GR
1866 flags & RDO_CAP_MISMATCH ? " [mismatch]" : "");
1867 }
1868
1869 port->current_limit = ma;
1870 port->supply_voltage = mv;
1871
1872 return 0;
1873}
1874
1875static int tcpm_pd_send_request(struct tcpm_port *port)
1876{
1877 struct pd_message msg;
1878 int ret;
1879 u32 rdo;
1880
1881 ret = tcpm_pd_build_request(port, &rdo);
1882 if (ret < 0)
1883 return ret;
1884
1885 memset(&msg, 0, sizeof(msg));
1886 msg.header = PD_HEADER_LE(PD_DATA_REQUEST,
1887 port->pwr_role,
1888 port->data_role,
1889 port->message_id, 1);
1890 msg.payload[0] = cpu_to_le32(rdo);
1891
1892 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
1893}
1894
1895static int tcpm_set_vbus(struct tcpm_port *port, bool enable)
1896{
1897 int ret;
1898
1899 if (enable && port->vbus_charge)
1900 return -EINVAL;
1901
1902 tcpm_log(port, "vbus:=%d charge=%d", enable, port->vbus_charge);
1903
1904 ret = port->tcpc->set_vbus(port->tcpc, enable, port->vbus_charge);
1905 if (ret < 0)
1906 return ret;
1907
1908 port->vbus_source = enable;
1909 return 0;
1910}
1911
1912static int tcpm_set_charge(struct tcpm_port *port, bool charge)
1913{
1914 int ret;
1915
1916 if (charge && port->vbus_source)
1917 return -EINVAL;
1918
1919 if (charge != port->vbus_charge) {
1920 tcpm_log(port, "vbus=%d charge:=%d", port->vbus_source, charge);
1921 ret = port->tcpc->set_vbus(port->tcpc, port->vbus_source,
1922 charge);
1923 if (ret < 0)
1924 return ret;
1925 }
1926 port->vbus_charge = charge;
1927 return 0;
1928}
1929
1930static bool tcpm_start_drp_toggling(struct tcpm_port *port)
1931{
1932 int ret;
1933
1934 if (port->tcpc->start_drp_toggling &&
9b0ae699 1935 port->port_type == TYPEC_PORT_DRP) {
f0690a25
GR
1936 tcpm_log_force(port, "Start DRP toggling");
1937 ret = port->tcpc->start_drp_toggling(port->tcpc,
1938 tcpm_rp_cc(port));
1939 if (!ret)
1940 return true;
1941 }
1942
1943 return false;
1944}
1945
1946static void tcpm_set_cc(struct tcpm_port *port, enum typec_cc_status cc)
1947{
1948 tcpm_log(port, "cc:=%d", cc);
1949 port->cc_req = cc;
1950 port->tcpc->set_cc(port->tcpc, cc);
1951}
1952
1953static int tcpm_init_vbus(struct tcpm_port *port)
1954{
1955 int ret;
1956
1957 ret = port->tcpc->set_vbus(port->tcpc, false, false);
1958 port->vbus_source = false;
1959 port->vbus_charge = false;
1960 return ret;
1961}
1962
1963static int tcpm_init_vconn(struct tcpm_port *port)
1964{
1965 int ret;
1966
1967 ret = port->tcpc->set_vconn(port->tcpc, false);
1968 port->vconn_role = TYPEC_SINK;
1969 return ret;
1970}
1971
1972static void tcpm_typec_connect(struct tcpm_port *port)
1973{
1974 if (!port->connected) {
1975 /* Make sure we don't report stale identity information */
1976 memset(&port->partner_ident, 0, sizeof(port->partner_ident));
1977 port->partner_desc.usb_pd = port->pd_capable;
1978 if (tcpm_port_is_debug(port))
1979 port->partner_desc.accessory = TYPEC_ACCESSORY_DEBUG;
1980 else if (tcpm_port_is_audio(port))
1981 port->partner_desc.accessory = TYPEC_ACCESSORY_AUDIO;
1982 else
1983 port->partner_desc.accessory = TYPEC_ACCESSORY_NONE;
1984 port->partner = typec_register_partner(port->typec_port,
1985 &port->partner_desc);
1986 port->connected = true;
1987 }
1988}
1989
1990static int tcpm_src_attach(struct tcpm_port *port)
1991{
1992 enum typec_cc_polarity polarity =
1993 port->cc2 == TYPEC_CC_RD ? TYPEC_POLARITY_CC2
1994 : TYPEC_POLARITY_CC1;
1995 int ret;
1996
1997 if (port->attached)
1998 return 0;
1999
2000 ret = tcpm_set_polarity(port, polarity);
2001 if (ret < 0)
2002 return ret;
2003
2004 ret = tcpm_set_roles(port, true, TYPEC_SOURCE, TYPEC_HOST);
2005 if (ret < 0)
2006 return ret;
2007
2008 ret = port->tcpc->set_pd_rx(port->tcpc, true);
2009 if (ret < 0)
2010 goto out_disable_mux;
2011
2012 /*
2013 * USB Type-C specification, version 1.2,
2014 * chapter 4.5.2.2.8.1 (Attached.SRC Requirements)
2015 * Enable VCONN only if the non-RD port is set to RA.
2016 */
2017 if ((polarity == TYPEC_POLARITY_CC1 && port->cc2 == TYPEC_CC_RA) ||
2018 (polarity == TYPEC_POLARITY_CC2 && port->cc1 == TYPEC_CC_RA)) {
2019 ret = tcpm_set_vconn(port, true);
2020 if (ret < 0)
2021 goto out_disable_pd;
2022 }
2023
2024 ret = tcpm_set_vbus(port, true);
2025 if (ret < 0)
2026 goto out_disable_vconn;
2027
2028 port->pd_capable = false;
2029
2030 port->partner = NULL;
2031
2032 port->attached = true;
2033 port->send_discover = true;
2034
2035 return 0;
2036
2037out_disable_vconn:
2038 tcpm_set_vconn(port, false);
2039out_disable_pd:
2040 port->tcpc->set_pd_rx(port->tcpc, false);
2041out_disable_mux:
2042 tcpm_mux_set(port, TYPEC_MUX_NONE, TCPC_USB_SWITCH_DISCONNECT);
2043 return ret;
2044}
2045
2046static void tcpm_typec_disconnect(struct tcpm_port *port)
2047{
2048 if (port->connected) {
2049 typec_unregister_partner(port->partner);
2050 port->partner = NULL;
2051 port->connected = false;
2052 }
2053}
2054
2055static void tcpm_unregister_altmodes(struct tcpm_port *port)
2056{
2057 struct pd_mode_data *modep = &port->mode_data;
2058 int i;
2059
2060 for (i = 0; i < modep->altmodes; i++) {
2061 typec_unregister_altmode(port->partner_altmode[i]);
2062 port->partner_altmode[i] = NULL;
2063 }
2064
2065 memset(modep, 0, sizeof(*modep));
2066}
2067
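/*
 * Return the port to a clean detached state: drop alternate modes and the
 * registered partner, disable PD receive, VBUS, VCONN and charging, reset
 * Rx message ID tracking and polarity, and clear the Try.SRC/Try.SNK
 * counters.
 */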
2068static void tcpm_reset_port(struct tcpm_port *port)
2069{
2070 tcpm_unregister_altmodes(port);
2071 tcpm_typec_disconnect(port);
2072 port->attached = false;
2073 port->pd_capable = false;
2074
2075	/*
2076	 * First Rx ID should be 0; set this to a sentinel of -1 so that
2077	 * tcpm_pd_rx_handler() can check whether we have seen it before.
2078 */
2079 port->rx_msgid = -1;
2080
2081 port->tcpc->set_pd_rx(port->tcpc, false);
2082 tcpm_init_vbus(port); /* also disables charging */
2083 tcpm_init_vconn(port);
2084 tcpm_set_current_limit(port, 0, 0);
2085 tcpm_set_polarity(port, TYPEC_POLARITY_CC1);
2086 tcpm_set_attached_state(port, false);
2087 port->try_src_count = 0;
2088 port->try_snk_count = 0;
2089}
2090
2091static void tcpm_detach(struct tcpm_port *port)
2092{
2093 if (!port->attached)
2094 return;
2095
2096 if (tcpm_port_is_disconnected(port))
2097 port->hard_reset_count = 0;
2098
2099 tcpm_reset_port(port);
2100}
2101
2102static void tcpm_src_detach(struct tcpm_port *port)
2103{
2104 tcpm_detach(port);
2105}
2106
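/*
 * Attach as a sink: select the polarity from the CC pin that is not open
 * and switch to SINK/DEVICE roles. PD receive is enabled later, once the
 * state machine reaches SNK_WAIT_CAPABILITIES.
 */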
2107static int tcpm_snk_attach(struct tcpm_port *port)
2108{
2109 int ret;
2110
2111 if (port->attached)
2112 return 0;
2113
2114 ret = tcpm_set_polarity(port, port->cc2 != TYPEC_CC_OPEN ?
2115 TYPEC_POLARITY_CC2 : TYPEC_POLARITY_CC1);
2116 if (ret < 0)
2117 return ret;
2118
2119 ret = tcpm_set_roles(port, true, TYPEC_SINK, TYPEC_DEVICE);
2120 if (ret < 0)
2121 return ret;
2122
2123 port->pd_capable = false;
2124
2125 port->partner = NULL;
2126
2127 port->attached = true;
2128 port->send_discover = true;
2129
2130 return 0;
2131}
2132
2133static void tcpm_snk_detach(struct tcpm_port *port)
2134{
2135 tcpm_detach(port);
2136
2137 /* XXX: (Dis)connect SuperSpeed mux? */
2138}
2139
2140static int tcpm_acc_attach(struct tcpm_port *port)
2141{
2142 int ret;
2143
2144 if (port->attached)
2145 return 0;
2146
2147 ret = tcpm_set_roles(port, true, TYPEC_SOURCE, TYPEC_HOST);
2148 if (ret < 0)
2149 return ret;
2150
2151 port->partner = NULL;
2152
2153 tcpm_typec_connect(port);
2154
2155 port->attached = true;
2156
2157 return 0;
2158}
2159
2160static void tcpm_acc_detach(struct tcpm_port *port)
2161{
2162 tcpm_detach(port);
2163}
2164
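/*
 * State to enter when a PD transaction fails: retry with a hard reset while
 * attempts remain; once exhausted, use error recovery for a PD-capable
 * partner, otherwise detach (or stay usable in SNK_READY if we were only
 * waiting for source capabilities).
 */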
2165static inline enum tcpm_state hard_reset_state(struct tcpm_port *port)
2166{
2167 if (port->hard_reset_count < PD_N_HARD_RESET_COUNT)
2168 return HARD_RESET_SEND;
2169 if (port->pd_capable)
2170 return ERROR_RECOVERY;
2171 if (port->pwr_role == TYPEC_SOURCE)
2172 return SRC_UNATTACHED;
2173 if (port->state == SNK_WAIT_CAPABILITIES)
2174 return SNK_READY;
2175 return SNK_UNATTACHED;
2176}
2177
2178static inline enum tcpm_state ready_state(struct tcpm_port *port)
2179{
2180 if (port->pwr_role == TYPEC_SOURCE)
2181 return SRC_READY;
2182 else
2183 return SNK_READY;
2184}
2185
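/*
 * Unattached state to fall back to, chosen from the configured port type
 * and, for DRP ports, the current power role.
 */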
2186static inline enum tcpm_state unattached_state(struct tcpm_port *port)
2187{
2188 if (port->port_type == TYPEC_PORT_DRP) {
2189 if (port->pwr_role == TYPEC_SOURCE)
2190 return SRC_UNATTACHED;
2191 else
2192 return SNK_UNATTACHED;
2193 } else if (port->port_type == TYPEC_PORT_DFP) {
f0690a25 2194 return SRC_UNATTACHED;
2195 }
2196
2197 return SNK_UNATTACHED;
2198}
2199
2200static void tcpm_check_send_discover(struct tcpm_port *port)
2201{
2202 if (port->data_role == TYPEC_HOST && port->send_discover &&
2203 port->pd_capable) {
2204 tcpm_send_vdm(port, USB_SID_PD, CMD_DISCOVER_IDENT, NULL, 0);
2205 port->send_discover = false;
2206 }
2207}
2208
2209static void tcpm_swap_complete(struct tcpm_port *port, int result)
2210{
2211 if (port->swap_pending) {
2212 port->swap_status = result;
2213 port->swap_pending = false;
b17dd571 2214 port->non_pd_role_swap = false;
2215 complete(&port->swap_complete);
2216 }
2217}
2218
53b70e5c 2219static enum typec_pwr_opmode tcpm_get_pwr_opmode(enum typec_cc_status cc)
2220{
2221 switch (cc) {
2222 case TYPEC_CC_RP_1_5:
2223 return TYPEC_PWR_MODE_1_5A;
2224 case TYPEC_CC_RP_3_0:
2225 return TYPEC_PWR_MODE_3_0A;
2226 case TYPEC_CC_RP_DEF:
2227 default:
2228 return TYPEC_PWR_MODE_USB;
2229 }
2230}
2231
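/*
 * Execute the entry actions for the current state. Transitions requested
 * here either take effect immediately or are scheduled with a delay via
 * tcpm_set_state(); the caller keeps looping until the state settles.
 */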
2232static void run_state_machine(struct tcpm_port *port)
2233{
2234 int ret;
fce042f0 2235 enum typec_pwr_opmode opmode;
131c7d12 2236 unsigned int msecs;
2237
2238 port->enter_state = port->state;
2239 switch (port->state) {
2240 case DRP_TOGGLING:
2241 break;
2242 /* SRC states */
2243 case SRC_UNATTACHED:
2244 if (!port->non_pd_role_swap)
2245 tcpm_swap_complete(port, -ENOTCONN);
2246 tcpm_src_detach(port);
2247 if (tcpm_start_drp_toggling(port)) {
2248 tcpm_set_state(port, DRP_TOGGLING, 0);
2249 break;
2250 }
2251 tcpm_set_cc(port, tcpm_rp_cc(port));
9b0ae699 2252 if (port->port_type == TYPEC_PORT_DRP)
2253 tcpm_set_state(port, SNK_UNATTACHED, PD_T_DRP_SNK);
2254 break;
2255 case SRC_ATTACH_WAIT:
2256 if (tcpm_port_is_debug(port))
2257 tcpm_set_state(port, DEBUG_ACC_ATTACHED,
2258 PD_T_CC_DEBOUNCE);
2259 else if (tcpm_port_is_audio(port))
2260 tcpm_set_state(port, AUDIO_ACC_ATTACHED,
2261 PD_T_CC_DEBOUNCE);
2262 else if (tcpm_port_is_source(port))
2263 tcpm_set_state(port,
2264 tcpm_try_snk(port) ? SNK_TRY
2265 : SRC_ATTACHED,
2266 PD_T_CC_DEBOUNCE);
2267 break;
2268
2269 case SNK_TRY:
2270 port->try_snk_count++;
2271 /*
2272 * Requirements:
2273 * - Do not drive vconn or vbus
2274 * - Terminate CC pins (both) to Rd
2275 * Action:
2276 * - Wait for tDRPTry (PD_T_DRP_TRY).
2277 * Until then, ignore any state changes.
2278 */
2279 tcpm_set_cc(port, TYPEC_CC_RD);
2280 tcpm_set_state(port, SNK_TRY_WAIT, PD_T_DRP_TRY);
2281 break;
2282 case SNK_TRY_WAIT:
2283 if (tcpm_port_is_sink(port)) {
2284 tcpm_set_state(port, SNK_TRY_WAIT_DEBOUNCE, 0);
2285 } else {
2286 tcpm_set_state(port, SRC_TRYWAIT, 0);
2287 port->max_wait = 0;
2288 }
2289 break;
2290 case SNK_TRY_WAIT_DEBOUNCE:
2291 tcpm_set_state(port, SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS,
2292 PD_T_PD_DEBOUNCE);
2293 break;
2294 case SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS:
2295 if (port->vbus_present && tcpm_port_is_sink(port)) {
2296 tcpm_set_state(port, SNK_ATTACHED, 0);
2297 } else {
2298 tcpm_set_state(port, SRC_TRYWAIT, 0);
02d5be46 2299 port->max_wait = 0;
f0690a25 2300 }
2301 break;
2302 case SRC_TRYWAIT:
2303 tcpm_set_cc(port, tcpm_rp_cc(port));
2304 if (port->max_wait == 0) {
2305 port->max_wait = jiffies +
2306 msecs_to_jiffies(PD_T_DRP_TRY);
2307 tcpm_set_state(port, SRC_TRYWAIT_UNATTACHED,
2308 PD_T_DRP_TRY);
2309 } else {
2310 if (time_is_after_jiffies(port->max_wait))
2311 tcpm_set_state(port, SRC_TRYWAIT_UNATTACHED,
2312 jiffies_to_msecs(port->max_wait -
2313 jiffies));
2314 else
2315 tcpm_set_state(port, SNK_UNATTACHED, 0);
2316 }
2317 break;
2318 case SRC_TRYWAIT_DEBOUNCE:
2319 tcpm_set_state(port, SRC_ATTACHED, PD_T_CC_DEBOUNCE);
2320 break;
2321 case SRC_TRYWAIT_UNATTACHED:
2322 tcpm_set_state(port, SNK_UNATTACHED, 0);
2323 break;
2324
2325 case SRC_ATTACHED:
2326 ret = tcpm_src_attach(port);
2327 tcpm_set_state(port, SRC_UNATTACHED,
2328 ret < 0 ? 0 : PD_T_PS_SOURCE_ON);
2329 break;
2330 case SRC_STARTUP:
2331 opmode = tcpm_get_pwr_opmode(tcpm_rp_cc(port));
2332 typec_set_pwr_opmode(port->typec_port, opmode);
f0690a25
GR
2333 port->pwr_opmode = TYPEC_PWR_MODE_USB;
2334 port->caps_count = 0;
2335 port->message_id = 0;
5fec4b54 2336 port->rx_msgid = -1;
2337 port->explicit_contract = false;
2338 tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
2339 break;
2340 case SRC_SEND_CAPABILITIES:
2341 port->caps_count++;
2342 if (port->caps_count > PD_N_CAPS_COUNT) {
2343 tcpm_set_state(port, SRC_READY, 0);
2344 break;
2345 }
2346 ret = tcpm_pd_send_source_caps(port);
2347 if (ret < 0) {
2348 tcpm_set_state(port, SRC_SEND_CAPABILITIES,
2349 PD_T_SEND_SOURCE_CAP);
2350 } else {
2351 /*
2352 * Per standard, we should clear the reset counter here.
2353 * However, that can result in state machine hang-ups.
2354 * Reset it only in READY state to improve stability.
2355 */
2356 /* port->hard_reset_count = 0; */
2357 port->caps_count = 0;
2358 port->pd_capable = true;
2359 tcpm_set_state_cond(port, hard_reset_state(port),
2360 PD_T_SEND_SOURCE_CAP);
2361 }
2362 break;
2363 case SRC_NEGOTIATE_CAPABILITIES:
2364 ret = tcpm_pd_check_request(port);
2365 if (ret < 0) {
2366 tcpm_pd_send_control(port, PD_CTRL_REJECT);
2367 if (!port->explicit_contract) {
2368 tcpm_set_state(port,
2369 SRC_WAIT_NEW_CAPABILITIES, 0);
2370 } else {
2371 tcpm_set_state(port, SRC_READY, 0);
2372 }
2373 } else {
2374 tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
2375 tcpm_set_state(port, SRC_TRANSITION_SUPPLY,
2376 PD_T_SRC_TRANSITION);
2377 }
2378 break;
2379 case SRC_TRANSITION_SUPPLY:
2380 /* XXX: regulator_set_voltage(vbus, ...) */
2381 tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
2382 port->explicit_contract = true;
2383 typec_set_pwr_opmode(port->typec_port, TYPEC_PWR_MODE_PD);
2384 port->pwr_opmode = TYPEC_PWR_MODE_PD;
2385 tcpm_set_state_cond(port, SRC_READY, 0);
2386 break;
2387 case SRC_READY:
2388#if 1
2389 port->hard_reset_count = 0;
2390#endif
2391 port->try_src_count = 0;
2392
3113bf1a 2393 tcpm_swap_complete(port, 0);
f0690a25 2394 tcpm_typec_connect(port);
2395 tcpm_check_send_discover(port);
2396 /*
2397 * 6.3.5
2398 * Sending ping messages is not necessary if
2399 * - the source operates at vSafe5V
2400 * or
2401 * - The system is not operating in PD mode
2402 * or
2403 * - Both partners are connected using a Type-C connector
2404	 *
2405	 * There is no actual need to send PD messages since the local
2406	 * port is Type-C, and the spec does not clearly say whether PD is
2407	 * possible when Type-C is connected to Type-A/B.
f0690a25	2408	 */
2409 break;
2410 case SRC_WAIT_NEW_CAPABILITIES:
2411 /* Nothing to do... */
2412 break;
2413
2414 /* SNK states */
2415 case SNK_UNATTACHED:
2416 if (!port->non_pd_role_swap)
2417 tcpm_swap_complete(port, -ENOTCONN);
2418 tcpm_snk_detach(port);
2419 if (tcpm_start_drp_toggling(port)) {
2420 tcpm_set_state(port, DRP_TOGGLING, 0);
2421 break;
2422 }
2423 tcpm_set_cc(port, TYPEC_CC_RD);
9b0ae699 2424 if (port->port_type == TYPEC_PORT_DRP)
2425 tcpm_set_state(port, SRC_UNATTACHED, PD_T_DRP_SRC);
2426 break;
2427 case SNK_ATTACH_WAIT:
2428 if ((port->cc1 == TYPEC_CC_OPEN &&
2429 port->cc2 != TYPEC_CC_OPEN) ||
2430 (port->cc1 != TYPEC_CC_OPEN &&
2431 port->cc2 == TYPEC_CC_OPEN))
2432 tcpm_set_state(port, SNK_DEBOUNCED,
2433 PD_T_CC_DEBOUNCE);
2434 else if (tcpm_port_is_disconnected(port))
2435 tcpm_set_state(port, SNK_UNATTACHED,
2436 PD_T_PD_DEBOUNCE);
2437 break;
2438 case SNK_DEBOUNCED:
2439 if (tcpm_port_is_disconnected(port))
2440 tcpm_set_state(port, SNK_UNATTACHED,
2441 PD_T_PD_DEBOUNCE);
2442 else if (port->vbus_present)
2443 tcpm_set_state(port,
2444 tcpm_try_src(port) ? SRC_TRY
2445 : SNK_ATTACHED,
2446 0);
2447 else
2448 /* Wait for VBUS, but not forever */
56277035 2449 tcpm_set_state(port, PORT_RESET, PD_T_PS_SOURCE_ON);
2450 break;
2451
2452 case SRC_TRY:
2453 port->try_src_count++;
2454 tcpm_set_cc(port, tcpm_rp_cc(port));
2455 port->max_wait = 0;
2456 tcpm_set_state(port, SRC_TRY_WAIT, 0);
2457 break;
2458 case SRC_TRY_WAIT:
2459 if (port->max_wait == 0) {
2460 port->max_wait = jiffies +
2461 msecs_to_jiffies(PD_T_DRP_TRY);
2462 msecs = PD_T_DRP_TRY;
2463 } else {
2464 if (time_is_after_jiffies(port->max_wait))
2465 msecs = jiffies_to_msecs(port->max_wait -
2466 jiffies);
2467 else
2468 msecs = 0;
2469 }
2470 tcpm_set_state(port, SNK_TRYWAIT, msecs);
2471 break;
2472 case SRC_TRY_DEBOUNCE:
2473 tcpm_set_state(port, SRC_ATTACHED, PD_T_PD_DEBOUNCE);
2474 break;
2475 case SNK_TRYWAIT:
2476 tcpm_set_cc(port, TYPEC_CC_RD);
af450ebb 2477 tcpm_set_state(port, SNK_TRYWAIT_VBUS, PD_T_CC_DEBOUNCE);
f0690a25 2478 break;
2479 case SNK_TRYWAIT_VBUS:
2480 /*
2481 * TCPM stays in this state indefinitely until VBUS
2482	 * is detected, as long as Rp is not removed for
2483	 * longer than tPDDebounce.
2484 */
2485 if (port->vbus_present && tcpm_port_is_sink(port)) {
2486 tcpm_set_state(port, SNK_ATTACHED, 0);
2487 break;
2488 }
2489 if (!tcpm_port_is_sink(port))
2490 tcpm_set_state(port, SNK_TRYWAIT_DEBOUNCE, 0);
f0690a25 2491 break;
2492 case SNK_TRYWAIT_DEBOUNCE:
2493 tcpm_set_state(port, SNK_UNATTACHED, PD_T_PD_DEBOUNCE);
f0690a25 2494 break;
2495 case SNK_ATTACHED:
2496 ret = tcpm_snk_attach(port);
2497 if (ret < 0)
2498 tcpm_set_state(port, SNK_UNATTACHED, 0);
2499 else
2500 tcpm_set_state(port, SNK_STARTUP, 0);
2501 break;
2502 case SNK_STARTUP:
2503 opmode = tcpm_get_pwr_opmode(port->polarity ?
2504 port->cc2 : port->cc1);
2505 typec_set_pwr_opmode(port->typec_port, opmode);
2506 port->pwr_opmode = TYPEC_PWR_MODE_USB;
2507 port->message_id = 0;
5fec4b54 2508 port->rx_msgid = -1;
2509 port->explicit_contract = false;
2510 tcpm_set_state(port, SNK_DISCOVERY, 0);
2511 break;
2512 case SNK_DISCOVERY:
2513 if (port->vbus_present) {
2514 tcpm_set_current_limit(port,
2515 tcpm_get_current_limit(port),
2516 5000);
2517 tcpm_set_charge(port, true);
2518 tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
2519 break;
2520 }
2521 /*
2522 * For DRP, timeouts differ. Also, handling is supposed to be
2523 * different and much more complex (dead battery detection;
2524 * see USB power delivery specification, section 8.3.3.6.1.5.1).
2525 */
2526 tcpm_set_state(port, hard_reset_state(port),
9b0ae699 2527 port->port_type == TYPEC_PORT_DRP ?
2528 PD_T_DB_DETECT : PD_T_NO_RESPONSE);
2529 break;
2530 case SNK_DISCOVERY_DEBOUNCE:
2531 tcpm_set_state(port, SNK_DISCOVERY_DEBOUNCE_DONE,
2532 PD_T_CC_DEBOUNCE);
2533 break;
2534 case SNK_DISCOVERY_DEBOUNCE_DONE:
2535 if (!tcpm_port_is_disconnected(port) &&
2536 tcpm_port_is_sink(port) &&
2537 time_is_after_jiffies(port->delayed_runtime)) {
2538 tcpm_set_state(port, SNK_DISCOVERY,
2539 port->delayed_runtime - jiffies);
2540 break;
2541 }
2542 tcpm_set_state(port, unattached_state(port), 0);
2543 break;
2544 case SNK_WAIT_CAPABILITIES:
2545 ret = port->tcpc->set_pd_rx(port->tcpc, true);
2546 if (ret < 0) {
2547 tcpm_set_state(port, SNK_READY, 0);
2548 break;
2549 }
2550 /*
2551 * If VBUS has never been low, and we time out waiting
2552 * for source cap, try a soft reset first, in case we
2553 * were already in a stable contract before this boot.
2554 * Do this only once.
2555 */
2556 if (port->vbus_never_low) {
2557 port->vbus_never_low = false;
2558 tcpm_set_state(port, SOFT_RESET_SEND,
2559 PD_T_SINK_WAIT_CAP);
2560 } else {
2561 tcpm_set_state(port, hard_reset_state(port),
2562 PD_T_SINK_WAIT_CAP);
2563 }
2564 break;
2565 case SNK_NEGOTIATE_CAPABILITIES:
2566 port->pd_capable = true;
2567 port->hard_reset_count = 0;
2568 ret = tcpm_pd_send_request(port);
2569 if (ret < 0) {
2570 /* Let the Source send capabilities again. */
2571 tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
2572 } else {
2573 tcpm_set_state_cond(port, hard_reset_state(port),
2574 PD_T_SENDER_RESPONSE);
2575 }
2576 break;
2577 case SNK_TRANSITION_SINK:
2578 case SNK_TRANSITION_SINK_VBUS:
2579 tcpm_set_state(port, hard_reset_state(port),
2580 PD_T_PS_TRANSITION);
2581 break;
2582 case SNK_READY:
2583 port->try_snk_count = 0;
2584 if (port->explicit_contract) {
2585 typec_set_pwr_opmode(port->typec_port,
2586 TYPEC_PWR_MODE_PD);
2587 port->pwr_opmode = TYPEC_PWR_MODE_PD;
2588 }
f0690a25 2589
3113bf1a 2590 tcpm_swap_complete(port, 0);
f0690a25 2591 tcpm_typec_connect(port);
2592 tcpm_check_send_discover(port);
2593 break;
2594
2595 /* Accessory states */
2596 case ACC_UNATTACHED:
2597 tcpm_acc_detach(port);
2598 tcpm_set_state(port, SRC_UNATTACHED, 0);
2599 break;
2600 case DEBUG_ACC_ATTACHED:
2601 case AUDIO_ACC_ATTACHED:
2602 ret = tcpm_acc_attach(port);
2603 if (ret < 0)
2604 tcpm_set_state(port, ACC_UNATTACHED, 0);
2605 break;
2606 case AUDIO_ACC_DEBOUNCE:
2607 tcpm_set_state(port, ACC_UNATTACHED, PD_T_CC_DEBOUNCE);
2608 break;
2609
2610 /* Hard_Reset states */
2611 case HARD_RESET_SEND:
2612 tcpm_pd_transmit(port, TCPC_TX_HARD_RESET, NULL);
2613 tcpm_set_state(port, HARD_RESET_START, 0);
2614 break;
2615 case HARD_RESET_START:
2616 port->hard_reset_count++;
2617 port->tcpc->set_pd_rx(port->tcpc, false);
2618 tcpm_unregister_altmodes(port);
2619 port->send_discover = true;
2620 if (port->pwr_role == TYPEC_SOURCE)
2621 tcpm_set_state(port, SRC_HARD_RESET_VBUS_OFF,
2622 PD_T_PS_HARD_RESET);
2623 else
2624 tcpm_set_state(port, SNK_HARD_RESET_SINK_OFF, 0);
2625 break;
2626 case SRC_HARD_RESET_VBUS_OFF:
2627 tcpm_set_vconn(port, true);
2628 tcpm_set_vbus(port, false);
2629 tcpm_set_roles(port, false, TYPEC_SOURCE, TYPEC_HOST);
2630 tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, PD_T_SRC_RECOVER);
2631 break;
2632 case SRC_HARD_RESET_VBUS_ON:
2633 tcpm_set_vbus(port, true);
2634 port->tcpc->set_pd_rx(port->tcpc, true);
2635 tcpm_set_attached_state(port, true);
2636 tcpm_set_state(port, SRC_UNATTACHED, PD_T_PS_SOURCE_ON);
2637 break;
2638 case SNK_HARD_RESET_SINK_OFF:
2639 tcpm_set_vconn(port, false);
2640 tcpm_set_charge(port, false);
2641 tcpm_set_roles(port, false, TYPEC_SINK, TYPEC_DEVICE);
2642 /*
2643 * VBUS may or may not toggle, depending on the adapter.
2644 * If it doesn't toggle, transition to SNK_HARD_RESET_SINK_ON
2645 * directly after timeout.
2646 */
2647 tcpm_set_state(port, SNK_HARD_RESET_SINK_ON, PD_T_SAFE_0V);
2648 break;
2649 case SNK_HARD_RESET_WAIT_VBUS:
2650 /* Assume we're disconnected if VBUS doesn't come back. */
2651 tcpm_set_state(port, SNK_UNATTACHED,
2652 PD_T_SRC_RECOVER_MAX + PD_T_SRC_TURN_ON);
2653 break;
2654 case SNK_HARD_RESET_SINK_ON:
2655 /* Note: There is no guarantee that VBUS is on in this state */
2656 /*
2657 * XXX:
2658 * The specification suggests that dual mode ports in sink
2659 * mode should transition to state PE_SRC_Transition_to_default.
2660 * See USB power delivery specification chapter 8.3.3.6.1.3.
2661	 * This would mean to
2662 * - turn off VCONN, reset power supply
2663 * - request hardware reset
2664 * - turn on VCONN
2665 * - Transition to state PE_Src_Startup
2666 * SNK only ports shall transition to state Snk_Startup
2667 * (see chapter 8.3.3.3.8).
2668	 * Similarly, dual-mode ports in source mode should transition
2669 * to PE_SNK_Transition_to_default.
2670 */
2671 tcpm_set_attached_state(port, true);
2672 tcpm_set_state(port, SNK_STARTUP, 0);
2673 break;
2674
2675 /* Soft_Reset states */
2676 case SOFT_RESET:
2677 port->message_id = 0;
5fec4b54 2678 port->rx_msgid = -1;
2679 tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
2680 if (port->pwr_role == TYPEC_SOURCE)
2681 tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
2682 else
2683 tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
2684 break;
2685 case SOFT_RESET_SEND:
2686 port->message_id = 0;
5fec4b54 2687 port->rx_msgid = -1;
f0690a25
GR
2688 if (tcpm_pd_send_control(port, PD_CTRL_SOFT_RESET))
2689 tcpm_set_state_cond(port, hard_reset_state(port), 0);
2690 else
2691 tcpm_set_state_cond(port, hard_reset_state(port),
2692 PD_T_SENDER_RESPONSE);
2693 break;
2694
2695 /* DR_Swap states */
2696 case DR_SWAP_SEND:
2697 tcpm_pd_send_control(port, PD_CTRL_DR_SWAP);
2698 tcpm_set_state_cond(port, DR_SWAP_SEND_TIMEOUT,
2699 PD_T_SENDER_RESPONSE);
2700 break;
2701 case DR_SWAP_ACCEPT:
2702 tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
2703 tcpm_set_state_cond(port, DR_SWAP_CHANGE_DR, 0);
2704 break;
2705 case DR_SWAP_SEND_TIMEOUT:
2706 tcpm_swap_complete(port, -ETIMEDOUT);
2707 tcpm_set_state(port, ready_state(port), 0);
2708 break;
2709 case DR_SWAP_CHANGE_DR:
2710 if (port->data_role == TYPEC_HOST) {
2711 tcpm_unregister_altmodes(port);
2712 tcpm_set_roles(port, true, port->pwr_role,
2713 TYPEC_DEVICE);
2714 } else {
2715 tcpm_set_roles(port, true, port->pwr_role,
2716 TYPEC_HOST);
2717 port->send_discover = true;
2718 }
2719 tcpm_set_state(port, ready_state(port), 0);
2720 break;
2721
2722 /* PR_Swap states */
2723 case PR_SWAP_ACCEPT:
2724 tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
2725 tcpm_set_state(port, PR_SWAP_START, 0);
2726 break;
2727 case PR_SWAP_SEND:
2728 tcpm_pd_send_control(port, PD_CTRL_PR_SWAP);
2729 tcpm_set_state_cond(port, PR_SWAP_SEND_TIMEOUT,
2730 PD_T_SENDER_RESPONSE);
2731 break;
2732 case PR_SWAP_SEND_TIMEOUT:
2733 tcpm_swap_complete(port, -ETIMEDOUT);
2734 tcpm_set_state(port, ready_state(port), 0);
2735 break;
2736 case PR_SWAP_START:
2737 if (port->pwr_role == TYPEC_SOURCE)
2738 tcpm_set_state(port, PR_SWAP_SRC_SNK_TRANSITION_OFF,
2739 PD_T_SRC_TRANSITION);
2740 else
2741 tcpm_set_state(port, PR_SWAP_SNK_SRC_SINK_OFF, 0);
2742 break;
2743 case PR_SWAP_SRC_SNK_TRANSITION_OFF:
2744 tcpm_set_vbus(port, false);
2745 port->explicit_contract = false;
b965b631 2746 /* allow time for Vbus discharge, must be < tSrcSwapStdby */
f0690a25 2747 tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF,
b965b631 2748 PD_T_SRCSWAPSTDBY);
2749 break;
2750 case PR_SWAP_SRC_SNK_SOURCE_OFF:
2751 tcpm_set_cc(port, TYPEC_CC_RD);
2752 /* allow CC debounce */
2753 tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED,
2754 PD_T_CC_DEBOUNCE);
2755 break;
2756 case PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED:
2757 /*
2758 * USB-PD standard, 6.2.1.4, Port Power Role:
2759 * "During the Power Role Swap Sequence, for the initial Source
2760 * Port, the Port Power Role field shall be set to Sink in the
2761 * PS_RDY Message indicating that the initial Source’s power
2762 * supply is turned off"
2763 */
2764 tcpm_set_pwr_role(port, TYPEC_SINK);
2765 if (tcpm_pd_send_control(port, PD_CTRL_PS_RDY)) {
2766 tcpm_set_state(port, ERROR_RECOVERY, 0);
2767 break;
2768 }
2769 tcpm_set_state_cond(port, SNK_UNATTACHED, PD_T_PS_SOURCE_ON);
2770 break;
2771 case PR_SWAP_SRC_SNK_SINK_ON:
2772 tcpm_set_state(port, SNK_STARTUP, 0);
2773 break;
2774 case PR_SWAP_SNK_SRC_SINK_OFF:
2775 tcpm_set_charge(port, false);
2776 tcpm_set_state(port, hard_reset_state(port),
2777 PD_T_PS_SOURCE_OFF);
2778 break;
2779 case PR_SWAP_SNK_SRC_SOURCE_ON:
2780 tcpm_set_cc(port, tcpm_rp_cc(port));
2781 tcpm_set_vbus(port, true);
2782	 /*
2783	 * Allow time for VBUS ramp-up; it must complete within tNewSrc.
2784	 * This window also overlaps with CC debounce, so wait for the
2785	 * larger of the two, which is PD_T_NEWSRC.
2786	 */
2787 tcpm_set_state(port, PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP,
2788 PD_T_NEWSRC);
2789 break;
2790 case PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP:
2791 /*
2792 * USB PD standard, 6.2.1.4:
2793 * "Subsequent Messages initiated by the Policy Engine,
2794 * such as the PS_RDY Message sent to indicate that Vbus
2795 * is ready, will have the Port Power Role field set to
2796 * Source."
2797 */
f0690a25 2798 tcpm_set_pwr_role(port, TYPEC_SOURCE);
050161ea 2799 tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
2800 tcpm_set_state(port, SRC_STARTUP, 0);
2801 break;
2802
2803 case VCONN_SWAP_ACCEPT:
2804 tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
2805 tcpm_set_state(port, VCONN_SWAP_START, 0);
2806 break;
2807 case VCONN_SWAP_SEND:
2808 tcpm_pd_send_control(port, PD_CTRL_VCONN_SWAP);
2809 tcpm_set_state(port, VCONN_SWAP_SEND_TIMEOUT,
2810 PD_T_SENDER_RESPONSE);
2811 break;
2812 case VCONN_SWAP_SEND_TIMEOUT:
2813 tcpm_swap_complete(port, -ETIMEDOUT);
2814 tcpm_set_state(port, ready_state(port), 0);
2815 break;
2816 case VCONN_SWAP_START:
2817 if (port->vconn_role == TYPEC_SOURCE)
2818 tcpm_set_state(port, VCONN_SWAP_WAIT_FOR_VCONN, 0);
2819 else
2820 tcpm_set_state(port, VCONN_SWAP_TURN_ON_VCONN, 0);
2821 break;
2822 case VCONN_SWAP_WAIT_FOR_VCONN:
2823 tcpm_set_state(port, hard_reset_state(port),
2824 PD_T_VCONN_SOURCE_ON);
2825 break;
2826 case VCONN_SWAP_TURN_ON_VCONN:
2827 tcpm_set_vconn(port, true);
2828 tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
2829 tcpm_set_state(port, ready_state(port), 0);
2830 break;
2831 case VCONN_SWAP_TURN_OFF_VCONN:
2832 tcpm_set_vconn(port, false);
2833 tcpm_set_state(port, ready_state(port), 0);
2834 break;
2835
2836 case DR_SWAP_CANCEL:
2837 case PR_SWAP_CANCEL:
2838 case VCONN_SWAP_CANCEL:
2839 tcpm_swap_complete(port, port->swap_status);
2840 if (port->pwr_role == TYPEC_SOURCE)
2841 tcpm_set_state(port, SRC_READY, 0);
2842 else
2843 tcpm_set_state(port, SNK_READY, 0);
2844 break;
2845
2846 case BIST_RX:
2847 switch (BDO_MODE_MASK(port->bist_request)) {
2848 case BDO_MODE_CARRIER2:
2849 tcpm_pd_transmit(port, TCPC_TX_BIST_MODE_2, NULL);
2850 break;
2851 default:
2852 break;
2853 }
2854 /* Always switch to unattached state */
2855 tcpm_set_state(port, unattached_state(port), 0);
2856 break;
2857 case ERROR_RECOVERY:
2858 tcpm_swap_complete(port, -EPROTO);
2859 tcpm_set_state(port, PORT_RESET, 0);
2860 break;
2861 case PORT_RESET:
f0690a25 2862 tcpm_reset_port(port);
f0690a25 2863 tcpm_set_cc(port, TYPEC_CC_OPEN);
b17dd571 2864 tcpm_set_state(port, PORT_RESET_WAIT_OFF,
2865 PD_T_ERROR_RECOVERY);
2866 break;
b17dd571 2867 case PORT_RESET_WAIT_OFF:
2868 tcpm_set_state(port,
2869 tcpm_default_state(port),
2870 port->vbus_present ? PD_T_PS_SOURCE_OFF : 0);
2871 break;
2872 default:
2873 WARN(1, "Unexpected port state %d\n", port->state);
2874 break;
2875 }
2876}
2877
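/*
 * Worker that drives the state machine: send any queued message, apply a
 * pending delayed state change, then keep calling run_state_machine()
 * until the state stops changing or a delayed transition is scheduled.
 * Runs with port->lock held.
 */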
2878static void tcpm_state_machine_work(struct work_struct *work)
2879{
2880 struct tcpm_port *port = container_of(work, struct tcpm_port,
2881 state_machine.work);
2882 enum tcpm_state prev_state;
2883
2884 mutex_lock(&port->lock);
2885 port->state_machine_running = true;
2886
2887 if (port->queued_message && tcpm_send_queued_message(port))
2888 goto done;
2889
2890 /* If we were queued due to a delayed state change, update it now */
2891 if (port->delayed_state) {
2892 tcpm_log(port, "state change %s -> %s [delayed %ld ms]",
2893 tcpm_states[port->state],
2894 tcpm_states[port->delayed_state], port->delay_ms);
2895 port->prev_state = port->state;
2896 port->state = port->delayed_state;
2897 port->delayed_state = INVALID_STATE;
2898 }
2899
2900 /*
2901 * Continue running as long as we have (non-delayed) state changes
2902 * to make.
2903 */
2904 do {
2905 prev_state = port->state;
2906 run_state_machine(port);
2907 if (port->queued_message)
2908 tcpm_send_queued_message(port);
2909 } while (port->state != prev_state && !port->delayed_state);
2910
2911done:
2912 port->state_machine_running = false;
2913 mutex_unlock(&port->lock);
2914}
2915
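/*
 * Handle a CC status change reported by the low-level driver: record the
 * new CC1/CC2 values and, depending on the current state, start attach or
 * detach debouncing or hand control back to the state machine.
 */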
2916static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1,
2917 enum typec_cc_status cc2)
2918{
2919 enum typec_cc_status old_cc1, old_cc2;
2920 enum tcpm_state new_state;
2921
2922 old_cc1 = port->cc1;
2923 old_cc2 = port->cc2;
2924 port->cc1 = cc1;
2925 port->cc2 = cc2;
2926
2927 tcpm_log_force(port,
2928 "CC1: %u -> %u, CC2: %u -> %u [state %s, polarity %d, %s]",
2929 old_cc1, cc1, old_cc2, cc2, tcpm_states[port->state],
2930 port->polarity,
2931 tcpm_port_is_disconnected(port) ? "disconnected"
2932 : "connected");
2933
2934 switch (port->state) {
2935 case DRP_TOGGLING:
2936 if (tcpm_port_is_debug(port) || tcpm_port_is_audio(port) ||
2937 tcpm_port_is_source(port))
2938 tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
2939 else if (tcpm_port_is_sink(port))
2940 tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
2941 break;
2942 case SRC_UNATTACHED:
2943 case ACC_UNATTACHED:
2944 if (tcpm_port_is_debug(port) || tcpm_port_is_audio(port) ||
2945 tcpm_port_is_source(port))
2946 tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
2947 break;
2948 case SRC_ATTACH_WAIT:
2949 if (tcpm_port_is_disconnected(port) ||
2950 tcpm_port_is_audio_detached(port))
2951 tcpm_set_state(port, SRC_UNATTACHED, 0);
2952 else if (cc1 != old_cc1 || cc2 != old_cc2)
2953 tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
2954 break;
2955 case SRC_ATTACHED:
2956 case SRC_SEND_CAPABILITIES:
2957 case SRC_READY:
2958 if (tcpm_port_is_disconnected(port) ||
2959 !tcpm_port_is_source(port))
2960 tcpm_set_state(port, SRC_UNATTACHED, 0);
2961 break;
2962 case SNK_UNATTACHED:
2963 if (tcpm_port_is_sink(port))
2964 tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
2965 break;
2966 case SNK_ATTACH_WAIT:
2967 if ((port->cc1 == TYPEC_CC_OPEN &&
2968 port->cc2 != TYPEC_CC_OPEN) ||
2969 (port->cc1 != TYPEC_CC_OPEN &&
2970 port->cc2 == TYPEC_CC_OPEN))
2971 new_state = SNK_DEBOUNCED;
2972 else if (tcpm_port_is_disconnected(port))
2973 new_state = SNK_UNATTACHED;
2974 else
2975 break;
2976 if (new_state != port->delayed_state)
2977 tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
2978 break;
2979 case SNK_DEBOUNCED:
2980 if (tcpm_port_is_disconnected(port))
2981 new_state = SNK_UNATTACHED;
2982 else if (port->vbus_present)
2983 new_state = tcpm_try_src(port) ? SRC_TRY : SNK_ATTACHED;
2984 else
2985 new_state = SNK_UNATTACHED;
2986 if (new_state != port->delayed_state)
2987 tcpm_set_state(port, SNK_DEBOUNCED, 0);
2988 break;
2989 case SNK_READY:
2990 if (tcpm_port_is_disconnected(port))
2991 tcpm_set_state(port, unattached_state(port), 0);
2992 else if (!port->pd_capable &&
2993 (cc1 != old_cc1 || cc2 != old_cc2))
2994 tcpm_set_current_limit(port,
2995 tcpm_get_current_limit(port),
2996 5000);
2997 break;
2998
2999 case AUDIO_ACC_ATTACHED:
3000 if (cc1 == TYPEC_CC_OPEN || cc2 == TYPEC_CC_OPEN)
3001 tcpm_set_state(port, AUDIO_ACC_DEBOUNCE, 0);
3002 break;
3003 case AUDIO_ACC_DEBOUNCE:
3004 if (tcpm_port_is_audio(port))
3005 tcpm_set_state(port, AUDIO_ACC_ATTACHED, 0);
3006 break;
3007
3008 case DEBUG_ACC_ATTACHED:
3009 if (cc1 == TYPEC_CC_OPEN || cc2 == TYPEC_CC_OPEN)
3010 tcpm_set_state(port, ACC_UNATTACHED, 0);
3011 break;
3012
3013 case SNK_TRY:
3014 /* Do nothing, waiting for timeout */
3015 break;
3016
3017 case SNK_DISCOVERY:
3018 /* CC line is unstable, wait for debounce */
3019 if (tcpm_port_is_disconnected(port))
3020 tcpm_set_state(port, SNK_DISCOVERY_DEBOUNCE, 0);
3021 break;
3022 case SNK_DISCOVERY_DEBOUNCE:
3023 break;
3024
3025 case SRC_TRYWAIT:
3026 /* Hand over to state machine if needed */
3027 if (!port->vbus_present && tcpm_port_is_source(port))
3028 tcpm_set_state(port, SRC_TRYWAIT_DEBOUNCE, 0);
3029 break;
3030 case SRC_TRYWAIT_DEBOUNCE:
3031 if (port->vbus_present || !tcpm_port_is_source(port))
3032 tcpm_set_state(port, SRC_TRYWAIT, 0);
3033 break;
3034 case SNK_TRY_WAIT_DEBOUNCE:
3035 if (!tcpm_port_is_sink(port)) {
3036 port->max_wait = 0;
3037 tcpm_set_state(port, SRC_TRYWAIT, 0);
f0690a25 3038 }
f0690a25 3039 break;
131c7d12 3040 case SRC_TRY_WAIT:
3041 if (tcpm_port_is_source(port))
3042 tcpm_set_state(port, SRC_TRY_DEBOUNCE, 0);
3043 break;
3044 case SRC_TRY_DEBOUNCE:
131c7d12 3045 tcpm_set_state(port, SRC_TRY_WAIT, 0);
3046 break;
3047 case SNK_TRYWAIT_DEBOUNCE:
3048 if (tcpm_port_is_sink(port))
3049 tcpm_set_state(port, SNK_TRYWAIT_VBUS, 0);
3050 break;
3051 case SNK_TRYWAIT_VBUS:
3052 if (!tcpm_port_is_sink(port))
3053 tcpm_set_state(port, SNK_TRYWAIT_DEBOUNCE, 0);
3054 break;
3055 case SNK_TRYWAIT:
3056 /* Do nothing, waiting for tCCDebounce */
3057 break;
3058 case PR_SWAP_SNK_SRC_SINK_OFF:
3059 case PR_SWAP_SRC_SNK_TRANSITION_OFF:
3060 case PR_SWAP_SRC_SNK_SOURCE_OFF:
3061 case PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED:
3062 case PR_SWAP_SNK_SRC_SOURCE_ON:
f0690a25 3063 /*
b965b631 3064 * CC state change is expected in PR_SWAP
3065 * Ignore it.
3066 */
3067 break;
3068
3069 default:
3070 if (tcpm_port_is_disconnected(port))
3071 tcpm_set_state(port, unattached_state(port), 0);
3072 break;
3073 }
3074}
3075
3076static void _tcpm_pd_vbus_on(struct tcpm_port *port)
3077{
3078 tcpm_log_force(port, "VBUS on");
3079 port->vbus_present = true;
3080 switch (port->state) {
3081 case SNK_TRANSITION_SINK_VBUS:
8bf05746 3082 port->explicit_contract = true;
3083 tcpm_set_state(port, SNK_READY, 0);
3084 break;
3085 case SNK_DISCOVERY:
3086 tcpm_set_state(port, SNK_DISCOVERY, 0);
3087 break;
3088
3089 case SNK_DEBOUNCED:
3090 tcpm_set_state(port, tcpm_try_src(port) ? SRC_TRY
3091 : SNK_ATTACHED,
3092 0);
3093 break;
3094 case SNK_HARD_RESET_WAIT_VBUS:
3095 tcpm_set_state(port, SNK_HARD_RESET_SINK_ON, 0);
3096 break;
3097 case SRC_ATTACHED:
3098 tcpm_set_state(port, SRC_STARTUP, 0);
3099 break;
3100 case SRC_HARD_RESET_VBUS_ON:
3101 tcpm_set_state(port, SRC_STARTUP, 0);
3102 break;
3103
3104 case SNK_TRY:
3105 /* Do nothing, waiting for timeout */
3106 break;
3107 case SRC_TRYWAIT:
3108	 /* Do nothing, waiting for Rd to be detected */
3109 break;
3110 case SRC_TRYWAIT_DEBOUNCE:
3111 tcpm_set_state(port, SRC_TRYWAIT, 0);
f0690a25 3112 break;
3113	 case SNK_TRY_WAIT_DEBOUNCE:
3114	 /* Do nothing, waiting for PD_DEBOUNCE to be done */
3115 break;
3116 case SNK_TRYWAIT:
3117 /* Do nothing, waiting for tCCDebounce */
3118 break;
3119 case SNK_TRYWAIT_VBUS:
3120 if (tcpm_port_is_sink(port))
3121 tcpm_set_state(port, SNK_ATTACHED, 0);
3122 break;
3123 case SNK_TRYWAIT_DEBOUNCE:
3124 /* Do nothing, waiting for Rp */
f0690a25 3125 break;
3126 case SRC_TRY_WAIT:
3127 case SRC_TRY_DEBOUNCE:
3128 /* Do nothing, waiting for sink detection */
3129 break;
3130 default:
3131 break;
3132 }
3133}
3134
3135static void _tcpm_pd_vbus_off(struct tcpm_port *port)
3136{
3137 tcpm_log_force(port, "VBUS off");
3138 port->vbus_present = false;
3139 port->vbus_never_low = false;
3140 switch (port->state) {
3141 case SNK_HARD_RESET_SINK_OFF:
3142 tcpm_set_state(port, SNK_HARD_RESET_WAIT_VBUS, 0);
3143 break;
3144 case SRC_HARD_RESET_VBUS_OFF:
3145 tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, 0);
3146 break;
3147 case HARD_RESET_SEND:
3148 break;
3149
3150 case SNK_TRY:
3151 /* Do nothing, waiting for timeout */
3152 break;
3153 case SRC_TRYWAIT:
3154 /* Hand over to state machine if needed */
3155 if (tcpm_port_is_source(port))
02d5be46 3156 tcpm_set_state(port, SRC_TRYWAIT_DEBOUNCE, 0);
f0690a25 3157 break;
3158	 case SNK_TRY_WAIT_DEBOUNCE:
3159	 /* Do nothing, waiting for PD_DEBOUNCE to be done */
f0690a25 3160 break;
af450ebb 3161 case SNK_TRYWAIT:
f0690a25 3162 case SNK_TRYWAIT_VBUS:
af450ebb 3163 case SNK_TRYWAIT_DEBOUNCE:
f0690a25 3164 break;
3165 case SNK_ATTACH_WAIT:
3166 tcpm_set_state(port, SNK_UNATTACHED, 0);
3167 break;
3168
3169 case SNK_NEGOTIATE_CAPABILITIES:
3170 break;
3171
3172 case PR_SWAP_SRC_SNK_TRANSITION_OFF:
3173 tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF, 0);
3174 break;
3175
3176 case PR_SWAP_SNK_SRC_SINK_OFF:
3177 /* Do nothing, expected */
3178 break;
3179
b17dd571 3180 case PORT_RESET_WAIT_OFF:
c749d4d0 3181 tcpm_set_state(port, tcpm_default_state(port), 0);
f0690a25 3182 break;
3183 case SRC_TRY_WAIT:
3184 case SRC_TRY_DEBOUNCE:
3185 /* Do nothing, waiting for sink detection */
3186 break;
3187 default:
3188 if (port->pwr_role == TYPEC_SINK &&
3189 port->attached)
3190 tcpm_set_state(port, SNK_UNATTACHED, 0);
3191 break;
3192 }
3193}
3194
3195static void _tcpm_pd_hard_reset(struct tcpm_port *port)
3196{
3197 tcpm_log_force(port, "Received hard reset");
3198 /*
3199 * If we keep receiving hard reset requests, executing the hard reset
3200 * must have failed. Revert to error recovery if that happens.
3201 */
3202 tcpm_set_state(port,
3203 port->hard_reset_count < PD_N_HARD_RESET_COUNT ?
3204 HARD_RESET_START : ERROR_RECOVERY,
3205 0);
3206}
3207
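/*
 * Event worker: drain the pd_events bitmask set by tcpm_cc_change(),
 * tcpm_vbus_change() and tcpm_pd_hard_reset() and dispatch hard reset,
 * VBUS and CC changes with port->lock held.
 */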
3208static void tcpm_pd_event_handler(struct work_struct *work)
3209{
3210 struct tcpm_port *port = container_of(work, struct tcpm_port,
3211 event_work);
3212 u32 events;
3213
3214 mutex_lock(&port->lock);
3215
3216 spin_lock(&port->pd_event_lock);
3217 while (port->pd_events) {
3218 events = port->pd_events;
3219 port->pd_events = 0;
3220 spin_unlock(&port->pd_event_lock);
3221 if (events & TCPM_RESET_EVENT)
3222 _tcpm_pd_hard_reset(port);
3223 if (events & TCPM_VBUS_EVENT) {
3224 bool vbus;
3225
3226 vbus = port->tcpc->get_vbus(port->tcpc);
3227 if (vbus)
3228 _tcpm_pd_vbus_on(port);
3229 else
3230 _tcpm_pd_vbus_off(port);
3231 }
3232 if (events & TCPM_CC_EVENT) {
3233 enum typec_cc_status cc1, cc2;
3234
3235 if (port->tcpc->get_cc(port->tcpc, &cc1, &cc2) == 0)
3236 _tcpm_cc_change(port, cc1, cc2);
3237 }
3238 spin_lock(&port->pd_event_lock);
3239 }
3240 spin_unlock(&port->pd_event_lock);
3241 mutex_unlock(&port->lock);
3242}
3243
3244void tcpm_cc_change(struct tcpm_port *port)
3245{
3246 spin_lock(&port->pd_event_lock);
3247 port->pd_events |= TCPM_CC_EVENT;
3248 spin_unlock(&port->pd_event_lock);
3249 queue_work(port->wq, &port->event_work);
3250}
3251EXPORT_SYMBOL_GPL(tcpm_cc_change);
3252
3253void tcpm_vbus_change(struct tcpm_port *port)
3254{
3255 spin_lock(&port->pd_event_lock);
3256 port->pd_events |= TCPM_VBUS_EVENT;
3257 spin_unlock(&port->pd_event_lock);
3258 queue_work(port->wq, &port->event_work);
3259}
3260EXPORT_SYMBOL_GPL(tcpm_vbus_change);
3261
3262void tcpm_pd_hard_reset(struct tcpm_port *port)
3263{
3264 spin_lock(&port->pd_event_lock);
3265 port->pd_events = TCPM_RESET_EVENT;
3266 spin_unlock(&port->pd_event_lock);
3267 queue_work(port->wq, &port->event_work);
3268}
3269EXPORT_SYMBOL_GPL(tcpm_pd_hard_reset);
3270
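/*
 * Type-C class callback for data role swap requests. For PD-capable
 * partners a DR_Swap message is sent; for non-PD partners the port is
 * reset to force a role change, which only works if a matching preferred
 * role is configured. Blocks until the swap completes or times out.
 */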
3271static int tcpm_dr_set(const struct typec_capability *cap,
3272 enum typec_data_role data)
3273{
3274 struct tcpm_port *port = typec_cap_to_tcpm(cap);
3275 int ret;
3276
3277 mutex_lock(&port->swap_lock);
3278 mutex_lock(&port->lock);
3279
9b0ae699 3280 if (port->port_type != TYPEC_PORT_DRP) {
3281 ret = -EINVAL;
3282 goto port_unlock;
3283 }
3284 if (port->state != SRC_READY && port->state != SNK_READY) {
3285 ret = -EAGAIN;
3286 goto port_unlock;
3287 }
3288
3289 if (port->data_role == data) {
3290 ret = 0;
3291 goto port_unlock;
3292 }
3293
3294 /*
3295 * XXX
3296 * 6.3.9: If an alternate mode is active, a request to swap
3297 * alternate modes shall trigger a port reset.
3298 * Reject data role swap request in this case.
3299 */
3300
3301 if (!port->pd_capable) {
3302 /*
3303 * If the partner is not PD capable, reset the port to
3304 * trigger a role change. This can only work if a preferred
3305 * role is configured, and if it matches the requested role.
3306 */
3307 if (port->try_role == TYPEC_NO_PREFERRED_ROLE ||
3308 port->try_role == port->pwr_role) {
3309 ret = -EINVAL;
3310 goto port_unlock;
3311 }
3312 port->non_pd_role_swap = true;
3313 tcpm_set_state(port, PORT_RESET, 0);
3314 } else {
3315 tcpm_set_state(port, DR_SWAP_SEND, 0);
3316 }
3317
3318 port->swap_status = 0;
3319 port->swap_pending = true;
3320 reinit_completion(&port->swap_complete);
3321 mutex_unlock(&port->lock);
3322
3323 if (!wait_for_completion_timeout(&port->swap_complete,
3324 msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT)))
3325 ret = -ETIMEDOUT;
3326 else
3327 ret = port->swap_status;
f0690a25 3328
b17dd571 3329 port->non_pd_role_swap = false;
3330 goto swap_unlock;
3331
3332port_unlock:
3333 mutex_unlock(&port->lock);
3334swap_unlock:
3335 mutex_unlock(&port->swap_lock);
3336 return ret;
3337}
3338
3339static int tcpm_pr_set(const struct typec_capability *cap,
3340 enum typec_role role)
3341{
3342 struct tcpm_port *port = typec_cap_to_tcpm(cap);
3343 int ret;
3344
3345 mutex_lock(&port->swap_lock);
3346 mutex_lock(&port->lock);
3347
9b0ae699 3348 if (port->port_type != TYPEC_PORT_DRP) {
3349 ret = -EINVAL;
3350 goto port_unlock;
3351 }
3352 if (port->state != SRC_READY && port->state != SNK_READY) {
3353 ret = -EAGAIN;
3354 goto port_unlock;
3355 }
3356
3357 if (role == port->pwr_role) {
3358 ret = 0;
3359 goto port_unlock;
3360 }
3361
3362 port->swap_status = 0;
3363 port->swap_pending = true;
3364 reinit_completion(&port->swap_complete);
3365 tcpm_set_state(port, PR_SWAP_SEND, 0);
3366 mutex_unlock(&port->lock);
3367
3368 if (!wait_for_completion_timeout(&port->swap_complete,
3369 msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT)))
3370 ret = -ETIMEDOUT;
3371 else
3372 ret = port->swap_status;
f0690a25 3373
3374 goto swap_unlock;
3375
3376port_unlock:
3377 mutex_unlock(&port->lock);
3378swap_unlock:
3379 mutex_unlock(&port->swap_lock);
3380 return ret;
3381}
3382
3383static int tcpm_vconn_set(const struct typec_capability *cap,
3384 enum typec_role role)
3385{
3386 struct tcpm_port *port = typec_cap_to_tcpm(cap);
3387 int ret;
3388
3389 mutex_lock(&port->swap_lock);
3390 mutex_lock(&port->lock);
3391
3392 if (port->state != SRC_READY && port->state != SNK_READY) {
3393 ret = -EAGAIN;
3394 goto port_unlock;
3395 }
3396
3397 if (role == port->vconn_role) {
3398 ret = 0;
3399 goto port_unlock;
3400 }
3401
3402 port->swap_status = 0;
3403 port->swap_pending = true;
3404 reinit_completion(&port->swap_complete);
3405 tcpm_set_state(port, VCONN_SWAP_SEND, 0);
3406 mutex_unlock(&port->lock);
3407
3408 if (!wait_for_completion_timeout(&port->swap_complete,
3409 msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT)))
3410 ret = -ETIMEDOUT;
3411 else
3412 ret = port->swap_status;
f0690a25 3413
3414 goto swap_unlock;
3415
3416port_unlock:
3417 mutex_unlock(&port->lock);
3418swap_unlock:
3419 mutex_unlock(&port->swap_lock);
3420 return ret;
3421}
3422
3423static int tcpm_try_role(const struct typec_capability *cap, int role)
3424{
3425 struct tcpm_port *port = typec_cap_to_tcpm(cap);
3426 struct tcpc_dev *tcpc = port->tcpc;
3427 int ret = 0;
3428
3429 mutex_lock(&port->lock);
3430 if (tcpc->try_role)
3431 ret = tcpc->try_role(tcpc, role);
3432 if (!ret && !tcpc->config->try_role_hw)
3433 port->try_role = role;
3434 port->try_src_count = 0;
3435 port->try_snk_count = 0;
3436 mutex_unlock(&port->lock);
3437
3438 return ret;
3439}
3440
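/*
 * Bring the port up from scratch: initialise the TCPC, force a clean
 * detached state, sample the initial VBUS and CC status, and finally
 * schedule a PORT_RESET so that attached adapters see a clean disconnect.
 */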
3441static void tcpm_init(struct tcpm_port *port)
3442{
3443 enum typec_cc_status cc1, cc2;
3444
3445 port->tcpc->init(port->tcpc);
3446
3447 tcpm_reset_port(port);
3448
3449 /*
3450 * XXX
3451 * Should possibly wait for VBUS to settle if it was enabled locally
3452 * since tcpm_reset_port() will disable VBUS.
3453 */
3454 port->vbus_present = port->tcpc->get_vbus(port->tcpc);
3455 if (port->vbus_present)
3456 port->vbus_never_low = true;
3457
3458 tcpm_set_state(port, tcpm_default_state(port), 0);
3459
3460 if (port->tcpc->get_cc(port->tcpc, &cc1, &cc2) == 0)
3461 _tcpm_cc_change(port, cc1, cc2);
3462
3463 /*
3464 * Some adapters need a clean slate at startup, and won't recover
3465 * otherwise. So do not try to be fancy and force a clean disconnect.
3466 */
b17dd571 3467 tcpm_set_state(port, PORT_RESET, 0);
3468}
3469
3470static int tcpm_port_type_set(const struct typec_capability *cap,
3471 enum typec_port_type type)
3472{
3473 struct tcpm_port *port = typec_cap_to_tcpm(cap);
3474
3475 mutex_lock(&port->lock);
3476 if (type == port->port_type)
3477 goto port_unlock;
3478
3479 port->port_type = type;
3480
3481 if (!port->connected) {
3482 tcpm_set_state(port, PORT_RESET, 0);
3483 } else if (type == TYPEC_PORT_UFP) {
3484 if (!(port->pwr_role == TYPEC_SINK &&
3485 port->data_role == TYPEC_DEVICE))
3486 tcpm_set_state(port, PORT_RESET, 0);
3487 } else if (type == TYPEC_PORT_DFP) {
3488 if (!(port->pwr_role == TYPEC_SOURCE &&
3489 port->data_role == TYPEC_HOST))
3490 tcpm_set_state(port, PORT_RESET, 0);
3491 }
3492
3493port_unlock:
3494 mutex_unlock(&port->lock);
3495 return 0;
3496}
3497
3498void tcpm_tcpc_reset(struct tcpm_port *port)
3499{
3500 mutex_lock(&port->lock);
3501 /* XXX: Maintain PD connection if possible? */
3502 tcpm_init(port);
3503 mutex_unlock(&port->lock);
3504}
3505EXPORT_SYMBOL_GPL(tcpm_tcpc_reset);
3506
3507static int tcpm_copy_pdos(u32 *dest_pdo, const u32 *src_pdo,
3508 unsigned int nr_pdo)
3509{
3510 unsigned int i;
3511
3512 if (nr_pdo > PDO_MAX_OBJECTS)
3513 nr_pdo = PDO_MAX_OBJECTS;
3514
3515 for (i = 0; i < nr_pdo; i++)
3516 dest_pdo[i] = src_pdo[i];
3517
3518 return nr_pdo;
3519}
3520
3521static int tcpm_copy_vdos(u32 *dest_vdo, const u32 *src_vdo,
3522 unsigned int nr_vdo)
3523{
3524 unsigned int i;
3525
3526 if (nr_vdo > VDO_MAX_OBJECTS)
3527 nr_vdo = VDO_MAX_OBJECTS;
3528
3529 for (i = 0; i < nr_vdo; i++)
3530 dest_vdo[i] = src_vdo[i];
3531
3532 return nr_vdo;
3533}
3534
3535int tcpm_update_source_capabilities(struct tcpm_port *port, const u32 *pdo,
3536 unsigned int nr_pdo)
f0690a25 3537{
3538 if (tcpm_validate_caps(port, pdo, nr_pdo))
3539 return -EINVAL;
3540
3541 mutex_lock(&port->lock);
3542 port->nr_src_pdo = tcpm_copy_pdos(port->src_pdo, pdo, nr_pdo);
3543 switch (port->state) {
3544 case SRC_UNATTACHED:
3545 case SRC_ATTACH_WAIT:
3546 case SRC_TRYWAIT:
3547 tcpm_set_cc(port, tcpm_rp_cc(port));
3548 break;
3549 case SRC_SEND_CAPABILITIES:
3550 case SRC_NEGOTIATE_CAPABILITIES:
3551 case SRC_READY:
3552 case SRC_WAIT_NEW_CAPABILITIES:
3553 tcpm_set_cc(port, tcpm_rp_cc(port));
3554 tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
3555 break;
3556 default:
3557 break;
3558 }
3559 mutex_unlock(&port->lock);
5007e1b5 3560 return 0;
3561}
3562EXPORT_SYMBOL_GPL(tcpm_update_source_capabilities);
3563
3564int tcpm_update_sink_capabilities(struct tcpm_port *port, const u32 *pdo,
3565 unsigned int nr_pdo,
3566 unsigned int max_snk_mv,
3567 unsigned int max_snk_ma,
3568 unsigned int max_snk_mw,
3569 unsigned int operating_snk_mw)
f0690a25 3570{
3571 if (tcpm_validate_caps(port, pdo, nr_pdo))
3572 return -EINVAL;
3573
3574 mutex_lock(&port->lock);
3575 port->nr_snk_pdo = tcpm_copy_pdos(port->snk_pdo, pdo, nr_pdo);
3576 port->max_snk_mv = max_snk_mv;
3577 port->max_snk_ma = max_snk_ma;
3578 port->max_snk_mw = max_snk_mw;
3579 port->operating_snk_mw = operating_snk_mw;
3580
3581 switch (port->state) {
3582 case SNK_NEGOTIATE_CAPABILITIES:
3583 case SNK_READY:
3584 case SNK_TRANSITION_SINK:
3585 case SNK_TRANSITION_SINK_VBUS:
3586 tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0);
3587 break;
3588 default:
3589 break;
3590 }
3591 mutex_unlock(&port->lock);
5007e1b5 3592 return 0;
3593}
3594EXPORT_SYMBOL_GPL(tcpm_update_sink_capabilities);
3595
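/*
 * Illustrative sketch (not from this file) of how a low-level TCPC driver
 * is expected to use this interface; the driver and callback names below
 * are hypothetical:
 *
 *	chip->tcpc.config = &foo_tcpc_config;
 *	chip->tcpc.get_vbus = foo_get_vbus;
 *	chip->tcpc.set_cc = foo_set_cc;
 *	... remaining mandatory callbacks: get_cc, set_polarity, set_vconn,
 *	    set_vbus, set_pd_rx, set_roles and pd_transmit ...
 *	chip->port = tcpm_register_port(dev, &chip->tcpc);
 *	if (IS_ERR(chip->port))
 *		return PTR_ERR(chip->port);
 *
 * The driver then reports events through tcpm_cc_change(),
 * tcpm_vbus_change() and tcpm_pd_hard_reset(), and calls
 * tcpm_unregister_port() when it is removed.
 */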
3596struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
3597{
3598 struct tcpm_port *port;
3599 int i, err;
3600
3601 if (!dev || !tcpc || !tcpc->config ||
3602 !tcpc->get_vbus || !tcpc->set_cc || !tcpc->get_cc ||
3603 !tcpc->set_polarity || !tcpc->set_vconn || !tcpc->set_vbus ||
3604 !tcpc->set_pd_rx || !tcpc->set_roles || !tcpc->pd_transmit)
3605 return ERR_PTR(-EINVAL);
3606
3607 port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
3608 if (!port)
3609 return ERR_PTR(-ENOMEM);
3610
3611 port->dev = dev;
3612 port->tcpc = tcpc;
3613
3614 mutex_init(&port->lock);
3615 mutex_init(&port->swap_lock);
3616
3617 port->wq = create_singlethread_workqueue(dev_name(dev));
3618 if (!port->wq)
3619 return ERR_PTR(-ENOMEM);
3620 INIT_DELAYED_WORK(&port->state_machine, tcpm_state_machine_work);
3621 INIT_DELAYED_WORK(&port->vdm_state_machine, vdm_state_machine_work);
3622 INIT_WORK(&port->event_work, tcpm_pd_event_handler);
3623
3624 spin_lock_init(&port->pd_event_lock);
3625
3626 init_completion(&port->tx_complete);
3627 init_completion(&port->swap_complete);
5007e1b5 3628 tcpm_debugfs_init(port);
f0690a25 3629
3630 if (tcpm_validate_caps(port, tcpc->config->src_pdo,
3631 tcpc->config->nr_src_pdo) ||
3632 tcpm_validate_caps(port, tcpc->config->snk_pdo,
3633 tcpc->config->nr_snk_pdo)) {
3634 err = -EINVAL;
3635 goto out_destroy_wq;
3636 }
3637 port->nr_src_pdo = tcpm_copy_pdos(port->src_pdo, tcpc->config->src_pdo,
3638 tcpc->config->nr_src_pdo);
3639 port->nr_snk_pdo = tcpm_copy_pdos(port->snk_pdo, tcpc->config->snk_pdo,
3640 tcpc->config->nr_snk_pdo);
3641 port->nr_snk_vdo = tcpm_copy_vdos(port->snk_vdo, tcpc->config->snk_vdo,
3642 tcpc->config->nr_snk_vdo);
3643
3644 port->max_snk_mv = tcpc->config->max_snk_mv;
3645 port->max_snk_ma = tcpc->config->max_snk_ma;
3646 port->max_snk_mw = tcpc->config->max_snk_mw;
3647 port->operating_snk_mw = tcpc->config->operating_snk_mw;
3648 if (!tcpc->config->try_role_hw)
3649 port->try_role = tcpc->config->default_role;
3650 else
3651 port->try_role = TYPEC_NO_PREFERRED_ROLE;
3652
3653 port->typec_caps.prefer_role = tcpc->config->default_role;
3654 port->typec_caps.type = tcpc->config->type;
3655 port->typec_caps.revision = 0x0120; /* Type-C spec release 1.2 */
3656 port->typec_caps.pd_revision = 0x0200; /* USB-PD spec release 2.0 */
3657 port->typec_caps.dr_set = tcpm_dr_set;
3658 port->typec_caps.pr_set = tcpm_pr_set;
3659 port->typec_caps.vconn_set = tcpm_vconn_set;
3660 port->typec_caps.try_role = tcpm_try_role;
9b0ae699 3661 port->typec_caps.port_type_set = tcpm_port_type_set;
3662
3663 port->partner_desc.identity = &port->partner_ident;
9b0ae699 3664 port->port_type = tcpc->config->type;
3665
3666 port->typec_port = typec_register_port(port->dev, &port->typec_caps);
3667 if (!port->typec_port) {
3668 err = -ENOMEM;
3669 goto out_destroy_wq;
3670 }
3671
3672 if (tcpc->config->alt_modes) {
3c41dbde 3673 const struct typec_altmode_desc *paltmode = tcpc->config->alt_modes;
3674
3675 i = 0;
3676 while (paltmode->svid && i < ARRAY_SIZE(port->port_altmode)) {
3677 port->port_altmode[i] =
3678 typec_port_register_altmode(port->typec_port,
3679 paltmode);
3680 if (!port->port_altmode[i]) {
3681 tcpm_log(port,
3682 "%s: failed to register port alternate mode 0x%x",
3683 dev_name(dev), paltmode->svid);
3684 break;
3685 }
3686 i++;
3687 paltmode++;
3688 }
3689 }
3690
3691 mutex_lock(&port->lock);
3692 tcpm_init(port);
3693 mutex_unlock(&port->lock);
3694
3695 tcpm_log(port, "%s: registered", dev_name(dev));
3696 return port;
3697
3698out_destroy_wq:
3699 destroy_workqueue(port->wq);
3700 return ERR_PTR(err);
3701}
3702EXPORT_SYMBOL_GPL(tcpm_register_port);
3703
3704void tcpm_unregister_port(struct tcpm_port *port)
3705{
3706 int i;
3707
a6d5d230 3708 tcpm_reset_port(port);
3709 for (i = 0; i < ARRAY_SIZE(port->port_altmode); i++)
3710 typec_unregister_altmode(port->port_altmode[i]);
3711 typec_unregister_port(port->typec_port);
3712 tcpm_debugfs_exit(port);
3713 destroy_workqueue(port->wq);
3714}
3715EXPORT_SYMBOL_GPL(tcpm_unregister_port);
3716
3717MODULE_AUTHOR("Guenter Roeck <groeck@chromium.org>");
3718MODULE_DESCRIPTION("USB Type-C Port Manager");
3719MODULE_LICENSE("GPL");