1 // SPDX-License-Identifier: GPL-2.0
3 * mtu3_gadget.c - MediaTek usb3 DRD peripheral support
5 * Copyright (C) 2016 MediaTek Inc.
7 * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
11 #include "mtu3_trace.h"
13 void mtu3_req_complete(struct mtu3_ep
*mep
,
14 struct usb_request
*req
, int status
)
15 __releases(mep
->mtu
->lock
)
16 __acquires(mep
->mtu
->lock
)
18 struct mtu3_request
*mreq
;
22 mreq
= to_mtu3_request(req
);
23 list_del(&mreq
->list
);
24 if (mreq
->request
.status
== -EINPROGRESS
)
25 mreq
->request
.status
= status
;
30 trace_mtu3_req_complete(mreq
);
31 spin_unlock(&mtu
->lock
);
33 /* ep0 makes use of PIO, needn't unmap it */
35 usb_gadget_unmap_request(&mtu
->g
, req
, mep
->is_in
);
37 dev_dbg(mtu
->dev
, "%s complete req: %p, sts %d, %d/%d\n", mep
->name
,
38 req
, req
->status
, mreq
->request
.actual
, mreq
->request
.length
);
40 usb_gadget_giveback_request(&mep
->ep
, &mreq
->request
);
42 spin_lock(&mtu
->lock
);
46 static void nuke(struct mtu3_ep
*mep
, const int status
)
48 struct mtu3_request
*mreq
= NULL
;
51 if (list_empty(&mep
->req_list
))
54 dev_dbg(mep
->mtu
->dev
, "abort %s's req: sts %d\n", mep
->name
, status
);
60 while (!list_empty(&mep
->req_list
)) {
61 mreq
= list_first_entry(&mep
->req_list
,
62 struct mtu3_request
, list
);
63 mtu3_req_complete(mep
, &mreq
->request
, status
);
67 static int mtu3_ep_enable(struct mtu3_ep
*mep
)
69 const struct usb_endpoint_descriptor
*desc
;
70 const struct usb_ss_ep_comp_descriptor
*comp_desc
;
71 struct mtu3
*mtu
= mep
->mtu
;
78 comp_desc
= mep
->comp_desc
;
79 mep
->type
= usb_endpoint_type(desc
);
80 mep
->maxp
= usb_endpoint_maxp(desc
);
82 switch (mtu
->g
.speed
) {
84 case USB_SPEED_SUPER_PLUS
:
85 if (usb_endpoint_xfer_int(desc
) ||
86 usb_endpoint_xfer_isoc(desc
)) {
87 interval
= desc
->bInterval
;
88 interval
= clamp_val(interval
, 1, 16);
89 if (usb_endpoint_xfer_isoc(desc
) && comp_desc
)
90 mult
= comp_desc
->bmAttributes
;
93 burst
= comp_desc
->bMaxBurst
;
97 if (usb_endpoint_xfer_isoc(desc
) ||
98 usb_endpoint_xfer_int(desc
)) {
99 interval
= desc
->bInterval
;
100 interval
= clamp_val(interval
, 1, 16);
101 mult
= usb_endpoint_maxp_mult(desc
) - 1;
105 if (usb_endpoint_xfer_isoc(desc
))
106 interval
= clamp_val(desc
->bInterval
, 1, 16);
107 else if (usb_endpoint_xfer_int(desc
))
108 interval
= clamp_val(desc
->bInterval
, 1, 255);
112 break; /*others are ignored */
115 dev_dbg(mtu
->dev
, "%s maxp:%d, interval:%d, burst:%d, mult:%d\n",
116 __func__
, mep
->maxp
, interval
, burst
, mult
);
118 mep
->ep
.maxpacket
= mep
->maxp
;
120 mep
->ep
.comp_desc
= comp_desc
;
122 /* slot mainly affects bulk/isoc transfer, so ignore int */
123 mep
->slot
= usb_endpoint_xfer_int(desc
) ? 0 : mtu
->slot
;
125 ret
= mtu3_config_ep(mtu
, mep
, interval
, burst
, mult
);
129 ret
= mtu3_gpd_ring_alloc(mep
);
131 mtu3_deconfig_ep(mtu
, mep
);
140 static int mtu3_ep_disable(struct mtu3_ep
*mep
)
142 struct mtu3
*mtu
= mep
->mtu
;
146 /* abort all pending requests */
147 nuke(mep
, -ESHUTDOWN
);
148 mtu3_deconfig_ep(mtu
, mep
);
149 mtu3_gpd_ring_free(mep
);
153 mep
->comp_desc
= NULL
;
160 static int mtu3_gadget_ep_enable(struct usb_ep
*ep
,
161 const struct usb_endpoint_descriptor
*desc
)
168 if (!ep
|| !desc
|| desc
->bDescriptorType
!= USB_DT_ENDPOINT
) {
169 pr_debug("%s invalid parameters\n", __func__
);
173 if (!desc
->wMaxPacketSize
) {
174 pr_debug("%s missing wMaxPacketSize\n", __func__
);
177 mep
= to_mtu3_ep(ep
);
180 /* check ep number and direction against endpoint */
181 if (usb_endpoint_num(desc
) != mep
->epnum
)
184 if (!!usb_endpoint_dir_in(desc
) ^ !!mep
->is_in
)
187 dev_dbg(mtu
->dev
, "%s %s\n", __func__
, ep
->name
);
189 if (mep
->flags
& MTU3_EP_ENABLED
) {
190 dev_WARN_ONCE(mtu
->dev
, true, "%s is already enabled\n",
195 spin_lock_irqsave(&mtu
->lock
, flags
);
197 mep
->comp_desc
= ep
->comp_desc
;
199 ret
= mtu3_ep_enable(mep
);
205 mep
->flags
|= MTU3_EP_ENABLED
;
209 spin_unlock_irqrestore(&mtu
->lock
, flags
);
211 dev_dbg(mtu
->dev
, "%s active_ep=%d\n", __func__
, mtu
->active_ep
);
212 trace_mtu3_gadget_ep_enable(mep
);
217 static int mtu3_gadget_ep_disable(struct usb_ep
*ep
)
219 struct mtu3_ep
*mep
= to_mtu3_ep(ep
);
220 struct mtu3
*mtu
= mep
->mtu
;
223 dev_dbg(mtu
->dev
, "%s %s\n", __func__
, mep
->name
);
224 trace_mtu3_gadget_ep_disable(mep
);
226 if (!(mep
->flags
& MTU3_EP_ENABLED
)) {
227 dev_warn(mtu
->dev
, "%s is already disabled\n", mep
->name
);
231 spin_lock_irqsave(&mtu
->lock
, flags
);
232 mtu3_ep_disable(mep
);
233 mep
->flags
&= ~MTU3_EP_ENABLED
;
235 spin_unlock_irqrestore(&(mtu
->lock
), flags
);
237 dev_dbg(mtu
->dev
, "%s active_ep=%d, mtu3 is_active=%d\n",
238 __func__
, mtu
->active_ep
, mtu
->is_active
);
243 struct usb_request
*mtu3_alloc_request(struct usb_ep
*ep
, gfp_t gfp_flags
)
245 struct mtu3_ep
*mep
= to_mtu3_ep(ep
);
246 struct mtu3_request
*mreq
;
248 mreq
= kzalloc(sizeof(*mreq
), gfp_flags
);
252 mreq
->request
.dma
= DMA_ADDR_INVALID
;
253 mreq
->epnum
= mep
->epnum
;
255 INIT_LIST_HEAD(&mreq
->list
);
256 trace_mtu3_alloc_request(mreq
);
258 return &mreq
->request
;
/* usb_ep_ops.free_request: release a request obtained from mtu3_alloc_request(). */
void mtu3_free_request(struct usb_ep *ep, struct usb_request *req)
{
	struct mtu3_request *mreq = to_mtu3_request(req);

	trace_mtu3_free_request(mreq);
	kfree(mreq);
}
269 static int mtu3_gadget_queue(struct usb_ep
*ep
,
270 struct usb_request
*req
, gfp_t gfp_flags
)
273 struct mtu3_request
*mreq
;
284 mep
= to_mtu3_ep(ep
);
286 mreq
= to_mtu3_request(req
);
289 if (mreq
->mep
!= mep
)
292 dev_dbg(mtu
->dev
, "%s %s EP%d(%s), req=%p, maxp=%d, len#%d\n",
293 __func__
, mep
->is_in
? "TX" : "RX", mreq
->epnum
, ep
->name
,
294 mreq
, ep
->maxpacket
, mreq
->request
.length
);
296 if (req
->length
> GPD_BUF_SIZE
||
297 (mtu
->gen2cp
&& req
->length
> GPD_BUF_SIZE_EL
)) {
299 "req length > supported MAX:%d requested:%d\n",
300 mtu
->gen2cp
? GPD_BUF_SIZE_EL
: GPD_BUF_SIZE
,
305 /* don't queue if the ep is down */
307 dev_dbg(mtu
->dev
, "req=%p queued to %s while it's disabled\n",
312 mreq
->request
.actual
= 0;
313 mreq
->request
.status
= -EINPROGRESS
;
315 ret
= usb_gadget_map_request(&mtu
->g
, req
, mep
->is_in
);
317 dev_err(mtu
->dev
, "dma mapping failed\n");
321 spin_lock_irqsave(&mtu
->lock
, flags
);
323 if (mtu3_prepare_transfer(mep
)) {
328 list_add_tail(&mreq
->list
, &mep
->req_list
);
329 mtu3_insert_gpd(mep
, mreq
);
330 mtu3_qmu_resume(mep
);
333 spin_unlock_irqrestore(&mtu
->lock
, flags
);
334 trace_mtu3_gadget_queue(mreq
);
339 static int mtu3_gadget_dequeue(struct usb_ep
*ep
, struct usb_request
*req
)
341 struct mtu3_ep
*mep
= to_mtu3_ep(ep
);
342 struct mtu3_request
*mreq
= to_mtu3_request(req
);
343 struct mtu3_request
*r
;
346 struct mtu3
*mtu
= mep
->mtu
;
348 if (!ep
|| !req
|| mreq
->mep
!= mep
)
351 dev_dbg(mtu
->dev
, "%s : req=%p\n", __func__
, req
);
352 trace_mtu3_gadget_dequeue(mreq
);
354 spin_lock_irqsave(&mtu
->lock
, flags
);
356 list_for_each_entry(r
, &mep
->req_list
, list
) {
361 dev_dbg(mtu
->dev
, "req=%p not queued to %s\n", req
, ep
->name
);
366 mtu3_qmu_flush(mep
); /* REVISIT: set BPS ?? */
367 mtu3_req_complete(mep
, req
, -ECONNRESET
);
371 spin_unlock_irqrestore(&mtu
->lock
, flags
);
377 * Set or clear the halt bit of an EP.
378 * A halted EP won't TX/RX any data but will queue requests.
380 static int mtu3_gadget_ep_set_halt(struct usb_ep
*ep
, int value
)
382 struct mtu3_ep
*mep
= to_mtu3_ep(ep
);
383 struct mtu3
*mtu
= mep
->mtu
;
384 struct mtu3_request
*mreq
;
391 dev_dbg(mtu
->dev
, "%s : %s...", __func__
, ep
->name
);
393 spin_lock_irqsave(&mtu
->lock
, flags
);
395 if (mep
->type
== USB_ENDPOINT_XFER_ISOC
) {
400 mreq
= next_request(mep
);
403 * If there is not request for TX-EP, QMU will not transfer
404 * data to TX-FIFO, so no need check whether TX-FIFO
405 * holds bytes or not here
408 dev_dbg(mtu
->dev
, "req in progress, cannot halt %s\n",
417 dev_dbg(mtu
->dev
, "%s %s stall\n", ep
->name
, value
? "set" : "clear");
419 mtu3_ep_stall_set(mep
, value
);
422 spin_unlock_irqrestore(&mtu
->lock
, flags
);
423 trace_mtu3_gadget_ep_set_halt(mep
);
428 /* Sets the halt feature with the clear requests ignored */
429 static int mtu3_gadget_ep_set_wedge(struct usb_ep
*ep
)
431 struct mtu3_ep
*mep
= to_mtu3_ep(ep
);
438 return usb_ep_set_halt(ep
);
441 static const struct usb_ep_ops mtu3_ep_ops
= {
442 .enable
= mtu3_gadget_ep_enable
,
443 .disable
= mtu3_gadget_ep_disable
,
444 .alloc_request
= mtu3_alloc_request
,
445 .free_request
= mtu3_free_request
,
446 .queue
= mtu3_gadget_queue
,
447 .dequeue
= mtu3_gadget_dequeue
,
448 .set_halt
= mtu3_gadget_ep_set_halt
,
449 .set_wedge
= mtu3_gadget_ep_set_wedge
,
452 static int mtu3_gadget_get_frame(struct usb_gadget
*gadget
)
454 struct mtu3
*mtu
= gadget_to_mtu3(gadget
);
456 return (int)mtu3_readl(mtu
->mac_base
, U3D_USB20_FRAME_NUM
);
459 static int mtu3_gadget_wakeup(struct usb_gadget
*gadget
)
461 struct mtu3
*mtu
= gadget_to_mtu3(gadget
);
464 dev_dbg(mtu
->dev
, "%s\n", __func__
);
466 /* remote wakeup feature is not enabled by host */
467 if (!mtu
->may_wakeup
)
470 spin_lock_irqsave(&mtu
->lock
, flags
);
471 if (mtu
->g
.speed
>= USB_SPEED_SUPER
) {
472 mtu3_setbits(mtu
->mac_base
, U3D_LINK_POWER_CONTROL
, UX_EXIT
);
474 mtu3_setbits(mtu
->mac_base
, U3D_POWER_MANAGEMENT
, RESUME
);
475 spin_unlock_irqrestore(&mtu
->lock
, flags
);
476 usleep_range(10000, 11000);
477 spin_lock_irqsave(&mtu
->lock
, flags
);
478 mtu3_clrbits(mtu
->mac_base
, U3D_POWER_MANAGEMENT
, RESUME
);
480 spin_unlock_irqrestore(&mtu
->lock
, flags
);
484 static int mtu3_gadget_set_self_powered(struct usb_gadget
*gadget
,
487 struct mtu3
*mtu
= gadget_to_mtu3(gadget
);
489 mtu
->is_self_powered
= !!is_selfpowered
;
493 static int mtu3_gadget_pullup(struct usb_gadget
*gadget
, int is_on
)
495 struct mtu3
*mtu
= gadget_to_mtu3(gadget
);
498 dev_dbg(mtu
->dev
, "%s (%s) for %sactive device\n", __func__
,
499 is_on
? "on" : "off", mtu
->is_active
? "" : "in");
501 /* we'd rather not pullup unless the device is active. */
502 spin_lock_irqsave(&mtu
->lock
, flags
);
505 if (!mtu
->is_active
) {
506 /* save it for mtu3_start() to process the request */
507 mtu
->softconnect
= is_on
;
508 } else if (is_on
!= mtu
->softconnect
) {
509 mtu
->softconnect
= is_on
;
510 mtu3_dev_on_off(mtu
, is_on
);
513 spin_unlock_irqrestore(&mtu
->lock
, flags
);
518 static int mtu3_gadget_start(struct usb_gadget
*gadget
,
519 struct usb_gadget_driver
*driver
)
521 struct mtu3
*mtu
= gadget_to_mtu3(gadget
);
524 if (mtu
->gadget_driver
) {
525 dev_err(mtu
->dev
, "%s is already bound to %s\n",
526 mtu
->g
.name
, mtu
->gadget_driver
->driver
.name
);
530 dev_dbg(mtu
->dev
, "bind driver %s\n", driver
->function
);
532 spin_lock_irqsave(&mtu
->lock
, flags
);
534 mtu
->softconnect
= 0;
535 mtu
->gadget_driver
= driver
;
537 if (mtu
->ssusb
->dr_mode
== USB_DR_MODE_PERIPHERAL
)
540 spin_unlock_irqrestore(&mtu
->lock
, flags
);
545 static void stop_activity(struct mtu3
*mtu
)
547 struct usb_gadget_driver
*driver
= mtu
->gadget_driver
;
550 /* don't disconnect if it's not connected */
551 if (mtu
->g
.speed
== USB_SPEED_UNKNOWN
)
554 mtu
->g
.speed
= USB_SPEED_UNKNOWN
;
556 /* deactivate the hardware */
557 if (mtu
->softconnect
) {
558 mtu
->softconnect
= 0;
559 mtu3_dev_on_off(mtu
, 0);
563 * killing any outstanding requests will quiesce the driver;
564 * then report disconnect
566 nuke(mtu
->ep0
, -ESHUTDOWN
);
567 for (i
= 1; i
< mtu
->num_eps
; i
++) {
568 nuke(mtu
->in_eps
+ i
, -ESHUTDOWN
);
569 nuke(mtu
->out_eps
+ i
, -ESHUTDOWN
);
573 spin_unlock(&mtu
->lock
);
574 driver
->disconnect(&mtu
->g
);
575 spin_lock(&mtu
->lock
);
579 static int mtu3_gadget_stop(struct usb_gadget
*g
)
581 struct mtu3
*mtu
= gadget_to_mtu3(g
);
584 dev_dbg(mtu
->dev
, "%s\n", __func__
);
586 spin_lock_irqsave(&mtu
->lock
, flags
);
589 mtu
->gadget_driver
= NULL
;
591 if (mtu
->ssusb
->dr_mode
== USB_DR_MODE_PERIPHERAL
)
594 spin_unlock_irqrestore(&mtu
->lock
, flags
);
596 synchronize_irq(mtu
->irq
);
600 static const struct usb_gadget_ops mtu3_gadget_ops
= {
601 .get_frame
= mtu3_gadget_get_frame
,
602 .wakeup
= mtu3_gadget_wakeup
,
603 .set_selfpowered
= mtu3_gadget_set_self_powered
,
604 .pullup
= mtu3_gadget_pullup
,
605 .udc_start
= mtu3_gadget_start
,
606 .udc_stop
= mtu3_gadget_stop
,
609 static void mtu3_state_reset(struct mtu3
*mtu
)
612 mtu
->ep0_state
= MU3D_EP0_STATE_SETUP
;
616 mtu
->delayed_status
= false;
617 mtu
->test_mode
= false;
620 static void init_hw_ep(struct mtu3
*mtu
, struct mtu3_ep
*mep
,
621 u32 epnum
, u32 is_in
)
627 INIT_LIST_HEAD(&mep
->req_list
);
629 sprintf(mep
->name
, "ep%d%s", epnum
,
630 !epnum
? "" : (is_in
? "in" : "out"));
632 mep
->ep
.name
= mep
->name
;
633 INIT_LIST_HEAD(&mep
->ep
.ep_list
);
635 /* initialize maxpacket as SS */
637 usb_ep_set_maxpacket_limit(&mep
->ep
, 512);
638 mep
->ep
.caps
.type_control
= true;
639 mep
->ep
.ops
= &mtu3_ep0_ops
;
640 mtu
->g
.ep0
= &mep
->ep
;
642 usb_ep_set_maxpacket_limit(&mep
->ep
, 1024);
643 mep
->ep
.caps
.type_iso
= true;
644 mep
->ep
.caps
.type_bulk
= true;
645 mep
->ep
.caps
.type_int
= true;
646 mep
->ep
.ops
= &mtu3_ep_ops
;
647 list_add_tail(&mep
->ep
.ep_list
, &mtu
->g
.ep_list
);
650 dev_dbg(mtu
->dev
, "%s, name=%s, maxp=%d\n", __func__
, mep
->ep
.name
,
654 mep
->ep
.caps
.dir_in
= true;
655 mep
->ep
.caps
.dir_out
= true;
657 mep
->ep
.caps
.dir_in
= true;
659 mep
->ep
.caps
.dir_out
= true;
663 static void mtu3_gadget_init_eps(struct mtu3
*mtu
)
667 /* initialize endpoint list just once */
668 INIT_LIST_HEAD(&(mtu
->g
.ep_list
));
670 dev_dbg(mtu
->dev
, "%s num_eps(1 for a pair of tx&rx ep)=%d\n",
671 __func__
, mtu
->num_eps
);
673 init_hw_ep(mtu
, mtu
->ep0
, 0, 0);
674 for (epnum
= 1; epnum
< mtu
->num_eps
; epnum
++) {
675 init_hw_ep(mtu
, mtu
->in_eps
+ epnum
, epnum
, 1);
676 init_hw_ep(mtu
, mtu
->out_eps
+ epnum
, epnum
, 0);
680 int mtu3_gadget_setup(struct mtu3
*mtu
)
684 mtu
->g
.ops
= &mtu3_gadget_ops
;
685 mtu
->g
.max_speed
= mtu
->max_speed
;
686 mtu
->g
.speed
= USB_SPEED_UNKNOWN
;
687 mtu
->g
.sg_supported
= 0;
688 mtu
->g
.name
= MTU3_DRIVER_NAME
;
690 mtu
->delayed_status
= false;
692 mtu3_gadget_init_eps(mtu
);
694 ret
= usb_add_gadget_udc(mtu
->dev
, &mtu
->g
);
696 dev_err(mtu
->dev
, "failed to register udc\n");
701 void mtu3_gadget_cleanup(struct mtu3
*mtu
)
703 usb_del_gadget_udc(&mtu
->g
);
706 void mtu3_gadget_resume(struct mtu3
*mtu
)
708 dev_dbg(mtu
->dev
, "gadget RESUME\n");
709 if (mtu
->gadget_driver
&& mtu
->gadget_driver
->resume
) {
710 spin_unlock(&mtu
->lock
);
711 mtu
->gadget_driver
->resume(&mtu
->g
);
712 spin_lock(&mtu
->lock
);
716 /* called when SOF packets stop for 3+ msec or enters U3 */
717 void mtu3_gadget_suspend(struct mtu3
*mtu
)
719 dev_dbg(mtu
->dev
, "gadget SUSPEND\n");
720 if (mtu
->gadget_driver
&& mtu
->gadget_driver
->suspend
) {
721 spin_unlock(&mtu
->lock
);
722 mtu
->gadget_driver
->suspend(&mtu
->g
);
723 spin_lock(&mtu
->lock
);
727 /* called when VBUS drops below session threshold, and in other cases */
728 void mtu3_gadget_disconnect(struct mtu3
*mtu
)
730 dev_dbg(mtu
->dev
, "gadget DISCONNECT\n");
731 if (mtu
->gadget_driver
&& mtu
->gadget_driver
->disconnect
) {
732 spin_unlock(&mtu
->lock
);
733 mtu
->gadget_driver
->disconnect(&mtu
->g
);
734 spin_lock(&mtu
->lock
);
737 mtu3_state_reset(mtu
);
738 usb_gadget_set_state(&mtu
->g
, USB_STATE_NOTATTACHED
);
741 void mtu3_gadget_reset(struct mtu3
*mtu
)
743 dev_dbg(mtu
->dev
, "gadget RESET\n");
745 /* report disconnect, if we didn't flush EP state */
746 if (mtu
->g
.speed
!= USB_SPEED_UNKNOWN
)
747 mtu3_gadget_disconnect(mtu
);
749 mtu3_state_reset(mtu
);