/*
 * MUSB OTG driver host support
 *
 * Copyright 2005 Mentor Graphics Corporation
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (C) 2006-2007 Nokia Corporation
 * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include "musb_core.h"
#include "musb_host.h"
#include "musb_trace.h"
/* MUSB HOST status 22-mar-2006
 *
 * - There's still lots of partial code duplication for fault paths, so
 *   they aren't handled as consistently as they need to be.
 *
 * - PIO mostly behaved when last tested.
 *     + including ep0, with all usbtest cases 9, 10
 *     + usbtest 14 (ep0out) doesn't seem to run at all
 *     + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest
 *       configurations, but otherwise double buffering passes basic tests.
 *     + for 2.6.N, N > ~10, needs API changes for hcd framework.
 *
 * - DMA (CPPI) ... partially behaves, not currently recommended
 *     + about 1/15 the speed of typical EHCI implementations (PCI)
 *     + RX, all too often reqpkt seems to misbehave after tx
 *     + TX, no known issues (other than evident silicon issue)
 *
 * - DMA (Mentor/OMAP) ... has at least toggle update problems
 *
 * - [23-feb-2009] minimal traffic scheduling to avoid bulk RX packet
 *   starvation ... nothing yet for TX, interrupt, or bulk.
 *
 * - Not tested with HNP, but some SRP paths seem to behave.
 *
 * NOTE 24-August-2006:
 *
 * - Bulk traffic finally uses both sides of hardware ep1, freeing up an
 *   extra endpoint for periodic use enabling hub + keybd + mouse.  That
 *   mostly works, except that with "usbnet" it's easy to trigger cases
 *   with "ping" where RX loses.  (a) ping to davinci, even "ping -f",
 *   fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses
 *   although ARP RX wins.  (That test was done with a full speed link.)
 */

/*
 * NOTE on endpoint usage:
 *
 * CONTROL transfers all go through ep0.  BULK ones go through dedicated IN
 * and OUT endpoints ... hardware is dedicated for those "async" queue(s).
 * (Yes, bulk _could_ use more of the endpoints than that, and would even
 * benefit from it.)
 *
 * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
 * So far that scheduling is both dumb and optimistic: the endpoint will be
 * "claimed" until its software queue is no longer refilled.  No multiplexing
 * of transfers between endpoints, or anything clever.
 */
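/*
 * Illustrative sketch of the queue mapping described above.  This helper is
 * not part of the driver and is never called; it only restates the policy:
 * control transfers share ep0's queue, bulk transfers share the in_bulk and
 * out_bulk lists, and periodic transfers claim an endpoint outright.
 */
static inline struct list_head *musb_sketch_queue_for(struct musb *musb,
		struct musb_qh *qh, int is_in)
{
	switch (qh->type) {
	case USB_ENDPOINT_XFER_CONTROL:
		return &musb->control;
	case USB_ENDPOINT_XFER_BULK:
		return is_in ? &musb->in_bulk : &musb->out_bulk;
	default:
		return NULL;	/* INT/ISO: endpoint is claimed, not queued */
	}
}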
struct musb *hcd_to_musb(struct usb_hcd *hcd)
{
	return *(struct musb **) hcd->hcd_priv;
}
static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, int is_out,
			u8 *buf, u32 offset, u32 len);
/*
 * Clear TX fifo. Needed to avoid BABBLE errors.
 */
static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
{
	struct musb	*musb = ep->musb;
	void __iomem	*epio = ep->regs;
	u16		csr;
	int		retries = 1000;

	csr = musb_readw(epio, MUSB_TXCSR);
	while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
		csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_TXPKTRDY;
		musb_writew(epio, MUSB_TXCSR, csr);
		csr = musb_readw(epio, MUSB_TXCSR);

		/*
		 * FIXME: the TX fifo flush sometimes fails; this has been
		 * observed during device disconnect on AM335x.
		 *
		 * To reproduce the issue, ensure TX urb(s) are queued when
		 * unplugging the USB device connected to the AM335x USB
		 * host port.
		 *
		 * I found that using a usb-ethernet device and running iperf
		 * (client on AM335x) has a very high chance of triggering it.
		 *
		 * Better to turn on musb_dbg() in musb_cleanup_urb() with
		 * CPPI enabled to see the issue when aborting the tx channel.
		 */
		if (dev_WARN_ONCE(musb->controller, retries-- < 1,
				"Could not flush host TX%d fifo: csr: %04x\n",
				ep->epnum, csr))
			return;
		mdelay(1);
	}
}
static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep)
{
	void __iomem	*epio = ep->regs;
	u16		csr;
	int		retries = 5;

	/* scrub any data left in the fifo */
	do {
		csr = musb_readw(epio, MUSB_TXCSR);
		if (!(csr & (MUSB_CSR0_TXPKTRDY | MUSB_CSR0_RXPKTRDY)))
			break;
		musb_writew(epio, MUSB_TXCSR, MUSB_CSR0_FLUSHFIFO);
		csr = musb_readw(epio, MUSB_TXCSR);
		udelay(10);
	} while (--retries);

	WARN(!retries, "Could not flush host TX%d fifo: csr: %04x\n",
			ep->epnum, csr);

	/* and reset for the next transfer */
	musb_writew(epio, MUSB_TXCSR, 0);
}
/*
 * Start transmit. Caller is responsible for locking shared resources.
 * musb must be locked.
 */
static inline void musb_h_tx_start(struct musb_hw_ep *ep)
{
	u16	txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	if (ep->epnum) {
		txcsr = musb_readw(ep->regs, MUSB_TXCSR);
		txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS;
		musb_writew(ep->regs, MUSB_TXCSR, txcsr);
	} else {
		txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY;
		musb_writew(ep->regs, MUSB_CSR0, txcsr);
	}
}
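/*
 * NOTE on the *_H_WZC_BITS masks used above and below: several host-side
 * status bits in TXCSR/RXCSR are "write zero to clear".  OR-ing the mask
 * into a value before a read-modify-write keeps those bits written as 1,
 * so the write leaves them untouched instead of clearing them as a side
 * effect.
 */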
static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep)
{
	u16	txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	txcsr = musb_readw(ep->regs, MUSB_TXCSR);
	txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
	if (is_cppi_enabled(ep->musb))
		txcsr |= MUSB_TXCSR_DMAMODE;
	musb_writew(ep->regs, MUSB_TXCSR, txcsr);
}
static void musb_ep_set_qh(struct musb_hw_ep *ep, int is_in, struct musb_qh *qh)
{
	if (is_in != 0 || ep->is_shared_fifo)
		ep->in_qh  = qh;
	if (is_in == 0 || ep->is_shared_fifo)
		ep->out_qh = qh;
}
static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in)
{
	return is_in ? ep->in_qh : ep->out_qh;
}
/*
 * Start the URB at the front of an endpoint's queue
 * end must be claimed from the caller.
 *
 * Context: controller locked, irqs blocked
 */
static void
musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
{
	u32			len;
	void __iomem		*mbase = musb->mregs;
	struct urb		*urb = next_urb(qh);
	void			*buf = urb->transfer_buffer;
	u32			offset = 0;
	struct musb_hw_ep	*hw_ep = qh->hw_ep;
	int			epnum = hw_ep->epnum;
	u16			frame;

	/* initialize software qh state */
	qh->offset = 0;
	qh->segsize = 0;

	/* gather right source of data */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_CONTROL:
		/* control transfers always start with SETUP */
		is_in = 0;
		musb->ep0_stage = MUSB_EP0_START;
		buf = urb->setup_packet;
		len = 8;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		qh->iso_idx = 0;
		qh->frame = 0;
		offset = urb->iso_frame_desc[0].offset;
		len = urb->iso_frame_desc[0].length;
		break;
	default:		/* bulk, interrupt */
		/* actual_length may be nonzero on retry paths */
		buf = urb->transfer_buffer + urb->actual_length;
		len = urb->transfer_buffer_length - urb->actual_length;
	}

	trace_musb_urb_start(musb, urb);

	/* Configure endpoint */
	musb_ep_set_qh(hw_ep, is_in, qh);
	musb_ep_program(musb, epnum, urb, !is_in, buf, offset, len);

	/* transmit may have more work: start it when it is time */
	if (is_in)
		return;

	/* determine if the time is right for a periodic transfer */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_ISOC:
	case USB_ENDPOINT_XFER_INT:
		musb_dbg(musb, "check whether there's still time for periodic Tx");
		frame = musb_readw(mbase, MUSB_FRAME);
		/* FIXME this doesn't implement that scheduling policy ...
		 * or handle framecounter wrapping
		 */
		if (1) {	/* Always assume URB_ISO_ASAP */
			/* REVISIT the SOF irq handler shouldn't duplicate
			 * this code; and we don't init urb->start_frame...
			 */
			qh->frame = 0;
			goto start;
		} else {
			qh->frame = urb->start_frame;
			/* enable SOF interrupt so we can count down */
			musb_dbg(musb, "SOF for %d", epnum);
#if 1 /* ifndef	CONFIG_ARCH_DAVINCI */
			musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
#endif
		}
		break;
	default:
start:
		musb_dbg(musb, "Start TX%d %s", epnum,
			hw_ep->tx_channel ? "dma" : "pio");

		if (!hw_ep->tx_channel)
			musb_h_tx_start(hw_ep);
		else if (is_cppi_enabled(musb) || tusb_dma_omap(musb))
			musb_h_tx_dma_start(hw_ep);
	}
}
/* Context: caller owns controller lock, IRQs are blocked */
static void musb_giveback(struct musb *musb, struct urb *urb, int status)
__releases(musb->lock)
__acquires(musb->lock)
{
	trace_musb_urb_gb(musb, urb);

	usb_hcd_unlink_urb_from_ep(musb->hcd, urb);
	spin_unlock(&musb->lock);
	usb_hcd_giveback_urb(musb->hcd, urb, status);
	spin_lock(&musb->lock);
}
/* For bulk/interrupt endpoints only */
static inline void musb_save_toggle(struct musb_qh *qh, int is_in,
				    struct urb *urb)
{
	void __iomem		*epio = qh->hw_ep->regs;
	u16			csr;

	/*
	 * FIXME: the current Mentor DMA code seems to have
	 * problems getting toggle correct.
	 */
	if (is_in)
		csr = musb_readw(epio, MUSB_RXCSR) & MUSB_RXCSR_H_DATATOGGLE;
	else
		csr = musb_readw(epio, MUSB_TXCSR) & MUSB_TXCSR_H_DATATOGGLE;

	usb_settoggle(urb->dev, qh->epnum, !is_in, csr ? 1 : 0);
}
/*
 * Advance this hardware endpoint's queue, completing the specified URB and
 * advancing to either the next URB queued to that qh, or else invalidating
 * that qh and advancing to the next qh scheduled after the current one.
 *
 * Context: caller owns controller lock, IRQs are blocked
 */
static void musb_advance_schedule(struct musb *musb, struct urb *urb,
				  struct musb_hw_ep *hw_ep, int is_in)
{
	struct musb_qh		*qh = musb_ep_get_qh(hw_ep, is_in);
	struct musb_hw_ep	*ep = qh->hw_ep;
	int			ready = qh->is_ready;
	int			status;

	status = (urb->status == -EINPROGRESS) ? 0 : urb->status;

	/* save toggle eagerly, for paranoia */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		musb_save_toggle(qh, is_in, urb);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (status == 0 && urb->error_count)
			status = -EXDEV;
		break;
	}

	qh->is_ready = 0;
	musb_giveback(musb, urb, status);
	qh->is_ready = ready;

	/* reclaim resources (and bandwidth) ASAP; deschedule it, and
	 * invalidate qh as soon as list_empty(&hep->urb_list)
	 */
	if (list_empty(&qh->hep->urb_list)) {
		struct list_head	*head;
		struct dma_controller	*dma = musb->dma_controller;

		if (is_in) {
			ep->rx_reinit = 1;
			if (ep->rx_channel) {
				dma->channel_release(ep->rx_channel);
				ep->rx_channel = NULL;
			}
		} else {
			ep->tx_reinit = 1;
			if (ep->tx_channel) {
				dma->channel_release(ep->tx_channel);
				ep->tx_channel = NULL;
			}
		}

		/* Clobber old pointers to this qh */
		musb_ep_set_qh(ep, is_in, NULL);
		qh->hep->hcpriv = NULL;

		switch (qh->type) {

		case USB_ENDPOINT_XFER_CONTROL:
		case USB_ENDPOINT_XFER_BULK:
			/* fifo policy for these lists, except that NAKing
			 * should rotate a qh to the end (for fairness).
			 */
			if (qh->mux == 1) {
				head = qh->ring.prev;
				list_del(&qh->ring);
				kfree(qh);
				qh = first_qh(head);
				break;
			}
			/* fall through */

		case USB_ENDPOINT_XFER_ISOC:
		case USB_ENDPOINT_XFER_INT:
			/* this is where periodic bandwidth should be
			 * de-allocated if it's tracked and allocated;
			 * and where we'd update the schedule tree...
			 */
			kfree(qh);
			qh = NULL;
			break;
		}
	}

	/*
	 * The pipe must be broken if current urb->status is set, so don't
	 * start next urb.
	 * TODO: to minimize the risk of regression, only check urb->status
	 * for RX, until we have a test case to understand the behavior of TX.
	 */
	if ((!status || !is_in) && qh && qh->is_ready) {
		musb_dbg(musb, "... next ep%d %cX urb %p",
		    hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
		musb_start_urb(musb, is_in, qh);
	}
}
static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
{
	/* we don't want fifo to fill itself again;
	 * ignore dma (various models),
	 * leave toggle alone (may not have been saved yet)
	 */
	csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY;
	csr &= ~(MUSB_RXCSR_H_REQPKT
		| MUSB_RXCSR_H_AUTOREQ
		| MUSB_RXCSR_AUTOCLEAR);

	/* write 2x to allow double buffering */
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);

	/* flush writebuffer */
	return musb_readw(hw_ep->regs, MUSB_RXCSR);
}
/*
 * PIO RX for a packet (or part of it).
 */
static bool
musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
{
	u16			rx_count;
	u8			*buf;
	u16			csr;
	bool			done = false;
	u32			length;
	int			do_flush = 0;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	int			pipe = urb->pipe;
	void			*buffer = urb->transfer_buffer;

	/* musb_ep_select(mbase, epnum); */
	rx_count = musb_readw(epio, MUSB_RXCOUNT);
	musb_dbg(musb, "RX%d count %d, buffer %p len %d/%d", epnum, rx_count,
			urb->transfer_buffer, qh->offset,
			urb->transfer_buffer_length);

	/* unload FIFO */
	if (usb_pipeisoc(pipe)) {
		int					status = 0;
		struct usb_iso_packet_descriptor	*d;

		if (iso_err) {
			status = -EILSEQ;
			urb->error_count++;
		}

		d = urb->iso_frame_desc + qh->iso_idx;
		buf = buffer + d->offset;
		length = d->length;
		if (rx_count > length) {
			if (status == 0) {
				status = -EOVERFLOW;
				urb->error_count++;
			}
			musb_dbg(musb, "OVERFLOW %d into %d", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		d->actual_length = length;

		d->status = status;

		/* see if we are done */
		done = (++qh->iso_idx >= urb->number_of_packets);
	} else {
		/* non-isoch */
		buf = buffer + qh->offset;
		length = urb->transfer_buffer_length - qh->offset;
		if (rx_count > length) {
			if (urb->status == -EINPROGRESS)
				urb->status = -EOVERFLOW;
			musb_dbg(musb, "OVERFLOW %d into %d", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		qh->offset += length;

		/* see if we are done */
		done = (urb->actual_length == urb->transfer_buffer_length)
			|| (rx_count < qh->maxpacket)
			|| (urb->status != -EINPROGRESS);
		if (done
				&& (urb->status == -EINPROGRESS)
				&& (urb->transfer_flags & URB_SHORT_NOT_OK)
				&& (urb->actual_length
					< urb->transfer_buffer_length))
			urb->status = -EREMOTEIO;
	}

	musb_read_fifo(hw_ep, length, buf);

	csr = musb_readw(epio, MUSB_RXCSR);
	csr |= MUSB_RXCSR_H_WZC_BITS;
	if (unlikely(do_flush))
		musb_h_flush_rxfifo(hw_ep, csr);
	else {
		/* REVISIT this assumes AUTOCLEAR is never set */
		csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT);
		if (!done)
			csr |= MUSB_RXCSR_H_REQPKT;
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	return done;
}
/* we don't always need to reinit a given side of an endpoint...
 * when we do, use tx/rx reinit routine and then construct a new CSR
 * to address data toggle, NYET, and DMA or PIO.
 *
 * it's possible that driver bugs (especially for DMA) or aborting a
 * transfer might have left the endpoint busier than it should be.
 * the busy/not-empty tests are basically paranoia.
 */
static void
musb_rx_reinit(struct musb *musb, struct musb_qh *qh, u8 epnum)
{
	struct musb_hw_ep *ep = musb->endpoints + epnum;
	u16	csr;

	/* NOTE: we know the "rx" fifo reinit never triggers for ep0.
	 * That always uses tx_reinit since ep0 repurposes TX register
	 * offsets; the initial SETUP packet is also a kind of OUT.
	 */

	/* if programmed for Tx, put it in RX mode */
	if (ep->is_shared_fifo) {
		csr = musb_readw(ep->regs, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_MODE) {
			musb_h_tx_flush_fifo(ep);
			csr = musb_readw(ep->regs, MUSB_TXCSR);
			musb_writew(ep->regs, MUSB_TXCSR,
				    csr | MUSB_TXCSR_FRCDATATOG);
		}

		/*
		 * Clear the MODE bit (and everything else) to enable Rx.
		 * NOTE: we mustn't clear the DMAMODE bit before DMAENAB.
		 */
		if (csr & MUSB_TXCSR_DMAMODE)
			musb_writew(ep->regs, MUSB_TXCSR, MUSB_TXCSR_DMAMODE);
		musb_writew(ep->regs, MUSB_TXCSR, 0);

	/* scrub all previous state, clearing toggle */
	}
	csr = musb_readw(ep->regs, MUSB_RXCSR);
	if (csr & MUSB_RXCSR_RXPKTRDY)
		WARNING("rx%d, packet/%d ready?\n", ep->epnum,
			musb_readw(ep->regs, MUSB_RXCOUNT));

	musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);

	/* target addr and (for multipoint) hub addr/port */
	if (musb->is_multipoint) {
		musb_write_rxfunaddr(musb, epnum, qh->addr_reg);
		musb_write_rxhubaddr(musb, epnum, qh->h_addr_reg);
		musb_write_rxhubport(musb, epnum, qh->h_port_reg);
	} else
		musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);

	/* protocol/endpoint, interval/NAKlimit, i/o size */
	musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
	musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
	/* NOTE: bulk combining rewrites high bits of maxpacket */
	/* Set RXMAXP with the FIFO size of the endpoint
	 * to disable double buffer mode.
	 */
	if (musb->double_buffer_not_ok)
		musb_writew(ep->regs, MUSB_RXMAXP, ep->max_packet_sz_rx);
	else
		musb_writew(ep->regs, MUSB_RXMAXP,
				qh->maxpacket | ((qh->hb_mult - 1) << 11));

	ep->rx_reinit = 0;
}
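/*
 * A worked example of the RXMAXP/TXMAXP packing used above (sketch only,
 * never called by the driver): bits 10:0 hold the max packet size and bits
 * 12:11 hold (hb_mult - 1), so a high-bandwidth ISO endpoint with 1024-byte
 * packets and hb_mult = 3 is programmed as 1024 | ((3 - 1) << 11) = 0x1400.
 */
static inline u16 musb_sketch_maxp_reg(u16 maxpacket, u8 hb_mult)
{
	return maxpacket | ((hb_mult - 1) << 11);
}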
static void musb_tx_dma_set_mode_mentor(struct dma_controller *dma,
		struct musb_hw_ep *hw_ep, struct musb_qh *qh,
		struct urb *urb, u32 offset,
		u32 *length, u8 *mode)
{
	struct dma_channel	*channel = hw_ep->tx_channel;
	void __iomem		*epio = hw_ep->regs;
	u16			pkt_size = qh->maxpacket;
	u16			csr;

	if (*length > channel->max_len)
		*length = channel->max_len;

	csr = musb_readw(epio, MUSB_TXCSR);
	if (*length > pkt_size) {
		*mode = 1;
		csr |= MUSB_TXCSR_DMAMODE | MUSB_TXCSR_DMAENAB;
		/* autoset shouldn't be set in high bandwidth */
		/*
		 * Enable Autoset according to table
		 * below
		 * bulk_split hb_mult	Autoset_Enable
		 *	0	1	Yes(Normal)
		 *	0	>1	No(High BW ISO)
		 *	1	1	Yes(HS bulk)
		 *	1	>1	Yes(FS bulk)
		 */
		if (qh->hb_mult == 1 || (qh->hb_mult > 1 &&
					can_bulk_split(hw_ep->musb, qh->type)))
			csr |= MUSB_TXCSR_AUTOSET;
	} else {
		*mode = 0;
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
		csr |= MUSB_TXCSR_DMAENAB; /* against programmer's guide */
	}
	channel->desired_mode = *mode;
	musb_writew(epio, MUSB_TXCSR, csr);
}
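/*
 * For example (per the logic above): a 3072-byte bulk URB with 512-byte
 * packets takes the *length > pkt_size branch, i.e. DMA mode 1 with AUTOSET,
 * while a 100-byte URB takes the mode 0 branch, where TXPKTRDY has to be
 * raised per packet.
 */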
static void musb_tx_dma_set_mode_cppi_tusb(struct dma_controller *dma,
					   struct musb_hw_ep *hw_ep,
					   struct musb_qh *qh,
					   struct urb *urb,
					   u32 offset,
					   u32 *length,
					   u8 *mode)
{
	struct dma_channel *channel = hw_ep->tx_channel;

	channel->actual_len = 0;

	/*
	 * TX uses "RNDIS" mode automatically but needs help
	 * to identify the zero-length-final-packet case.
	 */
	*mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0;
}
static bool musb_tx_dma_program(struct dma_controller *dma,
		struct musb_hw_ep *hw_ep, struct musb_qh *qh,
		struct urb *urb, u32 offset, u32 length)
{
	struct dma_channel	*channel = hw_ep->tx_channel;
	u16			pkt_size = qh->maxpacket;
	u8			mode;

	if (musb_dma_inventra(hw_ep->musb) || musb_dma_ux500(hw_ep->musb))
		musb_tx_dma_set_mode_mentor(dma, hw_ep, qh, urb, offset,
					    &length, &mode);
	else if (is_cppi_enabled(hw_ep->musb) || tusb_dma_omap(hw_ep->musb))
		musb_tx_dma_set_mode_cppi_tusb(dma, hw_ep, qh, urb, offset,
					       &length, &mode);
	else
		return false;

	qh->segsize = length;

	/*
	 * Ensure the data reaches main memory before starting
	 * the DMA transfer.
	 */
	wmb();

	if (!dma->channel_program(channel, pkt_size, mode,
			urb->transfer_dma + offset, length)) {
		void __iomem *epio = hw_ep->regs;
		u16 csr;

		dma->channel_release(channel);
		hw_ep->tx_channel = NULL;

		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB);
		musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_H_WZC_BITS);
		return false;
	}
	return true;
}
/*
 * Program an HDRC endpoint as per the given URB
 * Context: irqs blocked, controller lock held
 */
static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, int is_out,
			u8 *buf, u32 offset, u32 len)
{
	struct dma_controller	*dma_controller;
	struct dma_channel	*dma_channel;
	u8			dma_ok;
	void __iomem		*mbase = musb->mregs;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = musb_ep_get_qh(hw_ep, !is_out);
	u16			packet_sz = qh->maxpacket;
	u8			use_dma = 1;
	u16			csr;

	musb_dbg(musb, "%s hw%d urb %p spd%d dev%d ep%d%s "
				"h_addr%02x h_port%02x bytes %d",
			is_out ? "-->" : "<--",
			epnum, urb, urb->dev->speed,
			qh->addr_reg, qh->epnum, is_out ? "out" : "in",
			qh->h_addr_reg, qh->h_port_reg,
			len);

	musb_ep_select(mbase, epnum);

	if (is_out && !len) {
		use_dma = 0;
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~MUSB_TXCSR_DMAENAB;
		musb_writew(epio, MUSB_TXCSR, csr);
		hw_ep->tx_channel = NULL;
	}

	/* candidate for DMA? */
	dma_controller = musb->dma_controller;
	if (use_dma && is_dma_capable() && epnum && dma_controller) {
		dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
		if (!dma_channel) {
			dma_channel = dma_controller->channel_alloc(
					dma_controller, hw_ep, is_out);
			if (is_out)
				hw_ep->tx_channel = dma_channel;
			else
				hw_ep->rx_channel = dma_channel;
		}
	} else
		dma_channel = NULL;

	/* make sure we clear DMAEnab, autoSet bits from previous run */

	/* OUT/transmit/EP0 or IN/receive? */
	if (is_out) {
		u16	int_txe;
		u16	load_count;

		csr = musb_readw(epio, MUSB_TXCSR);

		/* disable interrupt in case we flush */
		int_txe = musb->intrtxe;
		musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));

		/* general endpoint setup */
		if (epnum) {
			/* flush all old state, set default */
			/*
			 * We could be flushing valid
			 * packets in double buffering
			 * case
			 */
			if (!hw_ep->tx_double_buffered)
				musb_h_tx_flush_fifo(hw_ep);

			/*
			 * We must not clear the DMAMODE bit before or in
			 * the same cycle with the DMAENAB bit, so we clear
			 * the latter first...
			 */
			csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
					| MUSB_TXCSR_AUTOSET
					| MUSB_TXCSR_DMAENAB
					| MUSB_TXCSR_FRCDATATOG
					| MUSB_TXCSR_H_RXSTALL
					| MUSB_TXCSR_H_ERROR
					| MUSB_TXCSR_TXPKTRDY
					);
			csr |= MUSB_TXCSR_MODE;

			if (!hw_ep->tx_double_buffered) {
				if (usb_gettoggle(urb->dev, qh->epnum, 1))
					csr |= MUSB_TXCSR_H_WR_DATATOGGLE
						| MUSB_TXCSR_H_DATATOGGLE;
				else
					csr |= MUSB_TXCSR_CLRDATATOG;
			}

			musb_writew(epio, MUSB_TXCSR, csr);
			/* REVISIT may need to clear FLUSHFIFO ... */
			csr &= ~MUSB_TXCSR_DMAMODE;
			musb_writew(epio, MUSB_TXCSR, csr);
			csr = musb_readw(epio, MUSB_TXCSR);
		} else {
			/* endpoint 0: just flush */
			musb_h_ep0_flush_fifo(hw_ep);
		}

		/* target addr and (for multipoint) hub addr/port */
		if (musb->is_multipoint) {
			musb_write_txfunaddr(musb, epnum, qh->addr_reg);
			musb_write_txhubaddr(musb, epnum, qh->h_addr_reg);
			musb_write_txhubport(musb, epnum, qh->h_port_reg);
			/* FIXME if !epnum, do the same for RX ... */
		} else
			musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);

		/* protocol/endpoint/interval/NAKlimit */
		if (epnum) {
			musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
			if (musb->double_buffer_not_ok) {
				musb_writew(epio, MUSB_TXMAXP,
						hw_ep->max_packet_sz_tx);
			} else if (can_bulk_split(musb, qh->type)) {
				qh->hb_mult = hw_ep->max_packet_sz_tx
						/ packet_sz;
				musb_writew(epio, MUSB_TXMAXP, packet_sz
					| ((qh->hb_mult) - 1) << 11);
			} else {
				musb_writew(epio, MUSB_TXMAXP,
						qh->maxpacket |
						((qh->hb_mult - 1) << 11));
			}
			musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
		} else {
			musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
			if (musb->is_multipoint)
				musb_writeb(epio, MUSB_TYPE0,
						qh->type_reg);
		}

		if (can_bulk_split(musb, qh->type))
			load_count = min((u32) hw_ep->max_packet_sz_tx,
						len);
		else
			load_count = min((u32) packet_sz, len);

		if (dma_channel && musb_tx_dma_program(dma_controller,
					hw_ep, qh, urb, offset, len))
			load_count = 0;

		if (load_count) {
			/* PIO to load FIFO */
			qh->segsize = load_count;
			if (!buf) {
				sg_miter_start(&qh->sg_miter, urb->sg, 1,
						SG_MITER_ATOMIC
						| SG_MITER_FROM_SG);
				if (!sg_miter_next(&qh->sg_miter)) {
					dev_err(musb->controller,
							"error: sg list empty\n");
					sg_miter_stop(&qh->sg_miter);
					goto finish;
				}
				buf = qh->sg_miter.addr + urb->sg->offset +
					urb->actual_length;
				load_count = min_t(u32, load_count,
						qh->sg_miter.length);
				musb_write_fifo(hw_ep, load_count, buf);
				qh->sg_miter.consumed = load_count;
				sg_miter_stop(&qh->sg_miter);
			} else
				musb_write_fifo(hw_ep, load_count, buf);
		}
finish:
		/* re-enable interrupt */
		musb_writew(mbase, MUSB_INTRTXE, int_txe);

	/* IN/receive */
	} else {
		if (hw_ep->rx_reinit) {
			musb_rx_reinit(musb, qh, epnum);

			/* init new state: toggle and NYET, maybe DMA later */
			if (usb_gettoggle(urb->dev, qh->epnum, 0))
				csr = MUSB_RXCSR_H_WR_DATATOGGLE
					| MUSB_RXCSR_H_DATATOGGLE;
			else
				csr = 0;
			if (qh->type == USB_ENDPOINT_XFER_INT)
				csr |= MUSB_RXCSR_DISNYET;

		} else {
			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

			if (csr & (MUSB_RXCSR_RXPKTRDY
					| MUSB_RXCSR_DMAENAB
					| MUSB_RXCSR_H_REQPKT))
				ERR("broken !rx_reinit, ep%d csr %04x\n",
						hw_ep->epnum, csr);

			/* scrub any stale state, leaving toggle alone */
			csr &= MUSB_RXCSR_DISNYET;
		}

		/* kick things off */

		if ((is_cppi_enabled(musb) || tusb_dma_omap(musb)) && dma_channel) {
			/* Candidate for DMA */
			dma_channel->actual_len = 0L;
			qh->segsize = len;

			/* AUTOREQ is in a DMA register */
			musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

			/*
			 * Unless caller treats short RX transfers as
			 * errors, we dare not queue multiple transfers.
			 */
			dma_ok = dma_controller->channel_program(dma_channel,
					packet_sz, !(urb->transfer_flags &
						     URB_SHORT_NOT_OK),
					urb->transfer_dma + offset,
					qh->segsize);
			if (!dma_ok) {
				dma_controller->channel_release(dma_channel);
				hw_ep->rx_channel = dma_channel = NULL;
			} else
				csr |= MUSB_RXCSR_DMAENAB;
		}

		csr |= MUSB_RXCSR_H_REQPKT;
		musb_dbg(musb, "RXCSR%d := %04x", epnum, csr);
		musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
		csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
	}
}
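/*
 * Worked example for the can_bulk_split() path above: a full-speed bulk
 * endpoint with 64-byte packets behind a 512-byte TX FIFO yields
 * qh->hb_mult = 512 / 64 = 8, so TXMAXP is programmed as 64 | (7 << 11)
 * and the hardware splits each FIFO load into 64-byte packets.
 */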
/* Schedule next QH from musb->in_bulk/out_bulk and move the current qh to
 * the end; avoids starvation for other endpoints.
 */
static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
	int is_in)
{
	struct dma_channel	*dma;
	struct urb		*urb;
	void __iomem		*mbase = musb->mregs;
	void __iomem		*epio = ep->regs;
	struct musb_qh		*cur_qh, *next_qh;
	u16			rx_csr, tx_csr;

	musb_ep_select(mbase, ep->epnum);
	if (is_in) {
		dma = is_dma_capable() ? ep->rx_channel : NULL;

		/*
		 * Need to stop the transaction by clearing REQPKT first
		 * then the NAK Timeout bit ref MUSBMHDRC USB 2.0 HIGH-SPEED
		 * DUAL-ROLE CONTROLLER Programmer's Guide, section 9.2.2
		 */
		rx_csr = musb_readw(epio, MUSB_RXCSR);
		rx_csr |= MUSB_RXCSR_H_WZC_BITS;
		rx_csr &= ~MUSB_RXCSR_H_REQPKT;
		musb_writew(epio, MUSB_RXCSR, rx_csr);
		rx_csr &= ~MUSB_RXCSR_DATAERROR;
		musb_writew(epio, MUSB_RXCSR, rx_csr);

		cur_qh = first_qh(&musb->in_bulk);
	} else {
		dma = is_dma_capable() ? ep->tx_channel : NULL;

		/* clear nak timeout bit */
		tx_csr = musb_readw(epio, MUSB_TXCSR);
		tx_csr |= MUSB_TXCSR_H_WZC_BITS;
		tx_csr &= ~MUSB_TXCSR_H_NAKTIMEOUT;
		musb_writew(epio, MUSB_TXCSR, tx_csr);

		cur_qh = first_qh(&musb->out_bulk);
	}
	if (cur_qh) {
		urb = next_urb(cur_qh);
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			musb->dma_controller->channel_abort(dma);
			urb->actual_length += dma->actual_len;
			dma->actual_len = 0L;
		}
		musb_save_toggle(cur_qh, is_in, urb);

		if (is_in) {
			/* move cur_qh to end of queue */
			list_move_tail(&cur_qh->ring, &musb->in_bulk);

			/* get the next qh from musb->in_bulk */
			next_qh = first_qh(&musb->in_bulk);

			/* set rx_reinit and schedule the next qh */
			ep->rx_reinit = 1;
		} else {
			/* move cur_qh to end of queue */
			list_move_tail(&cur_qh->ring, &musb->out_bulk);

			/* get the next qh from musb->out_bulk */
			next_qh = first_qh(&musb->out_bulk);

			/* set tx_reinit and schedule the next qh */
			ep->tx_reinit = 1;
		}

		if (next_qh)
			musb_start_urb(musb, is_in, next_qh);
	}
}
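/*
 * Example of the rotation above: with bulk IN qhs A, B, C multiplexed on
 * the shared bulk endpoint, a NAK timeout while A is active moves A to the
 * tail (B, C, A), reinits the endpoint, and restarts the schedule with B.
 */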
/*
 * Service the default endpoint (ep0) as host.
 * Return true until it's time to start the status stage.
 */
static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
{
	bool			 more = false;
	u8			*fifo_dest = NULL;
	u16			fifo_count = 0;
	struct musb_hw_ep	*hw_ep = musb->control_ep;
	struct musb_qh		*qh = hw_ep->in_qh;
	struct usb_ctrlrequest	*request;

	switch (musb->ep0_stage) {
	case MUSB_EP0_IN:
		fifo_dest = urb->transfer_buffer + urb->actual_length;
		fifo_count = min_t(size_t, len, urb->transfer_buffer_length -
				   urb->actual_length);
		if (fifo_count < len)
			urb->status = -EOVERFLOW;

		musb_read_fifo(hw_ep, fifo_count, fifo_dest);

		urb->actual_length += fifo_count;
		if (len < qh->maxpacket) {
			/* always terminate on short read; it's
			 * rarely reported as an error.
			 */
		} else if (urb->actual_length <
				urb->transfer_buffer_length)
			more = true;
		break;
	case MUSB_EP0_START:
		request = (struct usb_ctrlrequest *) urb->setup_packet;

		if (!request->wLength) {
			musb_dbg(musb, "start no-DATA");
			break;
		} else if (request->bRequestType & USB_DIR_IN) {
			musb_dbg(musb, "start IN-DATA");
			musb->ep0_stage = MUSB_EP0_IN;
			more = true;
			break;
		} else {
			musb_dbg(musb, "start OUT-DATA");
			musb->ep0_stage = MUSB_EP0_OUT;
			more = true;
		}
		/* FALLTHROUGH */
	case MUSB_EP0_OUT:
		fifo_count = min_t(size_t, qh->maxpacket,
				   urb->transfer_buffer_length -
				   urb->actual_length);
		if (fifo_count) {
			fifo_dest = (u8 *) (urb->transfer_buffer
					+ urb->actual_length);
			musb_dbg(musb, "Sending %d byte%s to ep0 fifo %p",
					fifo_count,
					(fifo_count == 1) ? "" : "s",
					fifo_dest);
			musb_write_fifo(hw_ep, fifo_count, fifo_dest);

			urb->actual_length += fifo_count;
			more = true;
		}
		break;
	default:
		ERR("bogus ep0 stage %d\n", musb->ep0_stage);
		break;
	}

	return more;
}
/*
 * Handle default endpoint interrupt as host. Only called in IRQ time
 * from musb_interrupt().
 *
 * called with controller irqlocked
 */
irqreturn_t musb_h_ep0_irq(struct musb *musb)
{
	struct urb		*urb;
	u16			csr, len;
	int			status = 0;
	void __iomem		*mbase = musb->mregs;
	struct musb_hw_ep	*hw_ep = musb->control_ep;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	bool			complete = false;
	irqreturn_t		retval = IRQ_NONE;

	/* ep0 only has one queue, "in" */
	urb = next_urb(qh);

	musb_ep_select(mbase, 0);
	csr = musb_readw(epio, MUSB_CSR0);
	len = (csr & MUSB_CSR0_RXPKTRDY)
			? musb_readb(epio, MUSB_COUNT0)
			: 0;

	musb_dbg(musb, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d",
		csr, qh, len, urb, musb->ep0_stage);

	/* if we just did status stage, we are done */
	if (MUSB_EP0_STATUS == musb->ep0_stage) {
		retval = IRQ_HANDLED;
		complete = true;
	}

	/* prepare status */
	if (csr & MUSB_CSR0_H_RXSTALL) {
		musb_dbg(musb, "STALLING ENDPOINT");
		status = -EPIPE;

	} else if (csr & MUSB_CSR0_H_ERROR) {
		musb_dbg(musb, "no response, csr0 %04x", csr);
		status = -EPROTO;

	} else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
		musb_dbg(musb, "control NAK timeout");

		/* NOTE: this code path would be a good place to PAUSE a
		 * control transfer, if another one is queued, so that
		 * ep0 is more likely to stay busy.  That's already done
		 * for bulk RX transfers.
		 *
		 * if (qh->ring.next != &musb->control), then
		 * we have a candidate... NAKing is *NOT* an error
		 */
		musb_writew(epio, MUSB_CSR0, 0);
		retval = IRQ_HANDLED;
	}

	if (status) {
		musb_dbg(musb, "aborting");
		retval = IRQ_HANDLED;
		if (urb)
			urb->status = status;
		complete = true;

		/* use the proper sequence to abort the transfer */
		if (csr & MUSB_CSR0_H_REQPKT) {
			csr &= ~MUSB_CSR0_H_REQPKT;
			musb_writew(epio, MUSB_CSR0, csr);
			csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
			musb_writew(epio, MUSB_CSR0, csr);
		} else {
			musb_h_ep0_flush_fifo(hw_ep);
		}

		musb_writeb(epio, MUSB_NAKLIMIT0, 0);

		/* clear it */
		musb_writew(epio, MUSB_CSR0, 0);
	}

	if (unlikely(!urb)) {
		/* stop endpoint since we have no place for its data, this
		 * SHOULD NEVER HAPPEN! */
		ERR("no URB for end 0\n");

		musb_h_ep0_flush_fifo(hw_ep);
		goto done;
	}

	if (!complete) {
		/* call common logic and prepare response */
		if (musb_h_ep0_continue(musb, len, urb)) {
			/* more packets required */
			csr = (MUSB_EP0_IN == musb->ep0_stage)
				?  MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY;
		} else {
			/* data transfer complete; perform status phase */
			if (usb_pipeout(urb->pipe)
					|| !urb->transfer_buffer_length)
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_H_REQPKT;
			else
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_TXPKTRDY;

			/* disable ping token in status phase */
			csr |= MUSB_CSR0_H_DIS_PING;

			/* flag status stage */
			musb->ep0_stage = MUSB_EP0_STATUS;

			musb_dbg(musb, "ep0 STATUS, csr %04x", csr);

		}
		musb_writew(epio, MUSB_CSR0, csr);
		retval = IRQ_HANDLED;
	} else
		musb->ep0_stage = MUSB_EP0_IDLE;

	/* call completion handler if done */
	if (complete)
		musb_advance_schedule(musb, urb, hw_ep, 1);
done:
	return retval;
}
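/*
 * Stage flow driven by the IRQ handler above, in the common case:
 * MUSB_EP0_START (SETUP sent) -> MUSB_EP0_IN or MUSB_EP0_OUT (optional
 * DATA, looped while musb_h_ep0_continue() returns true) ->
 * MUSB_EP0_STATUS (status packet) -> MUSB_EP0_IDLE, after which the URB
 * is completed via musb_advance_schedule().
 */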
#ifdef CONFIG_USB_INVENTRA_DMA

/* Host side TX (OUT) using Mentor DMA works as follows:
	submit_urb ->
		- if queue was empty, Program Endpoint
		- ... which starts DMA to fifo in mode 1 or 0

	DMA Isr (transfer complete) -> TxAvail()
		- Stop DMA (~DmaEnab)	(<--- Alert ... currently happens
					only in musb_cleanup_urb)
		- TxPktRdy has to be set in mode 0 or for
			short packets in mode 1.
*/

#endif

/* Service a Tx-Available or dma completion irq for the endpoint */
void musb_host_tx(struct musb *musb, u8 epnum)
{
	int			pipe;
	bool			done = false;
	u16			tx_csr;
	size_t			length = 0;
	size_t			offset = 0;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->out_qh;
	struct urb		*urb = next_urb(qh);
	u32			status = 0;
	void __iomem		*mbase = musb->mregs;
	struct dma_channel	*dma;
	bool			transfer_pending = false;

	musb_ep_select(mbase, epnum);
	tx_csr = musb_readw(epio, MUSB_TXCSR);

	/* with CPPI, DMA sometimes triggers "extra" irqs */
	if (!urb) {
		musb_dbg(musb, "extra TX%d ready, csr %04x", epnum, tx_csr);
		return;
	}

	pipe = urb->pipe;
	dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
	trace_musb_urb_tx(musb, urb);
	musb_dbg(musb, "OUT/TX%d end, csr %04x%s", epnum, tx_csr,
			dma ? ", dma" : "");

	/* check for errors */
	if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
		/* dma was disabled, fifo flushed */
		musb_dbg(musb, "TX end %d stall", epnum);

		/* stall; record URB status */
		status = -EPIPE;

	} else if (tx_csr & MUSB_TXCSR_H_ERROR) {
		/* (NON-ISO) dma was disabled, fifo flushed */
		musb_dbg(musb, "TX 3strikes on ep=%d", epnum);

		status = -ETIMEDOUT;

	} else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
		if (USB_ENDPOINT_XFER_BULK == qh->type && qh->mux == 1
				&& !list_is_singular(&musb->out_bulk)) {
			musb_dbg(musb, "NAK timeout on TX%d ep", epnum);
			musb_bulk_nak_timeout(musb, hw_ep, 0);
		} else {
			musb_dbg(musb, "TX ep%d device not responding", epnum);
			/* NOTE: this code path would be a good place to PAUSE a
			 * transfer, if there's some other (nonperiodic) tx urb
			 * that could use this fifo.  (dma complicates it...)
			 * That's already done for bulk RX transfers.
			 *
			 * if (bulk && qh->ring.next != &musb->out_bulk), then
			 * we have a candidate... NAKing is *NOT* an error
			 */
			musb_ep_select(mbase, epnum);
			musb_writew(epio, MUSB_TXCSR,
					MUSB_TXCSR_H_WZC_BITS
					| MUSB_TXCSR_TXPKTRDY);
		}
		return;
	}

done:
	if (status) {
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			musb->dma_controller->channel_abort(dma);
		}

		/* do the proper sequence to abort the transfer in the
		 * usb core; the dma engine should already be stopped.
		 */
		musb_h_tx_flush_fifo(hw_ep);
		tx_csr &= ~(MUSB_TXCSR_AUTOSET
				| MUSB_TXCSR_DMAENAB
				| MUSB_TXCSR_H_ERROR
				| MUSB_TXCSR_H_RXSTALL
				| MUSB_TXCSR_H_NAKTIMEOUT
				);

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_TXCSR, tx_csr);
		/* REVISIT may need to clear FLUSHFIFO ... */
		musb_writew(epio, MUSB_TXCSR, tx_csr);
		musb_writeb(epio, MUSB_TXINTERVAL, 0);

		done = true;
	}

	/* second cppi case */
	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		musb_dbg(musb, "extra TX%d ready, csr %04x", epnum, tx_csr);
		return;
	}

	if (is_dma_capable() && dma && !status) {
		/*
		 * DMA has completed.  But if we're using DMA mode 1 (multi
		 * packet DMA), we need a terminal TXPKTRDY interrupt before
		 * we can consider this transfer completed, lest we trash
		 * its last packet when writing the next URB's data.  So we
		 * switch back to mode 0 to get that interrupt; we'll come
		 * back here once it happens.
		 */
		if (tx_csr & MUSB_TXCSR_DMAMODE) {
			/*
			 * We shouldn't clear DMAMODE with DMAENAB set; so
			 * clear them in a safe order.  That should be OK
			 * once TXPKTRDY has been set (and I've never seen
			 * it being 0 at this moment -- DMA interrupt latency
			 * is significant) but if it hasn't been then we have
			 * no choice but to stop being polite and ignore the
			 * programmer's guide... :-)
			 *
			 * Note that we must write TXCSR with TXPKTRDY cleared
			 * in order not to re-trigger the packet send (this bit
			 * can't be cleared by CPU), and there's another caveat:
			 * TXPKTRDY may be set shortly and then cleared in the
			 * double-buffered FIFO mode, so we do an extra TXCSR
			 * read for debouncing...
			 */
			tx_csr &= musb_readw(epio, MUSB_TXCSR);
			if (tx_csr & MUSB_TXCSR_TXPKTRDY) {
				tx_csr &= ~(MUSB_TXCSR_DMAENAB |
					    MUSB_TXCSR_TXPKTRDY);
				musb_writew(epio, MUSB_TXCSR,
					    tx_csr | MUSB_TXCSR_H_WZC_BITS);
			}
			tx_csr &= ~(MUSB_TXCSR_DMAMODE |
				    MUSB_TXCSR_TXPKTRDY);
			musb_writew(epio, MUSB_TXCSR,
				    tx_csr | MUSB_TXCSR_H_WZC_BITS);

			/*
			 * There is no guarantee that we'll get an interrupt
			 * after clearing DMAMODE as we might have done this
			 * too late (after TXPKTRDY was cleared by controller).
			 * Re-read TXCSR as we have spoiled its previous value.
			 */
			tx_csr = musb_readw(epio, MUSB_TXCSR);
		}

		/*
		 * We may get here from a DMA completion or TXPKTRDY interrupt.
		 * In any case, we must check the FIFO status here and bail out
		 * only if the FIFO still has data -- that should prevent the
		 * "missed" TXPKTRDY interrupts and deal with double-buffered
		 * FIFO mode too...
		 */
		if (tx_csr & (MUSB_TXCSR_FIFONOTEMPTY | MUSB_TXCSR_TXPKTRDY)) {
			musb_dbg(musb,
				 "DMA complete but FIFO not empty, CSR %04x",
				 tx_csr);
			return;
		}
	}

	if (!status || dma || usb_pipeisoc(pipe)) {
		if (dma)
			length = dma->actual_len;
		else
			length = qh->segsize;
		qh->offset += length;

		if (usb_pipeisoc(pipe)) {
			struct usb_iso_packet_descriptor	*d;

			d = urb->iso_frame_desc + qh->iso_idx;
			d->actual_length = length;
			d->status = status;
			if (++qh->iso_idx >= urb->number_of_packets) {
				done = true;
			} else {
				d++;
				offset = d->offset;
				length = d->length;
			}
		} else if (dma && urb->transfer_buffer_length == qh->offset) {
			done = true;
		} else {
			/* see if we need to send more data, or ZLP */
			if (qh->segsize < qh->maxpacket)
				done = true;
			else if (qh->offset == urb->transfer_buffer_length
					&& !(urb->transfer_flags
						& URB_ZERO_PACKET))
				done = true;
			if (!done) {
				offset = qh->offset;
				length = urb->transfer_buffer_length - offset;
				transfer_pending = true;
			}
		}
	}

	/* urb->status != -EINPROGRESS means request has been faulted,
	 * so we must abort this transfer after cleanup
	 */
	if (urb->status != -EINPROGRESS) {
		done = true;
		if (status == 0)
			status = urb->status;
	}

	if (done) {
		/* set status */
		urb->status = status;
		urb->actual_length = qh->offset;
		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);
		return;
	} else if ((usb_pipeisoc(pipe) || transfer_pending) && dma) {
		if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb,
				offset, length)) {
			if (is_cppi_enabled(musb) || tusb_dma_omap(musb))
				musb_h_tx_dma_start(hw_ep);
			return;
		}
	} else if (tx_csr & MUSB_TXCSR_DMAENAB) {
		musb_dbg(musb, "not complete, but DMA enabled?");
		return;
	}

	/*
	 * PIO: start next packet in this URB.
	 *
	 * REVISIT: some docs say that when hw_ep->tx_double_buffered,
	 * (and presumably, FIFO is not half-full) we should write *two*
	 * packets before updating TXCSR; other docs disagree...
	 */
	if (length > qh->maxpacket)
		length = qh->maxpacket;
	/* Unmap the buffer so that CPU can use it */
	usb_hcd_unmap_urb_for_dma(musb->hcd, urb);

	/*
	 * We need to map sg if the transfer_buffer is
	 * NULL.
	 */
	if (!urb->transfer_buffer)
		qh->use_sg = true;

	if (qh->use_sg) {
		/* sg_miter_start is already done in musb_ep_program */
		if (!sg_miter_next(&qh->sg_miter)) {
			dev_err(musb->controller, "error: sg list empty\n");
			sg_miter_stop(&qh->sg_miter);
			status = -EINVAL;
			goto done;
		}
		urb->transfer_buffer = qh->sg_miter.addr;
		length = min_t(u32, length, qh->sg_miter.length);
		musb_write_fifo(hw_ep, length, urb->transfer_buffer);
		qh->sg_miter.consumed = length;
		sg_miter_stop(&qh->sg_miter);
	} else {
		musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset);
	}

	qh->segsize = length;

	if (qh->use_sg) {
		if (offset + length >= urb->transfer_buffer_length)
			qh->use_sg = false;
	}

	musb_ep_select(mbase, epnum);
	musb_writew(epio, MUSB_TXCSR,
			MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
}
#ifdef CONFIG_USB_TI_CPPI41_DMA
/* Seems to set up ISO for cppi41 and not advance len. See commit c57c41d */
static int musb_rx_dma_iso_cppi41(struct dma_controller *dma,
				  struct musb_hw_ep *hw_ep,
				  struct musb_qh *qh,
				  struct urb *urb,
				  size_t len)
{
	struct dma_channel *channel = hw_ep->rx_channel;
	void __iomem *epio = hw_ep->regs;
	dma_addr_t *buf;
	u32 length;
	u16 val;

	buf = (void *)urb->iso_frame_desc[qh->iso_idx].offset +
		(u32)urb->transfer_dma;

	length = urb->iso_frame_desc[qh->iso_idx].length;

	val = musb_readw(epio, MUSB_RXCSR);
	val |= MUSB_RXCSR_DMAENAB;
	musb_writew(hw_ep->regs, MUSB_RXCSR, val);

	return dma->channel_program(channel, qh->maxpacket, 0,
				   (u32)buf, length);
}
#else
static inline int musb_rx_dma_iso_cppi41(struct dma_controller *dma,
					 struct musb_hw_ep *hw_ep,
					 struct musb_qh *qh,
					 struct urb *urb,
					 size_t len)
{
	return false;
}
#endif
#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA) || \
	defined(CONFIG_USB_TI_CPPI41_DMA)
/* Host side RX (IN) using Mentor DMA works as follows:
	submit_urb ->
		- if queue was empty, ProgramEndpoint
		- first IN token is sent out (by setting ReqPkt)
	LinuxIsr -> RxReady()
	/\	=> first packet is received
	|	- Set in mode 0 (DmaEnab, ~ReqPkt)
	|		-> DMA Isr (transfer complete) -> RxReady()
	|			- Ack receive (~RxPktRdy), turn off DMA (~DmaEnab)
	|			- if urb not complete, send next IN token (ReqPkt)
	|			   |		else complete urb.
	|			   |
	---------------------------
 *
 * Nuances of mode 1:
 *	For short packets, no ack (+RxPktRdy) is sent automatically
 *	(even if AutoClear is ON)
 *	For full packets, ack (~RxPktRdy) and next IN token (+ReqPkt) is sent
 *	automatically => major problem, as collecting the next packet becomes
 *	difficult. Hence mode 1 is not used.
 *
 * REVISIT
 *	All we care about at this driver level is that
 *       (a) all URBs terminate with REQPKT cleared and fifo(s) empty;
 *       (b) termination conditions are: short RX, or buffer full;
 *       (c) fault modes include
 *           - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO.
 *             (and that endpoint's dma queue stops immediately)
 *           - overflow (full, PLUS more bytes in the terminal packet)
 *
 *	So for example, usb-storage sets URB_SHORT_NOT_OK, and would
 *	thus be a great candidate for using mode 1 ... for all but the
 *	last packet of one URB's transfer.
 */
static int musb_rx_dma_inventra_cppi41(struct dma_controller *dma,
				       struct musb_hw_ep *hw_ep,
				       struct musb_qh *qh,
				       struct urb *urb,
				       size_t len)
{
	struct dma_channel *channel = hw_ep->rx_channel;
	void __iomem *epio = hw_ep->regs;
	u16 val;
	int pipe;
	bool done;

	pipe = urb->pipe;

	if (usb_pipeisoc(pipe)) {
		struct usb_iso_packet_descriptor *d;

		d = urb->iso_frame_desc + qh->iso_idx;
		d->actual_length = len;

		/* even if there was an error, we did the dma
		 * for iso_frame_desc->length
		 */
		if (d->status != -EILSEQ && d->status != -EOVERFLOW)
			d->status = 0;

		if (++qh->iso_idx >= urb->number_of_packets) {
			done = true;
		} else {
			/* REVISIT: Why ignore return value here? */
			if (musb_dma_cppi41(hw_ep->musb))
				done = musb_rx_dma_iso_cppi41(dma, hw_ep, qh,
							      urb, len);
			done = false;
		}

	} else {
		/* done if urb buffer is full or short packet is recd */
		done = (urb->actual_length + len >=
			urb->transfer_buffer_length
			|| channel->actual_len < qh->maxpacket
			|| channel->rx_packet_done);
	}

	/* send IN token for next packet, without AUTOREQ */
	if (!done) {
		val = musb_readw(epio, MUSB_RXCSR);
		val |= MUSB_RXCSR_H_REQPKT;
		musb_writew(epio, MUSB_RXCSR, MUSB_RXCSR_H_WZC_BITS | val);
	}

	return done;
}
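/*
 * Minimal restatement of the non-ISO termination rule used above (sketch
 * only, never called): an RX transfer is done when the buffer is full or
 * a short packet arrives.
 */
static inline bool musb_sketch_rx_done(struct urb *urb, size_t len,
		u16 maxpacket)
{
	return urb->actual_length + len >= urb->transfer_buffer_length
		|| len < maxpacket;
}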
/* Disadvantage of using mode 1:
 *	It's basically usable only for mass storage class; essentially all
 *	other protocols also terminate transfers on short packets.
 *
 * Details:
 *	An extra IN token is sent at the end of the transfer (due to AUTOREQ)
 *	If you try to use mode 1 for (transfer_buffer_length - 512), and try
 *	to use the extra IN token to grab the last packet using mode 0, then
 *	the problem is that you cannot be sure when the device will send the
 *	last packet and RxPktRdy set. Sometimes the packet is recd too soon
 *	such that it gets lost when RxCSR is re-set at the end of the mode 1
 *	transfer, while sometimes it is recd just a little late so that if you
 *	try to configure for mode 0 soon after the mode 1 transfer is
 *	completed, you will find rxcount 0. Okay, so you might think why not
 *	wait for an interrupt when the pkt is recd. Well, you won't get any!
 */
static int musb_rx_dma_in_inventra_cppi41(struct dma_controller *dma,
					  struct musb_hw_ep *hw_ep,
					  struct musb_qh *qh,
					  struct urb *urb,
					  size_t len,
					  u8 iso_err)
{
	struct musb *musb = hw_ep->musb;
	void __iomem *epio = hw_ep->regs;
	struct dma_channel *channel = hw_ep->rx_channel;
	u16 rx_count, val;
	int length, pipe, done;
	dma_addr_t buf;

	rx_count = musb_readw(epio, MUSB_RXCOUNT);
	pipe = urb->pipe;

	if (usb_pipeisoc(pipe)) {
		int d_status = 0;
		struct usb_iso_packet_descriptor *d;

		d = urb->iso_frame_desc + qh->iso_idx;

		if (iso_err) {
			d_status = -EILSEQ;
			urb->error_count++;
		}
		if (rx_count > d->length) {
			if (d_status == 0) {
				d_status = -EOVERFLOW;
				urb->error_count++;
			}
			musb_dbg(musb, "** OVERFLOW %d into %d",
				rx_count, d->length);

			length = d->length;
		} else
			length = rx_count;
		d->status = d_status;
		buf = urb->transfer_dma + d->offset;
	} else {
		length = rx_count;
		buf = urb->transfer_dma + urb->actual_length;
	}

	channel->desired_mode = 0;
#ifdef USE_MODE1
	/* because of the issue below, mode 1 will
	 * only rarely behave with correct semantics.
	 */
	if ((urb->transfer_flags & URB_SHORT_NOT_OK)
	    && (urb->transfer_buffer_length - urb->actual_length)
		> qh->maxpacket)
		channel->desired_mode = 1;
	if (rx_count < hw_ep->max_packet_sz_rx) {
		length = rx_count;
		channel->desired_mode = 0;
	} else {
		length = urb->transfer_buffer_length;
	}
#endif

	/* See comments above on disadvantages of using mode 1 */
	val = musb_readw(epio, MUSB_RXCSR);
	val &= ~MUSB_RXCSR_H_REQPKT;

	if (channel->desired_mode == 0)
		val &= ~MUSB_RXCSR_H_AUTOREQ;
	else
		val |= MUSB_RXCSR_H_AUTOREQ;
	val |= MUSB_RXCSR_DMAENAB;

	/* autoclear shouldn't be set in high bandwidth */
	if (qh->hb_mult == 1)
		val |= MUSB_RXCSR_AUTOCLEAR;

	musb_writew(epio, MUSB_RXCSR, MUSB_RXCSR_H_WZC_BITS | val);

	/* REVISIT if when actual_length != 0,
	 * transfer_buffer_length needs to be
	 * adjusted first...
	 */
	done = dma->channel_program(channel, qh->maxpacket,
				   channel->desired_mode,
				   buf, length);

	if (!done) {
		dma->channel_release(channel);
		hw_ep->rx_channel = NULL;
		channel = NULL;
		val = musb_readw(epio, MUSB_RXCSR);
		val &= ~(MUSB_RXCSR_DMAENAB
			| MUSB_RXCSR_H_AUTOREQ
			| MUSB_RXCSR_AUTOCLEAR);
		musb_writew(epio, MUSB_RXCSR, val);
	}

	return done;
}
#else
static inline int musb_rx_dma_inventra_cppi41(struct dma_controller *dma,
					      struct musb_hw_ep *hw_ep,
					      struct musb_qh *qh,
					      struct urb *urb,
					      size_t len)
{
	return false;
}

static inline int musb_rx_dma_in_inventra_cppi41(struct dma_controller *dma,
						 struct musb_hw_ep *hw_ep,
						 struct musb_qh *qh,
						 struct urb *urb,
						 size_t len,
						 u8 iso_err)
{
	return false;
}
#endif
/*
 * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
 * and high-bandwidth IN transfer cases.
 */
void musb_host_rx(struct musb *musb, u8 epnum)
{
	struct urb		*urb;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	struct dma_controller	*c = musb->dma_controller;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	size_t			xfer_len;
	void __iomem		*mbase = musb->mregs;
	u16			rx_csr, val;
	bool			iso_err = false;
	bool			done = false;
	u32			status;
	struct dma_channel	*dma;
	unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;

	musb_ep_select(mbase, epnum);

	urb = next_urb(qh);
	dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
	status = 0;
	xfer_len = 0;

	rx_csr = musb_readw(epio, MUSB_RXCSR);
	val = rx_csr;

	if (unlikely(!urb)) {
		/* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least
		 * usbtest #11 (unlinks) triggers it regularly, sometimes
		 * with fifo full.  (Only with DMA??)
		 */
		musb_dbg(musb, "BOGUS RX%d ready, csr %04x, count %d",
			epnum, val, musb_readw(epio, MUSB_RXCOUNT));
		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
		return;
	}

	trace_musb_urb_rx(musb, urb);

	/* check for errors, concurrent stall & unlink is not really
	 * handled yet! */
	if (rx_csr & MUSB_RXCSR_H_RXSTALL) {
		musb_dbg(musb, "RX end %d STALL", epnum);

		/* stall; record URB status */
		status = -EPIPE;

	} else if (rx_csr & MUSB_RXCSR_H_ERROR) {
		musb_dbg(musb, "end %d RX proto error", epnum);

		status = -EPROTO;
		musb_writeb(epio, MUSB_RXINTERVAL, 0);

		rx_csr &= ~MUSB_RXCSR_H_ERROR;
		musb_writew(epio, MUSB_RXCSR, rx_csr);

	} else if (rx_csr & MUSB_RXCSR_DATAERROR) {

		if (USB_ENDPOINT_XFER_ISOC != qh->type) {
			musb_dbg(musb, "RX end %d NAK timeout", epnum);

			/* NOTE: NAKing is *NOT* an error, so we want to
			 * continue.  Except ... if there's a request for
			 * another QH, use that instead of starving it.
			 *
			 * Devices like Ethernet and serial adapters keep
			 * reads posted at all times, which will starve
			 * other devices without this logic.
			 */
			if (usb_pipebulk(urb->pipe)
					&& qh->mux == 1
					&& !list_is_singular(&musb->in_bulk)) {
				musb_bulk_nak_timeout(musb, hw_ep, 1);
				return;
			}
			musb_ep_select(mbase, epnum);
			rx_csr |= MUSB_RXCSR_H_WZC_BITS;
			rx_csr &= ~MUSB_RXCSR_DATAERROR;
			musb_writew(epio, MUSB_RXCSR, rx_csr);

			goto finish;
		} else {
			musb_dbg(musb, "RX end %d ISO data error", epnum);
			/* packet error reported later */
			iso_err = true;
		}
	} else if (rx_csr & MUSB_RXCSR_INCOMPRX) {
		musb_dbg(musb, "end %d high bandwidth incomplete ISO packet RX",
				epnum);
		status = -EPROTO;
	}

	/* faults abort the transfer */
	if (status) {
		/* clean up dma and collect transfer count */
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			musb->dma_controller->channel_abort(dma);
			xfer_len = dma->actual_len;
		}
		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
		musb_writeb(epio, MUSB_RXINTERVAL, 0);
		done = true;
		goto finish;
	}

	if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) {
		/* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */
		ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr);
		goto finish;
	}

	/* thorough shutdown for now ... given more precise fault handling
	 * and better queueing support, we might keep a DMA pipeline going
	 * while processing this irq for earlier completions.
	 */

	/* FIXME this is _way_ too much in-line logic for Mentor DMA */
	if (!musb_dma_inventra(musb) && !musb_dma_ux500(musb) &&
	    (rx_csr & MUSB_RXCSR_H_REQPKT)) {
		/* REVISIT this happened for a while on some short reads...
		 * the cleanup still needs investigation... looks bad...
		 * and also duplicates dma cleanup code above ... plus,
		 * shouldn't this be the "half full" double buffer case?
		 */
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			musb->dma_controller->channel_abort(dma);
			xfer_len = dma->actual_len;
			done = true;
		}

		musb_dbg(musb, "RXCSR%d %04x, reqpkt, len %zu%s", epnum, rx_csr,
				xfer_len, dma ? ", dma" : "");
		rx_csr &= ~MUSB_RXCSR_H_REQPKT;

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | rx_csr);
	}

	if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
		xfer_len = dma->actual_len;

		val &= ~(MUSB_RXCSR_DMAENAB
			| MUSB_RXCSR_H_AUTOREQ
			| MUSB_RXCSR_AUTOCLEAR
			| MUSB_RXCSR_RXPKTRDY);
		musb_writew(hw_ep->regs, MUSB_RXCSR, val);

		if (musb_dma_inventra(musb) || musb_dma_ux500(musb) ||
		    musb_dma_cppi41(musb)) {
			done = musb_rx_dma_inventra_cppi41(c, hw_ep, qh, urb, xfer_len);
			musb_dbg(hw_ep->musb,
				"ep %d dma %s, rxcsr %04x, rxcount %d",
				epnum, done ? "off" : "reset",
				musb_readw(epio, MUSB_RXCSR),
				musb_readw(epio, MUSB_RXCOUNT));
		} else {
			done = true;
		}

	} else if (urb->status == -EINPROGRESS) {
		/* if no errors, be sure a packet is ready for unloading */
		if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) {
			status = -EPROTO;
			ERR("Rx interrupt with no errors or packet!\n");

			/* FIXME this is another "SHOULD NEVER HAPPEN" */

			/* do the proper sequence to abort the transfer */
			musb_ep_select(mbase, epnum);
			val &= ~MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR, val);
			goto finish;
		}

		/* we are expecting IN packets */
		if ((musb_dma_inventra(musb) || musb_dma_ux500(musb) ||
		    musb_dma_cppi41(musb)) && dma) {
			musb_dbg(hw_ep->musb,
				"RX%d count %d, buffer 0x%llx len %d/%d",
				epnum, musb_readw(epio, MUSB_RXCOUNT),
				(unsigned long long) urb->transfer_dma
				+ urb->actual_length,
				qh->offset,
				urb->transfer_buffer_length);

			if (musb_rx_dma_in_inventra_cppi41(c, hw_ep, qh, urb,
							   xfer_len, iso_err))
				goto finish;
			else
				dev_err(musb->controller, "error: rx_dma failed\n");
		}

		if (!dma) {
			unsigned int received_len;

			/* Unmap the buffer so that CPU can use it */
			usb_hcd_unmap_urb_for_dma(musb->hcd, urb);

			/*
			 * We need to map sg if the transfer_buffer is
			 * NULL.
			 */
			if (!urb->transfer_buffer) {
				qh->use_sg = true;
				sg_miter_start(&qh->sg_miter, urb->sg, 1,
						sg_flags);
			}

			if (qh->use_sg) {
				if (!sg_miter_next(&qh->sg_miter)) {
					dev_err(musb->controller, "error: sg list empty\n");
					sg_miter_stop(&qh->sg_miter);
					status = -EINVAL;
					done = true;
					goto finish;
				}
				urb->transfer_buffer = qh->sg_miter.addr;
				received_len = urb->actual_length;
				qh->offset = 0x0;
				done = musb_host_packet_rx(musb, urb, epnum,
						iso_err);
				/* Calculate the number of bytes received */
				received_len = urb->actual_length -
					received_len;
				qh->sg_miter.consumed = received_len;
				sg_miter_stop(&qh->sg_miter);
			} else {
				done = musb_host_packet_rx(musb, urb,
						epnum, iso_err);
			}
			musb_dbg(musb, "read %spacket", done ? "last " : "");
		}
	}

finish:
	urb->actual_length += xfer_len;
	qh->offset += xfer_len;
	if (done) {
		if (qh->use_sg)
			qh->use_sg = false;

		if (urb->status == -EINPROGRESS)
			urb->status = status;
		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
	}
}
/* schedule nodes correspond to peripheral endpoints, like an OHCI QH.
 * the software schedule associates multiple such nodes with a given
 * host side hardware endpoint + direction; scheduling may activate
 * that hardware endpoint.
 */
static int musb_schedule(
	struct musb		*musb,
	struct musb_qh		*qh,
	int			is_in)
{
	int			idle = 0;
	int			best_diff;
	int			best_end, epnum;
	struct musb_hw_ep	*hw_ep = NULL;
	struct list_head	*head = NULL;
	u8			toggle;
	u8			txtype;
	struct urb		*urb = next_urb(qh);

	/* use fixed hardware for control and bulk */
	if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
		head = &musb->control;
		hw_ep = musb->control_ep;
		goto success;
	}

	/* else, periodic transfers get muxed to other endpoints */

	/*
	 * We know this qh hasn't been scheduled, so all we need to do
	 * is choose which hardware endpoint to put it on ...
	 *
	 * REVISIT what we really want here is a regular schedule tree
	 * like e.g. OHCI uses.
	 */
	best_diff = 4096;
	best_end = -1;

	for (epnum = 1, hw_ep = musb->endpoints + 1;
			epnum < musb->nr_endpoints;
			epnum++, hw_ep++) {
		int	diff;

		if (musb_ep_get_qh(hw_ep, is_in) != NULL)
			continue;

		if (hw_ep == musb->bulk_ep)
			continue;

		if (is_in)
			diff = hw_ep->max_packet_sz_rx;
		else
			diff = hw_ep->max_packet_sz_tx;
		diff -= (qh->maxpacket * qh->hb_mult);

		if (diff >= 0 && best_diff > diff) {

			/*
			 * Mentor controller has a bug in that if we schedule
			 * a BULK Tx transfer on an endpoint that had earlier
			 * handled ISOC then the BULK transfer has to start on
			 * a zero toggle.  If the BULK transfer starts on a 1
			 * toggle then this transfer will fail as the mentor
			 * controller starts the Bulk transfer on a 0 toggle
			 * irrespective of the programming of the toggle bits
			 * in the TXCSR register.  Check for this condition
			 * while allocating the EP for a Tx Bulk transfer.  If
			 * so skip this EP.
			 */
			hw_ep = musb->endpoints + epnum;
			toggle = usb_gettoggle(urb->dev, qh->epnum, !is_in);
			txtype = (musb_readb(hw_ep->regs, MUSB_TXTYPE)
					>> 4) & 0x3;
			if (!is_in && (qh->type == USB_ENDPOINT_XFER_BULK) &&
				toggle && (txtype == USB_ENDPOINT_XFER_ISOC))
				continue;

			best_diff = diff;
			best_end = epnum;
		}
	}
	/* use bulk reserved ep1 if no other ep is free */
	if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) {
		hw_ep = musb->bulk_ep;
		if (is_in)
			head = &musb->in_bulk;
		else
			head = &musb->out_bulk;

		/* Enable bulk RX/TX NAK timeout scheme when bulk requests are
		 * multiplexed. This scheme does not work in high speed to full
		 * speed scenario as NAK interrupts are not coming from a
		 * full speed device connected to a high speed device.
		 * NAK timeout interval is 8 (128 uframe or 16ms) for HS and
		 * 4 (8 frame or 8ms) for FS device.
		 */
		if (qh->dev)
			qh->intv_reg =
				(USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4;
		goto success;
	} else if (best_end < 0) {
		return -ENOSPC;
	}

	idle = 1;
	qh->mux = 0;
	hw_ep = musb->endpoints + best_end;
	musb_dbg(musb, "qh %p periodic slot %d", qh, best_end);
success:
	if (head) {
		idle = list_empty(head);
		list_add_tail(&qh->ring, head);
		qh->mux = 1;
	}
	qh->hw_ep = hw_ep;
	qh->hep->hcpriv = qh;
	if (idle)
		musb_start_urb(musb, is_in, qh);
	return 0;
}
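/*
 * Example of the best-fit search above: for a 64-byte interrupt qh, an
 * idle endpoint with a 512-byte FIFO (diff 448) is preferred over one with
 * a 4096-byte FIFO (diff 4032), keeping the large FIFOs free for transfers
 * that actually need them.
 */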
static int musb_urb_enqueue(
	struct usb_hcd			*hcd,
	struct urb			*urb,
	gfp_t				mem_flags)
{
	unsigned long			flags;
	struct musb			*musb = hcd_to_musb(hcd);
	struct usb_host_endpoint	*hep = urb->ep;
	struct musb_qh			*qh;
	struct usb_endpoint_descriptor	*epd = &hep->desc;
	int				ret;
	unsigned			type_reg;
	unsigned			interval;

	/* host role must be active */
	if (!is_host_active(musb) || !musb->is_active)
		return -ENODEV;

	trace_musb_urb_enq(musb, urb);

	spin_lock_irqsave(&musb->lock, flags);
	ret = usb_hcd_link_urb_to_ep(hcd, urb);
	qh = ret ? NULL : hep->hcpriv;
	if (qh)
		urb->hcpriv = qh;
	spin_unlock_irqrestore(&musb->lock, flags);

	/* DMA mapping was already done, if needed, and this urb is on
	 * hep->urb_list now ... so we're done, unless hep wasn't yet
	 * scheduled onto a live qh.
	 *
	 * REVISIT best to keep hep->hcpriv valid until the endpoint gets
	 * disabled, testing for empty qh->ring and avoiding qh setup costs
	 * except for the first urb queued after a config change.
	 */
	if (qh || ret)
		return ret;
	/* Allocate and initialize qh, minimizing the work done each time
	 * hw_ep gets reprogrammed, or with irqs blocked.  Then schedule it.
	 *
	 * REVISIT consider a dedicated qh kmem_cache, so it's harder
	 * for bugs in other kernel code to break this driver...
	 */
	qh = kzalloc(sizeof *qh, mem_flags);
	if (!qh) {
		spin_lock_irqsave(&musb->lock, flags);
		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&musb->lock, flags);
		return -ENOMEM;
	}

	qh->hep = hep;
	qh->dev = urb->dev;
	INIT_LIST_HEAD(&qh->ring);
	qh->is_ready = 1;
	qh->maxpacket = usb_endpoint_maxp(epd);
	qh->type = usb_endpoint_type(epd);

	/* Bits 11 & 12 of wMaxPacketSize encode high bandwidth multiplier.
	 * Some musb cores don't support high bandwidth ISO transfers; and
	 * we don't (yet!) support high bandwidth interrupt transfers.
	 */
	qh->hb_mult = usb_endpoint_maxp_mult(epd);
	if (qh->hb_mult > 1) {
		int ok = (qh->type == USB_ENDPOINT_XFER_ISOC);

		if (ok)
			ok = (usb_pipein(urb->pipe) && musb->hb_iso_rx)
				|| (usb_pipeout(urb->pipe) && musb->hb_iso_tx);
		if (!ok) {
			ret = -EMSGSIZE;
			goto done;
		}
		qh->maxpacket &= 0x7ff;
	}
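	/*
	 * For reference, the wMaxPacketSize layout that the two usbcore
	 * helpers above decode (USB 2.0 spec, table 9-13): bits 10..0
	 * carry the packet size and bits 12..11 the count of additional
	 * transactions per microframe, i.e. roughly:
	 *
	 *	maxp = wMaxPacketSize & 0x7ff;		  (usb_endpoint_maxp)
	 *	mult = 1 + ((wMaxPacketSize >> 11) & 0x3); (usb_endpoint_maxp_mult)
	 */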
	qh->epnum = usb_endpoint_num(epd);

	/* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
	qh->addr_reg = (u8) usb_pipedevice(urb->pipe);

	/* precompute rxtype/txtype/type0 register */
	type_reg = (qh->type << 4) | qh->epnum;
	switch (urb->dev->speed) {
	case USB_SPEED_LOW:
		type_reg |= 0xc0;
		break;
	case USB_SPEED_FULL:
		type_reg |= 0x80;
		break;
	default:
		type_reg |= 0x40;
	}
	qh->type_reg = type_reg;
	/* Precompute RXINTERVAL/TXINTERVAL register */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_INT:
		/*
		 * Full/low speeds use the linear encoding,
		 * high speed uses the logarithmic encoding.
		 */
		if (urb->dev->speed <= USB_SPEED_FULL) {
			interval = max_t(u8, epd->bInterval, 1);
			break;
		}
		/* FALLTHROUGH */
	case USB_ENDPOINT_XFER_ISOC:
		/* ISO always uses logarithmic encoding */
		interval = min_t(u8, epd->bInterval, 16);
		break;
	default:
		/* REVISIT we actually want to use NAK limits, hinting to the
		 * transfer scheduling logic to try some other qh, e.g. try
		 * for 2 msec first:
		 *
		 * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2;
		 *
		 * The downside of disabling this is that transfer scheduling
		 * gets VERY unfair for nonperiodic transfers; a misbehaving
		 * peripheral could make that hurt.  That's perfectly normal
		 * for reads from network or serial adapters ... so we have
		 * partial NAKlimit support for bulk RX.
		 *
		 * The upside of disabling it is simpler transfer scheduling.
		 */
		interval = 0;
	}
	qh->intv_reg = interval;
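	/*
	 * Worked examples for the two encodings above: a full speed
	 * interrupt endpoint with bInterval = 10 polls every 10 frames
	 * (10 ms, linear encoding), while a high speed interrupt or ISO
	 * endpoint with bInterval = 4 polls every 2^(4-1) = 8 microframes
	 * = 1 ms (logarithmic encoding).  Both land in the same
	 * RXINTERVAL/TXINTERVAL register; only the hardware's
	 * interpretation differs by speed and transfer type.
	 */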
	/* precompute addressing for external hub/tt ports */
	if (musb->is_multipoint) {
		struct usb_device	*parent = urb->dev->parent;

		if (parent != hcd->self.root_hub) {
			qh->h_addr_reg = (u8) parent->devnum;

			/* set up tt info if needed */
			if (urb->dev->tt) {
				qh->h_port_reg = (u8) urb->dev->ttport;
				if (urb->dev->tt->hub)
					qh->h_addr_reg =
						(u8) urb->dev->tt->hub->devnum;
				if (urb->dev->tt->multi)
					qh->h_addr_reg |= 0x80;
			}
		}
	}
	/* invariant: hep->hcpriv is null OR the qh that's already scheduled.
	 * until we get real dma queues (with an entry for each urb/buffer),
	 * we only have work to do in the former case.
	 */
	spin_lock_irqsave(&musb->lock, flags);
	if (hep->hcpriv || !next_urb(qh)) {
		/* some concurrent activity submitted another urb to hep...
		 * odd, rare, error prone, but legal.
		 */
		kfree(qh);
		qh = NULL;
		ret = 0;
	} else
		ret = musb_schedule(musb, qh,
				epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK);

	if (ret == 0) {
		urb->hcpriv = qh;
		/* FIXME set urb->start_frame for iso/intr, it's tested in
		 * musb_start_urb(), but otherwise only konicawc cares ...
		 */
	}
	spin_unlock_irqrestore(&musb->lock, flags);

done:
	if (ret != 0) {
		spin_lock_irqsave(&musb->lock, flags);
		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&musb->lock, flags);
		kfree(qh);
	}
	return ret;
}
/*
 * abort a transfer that's at the head of a hardware queue.
 * called with controller locked, irqs blocked
 * that hardware queue advances to the next transfer, unless prevented
 */
static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
{
	struct musb_hw_ep	*ep = qh->hw_ep;
	struct musb		*musb = ep->musb;
	void __iomem		*epio = ep->regs;
	unsigned		hw_end = ep->epnum;
	void __iomem		*regs = ep->musb->mregs;
	int			is_in = usb_pipein(urb->pipe);
	int			status = 0;
	u16			csr;
	struct dma_channel	*dma = NULL;

	musb_ep_select(regs, hw_end);

	if (is_dma_capable()) {
		dma = is_in ? ep->rx_channel : ep->tx_channel;
		if (dma) {
			status = ep->musb->dma_controller->channel_abort(dma);
			musb_dbg(musb, "abort %cX%d DMA for urb %p --> %d",
					is_in ? 'R' : 'T', ep->epnum,
					urb, status);
			urb->actual_length += dma->actual_len;
		}
	}

	/* turn off DMA requests, discard state, stop polling ... */
	if (ep->epnum && is_in) {
		/* giveback saves bulk toggle */
		csr = musb_h_flush_rxfifo(ep, 0);

		/* clear the endpoint's irq status here to avoid bogus irqs */
		if (is_dma_capable() && dma)
			musb_platform_clear_ep_rxintr(musb, ep->epnum);
	} else if (ep->epnum) {
		musb_h_tx_flush_fifo(ep);
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~(MUSB_TXCSR_AUTOSET
			| MUSB_TXCSR_DMAENAB
			| MUSB_TXCSR_H_RXSTALL
			| MUSB_TXCSR_H_NAKTIMEOUT
			| MUSB_TXCSR_H_ERROR
			| MUSB_TXCSR_TXPKTRDY);
		musb_writew(epio, MUSB_TXCSR, csr);
		/* REVISIT may need to clear FLUSHFIFO ... */
		musb_writew(epio, MUSB_TXCSR, csr);
		/* flush cpu writebuffer */
		csr = musb_readw(epio, MUSB_TXCSR);
	} else {
		musb_h_ep0_flush_fifo(ep);
	}
	if (status == 0)
		musb_advance_schedule(ep->musb, urb, ep, is_in);
	return status;
}
static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct musb		*musb = hcd_to_musb(hcd);
	struct musb_qh		*qh;
	unsigned long		flags;
	int			is_in = usb_pipein(urb->pipe);
	int			ret;

	trace_musb_urb_deq(musb, urb);

	spin_lock_irqsave(&musb->lock, flags);
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret)
		goto done;

	qh = urb->hcpriv;
	if (!qh)
		goto done;

	/*
	 * Any URB not actively programmed into endpoint hardware can be
	 * immediately given back; that's any URB not at the head of an
	 * endpoint queue, unless someday we get real DMA queues.  And even
	 * if it's at the head, it might not be known to the hardware...
	 *
	 * Otherwise abort current transfer, pending DMA, etc.; urb->status
	 * has already been updated.  This is a synchronous abort; it'd be
	 * OK to hold off until after some IRQ, though.
	 *
	 * NOTE: qh is invalid unless !list_empty(&hep->urb_list)
	 */
	if (!qh->is_ready
			|| urb->urb_list.prev != &qh->hep->urb_list
			|| musb_ep_get_qh(qh->hw_ep, is_in) != qh) {
		int	ready = qh->is_ready;

		qh->is_ready = 0;
		musb_giveback(musb, urb, 0);
		qh->is_ready = ready;

		/* If nothing else (usually musb_giveback) is using it
		 * and its URB list has emptied, recycle this qh.
		 */
		if (ready && list_empty(&qh->hep->urb_list)) {
			qh->hep->hcpriv = NULL;
			list_del(&qh->ring);
			kfree(qh);
		}
	} else
		ret = musb_cleanup_urb(urb, qh);
done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return ret;
}
/* disable an endpoint */
static void
musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
{
	u8			is_in = hep->desc.bEndpointAddress & USB_DIR_IN;
	unsigned long		flags;
	struct musb		*musb = hcd_to_musb(hcd);
	struct musb_qh		*qh;
	struct urb		*urb;

	spin_lock_irqsave(&musb->lock, flags);

	qh = hep->hcpriv;
	if (qh == NULL)
		goto exit;

	/* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */

	/* Kick the first URB off the hardware, if needed */
	qh->is_ready = 0;
	if (musb_ep_get_qh(qh->hw_ep, is_in) == qh) {
		urb = next_urb(qh);

		/* make software (then hardware) stop ASAP */
		if (!urb->unlinked)
			urb->status = -ESHUTDOWN;

		/* cleanup */
		musb_cleanup_urb(urb, qh);

		/* Then nuke all the others ... and advance the
		 * queue on hw_ep (e.g. bulk ring) when we're done.
		 */
		while (!list_empty(&hep->urb_list)) {
			urb = next_urb(qh);
			urb->status = -ESHUTDOWN;
			musb_advance_schedule(musb, urb, qh->hw_ep, is_in);
		}
	} else {
		/* Just empty the queue; the hardware is busy with
		 * other transfers, and since !qh->is_ready nothing
		 * will activate any of these as it advances.
		 */
		while (!list_empty(&hep->urb_list))
			musb_giveback(musb, next_urb(qh), -ESHUTDOWN);

		hep->hcpriv = NULL;
		list_del(&qh->ring);
		kfree(qh);
	}
exit:
	spin_unlock_irqrestore(&musb->lock, flags);
}
static int musb_h_get_frame_number(struct usb_hcd *hcd)
{
	struct musb	*musb = hcd_to_musb(hcd);

	return musb_readw(musb->mregs, MUSB_FRAME);
}
static int musb_h_start(struct usb_hcd *hcd)
{
	struct musb	*musb = hcd_to_musb(hcd);

	/* NOTE: musb_start() is called when the hub driver turns
	 * on port power, or when (OTG) peripheral starts.
	 */
	hcd->state = HC_STATE_RUNNING;
	musb->port1_status = 0;
	return 0;
}
static void musb_h_stop(struct usb_hcd *hcd)
{
	musb_stop(hcd_to_musb(hcd));
	hcd->state = HC_STATE_HALT;
}
static int musb_bus_suspend(struct usb_hcd *hcd)
{
	struct musb	*musb = hcd_to_musb(hcd);
	u8		devctl;

	musb_port_suspend(musb, true);

	if (!is_host_active(musb))
		return 0;

	switch (musb->xceiv->otg->state) {
	case OTG_STATE_A_SUSPEND:
		return 0;
	case OTG_STATE_A_WAIT_VRISE:
		/* ID could be grounded even if there's no device
		 * on the other end of the cable.  NOTE that the
		 * A_WAIT_VRISE timers are messy with MUSB...
		 */
		devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
		if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
			musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON;
		break;
	default:
		break;
	}

	if (musb->is_active) {
		WARNING("trying to suspend as %s while active\n",
				usb_otg_state_string(musb->xceiv->otg->state));
		return -EBUSY;
	} else
		return 0;
}
static int musb_bus_resume(struct usb_hcd *hcd)
{
	struct musb *musb = hcd_to_musb(hcd);

	if (musb->config &&
			musb->config->host_port_deassert_reset_at_resume)
		musb_port_reset(musb, false);

	return 0;
}
#ifndef CONFIG_MUSB_PIO_ONLY

#define MUSB_USB_DMA_ALIGN 4

struct musb_temp_buffer {
	void *kmalloc_ptr;
	void *old_xfer_buffer;
	u8 data[0];
};
static void musb_free_temp_buffer(struct urb *urb)
{
	enum dma_data_direction dir;
	struct musb_temp_buffer *temp;
	size_t length;

	if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
		return;

	dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	temp = container_of(urb->transfer_buffer, struct musb_temp_buffer,
			    data);

	if (dir == DMA_FROM_DEVICE) {
		if (usb_pipeisoc(urb->pipe))
			length = urb->transfer_buffer_length;
		else
			length = urb->actual_length;

		memcpy(temp->old_xfer_buffer, temp->data, length);
	}
	urb->transfer_buffer = temp->old_xfer_buffer;
	kfree(temp->kmalloc_ptr);

	urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
}
static int musb_alloc_temp_buffer(struct urb *urb, gfp_t mem_flags)
{
	enum dma_data_direction dir;
	struct musb_temp_buffer *temp;
	void *kmalloc_ptr;
	size_t kmalloc_size;

	if (urb->num_sgs || urb->sg ||
	    urb->transfer_buffer_length == 0 ||
	    !((uintptr_t)urb->transfer_buffer & (MUSB_USB_DMA_ALIGN - 1)))
		return 0;

	dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	/* Allocate a buffer with enough padding for alignment */
	kmalloc_size = urb->transfer_buffer_length +
		sizeof(struct musb_temp_buffer) + MUSB_USB_DMA_ALIGN - 1;

	kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
	if (!kmalloc_ptr)
		return -ENOMEM;

	/* Position our struct temp_buffer such that data is aligned */
	temp = PTR_ALIGN(kmalloc_ptr, MUSB_USB_DMA_ALIGN);

	temp->kmalloc_ptr = kmalloc_ptr;
	temp->old_xfer_buffer = urb->transfer_buffer;
	if (dir == DMA_TO_DEVICE)
		memcpy(temp->data, urb->transfer_buffer,
		       urb->transfer_buffer_length);
	urb->transfer_buffer = temp->data;

	urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;

	return 0;
}
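/*
 * Illustration of the bounce buffer layout built above.  With
 * MUSB_USB_DMA_ALIGN = 4, a transfer_buffer whose address ends in,
 * say, ...5 is misaligned (address & 3 != 0), so the URB is pointed
 * at temp->data inside one oversized allocation:
 *
 *	kmalloc_ptr -> | pad | kmalloc_ptr | old_xfer_buffer | data[] |
 *	                     ^ temp = PTR_ALIGN(kmalloc_ptr, 4)
 *
 * PTR_ALIGN() rounds the pointer up to the next multiple of 4; since
 * the struct body is two pointers (a multiple of 4 bytes on any
 * supported arch), data[] inherits the 4 byte alignment the DMA
 * engine needs.
 */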
static int musb_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
				gfp_t mem_flags)
{
	struct musb	*musb = hcd_to_musb(hcd);
	int ret;

	/*
	 * The DMA engine in RTL1.8 and above cannot handle
	 * DMA addresses that are not aligned to a 4 byte boundary.
	 * For such engines we implement these (un)map_urb_for_dma hooks.
	 * Do not use these hooks for RTL<1.8
	 */
	if (musb->hwvers < MUSB_HWVERS_1800)
		return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);

	ret = musb_alloc_temp_buffer(urb, mem_flags);
	if (ret)
		return ret;

	ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
	if (ret)
		musb_free_temp_buffer(urb);

	return ret;
}
static void musb_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
{
	struct musb	*musb = hcd_to_musb(hcd);

	usb_hcd_unmap_urb_for_dma(hcd, urb);

	/* Do not use this hook for RTL<1.8 (see description above) */
	if (musb->hwvers < MUSB_HWVERS_1800)
		return;

	musb_free_temp_buffer(urb);
}
#endif /* !CONFIG_MUSB_PIO_ONLY */
static const struct hc_driver musb_hc_driver = {
	.description		= "musb-hcd",
	.product_desc		= "MUSB HDRC host driver",
	.hcd_priv_size		= sizeof(struct musb *),
	.flags			= HCD_USB2 | HCD_MEMORY,

	/* not using irq handler or reset hooks from usbcore, since
	 * those must be shared with peripheral code for OTG configs
	 */

	.start			= musb_h_start,
	.stop			= musb_h_stop,

	.get_frame_number	= musb_h_get_frame_number,

	.urb_enqueue		= musb_urb_enqueue,
	.urb_dequeue		= musb_urb_dequeue,
	.endpoint_disable	= musb_h_disable,

#ifndef CONFIG_MUSB_PIO_ONLY
	.map_urb_for_dma	= musb_map_urb_for_dma,
	.unmap_urb_for_dma	= musb_unmap_urb_for_dma,
#endif

	.hub_status_data	= musb_hub_status_data,
	.hub_control		= musb_hub_control,
	.bus_suspend		= musb_bus_suspend,
	.bus_resume		= musb_bus_resume,
	/* .start_port_reset	= NULL, */
	/* .hub_irq_enable	= NULL, */
};
int musb_host_alloc(struct musb *musb)
{
	struct device	*dev = musb->controller;

	/* usbcore sets dev->driver_data to hcd, and sometimes uses that... */
	musb->hcd = usb_create_hcd(&musb_hc_driver, dev, dev_name(dev));
	if (!musb->hcd)
		return -EINVAL;

	*musb->hcd->hcd_priv = (unsigned long) musb;
	musb->hcd->self.uses_pio_for_control = 1;
	musb->hcd->uses_new_polling = 1;
	musb->hcd->has_tt = 1;

	return 0;
}
void musb_host_cleanup(struct musb *musb)
{
	if (musb->port_mode == MUSB_PORT_MODE_GADGET)
		return;
	usb_remove_hcd(musb->hcd);
}
void musb_host_free(struct musb *musb)
{
	usb_put_hcd(musb->hcd);
}
int musb_host_setup(struct musb *musb, int power_budget)
{
	int ret;
	struct usb_hcd *hcd = musb->hcd;

	if (musb->port_mode == MUSB_PORT_MODE_HOST) {
		MUSB_HST_MODE(musb);
		musb->xceiv->otg->default_a = 1;
		musb->xceiv->otg->state = OTG_STATE_A_IDLE;
	}
	otg_set_host(musb->xceiv->otg, &hcd->self);
	hcd->self.otg_port = 1;
	musb->xceiv->otg->host = &hcd->self;
	hcd->power_budget = 2 * (power_budget ? : 250);

	ret = usb_add_hcd(hcd, 0, 0);
	if (ret < 0)
		return ret;

	device_wakeup_enable(hcd->self.controller);
	return 0;
}
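/*
 * Note: "power_budget ? : 250" above uses the GNU C "x ? : y" shorthand
 * for "x ? x : y", so a platform passing 0 gets the 250 default.  The
 * doubling assumes the platform budget is expressed in 2 mA units,
 * giving usbcore a default budget of 500 mA -- a standard high power
 * USB 2.0 port.
 */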
void musb_host_resume_root_hub(struct musb *musb)
{
	usb_hcd_resume_root_hub(musb->hcd);
}
void musb_host_poke_root_hub(struct musb *musb)
{
	MUSB_HST_MODE(musb);
	if (musb->hcd->status_urb)
		usb_hcd_poll_rh_status(musb->hcd);
	else
		usb_hcd_resume_root_hub(musb->hcd);
}