#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/sizes.h>
#include <linux/platform_device.h>
#include <linux/of.h>

#include "cppi_dma.h"
#include "musb_core.h"
#include "musb_trace.h"

#define RNDIS_REG(x) (0x80 + ((x - 1) * 4))

#define EP_MODE_AUTOREQ_NONE		0
#define EP_MODE_AUTOREQ_ALL_NEOP	1
#define EP_MODE_AUTOREQ_ALWAYS		3

#define EP_MODE_DMA_TRANSPARENT		0
#define EP_MODE_DMA_RNDIS		1
#define EP_MODE_DMA_GEN_RNDIS		3

#define USB_CTRL_TX_MODE	0x70
#define USB_CTRL_RX_MODE	0x74
#define USB_CTRL_AUTOREQ	0xd0
#define USB_TDOWN		0xd8

#define MUSB_DMA_NUM_CHANNELS	15
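
/*
 * Per-controller state. Each hardware endpoint has one TX and one RX channel
 * slot; early_tx and early_tx_list back the workaround in
 * cppi41_dma_callback() for TX completions that are signalled before the
 * TXFIFO has actually drained.
 */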
struct cppi41_dma_controller {
	struct dma_controller controller;
	struct cppi41_dma_channel rx_channel[MUSB_DMA_NUM_CHANNELS];
	struct cppi41_dma_channel tx_channel[MUSB_DMA_NUM_CHANNELS];
	struct musb *musb;
	struct hrtimer early_tx;
	struct list_head early_tx_list;
	u32 rx_mode;
	u32 tx_mode;
	u32 auto_req;
};

static void save_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
{
	u16 csr;
	u8 toggle;

	if (cppi41_channel->is_tx)
		return;
	if (!is_host_active(cppi41_channel->controller->musb))
		return;

	csr = musb_readw(cppi41_channel->hw_ep->regs, MUSB_RXCSR);
	toggle = csr & MUSB_RXCSR_H_DATATOGGLE ? 1 : 0;

	cppi41_channel->usb_toggle = toggle;
}

static void update_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
{
	struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
	struct musb *musb = hw_ep->musb;
	u16 csr;
	u8 toggle;

	if (cppi41_channel->is_tx)
		return;
	if (!is_host_active(musb))
		return;

	musb_ep_select(musb->mregs, hw_ep->epnum);
	csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
	toggle = csr & MUSB_RXCSR_H_DATATOGGLE ? 1 : 0;

	/*
	 * AM335x Advisory 1.0.13: due to an internal synchronisation error,
	 * the data toggle may reset from DATA1 to DATA0 while receiving data
	 * from more than one endpoint.
	 */
	if (!toggle && toggle == cppi41_channel->usb_toggle) {
		csr |= MUSB_RXCSR_H_DATATOGGLE | MUSB_RXCSR_H_WR_DATATOGGLE;
		musb_writew(cppi41_channel->hw_ep->regs, MUSB_RXCSR, csr);
		musb_dbg(cppi41_channel->controller->musb,
				"Restoring DATA1 toggle.");
	}

	cppi41_channel->usb_toggle = toggle;
}
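
/* The TX FIFO counts as empty once MUSB_TXCSR_TXPKTRDY has cleared. */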
static bool musb_is_tx_fifo_empty(struct musb_hw_ep *hw_ep)
{
	u8 epnum = hw_ep->epnum;
	struct musb *musb = hw_ep->musb;
	void __iomem *epio = musb->endpoints[epnum].regs;
	u16 csr;

	musb_ep_select(musb->mregs, hw_ep->epnum);
	csr = musb_readw(epio, MUSB_TXCSR);
	if (csr & MUSB_TXCSR_TXPKTRDY)
		return false;
	return true;
}

static void cppi41_dma_callback(void *private_data);
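
/*
 * Complete the currently programmed chunk: either report the whole transfer
 * as done to the MUSB core (sending a ZLP by PIO if requested), or, for the
 * one-packet-at-a-time RX workaround, reprogram the dmaengine channel with
 * the next chunk and re-arm REQPKT in host mode.
 */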
static void cppi41_trans_done(struct cppi41_dma_channel *cppi41_channel)
{
	struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
	struct musb *musb = hw_ep->musb;
	void __iomem *epio = hw_ep->regs;
	u16 csr;

	if (!cppi41_channel->prog_len ||
	    (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)) {

		/* done, complete */
		cppi41_channel->channel.actual_len =
			cppi41_channel->transferred;
		cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
		cppi41_channel->channel.rx_packet_done = true;

		/*
		 * transmit ZLP using PIO mode for transfers whose size is a
		 * multiple of the EP packet size.
		 */
		if (cppi41_channel->tx_zlp && (cppi41_channel->transferred %
					cppi41_channel->packet_sz) == 0) {
			musb_ep_select(musb->mregs, hw_ep->epnum);
			csr = MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY;
			musb_writew(epio, MUSB_TXCSR, csr);
		}

		trace_musb_cppi41_done(cppi41_channel);
		musb_dma_completion(musb, hw_ep->epnum, cppi41_channel->is_tx);
	} else {
		/* next iteration, reload */
		struct dma_chan *dc = cppi41_channel->dc;
		struct dma_async_tx_descriptor *dma_desc;
		enum dma_transfer_direction direction;
		u32 remain_bytes;

		cppi41_channel->buf_addr += cppi41_channel->packet_sz;

		remain_bytes = cppi41_channel->total_len;
		remain_bytes -= cppi41_channel->transferred;
		remain_bytes = min(remain_bytes, cppi41_channel->packet_sz);
		cppi41_channel->prog_len = remain_bytes;

		direction = cppi41_channel->is_tx ? DMA_MEM_TO_DEV
			: DMA_DEV_TO_MEM;
		dma_desc = dmaengine_prep_slave_single(dc,
				cppi41_channel->buf_addr,
				remain_bytes,
				direction,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (WARN_ON(!dma_desc))
			return;

		dma_desc->callback = cppi41_dma_callback;
		dma_desc->callback_param = &cppi41_channel->channel;
		cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
		trace_musb_cppi41_cont(cppi41_channel);
		dma_async_issue_pending(dc);

		if (!cppi41_channel->is_tx) {
			musb_ep_select(musb->mregs, hw_ep->epnum);
			csr = musb_readw(epio, MUSB_RXCSR);
			csr |= MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR, csr);
		}
	}
}
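
/*
 * hrtimer handler for the early-TX workaround: complete every queued TX
 * channel whose FIFO has drained by now, and re-arm the timer while channels
 * are still waiting.
 */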
static enum hrtimer_restart cppi41_recheck_tx_req(struct hrtimer *timer)
{
	struct cppi41_dma_controller *controller;
	struct cppi41_dma_channel *cppi41_channel, *n;
	struct musb *musb;
	unsigned long flags;
	enum hrtimer_restart ret = HRTIMER_NORESTART;

	controller = container_of(timer, struct cppi41_dma_controller,
			early_tx);
	musb = controller->musb;

	spin_lock_irqsave(&musb->lock, flags);
	list_for_each_entry_safe(cppi41_channel, n, &controller->early_tx_list,
			tx_check) {
		bool empty;
		struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;

		empty = musb_is_tx_fifo_empty(hw_ep);
		if (empty) {
			list_del_init(&cppi41_channel->tx_check);
			cppi41_trans_done(cppi41_channel);
		}
	}

	if (!list_empty(&controller->early_tx_list) &&
	    !hrtimer_is_queued(&controller->early_tx)) {
		ret = HRTIMER_RESTART;
		hrtimer_forward_now(&controller->early_tx, 20 * NSEC_PER_USEC);
	}

	spin_unlock_irqrestore(&musb->lock, flags);
	return ret;
}
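
/*
 * dmaengine completion callback. Accounts the transferred bytes from the
 * reported residue, then either completes the transfer right away, spins
 * briefly for the TX FIFO to drain (high speed), or queues the channel on
 * early_tx_list and lets the hrtimer finish it (full speed).
 */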
static void cppi41_dma_callback(void *private_data)
{
	struct dma_channel *channel = private_data;
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
	struct cppi41_dma_controller *controller;
	struct musb *musb = hw_ep->musb;
	unsigned long flags;
	struct dma_tx_state txstate;
	u32 transferred;
	int is_hs = 0;
	bool empty;

	spin_lock_irqsave(&musb->lock, flags);

	dmaengine_tx_status(cppi41_channel->dc, cppi41_channel->cookie,
			&txstate);
	transferred = cppi41_channel->prog_len - txstate.residue;
	cppi41_channel->transferred += transferred;

	trace_musb_cppi41_gb(cppi41_channel);
	update_rx_toggle(cppi41_channel);

	if (cppi41_channel->transferred == cppi41_channel->total_len ||
			transferred < cppi41_channel->packet_sz)
		cppi41_channel->prog_len = 0;

	if (cppi41_channel->is_tx)
		empty = musb_is_tx_fifo_empty(hw_ep);

	if (!cppi41_channel->is_tx || empty) {
		cppi41_trans_done(cppi41_channel);
		goto out;
	}

	/*
	 * On AM335x it has been observed that the TX interrupt fires too
	 * early, i.e. the TXFIFO is not yet empty but the DMA engine says
	 * that it is done with the transfer. We don't receive a FIFO empty
	 * interrupt, so the only thing we can do is to poll for the bit. On
	 * HS it usually takes 2us, on FS around 110us - 150us depending on
	 * the transfer size. We spin on HS (no longer than 25us) and set up
	 * a timer on FS to check for the bit and complete the transfer.
	 */
	controller = cppi41_channel->controller;

	if (is_host_active(musb)) {
		if (musb->port1_status & USB_PORT_STAT_HIGH_SPEED)
			is_hs = 1;
	} else {
		if (musb->g.speed == USB_SPEED_HIGH)
			is_hs = 1;
	}
	if (is_hs) {
		unsigned wait = 25;

		do {
			empty = musb_is_tx_fifo_empty(hw_ep);
			if (empty) {
				cppi41_trans_done(cppi41_channel);
				goto out;
			}
			wait--;
			if (!wait)
				break;
			cpu_relax();
		} while (1);
	}
	list_add_tail(&cppi41_channel->tx_check,
			&controller->early_tx_list);
	if (!hrtimer_is_queued(&controller->early_tx)) {
		unsigned long usecs = cppi41_channel->total_len / 10;

		hrtimer_start_range_ns(&controller->early_tx,
				usecs * NSEC_PER_USEC,
				20 * NSEC_PER_USEC,
				HRTIMER_MODE_REL);
	}

out:
	spin_unlock_irqrestore(&musb->lock, flags);
}
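
/*
 * The TX/RX mode and AUTOREQ registers hold one 2-bit field per endpoint
 * (EP1 in bits [1:0], EP2 in bits [3:2], ...). update_ep_mode() rewrites
 * only the field of the given endpoint; e.g. setting EP2 to
 * EP_MODE_DMA_GEN_RNDIS (3) in an all-zero register yields 0xc.
 */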
static u32 update_ep_mode(unsigned ep, unsigned mode, u32 old)
{
	u32 shift;

	shift = (ep - 1) * 2;
	old &= ~(3 << shift);
	old |= mode << shift;

	return old;
}

static void cppi41_set_dma_mode(struct cppi41_dma_channel *cppi41_channel,
		unsigned mode)
{
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	u32 port;
	u32 new_mode;
	u32 old_mode;

	if (cppi41_channel->is_tx)
		old_mode = controller->tx_mode;
	else
		old_mode = controller->rx_mode;
	port = cppi41_channel->port_num;
	new_mode = update_ep_mode(port, mode, old_mode);

	if (new_mode == old_mode)
		return;
	if (cppi41_channel->is_tx) {
		controller->tx_mode = new_mode;
		musb_writel(controller->musb->ctrl_base, USB_CTRL_TX_MODE,
				new_mode);
	} else {
		controller->rx_mode = new_mode;
		musb_writel(controller->musb->ctrl_base, USB_CTRL_RX_MODE,
				new_mode);
	}
}

static void cppi41_set_autoreq_mode(struct cppi41_dma_channel *cppi41_channel,
		unsigned mode)
{
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	u32 port;
	u32 new_mode;
	u32 old_mode;

	old_mode = controller->auto_req;
	port = cppi41_channel->port_num;
	new_mode = update_ep_mode(port, mode, old_mode);

	if (new_mode == old_mode)
		return;
	controller->auto_req = new_mode;
	musb_writel(controller->musb->ctrl_base, USB_CTRL_AUTOREQ, new_mode);
}
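
/*
 * Program one transfer on a channel: TX transfers longer than one packet use
 * generic RNDIS mode with the total length latched in RNDIS_REG; everything
 * else runs in transparent mode with the programmed length capped at one
 * packet. A single dmaengine slave descriptor is then prepared and submitted.
 */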
static bool cppi41_configure_channel(struct dma_channel *channel,
				u16 packet_sz, u8 mode,
				dma_addr_t dma_addr, u32 len)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct dma_chan *dc = cppi41_channel->dc;
	struct dma_async_tx_descriptor *dma_desc;
	enum dma_transfer_direction direction;
	struct musb *musb = cppi41_channel->controller->musb;
	unsigned use_gen_rndis = 0;

	cppi41_channel->buf_addr = dma_addr;
	cppi41_channel->total_len = len;
	cppi41_channel->transferred = 0;
	cppi41_channel->packet_sz = packet_sz;
	cppi41_channel->tx_zlp = (cppi41_channel->is_tx && mode) ? 1 : 0;

	/*
	 * Due to AM335x Advisory 1.0.13 we are not allowed to transfer more
	 * than the max packet size at a time.
	 */
	if (cppi41_channel->is_tx)
		use_gen_rndis = 1;

	if (use_gen_rndis) {
		/* RNDIS mode */
		if (len > packet_sz) {
			musb_writel(musb->ctrl_base,
				RNDIS_REG(cppi41_channel->port_num), len);
			/* gen rndis */
			cppi41_set_dma_mode(cppi41_channel,
					EP_MODE_DMA_GEN_RNDIS);

			/* auto req */
			cppi41_set_autoreq_mode(cppi41_channel,
					EP_MODE_AUTOREQ_ALL_NEOP);
		} else {
			musb_writel(musb->ctrl_base,
					RNDIS_REG(cppi41_channel->port_num), 0);
			cppi41_set_dma_mode(cppi41_channel,
					EP_MODE_DMA_TRANSPARENT);
			cppi41_set_autoreq_mode(cppi41_channel,
					EP_MODE_AUTOREQ_NONE);
		}
	} else {
		/* fallback mode */
		cppi41_set_dma_mode(cppi41_channel, EP_MODE_DMA_TRANSPARENT);
		cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE);
		len = min_t(u32, packet_sz, len);
	}
	cppi41_channel->prog_len = len;
	direction = cppi41_channel->is_tx ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
	dma_desc = dmaengine_prep_slave_single(dc, dma_addr, len, direction,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma_desc)
		return false;

	dma_desc->callback = cppi41_dma_callback;
	dma_desc->callback_param = channel;
	cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
	cppi41_channel->channel.rx_packet_done = false;

	trace_musb_cppi41_config(cppi41_channel);

	save_rx_toggle(cppi41_channel);
	dma_async_issue_pending(dc);
	return true;
}
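
/*
 * musb dma_controller hooks. Channels are statically mapped: hw_ep N uses
 * tx_channel[N - 1] / rx_channel[N - 1], and allocation only succeeds when
 * the matching dmaengine channel was acquired at controller start.
 */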
static struct dma_channel *cppi41_dma_channel_allocate(struct dma_controller *c,
				struct musb_hw_ep *hw_ep, u8 is_tx)
{
	struct cppi41_dma_controller *controller = container_of(c,
			struct cppi41_dma_controller, controller);
	struct cppi41_dma_channel *cppi41_channel = NULL;
	u8 ch_num = hw_ep->epnum - 1;

	if (ch_num >= MUSB_DMA_NUM_CHANNELS)
		return NULL;

	if (is_tx)
		cppi41_channel = &controller->tx_channel[ch_num];
	else
		cppi41_channel = &controller->rx_channel[ch_num];

	if (!cppi41_channel->dc)
		return NULL;

	if (cppi41_channel->is_allocated)
		return NULL;

	cppi41_channel->hw_ep = hw_ep;
	cppi41_channel->is_allocated = 1;

	trace_musb_cppi41_alloc(cppi41_channel);
	return &cppi41_channel->channel;
}

static void cppi41_dma_channel_release(struct dma_channel *channel)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;

	trace_musb_cppi41_free(cppi41_channel);
	if (cppi41_channel->is_allocated) {
		cppi41_channel->is_allocated = 0;
		channel->status = MUSB_DMA_STATUS_FREE;
		channel->actual_len = 0;
	}
}

static int cppi41_dma_channel_program(struct dma_channel *channel,
				u16 packet_sz, u8 mode,
				dma_addr_t dma_addr, u32 len)
{
	int ret;
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	int hb_mult = 0;

	BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
		channel->status == MUSB_DMA_STATUS_BUSY);

	if (is_host_active(cppi41_channel->controller->musb)) {
		if (cppi41_channel->is_tx)
			hb_mult = cppi41_channel->hw_ep->out_qh->hb_mult;
		else
			hb_mult = cppi41_channel->hw_ep->in_qh->hb_mult;
	}

	channel->status = MUSB_DMA_STATUS_BUSY;
	channel->actual_len = 0;

	if (hb_mult)
		packet_sz = hb_mult * (packet_sz & 0x7FF);

	ret = cppi41_configure_channel(channel, packet_sz, mode, dma_addr, len);
	if (!ret)
		channel->status = MUSB_DMA_STATUS_FREE;

	return ret;
}

static int cppi41_is_compatible(struct dma_channel *channel, u16 maxpacket,
		void *buf, u32 length)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	struct musb *musb = controller->musb;

	if (is_host_active(musb)) {
		WARN_ON(1);
		return 1;
	}
	if (cppi41_channel->hw_ep->ep_in.type != USB_ENDPOINT_XFER_BULK)
		return 0;
	if (cppi41_channel->is_tx)
		return 1;
	/* AM335x Advisory 1.0.13. No workaround for device RX mode */
	return 0;
}

static int cppi41_dma_channel_abort(struct dma_channel *channel)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	struct musb *musb = controller->musb;
	void __iomem *epio = cppi41_channel->hw_ep->regs;
	int tdbit;
	int ret;
	unsigned is_tx;
	u16 csr;

	is_tx = cppi41_channel->is_tx;
	trace_musb_cppi41_abort(cppi41_channel);

	if (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)
		return 0;

	list_del_init(&cppi41_channel->tx_check);
	if (is_tx) {
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~MUSB_TXCSR_DMAENAB;
		musb_writew(epio, MUSB_TXCSR, csr);
	} else {
		cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE);

		/* delay to drain the cppi dma pipeline for isoch */
		udelay(250);

		csr = musb_readw(epio, MUSB_RXCSR);
		csr &= ~(MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_DMAENAB);
		musb_writew(epio, MUSB_RXCSR, csr);

		/* wait to drain the cppi dma pipeline */
		udelay(50);

		csr = musb_readw(epio, MUSB_RXCSR);
		if (csr & MUSB_RXCSR_RXPKTRDY) {
			csr |= MUSB_RXCSR_FLUSHFIFO;
			musb_writew(epio, MUSB_RXCSR, csr);
			musb_writew(epio, MUSB_RXCSR, csr);
		}
	}

	tdbit = 1 << cppi41_channel->port_num;
	if (is_tx)
		tdbit <<= 16;

	do {
		if (is_tx)
			musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);
		ret = dmaengine_terminate_all(cppi41_channel->dc);
	} while (ret == -EAGAIN);

	if (is_tx) {
		musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);

		csr = musb_readw(epio, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_TXPKTRDY) {
			csr |= MUSB_TXCSR_FLUSHFIFO;
			musb_writew(epio, MUSB_TXCSR, csr);
		}
	}

	cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;

	return 0;
}

static void cppi41_release_all_dma_chans(struct cppi41_dma_controller *ctrl)
{
	struct dma_chan *dc;
	int i;

	for (i = 0; i < MUSB_DMA_NUM_CHANNELS; i++) {
		dc = ctrl->tx_channel[i].dc;
		if (dc)
			dma_release_channel(dc);
		dc = ctrl->rx_channel[i].dc;
		if (dc)
			dma_release_channel(dc);
	}
}

static void cppi41_dma_controller_stop(struct cppi41_dma_controller *controller)
{
	cppi41_release_all_dma_chans(controller);
}
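
/*
 * Request all dmaengine channels listed in the parent device's "dma-names"
 * property. Names are parsed as a "tx"/"rx" prefix followed by a 1-based
 * port number, e.g. (as assumed in the DT binding) dma-names = "tx1", "rx1",
 * "tx2", "rx2", ...;
 */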
static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
{
	struct musb *musb = controller->musb;
	struct device *dev = musb->controller;
	struct device_node *np = dev->parent->of_node;
	struct cppi41_dma_channel *cppi41_channel;
	int count;
	int i;
	int ret;

	count = of_property_count_strings(np, "dma-names");
	if (count < 0)
		return count;

	for (i = 0; i < count; i++) {
		struct dma_chan *dc;
		struct dma_channel *musb_dma;
		const char *str;
		unsigned is_tx;
		unsigned int port;

		ret = of_property_read_string_index(np, "dma-names", i, &str);
		if (ret)
			goto err;
		if (strstarts(str, "tx"))
			is_tx = 1;
		else if (strstarts(str, "rx"))
			is_tx = 0;
		else {
			dev_err(dev, "Wrong dmatype %s\n", str);
			goto err;
		}
		ret = kstrtouint(str + 2, 0, &port);
		if (ret)
			goto err;

		ret = -EINVAL;
		if (port > MUSB_DMA_NUM_CHANNELS || !port)
			goto err;
		if (is_tx)
			cppi41_channel = &controller->tx_channel[port - 1];
		else
			cppi41_channel = &controller->rx_channel[port - 1];

		cppi41_channel->controller = controller;
		cppi41_channel->port_num = port;
		cppi41_channel->is_tx = is_tx;
		INIT_LIST_HEAD(&cppi41_channel->tx_check);

		musb_dma = &cppi41_channel->channel;
		musb_dma->private_data = cppi41_channel;
		musb_dma->status = MUSB_DMA_STATUS_FREE;
		musb_dma->max_len = SZ_4M;

		dc = dma_request_slave_channel(dev->parent, str);
		if (!dc) {
			dev_err(dev, "Failed to request %s.\n", str);
			ret = -EPROBE_DEFER;
			goto err;
		}
		cppi41_channel->dc = dc;
	}
	return 0;
err:
	cppi41_release_all_dma_chans(controller);
	return ret;
}

void cppi41_dma_controller_destroy(struct dma_controller *c)
{
	struct cppi41_dma_controller *controller = container_of(c,
			struct cppi41_dma_controller, controller);

	hrtimer_cancel(&controller->early_tx);
	cppi41_dma_controller_stop(controller);
	kfree(controller);
}
EXPORT_SYMBOL_GPL(cppi41_dma_controller_destroy);
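
/*
 * Create and start the controller. A device-tree node on the glue layer's
 * parent is required; -EPROBE_DEFER from channel setup is propagated as an
 * ERR_PTR so the caller can retry the probe later.
 */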
struct dma_controller *
cppi41_dma_controller_create(struct musb *musb, void __iomem *base)
{
	struct cppi41_dma_controller *controller;
	int ret = 0;

	if (!musb->controller->parent->of_node) {
		dev_err(musb->controller, "Need DT for the DMA engine.\n");
		return NULL;
	}

	controller = kzalloc(sizeof(*controller), GFP_KERNEL);
	if (!controller)
		goto kzalloc_fail;

	hrtimer_init(&controller->early_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	controller->early_tx.function = cppi41_recheck_tx_req;
	INIT_LIST_HEAD(&controller->early_tx_list);
	controller->musb = musb;

	controller->controller.channel_alloc = cppi41_dma_channel_allocate;
	controller->controller.channel_release = cppi41_dma_channel_release;
	controller->controller.channel_program = cppi41_dma_channel_program;
	controller->controller.channel_abort = cppi41_dma_channel_abort;
	controller->controller.is_compatible = cppi41_is_compatible;

	ret = cppi41_dma_controller_start(controller);
	if (ret)
		goto plat_get_fail;
	return &controller->controller;

plat_get_fail:
	kfree(controller);
kzalloc_fail:
	if (ret == -EPROBE_DEFER)
		return ERR_PTR(ret);
	return NULL;
}
EXPORT_SYMBOL_GPL(cppi41_dma_controller_create);