2 * Driver for the PLX NET2280 USB device controller.
3 * Specs and errata are available from <http://www.plxtech.com>.
5 * PLX Technology Inc. (formerly NetChip Technology) supported the
6 * development of this driver.
9 * CODE STATUS HIGHLIGHTS
11 * This driver should work well with most "gadget" drivers, including
12 * the Mass Storage, Serial, and Ethernet/RNDIS gadget drivers
13 * as well as Gadget Zero and Gadgetfs.
15 * DMA is enabled by default.
17 * MSI is enabled by default. The legacy IRQ is used if MSI couldn't
20 * Note that almost all the errata workarounds here are only needed for
21 * rev1 chips. Rev1a silicon (0110) fixes almost all of them.
25 * Copyright (C) 2003 David Brownell
26 * Copyright (C) 2003-2005 PLX Technology, Inc.
27 * Copyright (C) 2014 Ricardo Ribalda - Qtechnology/AS
29 * Modified Seth Levy 2005 PLX Technology, Inc. to provide compatibility
32 * Modified Ricardo Ribalda Qtechnology AS to provide compatibility
33 * with usb 338x chip. Based on PLX driver
35 * This program is free software; you can redistribute it and/or modify
36 * it under the terms of the GNU General Public License as published by
37 * the Free Software Foundation; either version 2 of the License, or
38 * (at your option) any later version.
41 #include <linux/module.h>
42 #include <linux/pci.h>
43 #include <linux/dma-mapping.h>
44 #include <linux/kernel.h>
45 #include <linux/delay.h>
46 #include <linux/ioport.h>
47 #include <linux/slab.h>
48 #include <linux/errno.h>
49 #include <linux/init.h>
50 #include <linux/timer.h>
51 #include <linux/list.h>
52 #include <linux/interrupt.h>
53 #include <linux/moduleparam.h>
54 #include <linux/device.h>
55 #include <linux/usb/ch9.h>
56 #include <linux/usb/gadget.h>
57 #include <linux/prefetch.h>
60 #include <asm/byteorder.h>
62 #include <asm/unaligned.h>
64 #define DRIVER_DESC "PLX NET228x/USB338x USB Peripheral Controller"
65 #define DRIVER_VERSION "2005 Sept 27/v3.0"
67 #define EP_DONTUSE 13 /* nonzero */
69 #define USE_RDK_LEDS /* GPIO pins control three LEDs */
72 static const char driver_name
[] = "net2280";
73 static const char driver_desc
[] = DRIVER_DESC
;
75 static const u32 ep_bit
[9] = { 0, 17, 2, 19, 4, 1, 18, 3, 20 };
76 static const char ep0name
[] = "ep0";
77 static const char *const ep_name
[] = {
79 "ep-a", "ep-b", "ep-c", "ep-d",
80 "ep-e", "ep-f", "ep-g", "ep-h",
83 /* Endpoint names for usb3380 advance mode */
84 static const char *const ep_name_adv
[] = {
86 "ep1in", "ep2out", "ep3in", "ep4out",
87 "ep1out", "ep2in", "ep3out", "ep4in",
90 /* mode 0 == ep-{a,b,c,d} 1K fifo each
91 * mode 1 == ep-{a,b} 2K fifo each, ep-{c,d} unavailable
92 * mode 2 == ep-a 2K fifo, ep-{b,c} 1K each, ep-d unavailable
94 static ushort fifo_mode
;
96 /* "modprobe net2280 fifo_mode=1" etc */
97 module_param(fifo_mode
, ushort
, 0644);
99 /* enable_suspend -- When enabled, the driver will respond to
100 * USB suspend requests by powering down the NET2280. Otherwise,
101 * USB suspend requests will be ignored. This is acceptable for
102 * self-powered devices
104 static bool enable_suspend
;
106 /* "modprobe net2280 enable_suspend=1" etc */
107 module_param(enable_suspend
, bool, 0444);
109 #define DIR_STRING(bAddress) (((bAddress) & USB_DIR_IN) ? "in" : "out")
111 static char *type_string(u8 bmAttributes
)
113 switch ((bmAttributes
) & USB_ENDPOINT_XFERTYPE_MASK
) {
114 case USB_ENDPOINT_XFER_BULK
: return "bulk";
115 case USB_ENDPOINT_XFER_ISOC
: return "iso";
116 case USB_ENDPOINT_XFER_INT
: return "intr";
123 #define valid_bit cpu_to_le32(BIT(VALID_BIT))
124 #define dma_done_ie cpu_to_le32(BIT(DMA_DONE_INTERRUPT_ENABLE))
126 static void ep_clear_seqnum(struct net2280_ep
*ep
);
128 /*-------------------------------------------------------------------------*/
129 static inline void enable_pciirqenb(struct net2280_ep
*ep
)
131 u32 tmp
= readl(&ep
->dev
->regs
->pciirqenb0
);
133 if (ep
->dev
->quirks
& PLX_LEGACY
)
136 tmp
|= BIT(ep_bit
[ep
->num
]);
137 writel(tmp
, &ep
->dev
->regs
->pciirqenb0
);
143 net2280_enable(struct usb_ep
*_ep
, const struct usb_endpoint_descriptor
*desc
)
146 struct net2280_ep
*ep
;
151 static const u32 ep_key
[9] = { 1, 0, 1, 0, 1, 1, 0, 1, 0 };
154 ep
= container_of(_ep
, struct net2280_ep
, ep
);
155 if (!_ep
|| !desc
|| ep
->desc
|| _ep
->name
== ep0name
||
156 desc
->bDescriptorType
!= USB_DT_ENDPOINT
) {
157 pr_err("%s: failed at line=%d\n", __func__
, __LINE__
);
161 if (!dev
->driver
|| dev
->gadget
.speed
== USB_SPEED_UNKNOWN
) {
166 /* erratum 0119 workaround ties up an endpoint number */
167 if ((desc
->bEndpointAddress
& 0x0f) == EP_DONTUSE
) {
172 if (dev
->quirks
& PLX_SUPERSPEED
) {
173 if ((desc
->bEndpointAddress
& 0x0f) >= 0x0c) {
177 ep
->is_in
= !!usb_endpoint_dir_in(desc
);
178 if (dev
->enhanced_mode
&& ep
->is_in
&& ep_key
[ep
->num
]) {
184 /* sanity check ep-e/ep-f since their fifos are small */
185 max
= usb_endpoint_maxp(desc
) & 0x1fff;
186 if (ep
->num
> 4 && max
> 64 && (dev
->quirks
& PLX_LEGACY
)) {
191 spin_lock_irqsave(&dev
->lock
, flags
);
192 _ep
->maxpacket
= max
& 0x7ff;
195 /* ep_reset() has already been called */
198 ep
->out_overflow
= 0;
200 /* set speed-dependent max packet; may kick in high bandwidth */
201 set_max_speed(ep
, max
);
203 /* set type, direction, address; reset fifo counters */
204 writel(BIT(FIFO_FLUSH
), &ep
->regs
->ep_stat
);
206 if ((dev
->quirks
& PLX_SUPERSPEED
) && dev
->enhanced_mode
) {
207 tmp
= readl(&ep
->cfg
->ep_cfg
);
208 /* If USB ep number doesn't match hardware ep number */
209 if ((tmp
& 0xf) != usb_endpoint_num(desc
)) {
211 spin_unlock_irqrestore(&dev
->lock
, flags
);
215 tmp
&= ~USB3380_EP_CFG_MASK_IN
;
217 tmp
&= ~USB3380_EP_CFG_MASK_OUT
;
219 type
= (desc
->bmAttributes
& USB_ENDPOINT_XFERTYPE_MASK
);
220 if (type
== USB_ENDPOINT_XFER_INT
) {
221 /* erratum 0105 workaround prevents hs NYET */
222 if (dev
->chiprev
== 0100 &&
223 dev
->gadget
.speed
== USB_SPEED_HIGH
&&
224 !(desc
->bEndpointAddress
& USB_DIR_IN
))
225 writel(BIT(CLEAR_NAK_OUT_PACKETS_MODE
),
227 } else if (type
== USB_ENDPOINT_XFER_BULK
) {
228 /* catch some particularly blatant driver bugs */
229 if ((dev
->gadget
.speed
== USB_SPEED_SUPER
&& max
!= 1024) ||
230 (dev
->gadget
.speed
== USB_SPEED_HIGH
&& max
!= 512) ||
231 (dev
->gadget
.speed
== USB_SPEED_FULL
&& max
> 64)) {
232 spin_unlock_irqrestore(&dev
->lock
, flags
);
237 ep
->is_iso
= (type
== USB_ENDPOINT_XFER_ISOC
);
238 /* Enable this endpoint */
239 if (dev
->quirks
& PLX_LEGACY
) {
240 tmp
|= type
<< ENDPOINT_TYPE
;
241 tmp
|= desc
->bEndpointAddress
;
242 /* default full fifo lines */
243 tmp
|= (4 << ENDPOINT_BYTE_COUNT
);
244 tmp
|= BIT(ENDPOINT_ENABLE
);
245 ep
->is_in
= (tmp
& USB_DIR_IN
) != 0;
247 /* In Legacy mode, only OUT endpoints are used */
248 if (dev
->enhanced_mode
&& ep
->is_in
) {
249 tmp
|= type
<< IN_ENDPOINT_TYPE
;
250 tmp
|= BIT(IN_ENDPOINT_ENABLE
);
252 tmp
|= type
<< OUT_ENDPOINT_TYPE
;
253 tmp
|= BIT(OUT_ENDPOINT_ENABLE
);
254 tmp
|= (ep
->is_in
<< ENDPOINT_DIRECTION
);
257 tmp
|= (4 << ENDPOINT_BYTE_COUNT
);
258 if (!dev
->enhanced_mode
)
259 tmp
|= usb_endpoint_num(desc
);
260 tmp
|= (ep
->ep
.maxburst
<< MAX_BURST_SIZE
);
263 /* Make sure all the registers are written before ep_rsp*/
266 /* for OUT transfers, block the rx fifo until a read is posted */
268 writel(BIT(SET_NAK_OUT_PACKETS
), &ep
->regs
->ep_rsp
);
269 else if (!(dev
->quirks
& PLX_2280
)) {
270 /* Added for 2282, Don't use nak packets on an in endpoint,
271 * this was ignored on 2280
273 writel(BIT(CLEAR_NAK_OUT_PACKETS
) |
274 BIT(CLEAR_NAK_OUT_PACKETS_MODE
), &ep
->regs
->ep_rsp
);
277 if (dev
->quirks
& PLX_SUPERSPEED
)
279 writel(tmp
, &ep
->cfg
->ep_cfg
);
282 if (!ep
->dma
) { /* pio, per-packet */
283 enable_pciirqenb(ep
);
285 tmp
= BIT(DATA_PACKET_RECEIVED_INTERRUPT_ENABLE
) |
286 BIT(DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE
);
287 if (dev
->quirks
& PLX_2280
)
288 tmp
|= readl(&ep
->regs
->ep_irqenb
);
289 writel(tmp
, &ep
->regs
->ep_irqenb
);
290 } else { /* dma, per-request */
291 tmp
= BIT((8 + ep
->num
)); /* completion */
292 tmp
|= readl(&dev
->regs
->pciirqenb1
);
293 writel(tmp
, &dev
->regs
->pciirqenb1
);
295 /* for short OUT transfers, dma completions can't
296 * advance the queue; do it pio-style, by hand.
297 * NOTE erratum 0112 workaround #2
299 if ((desc
->bEndpointAddress
& USB_DIR_IN
) == 0) {
300 tmp
= BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT_ENABLE
);
301 writel(tmp
, &ep
->regs
->ep_irqenb
);
303 enable_pciirqenb(ep
);
307 tmp
= desc
->bEndpointAddress
;
308 ep_dbg(dev
, "enabled %s (ep%d%s-%s) %s max %04x\n",
309 _ep
->name
, tmp
& 0x0f, DIR_STRING(tmp
),
310 type_string(desc
->bmAttributes
),
311 ep
->dma
? "dma" : "pio", max
);
313 /* pci writes may still be posted */
314 spin_unlock_irqrestore(&dev
->lock
, flags
);
318 dev_err(&ep
->dev
->pdev
->dev
, "%s: error=%d\n", __func__
, ret
);
322 static int handshake(u32 __iomem
*ptr
, u32 mask
, u32 done
, int usec
)
328 if (result
== ~(u32
)0) /* "device unplugged" */
339 static const struct usb_ep_ops net2280_ep_ops
;
341 static void ep_reset_228x(struct net2280_regs __iomem
*regs
,
342 struct net2280_ep
*ep
)
347 INIT_LIST_HEAD(&ep
->queue
);
349 usb_ep_set_maxpacket_limit(&ep
->ep
, ~0);
350 ep
->ep
.ops
= &net2280_ep_ops
;
352 /* disable the dma, irqs, endpoint... */
354 writel(0, &ep
->dma
->dmactl
);
355 writel(BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT
) |
356 BIT(DMA_TRANSACTION_DONE_INTERRUPT
) |
360 tmp
= readl(®s
->pciirqenb0
);
361 tmp
&= ~BIT(ep
->num
);
362 writel(tmp
, ®s
->pciirqenb0
);
364 tmp
= readl(®s
->pciirqenb1
);
365 tmp
&= ~BIT((8 + ep
->num
)); /* completion */
366 writel(tmp
, ®s
->pciirqenb1
);
368 writel(0, &ep
->regs
->ep_irqenb
);
370 /* init to our chosen defaults, notably so that we NAK OUT
371 * packets until the driver queues a read (+note erratum 0112)
373 if (!ep
->is_in
|| (ep
->dev
->quirks
& PLX_2280
)) {
374 tmp
= BIT(SET_NAK_OUT_PACKETS_MODE
) |
375 BIT(SET_NAK_OUT_PACKETS
) |
376 BIT(CLEAR_EP_HIDE_STATUS_PHASE
) |
377 BIT(CLEAR_INTERRUPT_MODE
);
380 tmp
= BIT(CLEAR_NAK_OUT_PACKETS_MODE
) |
381 BIT(CLEAR_NAK_OUT_PACKETS
) |
382 BIT(CLEAR_EP_HIDE_STATUS_PHASE
) |
383 BIT(CLEAR_INTERRUPT_MODE
);
387 tmp
|= BIT(CLEAR_ENDPOINT_TOGGLE
) |
388 BIT(CLEAR_ENDPOINT_HALT
);
390 writel(tmp
, &ep
->regs
->ep_rsp
);
392 /* scrub most status bits, and flush any fifo state */
393 if (ep
->dev
->quirks
& PLX_2280
)
394 tmp
= BIT(FIFO_OVERFLOW
) |
399 writel(tmp
| BIT(TIMEOUT
) |
400 BIT(USB_STALL_SENT
) |
401 BIT(USB_IN_NAK_SENT
) |
402 BIT(USB_IN_ACK_RCVD
) |
403 BIT(USB_OUT_PING_NAK_SENT
) |
404 BIT(USB_OUT_ACK_SENT
) |
406 BIT(SHORT_PACKET_OUT_DONE_INTERRUPT
) |
407 BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT
) |
408 BIT(DATA_PACKET_RECEIVED_INTERRUPT
) |
409 BIT(DATA_PACKET_TRANSMITTED_INTERRUPT
) |
410 BIT(DATA_OUT_PING_TOKEN_INTERRUPT
) |
411 BIT(DATA_IN_TOKEN_INTERRUPT
),
414 /* fifo size is handled separately */
417 static void ep_reset_338x(struct net2280_regs __iomem
*regs
,
418 struct net2280_ep
*ep
)
423 INIT_LIST_HEAD(&ep
->queue
);
425 usb_ep_set_maxpacket_limit(&ep
->ep
, ~0);
426 ep
->ep
.ops
= &net2280_ep_ops
;
428 /* disable the dma, irqs, endpoint... */
430 writel(0, &ep
->dma
->dmactl
);
431 writel(BIT(DMA_ABORT_DONE_INTERRUPT
) |
432 BIT(DMA_PAUSE_DONE_INTERRUPT
) |
433 BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT
) |
434 BIT(DMA_TRANSACTION_DONE_INTERRUPT
),
435 /* | BIT(DMA_ABORT), */
438 dmastat
= readl(&ep
->dma
->dmastat
);
439 if (dmastat
== 0x5002) {
440 ep_warn(ep
->dev
, "The dmastat return = %x!!\n",
442 writel(0x5a, &ep
->dma
->dmastat
);
445 tmp
= readl(®s
->pciirqenb0
);
446 tmp
&= ~BIT(ep_bit
[ep
->num
]);
447 writel(tmp
, ®s
->pciirqenb0
);
450 tmp
= readl(®s
->pciirqenb1
);
451 tmp
&= ~BIT((8 + ep
->num
)); /* completion */
452 writel(tmp
, ®s
->pciirqenb1
);
455 writel(0, &ep
->regs
->ep_irqenb
);
457 writel(BIT(SHORT_PACKET_OUT_DONE_INTERRUPT
) |
458 BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT
) |
460 BIT(DATA_PACKET_RECEIVED_INTERRUPT
) |
461 BIT(DATA_PACKET_TRANSMITTED_INTERRUPT
) |
462 BIT(DATA_OUT_PING_TOKEN_INTERRUPT
) |
463 BIT(DATA_IN_TOKEN_INTERRUPT
), &ep
->regs
->ep_stat
);
465 tmp
= readl(&ep
->cfg
->ep_cfg
);
467 tmp
&= ~USB3380_EP_CFG_MASK_IN
;
469 tmp
&= ~USB3380_EP_CFG_MASK_OUT
;
470 writel(tmp
, &ep
->cfg
->ep_cfg
);
473 static void nuke(struct net2280_ep
*);
475 static int net2280_disable(struct usb_ep
*_ep
)
477 struct net2280_ep
*ep
;
480 ep
= container_of(_ep
, struct net2280_ep
, ep
);
481 if (!_ep
|| !ep
->desc
|| _ep
->name
== ep0name
) {
482 pr_err("%s: Invalid ep=%p or ep->desc\n", __func__
, _ep
);
485 spin_lock_irqsave(&ep
->dev
->lock
, flags
);
488 if (ep
->dev
->quirks
& PLX_SUPERSPEED
)
489 ep_reset_338x(ep
->dev
->regs
, ep
);
491 ep_reset_228x(ep
->dev
->regs
, ep
);
493 ep_vdbg(ep
->dev
, "disabled %s %s\n",
494 ep
->dma
? "dma" : "pio", _ep
->name
);
496 /* synch memory views with the device */
497 (void)readl(&ep
->cfg
->ep_cfg
);
499 if (!ep
->dma
&& ep
->num
>= 1 && ep
->num
<= 4)
500 ep
->dma
= &ep
->dev
->dma
[ep
->num
- 1];
502 spin_unlock_irqrestore(&ep
->dev
->lock
, flags
);
506 /*-------------------------------------------------------------------------*/
508 static struct usb_request
509 *net2280_alloc_request(struct usb_ep
*_ep
, gfp_t gfp_flags
)
511 struct net2280_ep
*ep
;
512 struct net2280_request
*req
;
515 pr_err("%s: Invalid ep\n", __func__
);
518 ep
= container_of(_ep
, struct net2280_ep
, ep
);
520 req
= kzalloc(sizeof(*req
), gfp_flags
);
524 INIT_LIST_HEAD(&req
->queue
);
526 /* this dma descriptor may be swapped with the previous dummy */
528 struct net2280_dma
*td
;
530 td
= pci_pool_alloc(ep
->dev
->requests
, gfp_flags
,
536 td
->dmacount
= 0; /* not VALID */
537 td
->dmadesc
= td
->dmaaddr
;
543 static void net2280_free_request(struct usb_ep
*_ep
, struct usb_request
*_req
)
545 struct net2280_ep
*ep
;
546 struct net2280_request
*req
;
548 ep
= container_of(_ep
, struct net2280_ep
, ep
);
550 dev_err(&ep
->dev
->pdev
->dev
, "%s: Inavlid ep=%p or req=%p\n",
551 __func__
, _ep
, _req
);
555 req
= container_of(_req
, struct net2280_request
, req
);
556 WARN_ON(!list_empty(&req
->queue
));
558 pci_pool_free(ep
->dev
->requests
, req
->td
, req
->td_dma
);
562 /*-------------------------------------------------------------------------*/
564 /* load a packet into the fifo we use for usb IN transfers.
565 * works for all endpoints.
567 * NOTE: pio with ep-a..ep-d could stuff multiple packets into the fifo
568 * at a time, but this code is simpler because it knows it only writes
569 * one packet. ep-a..ep-d should use dma instead.
571 static void write_fifo(struct net2280_ep
*ep
, struct usb_request
*req
)
573 struct net2280_ep_regs __iomem
*regs
= ep
->regs
;
576 unsigned count
, total
;
578 /* INVARIANT: fifo is currently empty. (testable) */
581 buf
= req
->buf
+ req
->actual
;
583 total
= req
->length
- req
->actual
;
589 /* write just one packet at a time */
590 count
= ep
->ep
.maxpacket
;
591 if (count
> total
) /* min() cannot be used on a bitfield */
594 ep_vdbg(ep
->dev
, "write %s fifo (IN) %d bytes%s req %p\n",
596 (count
!= ep
->ep
.maxpacket
) ? " (short)" : "",
599 /* NOTE be careful if you try to align these. fifo lines
600 * should normally be full (4 bytes) and successive partial
601 * lines are ok only in certain cases.
603 tmp
= get_unaligned((u32
*)buf
);
605 writel(tmp
, ®s
->ep_data
);
610 /* last fifo entry is "short" unless we wrote a full packet.
611 * also explicitly validate last word in (periodic) transfers
612 * when maxpacket is not a multiple of 4 bytes.
614 if (count
|| total
< ep
->ep
.maxpacket
) {
615 tmp
= count
? get_unaligned((u32
*)buf
) : count
;
617 set_fifo_bytecount(ep
, count
& 0x03);
618 writel(tmp
, ®s
->ep_data
);
621 /* pci writes may still be posted */
624 /* work around erratum 0106: PCI and USB race over the OUT fifo.
625 * caller guarantees chiprev 0100, out endpoint is NAKing, and
626 * there's no real data in the fifo.
628 * NOTE: also used in cases where that erratum doesn't apply:
629 * where the host wrote "too much" data to us.
631 static void out_flush(struct net2280_ep
*ep
)
636 statp
= &ep
->regs
->ep_stat
;
639 if (tmp
& BIT(NAK_OUT_PACKETS
)) {
640 ep_dbg(ep
->dev
, "%s %s %08x !NAK\n",
641 ep
->ep
.name
, __func__
, tmp
);
642 writel(BIT(SET_NAK_OUT_PACKETS
), &ep
->regs
->ep_rsp
);
645 writel(BIT(DATA_OUT_PING_TOKEN_INTERRUPT
) |
646 BIT(DATA_PACKET_RECEIVED_INTERRUPT
),
648 writel(BIT(FIFO_FLUSH
), statp
);
649 /* Make sure that stap is written */
652 if (tmp
& BIT(DATA_OUT_PING_TOKEN_INTERRUPT
) &&
653 /* high speed did bulk NYET; fifo isn't filling */
654 ep
->dev
->gadget
.speed
== USB_SPEED_FULL
) {
657 usec
= 50; /* 64 byte bulk/interrupt */
658 handshake(statp
, BIT(USB_OUT_PING_NAK_SENT
),
659 BIT(USB_OUT_PING_NAK_SENT
), usec
);
660 /* NAK done; now CLEAR_NAK_OUT_PACKETS is safe */
664 /* unload packet(s) from the fifo we use for usb OUT transfers.
665 * returns true iff the request completed, because of short packet
666 * or the request buffer having filled with full packets.
668 * for ep-a..ep-d this will read multiple packets out when they
669 * have been accepted.
671 static int read_fifo(struct net2280_ep
*ep
, struct net2280_request
*req
)
673 struct net2280_ep_regs __iomem
*regs
= ep
->regs
;
674 u8
*buf
= req
->req
.buf
+ req
->req
.actual
;
675 unsigned count
, tmp
, is_short
;
676 unsigned cleanup
= 0, prevent
= 0;
678 /* erratum 0106 ... packets coming in during fifo reads might
679 * be incompletely rejected. not all cases have workarounds.
681 if (ep
->dev
->chiprev
== 0x0100 &&
682 ep
->dev
->gadget
.speed
== USB_SPEED_FULL
) {
684 tmp
= readl(&ep
->regs
->ep_stat
);
685 if ((tmp
& BIT(NAK_OUT_PACKETS
)))
687 else if ((tmp
& BIT(FIFO_FULL
))) {
688 start_out_naking(ep
);
691 /* else: hope we don't see the problem */
694 /* never overflow the rx buffer. the fifo reads packets until
695 * it sees a short one; we might not be ready for them all.
698 count
= readl(®s
->ep_avail
);
699 if (unlikely(count
== 0)) {
701 tmp
= readl(&ep
->regs
->ep_stat
);
702 count
= readl(®s
->ep_avail
);
703 /* handled that data already? */
704 if (count
== 0 && (tmp
& BIT(NAK_OUT_PACKETS
)) == 0)
708 tmp
= req
->req
.length
- req
->req
.actual
;
710 /* as with DMA, data overflow gets flushed */
711 if ((tmp
% ep
->ep
.maxpacket
) != 0) {
713 "%s out fifo %d bytes, expected %d\n",
714 ep
->ep
.name
, count
, tmp
);
715 req
->req
.status
= -EOVERFLOW
;
717 /* NAK_OUT_PACKETS will be set, so flushing is safe;
718 * the next read will start with the next packet
720 } /* else it's a ZLP, no worries */
723 req
->req
.actual
+= count
;
725 is_short
= (count
== 0) || ((count
% ep
->ep
.maxpacket
) != 0);
727 ep_vdbg(ep
->dev
, "read %s fifo (OUT) %d bytes%s%s%s req %p %d/%d\n",
728 ep
->ep
.name
, count
, is_short
? " (short)" : "",
729 cleanup
? " flush" : "", prevent
? " nak" : "",
730 req
, req
->req
.actual
, req
->req
.length
);
733 tmp
= readl(®s
->ep_data
);
735 put_unaligned(tmp
, (u32
*)buf
);
740 tmp
= readl(®s
->ep_data
);
741 /* LE conversion is implicit here: */
750 writel(BIT(CLEAR_NAK_OUT_PACKETS
), &ep
->regs
->ep_rsp
);
751 (void) readl(&ep
->regs
->ep_rsp
);
754 return is_short
|| ((req
->req
.actual
== req
->req
.length
) &&
758 /* fill out dma descriptor to match a given request */
759 static void fill_dma_desc(struct net2280_ep
*ep
,
760 struct net2280_request
*req
, int valid
)
762 struct net2280_dma
*td
= req
->td
;
763 u32 dmacount
= req
->req
.length
;
765 /* don't let DMA continue after a short OUT packet,
766 * so overruns can't affect the next transfer.
767 * in case of overruns on max-size packets, we can't
768 * stop the fifo from filling but we can flush it.
771 dmacount
|= BIT(DMA_DIRECTION
);
772 if ((!ep
->is_in
&& (dmacount
% ep
->ep
.maxpacket
) != 0) ||
773 !(ep
->dev
->quirks
& PLX_2280
))
774 dmacount
|= BIT(END_OF_CHAIN
);
778 dmacount
|= BIT(VALID_BIT
);
779 dmacount
|= BIT(DMA_DONE_INTERRUPT_ENABLE
);
781 /* td->dmadesc = previously set by caller */
782 td
->dmaaddr
= cpu_to_le32 (req
->req
.dma
);
784 /* 2280 may be polling VALID_BIT through ep->dma->dmadesc */
786 td
->dmacount
= cpu_to_le32(dmacount
);
789 static const u32 dmactl_default
=
790 BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT
) |
791 BIT(DMA_CLEAR_COUNT_ENABLE
) |
792 /* erratum 0116 workaround part 1 (use POLLING) */
793 (POLL_100_USEC
<< DESCRIPTOR_POLLING_RATE
) |
794 BIT(DMA_VALID_BIT_POLLING_ENABLE
) |
795 BIT(DMA_VALID_BIT_ENABLE
) |
796 BIT(DMA_SCATTER_GATHER_ENABLE
) |
797 /* erratum 0116 workaround part 2 (no AUTOSTART) */
800 static inline void spin_stop_dma(struct net2280_dma_regs __iomem
*dma
)
802 handshake(&dma
->dmactl
, BIT(DMA_ENABLE
), 0, 50);
805 static inline void stop_dma(struct net2280_dma_regs __iomem
*dma
)
807 writel(readl(&dma
->dmactl
) & ~BIT(DMA_ENABLE
), &dma
->dmactl
);
811 static void start_queue(struct net2280_ep
*ep
, u32 dmactl
, u32 td_dma
)
813 struct net2280_dma_regs __iomem
*dma
= ep
->dma
;
814 unsigned int tmp
= BIT(VALID_BIT
) | (ep
->is_in
<< DMA_DIRECTION
);
816 if (!(ep
->dev
->quirks
& PLX_2280
))
817 tmp
|= BIT(END_OF_CHAIN
);
819 writel(tmp
, &dma
->dmacount
);
820 writel(readl(&dma
->dmastat
), &dma
->dmastat
);
822 writel(td_dma
, &dma
->dmadesc
);
823 if (ep
->dev
->quirks
& PLX_SUPERSPEED
)
824 dmactl
|= BIT(DMA_REQUEST_OUTSTANDING
);
825 writel(dmactl
, &dma
->dmactl
);
827 /* erratum 0116 workaround part 3: pci arbiter away from net2280 */
828 (void) readl(&ep
->dev
->pci
->pcimstctl
);
830 writel(BIT(DMA_START
), &dma
->dmastat
);
836 static void start_dma(struct net2280_ep
*ep
, struct net2280_request
*req
)
839 struct net2280_dma_regs __iomem
*dma
= ep
->dma
;
841 /* FIXME can't use DMA for ZLPs */
843 /* on this path we "know" there's no dma active (yet) */
844 WARN_ON(readl(&dma
->dmactl
) & BIT(DMA_ENABLE
));
845 writel(0, &ep
->dma
->dmactl
);
847 /* previous OUT packet might have been short */
848 if (!ep
->is_in
&& (readl(&ep
->regs
->ep_stat
) &
849 BIT(NAK_OUT_PACKETS
))) {
850 writel(BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT
),
853 tmp
= readl(&ep
->regs
->ep_avail
);
855 writel(readl(&dma
->dmastat
), &dma
->dmastat
);
857 /* transfer all/some fifo data */
858 writel(req
->req
.dma
, &dma
->dmaaddr
);
859 tmp
= min(tmp
, req
->req
.length
);
861 /* dma irq, faking scatterlist status */
862 req
->td
->dmacount
= cpu_to_le32(req
->req
.length
- tmp
);
863 writel(BIT(DMA_DONE_INTERRUPT_ENABLE
) | tmp
,
865 req
->td
->dmadesc
= 0;
868 writel(BIT(DMA_ENABLE
), &dma
->dmactl
);
869 writel(BIT(DMA_START
), &dma
->dmastat
);
874 tmp
= dmactl_default
;
876 /* force packet boundaries between dma requests, but prevent the
877 * controller from automagically writing a last "short" packet
878 * (zero length) unless the driver explicitly said to do that.
881 if (likely((req
->req
.length
% ep
->ep
.maxpacket
) ||
883 tmp
|= BIT(DMA_FIFO_VALIDATE
);
884 ep
->in_fifo_validate
= 1;
886 ep
->in_fifo_validate
= 0;
889 /* init req->td, pointing to the current dummy */
890 req
->td
->dmadesc
= cpu_to_le32 (ep
->td_dma
);
891 fill_dma_desc(ep
, req
, 1);
893 req
->td
->dmacount
|= cpu_to_le32(BIT(END_OF_CHAIN
));
895 start_queue(ep
, tmp
, req
->td_dma
);
899 queue_dma(struct net2280_ep
*ep
, struct net2280_request
*req
, int valid
)
901 struct net2280_dma
*end
;
904 /* swap new dummy for old, link; fill and maybe activate */
910 ep
->td_dma
= req
->td_dma
;
913 end
->dmadesc
= cpu_to_le32 (ep
->td_dma
);
915 fill_dma_desc(ep
, req
, valid
);
919 done(struct net2280_ep
*ep
, struct net2280_request
*req
, int status
)
922 unsigned stopped
= ep
->stopped
;
924 list_del_init(&req
->queue
);
926 if (req
->req
.status
== -EINPROGRESS
)
927 req
->req
.status
= status
;
929 status
= req
->req
.status
;
933 usb_gadget_unmap_request(&dev
->gadget
, &req
->req
, ep
->is_in
);
935 if (status
&& status
!= -ESHUTDOWN
)
936 ep_vdbg(dev
, "complete %s req %p stat %d len %u/%u\n",
937 ep
->ep
.name
, &req
->req
, status
,
938 req
->req
.actual
, req
->req
.length
);
940 /* don't modify queue heads during completion callback */
942 spin_unlock(&dev
->lock
);
943 usb_gadget_giveback_request(&ep
->ep
, &req
->req
);
944 spin_lock(&dev
->lock
);
945 ep
->stopped
= stopped
;
948 /*-------------------------------------------------------------------------*/
951 net2280_queue(struct usb_ep
*_ep
, struct usb_request
*_req
, gfp_t gfp_flags
)
953 struct net2280_request
*req
;
954 struct net2280_ep
*ep
;
959 /* we always require a cpu-view buffer, so that we can
960 * always use pio (as fallback or whatever).
962 ep
= container_of(_ep
, struct net2280_ep
, ep
);
963 if (!_ep
|| (!ep
->desc
&& ep
->num
!= 0)) {
964 pr_err("%s: Invalid ep=%p or ep->desc\n", __func__
, _ep
);
967 req
= container_of(_req
, struct net2280_request
, req
);
968 if (!_req
|| !_req
->complete
|| !_req
->buf
||
969 !list_empty(&req
->queue
)) {
973 if (_req
->length
> (~0 & DMA_BYTE_COUNT_MASK
)) {
978 if (!dev
->driver
|| dev
->gadget
.speed
== USB_SPEED_UNKNOWN
) {
983 /* FIXME implement PIO fallback for ZLPs with DMA */
984 if (ep
->dma
&& _req
->length
== 0) {
989 /* set up dma mapping in case the caller didn't */
991 ret
= usb_gadget_map_request(&dev
->gadget
, _req
,
997 ep_vdbg(dev
, "%s queue req %p, len %d buf %p\n",
998 _ep
->name
, _req
, _req
->length
, _req
->buf
);
1000 spin_lock_irqsave(&dev
->lock
, flags
);
1002 _req
->status
= -EINPROGRESS
;
1005 /* kickstart this i/o queue? */
1006 if (list_empty(&ep
->queue
) && !ep
->stopped
&&
1007 !((dev
->quirks
& PLX_SUPERSPEED
) && ep
->dma
&&
1008 (readl(&ep
->regs
->ep_rsp
) & BIT(CLEAR_ENDPOINT_HALT
)))) {
1010 /* use DMA if the endpoint supports it, else pio */
1014 /* maybe there's no control data, just status ack */
1015 if (ep
->num
== 0 && _req
->length
== 0) {
1018 ep_vdbg(dev
, "%s status ack\n", ep
->ep
.name
);
1022 /* PIO ... stuff the fifo, or unblock it. */
1024 write_fifo(ep
, _req
);
1025 else if (list_empty(&ep
->queue
)) {
1028 /* OUT FIFO might have packet(s) buffered */
1029 s
= readl(&ep
->regs
->ep_stat
);
1030 if ((s
& BIT(FIFO_EMPTY
)) == 0) {
1031 /* note: _req->short_not_ok is
1032 * ignored here since PIO _always_
1033 * stops queue advance here, and
1034 * _req->status doesn't change for
1035 * short reads (only _req->actual)
1037 if (read_fifo(ep
, req
) &&
1041 /* don't queue it */
1043 } else if (read_fifo(ep
, req
) &&
1048 s
= readl(&ep
->regs
->ep_stat
);
1051 /* don't NAK, let the fifo fill */
1052 if (req
&& (s
& BIT(NAK_OUT_PACKETS
)))
1053 writel(BIT(CLEAR_NAK_OUT_PACKETS
),
1058 } else if (ep
->dma
) {
1064 /* preventing magic zlps is per-engine state, not
1065 * per-transfer; irq logic must recover hiccups.
1067 expect
= likely(req
->req
.zero
||
1068 (req
->req
.length
% ep
->ep
.maxpacket
));
1069 if (expect
!= ep
->in_fifo_validate
)
1072 queue_dma(ep
, req
, valid
);
1074 } /* else the irq handler advances the queue. */
1078 list_add_tail(&req
->queue
, &ep
->queue
);
1080 spin_unlock_irqrestore(&dev
->lock
, flags
);
1082 /* pci writes may still be posted */
1086 dev_err(&ep
->dev
->pdev
->dev
, "%s: error=%d\n", __func__
, ret
);
1091 dma_done(struct net2280_ep
*ep
, struct net2280_request
*req
, u32 dmacount
,
1094 req
->req
.actual
= req
->req
.length
- (DMA_BYTE_COUNT_MASK
& dmacount
);
1095 done(ep
, req
, status
);
1098 static void scan_dma_completions(struct net2280_ep
*ep
)
1100 /* only look at descriptors that were "naturally" retired,
1101 * so fifo and list head state won't matter
1103 while (!list_empty(&ep
->queue
)) {
1104 struct net2280_request
*req
;
1107 req
= list_entry(ep
->queue
.next
,
1108 struct net2280_request
, queue
);
1112 tmp
= le32_to_cpup(&req
->td
->dmacount
);
1113 if ((tmp
& BIT(VALID_BIT
)) != 0)
1116 /* SHORT_PACKET_TRANSFERRED_INTERRUPT handles "usb-short"
1117 * cases where DMA must be aborted; this code handles
1118 * all non-abort DMA completions.
1120 if (unlikely(req
->td
->dmadesc
== 0)) {
1122 tmp
= readl(&ep
->dma
->dmacount
);
1123 if (tmp
& DMA_BYTE_COUNT_MASK
)
1125 /* single transfer mode */
1126 dma_done(ep
, req
, tmp
, 0);
1128 } else if (!ep
->is_in
&&
1129 (req
->req
.length
% ep
->ep
.maxpacket
) &&
1130 !(ep
->dev
->quirks
& PLX_SUPERSPEED
)) {
1132 tmp
= readl(&ep
->regs
->ep_stat
);
1133 /* AVOID TROUBLE HERE by not issuing short reads from
1134 * your gadget driver. That helps avoids errata 0121,
1135 * 0122, and 0124; not all cases trigger the warning.
1137 if ((tmp
& BIT(NAK_OUT_PACKETS
)) == 0) {
1138 ep_warn(ep
->dev
, "%s lost packet sync!\n",
1140 req
->req
.status
= -EOVERFLOW
;
1142 tmp
= readl(&ep
->regs
->ep_avail
);
1144 /* fifo gets flushed later */
1145 ep
->out_overflow
= 1;
1147 "%s dma, discard %d len %d\n",
1150 req
->req
.status
= -EOVERFLOW
;
1154 dma_done(ep
, req
, tmp
, 0);
1158 static void restart_dma(struct net2280_ep
*ep
)
1160 struct net2280_request
*req
;
1164 req
= list_entry(ep
->queue
.next
, struct net2280_request
, queue
);
1169 static void abort_dma(struct net2280_ep
*ep
)
1171 /* abort the current transfer */
1172 if (likely(!list_empty(&ep
->queue
))) {
1173 /* FIXME work around errata 0121, 0122, 0124 */
1174 writel(BIT(DMA_ABORT
), &ep
->dma
->dmastat
);
1175 spin_stop_dma(ep
->dma
);
1178 scan_dma_completions(ep
);
1181 /* dequeue ALL requests */
1182 static void nuke(struct net2280_ep
*ep
)
1184 struct net2280_request
*req
;
1186 /* called with spinlock held */
1190 while (!list_empty(&ep
->queue
)) {
1191 req
= list_entry(ep
->queue
.next
,
1192 struct net2280_request
,
1194 done(ep
, req
, -ESHUTDOWN
);
1198 /* dequeue JUST ONE request */
1199 static int net2280_dequeue(struct usb_ep
*_ep
, struct usb_request
*_req
)
1201 struct net2280_ep
*ep
;
1202 struct net2280_request
*req
;
1203 unsigned long flags
;
1207 ep
= container_of(_ep
, struct net2280_ep
, ep
);
1208 if (!_ep
|| (!ep
->desc
&& ep
->num
!= 0) || !_req
) {
1209 pr_err("%s: Invalid ep=%p or ep->desc or req=%p\n",
1210 __func__
, _ep
, _req
);
1214 spin_lock_irqsave(&ep
->dev
->lock
, flags
);
1215 stopped
= ep
->stopped
;
1217 /* quiesce dma while we patch the queue */
1221 dmactl
= readl(&ep
->dma
->dmactl
);
1222 /* WARNING erratum 0127 may kick in ... */
1224 scan_dma_completions(ep
);
1227 /* make sure it's still queued on this endpoint */
1228 list_for_each_entry(req
, &ep
->queue
, queue
) {
1229 if (&req
->req
== _req
)
1232 if (&req
->req
!= _req
) {
1233 spin_unlock_irqrestore(&ep
->dev
->lock
, flags
);
1234 dev_err(&ep
->dev
->pdev
->dev
, "%s: Request mismatch\n",
1239 /* queue head may be partially complete. */
1240 if (ep
->queue
.next
== &req
->queue
) {
1242 ep_dbg(ep
->dev
, "unlink (%s) dma\n", _ep
->name
);
1243 _req
->status
= -ECONNRESET
;
1245 if (likely(ep
->queue
.next
== &req
->queue
)) {
1246 /* NOTE: misreports single-transfer mode*/
1247 req
->td
->dmacount
= 0; /* invalidate */
1249 readl(&ep
->dma
->dmacount
),
1253 ep_dbg(ep
->dev
, "unlink (%s) pio\n", _ep
->name
);
1254 done(ep
, req
, -ECONNRESET
);
1260 done(ep
, req
, -ECONNRESET
);
1261 ep
->stopped
= stopped
;
1264 /* turn off dma on inactive queues */
1265 if (list_empty(&ep
->queue
))
1267 else if (!ep
->stopped
) {
1268 /* resume current request, or start new one */
1270 writel(dmactl
, &ep
->dma
->dmactl
);
1272 start_dma(ep
, list_entry(ep
->queue
.next
,
1273 struct net2280_request
, queue
));
1277 spin_unlock_irqrestore(&ep
->dev
->lock
, flags
);
1281 /*-------------------------------------------------------------------------*/
/* forward declaration: set_halt_and_wedge() queries FIFO state before halting */
static int net2280_fifo_status(struct usb_ep *_ep);

/*
 * Set or clear an endpoint halt (and optionally "wedge" it so only a real
 * SetConfiguration/SetInterface clears it).  Implements the common path for
 * the usb_ep_ops set_halt and set_wedge entry points.
 *
 * NOTE(review): this chunk reached us with lines missing; body reconstructed
 * from the visible fragments — confirm against upstream net2280.c.
 */
static int
net2280_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
{
	struct net2280_ep *ep;
	unsigned long flags;
	int retval = 0;

	ep = container_of(_ep, struct net2280_ep, ep);
	/* ep0 has no descriptor; any other ep without one is not enabled */
	if (!_ep || (!ep->desc && ep->num != 0)) {
		pr_err("%s: Invalid ep=%p or ep->desc\n", __func__, _ep);
		return -EINVAL;
	}
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) {
		retval = -ESHUTDOWN;
		goto print_err;
	}
	/* USB spec: isochronous endpoints cannot be halted */
	if (ep->desc /* not ep0 */ && (ep->desc->bmAttributes & 0x03)
						== USB_ENDPOINT_XFER_ISOC) {
		retval = -EINVAL;
		goto print_err;
	}

	spin_lock_irqsave(&ep->dev->lock, flags);
	/* refuse to halt/resume while transfers are still queued */
	if (!list_empty(&ep->queue)) {
		retval = -EAGAIN;
		goto print_unlock;
	} else if (ep->is_in && value && net2280_fifo_status(_ep) != 0) {
		/* IN fifo still draining; let it empty first */
		retval = -EAGAIN;
		goto print_unlock;
	} else {
		ep_vdbg(ep->dev, "%s %s %s\n", _ep->name,
				value ? "set" : "clear",
				wedged ? "wedge" : "halt");
		/* set/clear, then synch memory views with the device */
		if (value) {
			if (ep->num == 0)
				/* ep0 stalls are reported via protocol_stall */
				ep->dev->protocol_stall = 1;
			else
				set_halt(ep);
			if (wedged)
				ep->wedged = 1;
		} else {
			clear_halt(ep);
			/* 338x: restart a DMA queue parked behind the halt */
			if (ep->dev->quirks & PLX_SUPERSPEED &&
				!list_empty(&ep->queue) && ep->td_dma)
					restart_dma(ep);
			ep->wedged = 0;
		}
		/* flush posted writes so the device sees the change now */
		(void) readl(&ep->regs->ep_rsp);
	}
	spin_unlock_irqrestore(&ep->dev->lock, flags);

	return retval;

print_unlock:
	spin_unlock_irqrestore(&ep->dev->lock, flags);
print_err:
	dev_err(&ep->dev->pdev->dev, "%s: error=%d\n", __func__, retval);
	return retval;
}
/* usb_ep_ops.set_halt: plain halt/resume, never wedged */
static int net2280_set_halt(struct usb_ep *_ep, int value)
{
	/* delegate to the shared halt/wedge implementation */
	return net2280_set_halt_and_wedge(_ep, value, /* wedged */ 0);
}
1351 static int net2280_set_wedge(struct usb_ep
*_ep
)
1353 if (!_ep
|| _ep
->name
== ep0name
) {
1354 pr_err("%s: Invalid ep=%p or ep0\n", __func__
, _ep
);
1357 return net2280_set_halt_and_wedge(_ep
, 1, 1);
/*
 * usb_ep_ops.fifo_status: report how many bytes are currently in the
 * endpoint FIFO.  For IN endpoints the hardware reports free space, so
 * the occupied count is fifo_size - avail.
 */
static int net2280_fifo_status(struct usb_ep *_ep)
{
	struct net2280_ep *ep;
	u32 avail;

	ep = container_of(_ep, struct net2280_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0)) {
		pr_err("%s: Invalid ep=%p or ep->desc\n", __func__, _ep);
		return -ENODEV;
	}
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) {
		dev_err(&ep->dev->pdev->dev,
			"%s: Invalid driver=%p or speed=%d\n",
			__func__, ep->dev->driver, ep->dev->gadget.speed);
		return -ESHUTDOWN;
	}

	/* ep_avail is a 12-bit byte count */
	avail = readl(&ep->regs->ep_avail) & (BIT(12) - 1);
	if (avail > ep->fifo_size) {
		dev_err(&ep->dev->pdev->dev, "%s: Fifo overflow\n", __func__);
		return -EOVERFLOW;
	}
	/* IN direction: hardware reports free space, not occupancy */
	if (ep->is_in)
		avail = ep->fifo_size - avail;
	return avail;
}
/*
 * usb_ep_ops.fifo_flush: discard any data sitting in the endpoint FIFO.
 * Validates the endpoint/driver state, then pokes the FIFO_FLUSH bit and
 * reads back a register so the posted write reaches the chip.
 */
static void net2280_fifo_flush(struct usb_ep *_ep)
{
	struct net2280_ep *ep;

	ep = container_of(_ep, struct net2280_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0)) {
		pr_err("%s: Invalid ep=%p or ep->desc\n", __func__, _ep);
		return;
	}
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) {
		dev_err(&ep->dev->pdev->dev,
			"%s: Invalid driver=%p or speed=%d\n",
			__func__, ep->dev->driver, ep->dev->gadget.speed);
		return;
	}

	writel(BIT(FIFO_FLUSH), &ep->regs->ep_stat);
	/* flush the posted write */
	(void) readl(&ep->regs->ep_rsp);
}
/* per-endpoint operations exported to the gadget core */
static const struct usb_ep_ops net2280_ep_ops = {
	.enable		= net2280_enable,
	.disable	= net2280_disable,

	.alloc_request	= net2280_alloc_request,
	.free_request	= net2280_free_request,

	.queue		= net2280_queue,
	.dequeue	= net2280_dequeue,

	.set_halt	= net2280_set_halt,
	.set_wedge	= net2280_set_wedge,
	.fifo_status	= net2280_fifo_status,
	.fifo_flush	= net2280_fifo_flush,
};
1423 /*-------------------------------------------------------------------------*/
/*
 * usb_gadget_ops.get_frame: return the current 11-bit USB frame number
 * from the indexed FRAME register.
 */
static int net2280_get_frame(struct usb_gadget *_gadget)
{
	struct net2280 *dev;
	unsigned long flags;
	u16 retval;

	if (!_gadget)
		return -ENODEV;
	dev = container_of(_gadget, struct net2280, gadget);
	/* indexed register access must be serialized under dev->lock */
	spin_lock_irqsave(&dev->lock, flags);
	retval = get_idx_reg(dev->regs, REG_FRAME) & 0x03ff;
	spin_unlock_irqrestore(&dev->lock, flags);
	return retval;
}
/*
 * usb_gadget_ops.wakeup: issue a remote-wakeup resume, but only if the
 * host has enabled remote wakeup (DEVICE_REMOTE_WAKEUP_ENABLE is set).
 */
static int net2280_wakeup(struct usb_gadget *_gadget)
{
	struct net2280 *dev;
	u32 tmp;
	unsigned long flags;

	if (!_gadget)
		return 0;
	dev = container_of(_gadget, struct net2280, gadget);

	spin_lock_irqsave(&dev->lock, flags);
	tmp = readl(&dev->usb->usbctl);
	if (tmp & BIT(DEVICE_REMOTE_WAKEUP_ENABLE))
		writel(BIT(GENERATE_RESUME), &dev->usb->usbstat);
	spin_unlock_irqrestore(&dev->lock, flags);

	/* pci writes may still be posted */
	return 0;
}
/*
 * usb_gadget_ops.set_selfpowered: mirror the self-powered state into both
 * the SELF_POWERED_STATUS hardware bit (reported in GetStatus responses)
 * and the gadget core's is_selfpowered flag.
 */
static int net2280_set_selfpowered(struct usb_gadget *_gadget, int value)
{
	struct net2280 *dev;
	u32 tmp;
	unsigned long flags;

	if (!_gadget)
		return -ENODEV;
	dev = container_of(_gadget, struct net2280, gadget);

	spin_lock_irqsave(&dev->lock, flags);
	tmp = readl(&dev->usb->usbctl);
	if (value) {
		tmp |= BIT(SELF_POWERED_STATUS);
		_gadget->is_selfpowered = 1;
	} else {
		tmp &= ~BIT(SELF_POWERED_STATUS);
		_gadget->is_selfpowered = 0;
	}
	writel(tmp, &dev->usb->usbctl);
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}
/*
 * usb_gadget_ops.pullup: soft-connect or soft-disconnect from the bus by
 * toggling USB_DETECT_ENABLE.  Also records the requested state in
 * dev->softconnect so later (re)initialization can restore it.
 */
static int net2280_pullup(struct usb_gadget *_gadget, int is_on)
{
	struct net2280 *dev;
	u32 tmp;
	unsigned long flags;

	if (!_gadget)
		return -ENODEV;
	dev = container_of(_gadget, struct net2280, gadget);

	spin_lock_irqsave(&dev->lock, flags);
	tmp = readl(&dev->usb->usbctl);
	dev->softconnect = (is_on != 0);
	if (is_on)
		tmp |= BIT(USB_DETECT_ENABLE);
	else
		tmp &= ~BIT(USB_DETECT_ENABLE);
	writel(tmp, &dev->usb->usbctl);
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}
/* forward declarations: udc_start/udc_stop are defined after the sysfs code */
static int net2280_start(struct usb_gadget *_gadget,
		struct usb_gadget_driver *driver);
static int net2280_stop(struct usb_gadget *_gadget);

/* controller-wide operations exported to the gadget core */
static const struct usb_gadget_ops net2280_ops = {
	.get_frame	= net2280_get_frame,
	.wakeup		= net2280_wakeup,
	.set_selfpowered = net2280_set_selfpowered,
	.pullup		= net2280_pullup,
	.udc_start	= net2280_start,
	.udc_stop	= net2280_stop,
};
1521 /*-------------------------------------------------------------------------*/
1523 #ifdef CONFIG_USB_GADGET_DEBUG_FILES
1525 /* FIXME move these into procfs, and use seq_file.
1526 * Sysfs _still_ doesn't behave for arbitrarily sized files,
1527 * and also doesn't help products using this with 2.4 kernels.
1530 /* "function" sysfs attribute */
/*
 * "function" sysfs attribute: show the bound gadget driver's function
 * string, or nothing if no driver is bound (or the string is oversized).
 */
static ssize_t function_show(struct device *_dev, struct device_attribute *attr,
			     char *buf)
{
	struct net2280 *dev = dev_get_drvdata(_dev);

	if (!dev->driver || !dev->driver->function ||
			strlen(dev->driver->function) > PAGE_SIZE)
		return 0;
	return scnprintf(buf, PAGE_SIZE, "%s\n", dev->driver->function);
}
static DEVICE_ATTR_RO(function);
/*
 * "registers" sysfs attribute: dump the main control, USB control, and
 * per-endpoint (plus DMA) register state, followed by per-ep irq counts.
 * All register reads happen under dev->lock for a consistent snapshot.
 *
 * NOTE(review): this chunk reached us with many lines missing; body
 * reconstructed from the visible fragments — confirm against upstream
 * net2280.c before relying on exact output formatting.
 */
static ssize_t registers_show(struct device *_dev,
			      struct device_attribute *attr, char *buf)
{
	struct net2280 *dev;
	char *next;
	unsigned size, t;
	unsigned long flags;
	int i;
	u32 t1, t2;
	const char *s;

	dev = dev_get_drvdata(_dev);
	next = buf;
	size = PAGE_SIZE;
	spin_lock_irqsave(&dev->lock, flags);

	if (dev->driver)
		s = dev->driver->driver.name;
	else
		s = "(none)";

	/* Main Control Registers */
	t = scnprintf(next, size, "%s version " DRIVER_VERSION
			", chiprev %04x\n\n"
			"devinit %03x fifoctl %08x gadget '%s'\n"
			"pci irqenb0 %02x irqenb1 %08x "
			"irqstat0 %04x irqstat1 %08x\n",
			driver_name, dev->chiprev,
			readl(&dev->regs->devinit),
			readl(&dev->regs->fifoctl),
			s,
			readl(&dev->regs->pciirqenb0),
			readl(&dev->regs->pciirqenb1),
			readl(&dev->regs->irqstat0),
			readl(&dev->regs->irqstat1));
	size -= t;
	next += t;

	/* USB Control Registers */
	t1 = readl(&dev->usb->usbctl);
	t2 = readl(&dev->usb->usbstat);
	if (t1 & BIT(VBUS_PIN)) {
		if (t2 & BIT(HIGH_SPEED))
			s = "high speed";
		else if (dev->gadget.speed == USB_SPEED_UNKNOWN)
			s = "powered";
		else
			s = "full speed";
		/* full speed bit (6) not working?? */
	} else
			s = "not attached";
	t = scnprintf(next, size,
			"stdrsp %08x usbctl %08x usbstat %08x "
				"addr 0x%02x (%s)\n",
			readl(&dev->usb->stdrsp), t1, t2,
			readl(&dev->usb->ouraddr), s);
	size -= t;
	next += t;

	/* PCI Master Control Registers */

	/* DMA Control Registers */

	/* Configurable EP Control Registers */
	for (i = 0; i < dev->n_ep; i++) {
		struct net2280_ep *ep;

		ep = &dev->ep[i];
		/* skip endpoints that are not enabled (ep0 always shown) */
		if (i && !ep->desc)
			continue;

		t1 = readl(&ep->cfg->ep_cfg);
		t2 = readl(&ep->regs->ep_rsp) & 0xff;
		t = scnprintf(next, size,
				"\n%s\tcfg %05x rsp (%02x) %s%s%s%s%s%s%s%s"
					"irqenb %02x\n",
				ep->ep.name, t1, t2,
				(t2 & BIT(CLEAR_NAK_OUT_PACKETS))
					? "NAK " : "",
				(t2 & BIT(CLEAR_EP_HIDE_STATUS_PHASE))
					? "hide " : "",
				(t2 & BIT(CLEAR_EP_FORCE_CRC_ERROR))
					? "CRC " : "",
				(t2 & BIT(CLEAR_INTERRUPT_MODE))
					? "interrupt " : "",
				(t2 & BIT(CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE))
					? "status " : "",
				(t2 & BIT(CLEAR_NAK_OUT_PACKETS_MODE))
					? "NAKmode " : "",
				(t2 & BIT(CLEAR_ENDPOINT_TOGGLE))
					? "DATA1 " : "DATA0 ",
				(t2 & BIT(CLEAR_ENDPOINT_HALT))
					? "HALT " : "",
				readl(&ep->regs->ep_irqenb));
		size -= t;
		next += t;

		t = scnprintf(next, size,
				"\tstat %08x avail %04x "
				"(ep%d%s-%s)%s\n",
				readl(&ep->regs->ep_stat),
				readl(&ep->regs->ep_avail),
				t1 & 0x0f, DIR_STRING(t1),
				type_string(t1 >> 8),
				ep->stopped ? "*" : "");
		size -= t;
		next += t;

		if (!ep->dma)
			continue;

		t = scnprintf(next, size,
				"  dma\tctl %08x stat %08x count %08x\n"
				"\taddr %08x desc %08x\n",
				readl(&ep->dma->dmactl),
				readl(&ep->dma->dmastat),
				readl(&ep->dma->dmacount),
				readl(&ep->dma->dmaaddr),
				readl(&ep->dma->dmadesc));
		size -= t;
		next += t;
	}

	/* Indexed Registers (none yet) */

	/* Statistics */
	t = scnprintf(next, size, "\nirqs:  ");
	size -= t;
	next += t;
	for (i = 0; i < dev->n_ep; i++) {
		struct net2280_ep *ep;

		ep = &dev->ep[i];
		if (i && !ep->irqs)
			continue;
		t = scnprintf(next, size, " %s/%lu", ep->ep.name, ep->irqs);
		size -= t;
		next += t;
	}
	t = scnprintf(next, size, "\n");
	size -= t;
	next += t;

	spin_unlock_irqrestore(&dev->lock, flags);

	return PAGE_SIZE - size;
}
static DEVICE_ATTR_RO(registers);
/*
 * "queues" sysfs attribute: for each enabled endpoint, dump its descriptor
 * summary and every queued request (with DMA descriptor contents when the
 * endpoint runs in DMA mode).  Bails out early when the page buffer fills.
 *
 * NOTE(review): body reconstructed from damaged fragments — confirm exact
 * formatting against upstream net2280.c.
 */
static ssize_t queues_show(struct device *_dev, struct device_attribute *attr,
			   char *buf)
{
	struct net2280 *dev;
	char *next;
	unsigned size;
	unsigned long flags;
	int i;

	dev = dev_get_drvdata(_dev);
	next = buf;
	size = PAGE_SIZE;
	spin_lock_irqsave(&dev->lock, flags);

	for (i = 0; i < dev->n_ep; i++) {
		struct net2280_ep *ep = &dev->ep[i];
		struct net2280_request *req;
		int t;

		if (i != 0) {
			const struct usb_endpoint_descriptor *d;

			d = ep->desc;
			if (!d)
				continue;
			t = d->bEndpointAddress;
			t = scnprintf(next, size,
				"\n%s (ep%d%s-%s) max %04x %s fifo %d\n",
				ep->ep.name, t & USB_ENDPOINT_NUMBER_MASK,
				(t & USB_DIR_IN) ? "in" : "out",
				type_string(d->bmAttributes),
				usb_endpoint_maxp(d) & 0x1fff,
				ep->dma ? "dma" : "pio", ep->fifo_size
				);
		} else /* ep0 should only have one transfer queued */
			t = scnprintf(next, size, "ep0 max 64 pio %s\n",
					ep->is_in ? "in" : "out");
		if (t <= 0 || t > size)
			goto done;
		size -= t;
		next += t;

		if (list_empty(&ep->queue)) {
			t = scnprintf(next, size, "\t(nothing queued)\n");
			if (t <= 0 || t > size)
				goto done;
			size -= t;
			next += t;
			continue;
		}
		list_for_each_entry(req, &ep->queue, queue) {
			/* flag the request the DMA engine is working on */
			if (ep->dma && req->td_dma == readl(&ep->dma->dmadesc))
				t = scnprintf(next, size,
					"\treq %p len %d/%d "
					"buf %p (dmacount %08x)\n",
					&req->req, req->req.actual,
					req->req.length, req->req.buf,
					readl(&ep->dma->dmacount));
			else
				t = scnprintf(next, size,
					"\treq %p len %d/%d buf %p\n",
					&req->req, req->req.actual,
					req->req.length, req->req.buf);
			if (t <= 0 || t > size)
				goto done;
			size -= t;
			next += t;

			if (ep->dma) {
				struct net2280_dma *td;

				td = req->td;
				/* descriptor fields are little-endian in memory */
				t = scnprintf(next, size, "\t    td %08x "
					" count %08x buf %08x desc %08x\n",
					(u32) req->td_dma,
					le32_to_cpu(td->dmacount),
					le32_to_cpu(td->dmaaddr),
					le32_to_cpu(td->dmadesc));
				if (t <= 0 || t > size)
					goto done;
				size -= t;
				next += t;
			}
		}
	}

done:
	spin_unlock_irqrestore(&dev->lock, flags);
	return PAGE_SIZE - size;
}
static DEVICE_ATTR_RO(queues);
/* no CONFIG_USB_GADGET_DEBUG_FILES: stub out the sysfs attribute hooks */
#define device_create_file(a, b) (0)
#define device_remove_file(a, b) do { } while (0)
1794 /*-------------------------------------------------------------------------*/
1796 /* another driver-specific mode might be a request type doing dma
1797 * to/from another device fifo instead of to/from memory.
/*
 * Apply one of the 228x FIFO allocation modes and rebuild the gadget's
 * ep_list to expose only the endpoints that exist in that mode:
 *   mode 0: four 1K fifos (ep-a..ep-d available)
 *   mode 1: two 2K fifos (ep-c/ep-d unavailable)
 *   mode 2: 2K + 1K + 1K (ep-d unavailable)
 */
static void set_fifo_mode(struct net2280 *dev, int mode)
{
	/* keeping high bits preserves BAR2 */
	writel((0xffff << PCI_BASE2_RANGE) | mode, &dev->regs->fifoctl);

	/* always ep-{a,b,e,f} ... maybe not ep-c or ep-d */
	INIT_LIST_HEAD(&dev->gadget.ep_list);
	list_add_tail(&dev->ep[1].ep.ep_list, &dev->gadget.ep_list);
	list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
	switch (mode) {
	case 0:
		list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list);
		list_add_tail(&dev->ep[4].ep.ep_list, &dev->gadget.ep_list);
		dev->ep[1].fifo_size = dev->ep[2].fifo_size = 1024;
		break;
	case 1:
		dev->ep[1].fifo_size = dev->ep[2].fifo_size = 2048;
		break;
	case 2:
		list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list);
		dev->ep[1].fifo_size = 2048;
		dev->ep[2].fifo_size = 1024;
		break;
	}
	/* fifo sizes for ep0, ep-c, ep-d, ep-e, and ep-f never change */
	list_add_tail(&dev->ep[5].ep.ep_list, &dev->gadget.ep_list);
	list_add_tail(&dev->ep[6].ep.ep_list, &dev->gadget.ep_list);
}
static void defect7374_disable_data_eps(struct net2280 *dev)
{
	/*
	 * For Defect 7374, disable data EPs (and more):
	 * - This phase undoes the earlier phase of the Defect 7374 workaround,
	 *   returing ep regs back to normal.
	 */
	struct net2280_ep *ep;
	int i;
	unsigned char ep_sel;
	u32 tmp_reg;

	/* disable the four general-purpose data endpoints */
	for (i = 1; i < 5; i++) {
		ep = &dev->ep[i];
		writel(0, &ep->cfg->ep_cfg);
	}

	/* CSROUT, CSRIN, PCIOUT, PCIIN, STATIN, RCIN */
	for (i = 0; i < 6; i++)
		writel(0, &dev->dep[i].dep_cfg);

	for (ep_sel = 0; ep_sel <= 21; ep_sel++) {
		/* Select an endpoint for subsequent operations: */
		tmp_reg = readl(&dev->plregs->pl_ep_ctrl);
		writel(((tmp_reg & ~0x1f) | ep_sel), &dev->plregs->pl_ep_ctrl);

		/* skip the selectors the workaround never touched */
		if (ep_sel < 2 || (ep_sel > 9 && ep_sel < 14) ||
					ep_sel == 18 || ep_sel == 20)
			continue;

		/* Change settings on some selected endpoints */
		tmp_reg = readl(&dev->plregs->pl_ep_cfg_4);
		tmp_reg &= ~BIT(NON_CTRL_IN_TOLERATE_BAD_DIR);
		writel(tmp_reg, &dev->plregs->pl_ep_cfg_4);
		tmp_reg = readl(&dev->plregs->pl_ep_ctrl);
		tmp_reg |= BIT(EP_INITIALIZED);
		writel(tmp_reg, &dev->plregs->pl_ep_ctrl);
	}
}
/*
 * First phase of the Defect 7374 workaround: pre-enable the data endpoints
 * in a special configuration and arm the scratch-register FSM so the
 * workaround proper can run on the first SuperSpeed control read.
 *
 * NOTE(review): body reconstructed from damaged fragments — confirm the
 * ep_sel special cases against upstream net2280.c.
 */
static void defect7374_enable_data_eps_zero(struct net2280 *dev)
{
	u32 tmp = 0, tmp_reg;
	u32 scratch;
	int i;
	unsigned char ep_sel;

	scratch = get_idx_reg(dev->regs, SCRATCH);

	/* must not run once the FSM has already completed an SS control read */
	WARN_ON((scratch & (0xf << DEFECT7374_FSM_FIELD))
		== DEFECT7374_FSM_SS_CONTROL_READ);

	scratch &= ~(0xf << DEFECT7374_FSM_FIELD);

	ep_warn(dev, "Operate Defect 7374 workaround soft this time");
	ep_warn(dev, "It will operate on cold-reboot and SS connect");

	/* GPEPs: bulk in both directions, enabled per addressing mode */
	tmp = ((0 << ENDPOINT_NUMBER) | BIT(ENDPOINT_DIRECTION) |
			(2 << OUT_ENDPOINT_TYPE) | (2 << IN_ENDPOINT_TYPE) |
			((dev->enhanced_mode) ?
			 BIT(OUT_ENDPOINT_ENABLE) | BIT(IN_ENDPOINT_ENABLE) :
			 BIT(ENDPOINT_ENABLE)));

	for (i = 1; i < 5; i++)
		writel(tmp, &dev->ep[i].cfg->ep_cfg);

	/* CSRIN, PCIIN, STATIN, RCIN*/
	tmp = ((0 << ENDPOINT_NUMBER) | BIT(ENDPOINT_ENABLE));
	writel(tmp, &dev->dep[1].dep_cfg);
	writel(tmp, &dev->dep[3].dep_cfg);
	writel(tmp, &dev->dep[4].dep_cfg);
	writel(tmp, &dev->dep[5].dep_cfg);

	/*Implemented for development and debug.
	 * Can be refined/tuned later.*/
	for (ep_sel = 0; ep_sel <= 21; ep_sel++) {
		/* Select an endpoint for subsequent operations: */
		tmp_reg = readl(&dev->plregs->pl_ep_ctrl);
		writel(((tmp_reg & ~0x1f) | ep_sel),
				&dev->plregs->pl_ep_ctrl);

		if (ep_sel == 1) {
			tmp =
				(readl(&dev->plregs->pl_ep_ctrl) |
				 BIT(CLEAR_ACK_ERROR_CODE) | 0);
			writel(tmp, &dev->plregs->pl_ep_ctrl);
			continue;
		}

		if (ep_sel == 0 || (ep_sel > 9 && ep_sel < 14) ||
				ep_sel == 18 || ep_sel == 20)
			continue;

		tmp = (readl(&dev->plregs->pl_ep_cfg_4) |
				BIT(NON_CTRL_IN_TOLERATE_BAD_DIR) | 0);
		writel(tmp, &dev->plregs->pl_ep_cfg_4);

		tmp = readl(&dev->plregs->pl_ep_ctrl) &
			~BIT(EP_INITIALIZED);
		writel(tmp, &dev->plregs->pl_ep_ctrl);
	}

	/* Set FSM to focus on the first Control Read:
	 * - Tip: Connection speed is known upon the first
	 *   setup request.
	 */
	scratch |= DEFECT7374_FSM_WAITING_FOR_CONTROL_READ;
	set_idx_reg(dev->regs, SCRATCH, scratch);
}
1941 /* keeping it simple:
1942 * - one bus driver, initted first;
1943 * - one function driver, initted second
1945 * most of the work to support multiple net2280 controllers would
1946 * be to associate this gadget driver (yes?) with all of them, or
1947 * perhaps to bind specific drivers to specific devices.
/*
 * Hard-reset the 228x-family controller: quiesce irqs and DMA, soft-reset
 * the USB/FIFO/8051 blocks, and restore the configured FIFO mode.
 */
static void usb_reset_228x(struct net2280 *dev)
{
	u32 tmp;

	dev->gadget.speed = USB_SPEED_UNKNOWN;
	/* flush posted writes before touching irq/DMA state */
	(void) readl(&dev->usb->usbctl);

	net2280_led_init(dev);

	/* disable automatic responses, and irqs */
	writel(0, &dev->usb->stdrsp);
	writel(0, &dev->regs->pciirqenb0);
	writel(0, &dev->regs->pciirqenb1);

	/* clear old dma and irq state */
	for (tmp = 0; tmp < 4; tmp++) {
		struct net2280_ep *ep = &dev->ep[tmp + 1];
		if (ep->dma)
			abort_dma(ep);
	}

	/* ack everything except suspend-request, which stays pending */
	writel(~0, &dev->regs->irqstat0),
	writel(~(u32)BIT(SUSPEND_REQUEST_INTERRUPT), &dev->regs->irqstat1),

	/* reset, and enable pci */
	tmp = readl(&dev->regs->devinit) |
		BIT(PCI_ENABLE) |
		BIT(FIFO_SOFT_RESET) |
		BIT(USB_SOFT_RESET) |
		BIT(M8051_RESET);
	writel(tmp, &dev->regs->devinit);

	/* standard fifo and endpoint allocations */
	set_fifo_mode(dev, (fifo_mode <= 2) ? fifo_mode : 0);
}
/*
 * Hard-reset the 338x-family controller.  Several steps are gated on
 * dev->bug7734_patched: until the Defect 7374 workaround has completed,
 * the workaround's special endpoint setup must not be disturbed.
 */
static void usb_reset_338x(struct net2280 *dev)
{
	u32 tmp;

	dev->gadget.speed = USB_SPEED_UNKNOWN;
	(void)readl(&dev->usb->usbctl);

	net2280_led_init(dev);

	if (dev->bug7734_patched) {
		/* disable automatic responses, and irqs */
		writel(0, &dev->usb->stdrsp);
		writel(0, &dev->regs->pciirqenb0);
		writel(0, &dev->regs->pciirqenb1);
	}

	/* clear old dma and irq state */
	for (tmp = 0; tmp < 4; tmp++) {
		struct net2280_ep *ep = &dev->ep[tmp + 1];
		struct net2280_dma_regs __iomem *dma;

		if (ep->dma) {
			abort_dma(ep);
		} else {
			/* ep->dma not yet wired up: abort via raw registers */
			dma = &dev->dma[tmp];
			writel(BIT(DMA_ABORT), &dma->dmastat);
			writel(0, &dma->dmactl);
		}
	}

	writel(~0, &dev->regs->irqstat0), writel(~0, &dev->regs->irqstat1);

	if (dev->bug7734_patched) {
		/* reset, and enable pci */
		tmp = readl(&dev->regs->devinit) |
		    BIT(PCI_ENABLE) |
		    BIT(FIFO_SOFT_RESET) |
		    BIT(USB_SOFT_RESET) |
		    BIT(M8051_RESET);

		writel(tmp, &dev->regs->devinit);
	}

	/* always ep-{1,2,3,4} ... maybe not ep-3 or ep-4 */
	INIT_LIST_HEAD(&dev->gadget.ep_list);

	for (tmp = 1; tmp < dev->n_ep; tmp++)
		list_add_tail(&dev->ep[tmp].ep.ep_list, &dev->gadget.ep_list);
}
2037 static void usb_reset(struct net2280
*dev
)
2039 if (dev
->quirks
& PLX_LEGACY
)
2040 return usb_reset_228x(dev
);
2041 return usb_reset_338x(dev
);
/*
 * (Re)initialize the 228x software endpoint state: names, register
 * pointers, FIFO sizes, DMA channels, and ep0 bookkeeping.
 */
static void usb_reinit_228x(struct net2280 *dev)
{
	u32 tmp;

	/* basic endpoint init */
	for (tmp = 0; tmp < 7; tmp++) {
		struct net2280_ep *ep = &dev->ep[tmp];

		ep->ep.name = ep_name[tmp];
		ep->dev = dev;
		ep->num = tmp;

		/* ep-a..ep-d get a 1K fifo and a dedicated DMA channel */
		if (tmp > 0 && tmp <= 4) {
			ep->fifo_size = 1024;
			ep->dma = &dev->dma[tmp - 1];
		} else
			ep->fifo_size = 64;
		ep->regs = &dev->epregs[tmp];
		ep->cfg = &dev->epregs[tmp];
		ep_reset_228x(dev->regs, ep);
	}
	/* ep0, ep-e, ep-f are limited to 64-byte packets */
	usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 64);
	usb_ep_set_maxpacket_limit(&dev->ep[5].ep, 64);
	usb_ep_set_maxpacket_limit(&dev->ep[6].ep, 64);

	dev->gadget.ep0 = &dev->ep[0].ep;
	dev->ep[0].stopped = 0;
	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);

	/* we want to prevent lowlevel/insecure access from the USB host,
	 * but erratum 0119 means this enable bit is ignored
	 */
	for (tmp = 0; tmp < 5; tmp++)
		writel(EP_DONTUSE, &dev->dep[tmp].dep_cfg);
}
/*
 * (Re)initialize the 338x software endpoint state, then apply the chip's
 * link-layer errata workarounds and park the dedicated endpoints.
 *
 * NOTE(review): body reconstructed from damaged fragments — confirm the
 * enhanced-mode register aliasing math against upstream net2280.c.
 */
static void usb_reinit_338x(struct net2280 *dev)
{
	int i;
	u32 tmp, val;
	/* enhanced mode maps IN/OUT pairs onto shared endpoint numbers */
	static const u32 ne[9] = { 0, 1, 2, 3, 4, 1, 2, 3, 4 };
	static const u32 ep_reg_addr[9] = { 0x00, 0xC0, 0x00, 0xC0, 0x00,
						0x00, 0xC0, 0x00, 0xC0 };

	/* basic endpoint init */
	for (i = 0; i < dev->n_ep; i++) {
		struct net2280_ep *ep = &dev->ep[i];

		ep->ep.name = dev->enhanced_mode ? ep_name_adv[i] : ep_name[i];
		ep->dev = dev;
		ep->num = i;

		if (i > 0 && i <= 4)
			ep->dma = &dev->dma[i - 1];

		if (dev->enhanced_mode) {
			ep->cfg = &dev->epregs[ne[i]];
			/*
			 * Set USB endpoint number, hardware allows same number
			 * in both directions.
			 */
			if (i > 0 && i < 5)
				writel(ne[i], &ep->cfg->ep_cfg);
			ep->regs = (struct net2280_ep_regs __iomem *)
				(((void __iomem *)&dev->epregs[ne[i]]) +
				ep_reg_addr[i]);
		} else {
			ep->cfg = &dev->epregs[i];
			ep->regs = &dev->epregs[i];
		}

		ep->fifo_size = (i != 0) ? 2048 : 512;

		ep_reset_338x(dev->regs, ep);
	}
	usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 512);

	dev->gadget.ep0 = &dev->ep[0].ep;
	dev->ep[0].stopped = 0;

	/* Link layer set up */
	if (dev->bug7734_patched) {
		/* disable U1/U2/LTM until the host negotiates them */
		tmp = readl(&dev->usb_ext->usbctl2) &
		    ~(BIT(U1_ENABLE) | BIT(U2_ENABLE) | BIT(LTM_ENABLE));
		writel(tmp, &dev->usb_ext->usbctl2);
	}

	/* Hardware Defect and Workaround */
	val = readl(&dev->ll_lfps_regs->ll_lfps_5);
	val &= ~(0xf << TIMER_LFPS_6US);
	val |= 0x5 << TIMER_LFPS_6US;
	writel(val, &dev->ll_lfps_regs->ll_lfps_5);

	val = readl(&dev->ll_lfps_regs->ll_lfps_6);
	val &= ~(0xffff << TIMER_LFPS_80US);
	val |= 0x0100 << TIMER_LFPS_80US;
	writel(val, &dev->ll_lfps_regs->ll_lfps_6);

	/*
	 * AA_AB Errata. Issue 4. Workaround for SuperSpeed USB
	 * Hot Reset Exit Handshake may Fail in Specific Case using
	 * Default Register Settings. Workaround for Enumeration test.
	 */
	val = readl(&dev->ll_tsn_regs->ll_tsn_counters_2);
	val &= ~(0x1f << HOT_TX_NORESET_TS2);
	val |= 0x10 << HOT_TX_NORESET_TS2;
	writel(val, &dev->ll_tsn_regs->ll_tsn_counters_2);

	val = readl(&dev->ll_tsn_regs->ll_tsn_counters_3);
	val &= ~(0x1f << HOT_RX_RESET_TS2);
	val |= 0x3 << HOT_RX_RESET_TS2;
	writel(val, &dev->ll_tsn_regs->ll_tsn_counters_3);

	/*
	 * Set Recovery Idle to Recover bit:
	 * - On SS connections, setting Recovery Idle to Recover Fmw improves
	 *   link robustness with various hosts and hubs.
	 * - It is safe to set for all connection speeds; all chip revisions.
	 * - R-M-W to leave other bits undisturbed.
	 * - Reference PLX TT-7372
	 */
	val = readl(&dev->ll_chicken_reg->ll_tsn_chicken_bit);
	val |= BIT(RECOVERY_IDLE_TO_RECOVER_FMW);
	writel(val, &dev->ll_chicken_reg->ll_tsn_chicken_bit);

	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);

	/* disable dedicated endpoints */
	writel(0x0D, &dev->dep[0].dep_cfg);
	writel(0x0D, &dev->dep[1].dep_cfg);
	writel(0x0E, &dev->dep[2].dep_cfg);
	writel(0x0E, &dev->dep[3].dep_cfg);
	writel(0x0F, &dev->dep[4].dep_cfg);
	writel(0x0C, &dev->dep[5].dep_cfg);
}
2180 static void usb_reinit(struct net2280
*dev
)
2182 if (dev
->quirks
& PLX_LEGACY
)
2183 return usb_reinit_228x(dev
);
2184 return usb_reinit_338x(dev
);
/*
 * Bring up ep0 and device-level interrupts on 228x parts, delegating a set
 * of standard control requests to the hardware's auto-responder.
 */
static void ep0_start_228x(struct net2280 *dev)
{
	writel(BIT(CLEAR_EP_HIDE_STATUS_PHASE) |
		BIT(CLEAR_NAK_OUT_PACKETS) |
		BIT(CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE),
		&dev->epregs[0].ep_rsp);

	/*
	 * hardware optionally handles a bunch of standard requests
	 * that the API hides from drivers anyway.  have it do so.
	 * endpoint status/features are handled in software, to
	 * help pass tests for some dubious behavior.
	 */
	writel(BIT(SET_TEST_MODE) |
		BIT(SET_ADDRESS) |
		BIT(DEVICE_SET_CLEAR_DEVICE_REMOTE_WAKEUP) |
		BIT(GET_DEVICE_STATUS) |
		BIT(GET_INTERFACE_STATUS),
		&dev->usb->stdrsp);
	writel(BIT(USB_ROOT_PORT_WAKEUP_ENABLE) |
		BIT(SELF_POWERED_USB_DEVICE) |
		BIT(REMOTE_WAKEUP_SUPPORT) |
		(dev->softconnect << USB_DETECT_ENABLE) |
		BIT(SELF_POWERED_STATUS),
		&dev->usb->usbctl);

	/* enable irqs so we can see ep0 and general operation */
	writel(BIT(SETUP_PACKET_INTERRUPT_ENABLE) |
		BIT(ENDPOINT_0_INTERRUPT_ENABLE),
		&dev->regs->pciirqenb0);
	writel(BIT(PCI_INTERRUPT_ENABLE) |
		BIT(PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE) |
		BIT(PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE) |
		BIT(PCI_RETRY_ABORT_INTERRUPT_ENABLE) |
		BIT(VBUS_INTERRUPT_ENABLE) |
		BIT(ROOT_PORT_RESET_INTERRUPT_ENABLE) |
		BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE),
		&dev->regs->pciirqenb1);

	/* don't leave any writes posted */
	(void) readl(&dev->usb->usbctl);
}
/*
 * Bring up ep0 and device-level interrupts on 338x parts.  The ep0
 * response setup is skipped until the Defect 7374 workaround completes.
 */
static void ep0_start_338x(struct net2280 *dev)
{

	if (dev->bug7734_patched)
		writel(BIT(CLEAR_NAK_OUT_PACKETS_MODE) |
		       BIT(SET_EP_HIDE_STATUS_PHASE),
		       &dev->epregs[0].ep_rsp);

	/*
	 * hardware optionally handles a bunch of standard requests
	 * that the API hides from drivers anyway.  have it do so.
	 * endpoint status/features are handled in software, to
	 * help pass tests for some dubious behavior.
	 */
	writel(BIT(SET_ISOCHRONOUS_DELAY) |
	       BIT(SET_SEL) |
	       BIT(SET_TEST_MODE) |
	       BIT(SET_ADDRESS) |
	       BIT(GET_INTERFACE_STATUS) |
	       BIT(GET_DEVICE_STATUS),
		&dev->usb->stdrsp);
	dev->wakeup_enable = 1;
	writel(BIT(USB_ROOT_PORT_WAKEUP_ENABLE) |
	       (dev->softconnect << USB_DETECT_ENABLE) |
	       BIT(DEVICE_REMOTE_WAKEUP_ENABLE),
	       &dev->usb->usbctl);

	/* enable irqs so we can see ep0 and general operation */
	writel(BIT(SETUP_PACKET_INTERRUPT_ENABLE) |
	       BIT(ENDPOINT_0_INTERRUPT_ENABLE),
	       &dev->regs->pciirqenb0);
	writel(BIT(PCI_INTERRUPT_ENABLE) |
	       BIT(ROOT_PORT_RESET_INTERRUPT_ENABLE) |
	       BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE) |
	       BIT(VBUS_INTERRUPT_ENABLE),
	       &dev->regs->pciirqenb1);

	/* don't leave any writes posted */
	(void)readl(&dev->usb->usbctl);
}
2271 static void ep0_start(struct net2280
*dev
)
2273 if (dev
->quirks
& PLX_LEGACY
)
2274 return ep0_start_228x(dev
);
2275 return ep0_start_338x(dev
);
2278 /* when a driver is successfully registered, it will receive
2279 * control requests including set_configuration(), which enables
2280 * non-control requests. then usb traffic follows until a
2281 * disconnect is reported. then a host may connect again, or
2282 * the driver might get unbound.
/*
 * usb_gadget_ops.udc_start: bind a gadget driver, create the debug sysfs
 * files, and enable host detection plus ep0 handling.
 *
 * NOTE(review): error-path labels reconstructed from damaged fragments —
 * confirm against upstream net2280.c.
 */
static int net2280_start(struct usb_gadget *_gadget,
		struct usb_gadget_driver *driver)
{
	struct net2280 *dev;
	int retval;
	unsigned i;

	/* insist on high speed support from the driver, since
	 * (dev->usb->xcvrdiag & FORCE_FULL_SPEED_MODE)
	 * "must not be used in normal operation"
	 */
	if (!driver || driver->max_speed < USB_SPEED_HIGH ||
			!driver->setup)
		return -EINVAL;

	dev = container_of(_gadget, struct net2280, gadget);

	for (i = 0; i < dev->n_ep; i++)
		dev->ep[i].irqs = 0;

	/* hook up the driver ... */
	driver->driver.bus = NULL;
	dev->driver = driver;

	retval = device_create_file(&dev->pdev->dev, &dev_attr_function);
	if (retval)
		goto err_unbind;
	retval = device_create_file(&dev->pdev->dev, &dev_attr_queues);
	if (retval)
		goto err_func;

	/* enable host detection and ep0; and we're ready
	 * for set_configuration as well as eventual disconnect.
	 */
	net2280_led_active(dev, 1);

	/* 338x: arm the Defect 7374 workaround before first enumeration */
	if ((dev->quirks & PLX_SUPERSPEED) && !dev->bug7734_patched)
		defect7374_enable_data_eps_zero(dev);

	ep0_start(dev);

	/* pci writes may still be posted */
	return 0;

err_func:
	device_remove_file(&dev->pdev->dev, &dev_attr_function);
err_unbind:
	dev->driver = NULL;
	return retval;
}
/*
 * Quiesce the controller: reset hardware, kill all queued requests, report
 * the disconnect to the gadget driver (if connected), then re-initialize
 * software endpoint state.  Called with dev->lock held.
 */
static void stop_activity(struct net2280 *dev, struct usb_gadget_driver *driver)
{
	int i;

	/* don't disconnect if it's not connected */
	if (dev->gadget.speed == USB_SPEED_UNKNOWN)
		driver = NULL;

	/* stop hardware; prevent new request submissions;
	 * and kill any outstanding requests.
	 */
	usb_reset(dev);
	for (i = 0; i < dev->n_ep; i++)
		nuke(&dev->ep[i]);

	/* report disconnect; the driver is already quiesced */
	if (driver) {
		/* drop the lock across the callback; it may requeue/unlink */
		spin_unlock(&dev->lock);
		driver->disconnect(&dev->gadget);
		spin_lock(&dev->lock);
	}

	usb_reinit(dev);
}
/*
 * usb_gadget_ops.udc_stop: quiesce the controller, remove the debug sysfs
 * files, and unbind the gadget driver.
 */
static int net2280_stop(struct usb_gadget *_gadget)
{
	struct net2280 *dev;
	unsigned long flags;

	dev = container_of(_gadget, struct net2280, gadget);

	spin_lock_irqsave(&dev->lock, flags);
	/* NULL driver: activity is stopped without a disconnect callback */
	stop_activity(dev, NULL);
	spin_unlock_irqrestore(&dev->lock, flags);

	net2280_led_active(dev, 0);

	device_remove_file(&dev->pdev->dev, &dev_attr_function);
	device_remove_file(&dev->pdev->dev, &dev_attr_queues);

	dev->driver = NULL;

	return 0;
}
2381 /*-------------------------------------------------------------------------*/
2383 /* handle ep0, ep-e, ep-f with 64 byte packets: packet per irq.
2384 * also works for dma-capable endpoints, in pio mode or just
2385 * to manually advance the queue after short OUT transfers.
/*
 * PIO-mode endpoint interrupt handler: acks endpoint status, tracks ep0
 * token irqs for data-stage/status synchronization and deferred stalls,
 * manually advances the DMA queue after short OUT packets (errata 0112/
 * 0106 workarounds), and moves PIO transfers along packet by packet.
 * Called with dev->lock held, from the IRQ path.
 *
 * NOTE(review): this chunk reached us with many lines missing; body
 * reconstructed from the visible fragments — confirm against upstream
 * net2280.c before trusting the ep0 token branches in detail.
 */
static void handle_ep_small(struct net2280_ep *ep)
{
	struct net2280_request	*req;
	u32			t;
	/* 0 error, 1 mid-data, 2 done */
	int			mode = 1;

	if (!list_empty(&ep->queue))
		req = list_entry(ep->queue.next,
			struct net2280_request, queue);
	else
		req = NULL;

	/* ack all, and handle what we care about */
	t = readl(&ep->regs->ep_stat);
	ep->irqs++;

	ep_vdbg(ep->dev, "%s ack ep_stat %08x, req %p\n",
			ep->ep.name, t, req ? &req->req : NULL);

	if (!ep->is_in || (ep->dev->quirks & PLX_2280))
		/* keep NAK_OUT_PACKETS set; it is cleared explicitly later */
		writel(t & ~BIT(NAK_OUT_PACKETS), &ep->regs->ep_stat);
	else
		/* Added for 2282 */
		writel(t, &ep->regs->ep_stat);

	/* for ep0, monitor token irqs to catch data stage length errors
	 * and to synchronize on status.
	 *
	 * also, to defer reporting of protocol stalls ... here's where
	 * data or status first appears, handling stalls here should never
	 * cause trouble on the host side..
	 *
	 * control requests could be slightly faster without token synch for
	 * status, but status can jam up that way.
	 */
	if (unlikely(ep->num == 0)) {
		if (ep->is_in) {
			/* status; stop NAKing */
			if (t & BIT(DATA_OUT_PING_TOKEN_INTERRUPT)) {
				if (ep->dev->protocol_stall) {
					ep->stopped = 1;
					set_halt(ep);
				}
				if (!req)
					allow_status(ep);
				mode = 2;
			/* reply to extra IN data tokens with a zlp */
			} else if (t & BIT(DATA_IN_TOKEN_INTERRUPT)) {
				if (ep->dev->protocol_stall) {
					ep->stopped = 1;
					set_halt(ep);
					mode = 2;
				} else if (ep->responded &&
						!req && !ep->stopped)
					write_fifo(ep, NULL);
			}
		} else {
			/* status; stop NAKing */
			if (t & BIT(DATA_IN_TOKEN_INTERRUPT)) {
				if (ep->dev->protocol_stall) {
					ep->stopped = 1;
					set_halt(ep);
				}
				mode = 2;
			/* an extra OUT token is an error */
			} else if (((t & BIT(DATA_OUT_PING_TOKEN_INTERRUPT)) &&
					req &&
					req->req.actual == req->req.length) ||
					(ep->responded && !req)) {
				ep->dev->protocol_stall = 1;
				set_halt(ep);
				ep->stopped = 1;
				if (req)
					done(ep, req, -EOVERFLOW);
				req = NULL;
			}
		}
	}

	if (unlikely(!req))
		return;

	/* manual DMA queue advance after short OUT */
	if (likely(ep->dma)) {
		if (t & BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT)) {
			u32	count;
			int	stopped = ep->stopped;

			/* TRANSFERRED works around OUT_DONE erratum 0112.
			 * we expect (N <= maxpacket) bytes; host wrote M.
			 * iff (M < N) we won't ever see a DMA interrupt.
			 */
			ep->stopped = 1;
			for (count = 0; ; t = readl(&ep->regs->ep_stat)) {

				/* any preceding dma transfers must finish.
				 * dma handles (M >= N), may empty the queue
				 */
				scan_dma_completions(ep);
				if (unlikely(list_empty(&ep->queue) ||
						ep->out_overflow)) {
					req = NULL;
					break;
				}
				req = list_entry(ep->queue.next,
					struct net2280_request, queue);

				/* here either (M < N), a "real" short rx;
				 * or (M == N) and the queue didn't empty
				 */
				if (likely(t & BIT(FIFO_EMPTY))) {
					count = readl(&ep->dma->dmacount);
					count &= DMA_BYTE_COUNT_MASK;
					if (readl(&ep->dma->dmadesc)
							!= req->td_dma)
						req = NULL;
					break;
				}
				udelay(1);
			}

			/* stop DMA, leave ep NAKing */
			writel(BIT(DMA_ABORT), &ep->dma->dmastat);
			spin_stop_dma(ep->dma);

			if (likely(req)) {
				req->td->dmacount = 0;
				t = readl(&ep->regs->ep_avail);
				dma_done(ep, req, count,
					(ep->out_overflow || t)
						? -EOVERFLOW : 0);
			}

			/* also flush to prevent erratum 0106 trouble */
			if (unlikely(ep->out_overflow ||
					(ep->dev->chiprev == 0x0100 &&
					ep->dev->gadget.speed
					== USB_SPEED_FULL))) {
				out_flush(ep);
				ep->out_overflow = 0;
			}

			/* (re)start dma if needed, stop NAKing */
			ep->stopped = stopped;
			if (!list_empty(&ep->queue))
				restart_dma(ep);
		} else
			ep_dbg(ep->dev, "%s dma ep_stat %08x ??\n",
					ep->ep.name, t);
		return;

	/* data packet(s) received (in the fifo, OUT) */
	} else if (t & BIT(DATA_PACKET_RECEIVED_INTERRUPT)) {
		if (read_fifo(ep, req) && ep->num != 0)
			mode = 2;

	/* data packet(s) transmitted (IN) */
	} else if (t & BIT(DATA_PACKET_TRANSMITTED_INTERRUPT)) {
		unsigned	len;

		len = req->req.length - req->req.actual;
		if (len > ep->ep.maxpacket)
			len = ep->ep.maxpacket;
		req->req.actual += len;

		/* if we wrote it all, we're usually done */
		/* send zlps until the status stage */
		if ((req->req.actual == req->req.length) &&
			(!req->req.zero || len != ep->ep.maxpacket) && ep->num)
				mode = 2;

	/* there was nothing to do ...  */
	} else if (mode == 1)
		return;

	/* done */
	if (mode == 2) {
		/* stream endpoints often resubmit/unlink in completion */
		done(ep, req, 0);

		/* maybe advance queue to next request */
		if (ep->num == 0) {
			/* NOTE:  net2280 could let gadget driver start the
			 * status stage later. since not all controllers let
			 * them control that, the api doesn't (yet) allow it.
			 */
			if (!ep->stopped)
				allow_status(ep);
			req = NULL;
		} else {
			if (!list_empty(&ep->queue) && !ep->stopped)
				req = list_entry(ep->queue.next,
					struct net2280_request, queue);
			else
				req = NULL;
			if (req && !ep->is_in)
				stop_out_naking(ep);
		}
	}

	/* is there a buffer for the next packet?
	 * for best streaming performance, make sure there is one.
	 */
	if (req && !ep->stopped) {

		/* load IN fifo with next packet (may be zlp) */
		if (t & BIT(DATA_PACKET_TRANSMITTED_INTERRUPT))
			write_fifo(ep, &req->req);
	}
}
2599 static struct net2280_ep
*get_ep_by_addr(struct net2280
*dev
, u16 wIndex
)
2601 struct net2280_ep
*ep
;
2603 if ((wIndex
& USB_ENDPOINT_NUMBER_MASK
) == 0)
2605 list_for_each_entry(ep
, &dev
->gadget
.ep_list
, ep
.ep_list
) {
2606 u8 bEndpointAddress
;
2610 bEndpointAddress
= ep
->desc
->bEndpointAddress
;
2611 if ((wIndex
^ bEndpointAddress
) & USB_DIR_IN
)
2613 if ((wIndex
& 0x0f) == (bEndpointAddress
& 0x0f))
2619 static void defect7374_workaround(struct net2280
*dev
, struct usb_ctrlrequest r
)
2621 u32 scratch
, fsmvalue
;
2622 u32 ack_wait_timeout
, state
;
2624 /* Workaround for Defect 7374 (U1/U2 erroneously rejected): */
2625 scratch
= get_idx_reg(dev
->regs
, SCRATCH
);
2626 fsmvalue
= scratch
& (0xf << DEFECT7374_FSM_FIELD
);
2627 scratch
&= ~(0xf << DEFECT7374_FSM_FIELD
);
2629 if (!((fsmvalue
== DEFECT7374_FSM_WAITING_FOR_CONTROL_READ
) &&
2630 (r
.bRequestType
& USB_DIR_IN
)))
2633 /* This is the first Control Read for this connection: */
2634 if (!(readl(&dev
->usb
->usbstat
) & BIT(SUPER_SPEED_MODE
))) {
2636 * Connection is NOT SS:
2637 * - Connection must be FS or HS.
2638 * - This FSM state should allow workaround software to
2639 * run after the next USB connection.
2641 scratch
|= DEFECT7374_FSM_NON_SS_CONTROL_READ
;
2642 dev
->bug7734_patched
= 1;
2643 goto restore_data_eps
;
2646 /* Connection is SS: */
2647 for (ack_wait_timeout
= 0;
2648 ack_wait_timeout
< DEFECT_7374_NUMBEROF_MAX_WAIT_LOOPS
;
2649 ack_wait_timeout
++) {
2651 state
= readl(&dev
->plregs
->pl_ep_status_1
)
2653 if ((state
>= (ACK_GOOD_NORMAL
<< STATE
)) &&
2654 (state
<= (ACK_GOOD_MORE_ACKS_TO_COME
<< STATE
))) {
2655 scratch
|= DEFECT7374_FSM_SS_CONTROL_READ
;
2656 dev
->bug7734_patched
= 1;
2661 * We have not yet received host's Data Phase ACK
2662 * - Wait and try again.
2664 udelay(DEFECT_7374_PROCESSOR_WAIT_TIME
);
2670 if (ack_wait_timeout
>= DEFECT_7374_NUMBEROF_MAX_WAIT_LOOPS
) {
2671 ep_err(dev
, "FAIL: Defect 7374 workaround waited but failed "
2672 "to detect SS host's data phase ACK.");
2673 ep_err(dev
, "PL_EP_STATUS_1(23:16):.Expected from 0x11 to 0x16"
2674 "got 0x%2.2x.\n", state
>> STATE
);
2676 ep_warn(dev
, "INFO: Defect 7374 workaround waited about\n"
2677 "%duSec for Control Read Data Phase ACK\n",
2678 DEFECT_7374_PROCESSOR_WAIT_TIME
* ack_wait_timeout
);
2683 * Restore data EPs to their pre-workaround settings (disabled,
2684 * initialized, and other details).
2686 defect7374_disable_data_eps(dev
);
2688 set_idx_reg(dev
->regs
, SCRATCH
, scratch
);
2693 static void ep_clear_seqnum(struct net2280_ep
*ep
)
2695 struct net2280
*dev
= ep
->dev
;
2697 static const u32 ep_pl
[9] = { 0, 3, 4, 7, 8, 2, 5, 6, 9 };
2699 val
= readl(&dev
->plregs
->pl_ep_ctrl
) & ~0x1f;
2700 val
|= ep_pl
[ep
->num
];
2701 writel(val
, &dev
->plregs
->pl_ep_ctrl
);
2702 val
|= BIT(SEQUENCE_NUMBER_RESET
);
2703 writel(val
, &dev
->plregs
->pl_ep_ctrl
);
2708 static void handle_stat0_irqs_superspeed(struct net2280
*dev
,
2709 struct net2280_ep
*ep
, struct usb_ctrlrequest r
)
2713 #define w_value le16_to_cpu(r.wValue)
2714 #define w_index le16_to_cpu(r.wIndex)
2715 #define w_length le16_to_cpu(r.wLength)
2717 switch (r
.bRequest
) {
2718 struct net2280_ep
*e
;
2721 case USB_REQ_SET_CONFIGURATION
:
2722 dev
->addressed_state
= !w_value
;
2725 case USB_REQ_GET_STATUS
:
2726 switch (r
.bRequestType
) {
2727 case (USB_DIR_IN
| USB_TYPE_STANDARD
| USB_RECIP_DEVICE
):
2728 status
= dev
->wakeup_enable
? 0x02 : 0x00;
2729 if (dev
->gadget
.is_selfpowered
)
2731 status
|= (dev
->u1_enable
<< 2 | dev
->u2_enable
<< 3 |
2732 dev
->ltm_enable
<< 4);
2733 writel(0, &dev
->epregs
[0].ep_irqenb
);
2734 set_fifo_bytecount(ep
, sizeof(status
));
2735 writel((__force u32
) status
, &dev
->epregs
[0].ep_data
);
2736 allow_status_338x(ep
);
2739 case (USB_DIR_IN
| USB_TYPE_STANDARD
| USB_RECIP_ENDPOINT
):
2740 e
= get_ep_by_addr(dev
, w_index
);
2743 status
= readl(&e
->regs
->ep_rsp
) &
2744 BIT(CLEAR_ENDPOINT_HALT
);
2745 writel(0, &dev
->epregs
[0].ep_irqenb
);
2746 set_fifo_bytecount(ep
, sizeof(status
));
2747 writel((__force u32
) status
, &dev
->epregs
[0].ep_data
);
2748 allow_status_338x(ep
);
2756 case USB_REQ_CLEAR_FEATURE
:
2757 switch (r
.bRequestType
) {
2758 case (USB_DIR_OUT
| USB_TYPE_STANDARD
| USB_RECIP_DEVICE
):
2759 if (!dev
->addressed_state
) {
2761 case USB_DEVICE_U1_ENABLE
:
2763 writel(readl(&dev
->usb_ext
->usbctl2
) &
2765 &dev
->usb_ext
->usbctl2
);
2766 allow_status_338x(ep
);
2767 goto next_endpoints3
;
2769 case USB_DEVICE_U2_ENABLE
:
2771 writel(readl(&dev
->usb_ext
->usbctl2
) &
2773 &dev
->usb_ext
->usbctl2
);
2774 allow_status_338x(ep
);
2775 goto next_endpoints3
;
2777 case USB_DEVICE_LTM_ENABLE
:
2778 dev
->ltm_enable
= 0;
2779 writel(readl(&dev
->usb_ext
->usbctl2
) &
2781 &dev
->usb_ext
->usbctl2
);
2782 allow_status_338x(ep
);
2783 goto next_endpoints3
;
2789 if (w_value
== USB_DEVICE_REMOTE_WAKEUP
) {
2790 dev
->wakeup_enable
= 0;
2791 writel(readl(&dev
->usb
->usbctl
) &
2792 ~BIT(DEVICE_REMOTE_WAKEUP_ENABLE
),
2794 allow_status_338x(ep
);
2799 case (USB_DIR_OUT
| USB_TYPE_STANDARD
| USB_RECIP_ENDPOINT
):
2800 e
= get_ep_by_addr(dev
, w_index
);
2803 if (w_value
!= USB_ENDPOINT_HALT
)
2805 ep_vdbg(dev
, "%s clear halt\n", e
->ep
.name
);
2807 * Workaround for SS SeqNum not cleared via
2808 * Endpoint Halt (Clear) bit. select endpoint
2812 if (!list_empty(&e
->queue
) && e
->td_dma
)
2822 case USB_REQ_SET_FEATURE
:
2823 switch (r
.bRequestType
) {
2824 case (USB_DIR_OUT
| USB_TYPE_STANDARD
| USB_RECIP_DEVICE
):
2825 if (!dev
->addressed_state
) {
2827 case USB_DEVICE_U1_ENABLE
:
2829 writel(readl(&dev
->usb_ext
->usbctl2
) |
2831 &dev
->usb_ext
->usbctl2
);
2832 allow_status_338x(ep
);
2833 goto next_endpoints3
;
2835 case USB_DEVICE_U2_ENABLE
:
2837 writel(readl(&dev
->usb_ext
->usbctl2
) |
2839 &dev
->usb_ext
->usbctl2
);
2840 allow_status_338x(ep
);
2841 goto next_endpoints3
;
2843 case USB_DEVICE_LTM_ENABLE
:
2844 dev
->ltm_enable
= 1;
2845 writel(readl(&dev
->usb_ext
->usbctl2
) |
2847 &dev
->usb_ext
->usbctl2
);
2848 allow_status_338x(ep
);
2849 goto next_endpoints3
;
2855 if (w_value
== USB_DEVICE_REMOTE_WAKEUP
) {
2856 dev
->wakeup_enable
= 1;
2857 writel(readl(&dev
->usb
->usbctl
) |
2858 BIT(DEVICE_REMOTE_WAKEUP_ENABLE
),
2860 allow_status_338x(ep
);
2865 case (USB_DIR_OUT
| USB_TYPE_STANDARD
| USB_RECIP_ENDPOINT
):
2866 e
= get_ep_by_addr(dev
, w_index
);
2867 if (!e
|| (w_value
!= USB_ENDPOINT_HALT
))
2871 ep
->dev
->protocol_stall
= 1;
2877 allow_status_338x(ep
);
2888 ep_vdbg(dev
, "setup %02x.%02x v%04x i%04x l%04x ep_cfg %08x\n",
2889 r
.bRequestType
, r
.bRequest
,
2890 w_value
, w_index
, w_length
,
2891 readl(&ep
->cfg
->ep_cfg
));
2894 spin_unlock(&dev
->lock
);
2895 tmp
= dev
->driver
->setup(&dev
->gadget
, &r
);
2896 spin_lock(&dev
->lock
);
2900 ep_vdbg(dev
, "req %02x.%02x protocol STALL; stat %d\n",
2901 r
.bRequestType
, r
.bRequest
, tmp
);
2902 dev
->protocol_stall
= 1;
2903 /* TD 9.9 Halt Endpoint test. TD 9.22 Set feature test */
2916 static void usb338x_handle_ep_intr(struct net2280
*dev
, u32 stat0
)
2921 for (index
= 0; index
< ARRAY_SIZE(ep_bit
); index
++) {
2922 bit
= BIT(ep_bit
[index
]);
2932 handle_ep_small(&dev
->ep
[index
]);
2936 static void handle_stat0_irqs(struct net2280
*dev
, u32 stat
)
2938 struct net2280_ep
*ep
;
2941 /* most of these don't need individual acks */
2942 stat
&= ~BIT(INTA_ASSERTED
);
2945 /* ep_dbg(dev, "irqstat0 %04x\n", stat); */
2947 /* starting a control request? */
2948 if (unlikely(stat
& BIT(SETUP_PACKET_INTERRUPT
))) {
2951 struct usb_ctrlrequest r
;
2954 struct net2280_request
*req
;
2956 if (dev
->gadget
.speed
== USB_SPEED_UNKNOWN
) {
2957 u32 val
= readl(&dev
->usb
->usbstat
);
2958 if (val
& BIT(SUPER_SPEED
)) {
2959 dev
->gadget
.speed
= USB_SPEED_SUPER
;
2960 usb_ep_set_maxpacket_limit(&dev
->ep
[0].ep
,
2961 EP0_SS_MAX_PACKET_SIZE
);
2962 } else if (val
& BIT(HIGH_SPEED
)) {
2963 dev
->gadget
.speed
= USB_SPEED_HIGH
;
2964 usb_ep_set_maxpacket_limit(&dev
->ep
[0].ep
,
2965 EP0_HS_MAX_PACKET_SIZE
);
2967 dev
->gadget
.speed
= USB_SPEED_FULL
;
2968 usb_ep_set_maxpacket_limit(&dev
->ep
[0].ep
,
2969 EP0_HS_MAX_PACKET_SIZE
);
2971 net2280_led_speed(dev
, dev
->gadget
.speed
);
2973 usb_speed_string(dev
->gadget
.speed
));
2979 /* make sure any leftover request state is cleared */
2980 stat
&= ~BIT(ENDPOINT_0_INTERRUPT
);
2981 while (!list_empty(&ep
->queue
)) {
2982 req
= list_entry(ep
->queue
.next
,
2983 struct net2280_request
, queue
);
2984 done(ep
, req
, (req
->req
.actual
== req
->req
.length
)
2988 dev
->protocol_stall
= 0;
2989 if (!(dev
->quirks
& PLX_SUPERSPEED
)) {
2990 if (ep
->dev
->quirks
& PLX_2280
)
2991 tmp
= BIT(FIFO_OVERFLOW
) |
2992 BIT(FIFO_UNDERFLOW
);
2996 writel(tmp
| BIT(TIMEOUT
) |
2997 BIT(USB_STALL_SENT
) |
2998 BIT(USB_IN_NAK_SENT
) |
2999 BIT(USB_IN_ACK_RCVD
) |
3000 BIT(USB_OUT_PING_NAK_SENT
) |
3001 BIT(USB_OUT_ACK_SENT
) |
3002 BIT(SHORT_PACKET_OUT_DONE_INTERRUPT
) |
3003 BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT
) |
3004 BIT(DATA_PACKET_RECEIVED_INTERRUPT
) |
3005 BIT(DATA_PACKET_TRANSMITTED_INTERRUPT
) |
3006 BIT(DATA_OUT_PING_TOKEN_INTERRUPT
) |
3007 BIT(DATA_IN_TOKEN_INTERRUPT
),
3008 &ep
->regs
->ep_stat
);
3010 u
.raw
[0] = readl(&dev
->usb
->setup0123
);
3011 u
.raw
[1] = readl(&dev
->usb
->setup4567
);
3013 cpu_to_le32s(&u
.raw
[0]);
3014 cpu_to_le32s(&u
.raw
[1]);
3016 if ((dev
->quirks
& PLX_SUPERSPEED
) && !dev
->bug7734_patched
)
3017 defect7374_workaround(dev
, u
.r
);
3021 #define w_value le16_to_cpu(u.r.wValue)
3022 #define w_index le16_to_cpu(u.r.wIndex)
3023 #define w_length le16_to_cpu(u.r.wLength)
3026 writel(BIT(SETUP_PACKET_INTERRUPT
), &dev
->regs
->irqstat0
);
3027 stat
^= BIT(SETUP_PACKET_INTERRUPT
);
3029 /* watch control traffic at the token level, and force
3030 * synchronization before letting the status stage happen.
3031 * FIXME ignore tokens we'll NAK, until driver responds.
3032 * that'll mean a lot less irqs for some drivers.
3034 ep
->is_in
= (u
.r
.bRequestType
& USB_DIR_IN
) != 0;
3036 scratch
= BIT(DATA_PACKET_TRANSMITTED_INTERRUPT
) |
3037 BIT(DATA_OUT_PING_TOKEN_INTERRUPT
) |
3038 BIT(DATA_IN_TOKEN_INTERRUPT
);
3039 stop_out_naking(ep
);
3041 scratch
= BIT(DATA_PACKET_RECEIVED_INTERRUPT
) |
3042 BIT(DATA_OUT_PING_TOKEN_INTERRUPT
) |
3043 BIT(DATA_IN_TOKEN_INTERRUPT
);
3044 writel(scratch
, &dev
->epregs
[0].ep_irqenb
);
3046 /* we made the hardware handle most lowlevel requests;
3047 * everything else goes uplevel to the gadget code.
3051 if (dev
->gadget
.speed
== USB_SPEED_SUPER
) {
3052 handle_stat0_irqs_superspeed(dev
, ep
, u
.r
);
3053 goto next_endpoints
;
3056 switch (u
.r
.bRequest
) {
3057 case USB_REQ_GET_STATUS
: {
3058 struct net2280_ep
*e
;
3061 /* hw handles device and interface status */
3062 if (u
.r
.bRequestType
!= (USB_DIR_IN
|USB_RECIP_ENDPOINT
))
3064 e
= get_ep_by_addr(dev
, w_index
);
3065 if (!e
|| w_length
> 2)
3068 if (readl(&e
->regs
->ep_rsp
) & BIT(SET_ENDPOINT_HALT
))
3069 status
= cpu_to_le32(1);
3071 status
= cpu_to_le32(0);
3073 /* don't bother with a request object! */
3074 writel(0, &dev
->epregs
[0].ep_irqenb
);
3075 set_fifo_bytecount(ep
, w_length
);
3076 writel((__force u32
)status
, &dev
->epregs
[0].ep_data
);
3078 ep_vdbg(dev
, "%s stat %02x\n", ep
->ep
.name
, status
);
3079 goto next_endpoints
;
3082 case USB_REQ_CLEAR_FEATURE
: {
3083 struct net2280_ep
*e
;
3085 /* hw handles device features */
3086 if (u
.r
.bRequestType
!= USB_RECIP_ENDPOINT
)
3088 if (w_value
!= USB_ENDPOINT_HALT
|| w_length
!= 0)
3090 e
= get_ep_by_addr(dev
, w_index
);
3094 ep_vdbg(dev
, "%s wedged, halt not cleared\n",
3097 ep_vdbg(dev
, "%s clear halt\n", e
->ep
.name
);
3099 if ((ep
->dev
->quirks
& PLX_SUPERSPEED
) &&
3100 !list_empty(&e
->queue
) && e
->td_dma
)
3104 goto next_endpoints
;
3107 case USB_REQ_SET_FEATURE
: {
3108 struct net2280_ep
*e
;
3110 /* hw handles device features */
3111 if (u
.r
.bRequestType
!= USB_RECIP_ENDPOINT
)
3113 if (w_value
!= USB_ENDPOINT_HALT
|| w_length
!= 0)
3115 e
= get_ep_by_addr(dev
, w_index
);
3118 if (e
->ep
.name
== ep0name
)
3121 if ((dev
->quirks
& PLX_SUPERSPEED
) && e
->dma
)
3124 ep_vdbg(dev
, "%s set halt\n", ep
->ep
.name
);
3125 goto next_endpoints
;
3130 ep_vdbg(dev
, "setup %02x.%02x v%04x i%04x l%04x "
3132 u
.r
.bRequestType
, u
.r
.bRequest
,
3133 w_value
, w_index
, w_length
,
3134 readl(&ep
->cfg
->ep_cfg
));
3136 spin_unlock(&dev
->lock
);
3137 tmp
= dev
->driver
->setup(&dev
->gadget
, &u
.r
);
3138 spin_lock(&dev
->lock
);
3141 /* stall ep0 on error */
3144 ep_vdbg(dev
, "req %02x.%02x protocol STALL; stat %d\n",
3145 u
.r
.bRequestType
, u
.r
.bRequest
, tmp
);
3146 dev
->protocol_stall
= 1;
3149 /* some in/out token irq should follow; maybe stall then.
3150 * driver must queue a request (even zlp) or halt ep0
3151 * before the host times out.
3160 if ((dev
->quirks
& PLX_SUPERSPEED
) && dev
->enhanced_mode
) {
3161 u32 mask
= (BIT(ENDPOINT_0_INTERRUPT
) |
3162 USB3380_IRQSTAT0_EP_INTR_MASK_IN
|
3163 USB3380_IRQSTAT0_EP_INTR_MASK_OUT
);
3166 usb338x_handle_ep_intr(dev
, stat
& mask
);
3170 /* endpoint data irq ? */
3171 scratch
= stat
& 0x7f;
3173 for (num
= 0; scratch
; num
++) {
3176 /* do this endpoint's FIFO and queue need tending? */
3178 if ((scratch
& t
) == 0)
3183 handle_ep_small(ep
);
3188 ep_dbg(dev
, "unhandled irqstat0 %08x\n", stat
);
3191 #define DMA_INTERRUPTS (BIT(DMA_D_INTERRUPT) | \
3192 BIT(DMA_C_INTERRUPT) | \
3193 BIT(DMA_B_INTERRUPT) | \
3194 BIT(DMA_A_INTERRUPT))
3195 #define PCI_ERROR_INTERRUPTS ( \
3196 BIT(PCI_MASTER_ABORT_RECEIVED_INTERRUPT) | \
3197 BIT(PCI_TARGET_ABORT_RECEIVED_INTERRUPT) | \
3198 BIT(PCI_RETRY_ABORT_INTERRUPT))
3200 static void handle_stat1_irqs(struct net2280
*dev
, u32 stat
)
3201 __releases(dev
->lock
)
3202 __acquires(dev
->lock
)
3204 struct net2280_ep
*ep
;
3205 u32 tmp
, num
, mask
, scratch
;
3207 /* after disconnect there's nothing else to do! */
3208 tmp
= BIT(VBUS_INTERRUPT
) | BIT(ROOT_PORT_RESET_INTERRUPT
);
3209 mask
= BIT(SUPER_SPEED
) | BIT(HIGH_SPEED
) | BIT(FULL_SPEED
);
3211 /* VBUS disconnect is indicated by VBUS_PIN and VBUS_INTERRUPT set.
3212 * Root Port Reset is indicated by ROOT_PORT_RESET_INTERRUPT set and
3213 * both HIGH_SPEED and FULL_SPEED clear (as ROOT_PORT_RESET_INTERRUPT
3214 * only indicates a change in the reset state).
3218 bool disconnect
= false;
3221 * Ignore disconnects and resets if the speed hasn't been set.
3222 * VBUS can bounce and there's always an initial reset.
3224 writel(tmp
, &dev
->regs
->irqstat1
);
3225 if (dev
->gadget
.speed
!= USB_SPEED_UNKNOWN
) {
3226 if ((stat
& BIT(VBUS_INTERRUPT
)) &&
3227 (readl(&dev
->usb
->usbctl
) &
3228 BIT(VBUS_PIN
)) == 0) {
3230 ep_dbg(dev
, "disconnect %s\n",
3231 dev
->driver
->driver
.name
);
3232 } else if ((stat
& BIT(ROOT_PORT_RESET_INTERRUPT
)) &&
3233 (readl(&dev
->usb
->usbstat
) & mask
)
3236 ep_dbg(dev
, "reset %s\n",
3237 dev
->driver
->driver
.name
);
3240 if (disconnect
|| reset
) {
3241 stop_activity(dev
, dev
->driver
);
3243 spin_unlock(&dev
->lock
);
3245 usb_gadget_udc_reset
3246 (&dev
->gadget
, dev
->driver
);
3248 (dev
->driver
->disconnect
)
3250 spin_lock(&dev
->lock
);
3256 /* vBUS can bounce ... one of many reasons to ignore the
3257 * notion of hotplug events on bus connect/disconnect!
3263 /* NOTE: chip stays in PCI D0 state for now, but it could
3264 * enter D1 to save more power
3266 tmp
= BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT
);
3268 writel(tmp
, &dev
->regs
->irqstat1
);
3269 if (stat
& BIT(SUSPEND_REQUEST_INTERRUPT
)) {
3270 if (dev
->driver
->suspend
)
3271 dev
->driver
->suspend(&dev
->gadget
);
3272 if (!enable_suspend
)
3273 stat
&= ~BIT(SUSPEND_REQUEST_INTERRUPT
);
3275 if (dev
->driver
->resume
)
3276 dev
->driver
->resume(&dev
->gadget
);
3277 /* at high speed, note erratum 0133 */
3282 /* clear any other status/irqs */
3284 writel(stat
, &dev
->regs
->irqstat1
);
3286 /* some status we can just ignore */
3287 if (dev
->quirks
& PLX_2280
)
3288 stat
&= ~(BIT(CONTROL_STATUS_INTERRUPT
) |
3289 BIT(SUSPEND_REQUEST_INTERRUPT
) |
3290 BIT(RESUME_INTERRUPT
) |
3291 BIT(SOF_INTERRUPT
));
3293 stat
&= ~(BIT(CONTROL_STATUS_INTERRUPT
) |
3294 BIT(RESUME_INTERRUPT
) |
3295 BIT(SOF_DOWN_INTERRUPT
) |
3296 BIT(SOF_INTERRUPT
));
3300 /* ep_dbg(dev, "irqstat1 %08x\n", stat);*/
3302 /* DMA status, for ep-{a,b,c,d} */
3303 scratch
= stat
& DMA_INTERRUPTS
;
3304 stat
&= ~DMA_INTERRUPTS
;
3306 for (num
= 0; scratch
; num
++) {
3307 struct net2280_dma_regs __iomem
*dma
;
3310 if ((tmp
& scratch
) == 0)
3314 ep
= &dev
->ep
[num
+ 1];
3320 /* clear ep's dma status */
3321 tmp
= readl(&dma
->dmastat
);
3322 writel(tmp
, &dma
->dmastat
);
3325 if (dev
->quirks
& PLX_SUPERSPEED
) {
3326 u32 r_dmacount
= readl(&dma
->dmacount
);
3327 if (!ep
->is_in
&& (r_dmacount
& 0x00FFFFFF) &&
3328 (tmp
& BIT(DMA_TRANSACTION_DONE_INTERRUPT
)))
3332 if (!(tmp
& BIT(DMA_TRANSACTION_DONE_INTERRUPT
))) {
3333 ep_dbg(ep
->dev
, "%s no xact done? %08x\n",
3339 /* OUT transfers terminate when the data from the
3340 * host is in our memory. Process whatever's done.
3341 * On this path, we know transfer's last packet wasn't
3342 * less than req->length. NAK_OUT_PACKETS may be set,
3343 * or the FIFO may already be holding new packets.
3345 * IN transfers can linger in the FIFO for a very
3346 * long time ... we ignore that for now, accounting
3347 * precisely (like PIO does) needs per-packet irqs
3349 scan_dma_completions(ep
);
3351 /* disable dma on inactive queues; else maybe restart */
3352 if (!list_empty(&ep
->queue
)) {
3353 tmp
= readl(&dma
->dmactl
);
3359 /* NOTE: there are other PCI errors we might usefully notice.
3360 * if they appear very often, here's where to try recovering.
3362 if (stat
& PCI_ERROR_INTERRUPTS
) {
3363 ep_err(dev
, "pci dma error; stat %08x\n", stat
);
3364 stat
&= ~PCI_ERROR_INTERRUPTS
;
3365 /* these are fatal errors, but "maybe" they won't
3368 stop_activity(dev
, dev
->driver
);
3374 ep_dbg(dev
, "unhandled irqstat1 %08x\n", stat
);
3377 static irqreturn_t
net2280_irq(int irq
, void *_dev
)
3379 struct net2280
*dev
= _dev
;
3381 /* shared interrupt, not ours */
3382 if ((dev
->quirks
& PLX_LEGACY
) &&
3383 (!(readl(&dev
->regs
->irqstat0
) & BIT(INTA_ASSERTED
))))
3386 spin_lock(&dev
->lock
);
3388 /* handle disconnect, dma, and more */
3389 handle_stat1_irqs(dev
, readl(&dev
->regs
->irqstat1
));
3391 /* control requests and PIO */
3392 handle_stat0_irqs(dev
, readl(&dev
->regs
->irqstat0
));
3394 if (dev
->quirks
& PLX_SUPERSPEED
) {
3395 /* re-enable interrupt to trigger any possible new interrupt */
3396 u32 pciirqenb1
= readl(&dev
->regs
->pciirqenb1
);
3397 writel(pciirqenb1
& 0x7FFFFFFF, &dev
->regs
->pciirqenb1
);
3398 writel(pciirqenb1
, &dev
->regs
->pciirqenb1
);
3401 spin_unlock(&dev
->lock
);
3406 /*-------------------------------------------------------------------------*/
3408 static void gadget_release(struct device
*_dev
)
3410 struct net2280
*dev
= dev_get_drvdata(_dev
);
3415 /* tear down the binding between this driver and the pci device */
3417 static void net2280_remove(struct pci_dev
*pdev
)
3419 struct net2280
*dev
= pci_get_drvdata(pdev
);
3421 usb_del_gadget_udc(&dev
->gadget
);
3423 BUG_ON(dev
->driver
);
3425 /* then clean up the resources we allocated during probe() */
3426 net2280_led_shutdown(dev
);
3427 if (dev
->requests
) {
3429 for (i
= 1; i
< 5; i
++) {
3430 if (!dev
->ep
[i
].dummy
)
3432 pci_pool_free(dev
->requests
, dev
->ep
[i
].dummy
,
3435 pci_pool_destroy(dev
->requests
);
3438 free_irq(pdev
->irq
, dev
);
3439 if (dev
->quirks
& PLX_SUPERSPEED
)
3440 pci_disable_msi(pdev
);
3444 release_mem_region(pci_resource_start(pdev
, 0),
3445 pci_resource_len(pdev
, 0));
3447 pci_disable_device(pdev
);
3448 device_remove_file(&pdev
->dev
, &dev_attr_registers
);
3450 ep_info(dev
, "unbind\n");
3453 /* wrap this driver around the specified device, but
3454 * don't respond over USB until a gadget driver binds to us.
3457 static int net2280_probe(struct pci_dev
*pdev
, const struct pci_device_id
*id
)
3459 struct net2280
*dev
;
3460 unsigned long resource
, len
;
3461 void __iomem
*base
= NULL
;
3464 /* alloc, and start init */
3465 dev
= kzalloc(sizeof(*dev
), GFP_KERNEL
);
3471 pci_set_drvdata(pdev
, dev
);
3472 spin_lock_init(&dev
->lock
);
3473 dev
->quirks
= id
->driver_data
;
3475 dev
->gadget
.ops
= &net2280_ops
;
3476 dev
->gadget
.max_speed
= (dev
->quirks
& PLX_SUPERSPEED
) ?
3477 USB_SPEED_SUPER
: USB_SPEED_HIGH
;
3479 /* the "gadget" abstracts/virtualizes the controller */
3480 dev
->gadget
.name
= driver_name
;
3482 /* now all the pci goodies ... */
3483 if (pci_enable_device(pdev
) < 0) {
3489 /* BAR 0 holds all the registers
3490 * BAR 1 is 8051 memory; unused here (note erratum 0103)
3491 * BAR 2 is fifo memory; unused here
3493 resource
= pci_resource_start(pdev
, 0);
3494 len
= pci_resource_len(pdev
, 0);
3495 if (!request_mem_region(resource
, len
, driver_name
)) {
3496 ep_dbg(dev
, "controller already in use\n");
3502 /* FIXME provide firmware download interface to put
3503 * 8051 code into the chip, e.g. to turn on PCI PM.
3506 base
= ioremap_nocache(resource
, len
);
3508 ep_dbg(dev
, "can't map memory\n");
3512 dev
->regs
= (struct net2280_regs __iomem
*) base
;
3513 dev
->usb
= (struct net2280_usb_regs __iomem
*) (base
+ 0x0080);
3514 dev
->pci
= (struct net2280_pci_regs __iomem
*) (base
+ 0x0100);
3515 dev
->dma
= (struct net2280_dma_regs __iomem
*) (base
+ 0x0180);
3516 dev
->dep
= (struct net2280_dep_regs __iomem
*) (base
+ 0x0200);
3517 dev
->epregs
= (struct net2280_ep_regs __iomem
*) (base
+ 0x0300);
3519 if (dev
->quirks
& PLX_SUPERSPEED
) {
3522 dev
->usb_ext
= (struct usb338x_usb_ext_regs __iomem
*)
3524 dev
->llregs
= (struct usb338x_ll_regs __iomem
*)
3526 dev
->ll_lfps_regs
= (struct usb338x_ll_lfps_regs __iomem
*)
3528 dev
->ll_tsn_regs
= (struct usb338x_ll_tsn_regs __iomem
*)
3530 dev
->ll_chicken_reg
= (struct usb338x_ll_chi_regs __iomem
*)
3532 dev
->plregs
= (struct usb338x_pl_regs __iomem
*)
3534 usbstat
= readl(&dev
->usb
->usbstat
);
3535 dev
->enhanced_mode
= !!(usbstat
& BIT(11));
3536 dev
->n_ep
= (dev
->enhanced_mode
) ? 9 : 5;
3537 /* put into initial config, link up all endpoints */
3538 fsmvalue
= get_idx_reg(dev
->regs
, SCRATCH
) &
3539 (0xf << DEFECT7374_FSM_FIELD
);
3540 /* See if firmware needs to set up for workaround: */
3541 if (fsmvalue
== DEFECT7374_FSM_SS_CONTROL_READ
) {
3542 dev
->bug7734_patched
= 1;
3543 writel(0, &dev
->usb
->usbctl
);
3545 dev
->bug7734_patched
= 0;
3547 dev
->enhanced_mode
= 0;
3549 /* put into initial config, link up all endpoints */
3550 writel(0, &dev
->usb
->usbctl
);
3556 /* irq setup after old hardware is cleaned up */
3558 ep_err(dev
, "No IRQ. Check PCI setup!\n");
3563 if (dev
->quirks
& PLX_SUPERSPEED
)
3564 if (pci_enable_msi(pdev
))
3565 ep_err(dev
, "Failed to enable MSI mode\n");
3567 if (request_irq(pdev
->irq
, net2280_irq
, IRQF_SHARED
,
3568 driver_name
, dev
)) {
3569 ep_err(dev
, "request interrupt %d failed\n", pdev
->irq
);
3576 /* NOTE: we know only the 32 LSBs of dma addresses may be nonzero */
3577 dev
->requests
= pci_pool_create("requests", pdev
,
3578 sizeof(struct net2280_dma
),
3579 0 /* no alignment requirements */,
3580 0 /* or page-crossing issues */);
3581 if (!dev
->requests
) {
3582 ep_dbg(dev
, "can't get request pool\n");
3586 for (i
= 1; i
< 5; i
++) {
3587 struct net2280_dma
*td
;
3589 td
= pci_pool_alloc(dev
->requests
, GFP_KERNEL
,
3590 &dev
->ep
[i
].td_dma
);
3592 ep_dbg(dev
, "can't get dummy %d\n", i
);
3596 td
->dmacount
= 0; /* not VALID */
3597 td
->dmadesc
= td
->dmaaddr
;
3598 dev
->ep
[i
].dummy
= td
;
3601 /* enable lower-overhead pci memory bursts during DMA */
3602 if (dev
->quirks
& PLX_LEGACY
)
3603 writel(BIT(DMA_MEMORY_WRITE_AND_INVALIDATE_ENABLE
) |
3605 * 256 write retries may not be enough...
3606 BIT(PCI_RETRY_ABORT_ENABLE) |
3608 BIT(DMA_READ_MULTIPLE_ENABLE
) |
3609 BIT(DMA_READ_LINE_ENABLE
),
3610 &dev
->pci
->pcimstctl
);
3611 /* erratum 0115 shouldn't appear: Linux inits PCI_LATENCY_TIMER */
3612 pci_set_master(pdev
);
3613 pci_try_set_mwi(pdev
);
3615 /* ... also flushes any posted pci writes */
3616 dev
->chiprev
= get_idx_reg(dev
->regs
, REG_CHIPREV
) & 0xffff;
3619 ep_info(dev
, "%s\n", driver_desc
);
3620 ep_info(dev
, "irq %d, pci mem %p, chip rev %04x\n",
3621 pdev
->irq
, base
, dev
->chiprev
);
3622 ep_info(dev
, "version: " DRIVER_VERSION
"; %s\n",
3623 dev
->enhanced_mode
? "enhanced mode" : "legacy mode");
3624 retval
= device_create_file(&pdev
->dev
, &dev_attr_registers
);
3628 retval
= usb_add_gadget_udc_release(&pdev
->dev
, &dev
->gadget
,
3636 net2280_remove(pdev
);
3640 /* make sure the board is quiescent; otherwise it will continue
3641 * generating IRQs across the upcoming reboot.
3644 static void net2280_shutdown(struct pci_dev
*pdev
)
3646 struct net2280
*dev
= pci_get_drvdata(pdev
);
3649 writel(0, &dev
->regs
->pciirqenb0
);
3650 writel(0, &dev
->regs
->pciirqenb1
);
3652 /* disable the pullup so the host will think we're gone */
3653 writel(0, &dev
->usb
->usbctl
);
3658 /*-------------------------------------------------------------------------*/
3660 static const struct pci_device_id pci_ids
[] = { {
3661 .class = ((PCI_CLASS_SERIAL_USB
<< 8) | 0xfe),
3663 .vendor
= PCI_VENDOR_ID_PLX_LEGACY
,
3665 .subvendor
= PCI_ANY_ID
,
3666 .subdevice
= PCI_ANY_ID
,
3667 .driver_data
= PLX_LEGACY
| PLX_2280
,
3669 .class = ((PCI_CLASS_SERIAL_USB
<< 8) | 0xfe),
3671 .vendor
= PCI_VENDOR_ID_PLX_LEGACY
,
3673 .subvendor
= PCI_ANY_ID
,
3674 .subdevice
= PCI_ANY_ID
,
3675 .driver_data
= PLX_LEGACY
,
3678 .class = ((PCI_CLASS_SERIAL_USB
<< 8) | 0xfe),
3680 .vendor
= PCI_VENDOR_ID_PLX
,
3682 .subvendor
= PCI_ANY_ID
,
3683 .subdevice
= PCI_ANY_ID
,
3684 .driver_data
= PLX_SUPERSPEED
,
3687 .class = ((PCI_CLASS_SERIAL_USB
<< 8) | 0xfe),
3689 .vendor
= PCI_VENDOR_ID_PLX
,
3691 .subvendor
= PCI_ANY_ID
,
3692 .subdevice
= PCI_ANY_ID
,
3693 .driver_data
= PLX_SUPERSPEED
,
3695 { /* end: all zeroes */ }
3697 MODULE_DEVICE_TABLE(pci
, pci_ids
);
3699 /* pci driver glue; this is a "new style" PCI driver module */
3700 static struct pci_driver net2280_pci_driver
= {
3701 .name
= (char *) driver_name
,
3702 .id_table
= pci_ids
,
3704 .probe
= net2280_probe
,
3705 .remove
= net2280_remove
,
3706 .shutdown
= net2280_shutdown
,
3708 /* FIXME add power management support */
3711 module_pci_driver(net2280_pci_driver
);
3713 MODULE_DESCRIPTION(DRIVER_DESC
);
3714 MODULE_AUTHOR("David Brownell");
3715 MODULE_LICENSE("GPL");