/*
 * Driver for the PLX NET2280 USB device controller.
 * Specs and errata are available from <http://www.plxtech.com>.
 *
 * PLX Technology Inc. (formerly NetChip Technology) supported the
 * development of this driver.
 *
 *
 * CODE STATUS HIGHLIGHTS
 *
 * This driver should work well with most "gadget" drivers, including
 * the Mass Storage, Serial, and Ethernet/RNDIS gadget drivers
 * as well as Gadget Zero and Gadgetfs.
 *
 * DMA is enabled by default.  Drivers using transfer queues might use
 * DMA chaining to remove IRQ latencies between transfers.  (Except when
 * short OUT transfers happen.)  Drivers can use the req->no_interrupt
 * hint to completely eliminate some IRQs, if a later IRQ is guaranteed
 * and DMA chaining is enabled.
 *
 * MSI is enabled by default.  The legacy IRQ is used if MSI couldn't
 * be enabled.
 *
 * Note that almost all the errata workarounds here are only needed for
 * rev1 chips.  Rev1a silicon (0110) fixes almost all of them.
 */

/*
 * Copyright (C) 2003 David Brownell
 * Copyright (C) 2003-2005 PLX Technology, Inc.
 * Copyright (C) 2014 Ricardo Ribalda - Qtechnology/AS
 *
 * Modified Seth Levy 2005 PLX Technology, Inc. to provide compatibility
 *	with 2282 chip
 *
 * Modified Ricardo Ribalda Qtechnology AS to provide compatibility
 *	with usb 338x chip. Based on PLX driver
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
44
45#undef DEBUG /* messages on error and most fault paths */
46#undef VERBOSE /* extra debug messages (success too) */
47
1da177e4
LT
48#include <linux/module.h>
49#include <linux/pci.h>
682d4c80 50#include <linux/dma-mapping.h>
1da177e4
LT
51#include <linux/kernel.h>
52#include <linux/delay.h>
53#include <linux/ioport.h>
1da177e4 54#include <linux/slab.h>
1da177e4
LT
55#include <linux/errno.h>
56#include <linux/init.h>
57#include <linux/timer.h>
58#include <linux/list.h>
59#include <linux/interrupt.h>
60#include <linux/moduleparam.h>
61#include <linux/device.h>
5f848137 62#include <linux/usb/ch9.h>
9454a57a 63#include <linux/usb/gadget.h>
b38b03b3 64#include <linux/prefetch.h>
1da177e4
LT
65
66#include <asm/byteorder.h>
67#include <asm/io.h>
68#include <asm/irq.h>
1da177e4
LT
69#include <asm/unaligned.h>
70
adc82f77
RR
71#define DRIVER_DESC "PLX NET228x/USB338x USB Peripheral Controller"
72#define DRIVER_VERSION "2005 Sept 27/v3.0"
1da177e4 73
1da177e4
LT
74#define EP_DONTUSE 13 /* nonzero */
75
76#define USE_RDK_LEDS /* GPIO pins control three LEDs */
77
78
79static const char driver_name [] = "net2280";
80static const char driver_desc [] = DRIVER_DESC;
81
adc82f77 82static const u32 ep_bit[9] = { 0, 17, 2, 19, 4, 1, 18, 3, 20 };
1da177e4 83static const char ep0name [] = "ep0";
901b3d75 84static const char *const ep_name [] = {
1da177e4
LT
85 ep0name,
86 "ep-a", "ep-b", "ep-c", "ep-d",
adc82f77 87 "ep-e", "ep-f", "ep-g", "ep-h",
1da177e4
LT
88};
89
90/* use_dma -- general goodness, fewer interrupts, less cpu load (vs PIO)
91 * use_dma_chaining -- dma descriptor queueing gives even more irq reduction
92 *
93 * The net2280 DMA engines are not tightly integrated with their FIFOs;
94 * not all cases are (yet) handled well in this driver or the silicon.
95 * Some gadget drivers work better with the dma support here than others.
96 * These two parameters let you use PIO or more aggressive DMA.
97 */
90ab5ee9
RR
98static bool use_dma = 1;
99static bool use_dma_chaining = 0;
adc82f77 100static bool use_msi = 1;
1da177e4
LT
101
102/* "modprobe net2280 use_dma=n" etc */
103module_param (use_dma, bool, S_IRUGO);
104module_param (use_dma_chaining, bool, S_IRUGO);
adc82f77 105module_param(use_msi, bool, S_IRUGO);
1da177e4
LT
106
107/* mode 0 == ep-{a,b,c,d} 1K fifo each
108 * mode 1 == ep-{a,b} 2K fifo each, ep-{c,d} unavailable
109 * mode 2 == ep-a 2K fifo, ep-{b,c} 1K each, ep-d unavailable
110 */
111static ushort fifo_mode = 0;
112
113/* "modprobe net2280 fifo_mode=1" etc */
114module_param (fifo_mode, ushort, 0644);
115
116/* enable_suspend -- When enabled, the driver will respond to
117 * USB suspend requests by powering down the NET2280. Otherwise,
25985edc 118 * USB suspend requests will be ignored. This is acceptable for
950ee4c8 119 * self-powered devices
1da177e4 120 */
90ab5ee9 121static bool enable_suspend = 0;
1da177e4
LT
122
123/* "modprobe net2280 enable_suspend=1" etc */
124module_param (enable_suspend, bool, S_IRUGO);
125
2f076077
AS
126/* force full-speed operation */
127static bool full_speed;
128module_param(full_speed, bool, 0444);
129MODULE_PARM_DESC(full_speed, "force full-speed mode -- for testing only!");
1da177e4
LT
130
131#define DIR_STRING(bAddress) (((bAddress) & USB_DIR_IN) ? "in" : "out")
132
#if defined(CONFIG_USB_GADGET_DEBUG_FILES) || defined (DEBUG)
/* Return a human-readable name for an endpoint transfer type,
 * taken from the descriptor's bmAttributes field.
 */
static char *type_string (u8 bmAttributes)
{
	switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) {
	case USB_ENDPOINT_XFER_BULK:	return "bulk";
	case USB_ENDPOINT_XFER_ISOC:	return "iso";
	case USB_ENDPOINT_XFER_INT:	return "intr";
	}
	return "control";
}
#endif
144
145#include "net2280.h"
146
/* little-endian forms of the DMA descriptor control bits */
#define valid_bit	cpu_to_le32(BIT(VALID_BIT))
#define dma_done_ie	cpu_to_le32(BIT(DMA_DONE_INTERRUPT_ENABLE))
1da177e4
LT
149
150/*-------------------------------------------------------------------------*/
adc82f77
RR
151static inline void enable_pciirqenb(struct net2280_ep *ep)
152{
153 u32 tmp = readl(&ep->dev->regs->pciirqenb0);
154
c2db8a8a 155 if (ep->dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY)
3e76fdcb 156 tmp |= BIT(ep->num);
adc82f77 157 else
3e76fdcb 158 tmp |= BIT(ep_bit[ep->num]);
adc82f77
RR
159 writel(tmp, &ep->dev->regs->pciirqenb0);
160
161 return;
162}
1da177e4
LT
163
164static int
165net2280_enable (struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
166{
167 struct net2280 *dev;
168 struct net2280_ep *ep;
169 u32 max, tmp;
170 unsigned long flags;
adc82f77 171 static const u32 ep_key[9] = { 1, 0, 1, 0, 1, 1, 0, 1, 0 };
1da177e4
LT
172
173 ep = container_of (_ep, struct net2280_ep, ep);
174 if (!_ep || !desc || ep->desc || _ep->name == ep0name
175 || desc->bDescriptorType != USB_DT_ENDPOINT)
176 return -EINVAL;
177 dev = ep->dev;
178 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
179 return -ESHUTDOWN;
180
181 /* erratum 0119 workaround ties up an endpoint number */
182 if ((desc->bEndpointAddress & 0x0f) == EP_DONTUSE)
183 return -EDOM;
184
c2db8a8a 185 if (dev->pdev->vendor == PCI_VENDOR_ID_PLX) {
adc82f77
RR
186 if ((desc->bEndpointAddress & 0x0f) >= 0x0c)
187 return -EDOM;
188 ep->is_in = !!usb_endpoint_dir_in(desc);
189 if (dev->enhanced_mode && ep->is_in && ep_key[ep->num])
190 return -EINVAL;
191 }
192
1da177e4 193 /* sanity check ep-e/ep-f since their fifos are small */
29cc8897 194 max = usb_endpoint_maxp (desc) & 0x1fff;
c2db8a8a
RR
195 if (ep->num > 4 && max > 64 &&
196 (dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY))
1da177e4
LT
197 return -ERANGE;
198
199 spin_lock_irqsave (&dev->lock, flags);
200 _ep->maxpacket = max & 0x7ff;
201 ep->desc = desc;
202
203 /* ep_reset() has already been called */
204 ep->stopped = 0;
8066134f 205 ep->wedged = 0;
1da177e4
LT
206 ep->out_overflow = 0;
207
208 /* set speed-dependent max packet; may kick in high bandwidth */
adc82f77 209 set_max_speed(ep, max);
1da177e4
LT
210
211 /* FIFO lines can't go to different packets. PIO is ok, so
212 * use it instead of troublesome (non-bulk) multi-packet DMA.
213 */
214 if (ep->dma && (max % 4) != 0 && use_dma_chaining) {
215 DEBUG (ep->dev, "%s, no dma for maxpacket %d\n",
216 ep->ep.name, ep->ep.maxpacket);
217 ep->dma = NULL;
218 }
219
220 /* set type, direction, address; reset fifo counters */
3e76fdcb 221 writel(BIT(FIFO_FLUSH), &ep->regs->ep_stat);
1da177e4
LT
222 tmp = (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK);
223 if (tmp == USB_ENDPOINT_XFER_INT) {
224 /* erratum 0105 workaround prevents hs NYET */
225 if (dev->chiprev == 0100
226 && dev->gadget.speed == USB_SPEED_HIGH
227 && !(desc->bEndpointAddress & USB_DIR_IN))
3e76fdcb 228 writel(BIT(CLEAR_NAK_OUT_PACKETS_MODE),
1da177e4
LT
229 &ep->regs->ep_rsp);
230 } else if (tmp == USB_ENDPOINT_XFER_BULK) {
231 /* catch some particularly blatant driver bugs */
adc82f77
RR
232 if ((dev->gadget.speed == USB_SPEED_SUPER && max != 1024) ||
233 (dev->gadget.speed == USB_SPEED_HIGH && max != 512) ||
234 (dev->gadget.speed == USB_SPEED_FULL && max > 64)) {
235 spin_unlock_irqrestore(&dev->lock, flags);
1da177e4
LT
236 return -ERANGE;
237 }
238 }
239 ep->is_iso = (tmp == USB_ENDPOINT_XFER_ISOC) ? 1 : 0;
adc82f77 240 /* Enable this endpoint */
c2db8a8a 241 if (dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY) {
adc82f77
RR
242 tmp <<= ENDPOINT_TYPE;
243 tmp |= desc->bEndpointAddress;
244 /* default full fifo lines */
245 tmp |= (4 << ENDPOINT_BYTE_COUNT);
3e76fdcb 246 tmp |= BIT(ENDPOINT_ENABLE);
adc82f77
RR
247 ep->is_in = (tmp & USB_DIR_IN) != 0;
248 } else {
249 /* In Legacy mode, only OUT endpoints are used */
250 if (dev->enhanced_mode && ep->is_in) {
251 tmp <<= IN_ENDPOINT_TYPE;
3e76fdcb 252 tmp |= BIT(IN_ENDPOINT_ENABLE);
adc82f77 253 /* Not applicable to Legacy */
3e76fdcb 254 tmp |= BIT(ENDPOINT_DIRECTION);
adc82f77
RR
255 } else {
256 tmp <<= OUT_ENDPOINT_TYPE;
3e76fdcb 257 tmp |= BIT(OUT_ENDPOINT_ENABLE);
adc82f77
RR
258 tmp |= (ep->is_in << ENDPOINT_DIRECTION);
259 }
260
261 tmp |= usb_endpoint_num(desc);
262 tmp |= (ep->ep.maxburst << MAX_BURST_SIZE);
263 }
264
265 /* Make sure all the registers are written before ep_rsp*/
266 wmb();
1da177e4
LT
267
268 /* for OUT transfers, block the rx fifo until a read is posted */
1da177e4 269 if (!ep->is_in)
3e76fdcb 270 writel(BIT(SET_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
950ee4c8 271 else if (dev->pdev->device != 0x2280) {
901b3d75
DB
272 /* Added for 2282, Don't use nak packets on an in endpoint,
273 * this was ignored on 2280
274 */
3e76fdcb
RR
275 writel(BIT(CLEAR_NAK_OUT_PACKETS) |
276 BIT(CLEAR_NAK_OUT_PACKETS_MODE), &ep->regs->ep_rsp);
950ee4c8 277 }
1da177e4 278
adc82f77 279 writel(tmp, &ep->cfg->ep_cfg);
1da177e4
LT
280
281 /* enable irqs */
282 if (!ep->dma) { /* pio, per-packet */
adc82f77 283 enable_pciirqenb(ep);
1da177e4 284
3e76fdcb
RR
285 tmp = BIT(DATA_PACKET_RECEIVED_INTERRUPT_ENABLE) |
286 BIT(DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE);
950ee4c8
GL
287 if (dev->pdev->device == 0x2280)
288 tmp |= readl (&ep->regs->ep_irqenb);
1da177e4
LT
289 writel (tmp, &ep->regs->ep_irqenb);
290 } else { /* dma, per-request */
3e76fdcb 291 tmp = BIT((8 + ep->num)); /* completion */
1da177e4
LT
292 tmp |= readl (&dev->regs->pciirqenb1);
293 writel (tmp, &dev->regs->pciirqenb1);
294
295 /* for short OUT transfers, dma completions can't
296 * advance the queue; do it pio-style, by hand.
297 * NOTE erratum 0112 workaround #2
298 */
299 if ((desc->bEndpointAddress & USB_DIR_IN) == 0) {
3e76fdcb 300 tmp = BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT_ENABLE);
1da177e4
LT
301 writel (tmp, &ep->regs->ep_irqenb);
302
adc82f77 303 enable_pciirqenb(ep);
1da177e4
LT
304 }
305 }
306
307 tmp = desc->bEndpointAddress;
308 DEBUG (dev, "enabled %s (ep%d%s-%s) %s max %04x\n",
309 _ep->name, tmp & 0x0f, DIR_STRING (tmp),
310 type_string (desc->bmAttributes),
311 ep->dma ? "dma" : "pio", max);
312
313 /* pci writes may still be posted */
314 spin_unlock_irqrestore (&dev->lock, flags);
315 return 0;
316}
317
318static int handshake (u32 __iomem *ptr, u32 mask, u32 done, int usec)
319{
320 u32 result;
321
322 do {
323 result = readl (ptr);
324 if (result == ~(u32)0) /* "device unplugged" */
325 return -ENODEV;
326 result &= mask;
327 if (result == done)
328 return 0;
329 udelay (1);
330 usec--;
331 } while (usec > 0);
332 return -ETIMEDOUT;
333}
334
901b3d75 335static const struct usb_ep_ops net2280_ep_ops;
1da177e4 336
adc82f77
RR
337static void ep_reset_228x(struct net2280_regs __iomem *regs,
338 struct net2280_ep *ep)
1da177e4
LT
339{
340 u32 tmp;
341
342 ep->desc = NULL;
343 INIT_LIST_HEAD (&ep->queue);
344
e117e742 345 usb_ep_set_maxpacket_limit(&ep->ep, ~0);
1da177e4
LT
346 ep->ep.ops = &net2280_ep_ops;
347
348 /* disable the dma, irqs, endpoint... */
349 if (ep->dma) {
350 writel (0, &ep->dma->dmactl);
3e76fdcb
RR
351 writel(BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT) |
352 BIT(DMA_TRANSACTION_DONE_INTERRUPT) |
353 BIT(DMA_ABORT),
354 &ep->dma->dmastat);
1da177e4
LT
355
356 tmp = readl (&regs->pciirqenb0);
3e76fdcb 357 tmp &= ~BIT(ep->num);
1da177e4
LT
358 writel (tmp, &regs->pciirqenb0);
359 } else {
360 tmp = readl (&regs->pciirqenb1);
3e76fdcb 361 tmp &= ~BIT((8 + ep->num)); /* completion */
1da177e4
LT
362 writel (tmp, &regs->pciirqenb1);
363 }
364 writel (0, &ep->regs->ep_irqenb);
365
366 /* init to our chosen defaults, notably so that we NAK OUT
367 * packets until the driver queues a read (+note erratum 0112)
368 */
950ee4c8 369 if (!ep->is_in || ep->dev->pdev->device == 0x2280) {
3e76fdcb
RR
370 tmp = BIT(SET_NAK_OUT_PACKETS_MODE) |
371 BIT(SET_NAK_OUT_PACKETS) |
372 BIT(CLEAR_EP_HIDE_STATUS_PHASE) |
373 BIT(CLEAR_INTERRUPT_MODE);
950ee4c8
GL
374 } else {
375 /* added for 2282 */
3e76fdcb
RR
376 tmp = BIT(CLEAR_NAK_OUT_PACKETS_MODE) |
377 BIT(CLEAR_NAK_OUT_PACKETS) |
378 BIT(CLEAR_EP_HIDE_STATUS_PHASE) |
379 BIT(CLEAR_INTERRUPT_MODE);
950ee4c8 380 }
1da177e4
LT
381
382 if (ep->num != 0) {
3e76fdcb
RR
383 tmp |= BIT(CLEAR_ENDPOINT_TOGGLE) |
384 BIT(CLEAR_ENDPOINT_HALT);
1da177e4
LT
385 }
386 writel (tmp, &ep->regs->ep_rsp);
387
388 /* scrub most status bits, and flush any fifo state */
950ee4c8 389 if (ep->dev->pdev->device == 0x2280)
3e76fdcb
RR
390 tmp = BIT(FIFO_OVERFLOW) |
391 BIT(FIFO_UNDERFLOW);
950ee4c8
GL
392 else
393 tmp = 0;
394
3e76fdcb
RR
395 writel(tmp | BIT(TIMEOUT) |
396 BIT(USB_STALL_SENT) |
397 BIT(USB_IN_NAK_SENT) |
398 BIT(USB_IN_ACK_RCVD) |
399 BIT(USB_OUT_PING_NAK_SENT) |
400 BIT(USB_OUT_ACK_SENT) |
401 BIT(FIFO_FLUSH) |
402 BIT(SHORT_PACKET_OUT_DONE_INTERRUPT) |
403 BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT) |
404 BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
405 BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
406 BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
407 BIT(DATA_IN_TOKEN_INTERRUPT)
1da177e4
LT
408 , &ep->regs->ep_stat);
409
410 /* fifo size is handled separately */
411}
412
adc82f77
RR
413static void ep_reset_338x(struct net2280_regs __iomem *regs,
414 struct net2280_ep *ep)
415{
416 u32 tmp, dmastat;
417
418 ep->desc = NULL;
419 INIT_LIST_HEAD(&ep->queue);
420
421 usb_ep_set_maxpacket_limit(&ep->ep, ~0);
422 ep->ep.ops = &net2280_ep_ops;
423
424 /* disable the dma, irqs, endpoint... */
425 if (ep->dma) {
426 writel(0, &ep->dma->dmactl);
3e76fdcb
RR
427 writel(BIT(DMA_ABORT_DONE_INTERRUPT) |
428 BIT(DMA_PAUSE_DONE_INTERRUPT) |
429 BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT) |
430 BIT(DMA_TRANSACTION_DONE_INTERRUPT)
431 /* | BIT(DMA_ABORT) */
adc82f77
RR
432 , &ep->dma->dmastat);
433
434 dmastat = readl(&ep->dma->dmastat);
435 if (dmastat == 0x5002) {
436 WARNING(ep->dev, "The dmastat return = %x!!\n",
437 dmastat);
438 writel(0x5a, &ep->dma->dmastat);
439 }
440
441 tmp = readl(&regs->pciirqenb0);
3e76fdcb 442 tmp &= ~BIT(ep_bit[ep->num]);
adc82f77
RR
443 writel(tmp, &regs->pciirqenb0);
444 } else {
445 if (ep->num < 5) {
446 tmp = readl(&regs->pciirqenb1);
3e76fdcb 447 tmp &= ~BIT((8 + ep->num)); /* completion */
adc82f77
RR
448 writel(tmp, &regs->pciirqenb1);
449 }
450 }
451 writel(0, &ep->regs->ep_irqenb);
452
3e76fdcb
RR
453 writel(BIT(SHORT_PACKET_OUT_DONE_INTERRUPT) |
454 BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT) |
455 BIT(FIFO_OVERFLOW) |
456 BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
457 BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
458 BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
459 BIT(DATA_IN_TOKEN_INTERRUPT), &ep->regs->ep_stat);
adc82f77
RR
460}
461
1da177e4
LT
462static void nuke (struct net2280_ep *);
463
464static int net2280_disable (struct usb_ep *_ep)
465{
466 struct net2280_ep *ep;
467 unsigned long flags;
468
469 ep = container_of (_ep, struct net2280_ep, ep);
470 if (!_ep || !ep->desc || _ep->name == ep0name)
471 return -EINVAL;
472
473 spin_lock_irqsave (&ep->dev->lock, flags);
474 nuke (ep);
adc82f77 475
c2db8a8a 476 if (ep->dev->pdev->vendor == PCI_VENDOR_ID_PLX)
adc82f77
RR
477 ep_reset_338x(ep->dev->regs, ep);
478 else
479 ep_reset_228x(ep->dev->regs, ep);
1da177e4
LT
480
481 VDEBUG (ep->dev, "disabled %s %s\n",
482 ep->dma ? "dma" : "pio", _ep->name);
483
484 /* synch memory views with the device */
adc82f77 485 (void)readl(&ep->cfg->ep_cfg);
1da177e4
LT
486
487 if (use_dma && !ep->dma && ep->num >= 1 && ep->num <= 4)
488 ep->dma = &ep->dev->dma [ep->num - 1];
489
490 spin_unlock_irqrestore (&ep->dev->lock, flags);
491 return 0;
492}
493
494/*-------------------------------------------------------------------------*/
495
496static struct usb_request *
55016f10 497net2280_alloc_request (struct usb_ep *_ep, gfp_t gfp_flags)
1da177e4
LT
498{
499 struct net2280_ep *ep;
500 struct net2280_request *req;
501
502 if (!_ep)
503 return NULL;
504 ep = container_of (_ep, struct net2280_ep, ep);
505
7039f422 506 req = kzalloc(sizeof(*req), gfp_flags);
1da177e4
LT
507 if (!req)
508 return NULL;
509
1da177e4
LT
510 INIT_LIST_HEAD (&req->queue);
511
512 /* this dma descriptor may be swapped with the previous dummy */
513 if (ep->dma) {
514 struct net2280_dma *td;
515
516 td = pci_pool_alloc (ep->dev->requests, gfp_flags,
517 &req->td_dma);
518 if (!td) {
519 kfree (req);
520 return NULL;
521 }
522 td->dmacount = 0; /* not VALID */
1da177e4
LT
523 td->dmadesc = td->dmaaddr;
524 req->td = td;
525 }
526 return &req->req;
527}
528
529static void
530net2280_free_request (struct usb_ep *_ep, struct usb_request *_req)
531{
532 struct net2280_ep *ep;
533 struct net2280_request *req;
534
535 ep = container_of (_ep, struct net2280_ep, ep);
536 if (!_ep || !_req)
537 return;
538
539 req = container_of (_req, struct net2280_request, req);
540 WARN_ON (!list_empty (&req->queue));
541 if (req->td)
542 pci_pool_free (ep->dev->requests, req->td, req->td_dma);
543 kfree (req);
544}
545
546/*-------------------------------------------------------------------------*/
547
1da177e4
LT
548/* load a packet into the fifo we use for usb IN transfers.
549 * works for all endpoints.
550 *
551 * NOTE: pio with ep-a..ep-d could stuff multiple packets into the fifo
552 * at a time, but this code is simpler because it knows it only writes
553 * one packet. ep-a..ep-d should use dma instead.
554 */
555static void
556write_fifo (struct net2280_ep *ep, struct usb_request *req)
557{
558 struct net2280_ep_regs __iomem *regs = ep->regs;
559 u8 *buf;
560 u32 tmp;
561 unsigned count, total;
562
563 /* INVARIANT: fifo is currently empty. (testable) */
564
565 if (req) {
566 buf = req->buf + req->actual;
567 prefetch (buf);
568 total = req->length - req->actual;
569 } else {
570 total = 0;
571 buf = NULL;
572 }
573
574 /* write just one packet at a time */
575 count = ep->ep.maxpacket;
576 if (count > total) /* min() cannot be used on a bitfield */
577 count = total;
578
579 VDEBUG (ep->dev, "write %s fifo (IN) %d bytes%s req %p\n",
580 ep->ep.name, count,
581 (count != ep->ep.maxpacket) ? " (short)" : "",
582 req);
583 while (count >= 4) {
584 /* NOTE be careful if you try to align these. fifo lines
585 * should normally be full (4 bytes) and successive partial
586 * lines are ok only in certain cases.
587 */
588 tmp = get_unaligned ((u32 *)buf);
589 cpu_to_le32s (&tmp);
590 writel (tmp, &regs->ep_data);
591 buf += 4;
592 count -= 4;
593 }
594
595 /* last fifo entry is "short" unless we wrote a full packet.
596 * also explicitly validate last word in (periodic) transfers
597 * when maxpacket is not a multiple of 4 bytes.
598 */
599 if (count || total < ep->ep.maxpacket) {
600 tmp = count ? get_unaligned ((u32 *)buf) : count;
601 cpu_to_le32s (&tmp);
602 set_fifo_bytecount (ep, count & 0x03);
603 writel (tmp, &regs->ep_data);
604 }
605
606 /* pci writes may still be posted */
607}
608
609/* work around erratum 0106: PCI and USB race over the OUT fifo.
610 * caller guarantees chiprev 0100, out endpoint is NAKing, and
611 * there's no real data in the fifo.
612 *
613 * NOTE: also used in cases where that erratum doesn't apply:
614 * where the host wrote "too much" data to us.
615 */
616static void out_flush (struct net2280_ep *ep)
617{
618 u32 __iomem *statp;
619 u32 tmp;
620
621 ASSERT_OUT_NAKING (ep);
622
623 statp = &ep->regs->ep_stat;
3e76fdcb
RR
624 writel(BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
625 BIT(DATA_PACKET_RECEIVED_INTERRUPT)
1da177e4 626 , statp);
3e76fdcb 627 writel(BIT(FIFO_FLUSH), statp);
1da177e4
LT
628 mb ();
629 tmp = readl (statp);
3e76fdcb 630 if (tmp & BIT(DATA_OUT_PING_TOKEN_INTERRUPT)
1da177e4
LT
631 /* high speed did bulk NYET; fifo isn't filling */
632 && ep->dev->gadget.speed == USB_SPEED_FULL) {
633 unsigned usec;
634
635 usec = 50; /* 64 byte bulk/interrupt */
3e76fdcb
RR
636 handshake(statp, BIT(USB_OUT_PING_NAK_SENT),
637 BIT(USB_OUT_PING_NAK_SENT), usec);
1da177e4
LT
638 /* NAK done; now CLEAR_NAK_OUT_PACKETS is safe */
639 }
640}
641
642/* unload packet(s) from the fifo we use for usb OUT transfers.
643 * returns true iff the request completed, because of short packet
644 * or the request buffer having filled with full packets.
645 *
646 * for ep-a..ep-d this will read multiple packets out when they
647 * have been accepted.
648 */
649static int
650read_fifo (struct net2280_ep *ep, struct net2280_request *req)
651{
652 struct net2280_ep_regs __iomem *regs = ep->regs;
653 u8 *buf = req->req.buf + req->req.actual;
654 unsigned count, tmp, is_short;
655 unsigned cleanup = 0, prevent = 0;
656
657 /* erratum 0106 ... packets coming in during fifo reads might
658 * be incompletely rejected. not all cases have workarounds.
659 */
660 if (ep->dev->chiprev == 0x0100
661 && ep->dev->gadget.speed == USB_SPEED_FULL) {
662 udelay (1);
663 tmp = readl (&ep->regs->ep_stat);
3e76fdcb 664 if ((tmp & BIT(NAK_OUT_PACKETS)))
1da177e4 665 cleanup = 1;
3e76fdcb 666 else if ((tmp & BIT(FIFO_FULL))) {
1da177e4
LT
667 start_out_naking (ep);
668 prevent = 1;
669 }
670 /* else: hope we don't see the problem */
671 }
672
673 /* never overflow the rx buffer. the fifo reads packets until
674 * it sees a short one; we might not be ready for them all.
675 */
676 prefetchw (buf);
677 count = readl (&regs->ep_avail);
678 if (unlikely (count == 0)) {
679 udelay (1);
680 tmp = readl (&ep->regs->ep_stat);
681 count = readl (&regs->ep_avail);
682 /* handled that data already? */
3e76fdcb 683 if (count == 0 && (tmp & BIT(NAK_OUT_PACKETS)) == 0)
1da177e4
LT
684 return 0;
685 }
686
687 tmp = req->req.length - req->req.actual;
688 if (count > tmp) {
689 /* as with DMA, data overflow gets flushed */
690 if ((tmp % ep->ep.maxpacket) != 0) {
691 ERROR (ep->dev,
692 "%s out fifo %d bytes, expected %d\n",
693 ep->ep.name, count, tmp);
694 req->req.status = -EOVERFLOW;
695 cleanup = 1;
696 /* NAK_OUT_PACKETS will be set, so flushing is safe;
697 * the next read will start with the next packet
698 */
699 } /* else it's a ZLP, no worries */
700 count = tmp;
701 }
702 req->req.actual += count;
703
704 is_short = (count == 0) || ((count % ep->ep.maxpacket) != 0);
705
706 VDEBUG (ep->dev, "read %s fifo (OUT) %d bytes%s%s%s req %p %d/%d\n",
707 ep->ep.name, count, is_short ? " (short)" : "",
708 cleanup ? " flush" : "", prevent ? " nak" : "",
709 req, req->req.actual, req->req.length);
710
711 while (count >= 4) {
712 tmp = readl (&regs->ep_data);
713 cpu_to_le32s (&tmp);
714 put_unaligned (tmp, (u32 *)buf);
715 buf += 4;
716 count -= 4;
717 }
718 if (count) {
719 tmp = readl (&regs->ep_data);
720 /* LE conversion is implicit here: */
721 do {
722 *buf++ = (u8) tmp;
723 tmp >>= 8;
724 } while (--count);
725 }
726 if (cleanup)
727 out_flush (ep);
728 if (prevent) {
3e76fdcb 729 writel(BIT(CLEAR_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
1da177e4
LT
730 (void) readl (&ep->regs->ep_rsp);
731 }
732
733 return is_short || ((req->req.actual == req->req.length)
734 && !req->req.zero);
735}
736
737/* fill out dma descriptor to match a given request */
738static void
739fill_dma_desc (struct net2280_ep *ep, struct net2280_request *req, int valid)
740{
741 struct net2280_dma *td = req->td;
742 u32 dmacount = req->req.length;
743
744 /* don't let DMA continue after a short OUT packet,
745 * so overruns can't affect the next transfer.
746 * in case of overruns on max-size packets, we can't
747 * stop the fifo from filling but we can flush it.
748 */
749 if (ep->is_in)
3e76fdcb 750 dmacount |= BIT(DMA_DIRECTION);
901b3d75
DB
751 if ((!ep->is_in && (dmacount % ep->ep.maxpacket) != 0)
752 || ep->dev->pdev->device != 0x2280)
3e76fdcb 753 dmacount |= BIT(END_OF_CHAIN);
1da177e4
LT
754
755 req->valid = valid;
756 if (valid)
3e76fdcb 757 dmacount |= BIT(VALID_BIT);
1da177e4 758 if (likely(!req->req.no_interrupt || !use_dma_chaining))
3e76fdcb 759 dmacount |= BIT(DMA_DONE_INTERRUPT_ENABLE);
1da177e4
LT
760
761 /* td->dmadesc = previously set by caller */
762 td->dmaaddr = cpu_to_le32 (req->req.dma);
763
764 /* 2280 may be polling VALID_BIT through ep->dma->dmadesc */
765 wmb ();
da2bbdcc 766 td->dmacount = cpu_to_le32(dmacount);
1da177e4
LT
767}
768
769static const u32 dmactl_default =
3e76fdcb
RR
770 BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT) |
771 BIT(DMA_CLEAR_COUNT_ENABLE) |
1da177e4 772 /* erratum 0116 workaround part 1 (use POLLING) */
3e76fdcb
RR
773 (POLL_100_USEC << DESCRIPTOR_POLLING_RATE) |
774 BIT(DMA_VALID_BIT_POLLING_ENABLE) |
775 BIT(DMA_VALID_BIT_ENABLE) |
776 BIT(DMA_SCATTER_GATHER_ENABLE) |
1da177e4 777 /* erratum 0116 workaround part 2 (no AUTOSTART) */
3e76fdcb 778 BIT(DMA_ENABLE);
1da177e4
LT
779
780static inline void spin_stop_dma (struct net2280_dma_regs __iomem *dma)
781{
3e76fdcb 782 handshake(&dma->dmactl, BIT(DMA_ENABLE), 0, 50);
1da177e4
LT
783}
784
785static inline void stop_dma (struct net2280_dma_regs __iomem *dma)
786{
3e76fdcb 787 writel(readl(&dma->dmactl) & ~BIT(DMA_ENABLE), &dma->dmactl);
1da177e4
LT
788 spin_stop_dma (dma);
789}
790
791static void start_queue (struct net2280_ep *ep, u32 dmactl, u32 td_dma)
792{
793 struct net2280_dma_regs __iomem *dma = ep->dma;
3e76fdcb 794 unsigned int tmp = BIT(VALID_BIT) | (ep->is_in << DMA_DIRECTION);
1da177e4 795
950ee4c8 796 if (ep->dev->pdev->device != 0x2280)
3e76fdcb 797 tmp |= BIT(END_OF_CHAIN);
950ee4c8
GL
798
799 writel (tmp, &dma->dmacount);
1da177e4
LT
800 writel (readl (&dma->dmastat), &dma->dmastat);
801
802 writel (td_dma, &dma->dmadesc);
c2db8a8a 803 if (ep->dev->pdev->vendor == PCI_VENDOR_ID_PLX)
3e76fdcb 804 dmactl |= BIT(DMA_REQUEST_OUTSTANDING);
1da177e4
LT
805 writel (dmactl, &dma->dmactl);
806
807 /* erratum 0116 workaround part 3: pci arbiter away from net2280 */
808 (void) readl (&ep->dev->pci->pcimstctl);
809
3e76fdcb 810 writel(BIT(DMA_START), &dma->dmastat);
1da177e4
LT
811
812 if (!ep->is_in)
813 stop_out_naking (ep);
814}
815
816static void start_dma (struct net2280_ep *ep, struct net2280_request *req)
817{
818 u32 tmp;
819 struct net2280_dma_regs __iomem *dma = ep->dma;
820
821 /* FIXME can't use DMA for ZLPs */
822
823 /* on this path we "know" there's no dma active (yet) */
3e76fdcb 824 WARN_ON(readl(&dma->dmactl) & BIT(DMA_ENABLE));
1da177e4
LT
825 writel (0, &ep->dma->dmactl);
826
827 /* previous OUT packet might have been short */
828 if (!ep->is_in && ((tmp = readl (&ep->regs->ep_stat))
3e76fdcb
RR
829 & BIT(NAK_OUT_PACKETS)) != 0) {
830 writel(BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT),
1da177e4
LT
831 &ep->regs->ep_stat);
832
833 tmp = readl (&ep->regs->ep_avail);
834 if (tmp) {
835 writel (readl (&dma->dmastat), &dma->dmastat);
836
837 /* transfer all/some fifo data */
838 writel (req->req.dma, &dma->dmaaddr);
839 tmp = min (tmp, req->req.length);
840
841 /* dma irq, faking scatterlist status */
842 req->td->dmacount = cpu_to_le32 (req->req.length - tmp);
3e76fdcb 843 writel(BIT(DMA_DONE_INTERRUPT_ENABLE)
1da177e4
LT
844 | tmp, &dma->dmacount);
845 req->td->dmadesc = 0;
846 req->valid = 1;
847
3e76fdcb
RR
848 writel(BIT(DMA_ENABLE), &dma->dmactl);
849 writel(BIT(DMA_START), &dma->dmastat);
1da177e4
LT
850 return;
851 }
852 }
853
854 tmp = dmactl_default;
855
856 /* force packet boundaries between dma requests, but prevent the
857 * controller from automagically writing a last "short" packet
858 * (zero length) unless the driver explicitly said to do that.
859 */
860 if (ep->is_in) {
861 if (likely ((req->req.length % ep->ep.maxpacket) != 0
862 || req->req.zero)) {
3e76fdcb 863 tmp |= BIT(DMA_FIFO_VALIDATE);
1da177e4
LT
864 ep->in_fifo_validate = 1;
865 } else
866 ep->in_fifo_validate = 0;
867 }
868
869 /* init req->td, pointing to the current dummy */
870 req->td->dmadesc = cpu_to_le32 (ep->td_dma);
871 fill_dma_desc (ep, req, 1);
872
873 if (!use_dma_chaining)
3e76fdcb 874 req->td->dmacount |= cpu_to_le32(BIT(END_OF_CHAIN));
1da177e4
LT
875
876 start_queue (ep, tmp, req->td_dma);
877}
878
adc82f77
RR
879static inline void resume_dma(struct net2280_ep *ep)
880{
3e76fdcb 881 writel(readl(&ep->dma->dmactl) | BIT(DMA_ENABLE), &ep->dma->dmactl);
adc82f77
RR
882
883 ep->dma_started = true;
884}
885
886static inline void ep_stop_dma(struct net2280_ep *ep)
887{
3e76fdcb 888 writel(readl(&ep->dma->dmactl) & ~BIT(DMA_ENABLE), &ep->dma->dmactl);
adc82f77
RR
889 spin_stop_dma(ep->dma);
890
891 ep->dma_started = false;
892}
893
1da177e4
LT
894static inline void
895queue_dma (struct net2280_ep *ep, struct net2280_request *req, int valid)
896{
897 struct net2280_dma *end;
898 dma_addr_t tmp;
899
900 /* swap new dummy for old, link; fill and maybe activate */
901 end = ep->dummy;
902 ep->dummy = req->td;
903 req->td = end;
904
905 tmp = ep->td_dma;
906 ep->td_dma = req->td_dma;
907 req->td_dma = tmp;
908
909 end->dmadesc = cpu_to_le32 (ep->td_dma);
910
911 fill_dma_desc (ep, req, valid);
912}
913
914static void
915done (struct net2280_ep *ep, struct net2280_request *req, int status)
916{
917 struct net2280 *dev;
918 unsigned stopped = ep->stopped;
919
920 list_del_init (&req->queue);
921
922 if (req->req.status == -EINPROGRESS)
923 req->req.status = status;
924 else
925 status = req->req.status;
926
927 dev = ep->dev;
ae4d7933
FB
928 if (ep->dma)
929 usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);
1da177e4
LT
930
931 if (status && status != -ESHUTDOWN)
932 VDEBUG (dev, "complete %s req %p stat %d len %u/%u\n",
933 ep->ep.name, &req->req, status,
934 req->req.actual, req->req.length);
935
936 /* don't modify queue heads during completion callback */
937 ep->stopped = 1;
938 spin_unlock (&dev->lock);
939 req->req.complete (&ep->ep, &req->req);
940 spin_lock (&dev->lock);
941 ep->stopped = stopped;
942}
943
944/*-------------------------------------------------------------------------*/
945
946static int
55016f10 947net2280_queue (struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
1da177e4
LT
948{
949 struct net2280_request *req;
950 struct net2280_ep *ep;
951 struct net2280 *dev;
952 unsigned long flags;
953
954 /* we always require a cpu-view buffer, so that we can
955 * always use pio (as fallback or whatever).
956 */
957 req = container_of (_req, struct net2280_request, req);
958 if (!_req || !_req->complete || !_req->buf
959 || !list_empty (&req->queue))
960 return -EINVAL;
961 if (_req->length > (~0 & DMA_BYTE_COUNT_MASK))
962 return -EDOM;
963 ep = container_of (_ep, struct net2280_ep, ep);
964 if (!_ep || (!ep->desc && ep->num != 0))
965 return -EINVAL;
966 dev = ep->dev;
967 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
968 return -ESHUTDOWN;
969
970 /* FIXME implement PIO fallback for ZLPs with DMA */
971 if (ep->dma && _req->length == 0)
972 return -EOPNOTSUPP;
973
974 /* set up dma mapping in case the caller didn't */
ae4d7933
FB
975 if (ep->dma) {
976 int ret;
977
978 ret = usb_gadget_map_request(&dev->gadget, _req,
979 ep->is_in);
980 if (ret)
981 return ret;
1da177e4
LT
982 }
983
984#if 0
985 VDEBUG (dev, "%s queue req %p, len %d buf %p\n",
986 _ep->name, _req, _req->length, _req->buf);
987#endif
988
989 spin_lock_irqsave (&dev->lock, flags);
990
991 _req->status = -EINPROGRESS;
992 _req->actual = 0;
993
994 /* kickstart this i/o queue? */
995 if (list_empty (&ep->queue) && !ep->stopped) {
adc82f77
RR
996 /* DMA request while EP halted */
997 if (ep->dma &&
3e76fdcb 998 (readl(&ep->regs->ep_rsp) & BIT(CLEAR_ENDPOINT_HALT)) &&
c2db8a8a 999 (dev->pdev->vendor == PCI_VENDOR_ID_PLX)) {
adc82f77
RR
1000 int valid = 1;
1001 if (ep->is_in) {
1002 int expect;
1003 expect = likely(req->req.zero ||
1004 ((req->req.length %
1005 ep->ep.maxpacket) != 0));
1006 if (expect != ep->in_fifo_validate)
1007 valid = 0;
1008 }
1009 queue_dma(ep, req, valid);
1010 }
1da177e4 1011 /* use DMA if the endpoint supports it, else pio */
adc82f77 1012 else if (ep->dma)
1da177e4
LT
1013 start_dma (ep, req);
1014 else {
1015 /* maybe there's no control data, just status ack */
1016 if (ep->num == 0 && _req->length == 0) {
1017 allow_status (ep);
1018 done (ep, req, 0);
1019 VDEBUG (dev, "%s status ack\n", ep->ep.name);
1020 goto done;
1021 }
1022
1023 /* PIO ... stuff the fifo, or unblock it. */
1024 if (ep->is_in)
1025 write_fifo (ep, _req);
1026 else if (list_empty (&ep->queue)) {
1027 u32 s;
1028
1029 /* OUT FIFO might have packet(s) buffered */
1030 s = readl (&ep->regs->ep_stat);
3e76fdcb 1031 if ((s & BIT(FIFO_EMPTY)) == 0) {
1da177e4
LT
1032 /* note: _req->short_not_ok is
1033 * ignored here since PIO _always_
1034 * stops queue advance here, and
1035 * _req->status doesn't change for
1036 * short reads (only _req->actual)
1037 */
1038 if (read_fifo (ep, req)) {
1039 done (ep, req, 0);
1040 if (ep->num == 0)
1041 allow_status (ep);
1042 /* don't queue it */
1043 req = NULL;
1044 } else
1045 s = readl (&ep->regs->ep_stat);
1046 }
1047
1048 /* don't NAK, let the fifo fill */
3e76fdcb
RR
1049 if (req && (s & BIT(NAK_OUT_PACKETS)))
1050 writel(BIT(CLEAR_NAK_OUT_PACKETS),
1da177e4
LT
1051 &ep->regs->ep_rsp);
1052 }
1053 }
1054
1055 } else if (ep->dma) {
1056 int valid = 1;
1057
1058 if (ep->is_in) {
1059 int expect;
1060
1061 /* preventing magic zlps is per-engine state, not
1062 * per-transfer; irq logic must recover hiccups.
1063 */
1064 expect = likely (req->req.zero
1065 || (req->req.length % ep->ep.maxpacket) != 0);
1066 if (expect != ep->in_fifo_validate)
1067 valid = 0;
1068 }
1069 queue_dma (ep, req, valid);
1070
1071 } /* else the irq handler advances the queue. */
1072
1f26e28d 1073 ep->responded = 1;
1da177e4
LT
1074 if (req)
1075 list_add_tail (&req->queue, &ep->queue);
1076done:
1077 spin_unlock_irqrestore (&dev->lock, flags);
1078
1079 /* pci writes may still be posted */
1080 return 0;
1081}
1082
/* retire a DMA request: derive the actual byte count from the residue
 * left in dmacount (actual = requested - remaining), then call done()
 */
static inline void
dma_done (
	struct net2280_ep *ep,
	struct net2280_request *req,
	u32 dmacount,
	int status
)
{
	req->req.actual = req->req.length - (DMA_BYTE_COUNT_MASK & dmacount);
	done (ep, req, status);
}
1094
1095static void restart_dma (struct net2280_ep *ep);
1096
/*
 * scan_dma_completions - retire all head-of-queue requests whose DMA
 * descriptors the hardware has already completed (VALID_BIT cleared).
 * Stops at the first still-pending descriptor.  Caller holds dev->lock.
 */
static void scan_dma_completions (struct net2280_ep *ep)
{
	/* only look at descriptors that were "naturally" retired,
	 * so fifo and list head state won't matter
	 */
	while (!list_empty (&ep->queue)) {
		struct net2280_request	*req;
		u32			tmp;

		req = list_entry (ep->queue.next,
				struct net2280_request, queue);
		if (!req->valid)
			break;
		/* order the descriptor read after the valid check */
		rmb ();
		tmp = le32_to_cpup (&req->td->dmacount);
		if ((tmp & BIT(VALID_BIT)) != 0)
			break;

		/* SHORT_PACKET_TRANSFERRED_INTERRUPT handles "usb-short"
		 * cases where DMA must be aborted; this code handles
		 * all non-abort DMA completions.
		 */
		if (unlikely (req->td->dmadesc == 0)) {
			/* paranoia */
			tmp = readl (&ep->dma->dmacount);
			if (tmp & DMA_BYTE_COUNT_MASK)
				break;
			/* single transfer mode */
			dma_done (ep, req, tmp, 0);
			break;
		} else if (!ep->is_in
				&& (req->req.length % ep->ep.maxpacket) != 0) {
			/* OUT transfer not a multiple of maxpacket: check
			 * for the short-read errata cases (228x only)
			 */
			tmp = readl (&ep->regs->ep_stat);
			if (ep->dev->pdev->vendor == PCI_VENDOR_ID_PLX)
				return dma_done(ep, req, tmp, 0);

			/* AVOID TROUBLE HERE by not issuing short reads from
			 * your gadget driver.  That helps avoids errata 0121,
			 * 0122, and 0124; not all cases trigger the warning.
			 */
			if ((tmp & BIT(NAK_OUT_PACKETS)) == 0) {
				WARNING (ep->dev, "%s lost packet sync!\n",
						ep->ep.name);
				req->req.status = -EOVERFLOW;
			} else if ((tmp = readl (&ep->regs->ep_avail)) != 0) {
				/* fifo gets flushed later */
				ep->out_overflow = 1;
				DEBUG (ep->dev, "%s dma, discard %d len %d\n",
						ep->ep.name, tmp,
						req->req.length);
				req->req.status = -EOVERFLOW;
			}
		}
		dma_done (ep, req, tmp, 0);
	}
}
1153
1154static void restart_dma (struct net2280_ep *ep)
1155{
1156 struct net2280_request *req;
1157 u32 dmactl = dmactl_default;
1158
1159 if (ep->stopped)
1160 return;
1161 req = list_entry (ep->queue.next, struct net2280_request, queue);
1162
1163 if (!use_dma_chaining) {
1164 start_dma (ep, req);
1165 return;
1166 }
1167
1168 /* the 2280 will be processing the queue unless queue hiccups after
1169 * the previous transfer:
1170 * IN: wanted automagic zlp, head doesn't (or vice versa)
1171 * DMA_FIFO_VALIDATE doesn't init from dma descriptors.
1172 * OUT: was "usb-short", we must restart.
1173 */
1174 if (ep->is_in && !req->valid) {
1175 struct net2280_request *entry, *prev = NULL;
1176 int reqmode, done = 0;
1177
1178 DEBUG (ep->dev, "%s dma hiccup td %p\n", ep->ep.name, req->td);
1179 ep->in_fifo_validate = likely (req->req.zero
1180 || (req->req.length % ep->ep.maxpacket) != 0);
1181 if (ep->in_fifo_validate)
3e76fdcb 1182 dmactl |= BIT(DMA_FIFO_VALIDATE);
1da177e4 1183 list_for_each_entry (entry, &ep->queue, queue) {
320f3459 1184 __le32 dmacount;
1da177e4
LT
1185
1186 if (entry == req)
1187 continue;
1188 dmacount = entry->td->dmacount;
1189 if (!done) {
1190 reqmode = likely (entry->req.zero
1191 || (entry->req.length
1192 % ep->ep.maxpacket) != 0);
1193 if (reqmode == ep->in_fifo_validate) {
1194 entry->valid = 1;
1195 dmacount |= valid_bit;
1196 entry->td->dmacount = dmacount;
1197 prev = entry;
1198 continue;
1199 } else {
1200 /* force a hiccup */
1201 prev->td->dmacount |= dma_done_ie;
1202 done = 1;
1203 }
1204 }
1205
1206 /* walk the rest of the queue so unlinks behave */
1207 entry->valid = 0;
1208 dmacount &= ~valid_bit;
1209 entry->td->dmacount = dmacount;
1210 prev = entry;
1211 }
1212 }
1213
1214 writel (0, &ep->dma->dmactl);
1215 start_queue (ep, dmactl, req->td_dma);
1216}
1217
adc82f77 1218static void abort_dma_228x(struct net2280_ep *ep)
1da177e4
LT
1219{
1220 /* abort the current transfer */
1221 if (likely (!list_empty (&ep->queue))) {
1222 /* FIXME work around errata 0121, 0122, 0124 */
3e76fdcb 1223 writel(BIT(DMA_ABORT), &ep->dma->dmastat);
1da177e4
LT
1224 spin_stop_dma (ep->dma);
1225 } else
1226 stop_dma (ep->dma);
1227 scan_dma_completions (ep);
1228}
1229
adc82f77
RR
/* abort the 338x-family DMA engine: request abort, then spin until the
 * channel reports it has stopped.  Caller holds dev->lock.
 */
static void abort_dma_338x(struct net2280_ep *ep)
{
	writel(BIT(DMA_ABORT), &ep->dma->dmastat);
	spin_stop_dma(ep->dma);
}
1235
1236static void abort_dma(struct net2280_ep *ep)
1237{
c2db8a8a 1238 if (ep->dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY)
adc82f77
RR
1239 return abort_dma_228x(ep);
1240 return abort_dma_338x(ep);
1241}
1242
1da177e4
LT
1243/* dequeue ALL requests */
1244static void nuke (struct net2280_ep *ep)
1245{
1246 struct net2280_request *req;
1247
1248 /* called with spinlock held */
1249 ep->stopped = 1;
1250 if (ep->dma)
1251 abort_dma (ep);
1252 while (!list_empty (&ep->queue)) {
1253 req = list_entry (ep->queue.next,
1254 struct net2280_request,
1255 queue);
1256 done (ep, req, -ESHUTDOWN);
1257 }
1258}
1259
/* dequeue JUST ONE request
 * usb_ep_ops.dequeue: quiesce DMA, verify the request is still on this
 * endpoint's queue, unlink it (patching hardware descriptor chains when
 * DMA chaining is active), complete it with -ECONNRESET, and resume or
 * stop the DMA engine as appropriate.
 */
static int net2280_dequeue (struct usb_ep *_ep, struct usb_request *_req)
{
	struct net2280_ep	*ep;
	struct net2280_request	*req;
	unsigned long		flags;
	u32			dmactl;
	int			stopped;

	ep = container_of (_ep, struct net2280_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0) || !_req)
		return -EINVAL;

	spin_lock_irqsave (&ep->dev->lock, flags);
	stopped = ep->stopped;

	/* quiesce dma while we patch the queue */
	dmactl = 0;
	ep->stopped = 1;
	if (ep->dma) {
		dmactl = readl (&ep->dma->dmactl);
		/* WARNING erratum 0127 may kick in ... */
		stop_dma (ep->dma);
		/* retire anything the engine already finished */
		scan_dma_completions (ep);
	}

	/* make sure it's still queued on this endpoint */
	list_for_each_entry (req, &ep->queue, queue) {
		if (&req->req == _req)
			break;
	}
	/* if the loop ran off the end, req points at the list head and
	 * the comparison below fails, so a missing request is rejected
	 */
	if (&req->req != _req) {
		spin_unlock_irqrestore (&ep->dev->lock, flags);
		return -EINVAL;
	}

	/* queue head may be partially complete. */
	if (ep->queue.next == &req->queue) {
		if (ep->dma) {
			DEBUG (ep->dev, "unlink (%s) dma\n", _ep->name);
			_req->status = -ECONNRESET;
			abort_dma (ep);
			if (likely (ep->queue.next == &req->queue)) {
				// NOTE: misreports single-transfer mode
				req->td->dmacount = 0;	/* invalidate */
				dma_done (ep, req,
					readl (&ep->dma->dmacount),
					-ECONNRESET);
			}
		} else {
			DEBUG (ep->dev, "unlink (%s) pio\n", _ep->name);
			done (ep, req, -ECONNRESET);
		}
		req = NULL;

	/* patch up hardware chaining data */
	} else if (ep->dma && use_dma_chaining) {
		if (req->queue.prev == ep->queue.next) {
			/* removing the descriptor right after the active one:
			 * point the engine past it
			 */
			writel (le32_to_cpu (req->td->dmadesc),
				&ep->dma->dmadesc);
			if (req->td->dmacount & dma_done_ie)
				writel (readl (&ep->dma->dmacount)
						| le32_to_cpu(dma_done_ie),
					&ep->dma->dmacount);
		} else {
			struct net2280_request	*prev;

			/* splice the predecessor's descriptor around ours */
			prev = list_entry (req->queue.prev,
				struct net2280_request, queue);
			prev->td->dmadesc = req->td->dmadesc;
			if (req->td->dmacount & dma_done_ie)
				prev->td->dmacount |= dma_done_ie;
		}
	}

	if (req)
		done (ep, req, -ECONNRESET);
	ep->stopped = stopped;

	if (ep->dma) {
		/* turn off dma on inactive queues */
		if (list_empty (&ep->queue))
			stop_dma (ep->dma);
		else if (!ep->stopped) {
			/* resume current request, or start new one */
			if (req)
				writel (dmactl, &ep->dma->dmactl);
			else
				start_dma (ep, list_entry (ep->queue.next,
					struct net2280_request, queue));
		}
	}

	spin_unlock_irqrestore (&ep->dev->lock, flags);
	return 0;
}
1356
1357/*-------------------------------------------------------------------------*/
1358
1359static int net2280_fifo_status (struct usb_ep *_ep);
1360
/*
 * Set or clear an endpoint halt (optionally as a "wedge", which survives
 * CLEAR_FEATURE until the next config/reset).  Refuses for isochronous
 * endpoints, for non-empty queues, and for an IN fifo that still holds
 * data.  Returns 0 or a negative errno.
 */
static int
net2280_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
{
	struct net2280_ep	*ep;
	unsigned long		flags;
	int			retval = 0;

	ep = container_of (_ep, struct net2280_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0))
		return -EINVAL;
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;
	if (ep->desc /* not ep0 */ && (ep->desc->bmAttributes & 0x03)
						== USB_ENDPOINT_XFER_ISOC)
		return -EINVAL;

	spin_lock_irqsave (&ep->dev->lock, flags);
	if (!list_empty (&ep->queue))
		retval = -EAGAIN;
	else if (ep->is_in && value && net2280_fifo_status (_ep) != 0)
		retval = -EAGAIN;
	else {
		VDEBUG (ep->dev, "%s %s %s\n", _ep->name,
				value ? "set" : "clear",
				wedged ? "wedge" : "halt");
		/* set/clear, then synch memory views with the device */
		if (value) {
			/* ep0 stalls are handled via protocol_stall */
			if (ep->num == 0)
				ep->dev->protocol_stall = 1;
			else
				set_halt (ep);
			if (wedged)
				ep->wedged = 1;
		} else {
			clear_halt (ep);
			/* NOTE(review): the queue was verified empty above, so
			 * this !list_empty() condition appears unreachable and
			 * restart_dma() is never called here — confirm intent.
			 */
			if (ep->dev->pdev->vendor == PCI_VENDOR_ID_PLX &&
				!list_empty(&ep->queue) && ep->td_dma)
					restart_dma(ep);
			ep->wedged = 0;
		}
		/* read back to flush the posted PCI write */
		(void) readl (&ep->regs->ep_rsp);
	}
	spin_unlock_irqrestore (&ep->dev->lock, flags);

	return retval;
}
1407
8066134f
AS
/* usb_ep_ops.set_halt: plain (non-wedged) halt set/clear */
static int
net2280_set_halt(struct usb_ep *_ep, int value)
{
	return net2280_set_halt_and_wedge(_ep, value, 0);
}
1413
/* usb_ep_ops.set_wedge: halt that persists until reconfiguration;
 * never valid on ep0
 */
static int
net2280_set_wedge(struct usb_ep *_ep)
{
	if (!_ep || _ep->name == ep0name)
		return -EINVAL;
	return net2280_set_halt_and_wedge(_ep, 1, 1);
}
1421
1da177e4
LT
1422static int
1423net2280_fifo_status (struct usb_ep *_ep)
1424{
1425 struct net2280_ep *ep;
1426 u32 avail;
1427
1428 ep = container_of (_ep, struct net2280_ep, ep);
1429 if (!_ep || (!ep->desc && ep->num != 0))
1430 return -ENODEV;
1431 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1432 return -ESHUTDOWN;
1433
3e76fdcb 1434 avail = readl(&ep->regs->ep_avail) & (BIT(12) - 1);
1da177e4
LT
1435 if (avail > ep->fifo_size)
1436 return -EOVERFLOW;
1437 if (ep->is_in)
1438 avail = ep->fifo_size - avail;
1439 return avail;
1440}
1441
1442static void
1443net2280_fifo_flush (struct usb_ep *_ep)
1444{
1445 struct net2280_ep *ep;
1446
1447 ep = container_of (_ep, struct net2280_ep, ep);
1448 if (!_ep || (!ep->desc && ep->num != 0))
1449 return;
1450 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1451 return;
1452
3e76fdcb 1453 writel(BIT(FIFO_FLUSH), &ep->regs->ep_stat);
1da177e4
LT
1454 (void) readl (&ep->regs->ep_rsp);
1455}
1456
/* per-endpoint operations exported to the gadget core */
static const struct usb_ep_ops net2280_ep_ops = {
	.enable		= net2280_enable,
	.disable	= net2280_disable,

	.alloc_request	= net2280_alloc_request,
	.free_request	= net2280_free_request,

	.queue		= net2280_queue,
	.dequeue	= net2280_dequeue,

	.set_halt	= net2280_set_halt,
	.set_wedge	= net2280_set_wedge,
	.fifo_status	= net2280_fifo_status,
	.fifo_flush	= net2280_fifo_flush,
};
1472
1473/*-------------------------------------------------------------------------*/
1474
1475static int net2280_get_frame (struct usb_gadget *_gadget)
1476{
1477 struct net2280 *dev;
1478 unsigned long flags;
1479 u16 retval;
1480
1481 if (!_gadget)
1482 return -ENODEV;
1483 dev = container_of (_gadget, struct net2280, gadget);
1484 spin_lock_irqsave (&dev->lock, flags);
1485 retval = get_idx_reg (dev->regs, REG_FRAME) & 0x03ff;
1486 spin_unlock_irqrestore (&dev->lock, flags);
1487 return retval;
1488}
1489
1490static int net2280_wakeup (struct usb_gadget *_gadget)
1491{
1492 struct net2280 *dev;
1493 u32 tmp;
1494 unsigned long flags;
1495
1496 if (!_gadget)
1497 return 0;
1498 dev = container_of (_gadget, struct net2280, gadget);
1499
1500 spin_lock_irqsave (&dev->lock, flags);
1501 tmp = readl (&dev->usb->usbctl);
3e76fdcb
RR
1502 if (tmp & BIT(DEVICE_REMOTE_WAKEUP_ENABLE))
1503 writel(BIT(GENERATE_RESUME), &dev->usb->usbstat);
1da177e4
LT
1504 spin_unlock_irqrestore (&dev->lock, flags);
1505
1506 /* pci writes may still be posted */
1507 return 0;
1508}
1509
1510static int net2280_set_selfpowered (struct usb_gadget *_gadget, int value)
1511{
1512 struct net2280 *dev;
1513 u32 tmp;
1514 unsigned long flags;
1515
1516 if (!_gadget)
1517 return 0;
1518 dev = container_of (_gadget, struct net2280, gadget);
1519
1520 spin_lock_irqsave (&dev->lock, flags);
1521 tmp = readl (&dev->usb->usbctl);
adc82f77 1522 if (value) {
3e76fdcb 1523 tmp |= BIT(SELF_POWERED_STATUS);
adc82f77
RR
1524 dev->selfpowered = 1;
1525 } else {
3e76fdcb 1526 tmp &= ~BIT(SELF_POWERED_STATUS);
adc82f77
RR
1527 dev->selfpowered = 0;
1528 }
1da177e4
LT
1529 writel (tmp, &dev->usb->usbctl);
1530 spin_unlock_irqrestore (&dev->lock, flags);
1531
1532 return 0;
1533}
1534
1535static int net2280_pullup(struct usb_gadget *_gadget, int is_on)
1536{
1537 struct net2280 *dev;
1538 u32 tmp;
1539 unsigned long flags;
1540
1541 if (!_gadget)
1542 return -ENODEV;
1543 dev = container_of (_gadget, struct net2280, gadget);
1544
1545 spin_lock_irqsave (&dev->lock, flags);
1546 tmp = readl (&dev->usb->usbctl);
1547 dev->softconnect = (is_on != 0);
1548 if (is_on)
3e76fdcb 1549 tmp |= BIT(USB_DETECT_ENABLE);
1da177e4 1550 else
3e76fdcb 1551 tmp &= ~BIT(USB_DETECT_ENABLE);
1da177e4
LT
1552 writel (tmp, &dev->usb->usbctl);
1553 spin_unlock_irqrestore (&dev->lock, flags);
1554
1555 return 0;
1556}
1557
/* defined later in this file; declared here for the ops table */
static int net2280_start(struct usb_gadget *_gadget,
		struct usb_gadget_driver *driver);
static int net2280_stop(struct usb_gadget *_gadget,
		struct usb_gadget_driver *driver);

/* controller-wide operations exported to the gadget core */
static const struct usb_gadget_ops net2280_ops = {
	.get_frame	= net2280_get_frame,
	.wakeup		= net2280_wakeup,
	.set_selfpowered = net2280_set_selfpowered,
	.pullup		= net2280_pullup,
	.udc_start	= net2280_start,
	.udc_stop	= net2280_stop,
};
1571
1572/*-------------------------------------------------------------------------*/
1573
1574#ifdef CONFIG_USB_GADGET_DEBUG_FILES
1575
1576/* FIXME move these into procfs, and use seq_file.
1577 * Sysfs _still_ doesn't behave for arbitrarily sized files,
1578 * and also doesn't help products using this with 2.4 kernels.
1579 */
1580
1581/* "function" sysfs attribute */
ce26bd23
GKH
/* sysfs "function" attribute: name of the bound gadget function driver,
 * or nothing when no driver is bound
 */
static ssize_t function_show(struct device *_dev, struct device_attribute *attr,
			     char *buf)
{
	struct net2280	*dev = dev_get_drvdata (_dev);

	if (!dev->driver
			|| !dev->driver->function
			|| strlen (dev->driver->function) > PAGE_SIZE)
		return 0;
	return scnprintf (buf, PAGE_SIZE, "%s\n", dev->driver->function);
}
static DEVICE_ATTR_RO(function);
1da177e4 1594
ce26bd23
GKH
1595static ssize_t registers_show(struct device *_dev,
1596 struct device_attribute *attr, char *buf)
1da177e4
LT
1597{
1598 struct net2280 *dev;
1599 char *next;
1600 unsigned size, t;
1601 unsigned long flags;
1602 int i;
1603 u32 t1, t2;
30e69598 1604 const char *s;
1da177e4
LT
1605
1606 dev = dev_get_drvdata (_dev);
1607 next = buf;
1608 size = PAGE_SIZE;
1609 spin_lock_irqsave (&dev->lock, flags);
1610
1611 if (dev->driver)
1612 s = dev->driver->driver.name;
1613 else
1614 s = "(none)";
1615
1616 /* Main Control Registers */
1617 t = scnprintf (next, size, "%s version " DRIVER_VERSION
1618 ", chiprev %04x, dma %s\n\n"
1619 "devinit %03x fifoctl %08x gadget '%s'\n"
1620 "pci irqenb0 %02x irqenb1 %08x "
1621 "irqstat0 %04x irqstat1 %08x\n",
1622 driver_name, dev->chiprev,
1623 use_dma
1624 ? (use_dma_chaining ? "chaining" : "enabled")
1625 : "disabled",
1626 readl (&dev->regs->devinit),
1627 readl (&dev->regs->fifoctl),
1628 s,
1629 readl (&dev->regs->pciirqenb0),
1630 readl (&dev->regs->pciirqenb1),
1631 readl (&dev->regs->irqstat0),
1632 readl (&dev->regs->irqstat1));
1633 size -= t;
1634 next += t;
1635
1636 /* USB Control Registers */
1637 t1 = readl (&dev->usb->usbctl);
1638 t2 = readl (&dev->usb->usbstat);
3e76fdcb
RR
1639 if (t1 & BIT(VBUS_PIN)) {
1640 if (t2 & BIT(HIGH_SPEED))
1da177e4
LT
1641 s = "high speed";
1642 else if (dev->gadget.speed == USB_SPEED_UNKNOWN)
1643 s = "powered";
1644 else
1645 s = "full speed";
1646 /* full speed bit (6) not working?? */
1647 } else
1648 s = "not attached";
1649 t = scnprintf (next, size,
1650 "stdrsp %08x usbctl %08x usbstat %08x "
1651 "addr 0x%02x (%s)\n",
1652 readl (&dev->usb->stdrsp), t1, t2,
1653 readl (&dev->usb->ouraddr), s);
1654 size -= t;
1655 next += t;
1656
1657 /* PCI Master Control Registers */
1658
1659 /* DMA Control Registers */
1660
1661 /* Configurable EP Control Registers */
adc82f77 1662 for (i = 0; i < dev->n_ep; i++) {
1da177e4
LT
1663 struct net2280_ep *ep;
1664
1665 ep = &dev->ep [i];
1666 if (i && !ep->desc)
1667 continue;
1668
adc82f77 1669 t1 = readl(&ep->cfg->ep_cfg);
1da177e4
LT
1670 t2 = readl (&ep->regs->ep_rsp) & 0xff;
1671 t = scnprintf (next, size,
1672 "\n%s\tcfg %05x rsp (%02x) %s%s%s%s%s%s%s%s"
1673 "irqenb %02x\n",
1674 ep->ep.name, t1, t2,
3e76fdcb 1675 (t2 & BIT(CLEAR_NAK_OUT_PACKETS))
1da177e4 1676 ? "NAK " : "",
3e76fdcb 1677 (t2 & BIT(CLEAR_EP_HIDE_STATUS_PHASE))
1da177e4 1678 ? "hide " : "",
3e76fdcb 1679 (t2 & BIT(CLEAR_EP_FORCE_CRC_ERROR))
1da177e4 1680 ? "CRC " : "",
3e76fdcb 1681 (t2 & BIT(CLEAR_INTERRUPT_MODE))
1da177e4 1682 ? "interrupt " : "",
3e76fdcb 1683 (t2 & BIT(CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE))
1da177e4 1684 ? "status " : "",
3e76fdcb 1685 (t2 & BIT(CLEAR_NAK_OUT_PACKETS_MODE))
1da177e4 1686 ? "NAKmode " : "",
3e76fdcb 1687 (t2 & BIT(CLEAR_ENDPOINT_TOGGLE))
1da177e4 1688 ? "DATA1 " : "DATA0 ",
3e76fdcb 1689 (t2 & BIT(CLEAR_ENDPOINT_HALT))
1da177e4
LT
1690 ? "HALT " : "",
1691 readl (&ep->regs->ep_irqenb));
1692 size -= t;
1693 next += t;
1694
1695 t = scnprintf (next, size,
1696 "\tstat %08x avail %04x "
1697 "(ep%d%s-%s)%s\n",
1698 readl (&ep->regs->ep_stat),
1699 readl (&ep->regs->ep_avail),
1700 t1 & 0x0f, DIR_STRING (t1),
1701 type_string (t1 >> 8),
1702 ep->stopped ? "*" : "");
1703 size -= t;
1704 next += t;
1705
1706 if (!ep->dma)
1707 continue;
1708
1709 t = scnprintf (next, size,
1710 " dma\tctl %08x stat %08x count %08x\n"
1711 "\taddr %08x desc %08x\n",
1712 readl (&ep->dma->dmactl),
1713 readl (&ep->dma->dmastat),
1714 readl (&ep->dma->dmacount),
1715 readl (&ep->dma->dmaaddr),
1716 readl (&ep->dma->dmadesc));
1717 size -= t;
1718 next += t;
1719
1720 }
1721
1722 /* Indexed Registers */
901b3d75 1723 // none yet
1da177e4
LT
1724
1725 /* Statistics */
1726 t = scnprintf (next, size, "\nirqs: ");
1727 size -= t;
1728 next += t;
adc82f77 1729 for (i = 0; i < dev->n_ep; i++) {
1da177e4
LT
1730 struct net2280_ep *ep;
1731
1732 ep = &dev->ep [i];
1733 if (i && !ep->irqs)
1734 continue;
1735 t = scnprintf (next, size, " %s/%lu", ep->ep.name, ep->irqs);
1736 size -= t;
1737 next += t;
1738
1739 }
1740 t = scnprintf (next, size, "\n");
1741 size -= t;
1742 next += t;
1743
1744 spin_unlock_irqrestore (&dev->lock, flags);
1745
1746 return PAGE_SIZE - size;
1747}
ce26bd23 1748static DEVICE_ATTR_RO(registers);
1da177e4 1749
/* sysfs "queues" attribute: for each configured endpoint, list its
 * descriptor parameters and every queued request (with DMA descriptor
 * contents where applicable)
 */
static ssize_t queues_show(struct device *_dev, struct device_attribute *attr,
			   char *buf)
{
	struct net2280		*dev;
	char			*next;
	unsigned		size;
	unsigned long		flags;
	int			i;

	dev = dev_get_drvdata (_dev);
	next = buf;
	size = PAGE_SIZE;
	spin_lock_irqsave (&dev->lock, flags);

	for (i = 0; i < dev->n_ep; i++) {
		struct net2280_ep		*ep = &dev->ep [i];
		struct net2280_request		*req;
		int				t;

		if (i != 0) {
			const struct usb_endpoint_descriptor	*d;

			d = ep->desc;
			if (!d)
				continue;
			t = d->bEndpointAddress;
			t = scnprintf (next, size,
				"\n%s (ep%d%s-%s) max %04x %s fifo %d\n",
				ep->ep.name, t & USB_ENDPOINT_NUMBER_MASK,
				(t & USB_DIR_IN) ? "in" : "out",
				({ char	*val;
				 switch (d->bmAttributes & 0x03) {
				 case USB_ENDPOINT_XFER_BULK:
					val = "bulk"; break;
				 case USB_ENDPOINT_XFER_INT:
					val = "intr"; break;
				 default:
					val = "iso"; break;
				 } val; }),
				usb_endpoint_maxp (d) & 0x1fff,
				ep->dma ? "dma" : "pio", ep->fifo_size
				);
		} else /* ep0 should only have one transfer queued */
			t = scnprintf (next, size, "ep0 max 64 pio %s\n",
					ep->is_in ? "in" : "out");
		if (t <= 0 || t > size)
			goto done;
		size -= t;
		next += t;

		if (list_empty (&ep->queue)) {
			t = scnprintf (next, size, "\t(nothing queued)\n");
			if (t <= 0 || t > size)
				goto done;
			size -= t;
			next += t;
			continue;
		}
		list_for_each_entry (req, &ep->queue, queue) {
			/* flag the request the DMA engine is working on */
			if (ep->dma && req->td_dma == readl (&ep->dma->dmadesc))
				t = scnprintf (next, size,
					"\treq %p len %d/%d "
					"buf %p (dmacount %08x)\n",
					&req->req, req->req.actual,
					req->req.length, req->req.buf,
					readl (&ep->dma->dmacount));
			else
				t = scnprintf (next, size,
					"\treq %p len %d/%d buf %p\n",
					&req->req, req->req.actual,
					req->req.length, req->req.buf);
			if (t <= 0 || t > size)
				goto done;
			size -= t;
			next += t;

			if (ep->dma) {
				struct net2280_dma	*td;

				td = req->td;
				t = scnprintf (next, size, "\t td %08x "
					" count %08x buf %08x desc %08x\n",
					(u32) req->td_dma,
					le32_to_cpu (td->dmacount),
					le32_to_cpu (td->dmaaddr),
					le32_to_cpu (td->dmadesc));
				if (t <= 0 || t > size)
					goto done;
				size -= t;
				next += t;
			}
		}
	}

done:
	spin_unlock_irqrestore (&dev->lock, flags);
	return PAGE_SIZE - size;
}
static DEVICE_ATTR_RO(queues);
1da177e4
LT
1849
1850
1851#else
1852
9950421c
LT
1853#define device_create_file(a,b) (0)
1854#define device_remove_file(a,b) do { } while (0)
1da177e4
LT
1855
1856#endif
1857
1858/*-------------------------------------------------------------------------*/
1859
1860/* another driver-specific mode might be a request type doing dma
1861 * to/from another device fifo instead of to/from memory.
1862 */
1863
/*
 * set_fifo_mode - program the chip's fifo partitioning and rebuild the
 * gadget ep_list to match which endpoints exist in that mode.
 * mode 0: four 1K fifos (ep-a..ep-d); mode 1: two 2K fifos (ep-a, ep-b);
 * mode 2: one 2K (ep-a) + two 1K (ep-b, ep-c).
 */
static void set_fifo_mode (struct net2280 *dev, int mode)
{
	/* keeping high bits preserves BAR2 */
	writel ((0xffff << PCI_BASE2_RANGE) | mode, &dev->regs->fifoctl);

	/* always ep-{a,b,e,f} ... maybe not ep-c or ep-d */
	INIT_LIST_HEAD (&dev->gadget.ep_list);
	list_add_tail (&dev->ep [1].ep.ep_list, &dev->gadget.ep_list);
	list_add_tail (&dev->ep [2].ep.ep_list, &dev->gadget.ep_list);
	switch (mode) {
	case 0:
		list_add_tail (&dev->ep [3].ep.ep_list, &dev->gadget.ep_list);
		list_add_tail (&dev->ep [4].ep.ep_list, &dev->gadget.ep_list);
		dev->ep [1].fifo_size = dev->ep [2].fifo_size = 1024;
		break;
	case 1:
		dev->ep [1].fifo_size = dev->ep [2].fifo_size = 2048;
		break;
	case 2:
		list_add_tail (&dev->ep [3].ep.ep_list, &dev->gadget.ep_list);
		dev->ep [1].fifo_size = 2048;
		dev->ep [2].fifo_size = 1024;
		break;
	}
	/* fifo sizes for ep0, ep-c, ep-d, ep-e, and ep-f never change */
	list_add_tail (&dev->ep [5].ep.ep_list, &dev->gadget.ep_list);
	list_add_tail (&dev->ep [6].ep.ep_list, &dev->gadget.ep_list);
}
1892
adc82f77
RR
static void defect7374_disable_data_eps(struct net2280 *dev)
{
	/*
	 * For Defect 7374, disable data EPs (and more):
	 *  - This phase undoes the earlier phase of the Defect 7374 workaround,
	 *    returing ep regs back to normal.
	 */
	struct net2280_ep *ep;
	int i;
	unsigned char ep_sel;
	u32 tmp_reg;

	/* clear the GPEP configurations set up by the workaround */
	for (i = 1; i < 5; i++) {
		ep = &dev->ep[i];
		writel(0, &ep->cfg->ep_cfg);
	}

	/* CSROUT, CSRIN, PCIOUT, PCIIN, STATIN, RCIN */
	for (i = 0; i < 6; i++)
		writel(0, &dev->dep[i].dep_cfg);

	for (ep_sel = 0; ep_sel <= 21; ep_sel++) {
		/* Select an endpoint for subsequent operations: */
		tmp_reg = readl(&dev->plregs->pl_ep_ctrl);
		writel(((tmp_reg & ~0x1f) | ep_sel), &dev->plregs->pl_ep_ctrl);

		/* skip endpoints the workaround never touched */
		if (ep_sel < 2 || (ep_sel > 9 && ep_sel < 14) ||
		    ep_sel == 18 || ep_sel == 20)
			continue;

		/* Change settings on some selected endpoints */
		tmp_reg = readl(&dev->plregs->pl_ep_cfg_4);
		tmp_reg &= ~BIT(NON_CTRL_IN_TOLERATE_BAD_DIR);
		writel(tmp_reg, &dev->plregs->pl_ep_cfg_4);
		tmp_reg = readl(&dev->plregs->pl_ep_ctrl);
		tmp_reg |= BIT(EP_INITIALIZED);
		writel(tmp_reg, &dev->plregs->pl_ep_ctrl);
	}
}
1932
/*
 * Defect 7374 workaround, arming phase: pre-configure the GPEPs and the
 * dedicated endpoints so the chip can survive the first SuperSpeed
 * control read, then advance the workaround FSM (kept in the SCRATCH
 * index register) to "waiting for control read".  Skipped when the FSM
 * shows the workaround already ran.
 */
static void defect7374_enable_data_eps_zero(struct net2280 *dev)
{
	u32 tmp = 0, tmp_reg;
	u32 fsmvalue, scratch;
	int i;
	unsigned char ep_sel;

	scratch = get_idx_reg(dev->regs, SCRATCH);
	fsmvalue = scratch & (0xf << DEFECT7374_FSM_FIELD);
	scratch &= ~(0xf << DEFECT7374_FSM_FIELD);

	/*See if firmware needs to set up for workaround*/
	if (fsmvalue != DEFECT7374_FSM_SS_CONTROL_READ) {
		WARNING(dev, "Operate Defect 7374 workaround soft this time");
		WARNING(dev, "It will operate on cold-reboot and SS connect");

		/*GPEPs:*/
		tmp = ((0 << ENDPOINT_NUMBER) | BIT(ENDPOINT_DIRECTION) |
		       (2 << OUT_ENDPOINT_TYPE) | (2 << IN_ENDPOINT_TYPE) |
		       ((dev->enhanced_mode) ?
		       BIT(OUT_ENDPOINT_ENABLE) : BIT(ENDPOINT_ENABLE)) |
		       BIT(IN_ENDPOINT_ENABLE));

		for (i = 1; i < 5; i++)
			writel(tmp, &dev->ep[i].cfg->ep_cfg);

		/* CSRIN, PCIIN, STATIN, RCIN*/
		tmp = ((0 << ENDPOINT_NUMBER) | BIT(ENDPOINT_ENABLE));
		writel(tmp, &dev->dep[1].dep_cfg);
		writel(tmp, &dev->dep[3].dep_cfg);
		writel(tmp, &dev->dep[4].dep_cfg);
		writel(tmp, &dev->dep[5].dep_cfg);

		/*Implemented for development and debug.
		 * Can be refined/tuned later.*/
		for (ep_sel = 0; ep_sel <= 21; ep_sel++) {
			/* Select an endpoint for subsequent operations: */
			tmp_reg = readl(&dev->plregs->pl_ep_ctrl);
			writel(((tmp_reg & ~0x1f) | ep_sel),
					&dev->plregs->pl_ep_ctrl);

			if (ep_sel == 1) {
				tmp =
				    (readl(&dev->plregs->pl_ep_ctrl) |
				     BIT(CLEAR_ACK_ERROR_CODE) | 0);
				writel(tmp, &dev->plregs->pl_ep_ctrl);
				continue;
			}

			if (ep_sel == 0 || (ep_sel > 9 && ep_sel < 14) ||
			    ep_sel == 18 || ep_sel == 20)
				continue;

			tmp = (readl(&dev->plregs->pl_ep_cfg_4) |
			       BIT(NON_CTRL_IN_TOLERATE_BAD_DIR) | 0);
			writel(tmp, &dev->plregs->pl_ep_cfg_4);

			tmp = readl(&dev->plregs->pl_ep_ctrl) &
			      ~BIT(EP_INITIALIZED);
			writel(tmp, &dev->plregs->pl_ep_ctrl);

		}

		/* Set FSM to focus on the first Control Read:
		 * - Tip: Connection speed is known upon the first
		 * setup request.*/
		scratch |= DEFECT7374_FSM_WAITING_FOR_CONTROL_READ;
		set_idx_reg(dev->regs, SCRATCH, scratch);

	} else{
		WARNING(dev, "Defect 7374 workaround soft will NOT operate");
		WARNING(dev, "It will operate on cold-reboot and SS connect");
	}
}
2007
1da177e4
LT
2008/* keeping it simple:
2009 * - one bus driver, initted first;
2010 * - one function driver, initted second
2011 *
2012 * most of the work to support multiple net2280 controllers would
2013 * be to associate this gadget driver (yes?) with all of them, or
2014 * perhaps to bind specific drivers to specific devices.
2015 */
2016
/*
 * Quiesce a 228x-family controller: disable automatic responses and
 * irqs, abort any endpoint DMA, ack latched irq status, soft-reset the
 * core, and restore the default FIFO/endpoint allocation.
 * Caller holds dev->lock (hardware-touching reset path).
 */
static void usb_reset_228x(struct net2280 *dev)
{
	u32 tmp;

	dev->gadget.speed = USB_SPEED_UNKNOWN;
	(void) readl (&dev->usb->usbctl);	/* flush posted writes */

	net2280_led_init (dev);

	/* disable automatic responses, and irqs */
	writel (0, &dev->usb->stdrsp);
	writel (0, &dev->regs->pciirqenb0);
	writel (0, &dev->regs->pciirqenb1);

	/* clear old dma and irq state */
	for (tmp = 0; tmp < 4; tmp++) {
		struct net2280_ep *ep = &dev->ep[tmp + 1];
		if (ep->dma)
			abort_dma(ep);
	}

	/* ack everything except the suspend-request bit in irqstat1;
	 * NOTE: the trailing commas are comma operators — these two
	 * writes and the assignment below form one statement.
	 */
	writel (~0, &dev->regs->irqstat0),
	writel(~(u32)BIT(SUSPEND_REQUEST_INTERRUPT), &dev->regs->irqstat1),

	/* reset, and enable pci */
	tmp = readl(&dev->regs->devinit) |
		BIT(PCI_ENABLE) |
		BIT(FIFO_SOFT_RESET) |
		BIT(USB_SOFT_RESET) |
		BIT(M8051_RESET);
	writel (tmp, &dev->regs->devinit);

	/* standard fifo and endpoint allocations */
	set_fifo_mode (dev, (fifo_mode <= 2) ? fifo_mode : 0);
}
2052
/*
 * Quiesce a 338x-family controller.  The full reset (irq/response
 * disable plus core soft reset) is performed only once the Defect 7374
 * workaround FSM in SCRATCH has reached the SS-control-read state;
 * otherwise those steps are skipped so the workaround can still run
 * (see defect7374_workaround()).
 */
static void usb_reset_338x(struct net2280 *dev)
{
	u32 tmp;
	u32 fsmvalue;

	dev->gadget.speed = USB_SPEED_UNKNOWN;
	(void)readl(&dev->usb->usbctl);	/* flush posted writes */

	net2280_led_init(dev);

	/* current Defect 7374 workaround FSM state, kept in SCRATCH */
	fsmvalue = get_idx_reg(dev->regs, SCRATCH) &
			(0xf << DEFECT7374_FSM_FIELD);

	/* See if firmware needs to set up for workaround: */
	if (fsmvalue != DEFECT7374_FSM_SS_CONTROL_READ) {
		INFO(dev, "%s: Defect 7374 FsmValue 0x%08x\n", __func__,
		     fsmvalue);
	} else {
		/* disable automatic responses, and irqs */
		writel(0, &dev->usb->stdrsp);
		writel(0, &dev->regs->pciirqenb0);
		writel(0, &dev->regs->pciirqenb1);
	}

	/* clear old dma and irq state */
	for (tmp = 0; tmp < 4; tmp++) {
		struct net2280_ep *ep = &dev->ep[tmp + 1];

		if (ep->dma)
			abort_dma(ep);
	}

	/* ack all latched irq status (comma operator: one statement) */
	writel(~0, &dev->regs->irqstat0), writel(~0, &dev->regs->irqstat1);

	if (fsmvalue == DEFECT7374_FSM_SS_CONTROL_READ) {
		/* reset, and enable pci */
		tmp = readl(&dev->regs->devinit) |
		    BIT(PCI_ENABLE) |
		    BIT(FIFO_SOFT_RESET) |
		    BIT(USB_SOFT_RESET) |
		    BIT(M8051_RESET);

		writel(tmp, &dev->regs->devinit);
	}

	/* always ep-{1,2,3,4} ... maybe not ep-3 or ep-4 */
	INIT_LIST_HEAD(&dev->gadget.ep_list);

	for (tmp = 1; tmp < dev->n_ep; tmp++)
		list_add_tail(&dev->ep[tmp].ep.ep_list, &dev->gadget.ep_list);

}
2105
2106static void usb_reset(struct net2280 *dev)
2107{
c2db8a8a 2108 if (dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY)
adc82f77
RR
2109 return usb_reset_228x(dev);
2110 return usb_reset_338x(dev);
2111}
2112
/*
 * Rebuild driver-side endpoint state for 228x parts: name/number each
 * endpoint, assign FIFO sizes and (optionally) DMA channels, reset the
 * endpoint registers, and disable the dedicated endpoints.
 */
static void usb_reinit_228x(struct net2280 *dev)
{
	u32 tmp;
	int init_dma;

	/* use_dma changes are ignored till next device re-init */
	init_dma = use_dma;

	/* basic endpoint init */
	for (tmp = 0; tmp < 7; tmp++) {
		struct net2280_ep *ep = &dev->ep [tmp];

		ep->ep.name = ep_name [tmp];
		ep->dev = dev;
		ep->num = tmp;

		/* ep-a..ep-d (1..4) get 1K FIFOs and the DMA channels */
		if (tmp > 0 && tmp <= 4) {
			ep->fifo_size = 1024;
			if (init_dma)
				ep->dma = &dev->dma [tmp - 1];
		} else
			ep->fifo_size = 64;
		ep->regs = &dev->epregs [tmp];
		ep->cfg = &dev->epregs[tmp];
		ep_reset_228x(dev->regs, ep);
	}
	/* ep0 and the two PIO-only endpoints are limited to 64 bytes */
	usb_ep_set_maxpacket_limit(&dev->ep [0].ep, 64);
	usb_ep_set_maxpacket_limit(&dev->ep [5].ep, 64);
	usb_ep_set_maxpacket_limit(&dev->ep [6].ep, 64);

	dev->gadget.ep0 = &dev->ep [0].ep;
	dev->ep [0].stopped = 0;
	INIT_LIST_HEAD (&dev->gadget.ep0->ep_list);

	/* we want to prevent lowlevel/insecure access from the USB host,
	 * but erratum 0119 means this enable bit is ignored
	 */
	for (tmp = 0; tmp < 5; tmp++)
		writel (EP_DONTUSE, &dev->dep [tmp].dep_cfg);
}
2153
/*
 * Rebuild driver-side endpoint state for 338x parts, then apply the
 * chip's link-layer errata workarounds and disable the dedicated
 * endpoints.  In enhanced mode the IN/OUT halves of an endpoint pair
 * live at different register offsets, mapped via ne[]/ep_reg_addr[].
 */
static void usb_reinit_338x(struct net2280 *dev)
{
	int init_dma;
	int i;
	u32 tmp, val;
	u32 fsmvalue;
	static const u32 ne[9] = { 0, 1, 2, 3, 4, 1, 2, 3, 4 };
	static const u32 ep_reg_addr[9] = { 0x00, 0xC0, 0x00, 0xC0, 0x00,
						0x00, 0xC0, 0x00, 0xC0 };

	/* use_dma changes are ignored till next device re-init */
	init_dma = use_dma;

	/* basic endpoint init */
	for (i = 0; i < dev->n_ep; i++) {
		struct net2280_ep *ep = &dev->ep[i];

		ep->ep.name = ep_name[i];
		ep->dev = dev;
		ep->num = i;

		if (i > 0 && i <= 4 && init_dma)
			ep->dma = &dev->dma[i - 1];

		if (dev->enhanced_mode) {
			/* config and data registers live at different
			 * offsets in enhanced mode */
			ep->cfg = &dev->epregs[ne[i]];
			ep->regs = (struct net2280_ep_regs __iomem *)
				(((void *)&dev->epregs[ne[i]]) +
				ep_reg_addr[i]);
			ep->fiforegs = &dev->fiforegs[i];
		} else {
			ep->cfg = &dev->epregs[i];
			ep->regs = &dev->epregs[i];
			ep->fiforegs = &dev->fiforegs[i];
		}

		/* 2K FIFOs for data endpoints, 512 bytes for ep0 */
		ep->fifo_size = (i != 0) ? 2048 : 512;

		ep_reset_338x(dev->regs, ep);
	}
	usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 512);

	dev->gadget.ep0 = &dev->ep[0].ep;
	dev->ep[0].stopped = 0;

	/* Link layer set up */
	fsmvalue = get_idx_reg(dev->regs, SCRATCH) &
			(0xf << DEFECT7374_FSM_FIELD);

	/* See if driver needs to set up for workaround: */
	if (fsmvalue != DEFECT7374_FSM_SS_CONTROL_READ)
		INFO(dev, "%s: Defect 7374 FsmValue %08x\n",
						__func__, fsmvalue);
	else {
		/* start with U1/U2/LTM disabled; the host re-enables
		 * them via SET_FEATURE */
		tmp = readl(&dev->usb_ext->usbctl2) &
			~(BIT(U1_ENABLE) | BIT(U2_ENABLE) | BIT(LTM_ENABLE));
		writel(tmp, &dev->usb_ext->usbctl2);
	}

	/* Hardware Defect and Workaround */
	val = readl(&dev->ll_lfps_regs->ll_lfps_5);
	val &= ~(0xf << TIMER_LFPS_6US);
	val |= 0x5 << TIMER_LFPS_6US;
	writel(val, &dev->ll_lfps_regs->ll_lfps_5);

	val = readl(&dev->ll_lfps_regs->ll_lfps_6);
	val &= ~(0xffff << TIMER_LFPS_80US);
	val |= 0x0100 << TIMER_LFPS_80US;
	writel(val, &dev->ll_lfps_regs->ll_lfps_6);

	/*
	 * AA_AB Errata. Issue 4. Workaround for SuperSpeed USB
	 * Hot Reset Exit Handshake may Fail in Specific Case using
	 * Default Register Settings. Workaround for Enumeration test.
	 */
	val = readl(&dev->ll_tsn_regs->ll_tsn_counters_2);
	val &= ~(0x1f << HOT_TX_NORESET_TS2);
	val |= 0x10 << HOT_TX_NORESET_TS2;
	writel(val, &dev->ll_tsn_regs->ll_tsn_counters_2);

	val = readl(&dev->ll_tsn_regs->ll_tsn_counters_3);
	val &= ~(0x1f << HOT_RX_RESET_TS2);
	val |= 0x3 << HOT_RX_RESET_TS2;
	writel(val, &dev->ll_tsn_regs->ll_tsn_counters_3);

	/*
	 * Set Recovery Idle to Recover bit:
	 * - On SS connections, setting Recovery Idle to Recover Fmw improves
	 *   link robustness with various hosts and hubs.
	 * - It is safe to set for all connection speeds; all chip revisions.
	 * - R-M-W to leave other bits undisturbed.
	 * - Reference PLX TT-7372
	 */
	val = readl(&dev->ll_chicken_reg->ll_tsn_chicken_bit);
	val |= BIT(RECOVERY_IDLE_TO_RECOVER_FMW);
	writel(val, &dev->ll_chicken_reg->ll_tsn_chicken_bit);

	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);

	/* disable dedicated endpoints */
	writel(0x0D, &dev->dep[0].dep_cfg);
	writel(0x0D, &dev->dep[1].dep_cfg);
	writel(0x0E, &dev->dep[2].dep_cfg);
	writel(0x0E, &dev->dep[3].dep_cfg);
	writel(0x0F, &dev->dep[4].dep_cfg);
	writel(0x0C, &dev->dep[5].dep_cfg);
}
2261
2262static void usb_reinit(struct net2280 *dev)
2263{
c2db8a8a 2264 if (dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY)
adc82f77
RR
2265 return usb_reinit_228x(dev);
2266 return usb_reinit_338x(dev);
2267}
2268
/*
 * Bring up ep0 on 228x parts: clear stale ep0 response state, tell the
 * hardware which standard requests to auto-answer, set usbctl (host
 * detection follows dev->softconnect), and unmask the irqs the driver
 * needs.  Ends with a read to flush posted PCI writes.
 */
static void ep0_start_228x(struct net2280 *dev)
{
	writel(BIT(CLEAR_EP_HIDE_STATUS_PHASE) |
		BIT(CLEAR_NAK_OUT_PACKETS) |
		BIT(CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE)
		, &dev->epregs [0].ep_rsp);

	/*
	 * hardware optionally handles a bunch of standard requests
	 * that the API hides from drivers anyway. have it do so.
	 * endpoint status/features are handled in software, to
	 * help pass tests for some dubious behavior.
	 */
	writel(BIT(SET_TEST_MODE) |
		BIT(SET_ADDRESS) |
		BIT(DEVICE_SET_CLEAR_DEVICE_REMOTE_WAKEUP) |
		BIT(GET_DEVICE_STATUS) |
		BIT(GET_INTERFACE_STATUS)
		, &dev->usb->stdrsp);
	writel(BIT(USB_ROOT_PORT_WAKEUP_ENABLE) |
		BIT(SELF_POWERED_USB_DEVICE) |
		BIT(REMOTE_WAKEUP_SUPPORT) |
		(dev->softconnect << USB_DETECT_ENABLE) |
		BIT(SELF_POWERED_STATUS),
		&dev->usb->usbctl);

	/* enable irqs so we can see ep0 and general operation */
	writel(BIT(SETUP_PACKET_INTERRUPT_ENABLE) |
		BIT(ENDPOINT_0_INTERRUPT_ENABLE),
		&dev->regs->pciirqenb0);
	writel(BIT(PCI_INTERRUPT_ENABLE) |
		BIT(PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE) |
		BIT(PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE) |
		BIT(PCI_RETRY_ABORT_INTERRUPT_ENABLE) |
		BIT(VBUS_INTERRUPT_ENABLE) |
		BIT(ROOT_PORT_RESET_INTERRUPT_ENABLE) |
		BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE),
		&dev->regs->pciirqenb1);

	/* don't leave any writes posted */
	(void) readl (&dev->usb->usbctl);
}
2311
/*
 * Bring up ep0 on 338x parts.  The ep0 response-mode write is gated on
 * the Defect 7374 workaround FSM having reached the SS-control-read
 * state (same gating as usb_reset_338x).  Then configure hardware
 * auto-answered standard requests, usbctl, and irq enables, finishing
 * with a read to flush posted PCI writes.
 */
static void ep0_start_338x(struct net2280 *dev)
{
	u32 fsmvalue;

	fsmvalue = get_idx_reg(dev->regs, SCRATCH) &
			(0xf << DEFECT7374_FSM_FIELD);

	if (fsmvalue != DEFECT7374_FSM_SS_CONTROL_READ)
		INFO(dev, "%s: Defect 7374 FsmValue %08x\n", __func__,
		     fsmvalue);
	else
		writel(BIT(CLEAR_NAK_OUT_PACKETS_MODE) |
		       BIT(SET_EP_HIDE_STATUS_PHASE),
		       &dev->epregs[0].ep_rsp);

	/*
	 * hardware optionally handles a bunch of standard requests
	 * that the API hides from drivers anyway. have it do so.
	 * endpoint status/features are handled in software, to
	 * help pass tests for some dubious behavior.
	 */
	writel(BIT(SET_ISOCHRONOUS_DELAY) |
	       BIT(SET_SEL) |
	       BIT(SET_TEST_MODE) |
	       BIT(SET_ADDRESS) |
	       BIT(GET_INTERFACE_STATUS) |
	       BIT(GET_DEVICE_STATUS),
	       &dev->usb->stdrsp);
	/* remote wakeup is advertised as enabled from the start */
	dev->wakeup_enable = 1;
	writel(BIT(USB_ROOT_PORT_WAKEUP_ENABLE) |
	       (dev->softconnect << USB_DETECT_ENABLE) |
	       BIT(DEVICE_REMOTE_WAKEUP_ENABLE),
	       &dev->usb->usbctl);

	/* enable irqs so we can see ep0 and general operation */
	writel(BIT(SETUP_PACKET_INTERRUPT_ENABLE) |
	       BIT(ENDPOINT_0_INTERRUPT_ENABLE)
	       , &dev->regs->pciirqenb0);
	writel(BIT(PCI_INTERRUPT_ENABLE) |
	       BIT(ROOT_PORT_RESET_INTERRUPT_ENABLE) |
	       BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE) |
	       BIT(VBUS_INTERRUPT_ENABLE),
	       &dev->regs->pciirqenb1);

	/* don't leave any writes posted */
	(void)readl(&dev->usb->usbctl);
}
2359
2360static void ep0_start(struct net2280 *dev)
2361{
c2db8a8a 2362 if (dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY)
adc82f77
RR
2363 return ep0_start_228x(dev);
2364 return ep0_start_338x(dev);
2365}
2366
1da177e4
LT
2367/* when a driver is successfully registered, it will receive
2368 * control requests including set_configuration(), which enables
2369 * non-control requests. then usb traffic follows until a
2370 * disconnect is reported. then a host may connect again, or
2371 * the driver might get unbound.
2372 */
/*
 * usb_gadget_ops ->udc_start: bind a gadget function driver to this
 * controller, create its sysfs attributes, optionally force full-speed
 * test mode, then enable host detection and ep0.
 * Returns 0 on success or a negative errno; on sysfs failure it
 * unwinds via the goto ladder and leaves dev->driver NULL.
 */
static int net2280_start(struct usb_gadget *_gadget,
		struct usb_gadget_driver *driver)
{
	struct net2280 *dev;
	int retval;
	unsigned i;

	/* insist on high speed support from the driver, since
	 * (dev->usb->xcvrdiag & FORCE_FULL_SPEED_MODE)
	 * "must not be used in normal operation"
	 */
	if (!driver || driver->max_speed < USB_SPEED_HIGH
			|| !driver->setup)
		return -EINVAL;

	dev = container_of (_gadget, struct net2280, gadget);

	/* reset per-endpoint irq counters */
	for (i = 0; i < dev->n_ep; i++)
		dev->ep [i].irqs = 0;

	/* hook up the driver ... */
	dev->softconnect = 1;
	driver->driver.bus = NULL;
	dev->driver = driver;

	retval = device_create_file (&dev->pdev->dev, &dev_attr_function);
	if (retval) goto err_unbind;
	retval = device_create_file (&dev->pdev->dev, &dev_attr_queues);
	if (retval) goto err_func;

	/* Enable force-full-speed testing mode, if desired */
	if (full_speed && dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY)
		writel(BIT(FORCE_FULL_SPEED_MODE), &dev->usb->xcvrdiag);

	/* ... then enable host detection and ep0; and we're ready
	 * for set_configuration as well as eventual disconnect.
	 */
	net2280_led_active (dev, 1);

	/* 338x parts: arm the Defect 7374 workaround before ep0 starts */
	if (dev->pdev->vendor == PCI_VENDOR_ID_PLX)
		defect7374_enable_data_eps_zero(dev);

	ep0_start (dev);

	DEBUG (dev, "%s ready, usbctl %08x stdrsp %08x\n",
			driver->driver.name,
			readl (&dev->usb->usbctl),
			readl (&dev->usb->stdrsp));

	/* pci writes may still be posted */
	return 0;

err_func:
	device_remove_file (&dev->pdev->dev, &dev_attr_function);
err_unbind:
	dev->driver = NULL;
	return retval;
}
1da177e4
LT
2431
/*
 * Quiesce the controller and flush all queued requests, then notify
 * the function driver of the disconnect and re-initialize endpoint
 * state.  Caller holds dev->lock; the lock is dropped around the
 * driver's ->disconnect() callback, which may submit/cancel requests.
 */
static void
stop_activity (struct net2280 *dev, struct usb_gadget_driver *driver)
{
	int i;

	/* don't disconnect if it's not connected */
	if (dev->gadget.speed == USB_SPEED_UNKNOWN)
		driver = NULL;

	/* stop hardware; prevent new request submissions;
	 * and kill any outstanding requests.
	 */
	usb_reset (dev);
	for (i = 0; i < dev->n_ep; i++)
		nuke (&dev->ep [i]);

	/* report disconnect; the driver is already quiesced */
	if (driver) {
		spin_unlock(&dev->lock);
		driver->disconnect(&dev->gadget);
		spin_lock(&dev->lock);
	}

	usb_reinit (dev);
}
2457
/*
 * usb_gadget_ops ->udc_stop: unbind the gadget function driver.
 * Stops all activity under the device lock, clears dev->driver,
 * turns off the activity LED and full-speed test mode (228x only),
 * and removes the sysfs attributes created by net2280_start().
 * Always returns 0.
 */
static int net2280_stop(struct usb_gadget *_gadget,
		struct usb_gadget_driver *driver)
{
	struct net2280 *dev;
	unsigned long flags;

	dev = container_of (_gadget, struct net2280, gadget);

	spin_lock_irqsave (&dev->lock, flags);
	stop_activity (dev, driver);
	spin_unlock_irqrestore (&dev->lock, flags);

	dev->driver = NULL;

	net2280_led_active (dev, 0);

	/* Disable full-speed test mode */
	if (dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY)
		writel(0, &dev->usb->xcvrdiag);

	device_remove_file (&dev->pdev->dev, &dev_attr_function);
	device_remove_file (&dev->pdev->dev, &dev_attr_queues);

	DEBUG(dev, "unregistered driver '%s'\n",
			driver ? driver->driver.name : "");

	return 0;
}
1da177e4
LT
2486
2487/*-------------------------------------------------------------------------*/
2488
2489/* handle ep0, ep-e, ep-f with 64 byte packets: packet per irq.
2490 * also works for dma-capable endpoints, in pio mode or just
2491 * to manually advance the queue after short OUT transfers.
2492 */
/* handle ep0, ep-e, ep-f with 64 byte packets: packet per irq.
 * also works for dma-capable endpoints, in pio mode or just
 * to manually advance the queue after short OUT transfers.
 *
 * Called from the irq path with dev->lock held.  "mode" tracks the
 * current request: 0 = error, 1 = mid-data, 2 = done.
 */
static void handle_ep_small (struct net2280_ep *ep)
{
	struct net2280_request *req;
	u32 t;
	/* 0 error, 1 mid-data, 2 done */
	int mode = 1;

	/* peek at the request at the head of the queue, if any */
	if (!list_empty (&ep->queue))
		req = list_entry (ep->queue.next,
			struct net2280_request, queue);
	else
		req = NULL;

	/* ack all, and handle what we care about */
	t = readl (&ep->regs->ep_stat);
	ep->irqs++;
#if 0
	VDEBUG (ep->dev, "%s ack ep_stat %08x, req %p\n",
			ep->ep.name, t, req ? &req->req : 0);
#endif
	/* on 2280 (and all OUT eps) keep NAK_OUT_PACKETS latched;
	 * the 2282 wants the full status written back */
	if (!ep->is_in || ep->dev->pdev->device == 0x2280)
		writel(t & ~BIT(NAK_OUT_PACKETS), &ep->regs->ep_stat);
	else
		/* Added for 2282 */
		writel (t, &ep->regs->ep_stat);

	/* for ep0, monitor token irqs to catch data stage length errors
	 * and to synchronize on status.
	 *
	 * also, to defer reporting of protocol stalls ... here's where
	 * data or status first appears, handling stalls here should never
	 * cause trouble on the host side..
	 *
	 * control requests could be slightly faster without token synch for
	 * status, but status can jam up that way.
	 */
	if (unlikely (ep->num == 0)) {
		if (ep->is_in) {
			/* status; stop NAKing */
			if (t & BIT(DATA_OUT_PING_TOKEN_INTERRUPT)) {
				if (ep->dev->protocol_stall) {
					ep->stopped = 1;
					set_halt (ep);
				}
				if (!req)
					allow_status (ep);
				mode = 2;
			/* reply to extra IN data tokens with a zlp */
			} else if (t & BIT(DATA_IN_TOKEN_INTERRUPT)) {
				if (ep->dev->protocol_stall) {
					ep->stopped = 1;
					set_halt (ep);
					mode = 2;
				} else if (ep->responded &&
						!req && !ep->stopped)
					write_fifo (ep, NULL);
			}
		} else {
			/* status; stop NAKing */
			if (t & BIT(DATA_IN_TOKEN_INTERRUPT)) {
				if (ep->dev->protocol_stall) {
					ep->stopped = 1;
					set_halt (ep);
				}
				mode = 2;
			/* an extra OUT token is an error */
			} else if (((t & BIT(DATA_OUT_PING_TOKEN_INTERRUPT))
					&& req
					&& req->req.actual == req->req.length)
					|| (ep->responded && !req)) {
				ep->dev->protocol_stall = 1;
				set_halt (ep);
				ep->stopped = 1;
				if (req)
					done (ep, req, -EOVERFLOW);
				req = NULL;
			}
		}
	}

	if (unlikely (!req))
		return;

	/* manual DMA queue advance after short OUT */
	if (likely (ep->dma)) {
		if (t & BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT)) {
			u32 count;
			int stopped = ep->stopped;

			/* TRANSFERRED works around OUT_DONE erratum 0112.
			 * we expect (N <= maxpacket) bytes; host wrote M.
			 * iff (M < N) we won't ever see a DMA interrupt.
			 */
			ep->stopped = 1;
			for (count = 0; ; t = readl (&ep->regs->ep_stat)) {

				/* any preceding dma transfers must finish.
				 * dma handles (M >= N), may empty the queue
				 */
				scan_dma_completions (ep);
				if (unlikely (list_empty (&ep->queue)
						|| ep->out_overflow)) {
					req = NULL;
					break;
				}
				req = list_entry (ep->queue.next,
					struct net2280_request, queue);

				/* here either (M < N), a "real" short rx;
				 * or (M == N) and the queue didn't empty
				 */
				if (likely(t & BIT(FIFO_EMPTY))) {
					count = readl (&ep->dma->dmacount);
					count &= DMA_BYTE_COUNT_MASK;
					if (readl (&ep->dma->dmadesc)
							!= req->td_dma)
						req = NULL;
					break;
				}
				udelay(1);
			}

			/* stop DMA, leave ep NAKing */
			writel(BIT(DMA_ABORT), &ep->dma->dmastat);
			spin_stop_dma (ep->dma);

			if (likely (req)) {
				req->td->dmacount = 0;
				t = readl (&ep->regs->ep_avail);
				dma_done (ep, req, count,
					(ep->out_overflow || t)
						? -EOVERFLOW : 0);
			}

			/* also flush to prevent erratum 0106 trouble */
			if (unlikely (ep->out_overflow
					|| (ep->dev->chiprev == 0x0100
						&& ep->dev->gadget.speed
							== USB_SPEED_FULL))) {
				out_flush (ep);
				ep->out_overflow = 0;
			}

			/* (re)start dma if needed, stop NAKing */
			ep->stopped = stopped;
			if (!list_empty (&ep->queue))
				restart_dma (ep);
		} else
			DEBUG (ep->dev, "%s dma ep_stat %08x ??\n",
					ep->ep.name, t);
		return;

	/* data packet(s) received (in the fifo, OUT) */
	} else if (t & BIT(DATA_PACKET_RECEIVED_INTERRUPT)) {
		if (read_fifo (ep, req) && ep->num != 0)
			mode = 2;

	/* data packet(s) transmitted (IN) */
	} else if (t & BIT(DATA_PACKET_TRANSMITTED_INTERRUPT)) {
		unsigned len;

		len = req->req.length - req->req.actual;
		if (len > ep->ep.maxpacket)
			len = ep->ep.maxpacket;
		req->req.actual += len;

		/* if we wrote it all, we're usually done */
		if (req->req.actual == req->req.length) {
			if (ep->num == 0) {
				/* send zlps until the status stage */
			} else if (!req->req.zero || len != ep->ep.maxpacket)
				mode = 2;
		}

	/* there was nothing to do ... */
	} else if (mode == 1)
		return;

	/* done */
	if (mode == 2) {
		/* stream endpoints often resubmit/unlink in completion */
		done (ep, req, 0);

		/* maybe advance queue to next request */
		if (ep->num == 0) {
			/* NOTE:  net2280 could let gadget driver start the
			 * status stage later. since not all controllers let
			 * them control that, the api doesn't (yet) allow it.
			 */
			if (!ep->stopped)
				allow_status (ep);
			req = NULL;
		} else {
			if (!list_empty (&ep->queue) && !ep->stopped)
				req = list_entry (ep->queue.next,
					struct net2280_request, queue);
			else
				req = NULL;
			if (req && !ep->is_in)
				stop_out_naking (ep);
		}
	}

	/* is there a buffer for the next packet?
	 * for best streaming performance, make sure there is one.
	 */
	if (req && !ep->stopped) {

		/* load IN fifo with next packet (may be zlp) */
		if (t & BIT(DATA_PACKET_TRANSMITTED_INTERRUPT))
			write_fifo (ep, &req->req);
	}
}
2706
2707static struct net2280_ep *
2708get_ep_by_addr (struct net2280 *dev, u16 wIndex)
2709{
2710 struct net2280_ep *ep;
2711
2712 if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
2713 return &dev->ep [0];
2714 list_for_each_entry (ep, &dev->gadget.ep_list, ep.ep_list) {
2715 u8 bEndpointAddress;
2716
2717 if (!ep->desc)
2718 continue;
2719 bEndpointAddress = ep->desc->bEndpointAddress;
2720 if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
2721 continue;
2722 if ((wIndex & 0x0f) == (bEndpointAddress & 0x0f))
2723 return ep;
2724 }
2725 return NULL;
2726}
2727
adc82f77
RR
2728static void defect7374_workaround(struct net2280 *dev, struct usb_ctrlrequest r)
2729{
2730 u32 scratch, fsmvalue;
2731 u32 ack_wait_timeout, state;
2732
2733 /* Workaround for Defect 7374 (U1/U2 erroneously rejected): */
2734 scratch = get_idx_reg(dev->regs, SCRATCH);
2735 fsmvalue = scratch & (0xf << DEFECT7374_FSM_FIELD);
2736 scratch &= ~(0xf << DEFECT7374_FSM_FIELD);
2737
2738 if (!((fsmvalue == DEFECT7374_FSM_WAITING_FOR_CONTROL_READ) &&
2739 (r.bRequestType & USB_DIR_IN)))
2740 return;
2741
2742 /* This is the first Control Read for this connection: */
3e76fdcb 2743 if (!(readl(&dev->usb->usbstat) & BIT(SUPER_SPEED_MODE))) {
adc82f77
RR
2744 /*
2745 * Connection is NOT SS:
2746 * - Connection must be FS or HS.
2747 * - This FSM state should allow workaround software to
2748 * run after the next USB connection.
2749 */
2750 scratch |= DEFECT7374_FSM_NON_SS_CONTROL_READ;
2751 goto restore_data_eps;
2752 }
2753
2754 /* Connection is SS: */
2755 for (ack_wait_timeout = 0;
2756 ack_wait_timeout < DEFECT_7374_NUMBEROF_MAX_WAIT_LOOPS;
2757 ack_wait_timeout++) {
2758
2759 state = readl(&dev->plregs->pl_ep_status_1)
2760 & (0xff << STATE);
2761 if ((state >= (ACK_GOOD_NORMAL << STATE)) &&
2762 (state <= (ACK_GOOD_MORE_ACKS_TO_COME << STATE))) {
2763 scratch |= DEFECT7374_FSM_SS_CONTROL_READ;
2764 break;
2765 }
2766
2767 /*
2768 * We have not yet received host's Data Phase ACK
2769 * - Wait and try again.
2770 */
2771 udelay(DEFECT_7374_PROCESSOR_WAIT_TIME);
2772
2773 continue;
2774 }
2775
2776
2777 if (ack_wait_timeout >= DEFECT_7374_NUMBEROF_MAX_WAIT_LOOPS) {
2778 ERROR(dev, "FAIL: Defect 7374 workaround waited but failed "
2779 "to detect SS host's data phase ACK.");
2780 ERROR(dev, "PL_EP_STATUS_1(23:16):.Expected from 0x11 to 0x16"
2781 "got 0x%2.2x.\n", state >> STATE);
2782 } else {
2783 WARNING(dev, "INFO: Defect 7374 workaround waited about\n"
2784 "%duSec for Control Read Data Phase ACK\n",
2785 DEFECT_7374_PROCESSOR_WAIT_TIME * ack_wait_timeout);
2786 }
2787
2788restore_data_eps:
2789 /*
2790 * Restore data EPs to their pre-workaround settings (disabled,
2791 * initialized, and other details).
2792 */
2793 defect7374_disable_data_eps(dev);
2794
2795 set_idx_reg(dev->regs, SCRATCH, scratch);
2796
2797 return;
2798}
2799
/*
 * Set or clear the hardware halt condition on an endpoint (338x path).
 * When clearing on a SuperSpeed connection, first select the endpoint
 * in pl_ep_ctrl (via the ep_pl[] number mapping) and pulse
 * SEQUENCE_NUMBER_RESET — the Endpoint Halt (Clear) bit alone does not
 * reset the SS sequence number.  Updates ep->is_halt to match.
 */
static void ep_stall(struct net2280_ep *ep, int stall)
{
	struct net2280 *dev = ep->dev;
	u32 val;
	static const u32 ep_pl[9] = { 0, 3, 4, 7, 8, 2, 5, 6, 9 };

	if (stall) {
		writel(BIT(SET_ENDPOINT_HALT) |
		       /* BIT(SET_NAK_PACKETS) | */
		       BIT(CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE),
		       &ep->regs->ep_rsp);
		ep->is_halt = 1;
	} else {
		if (dev->gadget.speed == USB_SPEED_SUPER) {
			/*
			 * Workaround for SS SeqNum not cleared via
			 * Endpoint Halt (Clear) bit. select endpoint
			 */
			val = readl(&dev->plregs->pl_ep_ctrl);
			val = (val & ~0x1f) | ep_pl[ep->num];
			writel(val, &dev->plregs->pl_ep_ctrl);

			val |= BIT(SEQUENCE_NUMBER_RESET);
			writel(val, &dev->plregs->pl_ep_ctrl);
		}
		val = readl(&ep->regs->ep_rsp);
		val |= BIT(CLEAR_ENDPOINT_HALT) |
			BIT(CLEAR_ENDPOINT_TOGGLE);
		writel(val
		       /* | BIT(CLEAR_NAK_PACKETS)*/
		       , &ep->regs->ep_rsp);
		ep->is_halt = 0;
		/* read back to flush the posted write */
		val = readl(&ep->regs->ep_rsp);
	}
}
2835
/*
 * Software-level halt/wedge handling for standard requests (338x).
 * Setting halts the endpoint (ep0 uses the protocol-stall flag
 * instead, and DMA endpoints stop DMA first); "wedged" additionally
 * prevents a ClearFeature(HALT) from restarting it.  Clearing unhalts
 * and kicks the head of the queue so transfers resume.
 */
static void ep_stdrsp(struct net2280_ep *ep, int value, int wedged)
{
	/* set/clear, then synch memory views with the device */
	if (value) {
		ep->stopped = 1;
		if (ep->num == 0)
			ep->dev->protocol_stall = 1;
		else {
			if (ep->dma)
				ep_stop_dma(ep);
			ep_stall(ep, true);
		}

		if (wedged)
			ep->wedged = 1;
	} else {
		ep->stopped = 0;
		ep->wedged = 0;

		ep_stall(ep, false);

		/* Flush the queue */
		if (!list_empty(&ep->queue)) {
			struct net2280_request *req =
				list_entry(ep->queue.next, struct net2280_request,
					   queue);
			if (ep->dma)
				resume_dma(ep);
			else {
				if (ep->is_in)
					write_fifo(ep, &req->req);
				else {
					if (read_fifo(ep, req))
						done(ep, req, 0);
				}
			}
		}
	}
}
2875
/*
 * SuperSpeed (338x) control-request dispatcher, called from the stat0
 * irq path with dev->lock held.  Handles GET_STATUS and the U1/U2/LTM
 * and remote-wakeup SET/CLEAR_FEATURE requests plus endpoint halt in
 * hardware-assisted software; everything else is delegated to the
 * gadget driver's ->setup() (with the lock dropped around the call).
 * A negative ->setup() result falls through to do_stall3 and stalls
 * ep0 (TD 9.9 / TD 9.22 behavior).
 */
static void handle_stat0_irqs_superspeed(struct net2280 *dev,
		struct net2280_ep *ep, struct usb_ctrlrequest r)
{
	int tmp = 0;

#define	w_value		le16_to_cpu(r.wValue)
#define	w_index		le16_to_cpu(r.wIndex)
#define	w_length	le16_to_cpu(r.wLength)

	switch (r.bRequest) {
		struct net2280_ep *e;
		u16 status;

	case USB_REQ_SET_CONFIGURATION:
		/* remember whether we are still in the Address state:
		 * U1/U2/LTM features may only change when configured */
		dev->addressed_state = !w_value;
		goto usb3_delegate;

	case USB_REQ_GET_STATUS:
		switch (r.bRequestType) {
		case (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE):
			/* bit0 self-powered, bit1 remote wakeup,
			 * bits 2..4 U1/U2/LTM enables */
			status = dev->wakeup_enable ? 0x02 : 0x00;
			if (dev->selfpowered)
				status |= BIT(0);
			status |= (dev->u1_enable << 2 | dev->u2_enable << 3 |
							dev->ltm_enable << 4);
			writel(0, &dev->epregs[0].ep_irqenb);
			set_fifo_bytecount(ep, sizeof(status));
			writel((__force u32) status, &dev->epregs[0].ep_data);
			allow_status_338x(ep);
			break;

		case (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT):
			e = get_ep_by_addr(dev, w_index);
			if (!e)
				goto do_stall3;
			status = readl(&e->regs->ep_rsp) &
				BIT(CLEAR_ENDPOINT_HALT);
			writel(0, &dev->epregs[0].ep_irqenb);
			set_fifo_bytecount(ep, sizeof(status));
			writel((__force u32) status, &dev->epregs[0].ep_data);
			allow_status_338x(ep);
			break;

		default:
			goto usb3_delegate;
		}
		break;

	case USB_REQ_CLEAR_FEATURE:
		switch (r.bRequestType) {
		case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE):
			/* U1/U2/LTM may only be changed once configured */
			if (!dev->addressed_state) {
				switch (w_value) {
				case USB_DEVICE_U1_ENABLE:
					dev->u1_enable = 0;
					writel(readl(&dev->usb_ext->usbctl2) &
						~BIT(U1_ENABLE),
						&dev->usb_ext->usbctl2);
					allow_status_338x(ep);
					goto next_endpoints3;

				case USB_DEVICE_U2_ENABLE:
					dev->u2_enable = 0;
					writel(readl(&dev->usb_ext->usbctl2) &
						~BIT(U2_ENABLE),
						&dev->usb_ext->usbctl2);
					allow_status_338x(ep);
					goto next_endpoints3;

				case USB_DEVICE_LTM_ENABLE:
					dev->ltm_enable = 0;
					writel(readl(&dev->usb_ext->usbctl2) &
						~BIT(LTM_ENABLE),
						&dev->usb_ext->usbctl2);
					allow_status_338x(ep);
					goto next_endpoints3;

				default:
					break;
				}
			}
			if (w_value == USB_DEVICE_REMOTE_WAKEUP) {
				dev->wakeup_enable = 0;
				writel(readl(&dev->usb->usbctl) &
					~BIT(DEVICE_REMOTE_WAKEUP_ENABLE),
					&dev->usb->usbctl);
				allow_status_338x(ep);
				break;
			}
			goto usb3_delegate;

		case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT):
			e = get_ep_by_addr(dev, w_index);
			if (!e)
				goto do_stall3;
			if (w_value != USB_ENDPOINT_HALT)
				goto do_stall3;
			VDEBUG(dev, "%s clear halt\n", e->ep.name);
			ep_stall(e, false);
			if (!list_empty(&e->queue) && e->td_dma)
				restart_dma(e);
			allow_status(ep);
			ep->stopped = 1;
			break;

		default:
			goto usb3_delegate;
		}
		break;
	case USB_REQ_SET_FEATURE:
		switch (r.bRequestType) {
		case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE):
			/* U1/U2/LTM may only be changed once configured */
			if (!dev->addressed_state) {
				switch (w_value) {
				case USB_DEVICE_U1_ENABLE:
					dev->u1_enable = 1;
					writel(readl(&dev->usb_ext->usbctl2) |
						BIT(U1_ENABLE),
						&dev->usb_ext->usbctl2);
					allow_status_338x(ep);
					goto next_endpoints3;

				case USB_DEVICE_U2_ENABLE:
					dev->u2_enable = 1;
					writel(readl(&dev->usb_ext->usbctl2) |
						BIT(U2_ENABLE),
						&dev->usb_ext->usbctl2);
					allow_status_338x(ep);
					goto next_endpoints3;

				case USB_DEVICE_LTM_ENABLE:
					dev->ltm_enable = 1;
					writel(readl(&dev->usb_ext->usbctl2) |
						BIT(LTM_ENABLE),
						&dev->usb_ext->usbctl2);
					allow_status_338x(ep);
					goto next_endpoints3;
				default:
					break;
				}
			}

			if (w_value == USB_DEVICE_REMOTE_WAKEUP) {
				dev->wakeup_enable = 1;
				writel(readl(&dev->usb->usbctl) |
					BIT(DEVICE_REMOTE_WAKEUP_ENABLE),
					&dev->usb->usbctl);
				allow_status_338x(ep);
				break;
			}
			goto usb3_delegate;

		case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT):
			e = get_ep_by_addr(dev, w_index);
			if (!e || (w_value != USB_ENDPOINT_HALT))
				goto do_stall3;
			ep_stdrsp(e, true, false);
			allow_status_338x(ep);
			break;

		default:
			goto usb3_delegate;
		}

		break;
	default:

usb3_delegate:
		VDEBUG(dev, "setup %02x.%02x v%04x i%04x l%04x ep_cfg %08x\n",
				r.bRequestType, r.bRequest,
				w_value, w_index, w_length,
				readl(&ep->cfg->ep_cfg));

		/* delegate to the gadget driver; drop the lock around
		 * the callback, as it may queue requests */
		ep->responded = 0;
		spin_unlock(&dev->lock);
		tmp = dev->driver->setup(&dev->gadget, &r);
		spin_lock(&dev->lock);
	}
	/* falls through from the delegate case: stall on failure */
do_stall3:
	if (tmp < 0) {
		VDEBUG(dev, "req %02x.%02x protocol STALL; stat %d\n",
				r.bRequestType, r.bRequest, tmp);
		dev->protocol_stall = 1;
		/* TD 9.9 Halt Endpoint test. TD 9.22 Set feature test */
		ep_stall(ep, true);
	}

next_endpoints3:

#undef	w_value
#undef	w_index
#undef	w_length

	return;
}
3071
1da177e4
LT
/*
 * handle_stat0_irqs - service the IRQSTAT0 sources: SETUP packets arriving
 * on ep0 plus the per-endpoint PIO interrupts in the low seven bits.
 * Runs with dev->lock held (taken in net2280_irq()); the lock is dropped
 * only around the gadget driver's ->setup() callback.
 */
static void handle_stat0_irqs (struct net2280 *dev, u32 stat)
{
	struct net2280_ep	*ep;
	u32			num, scratch;

	/* most of these don't need individual acks */
	stat &= ~BIT(INTA_ASSERTED);
	if (!stat)
		return;
	// DEBUG (dev, "irqstat0 %04x\n", stat);

	/* starting a control request? */
	if (unlikely(stat & BIT(SETUP_PACKET_INTERRUPT))) {
		union {
			u32			raw [2];
			struct usb_ctrlrequest	r;
		} u;
		/* tmp doubles as an ep_stat ack mask below and as the
		 * ->setup() return code for the stall decision at the end
		 */
		int				tmp;
		struct net2280_request		*req;

		/* first SETUP after (re)connect: latch the negotiated speed
		 * from usbstat and size ep0 accordingly
		 */
		if (dev->gadget.speed == USB_SPEED_UNKNOWN) {
			u32 val = readl(&dev->usb->usbstat);
			if (val & BIT(SUPER_SPEED)) {
				dev->gadget.speed = USB_SPEED_SUPER;
				usb_ep_set_maxpacket_limit(&dev->ep[0].ep,
						EP0_SS_MAX_PACKET_SIZE);
			} else if (val & BIT(HIGH_SPEED)) {
				dev->gadget.speed = USB_SPEED_HIGH;
				usb_ep_set_maxpacket_limit(&dev->ep[0].ep,
						EP0_HS_MAX_PACKET_SIZE);
			} else {
				dev->gadget.speed = USB_SPEED_FULL;
				/* NOTE(review): full speed reuses the HS ep0
				 * limit here — confirm this is intentional
				 * rather than a missing FS-specific constant
				 */
				usb_ep_set_maxpacket_limit(&dev->ep[0].ep,
						EP0_HS_MAX_PACKET_SIZE);
			}
			net2280_led_speed (dev, dev->gadget.speed);
			DEBUG(dev, "%s\n", usb_speed_string(dev->gadget.speed));
		}

		ep = &dev->ep [0];
		ep->irqs++;

		/* make sure any leftover request state is cleared */
		stat &= ~BIT(ENDPOINT_0_INTERRUPT);
		while (!list_empty (&ep->queue)) {
			req = list_entry (ep->queue.next,
					struct net2280_request, queue);
			done (ep, req, (req->req.actual == req->req.length)
					? 0 : -EPROTO);
		}
		ep->stopped = 0;
		dev->protocol_stall = 0;
		/* PLX (usb338x) parts track halt in software; the legacy
		 * net228x parts instead get their ep0 status bits acked
		 */
		if (dev->pdev->vendor == PCI_VENDOR_ID_PLX)
			ep->is_halt = 0;
		else{
			/* FIFO over/underflow status bits exist only on 2280 */
			if (ep->dev->pdev->device == 0x2280)
				tmp = BIT(FIFO_OVERFLOW) |
				    BIT(FIFO_UNDERFLOW);
			else
				tmp = 0;

			writel(tmp | BIT(TIMEOUT) |
				   BIT(USB_STALL_SENT) |
				   BIT(USB_IN_NAK_SENT) |
				   BIT(USB_IN_ACK_RCVD) |
				   BIT(USB_OUT_PING_NAK_SENT) |
				   BIT(USB_OUT_ACK_SENT) |
				   BIT(SHORT_PACKET_OUT_DONE_INTERRUPT) |
				   BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT) |
				   BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
				   BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
				   BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
				   BIT(DATA_IN_TOKEN_INTERRUPT)
				   , &ep->regs->ep_stat);
		}
		/* the 8 SETUP bytes arrive through two 32-bit registers */
		u.raw[0] = readl(&dev->usb->setup0123);
		u.raw[1] = readl(&dev->usb->setup4567);

		cpu_to_le32s (&u.raw [0]);
		cpu_to_le32s (&u.raw [1]);

		if (dev->pdev->vendor == PCI_VENDOR_ID_PLX)
			defect7374_workaround(dev, u.r);

		tmp = 0;

#define	w_value		le16_to_cpu(u.r.wValue)
#define	w_index		le16_to_cpu(u.r.wIndex)
#define	w_length	le16_to_cpu(u.r.wLength)

		/* ack the irq */
		writel(BIT(SETUP_PACKET_INTERRUPT), &dev->regs->irqstat0);
		stat ^= BIT(SETUP_PACKET_INTERRUPT);

		/* watch control traffic at the token level, and force
		 * synchronization before letting the status stage happen.
		 * FIXME ignore tokens we'll NAK, until driver responds.
		 * that'll mean a lot less irqs for some drivers.
		 */
		ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0;
		if (ep->is_in) {
			scratch = BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
				BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
				BIT(DATA_IN_TOKEN_INTERRUPT);
			stop_out_naking (ep);
		} else
			scratch = BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
				BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
				BIT(DATA_IN_TOKEN_INTERRUPT);
		writel (scratch, &dev->epregs [0].ep_irqenb);

		/* we made the hardware handle most lowlevel requests;
		 * everything else goes uplevel to the gadget code.
		 */
		ep->responded = 1;

		/* superspeed control traffic has its own decoder */
		if (dev->gadget.speed == USB_SPEED_SUPER) {
			handle_stat0_irqs_superspeed(dev, ep, u.r);
			goto next_endpoints;
		}

		switch (u.r.bRequest) {
		case USB_REQ_GET_STATUS: {
			struct net2280_ep	*e;
			__le32			status;

			/* hw handles device and interface status */
			if (u.r.bRequestType != (USB_DIR_IN|USB_RECIP_ENDPOINT))
				goto delegate;
			if ((e = get_ep_by_addr (dev, w_index)) == NULL
					|| w_length > 2)
				goto do_stall;

			if (readl(&e->regs->ep_rsp) & BIT(SET_ENDPOINT_HALT))
				status = cpu_to_le32 (1);
			else
				status = cpu_to_le32 (0);

			/* don't bother with a request object! */
			writel (0, &dev->epregs [0].ep_irqenb);
			set_fifo_bytecount (ep, w_length);
			writel ((__force u32)status, &dev->epregs [0].ep_data);
			allow_status (ep);
			VDEBUG (dev, "%s stat %02x\n", ep->ep.name, status);
			goto next_endpoints;
			}
			break;
		case USB_REQ_CLEAR_FEATURE: {
			struct net2280_ep	*e;

			/* hw handles device features */
			if (u.r.bRequestType != USB_RECIP_ENDPOINT)
				goto delegate;
			if (w_value != USB_ENDPOINT_HALT
					|| w_length != 0)
				goto do_stall;
			if ((e = get_ep_by_addr (dev, w_index)) == NULL)
				goto do_stall;
			/* a wedged endpoint stays halted until set_wedge is
			 * undone by the gadget driver, not by the host
			 */
			if (e->wedged) {
				VDEBUG(dev, "%s wedged, halt not cleared\n",
						ep->ep.name);
			} else {
				VDEBUG(dev, "%s clear halt\n", e->ep.name);
				clear_halt(e);
				/* usb338x: kick any DMA that was queued
				 * while the endpoint was halted
				 */
				if (ep->dev->pdev->vendor ==
						PCI_VENDOR_ID_PLX &&
					!list_empty(&e->queue) && e->td_dma)
						restart_dma(e);
			}
			allow_status (ep);
			goto next_endpoints;
			}
			break;
		case USB_REQ_SET_FEATURE: {
			struct net2280_ep	*e;

			/* hw handles device features */
			if (u.r.bRequestType != USB_RECIP_ENDPOINT)
				goto delegate;
			if (w_value != USB_ENDPOINT_HALT
					|| w_length != 0)
				goto do_stall;
			if ((e = get_ep_by_addr (dev, w_index)) == NULL)
				goto do_stall;
			/* ep0 cannot be halted via SET_FEATURE */
			if (e->ep.name == ep0name)
				goto do_stall;
			set_halt (e);
			if (dev->pdev->vendor == PCI_VENDOR_ID_PLX && e->dma)
				abort_dma(e);
			allow_status (ep);
			VDEBUG (dev, "%s set halt\n", ep->ep.name);
			goto next_endpoints;
			}
			break;
		default:
delegate:
			VDEBUG (dev, "setup %02x.%02x v%04x i%04x l%04x "
				"ep_cfg %08x\n",
				u.r.bRequestType, u.r.bRequest,
				w_value, w_index, w_length,
				readl(&ep->cfg->ep_cfg));
			/* delegate to the gadget driver; drop the lock for
			 * the callback since it may queue requests
			 */
			ep->responded = 0;
			spin_unlock (&dev->lock);
			tmp = dev->driver->setup (&dev->gadget, &u.r);
			spin_lock (&dev->lock);
		}

		/* stall ep0 on error */
		if (tmp < 0) {
do_stall:
			VDEBUG (dev, "req %02x.%02x protocol STALL; stat %d\n",
					u.r.bRequestType, u.r.bRequest, tmp);
			dev->protocol_stall = 1;
		}

		/* some in/out token irq should follow; maybe stall then.
		 * driver must queue a request (even zlp) or halt ep0
		 * before the host times out.
		 */
	}

#undef	w_value
#undef	w_index
#undef	w_length

next_endpoints:
	/* endpoint data irq ? */
	scratch = stat & 0x7f;
	stat &= ~0x7f;
	for (num = 0; scratch; num++) {
		u32		t;

		/* do this endpoint's FIFO and queue need tending? */
		t = BIT(num);
		if ((scratch & t) == 0)
			continue;
		scratch ^= t;

		ep = &dev->ep [num];
		handle_ep_small (ep);
	}

	if (stat)
		DEBUG (dev, "unhandled irqstat0 %08x\n", stat);
}
3317
3e76fdcb
RR
/* aggregate irqstat1 masks for the DMA-completion and PCI-error sources
 * serviced by handle_stat1_irqs() below
 */
#define DMA_INTERRUPTS (BIT(DMA_D_INTERRUPT) | \
		BIT(DMA_C_INTERRUPT) | \
		BIT(DMA_B_INTERRUPT) | \
		BIT(DMA_A_INTERRUPT))
#define PCI_ERROR_INTERRUPTS ( \
		BIT(PCI_MASTER_ABORT_RECEIVED_INTERRUPT) | \
		BIT(PCI_TARGET_ABORT_RECEIVED_INTERRUPT) | \
		BIT(PCI_RETRY_ABORT_INTERRUPT))
1da177e4
LT
3326
/*
 * handle_stat1_irqs - service the IRQSTAT1 sources: VBUS / root-port-reset
 * disconnect detection, suspend/resume notification, per-channel DMA
 * completion for ep-a..ep-d, and PCI error reporting.  Runs with
 * dev->lock held (taken in net2280_irq()).
 */
static void handle_stat1_irqs (struct net2280 *dev, u32 stat)
{
	struct net2280_ep	*ep;
	u32			tmp, num, mask, scratch;

	/* after disconnect there's nothing else to do! */
	tmp = BIT(VBUS_INTERRUPT) | BIT(ROOT_PORT_RESET_INTERRUPT);
	mask = BIT(SUPER_SPEED) | BIT(HIGH_SPEED) | BIT(FULL_SPEED);

	/* VBUS disconnect is indicated by VBUS_PIN and VBUS_INTERRUPT set.
	 * Root Port Reset is indicated by ROOT_PORT_RESET_INTERRUPT set and
	 * both HIGH_SPEED and FULL_SPEED clear (as ROOT_PORT_RESET_INTERRUPT
	 * only indicates a change in the reset state).
	 */
	if (stat & tmp) {
		writel (tmp, &dev->regs->irqstat1);
		if ((((stat & BIT(ROOT_PORT_RESET_INTERRUPT))
					&& ((readl (&dev->usb->usbstat) & mask)
							== 0))
				|| ((readl (&dev->usb->usbctl)
					& BIT(VBUS_PIN)) == 0)
			    ) && ( dev->gadget.speed != USB_SPEED_UNKNOWN)) {
			DEBUG (dev, "disconnect %s\n",
					dev->driver->driver.name);
			stop_activity (dev, dev->driver);
			ep0_start (dev);
			return;
		}
		stat &= ~tmp;

		/* vBUS can bounce ... one of many reasons to ignore the
		 * notion of hotplug events on bus connect/disconnect!
		 */
		if (!stat)
			return;
	}

	/* NOTE: chip stays in PCI D0 state for now, but it could
	 * enter D1 to save more power
	 */
	tmp = BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT);
	if (stat & tmp) {
		writel (tmp, &dev->regs->irqstat1);
		/* the "change" bit fires for both directions; the request
		 * bit distinguishes suspend from resume
		 */
		if (stat & BIT(SUSPEND_REQUEST_INTERRUPT)) {
			if (dev->driver->suspend)
				dev->driver->suspend (&dev->gadget);
			if (!enable_suspend)
				stat &= ~BIT(SUSPEND_REQUEST_INTERRUPT);
		} else {
			if (dev->driver->resume)
				dev->driver->resume (&dev->gadget);
			/* at high speed, note erratum 0133 */
		}
		stat &= ~tmp;
	}

	/* clear any other status/irqs */
	if (stat)
		writel (stat, &dev->regs->irqstat1);

	/* some status we can just ignore */
	if (dev->pdev->device == 0x2280)
		stat &= ~(BIT(CONTROL_STATUS_INTERRUPT) |
			  BIT(SUSPEND_REQUEST_INTERRUPT) |
			  BIT(RESUME_INTERRUPT) |
			  BIT(SOF_INTERRUPT));
	else
		stat &= ~(BIT(CONTROL_STATUS_INTERRUPT) |
			  BIT(RESUME_INTERRUPT) |
			  BIT(SOF_DOWN_INTERRUPT) |
			  BIT(SOF_INTERRUPT));

	if (!stat)
		return;
	// DEBUG (dev, "irqstat1 %08x\n", stat);

	/* DMA status, for ep-{a,b,c,d} */
	scratch = stat & DMA_INTERRUPTS;
	stat &= ~DMA_INTERRUPTS;
	/* presumably the DMA_*_INTERRUPT bits sit at 9..12, so this aligns
	 * ep-a with bit 0 — verify against the register definitions
	 */
	scratch >>= 9;
	for (num = 0; scratch; num++) {
		struct net2280_dma_regs	__iomem *dma;

		tmp = BIT(num);
		if ((tmp & scratch) == 0)
			continue;
		scratch ^= tmp;

		/* ep[0] is the control endpoint; DMA channels map to 1..4 */
		ep = &dev->ep [num + 1];
		dma = ep->dma;

		if (!dma)
			continue;

		/* clear ep's dma status */
		tmp = readl (&dma->dmastat);
		writel (tmp, &dma->dmastat);

		/* dma sync*/
		if (dev->pdev->vendor == PCI_VENDOR_ID_PLX) {
			/* usb338x: skip OUT channels whose residual byte
			 * count shows the transfer hasn't fully landed yet
			 */
			u32 r_dmacount = readl(&dma->dmacount);
			if (!ep->is_in && (r_dmacount & 0x00FFFFFF) &&
			    (tmp & BIT(DMA_TRANSACTION_DONE_INTERRUPT)))
				continue;
		}

		/* chaining should stop on abort, short OUT from fifo,
		 * or (stat0 codepath) short OUT transfer.
		 */
		if (!use_dma_chaining) {
			if (!(tmp & BIT(DMA_TRANSACTION_DONE_INTERRUPT))) {
				DEBUG (ep->dev, "%s no xact done? %08x\n",
					ep->ep.name, tmp);
				continue;
			}
			stop_dma (ep->dma);
		}

		/* OUT transfers terminate when the data from the
		 * host is in our memory.  Process whatever's done.
		 * On this path, we know transfer's last packet wasn't
		 * less than req->length. NAK_OUT_PACKETS may be set,
		 * or the FIFO may already be holding new packets.
		 *
		 * IN transfers can linger in the FIFO for a very
		 * long time ... we ignore that for now, accounting
		 * precisely (like PIO does) needs per-packet irqs
		 */
		scan_dma_completions (ep);

		/* disable dma on inactive queues; else maybe restart */
		if (list_empty (&ep->queue)) {
			if (use_dma_chaining)
				stop_dma (ep->dma);
		} else {
			tmp = readl (&dma->dmactl);
			if (!use_dma_chaining || (tmp & BIT(DMA_ENABLE)) == 0)
				restart_dma (ep);
			else if (ep->is_in && use_dma_chaining) {
				struct net2280_request	*req;
				__le32			dmacount;

				/* the descriptor at the head of the chain
				 * may still have VALID_BIT clear; that's
				 * used to trigger changing DMA_FIFO_VALIDATE
				 * (affects automagic zlp writes).
				 */
				req = list_entry (ep->queue.next,
						struct net2280_request, queue);
				dmacount = req->td->dmacount;
				dmacount &= cpu_to_le32(BIT(VALID_BIT) |
						DMA_BYTE_COUNT_MASK);
				if (dmacount && (dmacount & valid_bit) == 0)
					restart_dma (ep);
			}
		}
		ep->irqs++;
	}

	/* NOTE:  there are other PCI errors we might usefully notice.
	 * if they appear very often, here's where to try recovering.
	 */
	if (stat & PCI_ERROR_INTERRUPTS) {
		ERROR (dev, "pci dma error; stat %08x\n", stat);
		stat &= ~PCI_ERROR_INTERRUPTS;
		/* these are fatal errors, but "maybe" they won't
		 * happen again ...
		 */
		stop_activity (dev, dev->driver);
		ep0_start (dev);
		stat = 0;
	}

	if (stat)
		DEBUG (dev, "unhandled irqstat1 %08x\n", stat);
}
3503
7d12e780 3504static irqreturn_t net2280_irq (int irq, void *_dev)
1da177e4
LT
3505{
3506 struct net2280 *dev = _dev;
3507
658ad5e0 3508 /* shared interrupt, not ours */
c2db8a8a 3509 if (dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY &&
3e76fdcb 3510 (!(readl(&dev->regs->irqstat0) & BIT(INTA_ASSERTED))))
658ad5e0
AS
3511 return IRQ_NONE;
3512
1da177e4
LT
3513 spin_lock (&dev->lock);
3514
3515 /* handle disconnect, dma, and more */
3516 handle_stat1_irqs (dev, readl (&dev->regs->irqstat1));
3517
3518 /* control requests and PIO */
3519 handle_stat0_irqs (dev, readl (&dev->regs->irqstat0));
3520
c2db8a8a 3521 if (dev->pdev->vendor == PCI_VENDOR_ID_PLX) {
adc82f77
RR
3522 /* re-enable interrupt to trigger any possible new interrupt */
3523 u32 pciirqenb1 = readl(&dev->regs->pciirqenb1);
3524 writel(pciirqenb1 & 0x7FFFFFFF, &dev->regs->pciirqenb1);
3525 writel(pciirqenb1, &dev->regs->pciirqenb1);
3526 }
3527
1da177e4
LT
3528 spin_unlock (&dev->lock);
3529
3530 return IRQ_HANDLED;
3531}
3532
3533/*-------------------------------------------------------------------------*/
3534
/* release callback registered via usb_add_gadget_udc_release(); frees
 * the net2280 instance stored as driver data on the gadget device
 */
static void gadget_release (struct device *_dev)
{
	kfree (dev_get_drvdata (_dev));
}
3541
/* tear down the binding between this driver and the pci device */

static void net2280_remove (struct pci_dev *pdev)
{
	struct net2280		*dev = pci_get_drvdata (pdev);

	usb_del_gadget_udc(&dev->gadget);

	/* any gadget driver must already have unbound */
	BUG_ON(dev->driver);

	/* then clean up the resources we allocated during probe()
	 * (roughly in reverse order; each step is guarded by the flag
	 * or pointer probe() set, so a partially-initialized device —
	 * probe's error path calls here too — tears down safely)
	 */
	net2280_led_shutdown (dev);
	if (dev->requests) {
		int		i;
		/* free the per-endpoint dummy descriptors (eps 1..4)
		 * before destroying the pool they came from
		 */
		for (i = 1; i < 5; i++) {
			if (!dev->ep [i].dummy)
				continue;
			pci_pool_free (dev->requests, dev->ep [i].dummy,
					dev->ep [i].td_dma);
		}
		pci_pool_destroy (dev->requests);
	}
	if (dev->got_irq)
		free_irq (pdev->irq, dev);
	if (use_msi && dev->pdev->vendor == PCI_VENDOR_ID_PLX)
		pci_disable_msi(pdev);
	if (dev->regs)
		iounmap (dev->regs);
	if (dev->region)
		release_mem_region (pci_resource_start (pdev, 0),
				pci_resource_len (pdev, 0));
	if (dev->enabled)
		pci_disable_device (pdev);
	device_remove_file (&pdev->dev, &dev_attr_registers);

	INFO (dev, "unbind\n");
}
3579
/* wrap this driver around the specified device, but
 * don't respond over USB until a gadget driver binds to us.
 */

static int net2280_probe (struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct net2280		*dev;
	unsigned long		resource, len;
	void			__iomem *base = NULL;
	int			retval, i;

	/* alloc, and start init */
	dev = kzalloc (sizeof *dev, GFP_KERNEL);
	if (dev == NULL){
		retval = -ENOMEM;
		goto done;
	}

	pci_set_drvdata (pdev, dev);
	spin_lock_init (&dev->lock);
	dev->pdev = pdev;
	dev->gadget.ops = &net2280_ops;
	/* usb338x parts (PCI_VENDOR_ID_PLX) are superspeed capable */
	dev->gadget.max_speed = (dev->pdev->vendor == PCI_VENDOR_ID_PLX) ?
				USB_SPEED_SUPER : USB_SPEED_HIGH;

	/* the "gadget" abstracts/virtualizes the controller */
	dev->gadget.name = driver_name;

	/* now all the pci goodies ... */
	if (pci_enable_device (pdev) < 0) {
		retval = -ENODEV;
		goto done;
	}
	dev->enabled = 1;

	/* BAR 0 holds all the registers
	 * BAR 1 is 8051 memory; unused here (note erratum 0103)
	 * BAR 2 is fifo memory; unused here
	 */
	resource = pci_resource_start (pdev, 0);
	len = pci_resource_len (pdev, 0);
	if (!request_mem_region (resource, len, driver_name)) {
		DEBUG (dev, "controller already in use\n");
		retval = -EBUSY;
		goto done;
	}
	dev->region = 1;

	/* FIXME provide firmware download interface to put
	 * 8051 code into the chip, e.g. to turn on PCI PM.
	 */

	base = ioremap_nocache (resource, len);
	if (base == NULL) {
		DEBUG (dev, "can't map memory\n");
		retval = -EFAULT;
		goto done;
	}
	/* carve BAR 0 into the fixed per-block register windows */
	dev->regs = (struct net2280_regs __iomem *) base;
	dev->usb = (struct net2280_usb_regs __iomem *) (base + 0x0080);
	dev->pci = (struct net2280_pci_regs __iomem *) (base + 0x0100);
	dev->dma = (struct net2280_dma_regs __iomem *) (base + 0x0180);
	dev->dep = (struct net2280_dep_regs __iomem *) (base + 0x0200);
	dev->epregs = (struct net2280_ep_regs __iomem *) (base + 0x0300);

	if (dev->pdev->vendor == PCI_VENDOR_ID_PLX) {
		/* usb338x-only register windows */
		u32 fsmvalue;
		u32 usbstat;
		dev->usb_ext = (struct usb338x_usb_ext_regs __iomem *)
							(base + 0x00b4);
		dev->fiforegs = (struct usb338x_fifo_regs __iomem *)
							(base + 0x0500);
		dev->llregs = (struct usb338x_ll_regs __iomem *)
							(base + 0x0700);
		dev->ll_lfps_regs = (struct usb338x_ll_lfps_regs __iomem *)
							(base + 0x0748);
		dev->ll_tsn_regs = (struct usb338x_ll_tsn_regs __iomem *)
							(base + 0x077c);
		dev->ll_chicken_reg = (struct usb338x_ll_chi_regs __iomem *)
							(base + 0x079c);
		dev->plregs = (struct usb338x_pl_regs __iomem *)
							(base + 0x0800);
		usbstat = readl(&dev->usb->usbstat);
		/* NOTE(review): bit 11 of usbstat selects enhanced mode —
		 * confirm against the usb338x data book
		 */
		dev->enhanced_mode = (usbstat & BIT(11)) ? 1 : 0;
		dev->n_ep = (dev->enhanced_mode) ? 9 : 5;
		/* put into initial config, link up all endpoints */
		fsmvalue = get_idx_reg(dev->regs, SCRATCH) &
					(0xf << DEFECT7374_FSM_FIELD);
		/* See if firmware needs to set up for workaround: */
		if (fsmvalue == DEFECT7374_FSM_SS_CONTROL_READ)
			writel(0, &dev->usb->usbctl);
	} else{
		dev->enhanced_mode = 0;
		dev->n_ep = 7;
		/* put into initial config, link up all endpoints */
		writel(0, &dev->usb->usbctl);
	}

	usb_reset (dev);
	usb_reinit (dev);

	/* irq setup after old hardware is cleaned up */
	if (!pdev->irq) {
		ERROR (dev, "No IRQ. Check PCI setup!\n");
		retval = -ENODEV;
		goto done;
	}

	/* MSI failure is non-fatal: fall back to the legacy IRQ */
	if (use_msi && dev->pdev->vendor == PCI_VENDOR_ID_PLX)
		if (pci_enable_msi(pdev))
			ERROR(dev, "Failed to enable MSI mode\n");

	if (request_irq (pdev->irq, net2280_irq, IRQF_SHARED, driver_name, dev)
			!= 0) {
		ERROR (dev, "request interrupt %d failed\n", pdev->irq);
		retval = -EBUSY;
		goto done;
	}
	dev->got_irq = 1;

	/* DMA setup */
	/* NOTE:  we know only the 32 LSBs of dma addresses may be nonzero */
	dev->requests = pci_pool_create ("requests", pdev,
		sizeof (struct net2280_dma),
		0 /* no alignment requirements */,
		0 /* or page-crossing issues */);
	if (!dev->requests) {
		DEBUG (dev, "can't get request pool\n");
		retval = -ENOMEM;
		goto done;
	}
	/* one "dummy" transfer descriptor per DMA-capable endpoint (1..4) */
	for (i = 1; i < 5; i++) {
		struct net2280_dma	*td;

		td = pci_pool_alloc (dev->requests, GFP_KERNEL,
				&dev->ep [i].td_dma);
		if (!td) {
			DEBUG (dev, "can't get dummy %d\n", i);
			retval = -ENOMEM;
			goto done;
		}
		td->dmacount = 0;	/* not VALID */
		td->dmadesc = td->dmaaddr;
		dev->ep [i].dummy = td;
	}

	/* enable lower-overhead pci memory bursts during DMA */
	if (dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY)
		writel(BIT(DMA_MEMORY_WRITE_AND_INVALIDATE_ENABLE) |
			/*
			 * 256 write retries may not be enough...
			   BIT(PCI_RETRY_ABORT_ENABLE) |
			*/
			BIT(DMA_READ_MULTIPLE_ENABLE) |
			BIT(DMA_READ_LINE_ENABLE),
			&dev->pci->pcimstctl);
	/* erratum 0115 shouldn't appear: Linux inits PCI_LATENCY_TIMER */
	pci_set_master (pdev);
	pci_try_set_mwi (pdev);

	/* ... also flushes any posted pci writes */
	dev->chiprev = get_idx_reg (dev->regs, REG_CHIPREV) & 0xffff;

	/* done */
	INFO (dev, "%s\n", driver_desc);
	INFO (dev, "irq %d, pci mem %p, chip rev %04x\n",
			pdev->irq, base, dev->chiprev);
	INFO(dev, "version: " DRIVER_VERSION "; dma %s %s\n",
		use_dma ? (use_dma_chaining ? "chaining" : "enabled")
			: "disabled",
		dev->enhanced_mode ? "enhanced mode" : "legacy mode");
	retval = device_create_file (&pdev->dev, &dev_attr_registers);
	if (retval) goto done;

	retval = usb_add_gadget_udc_release(&pdev->dev, &dev->gadget,
			gadget_release);
	if (retval)
		goto done;
	return 0;

done:
	/* net2280_remove() checks each per-resource flag, so it is safe
	 * to call on a partially-initialized device
	 */
	if (dev)
		net2280_remove (pdev);
	return retval;
}
3765
2d61bde7
AS
/* make sure the board is quiescent; otherwise it will continue
 * generating IRQs across the upcoming reboot.
 */

static void net2280_shutdown (struct pci_dev *pdev)
{
	struct net2280		*dev = pci_get_drvdata (pdev);

	/* disable IRQs */
	writel (0, &dev->regs->pciirqenb0);
	writel (0, &dev->regs->pciirqenb1);

	/* disable the pullup so the host will think we're gone */
	writel (0, &dev->usb->usbctl);

	/* Disable full-speed test mode
	 * (xcvrdiag only written on the legacy net228x parts here —
	 * presumably absent/irrelevant on usb338x; verify)
	 */
	if (dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY)
		writel(0, &dev->usb->xcvrdiag);
}
3785
1da177e4
LT
3786
3787/*-------------------------------------------------------------------------*/
3788
901b3d75
DB
3789static const struct pci_device_id pci_ids [] = { {
3790 .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
3791 .class_mask = ~0,
c2db8a8a 3792 .vendor = PCI_VENDOR_ID_PLX_LEGACY,
1da177e4
LT
3793 .device = 0x2280,
3794 .subvendor = PCI_ANY_ID,
3795 .subdevice = PCI_ANY_ID,
950ee4c8 3796}, {
901b3d75
DB
3797 .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
3798 .class_mask = ~0,
c2db8a8a 3799 .vendor = PCI_VENDOR_ID_PLX_LEGACY,
950ee4c8
GL
3800 .device = 0x2282,
3801 .subvendor = PCI_ANY_ID,
3802 .subdevice = PCI_ANY_ID,
adc82f77
RR
3803},
3804 {
3805 .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
3806 .class_mask = ~0,
c2db8a8a 3807 .vendor = PCI_VENDOR_ID_PLX,
adc82f77
RR
3808 .device = 0x3380,
3809 .subvendor = PCI_ANY_ID,
3810 .subdevice = PCI_ANY_ID,
3811 },
3812 {
3813 .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
3814 .class_mask = ~0,
c2db8a8a 3815 .vendor = PCI_VENDOR_ID_PLX,
adc82f77
RR
3816 .device = 0x3382,
3817 .subvendor = PCI_ANY_ID,
3818 .subdevice = PCI_ANY_ID,
3819 },
3820{ /* end: all zeroes */ }
1da177e4
LT
3821};
3822MODULE_DEVICE_TABLE (pci, pci_ids);
3823
/* pci driver glue; this is a "new style" PCI driver module */
static struct pci_driver net2280_pci_driver = {
	.name =		(char *) driver_name,
	.id_table =	pci_ids,

	.probe =	net2280_probe,
	.remove =	net2280_remove,
	/* .shutdown quiesces the chip so it can't IRQ across a reboot */
	.shutdown =	net2280_shutdown,

	/* FIXME add power management support */
};

MODULE_DESCRIPTION (DRIVER_DESC);
MODULE_AUTHOR ("David Brownell");
MODULE_LICENSE ("GPL");
3839
3840static int __init init (void)
3841{
3842 if (!use_dma)
3843 use_dma_chaining = 0;
3844 return pci_register_driver (&net2280_pci_driver);
3845}
3846module_init (init);
3847
/* module unload: unregister from PCI; the core then calls
 * net2280_remove() for every bound device
 */
static void __exit cleanup (void)
{
	pci_unregister_driver (&net2280_pci_driver);
}
module_exit (cleanup);