/*
 * Driver for the PLX NET2280 USB device controller.
 * Specs and errata are available from <http://www.plxtech.com>.
 *
 * PLX Technology Inc. (formerly NetChip Technology) supported the
 * development of this driver.
 *
 *
 * CODE STATUS HIGHLIGHTS
 *
 * This driver should work well with most "gadget" drivers, including
 * the Mass Storage, Serial, and Ethernet/RNDIS gadget drivers
 * as well as Gadget Zero and Gadgetfs.
 *
 * DMA is enabled by default.
 *
 * MSI is enabled by default.  The legacy IRQ is used if MSI couldn't
 * be enabled.
 *
 * Note that almost all the errata workarounds here are only needed for
 * rev1 chips.  Rev1a silicon (0110) fixes almost all of them.
 */

/*
 * Copyright (C) 2003 David Brownell
 * Copyright (C) 2003-2005 PLX Technology, Inc.
 * Copyright (C) 2014 Ricardo Ribalda - Qtechnology/AS
 *
 * Modified Seth Levy 2005 PLX Technology, Inc. to provide compatibility
 *	with 2282 chip
 *
 * Modified Ricardo Ribalda Qtechnology AS to provide compatibility
 *	with usb 338x chip. Based on PLX driver
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/prefetch.h>
#include <linux/io.h>

#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/unaligned.h>

#define	DRIVER_DESC		"PLX NET228x/USB338x USB Peripheral Controller"
#define	DRIVER_VERSION		"2005 Sept 27/v3.0"

#define	EP_DONTUSE		13	/* nonzero */

#define USE_RDK_LEDS		/* GPIO pins control three LEDs */


static const char driver_name[] = "net2280";
static const char driver_desc[] = DRIVER_DESC;

static const u32 ep_bit[9] = { 0, 17, 2, 19, 4, 1, 18, 3, 20 };
static const char ep0name[] = "ep0";
static const char *const ep_name[] = {
	ep0name,
	"ep-a", "ep-b", "ep-c", "ep-d",
	"ep-e", "ep-f", "ep-g", "ep-h",
};

/* mode 0 == ep-{a,b,c,d} 1K fifo each
 * mode 1 == ep-{a,b} 2K fifo each, ep-{c,d} unavailable
 * mode 2 == ep-a 2K fifo, ep-{b,c} 1K each, ep-d unavailable
 */
static ushort fifo_mode;

/* "modprobe net2280 fifo_mode=1" etc */
module_param(fifo_mode, ushort, 0644);

/* enable_suspend -- When enabled, the driver will respond to
 * USB suspend requests by powering down the NET2280.  Otherwise,
 * USB suspend requests will be ignored.  This is acceptable for
 * self-powered devices
 */
static bool enable_suspend;

/* "modprobe net2280 enable_suspend=1" etc */
module_param(enable_suspend, bool, 0444);

#define	DIR_STRING(bAddress) (((bAddress) & USB_DIR_IN) ? "in" : "out")

static char *type_string(u8 bmAttributes)
{
	switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) {
	case USB_ENDPOINT_XFER_BULK:	return "bulk";
	case USB_ENDPOINT_XFER_ISOC:	return "iso";
	case USB_ENDPOINT_XFER_INT:	return "intr";
	}
	return "control";
}

#include "net2280.h"

#define valid_bit	cpu_to_le32(BIT(VALID_BIT))
#define dma_done_ie	cpu_to_le32(BIT(DMA_DONE_INTERRUPT_ENABLE))

/*-------------------------------------------------------------------------*/
static inline void enable_pciirqenb(struct net2280_ep *ep)
{
	u32 tmp = readl(&ep->dev->regs->pciirqenb0);

	if (ep->dev->quirks & PLX_LEGACY)
		tmp |= BIT(ep->num);
	else
		tmp |= BIT(ep_bit[ep->num]);
	writel(tmp, &ep->dev->regs->pciirqenb0);

	return;
}
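
/*
 * NOTE: on the legacy NET228x parts the PCIIRQENB0 enable bit for an
 * endpoint is just its index (ep->num), while the USB338x scatters the
 * per-endpoint enables across the register; the ep_bit[] table above
 * remaps ep->num to the 338x bit position used here.
 */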

static int
net2280_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
{
	struct net2280		*dev;
	struct net2280_ep	*ep;
	u32			max, tmp;
	unsigned long		flags;
	static const u32 ep_key[9] = { 1, 0, 1, 0, 1, 1, 0, 1, 0 };

	ep = container_of(_ep, struct net2280_ep, ep);
	if (!_ep || !desc || ep->desc || _ep->name == ep0name ||
			desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;
	dev = ep->dev;
	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	/* erratum 0119 workaround ties up an endpoint number */
	if ((desc->bEndpointAddress & 0x0f) == EP_DONTUSE)
		return -EDOM;

	if (dev->quirks & PLX_SUPERSPEED) {
		if ((desc->bEndpointAddress & 0x0f) >= 0x0c)
			return -EDOM;
		ep->is_in = !!usb_endpoint_dir_in(desc);
		if (dev->enhanced_mode && ep->is_in && ep_key[ep->num])
			return -EINVAL;
	}

	/* sanity check ep-e/ep-f since their fifos are small */
	max = usb_endpoint_maxp(desc) & 0x1fff;
	if (ep->num > 4 && max > 64 && (dev->quirks & PLX_LEGACY))
		return -ERANGE;

	spin_lock_irqsave(&dev->lock, flags);
	_ep->maxpacket = max & 0x7ff;
	ep->desc = desc;

	/* ep_reset() has already been called */
	ep->stopped = 0;
	ep->wedged = 0;
	ep->out_overflow = 0;

	/* set speed-dependent max packet; may kick in high bandwidth */
	set_max_speed(ep, max);

	/* set type, direction, address; reset fifo counters */
	writel(BIT(FIFO_FLUSH), &ep->regs->ep_stat);
	tmp = (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK);
	if (tmp == USB_ENDPOINT_XFER_INT) {
		/* erratum 0105 workaround prevents hs NYET */
		if (dev->chiprev == 0100 &&
				dev->gadget.speed == USB_SPEED_HIGH &&
				!(desc->bEndpointAddress & USB_DIR_IN))
			writel(BIT(CLEAR_NAK_OUT_PACKETS_MODE),
				&ep->regs->ep_rsp);
	} else if (tmp == USB_ENDPOINT_XFER_BULK) {
		/* catch some particularly blatant driver bugs */
		if ((dev->gadget.speed == USB_SPEED_SUPER && max != 1024) ||
		    (dev->gadget.speed == USB_SPEED_HIGH && max != 512) ||
		    (dev->gadget.speed == USB_SPEED_FULL && max > 64)) {
			spin_unlock_irqrestore(&dev->lock, flags);
			return -ERANGE;
		}
	}
	ep->is_iso = (tmp == USB_ENDPOINT_XFER_ISOC);
	/* Enable this endpoint */
	if (dev->quirks & PLX_LEGACY) {
		tmp <<= ENDPOINT_TYPE;
		tmp |= desc->bEndpointAddress;
		/* default full fifo lines */
		tmp |= (4 << ENDPOINT_BYTE_COUNT);
		tmp |= BIT(ENDPOINT_ENABLE);
		ep->is_in = (tmp & USB_DIR_IN) != 0;
	} else {
		/* In Legacy mode, only OUT endpoints are used */
		if (dev->enhanced_mode && ep->is_in) {
			tmp <<= IN_ENDPOINT_TYPE;
			tmp |= BIT(IN_ENDPOINT_ENABLE);
			/* Not applicable to Legacy */
			tmp |= BIT(ENDPOINT_DIRECTION);
		} else {
			tmp <<= OUT_ENDPOINT_TYPE;
			tmp |= BIT(OUT_ENDPOINT_ENABLE);
			tmp |= (ep->is_in << ENDPOINT_DIRECTION);
		}

		tmp |= usb_endpoint_num(desc);
		tmp |= (ep->ep.maxburst << MAX_BURST_SIZE);
	}

	/* Make sure all the registers are written before ep_rsp */
	wmb();

	/* for OUT transfers, block the rx fifo until a read is posted */
	if (!ep->is_in)
		writel(BIT(SET_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
	else if (!(dev->quirks & PLX_2280)) {
		/* Added for 2282, Don't use nak packets on an in endpoint,
		 * this was ignored on 2280
		 */
		writel(BIT(CLEAR_NAK_OUT_PACKETS) |
			BIT(CLEAR_NAK_OUT_PACKETS_MODE), &ep->regs->ep_rsp);
	}

	writel(tmp, &ep->cfg->ep_cfg);

	/* enable irqs */
	if (!ep->dma) {				/* pio, per-packet */
		enable_pciirqenb(ep);

		tmp = BIT(DATA_PACKET_RECEIVED_INTERRUPT_ENABLE) |
			BIT(DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE);
		if (dev->quirks & PLX_2280)
			tmp |= readl(&ep->regs->ep_irqenb);
		writel(tmp, &ep->regs->ep_irqenb);
	} else {				/* dma, per-request */
		tmp = BIT((8 + ep->num));	/* completion */
		tmp |= readl(&dev->regs->pciirqenb1);
		writel(tmp, &dev->regs->pciirqenb1);

		/* for short OUT transfers, dma completions can't
		 * advance the queue; do it pio-style, by hand.
		 * NOTE erratum 0112 workaround #2
		 */
		if ((desc->bEndpointAddress & USB_DIR_IN) == 0) {
			tmp = BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT_ENABLE);
			writel(tmp, &ep->regs->ep_irqenb);

			enable_pciirqenb(ep);
		}
	}

	tmp = desc->bEndpointAddress;
	ep_dbg(dev, "enabled %s (ep%d%s-%s) %s max %04x\n",
		_ep->name, tmp & 0x0f, DIR_STRING(tmp),
		type_string(desc->bmAttributes),
		ep->dma ? "dma" : "pio", max);

	/* pci writes may still be posted */
	spin_unlock_irqrestore(&dev->lock, flags);
	return 0;
}

static int handshake(u32 __iomem *ptr, u32 mask, u32 done, int usec)
{
	u32	result;

	do {
		result = readl(ptr);
		if (result == ~(u32)0)		/* "device unplugged" */
			return -ENODEV;
		result &= mask;
		if (result == done)
			return 0;
		udelay(1);
		usec--;
	} while (usec > 0);
	return -ETIMEDOUT;
}
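
/*
 * NOTE: handshake() busy-polls a register until (value & mask) == done,
 * giving up after roughly 'usec' microseconds, or as soon as the chip
 * reads back as all-ones ("device unplugged").  spin_stop_dma() below,
 * for example, waits for the DMA engine to stop with
 * handshake(&dma->dmactl, BIT(DMA_ENABLE), 0, 50).
 */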

static const struct usb_ep_ops net2280_ep_ops;

static void ep_reset_228x(struct net2280_regs __iomem *regs,
			  struct net2280_ep *ep)
{
	u32 tmp;

	ep->desc = NULL;
	INIT_LIST_HEAD(&ep->queue);

	usb_ep_set_maxpacket_limit(&ep->ep, ~0);
	ep->ep.ops = &net2280_ep_ops;

	/* disable the dma, irqs, endpoint... */
	if (ep->dma) {
		writel(0, &ep->dma->dmactl);
		writel(BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT) |
			BIT(DMA_TRANSACTION_DONE_INTERRUPT) |
			BIT(DMA_ABORT),
			&ep->dma->dmastat);

		tmp = readl(&regs->pciirqenb0);
		tmp &= ~BIT(ep->num);
		writel(tmp, &regs->pciirqenb0);
	} else {
		tmp = readl(&regs->pciirqenb1);
		tmp &= ~BIT((8 + ep->num));	/* completion */
		writel(tmp, &regs->pciirqenb1);
	}
	writel(0, &ep->regs->ep_irqenb);

	/* init to our chosen defaults, notably so that we NAK OUT
	 * packets until the driver queues a read (+note erratum 0112)
	 */
	if (!ep->is_in || (ep->dev->quirks & PLX_2280)) {
		tmp = BIT(SET_NAK_OUT_PACKETS_MODE) |
			BIT(SET_NAK_OUT_PACKETS) |
			BIT(CLEAR_EP_HIDE_STATUS_PHASE) |
			BIT(CLEAR_INTERRUPT_MODE);
	} else {
		/* added for 2282 */
		tmp = BIT(CLEAR_NAK_OUT_PACKETS_MODE) |
			BIT(CLEAR_NAK_OUT_PACKETS) |
			BIT(CLEAR_EP_HIDE_STATUS_PHASE) |
			BIT(CLEAR_INTERRUPT_MODE);
	}

	if (ep->num != 0) {
		tmp |= BIT(CLEAR_ENDPOINT_TOGGLE) |
			BIT(CLEAR_ENDPOINT_HALT);
	}
	writel(tmp, &ep->regs->ep_rsp);

	/* scrub most status bits, and flush any fifo state */
	if (ep->dev->quirks & PLX_2280)
		tmp = BIT(FIFO_OVERFLOW) |
			BIT(FIFO_UNDERFLOW);
	else
		tmp = 0;

	writel(tmp | BIT(TIMEOUT) |
		BIT(USB_STALL_SENT) |
		BIT(USB_IN_NAK_SENT) |
		BIT(USB_IN_ACK_RCVD) |
		BIT(USB_OUT_PING_NAK_SENT) |
		BIT(USB_OUT_ACK_SENT) |
		BIT(FIFO_FLUSH) |
		BIT(SHORT_PACKET_OUT_DONE_INTERRUPT) |
		BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT) |
		BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
		BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
		BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
		BIT(DATA_IN_TOKEN_INTERRUPT),
		&ep->regs->ep_stat);

	/* fifo size is handled separately */
}

static void ep_reset_338x(struct net2280_regs __iomem *regs,
			  struct net2280_ep *ep)
{
	u32 tmp, dmastat;

	ep->desc = NULL;
	INIT_LIST_HEAD(&ep->queue);

	usb_ep_set_maxpacket_limit(&ep->ep, ~0);
	ep->ep.ops = &net2280_ep_ops;

	/* disable the dma, irqs, endpoint... */
	if (ep->dma) {
		writel(0, &ep->dma->dmactl);
		writel(BIT(DMA_ABORT_DONE_INTERRUPT) |
		       BIT(DMA_PAUSE_DONE_INTERRUPT) |
		       BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT) |
		       BIT(DMA_TRANSACTION_DONE_INTERRUPT),
		       /* | BIT(DMA_ABORT), */
		       &ep->dma->dmastat);

		dmastat = readl(&ep->dma->dmastat);
		if (dmastat == 0x5002) {
			ep_warn(ep->dev, "The dmastat return = %x!!\n",
				dmastat);
			writel(0x5a, &ep->dma->dmastat);
		}

		tmp = readl(&regs->pciirqenb0);
		tmp &= ~BIT(ep_bit[ep->num]);
		writel(tmp, &regs->pciirqenb0);
	} else {
		if (ep->num < 5) {
			tmp = readl(&regs->pciirqenb1);
			tmp &= ~BIT((8 + ep->num));	/* completion */
			writel(tmp, &regs->pciirqenb1);
		}
	}
	writel(0, &ep->regs->ep_irqenb);

	writel(BIT(SHORT_PACKET_OUT_DONE_INTERRUPT) |
	       BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT) |
	       BIT(FIFO_OVERFLOW) |
	       BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
	       BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
	       BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
	       BIT(DATA_IN_TOKEN_INTERRUPT), &ep->regs->ep_stat);
}

static void nuke(struct net2280_ep *);

static int net2280_disable(struct usb_ep *_ep)
{
	struct net2280_ep	*ep;
	unsigned long		flags;

	ep = container_of(_ep, struct net2280_ep, ep);
	if (!_ep || !ep->desc || _ep->name == ep0name)
		return -EINVAL;

	spin_lock_irqsave(&ep->dev->lock, flags);
	nuke(ep);

	if (ep->dev->quirks & PLX_SUPERSPEED)
		ep_reset_338x(ep->dev->regs, ep);
	else
		ep_reset_228x(ep->dev->regs, ep);

	ep_vdbg(ep->dev, "disabled %s %s\n",
			ep->dma ? "dma" : "pio", _ep->name);

	/* synch memory views with the device */
	(void)readl(&ep->cfg->ep_cfg);

	if (!ep->dma && ep->num >= 1 && ep->num <= 4)
		ep->dma = &ep->dev->dma[ep->num - 1];

	spin_unlock_irqrestore(&ep->dev->lock, flags);
	return 0;
}

/*-------------------------------------------------------------------------*/

static struct usb_request
*net2280_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
	struct net2280_ep	*ep;
	struct net2280_request	*req;

	if (!_ep)
		return NULL;
	ep = container_of(_ep, struct net2280_ep, ep);

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req)
		return NULL;

	INIT_LIST_HEAD(&req->queue);

	/* this dma descriptor may be swapped with the previous dummy */
	if (ep->dma) {
		struct net2280_dma	*td;

		td = pci_pool_alloc(ep->dev->requests, gfp_flags,
				&req->td_dma);
		if (!td) {
			kfree(req);
			return NULL;
		}
		td->dmacount = 0;	/* not VALID */
		td->dmadesc = td->dmaaddr;
		req->td = td;
	}
	return &req->req;
}

static void net2280_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	struct net2280_ep	*ep;
	struct net2280_request	*req;

	ep = container_of(_ep, struct net2280_ep, ep);
	if (!_ep || !_req)
		return;

	req = container_of(_req, struct net2280_request, req);
	WARN_ON(!list_empty(&req->queue));
	if (req->td)
		pci_pool_free(ep->dev->requests, req->td, req->td_dma);
	kfree(req);
}

/*-------------------------------------------------------------------------*/

/* load a packet into the fifo we use for usb IN transfers.
 * works for all endpoints.
 *
 * NOTE: pio with ep-a..ep-d could stuff multiple packets into the fifo
 * at a time, but this code is simpler because it knows it only writes
 * one packet.  ep-a..ep-d should use dma instead.
 */
static void write_fifo(struct net2280_ep *ep, struct usb_request *req)
{
	struct net2280_ep_regs __iomem *regs = ep->regs;
	u8		*buf;
	u32		tmp;
	unsigned	count, total;

	/* INVARIANT:  fifo is currently empty. (testable) */

	if (req) {
		buf = req->buf + req->actual;
		prefetch(buf);
		total = req->length - req->actual;
	} else {
		total = 0;
		buf = NULL;
	}

	/* write just one packet at a time */
	count = ep->ep.maxpacket;
	if (count > total)	/* min() cannot be used on a bitfield */
		count = total;

	ep_vdbg(ep->dev, "write %s fifo (IN) %d bytes%s req %p\n",
			ep->ep.name, count,
			(count != ep->ep.maxpacket) ? " (short)" : "",
			req);
	while (count >= 4) {
		/* NOTE be careful if you try to align these. fifo lines
		 * should normally be full (4 bytes) and successive partial
		 * lines are ok only in certain cases.
		 */
		tmp = get_unaligned((u32 *)buf);
		cpu_to_le32s(&tmp);
		writel(tmp, &regs->ep_data);
		buf += 4;
		count -= 4;
	}

	/* last fifo entry is "short" unless we wrote a full packet.
	 * also explicitly validate last word in (periodic) transfers
	 * when maxpacket is not a multiple of 4 bytes.
	 */
	if (count || total < ep->ep.maxpacket) {
		tmp = count ? get_unaligned((u32 *)buf) : count;
		cpu_to_le32s(&tmp);
		set_fifo_bytecount(ep, count & 0x03);
		writel(tmp, &regs->ep_data);
	}

	/* pci writes may still be posted */
}
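
/*
 * NOTE: the IN fifo is filled one 32-bit fifo line at a time; any
 * trailing partial word is written after set_fifo_bytecount() tells the
 * chip how many of its bytes are valid, which is also what validates
 * short and zero-length packets.
 */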

/* work around erratum 0106: PCI and USB race over the OUT fifo.
 * caller guarantees chiprev 0100, out endpoint is NAKing, and
 * there's no real data in the fifo.
 *
 * NOTE: also used in cases where that erratum doesn't apply:
 * where the host wrote "too much" data to us.
 */
static void out_flush(struct net2280_ep *ep)
{
	u32	__iomem *statp;
	u32	tmp;

	ASSERT_OUT_NAKING(ep);

	statp = &ep->regs->ep_stat;
	writel(BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
		BIT(DATA_PACKET_RECEIVED_INTERRUPT),
		statp);
	writel(BIT(FIFO_FLUSH), statp);
	/* Make sure that statp is written */
	mb();
	tmp = readl(statp);
	if (tmp & BIT(DATA_OUT_PING_TOKEN_INTERRUPT) &&
			/* high speed did bulk NYET; fifo isn't filling */
			ep->dev->gadget.speed == USB_SPEED_FULL) {
		unsigned	usec;

		usec = 50;		/* 64 byte bulk/interrupt */
		handshake(statp, BIT(USB_OUT_PING_NAK_SENT),
				BIT(USB_OUT_PING_NAK_SENT), usec);
		/* NAK done; now CLEAR_NAK_OUT_PACKETS is safe */
	}
}

/* unload packet(s) from the fifo we use for usb OUT transfers.
 * returns true iff the request completed, because of short packet
 * or the request buffer having filled with full packets.
 *
 * for ep-a..ep-d this will read multiple packets out when they
 * have been accepted.
 */
static int read_fifo(struct net2280_ep *ep, struct net2280_request *req)
{
	struct net2280_ep_regs __iomem *regs = ep->regs;
	u8		*buf = req->req.buf + req->req.actual;
	unsigned	count, tmp, is_short;
	unsigned	cleanup = 0, prevent = 0;

	/* erratum 0106 ... packets coming in during fifo reads might
	 * be incompletely rejected.  not all cases have workarounds.
	 */
	if (ep->dev->chiprev == 0x0100 &&
			ep->dev->gadget.speed == USB_SPEED_FULL) {
		udelay(1);
		tmp = readl(&ep->regs->ep_stat);
		if ((tmp & BIT(NAK_OUT_PACKETS)))
			cleanup = 1;
		else if ((tmp & BIT(FIFO_FULL))) {
			start_out_naking(ep);
			prevent = 1;
		}
		/* else: hope we don't see the problem */
	}

	/* never overflow the rx buffer. the fifo reads packets until
	 * it sees a short one; we might not be ready for them all.
	 */
	prefetchw(buf);
	count = readl(&regs->ep_avail);
	if (unlikely(count == 0)) {
		udelay(1);
		tmp = readl(&ep->regs->ep_stat);
		count = readl(&regs->ep_avail);
		/* handled that data already? */
		if (count == 0 && (tmp & BIT(NAK_OUT_PACKETS)) == 0)
			return 0;
	}

	tmp = req->req.length - req->req.actual;
	if (count > tmp) {
		/* as with DMA, data overflow gets flushed */
		if ((tmp % ep->ep.maxpacket) != 0) {
			ep_err(ep->dev,
				"%s out fifo %d bytes, expected %d\n",
				ep->ep.name, count, tmp);
			req->req.status = -EOVERFLOW;
			cleanup = 1;
			/* NAK_OUT_PACKETS will be set, so flushing is safe;
			 * the next read will start with the next packet
			 */
		} /* else it's a ZLP, no worries */
		count = tmp;
	}
	req->req.actual += count;

	is_short = (count == 0) || ((count % ep->ep.maxpacket) != 0);

	ep_vdbg(ep->dev, "read %s fifo (OUT) %d bytes%s%s%s req %p %d/%d\n",
			ep->ep.name, count, is_short ? " (short)" : "",
			cleanup ? " flush" : "", prevent ? " nak" : "",
			req, req->req.actual, req->req.length);

	while (count >= 4) {
		tmp = readl(&regs->ep_data);
		cpu_to_le32s(&tmp);
		put_unaligned(tmp, (u32 *)buf);
		buf += 4;
		count -= 4;
	}
	if (count) {
		tmp = readl(&regs->ep_data);
		/* LE conversion is implicit here: */
		do {
			*buf++ = (u8) tmp;
			tmp >>= 8;
		} while (--count);
	}
	if (cleanup)
		out_flush(ep);
	if (prevent) {
		writel(BIT(CLEAR_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
		(void) readl(&ep->regs->ep_rsp);
	}

	return is_short || ((req->req.actual == req->req.length) &&
			!req->req.zero);
}

/* fill out dma descriptor to match a given request */
static void fill_dma_desc(struct net2280_ep *ep,
		struct net2280_request *req, int valid)
{
	struct net2280_dma	*td = req->td;
	u32			dmacount = req->req.length;

	/* don't let DMA continue after a short OUT packet,
	 * so overruns can't affect the next transfer.
	 * in case of overruns on max-size packets, we can't
	 * stop the fifo from filling but we can flush it.
	 */
	if (ep->is_in)
		dmacount |= BIT(DMA_DIRECTION);
	if ((!ep->is_in && (dmacount % ep->ep.maxpacket) != 0) ||
					!(ep->dev->quirks & PLX_2280))
		dmacount |= BIT(END_OF_CHAIN);

	req->valid = valid;
	if (valid)
		dmacount |= BIT(VALID_BIT);
	dmacount |= BIT(DMA_DONE_INTERRUPT_ENABLE);

	/* td->dmadesc = previously set by caller */
	td->dmaaddr = cpu_to_le32 (req->req.dma);

	/* 2280 may be polling VALID_BIT through ep->dma->dmadesc */
	wmb();
	td->dmacount = cpu_to_le32(dmacount);
}
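
/*
 * NOTE: a descriptor's dmacount word holds the byte count plus the
 * control flags set above (DMA_DIRECTION, END_OF_CHAIN, VALID_BIT,
 * DMA_DONE_INTERRUPT_ENABLE).  It is written last, behind a write
 * barrier, because a 2280 may already be polling VALID_BIT in this
 * descriptor.
 */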

static const u32 dmactl_default =
		BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT) |
		BIT(DMA_CLEAR_COUNT_ENABLE) |
		/* erratum 0116 workaround part 1 (use POLLING) */
		(POLL_100_USEC << DESCRIPTOR_POLLING_RATE) |
		BIT(DMA_VALID_BIT_POLLING_ENABLE) |
		BIT(DMA_VALID_BIT_ENABLE) |
		BIT(DMA_SCATTER_GATHER_ENABLE) |
		/* erratum 0116 workaround part 2 (no AUTOSTART) */
		BIT(DMA_ENABLE);

static inline void spin_stop_dma(struct net2280_dma_regs __iomem *dma)
{
	handshake(&dma->dmactl, BIT(DMA_ENABLE), 0, 50);
}

static inline void stop_dma(struct net2280_dma_regs __iomem *dma)
{
	writel(readl(&dma->dmactl) & ~BIT(DMA_ENABLE), &dma->dmactl);
	spin_stop_dma(dma);
}

static void start_queue(struct net2280_ep *ep, u32 dmactl, u32 td_dma)
{
	struct net2280_dma_regs	__iomem *dma = ep->dma;
	unsigned int tmp = BIT(VALID_BIT) | (ep->is_in << DMA_DIRECTION);

	if (!(ep->dev->quirks & PLX_2280))
		tmp |= BIT(END_OF_CHAIN);

	writel(tmp, &dma->dmacount);
	writel(readl(&dma->dmastat), &dma->dmastat);

	writel(td_dma, &dma->dmadesc);
	if (ep->dev->quirks & PLX_SUPERSPEED)
		dmactl |= BIT(DMA_REQUEST_OUTSTANDING);
	writel(dmactl, &dma->dmactl);

	/* erratum 0116 workaround part 3:  pci arbiter away from net2280 */
	(void) readl(&ep->dev->pci->pcimstctl);

	writel(BIT(DMA_START), &dma->dmastat);

	if (!ep->is_in)
		stop_out_naking(ep);
}

static void start_dma(struct net2280_ep *ep, struct net2280_request *req)
{
	u32	tmp;
	struct net2280_dma_regs	__iomem *dma = ep->dma;

	/* FIXME can't use DMA for ZLPs */

	/* on this path we "know" there's no dma active (yet) */
	WARN_ON(readl(&dma->dmactl) & BIT(DMA_ENABLE));
	writel(0, &ep->dma->dmactl);

	/* previous OUT packet might have been short */
	if (!ep->is_in && (readl(&ep->regs->ep_stat) &
				BIT(NAK_OUT_PACKETS))) {
		writel(BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT),
			&ep->regs->ep_stat);

		tmp = readl(&ep->regs->ep_avail);
		if (tmp) {
			writel(readl(&dma->dmastat), &dma->dmastat);

			/* transfer all/some fifo data */
			writel(req->req.dma, &dma->dmaaddr);
			tmp = min(tmp, req->req.length);

			/* dma irq, faking scatterlist status */
			req->td->dmacount = cpu_to_le32(req->req.length - tmp);
			writel(BIT(DMA_DONE_INTERRUPT_ENABLE) | tmp,
					&dma->dmacount);
			req->td->dmadesc = 0;
			req->valid = 1;

			writel(BIT(DMA_ENABLE), &dma->dmactl);
			writel(BIT(DMA_START), &dma->dmastat);
			return;
		}
	}

	tmp = dmactl_default;

	/* force packet boundaries between dma requests, but prevent the
	 * controller from automagically writing a last "short" packet
	 * (zero length) unless the driver explicitly said to do that.
	 */
	if (ep->is_in) {
		if (likely((req->req.length % ep->ep.maxpacket) ||
							req->req.zero)){
			tmp |= BIT(DMA_FIFO_VALIDATE);
			ep->in_fifo_validate = 1;
		} else
			ep->in_fifo_validate = 0;
	}

	/* init req->td, pointing to the current dummy */
	req->td->dmadesc = cpu_to_le32 (ep->td_dma);
	fill_dma_desc(ep, req, 1);

	req->td->dmacount |= cpu_to_le32(BIT(END_OF_CHAIN));

	start_queue(ep, tmp, req->td_dma);
}

static inline void
queue_dma(struct net2280_ep *ep, struct net2280_request *req, int valid)
{
	struct net2280_dma	*end;
	dma_addr_t		tmp;

	/* swap new dummy for old, link; fill and maybe activate */
	end = ep->dummy;
	ep->dummy = req->td;
	req->td = end;

	tmp = ep->td_dma;
	ep->td_dma = req->td_dma;
	req->td_dma = tmp;

	end->dmadesc = cpu_to_le32 (ep->td_dma);

	fill_dma_desc(ep, req, valid);
}
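
/*
 * NOTE: each dma endpoint keeps a spare "dummy" descriptor at the tail
 * of its chain.  queue_dma() swaps the request's descriptor with that
 * dummy, links the old tail to the new one, and only then fills it in
 * through fill_dma_desc(), so the controller never follows a
 * half-built link.
 */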

static void
done(struct net2280_ep *ep, struct net2280_request *req, int status)
{
	struct net2280		*dev;
	unsigned		stopped = ep->stopped;

	list_del_init(&req->queue);

	if (req->req.status == -EINPROGRESS)
		req->req.status = status;
	else
		status = req->req.status;

	dev = ep->dev;
	if (ep->dma)
		usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);

	if (status && status != -ESHUTDOWN)
		ep_vdbg(dev, "complete %s req %p stat %d len %u/%u\n",
			ep->ep.name, &req->req, status,
			req->req.actual, req->req.length);

	/* don't modify queue heads during completion callback */
	ep->stopped = 1;
	spin_unlock(&dev->lock);
	usb_gadget_giveback_request(&ep->ep, &req->req);
	spin_lock(&dev->lock);
	ep->stopped = stopped;
}

/*-------------------------------------------------------------------------*/

static int
net2280_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
	struct net2280_request	*req;
	struct net2280_ep	*ep;
	struct net2280		*dev;
	unsigned long		flags;

	/* we always require a cpu-view buffer, so that we can
	 * always use pio (as fallback or whatever).
	 */
	req = container_of(_req, struct net2280_request, req);
	if (!_req || !_req->complete || !_req->buf ||
				!list_empty(&req->queue))
		return -EINVAL;
	if (_req->length > (~0 & DMA_BYTE_COUNT_MASK))
		return -EDOM;
	ep = container_of(_ep, struct net2280_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0))
		return -EINVAL;
	dev = ep->dev;
	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	/* FIXME implement PIO fallback for ZLPs with DMA */
	if (ep->dma && _req->length == 0)
		return -EOPNOTSUPP;

	/* set up dma mapping in case the caller didn't */
	if (ep->dma) {
		int ret;

		ret = usb_gadget_map_request(&dev->gadget, _req,
				ep->is_in);
		if (ret)
			return ret;
	}

#if 0
	ep_vdbg(dev, "%s queue req %p, len %d buf %p\n",
			_ep->name, _req, _req->length, _req->buf);
#endif

	spin_lock_irqsave(&dev->lock, flags);

	_req->status = -EINPROGRESS;
	_req->actual = 0;

	/* kickstart this i/o queue? */
	if (list_empty(&ep->queue) && !ep->stopped &&
		!((dev->quirks & PLX_SUPERSPEED) && ep->dma &&
		  (readl(&ep->regs->ep_rsp) & BIT(CLEAR_ENDPOINT_HALT)))) {

		/* use DMA if the endpoint supports it, else pio */
		if (ep->dma)
			start_dma(ep, req);
		else {
			/* maybe there's no control data, just status ack */
			if (ep->num == 0 && _req->length == 0) {
				allow_status(ep);
				done(ep, req, 0);
				ep_vdbg(dev, "%s status ack\n", ep->ep.name);
				goto done;
			}

			/* PIO ... stuff the fifo, or unblock it. */
			if (ep->is_in)
				write_fifo(ep, _req);
			else if (list_empty(&ep->queue)) {
				u32	s;

				/* OUT FIFO might have packet(s) buffered */
				s = readl(&ep->regs->ep_stat);
				if ((s & BIT(FIFO_EMPTY)) == 0) {
					/* note:  _req->short_not_ok is
					 * ignored here since PIO _always_
					 * stops queue advance here, and
					 * _req->status doesn't change for
					 * short reads (only _req->actual)
					 */
					if (read_fifo(ep, req) &&
							ep->num == 0) {
						done(ep, req, 0);
						allow_status(ep);
						/* don't queue it */
						req = NULL;
					} else if (read_fifo(ep, req) &&
							ep->num != 0) {
						done(ep, req, 0);
						req = NULL;
					} else
						s = readl(&ep->regs->ep_stat);
				}

				/* don't NAK, let the fifo fill */
				if (req && (s & BIT(NAK_OUT_PACKETS)))
					writel(BIT(CLEAR_NAK_OUT_PACKETS),
							&ep->regs->ep_rsp);
			}
		}

	} else if (ep->dma) {
		int	valid = 1;

		if (ep->is_in) {
			int	expect;

			/* preventing magic zlps is per-engine state, not
			 * per-transfer; irq logic must recover hiccups.
			 */
			expect = likely(req->req.zero ||
				(req->req.length % ep->ep.maxpacket));
			if (expect != ep->in_fifo_validate)
				valid = 0;
		}
		queue_dma(ep, req, valid);

	} /* else the irq handler advances the queue. */

	ep->responded = 1;
	if (req)
		list_add_tail(&req->queue, &ep->queue);
done:
	spin_unlock_irqrestore(&dev->lock, flags);

	/* pci writes may still be posted */
	return 0;
}

static inline void
dma_done(struct net2280_ep *ep, struct net2280_request *req, u32 dmacount,
		int status)
{
	req->req.actual = req->req.length - (DMA_BYTE_COUNT_MASK & dmacount);
	done(ep, req, status);
}

static void scan_dma_completions(struct net2280_ep *ep)
{
	/* only look at descriptors that were "naturally" retired,
	 * so fifo and list head state won't matter
	 */
	while (!list_empty(&ep->queue)) {
		struct net2280_request	*req;
		u32			tmp;

		req = list_entry(ep->queue.next,
				struct net2280_request, queue);
		if (!req->valid)
			break;
		rmb();
		tmp = le32_to_cpup(&req->td->dmacount);
		if ((tmp & BIT(VALID_BIT)) != 0)
			break;

		/* SHORT_PACKET_TRANSFERRED_INTERRUPT handles "usb-short"
		 * cases where DMA must be aborted; this code handles
		 * all non-abort DMA completions.
		 */
		if (unlikely(req->td->dmadesc == 0)) {
			/* paranoia */
			tmp = readl(&ep->dma->dmacount);
			if (tmp & DMA_BYTE_COUNT_MASK)
				break;
			/* single transfer mode */
			dma_done(ep, req, tmp, 0);
			break;
		} else if (!ep->is_in &&
			   (req->req.length % ep->ep.maxpacket) != 0) {
			if (ep->dev->quirks & PLX_SUPERSPEED)
				return dma_done(ep, req, tmp, 0);

			tmp = readl(&ep->regs->ep_stat);
			/* AVOID TROUBLE HERE by not issuing short reads from
			 * your gadget driver.  That helps avoid errata 0121,
			 * 0122, and 0124; not all cases trigger the warning.
			 */
			if ((tmp & BIT(NAK_OUT_PACKETS)) == 0) {
				ep_warn(ep->dev, "%s lost packet sync!\n",
						ep->ep.name);
				req->req.status = -EOVERFLOW;
			} else {
				tmp = readl(&ep->regs->ep_avail);
				if (tmp) {
					/* fifo gets flushed later */
					ep->out_overflow = 1;
					ep_dbg(ep->dev,
						"%s dma, discard %d len %d\n",
						ep->ep.name, tmp,
						req->req.length);
					req->req.status = -EOVERFLOW;
				}
			}
		}
		dma_done(ep, req, tmp, 0);
	}
}
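
/*
 * NOTE: completions are recognized purely from descriptor state
 * (VALID_BIT cleared by the hardware, or dmadesc == 0 for the
 * single-transfer path set up in start_dma()), so this scan can be
 * called from abort_dma() or net2280_dequeue() below without caring
 * about fifo or list-head state.
 */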

static void restart_dma(struct net2280_ep *ep)
{
	struct net2280_request *req;

	if (ep->stopped)
		return;
	req = list_entry(ep->queue.next, struct net2280_request, queue);

	start_dma(ep, req);
}

static void abort_dma(struct net2280_ep *ep)
{
	/* abort the current transfer */
	if (likely(!list_empty(&ep->queue))) {
		/* FIXME work around errata 0121, 0122, 0124 */
		writel(BIT(DMA_ABORT), &ep->dma->dmastat);
		spin_stop_dma(ep->dma);
	} else
		stop_dma(ep->dma);
	scan_dma_completions(ep);
}

/* dequeue ALL requests */
static void nuke(struct net2280_ep *ep)
{
	struct net2280_request	*req;

	/* called with spinlock held */
	ep->stopped = 1;
	if (ep->dma)
		abort_dma(ep);
	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next,
				struct net2280_request,
				queue);
		done(ep, req, -ESHUTDOWN);
	}
}

/* dequeue JUST ONE request */
static int net2280_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct net2280_ep	*ep;
	struct net2280_request	*req;
	unsigned long		flags;
	u32			dmactl;
	int			stopped;

	ep = container_of(_ep, struct net2280_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0) || !_req)
		return -EINVAL;

	spin_lock_irqsave(&ep->dev->lock, flags);
	stopped = ep->stopped;

	/* quiesce dma while we patch the queue */
	dmactl = 0;
	ep->stopped = 1;
	if (ep->dma) {
		dmactl = readl(&ep->dma->dmactl);
		/* WARNING erratum 0127 may kick in ... */
		stop_dma(ep->dma);
		scan_dma_completions(ep);
	}

	/* make sure it's still queued on this endpoint */
	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->req == _req)
			break;
	}
	if (&req->req != _req) {
		spin_unlock_irqrestore(&ep->dev->lock, flags);
		return -EINVAL;
	}

	/* queue head may be partially complete. */
	if (ep->queue.next == &req->queue) {
		if (ep->dma) {
			ep_dbg(ep->dev, "unlink (%s) dma\n", _ep->name);
			_req->status = -ECONNRESET;
			abort_dma(ep);
			if (likely(ep->queue.next == &req->queue)) {
				/* NOTE: misreports single-transfer mode */
				req->td->dmacount = 0;	/* invalidate */
				dma_done(ep, req,
					readl(&ep->dma->dmacount),
					-ECONNRESET);
			}
		} else {
			ep_dbg(ep->dev, "unlink (%s) pio\n", _ep->name);
			done(ep, req, -ECONNRESET);
		}
		req = NULL;
	}

	if (req)
		done(ep, req, -ECONNRESET);
	ep->stopped = stopped;

	if (ep->dma) {
		/* turn off dma on inactive queues */
		if (list_empty(&ep->queue))
			stop_dma(ep->dma);
		else if (!ep->stopped) {
			/* resume current request, or start new one */
			if (req)
				writel(dmactl, &ep->dma->dmactl);
			else
				start_dma(ep, list_entry(ep->queue.next,
					struct net2280_request, queue));
		}
	}

	spin_unlock_irqrestore(&ep->dev->lock, flags);
	return 0;
}

/*-------------------------------------------------------------------------*/

static int net2280_fifo_status(struct usb_ep *_ep);

static int
net2280_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
{
	struct net2280_ep	*ep;
	unsigned long		flags;
	int			retval = 0;

	ep = container_of(_ep, struct net2280_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0))
		return -EINVAL;
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;
	if (ep->desc /* not ep0 */ && (ep->desc->bmAttributes & 0x03)
						== USB_ENDPOINT_XFER_ISOC)
		return -EINVAL;

	spin_lock_irqsave(&ep->dev->lock, flags);
	if (!list_empty(&ep->queue))
		retval = -EAGAIN;
	else if (ep->is_in && value && net2280_fifo_status(_ep) != 0)
		retval = -EAGAIN;
	else {
		ep_vdbg(ep->dev, "%s %s %s\n", _ep->name,
				value ? "set" : "clear",
				wedged ? "wedge" : "halt");
		/* set/clear, then synch memory views with the device */
		if (value) {
			if (ep->num == 0)
				ep->dev->protocol_stall = 1;
			else
				set_halt(ep);
			if (wedged)
				ep->wedged = 1;
		} else {
			clear_halt(ep);
			if (ep->dev->quirks & PLX_SUPERSPEED &&
				!list_empty(&ep->queue) && ep->td_dma)
					restart_dma(ep);
			ep->wedged = 0;
		}
		(void) readl(&ep->regs->ep_rsp);
	}
	spin_unlock_irqrestore(&ep->dev->lock, flags);

	return retval;
}

static int net2280_set_halt(struct usb_ep *_ep, int value)
{
	return net2280_set_halt_and_wedge(_ep, value, 0);
}

static int net2280_set_wedge(struct usb_ep *_ep)
{
	if (!_ep || _ep->name == ep0name)
		return -EINVAL;
	return net2280_set_halt_and_wedge(_ep, 1, 1);
}

static int net2280_fifo_status(struct usb_ep *_ep)
{
	struct net2280_ep	*ep;
	u32			avail;

	ep = container_of(_ep, struct net2280_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0))
		return -ENODEV;
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	avail = readl(&ep->regs->ep_avail) & (BIT(12) - 1);
	if (avail > ep->fifo_size)
		return -EOVERFLOW;
	if (ep->is_in)
		avail = ep->fifo_size - avail;
	return avail;
}

static void net2280_fifo_flush(struct usb_ep *_ep)
{
	struct net2280_ep	*ep;

	ep = container_of(_ep, struct net2280_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0))
		return;
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
		return;

	writel(BIT(FIFO_FLUSH), &ep->regs->ep_stat);
	(void) readl(&ep->regs->ep_rsp);
}

static const struct usb_ep_ops net2280_ep_ops = {
	.enable		= net2280_enable,
	.disable	= net2280_disable,

	.alloc_request	= net2280_alloc_request,
	.free_request	= net2280_free_request,

	.queue		= net2280_queue,
	.dequeue	= net2280_dequeue,

	.set_halt	= net2280_set_halt,
	.set_wedge	= net2280_set_wedge,
	.fifo_status	= net2280_fifo_status,
	.fifo_flush	= net2280_fifo_flush,
};

/*-------------------------------------------------------------------------*/

static int net2280_get_frame(struct usb_gadget *_gadget)
{
	struct net2280		*dev;
	unsigned long		flags;
	u16			retval;

	if (!_gadget)
		return -ENODEV;
	dev = container_of(_gadget, struct net2280, gadget);
	spin_lock_irqsave(&dev->lock, flags);
	retval = get_idx_reg(dev->regs, REG_FRAME) & 0x03ff;
	spin_unlock_irqrestore(&dev->lock, flags);
	return retval;
}

static int net2280_wakeup(struct usb_gadget *_gadget)
{
	struct net2280		*dev;
	u32			tmp;
	unsigned long		flags;

	if (!_gadget)
		return 0;
	dev = container_of(_gadget, struct net2280, gadget);

	spin_lock_irqsave(&dev->lock, flags);
	tmp = readl(&dev->usb->usbctl);
	if (tmp & BIT(DEVICE_REMOTE_WAKEUP_ENABLE))
		writel(BIT(GENERATE_RESUME), &dev->usb->usbstat);
	spin_unlock_irqrestore(&dev->lock, flags);

	/* pci writes may still be posted */
	return 0;
}

static int net2280_set_selfpowered(struct usb_gadget *_gadget, int value)
{
	struct net2280		*dev;
	u32			tmp;
	unsigned long		flags;

	if (!_gadget)
		return 0;
	dev = container_of(_gadget, struct net2280, gadget);

	spin_lock_irqsave(&dev->lock, flags);
	tmp = readl(&dev->usb->usbctl);
	if (value) {
		tmp |= BIT(SELF_POWERED_STATUS);
		dev->selfpowered = 1;
	} else {
		tmp &= ~BIT(SELF_POWERED_STATUS);
		dev->selfpowered = 0;
	}
	writel(tmp, &dev->usb->usbctl);
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

static int net2280_pullup(struct usb_gadget *_gadget, int is_on)
{
	struct net2280	*dev;
	u32		tmp;
	unsigned long	flags;

	if (!_gadget)
		return -ENODEV;
	dev = container_of(_gadget, struct net2280, gadget);

	spin_lock_irqsave(&dev->lock, flags);
	tmp = readl(&dev->usb->usbctl);
	dev->softconnect = (is_on != 0);
	if (is_on)
		tmp |= BIT(USB_DETECT_ENABLE);
	else
		tmp &= ~BIT(USB_DETECT_ENABLE);
	writel(tmp, &dev->usb->usbctl);
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

static int net2280_start(struct usb_gadget *_gadget,
		struct usb_gadget_driver *driver);
static int net2280_stop(struct usb_gadget *_gadget);

static const struct usb_gadget_ops net2280_ops = {
	.get_frame	= net2280_get_frame,
	.wakeup		= net2280_wakeup,
	.set_selfpowered = net2280_set_selfpowered,
	.pullup		= net2280_pullup,
	.udc_start	= net2280_start,
	.udc_stop	= net2280_stop,
};

/*-------------------------------------------------------------------------*/

#ifdef CONFIG_USB_GADGET_DEBUG_FILES

/* FIXME move these into procfs, and use seq_file.
 * Sysfs _still_ doesn't behave for arbitrarily sized files,
 * and also doesn't help products using this with 2.4 kernels.
 */

/* "function" sysfs attribute */
static ssize_t function_show(struct device *_dev, struct device_attribute *attr,
			     char *buf)
{
	struct net2280	*dev = dev_get_drvdata(_dev);

	if (!dev->driver || !dev->driver->function ||
			strlen(dev->driver->function) > PAGE_SIZE)
		return 0;
	return scnprintf(buf, PAGE_SIZE, "%s\n", dev->driver->function);
}
static DEVICE_ATTR_RO(function);

static ssize_t registers_show(struct device *_dev,
			      struct device_attribute *attr, char *buf)
{
	struct net2280		*dev;
	char			*next;
	unsigned		size, t;
	unsigned long		flags;
	int			i;
	u32			t1, t2;
	const char		*s;

	dev = dev_get_drvdata(_dev);
	next = buf;
	size = PAGE_SIZE;
	spin_lock_irqsave(&dev->lock, flags);

	if (dev->driver)
		s = dev->driver->driver.name;
	else
		s = "(none)";

	/* Main Control Registers */
	t = scnprintf(next, size, "%s version " DRIVER_VERSION
			", chiprev %04x\n\n"
			"devinit %03x fifoctl %08x gadget '%s'\n"
			"pci irqenb0 %02x irqenb1 %08x "
			"irqstat0 %04x irqstat1 %08x\n",
			driver_name, dev->chiprev,
			readl(&dev->regs->devinit),
			readl(&dev->regs->fifoctl),
			s,
			readl(&dev->regs->pciirqenb0),
			readl(&dev->regs->pciirqenb1),
			readl(&dev->regs->irqstat0),
			readl(&dev->regs->irqstat1));
	size -= t;
	next += t;

	/* USB Control Registers */
	t1 = readl(&dev->usb->usbctl);
	t2 = readl(&dev->usb->usbstat);
	if (t1 & BIT(VBUS_PIN)) {
		if (t2 & BIT(HIGH_SPEED))
			s = "high speed";
		else if (dev->gadget.speed == USB_SPEED_UNKNOWN)
			s = "powered";
		else
			s = "full speed";
		/* full speed bit (6) not working?? */
	} else
		s = "not attached";
	t = scnprintf(next, size,
			"stdrsp %08x usbctl %08x usbstat %08x "
				"addr 0x%02x (%s)\n",
			readl(&dev->usb->stdrsp), t1, t2,
			readl(&dev->usb->ouraddr), s);
	size -= t;
	next += t;

	/* PCI Master Control Registers */

	/* DMA Control Registers */

	/* Configurable EP Control Registers */
	for (i = 0; i < dev->n_ep; i++) {
		struct net2280_ep	*ep;

		ep = &dev->ep[i];
		if (i && !ep->desc)
			continue;

		t1 = readl(&ep->cfg->ep_cfg);
		t2 = readl(&ep->regs->ep_rsp) & 0xff;
		t = scnprintf(next, size,
				"\n%s\tcfg %05x rsp (%02x) %s%s%s%s%s%s%s%s"
					"irqenb %02x\n",
				ep->ep.name, t1, t2,
				(t2 & BIT(CLEAR_NAK_OUT_PACKETS))
					? "NAK " : "",
				(t2 & BIT(CLEAR_EP_HIDE_STATUS_PHASE))
					? "hide " : "",
				(t2 & BIT(CLEAR_EP_FORCE_CRC_ERROR))
					? "CRC " : "",
				(t2 & BIT(CLEAR_INTERRUPT_MODE))
					? "interrupt " : "",
				(t2 & BIT(CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE))
					? "status " : "",
				(t2 & BIT(CLEAR_NAK_OUT_PACKETS_MODE))
					? "NAKmode " : "",
				(t2 & BIT(CLEAR_ENDPOINT_TOGGLE))
					? "DATA1 " : "DATA0 ",
				(t2 & BIT(CLEAR_ENDPOINT_HALT))
					? "HALT " : "",
				readl(&ep->regs->ep_irqenb));
		size -= t;
		next += t;

		t = scnprintf(next, size,
				"\tstat %08x avail %04x "
				"(ep%d%s-%s)%s\n",
				readl(&ep->regs->ep_stat),
				readl(&ep->regs->ep_avail),
				t1 & 0x0f, DIR_STRING(t1),
				type_string(t1 >> 8),
				ep->stopped ? "*" : "");
		size -= t;
		next += t;

		if (!ep->dma)
			continue;

		t = scnprintf(next, size,
				"  dma\tctl %08x stat %08x count %08x\n"
				"\taddr %08x desc %08x\n",
				readl(&ep->dma->dmactl),
				readl(&ep->dma->dmastat),
				readl(&ep->dma->dmacount),
				readl(&ep->dma->dmaaddr),
				readl(&ep->dma->dmadesc));
		size -= t;
		next += t;

	}

	/* Indexed Registers (none yet) */

	/* Statistics */
	t = scnprintf(next, size, "\nirqs:  ");
	size -= t;
	next += t;
	for (i = 0; i < dev->n_ep; i++) {
		struct net2280_ep	*ep;

		ep = &dev->ep[i];
		if (i && !ep->irqs)
			continue;
		t = scnprintf(next, size, " %s/%lu", ep->ep.name, ep->irqs);
		size -= t;
		next += t;

	}
	t = scnprintf(next, size, "\n");
	size -= t;
	next += t;

	spin_unlock_irqrestore(&dev->lock, flags);

	return PAGE_SIZE - size;
}
static DEVICE_ATTR_RO(registers);

static ssize_t queues_show(struct device *_dev, struct device_attribute *attr,
			   char *buf)
{
	struct net2280		*dev;
	char			*next;
	unsigned		size;
	unsigned long		flags;
	int			i;

	dev = dev_get_drvdata(_dev);
	next = buf;
	size = PAGE_SIZE;
	spin_lock_irqsave(&dev->lock, flags);

	for (i = 0; i < dev->n_ep; i++) {
		struct net2280_ep		*ep = &dev->ep[i];
		struct net2280_request		*req;
		int				t;

		if (i != 0) {
			const struct usb_endpoint_descriptor	*d;

			d = ep->desc;
			if (!d)
				continue;
			t = d->bEndpointAddress;
			t = scnprintf(next, size,
				"\n%s (ep%d%s-%s) max %04x %s fifo %d\n",
				ep->ep.name, t & USB_ENDPOINT_NUMBER_MASK,
				(t & USB_DIR_IN) ? "in" : "out",
				type_string(d->bmAttributes),
				usb_endpoint_maxp(d) & 0x1fff,
				ep->dma ? "dma" : "pio", ep->fifo_size
				);
		} else /* ep0 should only have one transfer queued */
			t = scnprintf(next, size, "ep0 max 64 pio %s\n",
					ep->is_in ? "in" : "out");
		if (t <= 0 || t > size)
			goto done;
		size -= t;
		next += t;

		if (list_empty(&ep->queue)) {
			t = scnprintf(next, size, "\t(nothing queued)\n");
			if (t <= 0 || t > size)
				goto done;
			size -= t;
			next += t;
			continue;
		}
		list_for_each_entry(req, &ep->queue, queue) {
			if (ep->dma && req->td_dma == readl(&ep->dma->dmadesc))
				t = scnprintf(next, size,
					"\treq %p len %d/%d "
					"buf %p (dmacount %08x)\n",
					&req->req, req->req.actual,
					req->req.length, req->req.buf,
					readl(&ep->dma->dmacount));
			else
				t = scnprintf(next, size,
					"\treq %p len %d/%d buf %p\n",
					&req->req, req->req.actual,
					req->req.length, req->req.buf);
			if (t <= 0 || t > size)
				goto done;
			size -= t;
			next += t;

			if (ep->dma) {
				struct net2280_dma	*td;

				td = req->td;
				t = scnprintf(next, size, "\t td %08x "
					" count %08x buf %08x desc %08x\n",
					(u32) req->td_dma,
					le32_to_cpu(td->dmacount),
					le32_to_cpu(td->dmaaddr),
					le32_to_cpu(td->dmadesc));
				if (t <= 0 || t > size)
					goto done;
				size -= t;
				next += t;
			}
		}
	}

done:
	spin_unlock_irqrestore(&dev->lock, flags);
	return PAGE_SIZE - size;
}
static DEVICE_ATTR_RO(queues);


#else

#define device_create_file(a, b)	(0)
#define device_remove_file(a, b)	do { } while (0)

#endif

/*-------------------------------------------------------------------------*/

/* another driver-specific mode might be a request type doing dma
 * to/from another device fifo instead of to/from memory.
 */

static void set_fifo_mode(struct net2280 *dev, int mode)
{
	/* keeping high bits preserves BAR2 */
	writel((0xffff << PCI_BASE2_RANGE) | mode, &dev->regs->fifoctl);

	/* always ep-{a,b,e,f} ... maybe not ep-c or ep-d */
	INIT_LIST_HEAD(&dev->gadget.ep_list);
	list_add_tail(&dev->ep[1].ep.ep_list, &dev->gadget.ep_list);
	list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
	switch (mode) {
	case 0:
		list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list);
		list_add_tail(&dev->ep[4].ep.ep_list, &dev->gadget.ep_list);
		dev->ep[1].fifo_size = dev->ep[2].fifo_size = 1024;
		break;
	case 1:
		dev->ep[1].fifo_size = dev->ep[2].fifo_size = 2048;
		break;
	case 2:
		list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list);
		dev->ep[1].fifo_size = 2048;
		dev->ep[2].fifo_size = 1024;
		break;
	}
	/* fifo sizes for ep0, ep-c, ep-d, ep-e, and ep-f never change */
	list_add_tail(&dev->ep[5].ep.ep_list, &dev->gadget.ep_list);
	list_add_tail(&dev->ep[6].ep.ep_list, &dev->gadget.ep_list);
}
1719
adc82f77
RR
1720static void defect7374_disable_data_eps(struct net2280 *dev)
1721{
1722 /*
1723 * For Defect 7374, disable data EPs (and more):
1724 * - This phase undoes the earlier phase of the Defect 7374 workaround,
 1725	 * returning ep regs back to normal.
1726 */
1727 struct net2280_ep *ep;
1728 int i;
1729 unsigned char ep_sel;
1730 u32 tmp_reg;
1731
1732 for (i = 1; i < 5; i++) {
1733 ep = &dev->ep[i];
1734 writel(0, &ep->cfg->ep_cfg);
1735 }
1736
1737 /* CSROUT, CSRIN, PCIOUT, PCIIN, STATIN, RCIN */
1738 for (i = 0; i < 6; i++)
1739 writel(0, &dev->dep[i].dep_cfg);
1740
1741 for (ep_sel = 0; ep_sel <= 21; ep_sel++) {
1742 /* Select an endpoint for subsequent operations: */
1743 tmp_reg = readl(&dev->plregs->pl_ep_ctrl);
1744 writel(((tmp_reg & ~0x1f) | ep_sel), &dev->plregs->pl_ep_ctrl);
1745
1746 if (ep_sel < 2 || (ep_sel > 9 && ep_sel < 14) ||
1747 ep_sel == 18 || ep_sel == 20)
1748 continue;
1749
1750 /* Change settings on some selected endpoints */
1751 tmp_reg = readl(&dev->plregs->pl_ep_cfg_4);
3e76fdcb 1752 tmp_reg &= ~BIT(NON_CTRL_IN_TOLERATE_BAD_DIR);
adc82f77
RR
1753 writel(tmp_reg, &dev->plregs->pl_ep_cfg_4);
1754 tmp_reg = readl(&dev->plregs->pl_ep_ctrl);
3e76fdcb 1755 tmp_reg |= BIT(EP_INITIALIZED);
adc82f77
RR
1756 writel(tmp_reg, &dev->plregs->pl_ep_ctrl);
1757 }
1758}
1759
1760static void defect7374_enable_data_eps_zero(struct net2280 *dev)
1761{
1762 u32 tmp = 0, tmp_reg;
5517525e 1763 u32 scratch;
adc82f77
RR
1764 int i;
1765 unsigned char ep_sel;
1766
1767 scratch = get_idx_reg(dev->regs, SCRATCH);
5517525e
RR
1768
1769 WARN_ON((scratch & (0xf << DEFECT7374_FSM_FIELD))
1770 == DEFECT7374_FSM_SS_CONTROL_READ);
1771
adc82f77
RR
1772 scratch &= ~(0xf << DEFECT7374_FSM_FIELD);
1773
5517525e
RR
 1774	ep_warn(dev, "Applying Defect 7374 workaround in software this time");
 1775	ep_warn(dev, "It will also run on cold reboot and on SS connect");
adc82f77 1776
5517525e
RR
 1777	/* GPEPs: */
1778 tmp = ((0 << ENDPOINT_NUMBER) | BIT(ENDPOINT_DIRECTION) |
1779 (2 << OUT_ENDPOINT_TYPE) | (2 << IN_ENDPOINT_TYPE) |
1780 ((dev->enhanced_mode) ?
1781 BIT(OUT_ENDPOINT_ENABLE) : BIT(ENDPOINT_ENABLE)) |
1782 BIT(IN_ENDPOINT_ENABLE));
adc82f77 1783
5517525e
RR
1784 for (i = 1; i < 5; i++)
1785 writel(tmp, &dev->ep[i].cfg->ep_cfg);
adc82f77 1786
5517525e
RR
 1787	/* CSRIN, PCIIN, STATIN, RCIN */
1788 tmp = ((0 << ENDPOINT_NUMBER) | BIT(ENDPOINT_ENABLE));
1789 writel(tmp, &dev->dep[1].dep_cfg);
1790 writel(tmp, &dev->dep[3].dep_cfg);
1791 writel(tmp, &dev->dep[4].dep_cfg);
1792 writel(tmp, &dev->dep[5].dep_cfg);
adc82f77 1793
5517525e
RR
 1794	/* Implemented for development and debug.
 1795	 * Can be refined/tuned later. */
1796 for (ep_sel = 0; ep_sel <= 21; ep_sel++) {
1797 /* Select an endpoint for subsequent operations: */
1798 tmp_reg = readl(&dev->plregs->pl_ep_ctrl);
1799 writel(((tmp_reg & ~0x1f) | ep_sel),
1800 &dev->plregs->pl_ep_ctrl);
1801
1802 if (ep_sel == 1) {
1803 tmp =
1804 (readl(&dev->plregs->pl_ep_ctrl) |
1805 BIT(CLEAR_ACK_ERROR_CODE) | 0);
1806 writel(tmp, &dev->plregs->pl_ep_ctrl);
1807 continue;
adc82f77
RR
1808 }
1809
5517525e
RR
1810 if (ep_sel == 0 || (ep_sel > 9 && ep_sel < 14) ||
1811 ep_sel == 18 || ep_sel == 20)
1812 continue;
1813
1814 tmp = (readl(&dev->plregs->pl_ep_cfg_4) |
1815 BIT(NON_CTRL_IN_TOLERATE_BAD_DIR) | 0);
1816 writel(tmp, &dev->plregs->pl_ep_cfg_4);
1817
1818 tmp = readl(&dev->plregs->pl_ep_ctrl) &
1819 ~BIT(EP_INITIALIZED);
1820 writel(tmp, &dev->plregs->pl_ep_ctrl);
adc82f77 1821
adc82f77 1822 }
5517525e
RR
1823
1824 /* Set FSM to focus on the first Control Read:
1825 * - Tip: Connection speed is known upon the first
 1826	 * setup request. */
1827 scratch |= DEFECT7374_FSM_WAITING_FOR_CONTROL_READ;
1828 set_idx_reg(dev->regs, SCRATCH, scratch);
1829
adc82f77
RR
1830}
1831
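/*
 * Defect 7374 workaround flow, as wired up in this driver: net2280_probe()
 * reads the SCRATCH FSM field to learn whether the workaround already ran
 * (dev->bug7734_patched).  If it has not, net2280_start() calls
 * defect7374_enable_data_eps_zero() so that defect7374_workaround() can
 * observe the first control read after connect; that routine then calls
 * defect7374_disable_data_eps() to restore the normal endpoint setup.
 */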
1da177e4
LT
1832/* keeping it simple:
1833 * - one bus driver, initted first;
1834 * - one function driver, initted second
1835 *
1836 * most of the work to support multiple net2280 controllers would
1837 * be to associate this gadget driver (yes?) with all of them, or
1838 * perhaps to bind specific drivers to specific devices.
1839 */
1840
adc82f77 1841static void usb_reset_228x(struct net2280 *dev)
1da177e4
LT
1842{
1843 u32 tmp;
1844
1845 dev->gadget.speed = USB_SPEED_UNKNOWN;
fae3c158 1846 (void) readl(&dev->usb->usbctl);
1da177e4 1847
fae3c158 1848 net2280_led_init(dev);
1da177e4
LT
1849
1850 /* disable automatic responses, and irqs */
fae3c158
RR
1851 writel(0, &dev->usb->stdrsp);
1852 writel(0, &dev->regs->pciirqenb0);
1853 writel(0, &dev->regs->pciirqenb1);
1da177e4
LT
1854
1855 /* clear old dma and irq state */
1856 for (tmp = 0; tmp < 4; tmp++) {
adc82f77 1857 struct net2280_ep *ep = &dev->ep[tmp + 1];
1da177e4 1858 if (ep->dma)
adc82f77 1859 abort_dma(ep);
1da177e4 1860 }
adc82f77 1861
fae3c158 1862	writel(~0, &dev->regs->irqstat0);
3e76fdcb 1863	writel(~(u32)BIT(SUSPEND_REQUEST_INTERRUPT), &dev->regs->irqstat1);
1da177e4
LT
1864
1865 /* reset, and enable pci */
3e76fdcb
RR
1866 tmp = readl(&dev->regs->devinit) |
1867 BIT(PCI_ENABLE) |
1868 BIT(FIFO_SOFT_RESET) |
1869 BIT(USB_SOFT_RESET) |
1870 BIT(M8051_RESET);
fae3c158 1871 writel(tmp, &dev->regs->devinit);
1da177e4
LT
1872
1873 /* standard fifo and endpoint allocations */
fae3c158 1874 set_fifo_mode(dev, (fifo_mode <= 2) ? fifo_mode : 0);
1da177e4
LT
1875}
1876
adc82f77
RR
1877static void usb_reset_338x(struct net2280 *dev)
1878{
1879 u32 tmp;
adc82f77
RR
1880
1881 dev->gadget.speed = USB_SPEED_UNKNOWN;
1882 (void)readl(&dev->usb->usbctl);
1883
1884 net2280_led_init(dev);
1885
5517525e 1886 if (dev->bug7734_patched) {
adc82f77
RR
1887 /* disable automatic responses, and irqs */
1888 writel(0, &dev->usb->stdrsp);
1889 writel(0, &dev->regs->pciirqenb0);
1890 writel(0, &dev->regs->pciirqenb1);
1891 }
1892
1893 /* clear old dma and irq state */
1894 for (tmp = 0; tmp < 4; tmp++) {
1895 struct net2280_ep *ep = &dev->ep[tmp + 1];
1896
1897 if (ep->dma)
1898 abort_dma(ep);
1899 }
1900
 1901	writel(~0, &dev->regs->irqstat0); writel(~0, &dev->regs->irqstat1);
1902
5517525e 1903 if (dev->bug7734_patched) {
adc82f77
RR
1904 /* reset, and enable pci */
1905 tmp = readl(&dev->regs->devinit) |
3e76fdcb
RR
1906 BIT(PCI_ENABLE) |
1907 BIT(FIFO_SOFT_RESET) |
1908 BIT(USB_SOFT_RESET) |
1909 BIT(M8051_RESET);
adc82f77
RR
1910
1911 writel(tmp, &dev->regs->devinit);
1912 }
1913
1914 /* always ep-{1,2,3,4} ... maybe not ep-3 or ep-4 */
1915 INIT_LIST_HEAD(&dev->gadget.ep_list);
1916
1917 for (tmp = 1; tmp < dev->n_ep; tmp++)
1918 list_add_tail(&dev->ep[tmp].ep.ep_list, &dev->gadget.ep_list);
1919
1920}
1921
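/*
 * Reset dispatch: the PLX_LEGACY quirk (set for the 2280/2282 PCI IDs in
 * pci_ids[] near the end of this file) selects the 228x path; the USB338x
 * parts take the 338x path, which also honors dev->bug7734_patched.
 */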
1922static void usb_reset(struct net2280 *dev)
1923{
2eeb0016 1924 if (dev->quirks & PLX_LEGACY)
adc82f77
RR
1925 return usb_reset_228x(dev);
1926 return usb_reset_338x(dev);
1927}
1928
1929static void usb_reinit_228x(struct net2280 *dev)
1da177e4
LT
1930{
1931 u32 tmp;
1da177e4
LT
1932
1933 /* basic endpoint init */
1934 for (tmp = 0; tmp < 7; tmp++) {
fae3c158 1935 struct net2280_ep *ep = &dev->ep[tmp];
1da177e4 1936
fae3c158 1937 ep->ep.name = ep_name[tmp];
1da177e4
LT
1938 ep->dev = dev;
1939 ep->num = tmp;
1940
1941 if (tmp > 0 && tmp <= 4) {
1942 ep->fifo_size = 1024;
d588ff58 1943 ep->dma = &dev->dma[tmp - 1];
1da177e4
LT
1944 } else
1945 ep->fifo_size = 64;
fae3c158 1946 ep->regs = &dev->epregs[tmp];
adc82f77
RR
1947 ep->cfg = &dev->epregs[tmp];
1948 ep_reset_228x(dev->regs, ep);
1da177e4 1949 }
fae3c158
RR
1950 usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 64);
1951 usb_ep_set_maxpacket_limit(&dev->ep[5].ep, 64);
1952 usb_ep_set_maxpacket_limit(&dev->ep[6].ep, 64);
1da177e4 1953
fae3c158
RR
1954 dev->gadget.ep0 = &dev->ep[0].ep;
1955 dev->ep[0].stopped = 0;
1956 INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
1da177e4
LT
1957
1958 /* we want to prevent lowlevel/insecure access from the USB host,
1959 * but erratum 0119 means this enable bit is ignored
1960 */
1961 for (tmp = 0; tmp < 5; tmp++)
fae3c158 1962 writel(EP_DONTUSE, &dev->dep[tmp].dep_cfg);
1da177e4
LT
1963}
1964
adc82f77
RR
1965static void usb_reinit_338x(struct net2280 *dev)
1966{
adc82f77
RR
1967 int i;
1968 u32 tmp, val;
adc82f77
RR
1969 static const u32 ne[9] = { 0, 1, 2, 3, 4, 1, 2, 3, 4 };
1970 static const u32 ep_reg_addr[9] = { 0x00, 0xC0, 0x00, 0xC0, 0x00,
1971 0x00, 0xC0, 0x00, 0xC0 };
1972
adc82f77
RR
1973 /* basic endpoint init */
1974 for (i = 0; i < dev->n_ep; i++) {
1975 struct net2280_ep *ep = &dev->ep[i];
1976
1977 ep->ep.name = ep_name[i];
1978 ep->dev = dev;
1979 ep->num = i;
1980
d588ff58 1981 if (i > 0 && i <= 4)
adc82f77
RR
1982 ep->dma = &dev->dma[i - 1];
1983
1984 if (dev->enhanced_mode) {
1985 ep->cfg = &dev->epregs[ne[i]];
1986 ep->regs = (struct net2280_ep_regs __iomem *)
c43e97b2 1987 (((void __iomem *)&dev->epregs[ne[i]]) +
adc82f77
RR
1988 ep_reg_addr[i]);
1989 ep->fiforegs = &dev->fiforegs[i];
1990 } else {
1991 ep->cfg = &dev->epregs[i];
1992 ep->regs = &dev->epregs[i];
1993 ep->fiforegs = &dev->fiforegs[i];
1994 }
1995
1996 ep->fifo_size = (i != 0) ? 2048 : 512;
1997
1998 ep_reset_338x(dev->regs, ep);
1999 }
2000 usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 512);
2001
2002 dev->gadget.ep0 = &dev->ep[0].ep;
2003 dev->ep[0].stopped = 0;
2004
2005 /* Link layer set up */
5517525e 2006 if (dev->bug7734_patched) {
adc82f77 2007 tmp = readl(&dev->usb_ext->usbctl2) &
3e76fdcb 2008 ~(BIT(U1_ENABLE) | BIT(U2_ENABLE) | BIT(LTM_ENABLE));
adc82f77
RR
2009 writel(tmp, &dev->usb_ext->usbctl2);
2010 }
2011
2012 /* Hardware Defect and Workaround */
2013 val = readl(&dev->ll_lfps_regs->ll_lfps_5);
2014 val &= ~(0xf << TIMER_LFPS_6US);
2015 val |= 0x5 << TIMER_LFPS_6US;
2016 writel(val, &dev->ll_lfps_regs->ll_lfps_5);
2017
2018 val = readl(&dev->ll_lfps_regs->ll_lfps_6);
2019 val &= ~(0xffff << TIMER_LFPS_80US);
2020 val |= 0x0100 << TIMER_LFPS_80US;
2021 writel(val, &dev->ll_lfps_regs->ll_lfps_6);
2022
2023 /*
 2024	 * AA_AB errata, issue 4: the SuperSpeed USB Hot Reset Exit Handshake
 2025	 * may fail in a specific case when the default register settings are
 2026	 * used.  This workaround also helps pass the enumeration test.
2027 */
2028 val = readl(&dev->ll_tsn_regs->ll_tsn_counters_2);
2029 val &= ~(0x1f << HOT_TX_NORESET_TS2);
2030 val |= 0x10 << HOT_TX_NORESET_TS2;
2031 writel(val, &dev->ll_tsn_regs->ll_tsn_counters_2);
2032
2033 val = readl(&dev->ll_tsn_regs->ll_tsn_counters_3);
2034 val &= ~(0x1f << HOT_RX_RESET_TS2);
2035 val |= 0x3 << HOT_RX_RESET_TS2;
2036 writel(val, &dev->ll_tsn_regs->ll_tsn_counters_3);
2037
2038 /*
2039 * Set Recovery Idle to Recover bit:
2040 * - On SS connections, setting Recovery Idle to Recover Fmw improves
2041 * link robustness with various hosts and hubs.
2042 * - It is safe to set for all connection speeds; all chip revisions.
2043 * - R-M-W to leave other bits undisturbed.
2044 * - Reference PLX TT-7372
2045 */
2046 val = readl(&dev->ll_chicken_reg->ll_tsn_chicken_bit);
3e76fdcb 2047 val |= BIT(RECOVERY_IDLE_TO_RECOVER_FMW);
adc82f77
RR
2048 writel(val, &dev->ll_chicken_reg->ll_tsn_chicken_bit);
2049
2050 INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
2051
2052 /* disable dedicated endpoints */
2053 writel(0x0D, &dev->dep[0].dep_cfg);
2054 writel(0x0D, &dev->dep[1].dep_cfg);
2055 writel(0x0E, &dev->dep[2].dep_cfg);
2056 writel(0x0E, &dev->dep[3].dep_cfg);
2057 writel(0x0F, &dev->dep[4].dep_cfg);
2058 writel(0x0C, &dev->dep[5].dep_cfg);
2059}
2060
2061static void usb_reinit(struct net2280 *dev)
2062{
2eeb0016 2063 if (dev->quirks & PLX_LEGACY)
adc82f77
RR
2064 return usb_reinit_228x(dev);
2065 return usb_reinit_338x(dev);
2066}
2067
2068static void ep0_start_228x(struct net2280 *dev)
1da177e4 2069{
3e76fdcb
RR
2070 writel(BIT(CLEAR_EP_HIDE_STATUS_PHASE) |
2071 BIT(CLEAR_NAK_OUT_PACKETS) |
ae8e530a
RR
2072 BIT(CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE),
2073 &dev->epregs[0].ep_rsp);
1da177e4
LT
2074
2075 /*
2076 * hardware optionally handles a bunch of standard requests
2077 * that the API hides from drivers anyway. have it do so.
2078 * endpoint status/features are handled in software, to
2079 * help pass tests for some dubious behavior.
2080 */
3e76fdcb
RR
2081 writel(BIT(SET_TEST_MODE) |
2082 BIT(SET_ADDRESS) |
2083 BIT(DEVICE_SET_CLEAR_DEVICE_REMOTE_WAKEUP) |
2084 BIT(GET_DEVICE_STATUS) |
ae8e530a
RR
2085 BIT(GET_INTERFACE_STATUS),
2086 &dev->usb->stdrsp);
3e76fdcb
RR
2087 writel(BIT(USB_ROOT_PORT_WAKEUP_ENABLE) |
2088 BIT(SELF_POWERED_USB_DEVICE) |
2089 BIT(REMOTE_WAKEUP_SUPPORT) |
2090 (dev->softconnect << USB_DETECT_ENABLE) |
2091 BIT(SELF_POWERED_STATUS),
2092 &dev->usb->usbctl);
1da177e4
LT
2093
2094 /* enable irqs so we can see ep0 and general operation */
3e76fdcb
RR
2095 writel(BIT(SETUP_PACKET_INTERRUPT_ENABLE) |
2096 BIT(ENDPOINT_0_INTERRUPT_ENABLE),
2097 &dev->regs->pciirqenb0);
2098 writel(BIT(PCI_INTERRUPT_ENABLE) |
2099 BIT(PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE) |
2100 BIT(PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE) |
2101 BIT(PCI_RETRY_ABORT_INTERRUPT_ENABLE) |
2102 BIT(VBUS_INTERRUPT_ENABLE) |
2103 BIT(ROOT_PORT_RESET_INTERRUPT_ENABLE) |
2104 BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE),
2105 &dev->regs->pciirqenb1);
1da177e4
LT
2106
2107 /* don't leave any writes posted */
fae3c158 2108 (void) readl(&dev->usb->usbctl);
1da177e4
LT
2109}
2110
adc82f77
RR
2111static void ep0_start_338x(struct net2280 *dev)
2112{
adc82f77 2113
5517525e 2114 if (dev->bug7734_patched)
3e76fdcb
RR
2115 writel(BIT(CLEAR_NAK_OUT_PACKETS_MODE) |
2116 BIT(SET_EP_HIDE_STATUS_PHASE),
adc82f77
RR
2117 &dev->epregs[0].ep_rsp);
2118
2119 /*
2120 * hardware optionally handles a bunch of standard requests
2121 * that the API hides from drivers anyway. have it do so.
2122 * endpoint status/features are handled in software, to
2123 * help pass tests for some dubious behavior.
2124 */
3e76fdcb
RR
2125 writel(BIT(SET_ISOCHRONOUS_DELAY) |
2126 BIT(SET_SEL) |
2127 BIT(SET_TEST_MODE) |
2128 BIT(SET_ADDRESS) |
2129 BIT(GET_INTERFACE_STATUS) |
2130 BIT(GET_DEVICE_STATUS),
adc82f77
RR
2131 &dev->usb->stdrsp);
2132 dev->wakeup_enable = 1;
3e76fdcb 2133 writel(BIT(USB_ROOT_PORT_WAKEUP_ENABLE) |
adc82f77 2134 (dev->softconnect << USB_DETECT_ENABLE) |
3e76fdcb 2135 BIT(DEVICE_REMOTE_WAKEUP_ENABLE),
adc82f77
RR
2136 &dev->usb->usbctl);
2137
2138 /* enable irqs so we can see ep0 and general operation */
3e76fdcb 2139 writel(BIT(SETUP_PACKET_INTERRUPT_ENABLE) |
ae8e530a
RR
2140 BIT(ENDPOINT_0_INTERRUPT_ENABLE),
2141 &dev->regs->pciirqenb0);
3e76fdcb
RR
2142 writel(BIT(PCI_INTERRUPT_ENABLE) |
2143 BIT(ROOT_PORT_RESET_INTERRUPT_ENABLE) |
2144 BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE) |
2145 BIT(VBUS_INTERRUPT_ENABLE),
adc82f77
RR
2146 &dev->regs->pciirqenb1);
2147
2148 /* don't leave any writes posted */
2149 (void)readl(&dev->usb->usbctl);
2150}
2151
2152static void ep0_start(struct net2280 *dev)
2153{
2eeb0016 2154 if (dev->quirks & PLX_LEGACY)
adc82f77
RR
2155 return ep0_start_228x(dev);
2156 return ep0_start_338x(dev);
2157}
2158
1da177e4
LT
2159/* when a driver is successfully registered, it will receive
2160 * control requests including set_configuration(), which enables
2161 * non-control requests. then usb traffic follows until a
2162 * disconnect is reported. then a host may connect again, or
2163 * the driver might get unbound.
2164 */
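/*
 * Binding is driven by the UDC core rather than called directly: a gadget
 * driver registers with the gadget framework and the core then invokes
 * net2280_start()/net2280_stop() below.  A minimal sketch (field names from
 * the usual struct usb_gadget_driver; treat it as illustrative only):
 *
 *	static struct usb_gadget_driver my_gadget = {
 *		.function   = "my-function",
 *		.max_speed  = USB_SPEED_HIGH,	// net2280_start() insists on this
 *		.bind       = my_bind,
 *		.setup      = my_setup,		// required, see the check below
 *		.disconnect = my_disconnect,
 *	};
 *	usb_gadget_probe_driver(&my_gadget);
 */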
4cf5e00b
FB
2165static int net2280_start(struct usb_gadget *_gadget,
2166 struct usb_gadget_driver *driver)
1da177e4 2167{
4cf5e00b 2168 struct net2280 *dev;
1da177e4
LT
2169 int retval;
2170 unsigned i;
2171
2172 /* insist on high speed support from the driver, since
2173 * (dev->usb->xcvrdiag & FORCE_FULL_SPEED_MODE)
2174 * "must not be used in normal operation"
2175 */
ae8e530a
RR
2176 if (!driver || driver->max_speed < USB_SPEED_HIGH ||
2177 !driver->setup)
1da177e4 2178 return -EINVAL;
4cf5e00b 2179
fae3c158 2180 dev = container_of(_gadget, struct net2280, gadget);
1da177e4 2181
adc82f77 2182 for (i = 0; i < dev->n_ep; i++)
fae3c158 2183 dev->ep[i].irqs = 0;
1da177e4
LT
2184
2185 /* hook up the driver ... */
2186 dev->softconnect = 1;
2187 driver->driver.bus = NULL;
2188 dev->driver = driver;
1da177e4 2189
fae3c158
RR
2190 retval = device_create_file(&dev->pdev->dev, &dev_attr_function);
2191 if (retval)
2192 goto err_unbind;
2193 retval = device_create_file(&dev->pdev->dev, &dev_attr_queues);
2194 if (retval)
2195 goto err_func;
1da177e4 2196
7a74c481 2197 /* enable host detection and ep0; and we're ready
1da177e4
LT
2198 * for set_configuration as well as eventual disconnect.
2199 */
fae3c158 2200 net2280_led_active(dev, 1);
adc82f77 2201
5517525e 2202 if ((dev->quirks & PLX_SUPERSPEED) && !dev->bug7734_patched)
adc82f77
RR
2203 defect7374_enable_data_eps_zero(dev);
2204
fae3c158 2205 ep0_start(dev);
1da177e4 2206
1da177e4
LT
2207 /* pci writes may still be posted */
2208 return 0;
b3899dac
JG
2209
2210err_func:
fae3c158 2211 device_remove_file(&dev->pdev->dev, &dev_attr_function);
b3899dac 2212err_unbind:
b3899dac
JG
2213 dev->driver = NULL;
2214 return retval;
1da177e4 2215}
1da177e4 2216
fae3c158 2217static void stop_activity(struct net2280 *dev, struct usb_gadget_driver *driver)
1da177e4
LT
2218{
2219 int i;
2220
2221 /* don't disconnect if it's not connected */
2222 if (dev->gadget.speed == USB_SPEED_UNKNOWN)
2223 driver = NULL;
2224
2225 /* stop hardware; prevent new request submissions;
2226 * and kill any outstanding requests.
2227 */
fae3c158 2228 usb_reset(dev);
adc82f77 2229 for (i = 0; i < dev->n_ep; i++)
fae3c158 2230 nuke(&dev->ep[i]);
1da177e4 2231
699412d9
FB
2232 /* report disconnect; the driver is already quiesced */
2233 if (driver) {
2234 spin_unlock(&dev->lock);
2235 driver->disconnect(&dev->gadget);
2236 spin_lock(&dev->lock);
2237 }
2238
fae3c158 2239 usb_reinit(dev);
1da177e4
LT
2240}
2241
22835b80 2242static int net2280_stop(struct usb_gadget *_gadget)
1da177e4 2243{
4cf5e00b 2244 struct net2280 *dev;
1da177e4
LT
2245 unsigned long flags;
2246
fae3c158 2247 dev = container_of(_gadget, struct net2280, gadget);
1da177e4 2248
fae3c158 2249 spin_lock_irqsave(&dev->lock, flags);
bfd0ed57 2250 stop_activity(dev, NULL);
fae3c158 2251 spin_unlock_irqrestore(&dev->lock, flags);
1da177e4 2252
fae3c158 2253 net2280_led_active(dev, 0);
2f076077 2254
fae3c158
RR
2255 device_remove_file(&dev->pdev->dev, &dev_attr_function);
2256 device_remove_file(&dev->pdev->dev, &dev_attr_queues);
1da177e4 2257
bfd0ed57 2258 dev->driver = NULL;
84237bfb 2259
1da177e4
LT
2260 return 0;
2261}
1da177e4
LT
2262
2263/*-------------------------------------------------------------------------*/
2264
2265/* handle ep0, ep-e, ep-f with 64 byte packets: packet per irq.
2266 * also works for dma-capable endpoints, in pio mode or just
2267 * to manually advance the queue after short OUT transfers.
2268 */
fae3c158 2269static void handle_ep_small(struct net2280_ep *ep)
1da177e4
LT
2270{
2271 struct net2280_request *req;
2272 u32 t;
2273 /* 0 error, 1 mid-data, 2 done */
2274 int mode = 1;
2275
fae3c158
RR
2276 if (!list_empty(&ep->queue))
2277 req = list_entry(ep->queue.next,
1da177e4
LT
2278 struct net2280_request, queue);
2279 else
2280 req = NULL;
2281
2282 /* ack all, and handle what we care about */
fae3c158 2283 t = readl(&ep->regs->ep_stat);
1da177e4
LT
2284 ep->irqs++;
2285#if 0
e56e69cc 2286 ep_vdbg(ep->dev, "%s ack ep_stat %08x, req %p\n",
1da177e4
LT
2287 ep->ep.name, t, req ? &req->req : 0);
2288#endif
2eeb0016 2289 if (!ep->is_in || (ep->dev->quirks & PLX_2280))
3e76fdcb 2290 writel(t & ~BIT(NAK_OUT_PACKETS), &ep->regs->ep_stat);
950ee4c8
GL
2291 else
2292 /* Added for 2282 */
fae3c158 2293 writel(t, &ep->regs->ep_stat);
1da177e4
LT
2294
2295 /* for ep0, monitor token irqs to catch data stage length errors
2296 * and to synchronize on status.
2297 *
2298 * also, to defer reporting of protocol stalls ... here's where
2299 * data or status first appears, handling stalls here should never
 2300	 * cause trouble on the host side.
2301 *
2302 * control requests could be slightly faster without token synch for
2303 * status, but status can jam up that way.
2304 */
fae3c158 2305 if (unlikely(ep->num == 0)) {
1da177e4
LT
2306 if (ep->is_in) {
2307 /* status; stop NAKing */
3e76fdcb 2308 if (t & BIT(DATA_OUT_PING_TOKEN_INTERRUPT)) {
1da177e4
LT
2309 if (ep->dev->protocol_stall) {
2310 ep->stopped = 1;
fae3c158 2311 set_halt(ep);
1da177e4
LT
2312 }
2313 if (!req)
fae3c158 2314 allow_status(ep);
1da177e4
LT
2315 mode = 2;
2316 /* reply to extra IN data tokens with a zlp */
3e76fdcb 2317 } else if (t & BIT(DATA_IN_TOKEN_INTERRUPT)) {
1da177e4
LT
2318 if (ep->dev->protocol_stall) {
2319 ep->stopped = 1;
fae3c158 2320 set_halt(ep);
1da177e4 2321 mode = 2;
1f26e28d
AS
2322 } else if (ep->responded &&
2323 !req && !ep->stopped)
fae3c158 2324 write_fifo(ep, NULL);
1da177e4
LT
2325 }
2326 } else {
2327 /* status; stop NAKing */
3e76fdcb 2328 if (t & BIT(DATA_IN_TOKEN_INTERRUPT)) {
1da177e4
LT
2329 if (ep->dev->protocol_stall) {
2330 ep->stopped = 1;
fae3c158 2331 set_halt(ep);
1da177e4
LT
2332 }
2333 mode = 2;
2334 /* an extra OUT token is an error */
ae8e530a
RR
2335 } else if (((t & BIT(DATA_OUT_PING_TOKEN_INTERRUPT)) &&
2336 req &&
2337 req->req.actual == req->req.length) ||
2338 (ep->responded && !req)) {
1da177e4 2339 ep->dev->protocol_stall = 1;
fae3c158 2340 set_halt(ep);
1da177e4
LT
2341 ep->stopped = 1;
2342 if (req)
fae3c158 2343 done(ep, req, -EOVERFLOW);
1da177e4
LT
2344 req = NULL;
2345 }
2346 }
2347 }
2348
fae3c158 2349 if (unlikely(!req))
1da177e4
LT
2350 return;
2351
2352 /* manual DMA queue advance after short OUT */
fae3c158 2353 if (likely(ep->dma)) {
3e76fdcb 2354 if (t & BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT)) {
1da177e4
LT
2355 u32 count;
2356 int stopped = ep->stopped;
2357
2358 /* TRANSFERRED works around OUT_DONE erratum 0112.
2359 * we expect (N <= maxpacket) bytes; host wrote M.
2360 * iff (M < N) we won't ever see a DMA interrupt.
2361 */
2362 ep->stopped = 1;
fae3c158 2363 for (count = 0; ; t = readl(&ep->regs->ep_stat)) {
1da177e4
LT
2364
2365 /* any preceding dma transfers must finish.
2366 * dma handles (M >= N), may empty the queue
2367 */
fae3c158 2368 scan_dma_completions(ep);
ae8e530a
RR
2369 if (unlikely(list_empty(&ep->queue) ||
2370 ep->out_overflow)) {
1da177e4
LT
2371 req = NULL;
2372 break;
2373 }
fae3c158 2374 req = list_entry(ep->queue.next,
1da177e4
LT
2375 struct net2280_request, queue);
2376
2377 /* here either (M < N), a "real" short rx;
2378 * or (M == N) and the queue didn't empty
2379 */
3e76fdcb 2380 if (likely(t & BIT(FIFO_EMPTY))) {
fae3c158 2381 count = readl(&ep->dma->dmacount);
1da177e4 2382 count &= DMA_BYTE_COUNT_MASK;
fae3c158 2383 if (readl(&ep->dma->dmadesc)
1da177e4
LT
2384 != req->td_dma)
2385 req = NULL;
2386 break;
2387 }
2388 udelay(1);
2389 }
2390
2391 /* stop DMA, leave ep NAKing */
3e76fdcb 2392 writel(BIT(DMA_ABORT), &ep->dma->dmastat);
fae3c158 2393 spin_stop_dma(ep->dma);
1da177e4 2394
fae3c158 2395 if (likely(req)) {
1da177e4 2396 req->td->dmacount = 0;
fae3c158
RR
2397 t = readl(&ep->regs->ep_avail);
2398 dma_done(ep, req, count,
901b3d75
DB
2399 (ep->out_overflow || t)
2400 ? -EOVERFLOW : 0);
1da177e4
LT
2401 }
2402
2403 /* also flush to prevent erratum 0106 trouble */
ae8e530a
RR
2404 if (unlikely(ep->out_overflow ||
2405 (ep->dev->chiprev == 0x0100 &&
2406 ep->dev->gadget.speed
2407 == USB_SPEED_FULL))) {
fae3c158 2408 out_flush(ep);
1da177e4
LT
2409 ep->out_overflow = 0;
2410 }
2411
2412 /* (re)start dma if needed, stop NAKing */
2413 ep->stopped = stopped;
fae3c158
RR
2414 if (!list_empty(&ep->queue))
2415 restart_dma(ep);
1da177e4 2416 } else
e56e69cc 2417 ep_dbg(ep->dev, "%s dma ep_stat %08x ??\n",
1da177e4
LT
2418 ep->ep.name, t);
2419 return;
2420
2421 /* data packet(s) received (in the fifo, OUT) */
3e76fdcb 2422 } else if (t & BIT(DATA_PACKET_RECEIVED_INTERRUPT)) {
fae3c158 2423 if (read_fifo(ep, req) && ep->num != 0)
1da177e4
LT
2424 mode = 2;
2425
2426 /* data packet(s) transmitted (IN) */
3e76fdcb 2427 } else if (t & BIT(DATA_PACKET_TRANSMITTED_INTERRUPT)) {
1da177e4
LT
2428 unsigned len;
2429
2430 len = req->req.length - req->req.actual;
2431 if (len > ep->ep.maxpacket)
2432 len = ep->ep.maxpacket;
2433 req->req.actual += len;
2434
2435 /* if we wrote it all, we're usually done */
fae3c158
RR
2436 /* send zlps until the status stage */
2437 if ((req->req.actual == req->req.length) &&
2438 (!req->req.zero || len != ep->ep.maxpacket) && ep->num)
1da177e4 2439 mode = 2;
1da177e4
LT
2440
2441 /* there was nothing to do ... */
2442 } else if (mode == 1)
2443 return;
2444
2445 /* done */
2446 if (mode == 2) {
2447 /* stream endpoints often resubmit/unlink in completion */
fae3c158 2448 done(ep, req, 0);
1da177e4
LT
2449
2450 /* maybe advance queue to next request */
2451 if (ep->num == 0) {
2452 /* NOTE: net2280 could let gadget driver start the
2453 * status stage later. since not all controllers let
2454 * them control that, the api doesn't (yet) allow it.
2455 */
2456 if (!ep->stopped)
fae3c158 2457 allow_status(ep);
1da177e4
LT
2458 req = NULL;
2459 } else {
fae3c158
RR
2460 if (!list_empty(&ep->queue) && !ep->stopped)
2461 req = list_entry(ep->queue.next,
1da177e4
LT
2462 struct net2280_request, queue);
2463 else
2464 req = NULL;
2465 if (req && !ep->is_in)
fae3c158 2466 stop_out_naking(ep);
1da177e4
LT
2467 }
2468 }
2469
2470 /* is there a buffer for the next packet?
2471 * for best streaming performance, make sure there is one.
2472 */
2473 if (req && !ep->stopped) {
2474
2475 /* load IN fifo with next packet (may be zlp) */
3e76fdcb 2476 if (t & BIT(DATA_PACKET_TRANSMITTED_INTERRUPT))
fae3c158 2477 write_fifo(ep, &req->req);
1da177e4
LT
2478 }
2479}
2480
fae3c158 2481static struct net2280_ep *get_ep_by_addr(struct net2280 *dev, u16 wIndex)
1da177e4
LT
2482{
2483 struct net2280_ep *ep;
2484
2485 if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
fae3c158
RR
2486 return &dev->ep[0];
2487 list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
1da177e4
LT
2488 u8 bEndpointAddress;
2489
2490 if (!ep->desc)
2491 continue;
2492 bEndpointAddress = ep->desc->bEndpointAddress;
2493 if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
2494 continue;
2495 if ((wIndex & 0x0f) == (bEndpointAddress & 0x0f))
2496 return ep;
2497 }
2498 return NULL;
2499}
2500
adc82f77
RR
2501static void defect7374_workaround(struct net2280 *dev, struct usb_ctrlrequest r)
2502{
2503 u32 scratch, fsmvalue;
2504 u32 ack_wait_timeout, state;
2505
2506 /* Workaround for Defect 7374 (U1/U2 erroneously rejected): */
2507 scratch = get_idx_reg(dev->regs, SCRATCH);
2508 fsmvalue = scratch & (0xf << DEFECT7374_FSM_FIELD);
2509 scratch &= ~(0xf << DEFECT7374_FSM_FIELD);
2510
2511 if (!((fsmvalue == DEFECT7374_FSM_WAITING_FOR_CONTROL_READ) &&
2512 (r.bRequestType & USB_DIR_IN)))
2513 return;
2514
2515 /* This is the first Control Read for this connection: */
3e76fdcb 2516 if (!(readl(&dev->usb->usbstat) & BIT(SUPER_SPEED_MODE))) {
adc82f77
RR
2517 /*
2518 * Connection is NOT SS:
2519 * - Connection must be FS or HS.
2520 * - This FSM state should allow workaround software to
2521 * run after the next USB connection.
2522 */
2523 scratch |= DEFECT7374_FSM_NON_SS_CONTROL_READ;
5517525e 2524 dev->bug7734_patched = 1;
adc82f77
RR
2525 goto restore_data_eps;
2526 }
2527
2528 /* Connection is SS: */
2529 for (ack_wait_timeout = 0;
2530 ack_wait_timeout < DEFECT_7374_NUMBEROF_MAX_WAIT_LOOPS;
2531 ack_wait_timeout++) {
2532
2533 state = readl(&dev->plregs->pl_ep_status_1)
2534 & (0xff << STATE);
2535 if ((state >= (ACK_GOOD_NORMAL << STATE)) &&
2536 (state <= (ACK_GOOD_MORE_ACKS_TO_COME << STATE))) {
2537 scratch |= DEFECT7374_FSM_SS_CONTROL_READ;
5517525e 2538 dev->bug7734_patched = 1;
adc82f77
RR
2539 break;
2540 }
2541
2542 /*
2543 * We have not yet received host's Data Phase ACK
2544 * - Wait and try again.
2545 */
2546 udelay(DEFECT_7374_PROCESSOR_WAIT_TIME);
2547
2548 continue;
2549 }
2550
2551
2552 if (ack_wait_timeout >= DEFECT_7374_NUMBEROF_MAX_WAIT_LOOPS) {
e56e69cc 2553 ep_err(dev, "FAIL: Defect 7374 workaround waited but failed "
adc82f77 2554 "to detect SS host's data phase ACK.");
e56e69cc 2555		ep_err(dev, "PL_EP_STATUS_1(23:16): expected from 0x11 to 0x16, "
adc82f77
RR
2556 "got 0x%2.2x.\n", state >> STATE);
2557 } else {
e56e69cc 2558 ep_warn(dev, "INFO: Defect 7374 workaround waited about\n"
adc82f77
RR
2559 "%duSec for Control Read Data Phase ACK\n",
2560 DEFECT_7374_PROCESSOR_WAIT_TIME * ack_wait_timeout);
2561 }
2562
2563restore_data_eps:
2564 /*
2565 * Restore data EPs to their pre-workaround settings (disabled,
2566 * initialized, and other details).
2567 */
2568 defect7374_disable_data_eps(dev);
2569
2570 set_idx_reg(dev->regs, SCRATCH, scratch);
2571
2572 return;
2573}
2574
e0cbb046 2575static void ep_clear_seqnum(struct net2280_ep *ep)
adc82f77
RR
2576{
2577 struct net2280 *dev = ep->dev;
2578 u32 val;
2579 static const u32 ep_pl[9] = { 0, 3, 4, 7, 8, 2, 5, 6, 9 };
2580
e0cbb046
RR
2581 val = readl(&dev->plregs->pl_ep_ctrl) & ~0x1f;
2582 val |= ep_pl[ep->num];
2583 writel(val, &dev->plregs->pl_ep_ctrl);
2584 val |= BIT(SEQUENCE_NUMBER_RESET);
2585 writel(val, &dev->plregs->pl_ep_ctrl);
adc82f77 2586
e0cbb046 2587 return;
adc82f77
RR
2588}
2589
adc82f77
RR
2590static void handle_stat0_irqs_superspeed(struct net2280 *dev,
2591 struct net2280_ep *ep, struct usb_ctrlrequest r)
2592{
2593 int tmp = 0;
2594
2595#define w_value le16_to_cpu(r.wValue)
2596#define w_index le16_to_cpu(r.wIndex)
2597#define w_length le16_to_cpu(r.wLength)
2598
2599 switch (r.bRequest) {
2600 struct net2280_ep *e;
2601 u16 status;
2602
2603 case USB_REQ_SET_CONFIGURATION:
2604 dev->addressed_state = !w_value;
2605 goto usb3_delegate;
2606
2607 case USB_REQ_GET_STATUS:
2608 switch (r.bRequestType) {
2609 case (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE):
2610 status = dev->wakeup_enable ? 0x02 : 0x00;
2611 if (dev->selfpowered)
3e76fdcb 2612 status |= BIT(0);
adc82f77
RR
2613 status |= (dev->u1_enable << 2 | dev->u2_enable << 3 |
2614 dev->ltm_enable << 4);
2615 writel(0, &dev->epregs[0].ep_irqenb);
2616 set_fifo_bytecount(ep, sizeof(status));
2617 writel((__force u32) status, &dev->epregs[0].ep_data);
2618 allow_status_338x(ep);
2619 break;
2620
2621 case (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT):
2622 e = get_ep_by_addr(dev, w_index);
2623 if (!e)
2624 goto do_stall3;
2625 status = readl(&e->regs->ep_rsp) &
3e76fdcb 2626 BIT(CLEAR_ENDPOINT_HALT);
adc82f77
RR
2627 writel(0, &dev->epregs[0].ep_irqenb);
2628 set_fifo_bytecount(ep, sizeof(status));
2629 writel((__force u32) status, &dev->epregs[0].ep_data);
2630 allow_status_338x(ep);
2631 break;
2632
2633 default:
2634 goto usb3_delegate;
2635 }
2636 break;
2637
2638 case USB_REQ_CLEAR_FEATURE:
2639 switch (r.bRequestType) {
2640 case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE):
2641 if (!dev->addressed_state) {
2642 switch (w_value) {
2643 case USB_DEVICE_U1_ENABLE:
2644 dev->u1_enable = 0;
2645 writel(readl(&dev->usb_ext->usbctl2) &
3e76fdcb 2646 ~BIT(U1_ENABLE),
adc82f77
RR
2647 &dev->usb_ext->usbctl2);
2648 allow_status_338x(ep);
2649 goto next_endpoints3;
2650
2651 case USB_DEVICE_U2_ENABLE:
2652 dev->u2_enable = 0;
2653 writel(readl(&dev->usb_ext->usbctl2) &
3e76fdcb 2654 ~BIT(U2_ENABLE),
adc82f77
RR
2655 &dev->usb_ext->usbctl2);
2656 allow_status_338x(ep);
2657 goto next_endpoints3;
2658
2659 case USB_DEVICE_LTM_ENABLE:
2660 dev->ltm_enable = 0;
2661 writel(readl(&dev->usb_ext->usbctl2) &
3e76fdcb 2662 ~BIT(LTM_ENABLE),
adc82f77
RR
2663 &dev->usb_ext->usbctl2);
2664 allow_status_338x(ep);
2665 goto next_endpoints3;
2666
2667 default:
2668 break;
2669 }
2670 }
2671 if (w_value == USB_DEVICE_REMOTE_WAKEUP) {
2672 dev->wakeup_enable = 0;
2673 writel(readl(&dev->usb->usbctl) &
3e76fdcb 2674 ~BIT(DEVICE_REMOTE_WAKEUP_ENABLE),
adc82f77
RR
2675 &dev->usb->usbctl);
2676 allow_status_338x(ep);
2677 break;
2678 }
2679 goto usb3_delegate;
2680
2681 case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT):
2682 e = get_ep_by_addr(dev, w_index);
2683 if (!e)
2684 goto do_stall3;
2685 if (w_value != USB_ENDPOINT_HALT)
2686 goto do_stall3;
e56e69cc 2687 ep_vdbg(dev, "%s clear halt\n", e->ep.name);
e0cbb046
RR
2688 /*
2689 * Workaround for SS SeqNum not cleared via
2690 * Endpoint Halt (Clear) bit. select endpoint
2691 */
2692 ep_clear_seqnum(e);
2693 clear_halt(e);
adc82f77
RR
2694 if (!list_empty(&e->queue) && e->td_dma)
2695 restart_dma(e);
2696 allow_status(ep);
2697 ep->stopped = 1;
2698 break;
2699
2700 default:
2701 goto usb3_delegate;
2702 }
2703 break;
2704 case USB_REQ_SET_FEATURE:
2705 switch (r.bRequestType) {
2706 case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE):
2707 if (!dev->addressed_state) {
2708 switch (w_value) {
2709 case USB_DEVICE_U1_ENABLE:
2710 dev->u1_enable = 1;
2711 writel(readl(&dev->usb_ext->usbctl2) |
3e76fdcb 2712 BIT(U1_ENABLE),
adc82f77
RR
2713 &dev->usb_ext->usbctl2);
2714 allow_status_338x(ep);
2715 goto next_endpoints3;
2716
2717 case USB_DEVICE_U2_ENABLE:
2718 dev->u2_enable = 1;
2719 writel(readl(&dev->usb_ext->usbctl2) |
3e76fdcb 2720 BIT(U2_ENABLE),
adc82f77
RR
2721 &dev->usb_ext->usbctl2);
2722 allow_status_338x(ep);
2723 goto next_endpoints3;
2724
2725 case USB_DEVICE_LTM_ENABLE:
2726 dev->ltm_enable = 1;
2727 writel(readl(&dev->usb_ext->usbctl2) |
3e76fdcb 2728 BIT(LTM_ENABLE),
adc82f77
RR
2729 &dev->usb_ext->usbctl2);
2730 allow_status_338x(ep);
2731 goto next_endpoints3;
2732 default:
2733 break;
2734 }
2735 }
2736
2737 if (w_value == USB_DEVICE_REMOTE_WAKEUP) {
2738 dev->wakeup_enable = 1;
2739 writel(readl(&dev->usb->usbctl) |
3e76fdcb 2740 BIT(DEVICE_REMOTE_WAKEUP_ENABLE),
adc82f77
RR
2741 &dev->usb->usbctl);
2742 allow_status_338x(ep);
2743 break;
2744 }
2745 goto usb3_delegate;
2746
2747 case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT):
2748 e = get_ep_by_addr(dev, w_index);
2749 if (!e || (w_value != USB_ENDPOINT_HALT))
2750 goto do_stall3;
cf8b1cde
RR
2751 ep->stopped = 1;
2752 if (ep->num == 0)
2753 ep->dev->protocol_stall = 1;
2754 else {
2755 if (ep->dma)
e721c457 2756 abort_dma(ep);
e0cbb046 2757 set_halt(ep);
cf8b1cde 2758 }
adc82f77
RR
2759 allow_status_338x(ep);
2760 break;
2761
2762 default:
2763 goto usb3_delegate;
2764 }
2765
2766 break;
2767 default:
2768
2769usb3_delegate:
e56e69cc 2770 ep_vdbg(dev, "setup %02x.%02x v%04x i%04x l%04x ep_cfg %08x\n",
adc82f77
RR
2771 r.bRequestType, r.bRequest,
2772 w_value, w_index, w_length,
2773 readl(&ep->cfg->ep_cfg));
2774
2775 ep->responded = 0;
2776 spin_unlock(&dev->lock);
2777 tmp = dev->driver->setup(&dev->gadget, &r);
2778 spin_lock(&dev->lock);
2779 }
2780do_stall3:
2781 if (tmp < 0) {
e56e69cc 2782 ep_vdbg(dev, "req %02x.%02x protocol STALL; stat %d\n",
adc82f77
RR
2783 r.bRequestType, r.bRequest, tmp);
2784 dev->protocol_stall = 1;
2785 /* TD 9.9 Halt Endpoint test. TD 9.22 Set feature test */
e0cbb046 2786 set_halt(ep);
adc82f77
RR
2787 }
2788
2789next_endpoints3:
2790
2791#undef w_value
2792#undef w_index
2793#undef w_length
2794
2795 return;
2796}
2797
fae3c158 2798static void handle_stat0_irqs(struct net2280 *dev, u32 stat)
1da177e4
LT
2799{
2800 struct net2280_ep *ep;
2801 u32 num, scratch;
2802
2803 /* most of these don't need individual acks */
3e76fdcb 2804 stat &= ~BIT(INTA_ASSERTED);
1da177e4
LT
2805 if (!stat)
2806 return;
e56e69cc 2807 /* ep_dbg(dev, "irqstat0 %04x\n", stat); */
1da177e4
LT
2808
2809 /* starting a control request? */
3e76fdcb 2810 if (unlikely(stat & BIT(SETUP_PACKET_INTERRUPT))) {
1da177e4 2811 union {
fae3c158 2812 u32 raw[2];
1da177e4
LT
2813 struct usb_ctrlrequest r;
2814 } u;
950ee4c8 2815 int tmp;
1da177e4
LT
2816 struct net2280_request *req;
2817
2818 if (dev->gadget.speed == USB_SPEED_UNKNOWN) {
adc82f77 2819 u32 val = readl(&dev->usb->usbstat);
3e76fdcb 2820 if (val & BIT(SUPER_SPEED)) {
adc82f77
RR
2821 dev->gadget.speed = USB_SPEED_SUPER;
2822 usb_ep_set_maxpacket_limit(&dev->ep[0].ep,
2823 EP0_SS_MAX_PACKET_SIZE);
3e76fdcb 2824 } else if (val & BIT(HIGH_SPEED)) {
1da177e4 2825 dev->gadget.speed = USB_SPEED_HIGH;
adc82f77
RR
2826 usb_ep_set_maxpacket_limit(&dev->ep[0].ep,
2827 EP0_HS_MAX_PACKET_SIZE);
2828 } else {
1da177e4 2829 dev->gadget.speed = USB_SPEED_FULL;
adc82f77
RR
2830 usb_ep_set_maxpacket_limit(&dev->ep[0].ep,
2831 EP0_HS_MAX_PACKET_SIZE);
2832 }
fae3c158 2833 net2280_led_speed(dev, dev->gadget.speed);
e56e69cc 2834 ep_dbg(dev, "%s\n",
fae3c158 2835 usb_speed_string(dev->gadget.speed));
1da177e4
LT
2836 }
2837
fae3c158 2838 ep = &dev->ep[0];
1da177e4
LT
2839 ep->irqs++;
2840
2841 /* make sure any leftover request state is cleared */
3e76fdcb 2842 stat &= ~BIT(ENDPOINT_0_INTERRUPT);
fae3c158
RR
2843 while (!list_empty(&ep->queue)) {
2844 req = list_entry(ep->queue.next,
1da177e4 2845 struct net2280_request, queue);
fae3c158 2846 done(ep, req, (req->req.actual == req->req.length)
1da177e4
LT
2847 ? 0 : -EPROTO);
2848 }
2849 ep->stopped = 0;
2850 dev->protocol_stall = 0;
5d1b6840 2851 if (!(dev->quirks & PLX_SUPERSPEED)) {
2eeb0016 2852 if (ep->dev->quirks & PLX_2280)
3e76fdcb
RR
2853 tmp = BIT(FIFO_OVERFLOW) |
2854 BIT(FIFO_UNDERFLOW);
adc82f77
RR
2855 else
2856 tmp = 0;
2857
3e76fdcb
RR
2858 writel(tmp | BIT(TIMEOUT) |
2859 BIT(USB_STALL_SENT) |
2860 BIT(USB_IN_NAK_SENT) |
2861 BIT(USB_IN_ACK_RCVD) |
2862 BIT(USB_OUT_PING_NAK_SENT) |
2863 BIT(USB_OUT_ACK_SENT) |
2864 BIT(SHORT_PACKET_OUT_DONE_INTERRUPT) |
2865 BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT) |
2866 BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
2867 BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
2868 BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
ae8e530a
RR
2869 BIT(DATA_IN_TOKEN_INTERRUPT),
2870 &ep->regs->ep_stat);
adc82f77
RR
2871 }
2872 u.raw[0] = readl(&dev->usb->setup0123);
2873 u.raw[1] = readl(&dev->usb->setup4567);
901b3d75 2874
fae3c158
RR
2875 cpu_to_le32s(&u.raw[0]);
2876 cpu_to_le32s(&u.raw[1]);
1da177e4 2877
5517525e 2878 if ((dev->quirks & PLX_SUPERSPEED) && !dev->bug7734_patched)
adc82f77
RR
2879 defect7374_workaround(dev, u.r);
2880
950ee4c8
GL
2881 tmp = 0;
2882
01ee7d70
DB
2883#define w_value le16_to_cpu(u.r.wValue)
2884#define w_index le16_to_cpu(u.r.wIndex)
2885#define w_length le16_to_cpu(u.r.wLength)
1da177e4
LT
2886
2887 /* ack the irq */
3e76fdcb
RR
2888 writel(BIT(SETUP_PACKET_INTERRUPT), &dev->regs->irqstat0);
2889 stat ^= BIT(SETUP_PACKET_INTERRUPT);
1da177e4
LT
2890
2891 /* watch control traffic at the token level, and force
2892 * synchronization before letting the status stage happen.
2893 * FIXME ignore tokens we'll NAK, until driver responds.
2894 * that'll mean a lot less irqs for some drivers.
2895 */
2896 ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0;
2897 if (ep->is_in) {
3e76fdcb
RR
2898 scratch = BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
2899 BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
2900 BIT(DATA_IN_TOKEN_INTERRUPT);
fae3c158 2901 stop_out_naking(ep);
1da177e4 2902 } else
3e76fdcb
RR
2903 scratch = BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
2904 BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
2905 BIT(DATA_IN_TOKEN_INTERRUPT);
fae3c158 2906 writel(scratch, &dev->epregs[0].ep_irqenb);
1da177e4
LT
2907
2908 /* we made the hardware handle most lowlevel requests;
2909 * everything else goes uplevel to the gadget code.
2910 */
1f26e28d 2911 ep->responded = 1;
adc82f77
RR
2912
2913 if (dev->gadget.speed == USB_SPEED_SUPER) {
2914 handle_stat0_irqs_superspeed(dev, ep, u.r);
2915 goto next_endpoints;
2916 }
2917
1da177e4
LT
2918 switch (u.r.bRequest) {
2919 case USB_REQ_GET_STATUS: {
2920 struct net2280_ep *e;
320f3459 2921 __le32 status;
1da177e4
LT
2922
2923 /* hw handles device and interface status */
2924 if (u.r.bRequestType != (USB_DIR_IN|USB_RECIP_ENDPOINT))
2925 goto delegate;
fae3c158
RR
2926 e = get_ep_by_addr(dev, w_index);
2927 if (!e || w_length > 2)
1da177e4
LT
2928 goto do_stall;
2929
3e76fdcb 2930 if (readl(&e->regs->ep_rsp) & BIT(SET_ENDPOINT_HALT))
fae3c158 2931 status = cpu_to_le32(1);
1da177e4 2932 else
fae3c158 2933 status = cpu_to_le32(0);
1da177e4
LT
2934
2935 /* don't bother with a request object! */
fae3c158
RR
2936 writel(0, &dev->epregs[0].ep_irqenb);
2937 set_fifo_bytecount(ep, w_length);
2938 writel((__force u32)status, &dev->epregs[0].ep_data);
2939 allow_status(ep);
e56e69cc 2940 ep_vdbg(dev, "%s stat %02x\n", ep->ep.name, status);
1da177e4
LT
2941 goto next_endpoints;
2942 }
2943 break;
2944 case USB_REQ_CLEAR_FEATURE: {
2945 struct net2280_ep *e;
2946
2947 /* hw handles device features */
2948 if (u.r.bRequestType != USB_RECIP_ENDPOINT)
2949 goto delegate;
ae8e530a 2950 if (w_value != USB_ENDPOINT_HALT || w_length != 0)
1da177e4 2951 goto do_stall;
fae3c158
RR
2952 e = get_ep_by_addr(dev, w_index);
2953 if (!e)
1da177e4 2954 goto do_stall;
8066134f 2955 if (e->wedged) {
e56e69cc 2956 ep_vdbg(dev, "%s wedged, halt not cleared\n",
8066134f
AS
2957 ep->ep.name);
2958 } else {
e56e69cc 2959 ep_vdbg(dev, "%s clear halt\n", e->ep.name);
8066134f 2960 clear_halt(e);
2eeb0016 2961 if ((ep->dev->quirks & PLX_SUPERSPEED) &&
adc82f77
RR
2962 !list_empty(&e->queue) && e->td_dma)
2963 restart_dma(e);
8066134f 2964 }
fae3c158 2965 allow_status(ep);
1da177e4
LT
2966 goto next_endpoints;
2967 }
2968 break;
2969 case USB_REQ_SET_FEATURE: {
2970 struct net2280_ep *e;
2971
2972 /* hw handles device features */
2973 if (u.r.bRequestType != USB_RECIP_ENDPOINT)
2974 goto delegate;
ae8e530a 2975 if (w_value != USB_ENDPOINT_HALT || w_length != 0)
1da177e4 2976 goto do_stall;
fae3c158
RR
2977 e = get_ep_by_addr(dev, w_index);
2978 if (!e)
1da177e4 2979 goto do_stall;
8066134f
AS
2980 if (e->ep.name == ep0name)
2981 goto do_stall;
fae3c158 2982 set_halt(e);
2eeb0016 2983 if ((dev->quirks & PLX_SUPERSPEED) && e->dma)
adc82f77 2984 abort_dma(e);
fae3c158 2985 allow_status(ep);
e56e69cc 2986 ep_vdbg(dev, "%s set halt\n", ep->ep.name);
1da177e4
LT
2987 goto next_endpoints;
2988 }
2989 break;
2990 default:
2991delegate:
e56e69cc 2992 ep_vdbg(dev, "setup %02x.%02x v%04x i%04x l%04x "
1da177e4
LT
2993 "ep_cfg %08x\n",
2994 u.r.bRequestType, u.r.bRequest,
320f3459 2995 w_value, w_index, w_length,
adc82f77 2996 readl(&ep->cfg->ep_cfg));
1f26e28d 2997 ep->responded = 0;
fae3c158
RR
2998 spin_unlock(&dev->lock);
2999 tmp = dev->driver->setup(&dev->gadget, &u.r);
3000 spin_lock(&dev->lock);
1da177e4
LT
3001 }
3002
3003 /* stall ep0 on error */
3004 if (tmp < 0) {
3005do_stall:
e56e69cc 3006 ep_vdbg(dev, "req %02x.%02x protocol STALL; stat %d\n",
1da177e4
LT
3007 u.r.bRequestType, u.r.bRequest, tmp);
3008 dev->protocol_stall = 1;
3009 }
3010
3011 /* some in/out token irq should follow; maybe stall then.
3012 * driver must queue a request (even zlp) or halt ep0
3013 * before the host times out.
3014 */
3015 }
3016
320f3459
DB
3017#undef w_value
3018#undef w_index
3019#undef w_length
3020
1da177e4
LT
3021next_endpoints:
3022 /* endpoint data irq ? */
3023 scratch = stat & 0x7f;
3024 stat &= ~0x7f;
3025 for (num = 0; scratch; num++) {
3026 u32 t;
3027
3028 /* do this endpoint's FIFO and queue need tending? */
3e76fdcb 3029 t = BIT(num);
1da177e4
LT
3030 if ((scratch & t) == 0)
3031 continue;
3032 scratch ^= t;
3033
fae3c158
RR
3034 ep = &dev->ep[num];
3035 handle_ep_small(ep);
1da177e4
LT
3036 }
3037
3038 if (stat)
e56e69cc 3039 ep_dbg(dev, "unhandled irqstat0 %08x\n", stat);
1da177e4
LT
3040}
3041
3e76fdcb
RR
3042#define DMA_INTERRUPTS (BIT(DMA_D_INTERRUPT) | \
3043 BIT(DMA_C_INTERRUPT) | \
3044 BIT(DMA_B_INTERRUPT) | \
3045 BIT(DMA_A_INTERRUPT))
1da177e4 3046#define PCI_ERROR_INTERRUPTS ( \
3e76fdcb
RR
3047 BIT(PCI_MASTER_ABORT_RECEIVED_INTERRUPT) | \
3048 BIT(PCI_TARGET_ABORT_RECEIVED_INTERRUPT) | \
3049 BIT(PCI_RETRY_ABORT_INTERRUPT))
1da177e4 3050
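/*
 * handle_stat1_irqs() deals with disconnect/reset, suspend/resume, and DMA
 * channel completions.  Judging by the "scratch >>= 9" below, the four DMA
 * channel interrupt bits (channels A..D, i.e. ep-a..ep-d) sit at bits 9..12
 * of irqstat1, and dev->ep[num + 1] maps a channel back to its endpoint.
 */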
fae3c158 3051static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
1da177e4
LT
3052{
3053 struct net2280_ep *ep;
3054 u32 tmp, num, mask, scratch;
3055
3056 /* after disconnect there's nothing else to do! */
3e76fdcb
RR
3057 tmp = BIT(VBUS_INTERRUPT) | BIT(ROOT_PORT_RESET_INTERRUPT);
3058 mask = BIT(SUPER_SPEED) | BIT(HIGH_SPEED) | BIT(FULL_SPEED);
1da177e4
LT
3059
3060 /* VBUS disconnect is indicated by VBUS_PIN and VBUS_INTERRUPT set.
fb914ebf 3061 * Root Port Reset is indicated by ROOT_PORT_RESET_INTERRUPT set and
901b3d75 3062 * both HIGH_SPEED and FULL_SPEED clear (as ROOT_PORT_RESET_INTERRUPT
1da177e4
LT
3063 * only indicates a change in the reset state).
3064 */
3065 if (stat & tmp) {
b611e424
AS
3066 bool reset = false;
3067 bool disconnect = false;
3068
3069 /*
3070 * Ignore disconnects and resets if the speed hasn't been set.
3071 * VBUS can bounce and there's always an initial reset.
3072 */
fae3c158 3073 writel(tmp, &dev->regs->irqstat1);
b611e424
AS
3074 if (dev->gadget.speed != USB_SPEED_UNKNOWN) {
3075 if ((stat & BIT(VBUS_INTERRUPT)) &&
3076 (readl(&dev->usb->usbctl) &
3077 BIT(VBUS_PIN)) == 0) {
3078 disconnect = true;
3079 ep_dbg(dev, "disconnect %s\n",
3080 dev->driver->driver.name);
3081 } else if ((stat & BIT(ROOT_PORT_RESET_INTERRUPT)) &&
3082 (readl(&dev->usb->usbstat) & mask)
3083 == 0) {
3084 reset = true;
3085 ep_dbg(dev, "reset %s\n",
3086 dev->driver->driver.name);
3087 }
3088
3089 if (disconnect || reset) {
3090 stop_activity(dev, dev->driver);
3091 ep0_start(dev);
3092 spin_unlock(&dev->lock);
3093 if (reset)
3094 usb_gadget_udc_reset
3095 (&dev->gadget, dev->driver);
3096 else
3097 (dev->driver->disconnect)
3098 (&dev->gadget);
3099 spin_lock(&dev->lock);
3100 return;
3101 }
1da177e4
LT
3102 }
3103 stat &= ~tmp;
3104
3105 /* vBUS can bounce ... one of many reasons to ignore the
3106 * notion of hotplug events on bus connect/disconnect!
3107 */
3108 if (!stat)
3109 return;
3110 }
3111
3112 /* NOTE: chip stays in PCI D0 state for now, but it could
3113 * enter D1 to save more power
3114 */
3e76fdcb 3115 tmp = BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT);
1da177e4 3116 if (stat & tmp) {
fae3c158 3117 writel(tmp, &dev->regs->irqstat1);
3e76fdcb 3118 if (stat & BIT(SUSPEND_REQUEST_INTERRUPT)) {
1da177e4 3119 if (dev->driver->suspend)
fae3c158 3120 dev->driver->suspend(&dev->gadget);
1da177e4 3121 if (!enable_suspend)
3e76fdcb 3122 stat &= ~BIT(SUSPEND_REQUEST_INTERRUPT);
1da177e4
LT
3123 } else {
3124 if (dev->driver->resume)
fae3c158 3125 dev->driver->resume(&dev->gadget);
1da177e4
LT
3126 /* at high speed, note erratum 0133 */
3127 }
3128 stat &= ~tmp;
3129 }
3130
3131 /* clear any other status/irqs */
3132 if (stat)
fae3c158 3133 writel(stat, &dev->regs->irqstat1);
1da177e4
LT
3134
3135 /* some status we can just ignore */
2eeb0016 3136 if (dev->quirks & PLX_2280)
3e76fdcb
RR
3137 stat &= ~(BIT(CONTROL_STATUS_INTERRUPT) |
3138 BIT(SUSPEND_REQUEST_INTERRUPT) |
3139 BIT(RESUME_INTERRUPT) |
3140 BIT(SOF_INTERRUPT));
950ee4c8 3141 else
3e76fdcb
RR
3142 stat &= ~(BIT(CONTROL_STATUS_INTERRUPT) |
3143 BIT(RESUME_INTERRUPT) |
3144 BIT(SOF_DOWN_INTERRUPT) |
3145 BIT(SOF_INTERRUPT));
950ee4c8 3146
1da177e4
LT
3147 if (!stat)
3148 return;
e56e69cc 3149 /* ep_dbg(dev, "irqstat1 %08x\n", stat);*/
1da177e4
LT
3150
3151 /* DMA status, for ep-{a,b,c,d} */
3152 scratch = stat & DMA_INTERRUPTS;
3153 stat &= ~DMA_INTERRUPTS;
3154 scratch >>= 9;
3155 for (num = 0; scratch; num++) {
3156 struct net2280_dma_regs __iomem *dma;
3157
3e76fdcb 3158 tmp = BIT(num);
1da177e4
LT
3159 if ((tmp & scratch) == 0)
3160 continue;
3161 scratch ^= tmp;
3162
fae3c158 3163 ep = &dev->ep[num + 1];
1da177e4
LT
3164 dma = ep->dma;
3165
3166 if (!dma)
3167 continue;
3168
3169 /* clear ep's dma status */
fae3c158
RR
3170 tmp = readl(&dma->dmastat);
3171 writel(tmp, &dma->dmastat);
1da177e4 3172
adc82f77 3173 /* dma sync*/
2eeb0016 3174 if (dev->quirks & PLX_SUPERSPEED) {
adc82f77
RR
3175 u32 r_dmacount = readl(&dma->dmacount);
3176 if (!ep->is_in && (r_dmacount & 0x00FFFFFF) &&
3e76fdcb 3177 (tmp & BIT(DMA_TRANSACTION_DONE_INTERRUPT)))
adc82f77
RR
3178 continue;
3179 }
3180
90664198
RR
3181 if (!(tmp & BIT(DMA_TRANSACTION_DONE_INTERRUPT))) {
3182 ep_dbg(ep->dev, "%s no xact done? %08x\n",
3183 ep->ep.name, tmp);
3184 continue;
1da177e4 3185 }
90664198 3186 stop_dma(ep->dma);
1da177e4
LT
3187
3188 /* OUT transfers terminate when the data from the
3189 * host is in our memory. Process whatever's done.
3190 * On this path, we know transfer's last packet wasn't
3191 * less than req->length. NAK_OUT_PACKETS may be set,
3192 * or the FIFO may already be holding new packets.
3193 *
3194 * IN transfers can linger in the FIFO for a very
3195 * long time ... we ignore that for now, accounting
3196 * precisely (like PIO does) needs per-packet irqs
3197 */
fae3c158 3198 scan_dma_completions(ep);
1da177e4
LT
3199
3200 /* disable dma on inactive queues; else maybe restart */
90664198 3201 if (!list_empty(&ep->queue)) {
fae3c158 3202 tmp = readl(&dma->dmactl);
90664198 3203 restart_dma(ep);
1da177e4
LT
3204 }
3205 ep->irqs++;
3206 }
3207
3208 /* NOTE: there are other PCI errors we might usefully notice.
3209 * if they appear very often, here's where to try recovering.
3210 */
3211 if (stat & PCI_ERROR_INTERRUPTS) {
e56e69cc 3212 ep_err(dev, "pci dma error; stat %08x\n", stat);
1da177e4
LT
3213 stat &= ~PCI_ERROR_INTERRUPTS;
3214 /* these are fatal errors, but "maybe" they won't
3215 * happen again ...
3216 */
fae3c158
RR
3217 stop_activity(dev, dev->driver);
3218 ep0_start(dev);
1da177e4
LT
3219 stat = 0;
3220 }
3221
3222 if (stat)
e56e69cc 3223 ep_dbg(dev, "unhandled irqstat1 %08x\n", stat);
1da177e4
LT
3224}
3225
fae3c158 3226static irqreturn_t net2280_irq(int irq, void *_dev)
1da177e4
LT
3227{
3228 struct net2280 *dev = _dev;
3229
658ad5e0 3230 /* shared interrupt, not ours */
2eeb0016 3231 if ((dev->quirks & PLX_LEGACY) &&
3e76fdcb 3232 (!(readl(&dev->regs->irqstat0) & BIT(INTA_ASSERTED))))
658ad5e0
AS
3233 return IRQ_NONE;
3234
fae3c158 3235 spin_lock(&dev->lock);
1da177e4
LT
3236
3237 /* handle disconnect, dma, and more */
fae3c158 3238 handle_stat1_irqs(dev, readl(&dev->regs->irqstat1));
1da177e4
LT
3239
3240 /* control requests and PIO */
fae3c158 3241 handle_stat0_irqs(dev, readl(&dev->regs->irqstat0));
1da177e4 3242
2eeb0016 3243 if (dev->quirks & PLX_SUPERSPEED) {
adc82f77
RR
3244 /* re-enable interrupt to trigger any possible new interrupt */
3245 u32 pciirqenb1 = readl(&dev->regs->pciirqenb1);
3246 writel(pciirqenb1 & 0x7FFFFFFF, &dev->regs->pciirqenb1);
3247 writel(pciirqenb1, &dev->regs->pciirqenb1);
3248 }
3249
fae3c158 3250 spin_unlock(&dev->lock);
1da177e4
LT
3251
3252 return IRQ_HANDLED;
3253}
3254
3255/*-------------------------------------------------------------------------*/
3256
fae3c158 3257static void gadget_release(struct device *_dev)
1da177e4 3258{
fae3c158 3259 struct net2280 *dev = dev_get_drvdata(_dev);
1da177e4 3260
fae3c158 3261 kfree(dev);
1da177e4
LT
3262}
3263
3264/* tear down the binding between this driver and the pci device */
3265
fae3c158 3266static void net2280_remove(struct pci_dev *pdev)
1da177e4 3267{
fae3c158 3268 struct net2280 *dev = pci_get_drvdata(pdev);
1da177e4 3269
0f91349b
SAS
3270 usb_del_gadget_udc(&dev->gadget);
3271
6bea476c 3272 BUG_ON(dev->driver);
1da177e4
LT
3273
3274 /* then clean up the resources we allocated during probe() */
fae3c158 3275 net2280_led_shutdown(dev);
1da177e4
LT
3276 if (dev->requests) {
3277 int i;
3278 for (i = 1; i < 5; i++) {
fae3c158 3279 if (!dev->ep[i].dummy)
1da177e4 3280 continue;
fae3c158
RR
3281 pci_pool_free(dev->requests, dev->ep[i].dummy,
3282 dev->ep[i].td_dma);
1da177e4 3283 }
fae3c158 3284 pci_pool_destroy(dev->requests);
1da177e4
LT
3285 }
3286 if (dev->got_irq)
fae3c158 3287 free_irq(pdev->irq, dev);
9c864c23 3288 if (dev->quirks & PLX_SUPERSPEED)
adc82f77 3289 pci_disable_msi(pdev);
1da177e4 3290 if (dev->regs)
fae3c158 3291 iounmap(dev->regs);
1da177e4 3292 if (dev->region)
fae3c158
RR
3293 release_mem_region(pci_resource_start(pdev, 0),
3294 pci_resource_len(pdev, 0));
1da177e4 3295 if (dev->enabled)
fae3c158
RR
3296 pci_disable_device(pdev);
3297 device_remove_file(&pdev->dev, &dev_attr_registers);
1da177e4 3298
e56e69cc 3299 ep_info(dev, "unbind\n");
1da177e4
LT
3300}
3301
3302/* wrap this driver around the specified device, but
3303 * don't respond over USB until a gadget driver binds to us.
3304 */
3305
fae3c158 3306static int net2280_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1da177e4
LT
3307{
3308 struct net2280 *dev;
3309 unsigned long resource, len;
3310 void __iomem *base = NULL;
3311 int retval, i;
1da177e4 3312
1da177e4 3313 /* alloc, and start init */
fae3c158
RR
3314 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
3315 if (dev == NULL) {
1da177e4
LT
3316 retval = -ENOMEM;
3317 goto done;
3318 }
3319
fae3c158
RR
3320 pci_set_drvdata(pdev, dev);
3321 spin_lock_init(&dev->lock);
2eeb0016 3322 dev->quirks = id->driver_data;
1da177e4
LT
3323 dev->pdev = pdev;
3324 dev->gadget.ops = &net2280_ops;
2eeb0016 3325 dev->gadget.max_speed = (dev->quirks & PLX_SUPERSPEED) ?
adc82f77 3326 USB_SPEED_SUPER : USB_SPEED_HIGH;
1da177e4
LT
3327
3328 /* the "gadget" abstracts/virtualizes the controller */
1da177e4
LT
3329 dev->gadget.name = driver_name;
3330
3331 /* now all the pci goodies ... */
fae3c158
RR
3332 if (pci_enable_device(pdev) < 0) {
3333 retval = -ENODEV;
1da177e4
LT
3334 goto done;
3335 }
3336 dev->enabled = 1;
3337
3338 /* BAR 0 holds all the registers
3339 * BAR 1 is 8051 memory; unused here (note erratum 0103)
3340 * BAR 2 is fifo memory; unused here
3341 */
fae3c158
RR
3342 resource = pci_resource_start(pdev, 0);
3343 len = pci_resource_len(pdev, 0);
3344 if (!request_mem_region(resource, len, driver_name)) {
e56e69cc 3345 ep_dbg(dev, "controller already in use\n");
1da177e4
LT
3346 retval = -EBUSY;
3347 goto done;
3348 }
3349 dev->region = 1;
3350
901b3d75
DB
3351 /* FIXME provide firmware download interface to put
3352 * 8051 code into the chip, e.g. to turn on PCI PM.
3353 */
3354
fae3c158 3355 base = ioremap_nocache(resource, len);
1da177e4 3356 if (base == NULL) {
e56e69cc 3357 ep_dbg(dev, "can't map memory\n");
1da177e4
LT
3358 retval = -EFAULT;
3359 goto done;
3360 }
3361 dev->regs = (struct net2280_regs __iomem *) base;
3362 dev->usb = (struct net2280_usb_regs __iomem *) (base + 0x0080);
3363 dev->pci = (struct net2280_pci_regs __iomem *) (base + 0x0100);
3364 dev->dma = (struct net2280_dma_regs __iomem *) (base + 0x0180);
3365 dev->dep = (struct net2280_dep_regs __iomem *) (base + 0x0200);
3366 dev->epregs = (struct net2280_ep_regs __iomem *) (base + 0x0300);
3367
2eeb0016 3368 if (dev->quirks & PLX_SUPERSPEED) {
adc82f77
RR
3369 u32 fsmvalue;
3370 u32 usbstat;
3371 dev->usb_ext = (struct usb338x_usb_ext_regs __iomem *)
3372 (base + 0x00b4);
3373 dev->fiforegs = (struct usb338x_fifo_regs __iomem *)
3374 (base + 0x0500);
3375 dev->llregs = (struct usb338x_ll_regs __iomem *)
3376 (base + 0x0700);
3377 dev->ll_lfps_regs = (struct usb338x_ll_lfps_regs __iomem *)
3378 (base + 0x0748);
3379 dev->ll_tsn_regs = (struct usb338x_ll_tsn_regs __iomem *)
3380 (base + 0x077c);
3381 dev->ll_chicken_reg = (struct usb338x_ll_chi_regs __iomem *)
3382 (base + 0x079c);
3383 dev->plregs = (struct usb338x_pl_regs __iomem *)
3384 (base + 0x0800);
3385 usbstat = readl(&dev->usb->usbstat);
fae3c158 3386 dev->enhanced_mode = !!(usbstat & BIT(11));
adc82f77
RR
3387 dev->n_ep = (dev->enhanced_mode) ? 9 : 5;
3388 /* put into initial config, link up all endpoints */
3389 fsmvalue = get_idx_reg(dev->regs, SCRATCH) &
3390 (0xf << DEFECT7374_FSM_FIELD);
3391 /* See if firmware needs to set up for workaround: */
5517525e
RR
3392 if (fsmvalue == DEFECT7374_FSM_SS_CONTROL_READ) {
3393 dev->bug7734_patched = 1;
adc82f77 3394 writel(0, &dev->usb->usbctl);
5517525e
RR
3395 } else
3396 dev->bug7734_patched = 0;
3397 } else {
adc82f77
RR
3398 dev->enhanced_mode = 0;
3399 dev->n_ep = 7;
3400 /* put into initial config, link up all endpoints */
3401 writel(0, &dev->usb->usbctl);
3402 }
3403
fae3c158
RR
3404 usb_reset(dev);
3405 usb_reinit(dev);
1da177e4
LT
3406
3407 /* irq setup after old hardware is cleaned up */
3408 if (!pdev->irq) {
e56e69cc 3409 ep_err(dev, "No IRQ. Check PCI setup!\n");
1da177e4
LT
3410 retval = -ENODEV;
3411 goto done;
3412 }
c6387a48 3413
9c864c23 3414 if (dev->quirks & PLX_SUPERSPEED)
adc82f77 3415 if (pci_enable_msi(pdev))
e56e69cc 3416 ep_err(dev, "Failed to enable MSI mode\n");
adc82f77 3417
fae3c158
RR
3418 if (request_irq(pdev->irq, net2280_irq, IRQF_SHARED,
3419 driver_name, dev)) {
e56e69cc 3420 ep_err(dev, "request interrupt %d failed\n", pdev->irq);
1da177e4
LT
3421 retval = -EBUSY;
3422 goto done;
3423 }
3424 dev->got_irq = 1;
3425
3426 /* DMA setup */
3427 /* NOTE: we know only the 32 LSBs of dma addresses may be nonzero */
fae3c158
RR
3428 dev->requests = pci_pool_create("requests", pdev,
3429 sizeof(struct net2280_dma),
1da177e4
LT
3430 0 /* no alignment requirements */,
3431 0 /* or page-crossing issues */);
3432 if (!dev->requests) {
e56e69cc 3433 ep_dbg(dev, "can't get request pool\n");
1da177e4
LT
3434 retval = -ENOMEM;
3435 goto done;
3436 }
3437 for (i = 1; i < 5; i++) {
3438 struct net2280_dma *td;
3439
fae3c158
RR
3440 td = pci_pool_alloc(dev->requests, GFP_KERNEL,
3441 &dev->ep[i].td_dma);
1da177e4 3442 if (!td) {
e56e69cc 3443 ep_dbg(dev, "can't get dummy %d\n", i);
1da177e4
LT
3444 retval = -ENOMEM;
3445 goto done;
3446 }
3447 td->dmacount = 0; /* not VALID */
1da177e4 3448 td->dmadesc = td->dmaaddr;
fae3c158 3449 dev->ep[i].dummy = td;
3450 }
3451
3452 /* enable lower-overhead pci memory bursts during DMA */
2eeb0016 3453 if (dev->quirks & PLX_LEGACY)
3454 writel(BIT(DMA_MEMORY_WRITE_AND_INVALIDATE_ENABLE) |
3455 /*
3456 * 256 write retries may not be enough...
3457 BIT(PCI_RETRY_ABORT_ENABLE) |
3458 */
3459 BIT(DMA_READ_MULTIPLE_ENABLE) |
3460 BIT(DMA_READ_LINE_ENABLE),
3461 &dev->pci->pcimstctl);
1da177e4 3462 /* erratum 0115 shouldn't appear: Linux inits PCI_LATENCY_TIMER */
3463 pci_set_master(pdev);
3464 pci_try_set_mwi(pdev);
3465
3466 /* ... also flushes any posted pci writes */
fae3c158 3467 dev->chiprev = get_idx_reg(dev->regs, REG_CHIPREV) & 0xffff;
3468
3469 /* done */
3470 ep_info(dev, "%s\n", driver_desc);
3471 ep_info(dev, "irq %d, pci mem %p, chip rev %04x\n",
c6387a48 3472 pdev->irq, base, dev->chiprev);
d588ff58 3473 ep_info(dev, "version: " DRIVER_VERSION "; %s\n",
adc82f77 3474 dev->enhanced_mode ? "enhanced mode" : "legacy mode");
3475 retval = device_create_file(&pdev->dev, &dev_attr_registers);
3476 if (retval)
3477 goto done;
1da177e4 3478
3479 retval = usb_add_gadget_udc_release(&pdev->dev, &dev->gadget,
3480 gadget_release);
3481 if (retval)
3482 goto done;
3483 return 0;
3484
3485done:
3486 if (dev)
fae3c158 3487 net2280_remove(pdev);
3488 return retval;
3489}
3490
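/*
 * Editorial note: get_idx_reg(), used above to read SCRATCH and REG_CHIPREV,
 * is defined in net2280.h and is not part of this excerpt.  A minimal sketch
 * of the index/data window pattern it is assumed to follow (the idxaddr and
 * idxdata registers are part of struct net2280_regs):
 */
static inline u32 sketch_get_idx_reg(struct net2280_regs __iomem *regs,
				     u32 index)
{
	writel(index, &regs->idxaddr);
	/* reading back also orders the posted index write */
	return readl(&regs->idxdata);
}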
3491/* make sure the board is quiescent; otherwise it will continue
3492 * generating IRQs across the upcoming reboot.
3493 */
3494
fae3c158 3495static void net2280_shutdown(struct pci_dev *pdev)
2d61bde7 3496{
fae3c158 3497 struct net2280 *dev = pci_get_drvdata(pdev);
3498
3499 /* disable IRQs */
3500 writel(0, &dev->regs->pciirqenb0);
3501 writel(0, &dev->regs->pciirqenb1);
3502
3503 /* disable the pullup so the host will think we're gone */
fae3c158 3504 writel(0, &dev->usb->usbctl);
2f076077 3505
3506}
3507
3508
3509/*-------------------------------------------------------------------------*/
3510
fae3c158 3511static const struct pci_device_id pci_ids[] = { {
3512 .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
3513 .class_mask = ~0,
c2db8a8a 3514 .vendor = PCI_VENDOR_ID_PLX_LEGACY,
3515 .device = 0x2280,
3516 .subvendor = PCI_ANY_ID,
3517 .subdevice = PCI_ANY_ID,
2eeb0016 3518 .driver_data = PLX_LEGACY | PLX_2280,
ae8e530a 3519 }, {
3520 .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
3521 .class_mask = ~0,
c2db8a8a 3522 .vendor = PCI_VENDOR_ID_PLX_LEGACY,
3523 .device = 0x2282,
3524 .subvendor = PCI_ANY_ID,
3525 .subdevice = PCI_ANY_ID,
2eeb0016 3526 .driver_data = PLX_LEGACY,
ae8e530a 3527 },
adc82f77 3528 {
3529 .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
3530 .class_mask = ~0,
3531 .vendor = PCI_VENDOR_ID_PLX,
3532 .device = 0x3380,
3533 .subvendor = PCI_ANY_ID,
3534 .subdevice = PCI_ANY_ID,
2eeb0016 3535 .driver_data = PLX_SUPERSPEED,
3536 },
3537 {
3538 .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
3539 .class_mask = ~0,
3540 .vendor = PCI_VENDOR_ID_PLX,
3541 .device = 0x3382,
3542 .subvendor = PCI_ANY_ID,
3543 .subdevice = PCI_ANY_ID,
2eeb0016 3544 .driver_data = PLX_SUPERSPEED,
3545 },
3546{ /* end: all zeroes */ }
1da177e4 3547};
fae3c158 3548MODULE_DEVICE_TABLE(pci, pci_ids);
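/*
 * Editorial note: the .driver_data quirk bits above (PLX_LEGACY, PLX_2280,
 * PLX_SUPERSPEED) reach the driver through the pci_device_id argument of the
 * probe.  A hedged sketch of that plumbing -- the real assignment happens
 * earlier in net2280_probe(), outside this excerpt, and the names below are
 * illustrative only:
 */
static int sketch_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct net2280 *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	/* quirk bits selected by whichever table entry matched */
	dev->quirks = id->driver_data;
	pci_set_drvdata(pdev, dev);

	/* ... the rest of probe keys off dev->quirks & PLX_SUPERSPEED etc. */
	return 0;
}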
3549
3550/* pci driver glue; this is a "new style" PCI driver module */
3551static struct pci_driver net2280_pci_driver = {
3552 .name = (char *) driver_name,
3553 .id_table = pci_ids,
3554
3555 .probe = net2280_probe,
3556 .remove = net2280_remove,
2d61bde7 3557 .shutdown = net2280_shutdown,
3558
3559 /* FIXME add power management support */
3560};
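/*
 * Editorial note on the FIXME above: this driver provides no suspend/resume
 * hooks.  A hypothetical sketch of the standard dev_pm_ops wiring -- the
 * names and the quiesce/restore details below are illustrative and are not
 * part of the driver:
 */
static int __maybe_unused sketch_net2280_suspend(struct device *d)
{
	struct net2280 *dev = pci_get_drvdata(to_pci_dev(d));

	/* quiesce much like net2280_shutdown(): mask IRQs, drop the pullup */
	writel(0, &dev->regs->pciirqenb0);
	writel(0, &dev->regs->pciirqenb1);
	writel(0, &dev->usb->usbctl);
	return 0;
}

static int __maybe_unused sketch_net2280_resume(struct device *d)
{
	struct net2280 *dev = pci_get_drvdata(to_pci_dev(d));

	/* bring the core back to its post-probe state */
	usb_reset(dev);
	usb_reinit(dev);
	return 0;
}

static SIMPLE_DEV_PM_OPS(sketch_net2280_pm_ops,
			 sketch_net2280_suspend, sketch_net2280_resume);
/* which would be hooked up via  .driver.pm = &sketch_net2280_pm_ops  */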
3561
3562module_pci_driver(net2280_pci_driver);
3563
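/*
 * Editorial note: module_pci_driver() above generates the usual module
 * init/exit boilerplate via module_driver().  Roughly equivalent open-coded
 * form, with illustrative names:
 */
static int __init sketch_net2280_init(void)
{
	return pci_register_driver(&net2280_pci_driver);
}
module_init(sketch_net2280_init);

static void __exit sketch_net2280_exit(void)
{
	pci_unregister_driver(&net2280_pci_driver);
}
module_exit(sketch_net2280_exit);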
3564MODULE_DESCRIPTION(DRIVER_DESC);
3565MODULE_AUTHOR("David Brownell");
3566MODULE_LICENSE("GPL");