]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - drivers/usb/gadget/udc/net2280.c
usb: gadget: r8a66597-udc: set value for common is_selfpowered
[mirror_ubuntu-jammy-kernel.git] / drivers / usb / gadget / udc / net2280.c
CommitLineData
1da177e4
LT
1/*
2 * Driver for the PLX NET2280 USB device controller.
3 * Specs and errata are available from <http://www.plxtech.com>.
4 *
901b3d75 5 * PLX Technology Inc. (formerly NetChip Technology) supported the
1da177e4
LT
6 * development of this driver.
7 *
8 *
9 * CODE STATUS HIGHLIGHTS
10 *
11 * This driver should work well with most "gadget" drivers, including
fa06920a 12 * the Mass Storage, Serial, and Ethernet/RNDIS gadget drivers
1da177e4
LT
13 * as well as Gadget Zero and Gadgetfs.
14 *
90664198 15 * DMA is enabled by default.
1da177e4 16 *
adc82f77
RR
17 * MSI is enabled by default. The legacy IRQ is used if MSI couldn't
18 * be enabled.
19 *
1da177e4
LT
20 * Note that almost all the errata workarounds here are only needed for
21 * rev1 chips. Rev1a silicon (0110) fixes almost all of them.
22 */
23
24/*
25 * Copyright (C) 2003 David Brownell
26 * Copyright (C) 2003-2005 PLX Technology, Inc.
adc82f77 27 * Copyright (C) 2014 Ricardo Ribalda - Qtechnology/AS
1da177e4 28 *
901b3d75
DB
29 * Modified Seth Levy 2005 PLX Technology, Inc. to provide compatibility
30 * with 2282 chip
950ee4c8 31 *
adc82f77
RR
32 * Modified Ricardo Ribalda Qtechnology AS to provide compatibility
33 * with usb 338x chip. Based on PLX driver
34 *
1da177e4
LT
35 * This program is free software; you can redistribute it and/or modify
36 * it under the terms of the GNU General Public License as published by
37 * the Free Software Foundation; either version 2 of the License, or
38 * (at your option) any later version.
1da177e4
LT
39 */
40
1da177e4
LT
41#include <linux/module.h>
42#include <linux/pci.h>
682d4c80 43#include <linux/dma-mapping.h>
1da177e4
LT
44#include <linux/kernel.h>
45#include <linux/delay.h>
46#include <linux/ioport.h>
1da177e4 47#include <linux/slab.h>
1da177e4
LT
48#include <linux/errno.h>
49#include <linux/init.h>
50#include <linux/timer.h>
51#include <linux/list.h>
52#include <linux/interrupt.h>
53#include <linux/moduleparam.h>
54#include <linux/device.h>
5f848137 55#include <linux/usb/ch9.h>
9454a57a 56#include <linux/usb/gadget.h>
b38b03b3 57#include <linux/prefetch.h>
fae3c158 58#include <linux/io.h>
1da177e4
LT
59
60#include <asm/byteorder.h>
1da177e4 61#include <asm/irq.h>
1da177e4
LT
62#include <asm/unaligned.h>
63
adc82f77
RR
64#define DRIVER_DESC "PLX NET228x/USB338x USB Peripheral Controller"
65#define DRIVER_VERSION "2005 Sept 27/v3.0"
1da177e4 66
1da177e4
LT
67#define EP_DONTUSE 13 /* nonzero */
68
69#define USE_RDK_LEDS /* GPIO pins control three LEDs */
70
71
fae3c158
RR
72static const char driver_name[] = "net2280";
73static const char driver_desc[] = DRIVER_DESC;
1da177e4 74
adc82f77 75static const u32 ep_bit[9] = { 0, 17, 2, 19, 4, 1, 18, 3, 20 };
fae3c158
RR
76static const char ep0name[] = "ep0";
77static const char *const ep_name[] = {
1da177e4
LT
78 ep0name,
79 "ep-a", "ep-b", "ep-c", "ep-d",
adc82f77 80 "ep-e", "ep-f", "ep-g", "ep-h",
1da177e4
LT
81};
82
1da177e4
LT
83/* mode 0 == ep-{a,b,c,d} 1K fifo each
84 * mode 1 == ep-{a,b} 2K fifo each, ep-{c,d} unavailable
85 * mode 2 == ep-a 2K fifo, ep-{b,c} 1K each, ep-d unavailable
86 */
fae3c158 87static ushort fifo_mode;
1da177e4
LT
88
89/* "modprobe net2280 fifo_mode=1" etc */
ae8e530a 90module_param(fifo_mode, ushort, 0644);
1da177e4
LT
91
92/* enable_suspend -- When enabled, the driver will respond to
93 * USB suspend requests by powering down the NET2280. Otherwise,
25985edc 94 * USB suspend requests will be ignored. This is acceptable for
950ee4c8 95 * self-powered devices
1da177e4 96 */
00d4db0e 97static bool enable_suspend;
1da177e4
LT
98
99/* "modprobe net2280 enable_suspend=1" etc */
ae8e530a 100module_param(enable_suspend, bool, 0444);
1da177e4 101
1da177e4
LT
102#define DIR_STRING(bAddress) (((bAddress) & USB_DIR_IN) ? "in" : "out")
103
fae3c158 104static char *type_string(u8 bmAttributes)
1da177e4
LT
105{
106 switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) {
107 case USB_ENDPOINT_XFER_BULK: return "bulk";
108 case USB_ENDPOINT_XFER_ISOC: return "iso";
109 case USB_ENDPOINT_XFER_INT: return "intr";
2b84f92b 110 }
1da177e4
LT
111 return "control";
112}
1da177e4
LT
113
114#include "net2280.h"
115
3e76fdcb
RR
116#define valid_bit cpu_to_le32(BIT(VALID_BIT))
117#define dma_done_ie cpu_to_le32(BIT(DMA_DONE_INTERRUPT_ENABLE))
1da177e4
LT
118
119/*-------------------------------------------------------------------------*/
adc82f77
RR
120static inline void enable_pciirqenb(struct net2280_ep *ep)
121{
122 u32 tmp = readl(&ep->dev->regs->pciirqenb0);
123
2eeb0016 124 if (ep->dev->quirks & PLX_LEGACY)
3e76fdcb 125 tmp |= BIT(ep->num);
adc82f77 126 else
3e76fdcb 127 tmp |= BIT(ep_bit[ep->num]);
adc82f77
RR
128 writel(tmp, &ep->dev->regs->pciirqenb0);
129
130 return;
131}
1da177e4
LT
132
133static int
fae3c158 134net2280_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
1da177e4
LT
135{
136 struct net2280 *dev;
137 struct net2280_ep *ep;
138 u32 max, tmp;
139 unsigned long flags;
adc82f77 140 static const u32 ep_key[9] = { 1, 0, 1, 0, 1, 1, 0, 1, 0 };
1da177e4 141
fae3c158 142 ep = container_of(_ep, struct net2280_ep, ep);
ae8e530a
RR
143 if (!_ep || !desc || ep->desc || _ep->name == ep0name ||
144 desc->bDescriptorType != USB_DT_ENDPOINT)
1da177e4
LT
145 return -EINVAL;
146 dev = ep->dev;
147 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
148 return -ESHUTDOWN;
149
150 /* erratum 0119 workaround ties up an endpoint number */
151 if ((desc->bEndpointAddress & 0x0f) == EP_DONTUSE)
152 return -EDOM;
153
2eeb0016 154 if (dev->quirks & PLX_SUPERSPEED) {
adc82f77
RR
155 if ((desc->bEndpointAddress & 0x0f) >= 0x0c)
156 return -EDOM;
157 ep->is_in = !!usb_endpoint_dir_in(desc);
158 if (dev->enhanced_mode && ep->is_in && ep_key[ep->num])
159 return -EINVAL;
160 }
161
1da177e4 162 /* sanity check ep-e/ep-f since their fifos are small */
fae3c158 163 max = usb_endpoint_maxp(desc) & 0x1fff;
2eeb0016 164 if (ep->num > 4 && max > 64 && (dev->quirks & PLX_LEGACY))
1da177e4
LT
165 return -ERANGE;
166
fae3c158 167 spin_lock_irqsave(&dev->lock, flags);
1da177e4
LT
168 _ep->maxpacket = max & 0x7ff;
169 ep->desc = desc;
170
171 /* ep_reset() has already been called */
172 ep->stopped = 0;
8066134f 173 ep->wedged = 0;
1da177e4
LT
174 ep->out_overflow = 0;
175
176 /* set speed-dependent max packet; may kick in high bandwidth */
adc82f77 177 set_max_speed(ep, max);
1da177e4 178
1da177e4 179 /* set type, direction, address; reset fifo counters */
3e76fdcb 180 writel(BIT(FIFO_FLUSH), &ep->regs->ep_stat);
1da177e4
LT
181 tmp = (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK);
182 if (tmp == USB_ENDPOINT_XFER_INT) {
183 /* erratum 0105 workaround prevents hs NYET */
ae8e530a
RR
184 if (dev->chiprev == 0100 &&
185 dev->gadget.speed == USB_SPEED_HIGH &&
186 !(desc->bEndpointAddress & USB_DIR_IN))
3e76fdcb 187 writel(BIT(CLEAR_NAK_OUT_PACKETS_MODE),
1da177e4
LT
188 &ep->regs->ep_rsp);
189 } else if (tmp == USB_ENDPOINT_XFER_BULK) {
190 /* catch some particularly blatant driver bugs */
adc82f77
RR
191 if ((dev->gadget.speed == USB_SPEED_SUPER && max != 1024) ||
192 (dev->gadget.speed == USB_SPEED_HIGH && max != 512) ||
193 (dev->gadget.speed == USB_SPEED_FULL && max > 64)) {
194 spin_unlock_irqrestore(&dev->lock, flags);
1da177e4
LT
195 return -ERANGE;
196 }
197 }
fae3c158 198 ep->is_iso = (tmp == USB_ENDPOINT_XFER_ISOC);
adc82f77 199 /* Enable this endpoint */
2eeb0016 200 if (dev->quirks & PLX_LEGACY) {
adc82f77
RR
201 tmp <<= ENDPOINT_TYPE;
202 tmp |= desc->bEndpointAddress;
203 /* default full fifo lines */
204 tmp |= (4 << ENDPOINT_BYTE_COUNT);
3e76fdcb 205 tmp |= BIT(ENDPOINT_ENABLE);
adc82f77
RR
206 ep->is_in = (tmp & USB_DIR_IN) != 0;
207 } else {
208 /* In Legacy mode, only OUT endpoints are used */
209 if (dev->enhanced_mode && ep->is_in) {
210 tmp <<= IN_ENDPOINT_TYPE;
3e76fdcb 211 tmp |= BIT(IN_ENDPOINT_ENABLE);
adc82f77 212 /* Not applicable to Legacy */
3e76fdcb 213 tmp |= BIT(ENDPOINT_DIRECTION);
adc82f77
RR
214 } else {
215 tmp <<= OUT_ENDPOINT_TYPE;
3e76fdcb 216 tmp |= BIT(OUT_ENDPOINT_ENABLE);
adc82f77
RR
217 tmp |= (ep->is_in << ENDPOINT_DIRECTION);
218 }
219
220 tmp |= usb_endpoint_num(desc);
221 tmp |= (ep->ep.maxburst << MAX_BURST_SIZE);
222 }
223
224 /* Make sure all the registers are written before ep_rsp*/
225 wmb();
1da177e4
LT
226
227 /* for OUT transfers, block the rx fifo until a read is posted */
1da177e4 228 if (!ep->is_in)
3e76fdcb 229 writel(BIT(SET_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
2eeb0016 230 else if (!(dev->quirks & PLX_2280)) {
901b3d75
DB
231 /* Added for 2282, Don't use nak packets on an in endpoint,
232 * this was ignored on 2280
233 */
3e76fdcb
RR
234 writel(BIT(CLEAR_NAK_OUT_PACKETS) |
235 BIT(CLEAR_NAK_OUT_PACKETS_MODE), &ep->regs->ep_rsp);
950ee4c8 236 }
1da177e4 237
adc82f77 238 writel(tmp, &ep->cfg->ep_cfg);
1da177e4
LT
239
240 /* enable irqs */
241 if (!ep->dma) { /* pio, per-packet */
adc82f77 242 enable_pciirqenb(ep);
1da177e4 243
3e76fdcb
RR
244 tmp = BIT(DATA_PACKET_RECEIVED_INTERRUPT_ENABLE) |
245 BIT(DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE);
2eeb0016 246 if (dev->quirks & PLX_2280)
fae3c158
RR
247 tmp |= readl(&ep->regs->ep_irqenb);
248 writel(tmp, &ep->regs->ep_irqenb);
1da177e4 249 } else { /* dma, per-request */
3e76fdcb 250 tmp = BIT((8 + ep->num)); /* completion */
fae3c158
RR
251 tmp |= readl(&dev->regs->pciirqenb1);
252 writel(tmp, &dev->regs->pciirqenb1);
1da177e4
LT
253
254 /* for short OUT transfers, dma completions can't
255 * advance the queue; do it pio-style, by hand.
256 * NOTE erratum 0112 workaround #2
257 */
258 if ((desc->bEndpointAddress & USB_DIR_IN) == 0) {
3e76fdcb 259 tmp = BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT_ENABLE);
fae3c158 260 writel(tmp, &ep->regs->ep_irqenb);
1da177e4 261
adc82f77 262 enable_pciirqenb(ep);
1da177e4
LT
263 }
264 }
265
266 tmp = desc->bEndpointAddress;
e56e69cc 267 ep_dbg(dev, "enabled %s (ep%d%s-%s) %s max %04x\n",
fae3c158
RR
268 _ep->name, tmp & 0x0f, DIR_STRING(tmp),
269 type_string(desc->bmAttributes),
1da177e4
LT
270 ep->dma ? "dma" : "pio", max);
271
272 /* pci writes may still be posted */
fae3c158 273 spin_unlock_irqrestore(&dev->lock, flags);
1da177e4
LT
274 return 0;
275}
276
fae3c158 277static int handshake(u32 __iomem *ptr, u32 mask, u32 done, int usec)
1da177e4
LT
278{
279 u32 result;
280
281 do {
fae3c158 282 result = readl(ptr);
1da177e4
LT
283 if (result == ~(u32)0) /* "device unplugged" */
284 return -ENODEV;
285 result &= mask;
286 if (result == done)
287 return 0;
fae3c158 288 udelay(1);
1da177e4
LT
289 usec--;
290 } while (usec > 0);
291 return -ETIMEDOUT;
292}
293
901b3d75 294static const struct usb_ep_ops net2280_ep_ops;
1da177e4 295
adc82f77
RR
296static void ep_reset_228x(struct net2280_regs __iomem *regs,
297 struct net2280_ep *ep)
1da177e4
LT
298{
299 u32 tmp;
300
301 ep->desc = NULL;
fae3c158 302 INIT_LIST_HEAD(&ep->queue);
1da177e4 303
e117e742 304 usb_ep_set_maxpacket_limit(&ep->ep, ~0);
1da177e4
LT
305 ep->ep.ops = &net2280_ep_ops;
306
307 /* disable the dma, irqs, endpoint... */
308 if (ep->dma) {
fae3c158 309 writel(0, &ep->dma->dmactl);
3e76fdcb
RR
310 writel(BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT) |
311 BIT(DMA_TRANSACTION_DONE_INTERRUPT) |
312 BIT(DMA_ABORT),
313 &ep->dma->dmastat);
1da177e4 314
fae3c158 315 tmp = readl(&regs->pciirqenb0);
3e76fdcb 316 tmp &= ~BIT(ep->num);
fae3c158 317 writel(tmp, &regs->pciirqenb0);
1da177e4 318 } else {
fae3c158 319 tmp = readl(&regs->pciirqenb1);
3e76fdcb 320 tmp &= ~BIT((8 + ep->num)); /* completion */
fae3c158 321 writel(tmp, &regs->pciirqenb1);
1da177e4 322 }
fae3c158 323 writel(0, &ep->regs->ep_irqenb);
1da177e4
LT
324
325 /* init to our chosen defaults, notably so that we NAK OUT
326 * packets until the driver queues a read (+note erratum 0112)
327 */
2eeb0016 328 if (!ep->is_in || (ep->dev->quirks & PLX_2280)) {
3e76fdcb
RR
329 tmp = BIT(SET_NAK_OUT_PACKETS_MODE) |
330 BIT(SET_NAK_OUT_PACKETS) |
331 BIT(CLEAR_EP_HIDE_STATUS_PHASE) |
332 BIT(CLEAR_INTERRUPT_MODE);
950ee4c8
GL
333 } else {
334 /* added for 2282 */
3e76fdcb
RR
335 tmp = BIT(CLEAR_NAK_OUT_PACKETS_MODE) |
336 BIT(CLEAR_NAK_OUT_PACKETS) |
337 BIT(CLEAR_EP_HIDE_STATUS_PHASE) |
338 BIT(CLEAR_INTERRUPT_MODE);
950ee4c8 339 }
1da177e4
LT
340
341 if (ep->num != 0) {
3e76fdcb
RR
342 tmp |= BIT(CLEAR_ENDPOINT_TOGGLE) |
343 BIT(CLEAR_ENDPOINT_HALT);
1da177e4 344 }
fae3c158 345 writel(tmp, &ep->regs->ep_rsp);
1da177e4
LT
346
347 /* scrub most status bits, and flush any fifo state */
2eeb0016 348 if (ep->dev->quirks & PLX_2280)
3e76fdcb
RR
349 tmp = BIT(FIFO_OVERFLOW) |
350 BIT(FIFO_UNDERFLOW);
950ee4c8
GL
351 else
352 tmp = 0;
353
3e76fdcb
RR
354 writel(tmp | BIT(TIMEOUT) |
355 BIT(USB_STALL_SENT) |
356 BIT(USB_IN_NAK_SENT) |
357 BIT(USB_IN_ACK_RCVD) |
358 BIT(USB_OUT_PING_NAK_SENT) |
359 BIT(USB_OUT_ACK_SENT) |
360 BIT(FIFO_FLUSH) |
361 BIT(SHORT_PACKET_OUT_DONE_INTERRUPT) |
362 BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT) |
363 BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
364 BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
365 BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
ae8e530a
RR
366 BIT(DATA_IN_TOKEN_INTERRUPT),
367 &ep->regs->ep_stat);
1da177e4
LT
368
369 /* fifo size is handled separately */
370}
371
adc82f77
RR
372static void ep_reset_338x(struct net2280_regs __iomem *regs,
373 struct net2280_ep *ep)
374{
375 u32 tmp, dmastat;
376
377 ep->desc = NULL;
378 INIT_LIST_HEAD(&ep->queue);
379
380 usb_ep_set_maxpacket_limit(&ep->ep, ~0);
381 ep->ep.ops = &net2280_ep_ops;
382
383 /* disable the dma, irqs, endpoint... */
384 if (ep->dma) {
385 writel(0, &ep->dma->dmactl);
3e76fdcb
RR
386 writel(BIT(DMA_ABORT_DONE_INTERRUPT) |
387 BIT(DMA_PAUSE_DONE_INTERRUPT) |
388 BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT) |
ae8e530a
RR
389 BIT(DMA_TRANSACTION_DONE_INTERRUPT),
390 /* | BIT(DMA_ABORT), */
391 &ep->dma->dmastat);
adc82f77
RR
392
393 dmastat = readl(&ep->dma->dmastat);
394 if (dmastat == 0x5002) {
e56e69cc 395 ep_warn(ep->dev, "The dmastat return = %x!!\n",
adc82f77
RR
396 dmastat);
397 writel(0x5a, &ep->dma->dmastat);
398 }
399
400 tmp = readl(&regs->pciirqenb0);
3e76fdcb 401 tmp &= ~BIT(ep_bit[ep->num]);
adc82f77
RR
402 writel(tmp, &regs->pciirqenb0);
403 } else {
404 if (ep->num < 5) {
405 tmp = readl(&regs->pciirqenb1);
3e76fdcb 406 tmp &= ~BIT((8 + ep->num)); /* completion */
adc82f77
RR
407 writel(tmp, &regs->pciirqenb1);
408 }
409 }
410 writel(0, &ep->regs->ep_irqenb);
411
3e76fdcb
RR
412 writel(BIT(SHORT_PACKET_OUT_DONE_INTERRUPT) |
413 BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT) |
414 BIT(FIFO_OVERFLOW) |
415 BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
416 BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
417 BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
418 BIT(DATA_IN_TOKEN_INTERRUPT), &ep->regs->ep_stat);
adc82f77
RR
419}
420
fae3c158 421static void nuke(struct net2280_ep *);
1da177e4 422
fae3c158 423static int net2280_disable(struct usb_ep *_ep)
1da177e4
LT
424{
425 struct net2280_ep *ep;
426 unsigned long flags;
427
fae3c158 428 ep = container_of(_ep, struct net2280_ep, ep);
1da177e4
LT
429 if (!_ep || !ep->desc || _ep->name == ep0name)
430 return -EINVAL;
431
fae3c158
RR
432 spin_lock_irqsave(&ep->dev->lock, flags);
433 nuke(ep);
adc82f77 434
2eeb0016 435 if (ep->dev->quirks & PLX_SUPERSPEED)
adc82f77
RR
436 ep_reset_338x(ep->dev->regs, ep);
437 else
438 ep_reset_228x(ep->dev->regs, ep);
1da177e4 439
e56e69cc 440 ep_vdbg(ep->dev, "disabled %s %s\n",
1da177e4
LT
441 ep->dma ? "dma" : "pio", _ep->name);
442
443 /* synch memory views with the device */
adc82f77 444 (void)readl(&ep->cfg->ep_cfg);
1da177e4 445
d588ff58 446 if (!ep->dma && ep->num >= 1 && ep->num <= 4)
fae3c158 447 ep->dma = &ep->dev->dma[ep->num - 1];
1da177e4 448
fae3c158 449 spin_unlock_irqrestore(&ep->dev->lock, flags);
1da177e4
LT
450 return 0;
451}
452
453/*-------------------------------------------------------------------------*/
454
fae3c158
RR
455static struct usb_request
456*net2280_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
1da177e4
LT
457{
458 struct net2280_ep *ep;
459 struct net2280_request *req;
460
461 if (!_ep)
462 return NULL;
fae3c158 463 ep = container_of(_ep, struct net2280_ep, ep);
1da177e4 464
7039f422 465 req = kzalloc(sizeof(*req), gfp_flags);
1da177e4
LT
466 if (!req)
467 return NULL;
468
fae3c158 469 INIT_LIST_HEAD(&req->queue);
1da177e4
LT
470
471 /* this dma descriptor may be swapped with the previous dummy */
472 if (ep->dma) {
473 struct net2280_dma *td;
474
fae3c158 475 td = pci_pool_alloc(ep->dev->requests, gfp_flags,
1da177e4
LT
476 &req->td_dma);
477 if (!td) {
fae3c158 478 kfree(req);
1da177e4
LT
479 return NULL;
480 }
481 td->dmacount = 0; /* not VALID */
1da177e4
LT
482 td->dmadesc = td->dmaaddr;
483 req->td = td;
484 }
485 return &req->req;
486}
487
fae3c158 488static void net2280_free_request(struct usb_ep *_ep, struct usb_request *_req)
1da177e4
LT
489{
490 struct net2280_ep *ep;
491 struct net2280_request *req;
492
fae3c158 493 ep = container_of(_ep, struct net2280_ep, ep);
1da177e4
LT
494 if (!_ep || !_req)
495 return;
496
fae3c158
RR
497 req = container_of(_req, struct net2280_request, req);
498 WARN_ON(!list_empty(&req->queue));
1da177e4 499 if (req->td)
fae3c158
RR
500 pci_pool_free(ep->dev->requests, req->td, req->td_dma);
501 kfree(req);
1da177e4
LT
502}
503
504/*-------------------------------------------------------------------------*/
505
1da177e4
LT
506/* load a packet into the fifo we use for usb IN transfers.
507 * works for all endpoints.
508 *
509 * NOTE: pio with ep-a..ep-d could stuff multiple packets into the fifo
510 * at a time, but this code is simpler because it knows it only writes
511 * one packet. ep-a..ep-d should use dma instead.
512 */
fae3c158 513static void write_fifo(struct net2280_ep *ep, struct usb_request *req)
1da177e4
LT
514{
515 struct net2280_ep_regs __iomem *regs = ep->regs;
516 u8 *buf;
517 u32 tmp;
518 unsigned count, total;
519
520 /* INVARIANT: fifo is currently empty. (testable) */
521
522 if (req) {
523 buf = req->buf + req->actual;
fae3c158 524 prefetch(buf);
1da177e4
LT
525 total = req->length - req->actual;
526 } else {
527 total = 0;
528 buf = NULL;
529 }
530
531 /* write just one packet at a time */
532 count = ep->ep.maxpacket;
533 if (count > total) /* min() cannot be used on a bitfield */
534 count = total;
535
e56e69cc 536 ep_vdbg(ep->dev, "write %s fifo (IN) %d bytes%s req %p\n",
1da177e4
LT
537 ep->ep.name, count,
538 (count != ep->ep.maxpacket) ? " (short)" : "",
539 req);
540 while (count >= 4) {
541 /* NOTE be careful if you try to align these. fifo lines
542 * should normally be full (4 bytes) and successive partial
543 * lines are ok only in certain cases.
544 */
fae3c158
RR
545 tmp = get_unaligned((u32 *)buf);
546 cpu_to_le32s(&tmp);
547 writel(tmp, &regs->ep_data);
1da177e4
LT
548 buf += 4;
549 count -= 4;
550 }
551
552 /* last fifo entry is "short" unless we wrote a full packet.
553 * also explicitly validate last word in (periodic) transfers
554 * when maxpacket is not a multiple of 4 bytes.
555 */
556 if (count || total < ep->ep.maxpacket) {
fae3c158
RR
557 tmp = count ? get_unaligned((u32 *)buf) : count;
558 cpu_to_le32s(&tmp);
559 set_fifo_bytecount(ep, count & 0x03);
560 writel(tmp, &regs->ep_data);
1da177e4
LT
561 }
562
563 /* pci writes may still be posted */
564}
565
566/* work around erratum 0106: PCI and USB race over the OUT fifo.
567 * caller guarantees chiprev 0100, out endpoint is NAKing, and
568 * there's no real data in the fifo.
569 *
570 * NOTE: also used in cases where that erratum doesn't apply:
571 * where the host wrote "too much" data to us.
572 */
fae3c158 573static void out_flush(struct net2280_ep *ep)
1da177e4
LT
574{
575 u32 __iomem *statp;
576 u32 tmp;
577
1da177e4 578 statp = &ep->regs->ep_stat;
d82f3db2
RR
579
580 tmp = readl(statp);
581 if (tmp & BIT(NAK_OUT_PACKETS)) {
582 ep_dbg(ep->dev, "%s %s %08x !NAK\n",
583 ep->ep.name, __func__, tmp);
584 writel(BIT(SET_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
585 }
586
3e76fdcb 587 writel(BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
ae8e530a
RR
588 BIT(DATA_PACKET_RECEIVED_INTERRUPT),
589 statp);
3e76fdcb 590 writel(BIT(FIFO_FLUSH), statp);
fae3c158
RR
591 /* Make sure that stap is written */
592 mb();
593 tmp = readl(statp);
ae8e530a 594 if (tmp & BIT(DATA_OUT_PING_TOKEN_INTERRUPT) &&
1da177e4 595 /* high speed did bulk NYET; fifo isn't filling */
ae8e530a 596 ep->dev->gadget.speed == USB_SPEED_FULL) {
1da177e4
LT
597 unsigned usec;
598
599 usec = 50; /* 64 byte bulk/interrupt */
3e76fdcb
RR
600 handshake(statp, BIT(USB_OUT_PING_NAK_SENT),
601 BIT(USB_OUT_PING_NAK_SENT), usec);
1da177e4
LT
602 /* NAK done; now CLEAR_NAK_OUT_PACKETS is safe */
603 }
604}
605
606/* unload packet(s) from the fifo we use for usb OUT transfers.
607 * returns true iff the request completed, because of short packet
608 * or the request buffer having filled with full packets.
609 *
610 * for ep-a..ep-d this will read multiple packets out when they
611 * have been accepted.
612 */
fae3c158 613static int read_fifo(struct net2280_ep *ep, struct net2280_request *req)
1da177e4
LT
614{
615 struct net2280_ep_regs __iomem *regs = ep->regs;
616 u8 *buf = req->req.buf + req->req.actual;
617 unsigned count, tmp, is_short;
618 unsigned cleanup = 0, prevent = 0;
619
620 /* erratum 0106 ... packets coming in during fifo reads might
621 * be incompletely rejected. not all cases have workarounds.
622 */
ae8e530a
RR
623 if (ep->dev->chiprev == 0x0100 &&
624 ep->dev->gadget.speed == USB_SPEED_FULL) {
fae3c158
RR
625 udelay(1);
626 tmp = readl(&ep->regs->ep_stat);
3e76fdcb 627 if ((tmp & BIT(NAK_OUT_PACKETS)))
1da177e4 628 cleanup = 1;
3e76fdcb 629 else if ((tmp & BIT(FIFO_FULL))) {
fae3c158 630 start_out_naking(ep);
1da177e4
LT
631 prevent = 1;
632 }
633 /* else: hope we don't see the problem */
634 }
635
636 /* never overflow the rx buffer. the fifo reads packets until
637 * it sees a short one; we might not be ready for them all.
638 */
fae3c158
RR
639 prefetchw(buf);
640 count = readl(&regs->ep_avail);
641 if (unlikely(count == 0)) {
642 udelay(1);
643 tmp = readl(&ep->regs->ep_stat);
644 count = readl(&regs->ep_avail);
1da177e4 645 /* handled that data already? */
3e76fdcb 646 if (count == 0 && (tmp & BIT(NAK_OUT_PACKETS)) == 0)
1da177e4
LT
647 return 0;
648 }
649
650 tmp = req->req.length - req->req.actual;
651 if (count > tmp) {
652 /* as with DMA, data overflow gets flushed */
653 if ((tmp % ep->ep.maxpacket) != 0) {
e56e69cc 654 ep_err(ep->dev,
1da177e4
LT
655 "%s out fifo %d bytes, expected %d\n",
656 ep->ep.name, count, tmp);
657 req->req.status = -EOVERFLOW;
658 cleanup = 1;
659 /* NAK_OUT_PACKETS will be set, so flushing is safe;
660 * the next read will start with the next packet
661 */
662 } /* else it's a ZLP, no worries */
663 count = tmp;
664 }
665 req->req.actual += count;
666
667 is_short = (count == 0) || ((count % ep->ep.maxpacket) != 0);
668
e56e69cc 669 ep_vdbg(ep->dev, "read %s fifo (OUT) %d bytes%s%s%s req %p %d/%d\n",
1da177e4
LT
670 ep->ep.name, count, is_short ? " (short)" : "",
671 cleanup ? " flush" : "", prevent ? " nak" : "",
672 req, req->req.actual, req->req.length);
673
674 while (count >= 4) {
fae3c158
RR
675 tmp = readl(&regs->ep_data);
676 cpu_to_le32s(&tmp);
677 put_unaligned(tmp, (u32 *)buf);
1da177e4
LT
678 buf += 4;
679 count -= 4;
680 }
681 if (count) {
fae3c158 682 tmp = readl(&regs->ep_data);
1da177e4
LT
683 /* LE conversion is implicit here: */
684 do {
685 *buf++ = (u8) tmp;
686 tmp >>= 8;
687 } while (--count);
688 }
689 if (cleanup)
fae3c158 690 out_flush(ep);
1da177e4 691 if (prevent) {
3e76fdcb 692 writel(BIT(CLEAR_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
fae3c158 693 (void) readl(&ep->regs->ep_rsp);
1da177e4
LT
694 }
695
ae8e530a
RR
696 return is_short || ((req->req.actual == req->req.length) &&
697 !req->req.zero);
1da177e4
LT
698}
699
700/* fill out dma descriptor to match a given request */
fae3c158
RR
701static void fill_dma_desc(struct net2280_ep *ep,
702 struct net2280_request *req, int valid)
1da177e4
LT
703{
704 struct net2280_dma *td = req->td;
705 u32 dmacount = req->req.length;
706
707 /* don't let DMA continue after a short OUT packet,
708 * so overruns can't affect the next transfer.
709 * in case of overruns on max-size packets, we can't
710 * stop the fifo from filling but we can flush it.
711 */
712 if (ep->is_in)
3e76fdcb 713 dmacount |= BIT(DMA_DIRECTION);
ae8e530a 714 if ((!ep->is_in && (dmacount % ep->ep.maxpacket) != 0) ||
2eeb0016 715 !(ep->dev->quirks & PLX_2280))
3e76fdcb 716 dmacount |= BIT(END_OF_CHAIN);
1da177e4
LT
717
718 req->valid = valid;
719 if (valid)
3e76fdcb 720 dmacount |= BIT(VALID_BIT);
90664198 721 dmacount |= BIT(DMA_DONE_INTERRUPT_ENABLE);
1da177e4
LT
722
723 /* td->dmadesc = previously set by caller */
724 td->dmaaddr = cpu_to_le32 (req->req.dma);
725
726 /* 2280 may be polling VALID_BIT through ep->dma->dmadesc */
fae3c158 727 wmb();
da2bbdcc 728 td->dmacount = cpu_to_le32(dmacount);
1da177e4
LT
729}
730
731static const u32 dmactl_default =
3e76fdcb
RR
732 BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT) |
733 BIT(DMA_CLEAR_COUNT_ENABLE) |
1da177e4 734 /* erratum 0116 workaround part 1 (use POLLING) */
3e76fdcb
RR
735 (POLL_100_USEC << DESCRIPTOR_POLLING_RATE) |
736 BIT(DMA_VALID_BIT_POLLING_ENABLE) |
737 BIT(DMA_VALID_BIT_ENABLE) |
738 BIT(DMA_SCATTER_GATHER_ENABLE) |
1da177e4 739 /* erratum 0116 workaround part 2 (no AUTOSTART) */
3e76fdcb 740 BIT(DMA_ENABLE);
1da177e4 741
fae3c158 742static inline void spin_stop_dma(struct net2280_dma_regs __iomem *dma)
1da177e4 743{
3e76fdcb 744 handshake(&dma->dmactl, BIT(DMA_ENABLE), 0, 50);
1da177e4
LT
745}
746
fae3c158 747static inline void stop_dma(struct net2280_dma_regs __iomem *dma)
1da177e4 748{
3e76fdcb 749 writel(readl(&dma->dmactl) & ~BIT(DMA_ENABLE), &dma->dmactl);
fae3c158 750 spin_stop_dma(dma);
1da177e4
LT
751}
752
fae3c158 753static void start_queue(struct net2280_ep *ep, u32 dmactl, u32 td_dma)
1da177e4
LT
754{
755 struct net2280_dma_regs __iomem *dma = ep->dma;
3e76fdcb 756 unsigned int tmp = BIT(VALID_BIT) | (ep->is_in << DMA_DIRECTION);
1da177e4 757
2eeb0016 758 if (!(ep->dev->quirks & PLX_2280))
3e76fdcb 759 tmp |= BIT(END_OF_CHAIN);
950ee4c8 760
fae3c158
RR
761 writel(tmp, &dma->dmacount);
762 writel(readl(&dma->dmastat), &dma->dmastat);
1da177e4 763
fae3c158 764 writel(td_dma, &dma->dmadesc);
2eeb0016 765 if (ep->dev->quirks & PLX_SUPERSPEED)
3e76fdcb 766 dmactl |= BIT(DMA_REQUEST_OUTSTANDING);
fae3c158 767 writel(dmactl, &dma->dmactl);
1da177e4
LT
768
769 /* erratum 0116 workaround part 3: pci arbiter away from net2280 */
fae3c158 770 (void) readl(&ep->dev->pci->pcimstctl);
1da177e4 771
3e76fdcb 772 writel(BIT(DMA_START), &dma->dmastat);
1da177e4
LT
773
774 if (!ep->is_in)
fae3c158 775 stop_out_naking(ep);
1da177e4
LT
776}
777
fae3c158 778static void start_dma(struct net2280_ep *ep, struct net2280_request *req)
1da177e4
LT
779{
780 u32 tmp;
781 struct net2280_dma_regs __iomem *dma = ep->dma;
782
783 /* FIXME can't use DMA for ZLPs */
784
785 /* on this path we "know" there's no dma active (yet) */
3e76fdcb 786 WARN_ON(readl(&dma->dmactl) & BIT(DMA_ENABLE));
fae3c158 787 writel(0, &ep->dma->dmactl);
1da177e4
LT
788
789 /* previous OUT packet might have been short */
fae3c158
RR
790 if (!ep->is_in && (readl(&ep->regs->ep_stat) &
791 BIT(NAK_OUT_PACKETS))) {
3e76fdcb 792 writel(BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT),
1da177e4
LT
793 &ep->regs->ep_stat);
794
fae3c158 795 tmp = readl(&ep->regs->ep_avail);
1da177e4 796 if (tmp) {
fae3c158 797 writel(readl(&dma->dmastat), &dma->dmastat);
1da177e4
LT
798
799 /* transfer all/some fifo data */
fae3c158
RR
800 writel(req->req.dma, &dma->dmaaddr);
801 tmp = min(tmp, req->req.length);
1da177e4
LT
802
803 /* dma irq, faking scatterlist status */
fae3c158 804 req->td->dmacount = cpu_to_le32(req->req.length - tmp);
ae8e530a
RR
805 writel(BIT(DMA_DONE_INTERRUPT_ENABLE) | tmp,
806 &dma->dmacount);
1da177e4
LT
807 req->td->dmadesc = 0;
808 req->valid = 1;
809
3e76fdcb
RR
810 writel(BIT(DMA_ENABLE), &dma->dmactl);
811 writel(BIT(DMA_START), &dma->dmastat);
1da177e4
LT
812 return;
813 }
814 }
815
816 tmp = dmactl_default;
817
818 /* force packet boundaries between dma requests, but prevent the
819 * controller from automagically writing a last "short" packet
820 * (zero length) unless the driver explicitly said to do that.
821 */
822 if (ep->is_in) {
fae3c158
RR
823 if (likely((req->req.length % ep->ep.maxpacket) ||
824 req->req.zero)){
3e76fdcb 825 tmp |= BIT(DMA_FIFO_VALIDATE);
1da177e4
LT
826 ep->in_fifo_validate = 1;
827 } else
828 ep->in_fifo_validate = 0;
829 }
830
831 /* init req->td, pointing to the current dummy */
832 req->td->dmadesc = cpu_to_le32 (ep->td_dma);
fae3c158 833 fill_dma_desc(ep, req, 1);
1da177e4 834
90664198 835 req->td->dmacount |= cpu_to_le32(BIT(END_OF_CHAIN));
1da177e4 836
fae3c158 837 start_queue(ep, tmp, req->td_dma);
1da177e4
LT
838}
839
840static inline void
fae3c158 841queue_dma(struct net2280_ep *ep, struct net2280_request *req, int valid)
1da177e4
LT
842{
843 struct net2280_dma *end;
844 dma_addr_t tmp;
845
846 /* swap new dummy for old, link; fill and maybe activate */
847 end = ep->dummy;
848 ep->dummy = req->td;
849 req->td = end;
850
851 tmp = ep->td_dma;
852 ep->td_dma = req->td_dma;
853 req->td_dma = tmp;
854
855 end->dmadesc = cpu_to_le32 (ep->td_dma);
856
fae3c158 857 fill_dma_desc(ep, req, valid);
1da177e4
LT
858}
859
860static void
fae3c158 861done(struct net2280_ep *ep, struct net2280_request *req, int status)
1da177e4
LT
862{
863 struct net2280 *dev;
864 unsigned stopped = ep->stopped;
865
fae3c158 866 list_del_init(&req->queue);
1da177e4
LT
867
868 if (req->req.status == -EINPROGRESS)
869 req->req.status = status;
870 else
871 status = req->req.status;
872
873 dev = ep->dev;
ae4d7933
FB
874 if (ep->dma)
875 usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);
1da177e4
LT
876
877 if (status && status != -ESHUTDOWN)
e56e69cc 878 ep_vdbg(dev, "complete %s req %p stat %d len %u/%u\n",
1da177e4
LT
879 ep->ep.name, &req->req, status,
880 req->req.actual, req->req.length);
881
882 /* don't modify queue heads during completion callback */
883 ep->stopped = 1;
fae3c158 884 spin_unlock(&dev->lock);
304f7e5e 885 usb_gadget_giveback_request(&ep->ep, &req->req);
fae3c158 886 spin_lock(&dev->lock);
1da177e4
LT
887 ep->stopped = stopped;
888}
889
890/*-------------------------------------------------------------------------*/
891
892static int
fae3c158 893net2280_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
1da177e4
LT
894{
895 struct net2280_request *req;
896 struct net2280_ep *ep;
897 struct net2280 *dev;
898 unsigned long flags;
899
900 /* we always require a cpu-view buffer, so that we can
901 * always use pio (as fallback or whatever).
902 */
fae3c158
RR
903 req = container_of(_req, struct net2280_request, req);
904 if (!_req || !_req->complete || !_req->buf ||
905 !list_empty(&req->queue))
1da177e4
LT
906 return -EINVAL;
907 if (_req->length > (~0 & DMA_BYTE_COUNT_MASK))
908 return -EDOM;
fae3c158 909 ep = container_of(_ep, struct net2280_ep, ep);
1da177e4
LT
910 if (!_ep || (!ep->desc && ep->num != 0))
911 return -EINVAL;
912 dev = ep->dev;
913 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
914 return -ESHUTDOWN;
915
916 /* FIXME implement PIO fallback for ZLPs with DMA */
917 if (ep->dma && _req->length == 0)
918 return -EOPNOTSUPP;
919
920 /* set up dma mapping in case the caller didn't */
ae4d7933
FB
921 if (ep->dma) {
922 int ret;
923
924 ret = usb_gadget_map_request(&dev->gadget, _req,
925 ep->is_in);
926 if (ret)
927 return ret;
1da177e4
LT
928 }
929
e56e69cc 930 ep_vdbg(dev, "%s queue req %p, len %d buf %p\n",
1da177e4 931 _ep->name, _req, _req->length, _req->buf);
1da177e4 932
fae3c158 933 spin_lock_irqsave(&dev->lock, flags);
1da177e4
LT
934
935 _req->status = -EINPROGRESS;
936 _req->actual = 0;
937
938 /* kickstart this i/o queue? */
485f44d0
RR
939 if (list_empty(&ep->queue) && !ep->stopped &&
940 !((dev->quirks & PLX_SUPERSPEED) && ep->dma &&
941 (readl(&ep->regs->ep_rsp) & BIT(CLEAR_ENDPOINT_HALT)))) {
942
1da177e4 943 /* use DMA if the endpoint supports it, else pio */
485f44d0 944 if (ep->dma)
fae3c158 945 start_dma(ep, req);
1da177e4
LT
946 else {
947 /* maybe there's no control data, just status ack */
948 if (ep->num == 0 && _req->length == 0) {
fae3c158
RR
949 allow_status(ep);
950 done(ep, req, 0);
e56e69cc 951 ep_vdbg(dev, "%s status ack\n", ep->ep.name);
1da177e4
LT
952 goto done;
953 }
954
955 /* PIO ... stuff the fifo, or unblock it. */
956 if (ep->is_in)
fae3c158
RR
957 write_fifo(ep, _req);
958 else if (list_empty(&ep->queue)) {
1da177e4
LT
959 u32 s;
960
961 /* OUT FIFO might have packet(s) buffered */
fae3c158 962 s = readl(&ep->regs->ep_stat);
3e76fdcb 963 if ((s & BIT(FIFO_EMPTY)) == 0) {
1da177e4
LT
964 /* note: _req->short_not_ok is
965 * ignored here since PIO _always_
966 * stops queue advance here, and
967 * _req->status doesn't change for
968 * short reads (only _req->actual)
969 */
fae3c158
RR
970 if (read_fifo(ep, req) &&
971 ep->num == 0) {
972 done(ep, req, 0);
973 allow_status(ep);
1da177e4
LT
974 /* don't queue it */
975 req = NULL;
fae3c158
RR
976 } else if (read_fifo(ep, req) &&
977 ep->num != 0) {
978 done(ep, req, 0);
979 req = NULL;
1da177e4 980 } else
fae3c158 981 s = readl(&ep->regs->ep_stat);
1da177e4
LT
982 }
983
984 /* don't NAK, let the fifo fill */
3e76fdcb
RR
985 if (req && (s & BIT(NAK_OUT_PACKETS)))
986 writel(BIT(CLEAR_NAK_OUT_PACKETS),
1da177e4
LT
987 &ep->regs->ep_rsp);
988 }
989 }
990
991 } else if (ep->dma) {
992 int valid = 1;
993
994 if (ep->is_in) {
995 int expect;
996
997 /* preventing magic zlps is per-engine state, not
998 * per-transfer; irq logic must recover hiccups.
999 */
fae3c158
RR
1000 expect = likely(req->req.zero ||
1001 (req->req.length % ep->ep.maxpacket));
1da177e4
LT
1002 if (expect != ep->in_fifo_validate)
1003 valid = 0;
1004 }
fae3c158 1005 queue_dma(ep, req, valid);
1da177e4
LT
1006
1007 } /* else the irq handler advances the queue. */
1008
1f26e28d 1009 ep->responded = 1;
1da177e4 1010 if (req)
fae3c158 1011 list_add_tail(&req->queue, &ep->queue);
1da177e4 1012done:
fae3c158 1013 spin_unlock_irqrestore(&dev->lock, flags);
1da177e4
LT
1014
1015 /* pci writes may still be posted */
1016 return 0;
1017}
1018
1019static inline void
fae3c158
RR
1020dma_done(struct net2280_ep *ep, struct net2280_request *req, u32 dmacount,
1021 int status)
1da177e4
LT
1022{
1023 req->req.actual = req->req.length - (DMA_BYTE_COUNT_MASK & dmacount);
fae3c158 1024 done(ep, req, status);
1da177e4
LT
1025}
1026
fae3c158 1027static void scan_dma_completions(struct net2280_ep *ep)
1da177e4
LT
1028{
1029 /* only look at descriptors that were "naturally" retired,
1030 * so fifo and list head state won't matter
1031 */
fae3c158 1032 while (!list_empty(&ep->queue)) {
1da177e4
LT
1033 struct net2280_request *req;
1034 u32 tmp;
1035
fae3c158 1036 req = list_entry(ep->queue.next,
1da177e4
LT
1037 struct net2280_request, queue);
1038 if (!req->valid)
1039 break;
fae3c158
RR
1040 rmb();
1041 tmp = le32_to_cpup(&req->td->dmacount);
3e76fdcb 1042 if ((tmp & BIT(VALID_BIT)) != 0)
1da177e4
LT
1043 break;
1044
1045 /* SHORT_PACKET_TRANSFERRED_INTERRUPT handles "usb-short"
1046 * cases where DMA must be aborted; this code handles
1047 * all non-abort DMA completions.
1048 */
fae3c158 1049 if (unlikely(req->td->dmadesc == 0)) {
1da177e4 1050 /* paranoia */
fae3c158 1051 tmp = readl(&ep->dma->dmacount);
1da177e4
LT
1052 if (tmp & DMA_BYTE_COUNT_MASK)
1053 break;
1054 /* single transfer mode */
fae3c158 1055 dma_done(ep, req, tmp, 0);
1da177e4 1056 break;
ae8e530a 1057 } else if (!ep->is_in &&
43780aaa
RR
1058 (req->req.length % ep->ep.maxpacket) &&
1059 !(ep->dev->quirks & PLX_SUPERSPEED)) {
1da177e4 1060
18a4e65f 1061 tmp = readl(&ep->regs->ep_stat);
1da177e4
LT
1062 /* AVOID TROUBLE HERE by not issuing short reads from
1063 * your gadget driver. That helps avoids errata 0121,
1064 * 0122, and 0124; not all cases trigger the warning.
1065 */
3e76fdcb 1066 if ((tmp & BIT(NAK_OUT_PACKETS)) == 0) {
e56e69cc 1067 ep_warn(ep->dev, "%s lost packet sync!\n",
1da177e4
LT
1068 ep->ep.name);
1069 req->req.status = -EOVERFLOW;
fae3c158
RR
1070 } else {
1071 tmp = readl(&ep->regs->ep_avail);
1072 if (tmp) {
1073 /* fifo gets flushed later */
1074 ep->out_overflow = 1;
e56e69cc 1075 ep_dbg(ep->dev,
fae3c158 1076 "%s dma, discard %d len %d\n",
1da177e4
LT
1077 ep->ep.name, tmp,
1078 req->req.length);
fae3c158
RR
1079 req->req.status = -EOVERFLOW;
1080 }
1da177e4
LT
1081 }
1082 }
fae3c158 1083 dma_done(ep, req, tmp, 0);
1da177e4
LT
1084 }
1085}
1086
fae3c158 1087static void restart_dma(struct net2280_ep *ep)
1da177e4
LT
1088{
1089 struct net2280_request *req;
1da177e4
LT
1090
1091 if (ep->stopped)
1092 return;
fae3c158 1093 req = list_entry(ep->queue.next, struct net2280_request, queue);
1da177e4 1094
90664198 1095 start_dma(ep, req);
1da177e4
LT
1096}
1097
e721c457 1098static void abort_dma(struct net2280_ep *ep)
1da177e4
LT
1099{
1100 /* abort the current transfer */
fae3c158 1101 if (likely(!list_empty(&ep->queue))) {
1da177e4 1102 /* FIXME work around errata 0121, 0122, 0124 */
3e76fdcb 1103 writel(BIT(DMA_ABORT), &ep->dma->dmastat);
fae3c158 1104 spin_stop_dma(ep->dma);
1da177e4 1105 } else
fae3c158
RR
1106 stop_dma(ep->dma);
1107 scan_dma_completions(ep);
1da177e4
LT
1108}
1109
1110/* dequeue ALL requests */
fae3c158 1111static void nuke(struct net2280_ep *ep)
1da177e4
LT
1112{
1113 struct net2280_request *req;
1114
1115 /* called with spinlock held */
1116 ep->stopped = 1;
1117 if (ep->dma)
fae3c158
RR
1118 abort_dma(ep);
1119 while (!list_empty(&ep->queue)) {
1120 req = list_entry(ep->queue.next,
1da177e4
LT
1121 struct net2280_request,
1122 queue);
fae3c158 1123 done(ep, req, -ESHUTDOWN);
1da177e4
LT
1124 }
1125}
1126
1127/* dequeue JUST ONE request */
fae3c158 1128static int net2280_dequeue(struct usb_ep *_ep, struct usb_request *_req)
1da177e4
LT
1129{
1130 struct net2280_ep *ep;
1131 struct net2280_request *req;
1132 unsigned long flags;
1133 u32 dmactl;
1134 int stopped;
1135
fae3c158 1136 ep = container_of(_ep, struct net2280_ep, ep);
1da177e4
LT
1137 if (!_ep || (!ep->desc && ep->num != 0) || !_req)
1138 return -EINVAL;
1139
fae3c158 1140 spin_lock_irqsave(&ep->dev->lock, flags);
1da177e4
LT
1141 stopped = ep->stopped;
1142
1143 /* quiesce dma while we patch the queue */
1144 dmactl = 0;
1145 ep->stopped = 1;
1146 if (ep->dma) {
fae3c158 1147 dmactl = readl(&ep->dma->dmactl);
1da177e4 1148 /* WARNING erratum 0127 may kick in ... */
fae3c158
RR
1149 stop_dma(ep->dma);
1150 scan_dma_completions(ep);
1da177e4
LT
1151 }
1152
1153 /* make sure it's still queued on this endpoint */
fae3c158 1154 list_for_each_entry(req, &ep->queue, queue) {
1da177e4
LT
1155 if (&req->req == _req)
1156 break;
1157 }
1158 if (&req->req != _req) {
fae3c158 1159 spin_unlock_irqrestore(&ep->dev->lock, flags);
1da177e4
LT
1160 return -EINVAL;
1161 }
1162
1163 /* queue head may be partially complete. */
1164 if (ep->queue.next == &req->queue) {
1165 if (ep->dma) {
e56e69cc 1166 ep_dbg(ep->dev, "unlink (%s) dma\n", _ep->name);
1da177e4 1167 _req->status = -ECONNRESET;
fae3c158
RR
1168 abort_dma(ep);
1169 if (likely(ep->queue.next == &req->queue)) {
1170 /* NOTE: misreports single-transfer mode*/
1da177e4 1171 req->td->dmacount = 0; /* invalidate */
fae3c158
RR
1172 dma_done(ep, req,
1173 readl(&ep->dma->dmacount),
1da177e4
LT
1174 -ECONNRESET);
1175 }
1176 } else {
e56e69cc 1177 ep_dbg(ep->dev, "unlink (%s) pio\n", _ep->name);
fae3c158 1178 done(ep, req, -ECONNRESET);
1da177e4
LT
1179 }
1180 req = NULL;
1da177e4
LT
1181 }
1182
1183 if (req)
fae3c158 1184 done(ep, req, -ECONNRESET);
1da177e4
LT
1185 ep->stopped = stopped;
1186
1187 if (ep->dma) {
1188 /* turn off dma on inactive queues */
fae3c158
RR
1189 if (list_empty(&ep->queue))
1190 stop_dma(ep->dma);
1da177e4
LT
1191 else if (!ep->stopped) {
1192 /* resume current request, or start new one */
1193 if (req)
fae3c158 1194 writel(dmactl, &ep->dma->dmactl);
1da177e4 1195 else
fae3c158 1196 start_dma(ep, list_entry(ep->queue.next,
1da177e4
LT
1197 struct net2280_request, queue));
1198 }
1199 }
1200
fae3c158 1201 spin_unlock_irqrestore(&ep->dev->lock, flags);
1da177e4
LT
1202 return 0;
1203}
1204
1205/*-------------------------------------------------------------------------*/
1206
fae3c158 1207static int net2280_fifo_status(struct usb_ep *_ep);
1da177e4
LT
1208
1209static int
8066134f 1210net2280_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
1da177e4
LT
1211{
1212 struct net2280_ep *ep;
1213 unsigned long flags;
1214 int retval = 0;
1215
fae3c158 1216 ep = container_of(_ep, struct net2280_ep, ep);
1da177e4
LT
1217 if (!_ep || (!ep->desc && ep->num != 0))
1218 return -EINVAL;
1219 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1220 return -ESHUTDOWN;
1221 if (ep->desc /* not ep0 */ && (ep->desc->bmAttributes & 0x03)
1222 == USB_ENDPOINT_XFER_ISOC)
1223 return -EINVAL;
1224
fae3c158
RR
1225 spin_lock_irqsave(&ep->dev->lock, flags);
1226 if (!list_empty(&ep->queue))
1da177e4 1227 retval = -EAGAIN;
fae3c158 1228 else if (ep->is_in && value && net2280_fifo_status(_ep) != 0)
1da177e4
LT
1229 retval = -EAGAIN;
1230 else {
e56e69cc 1231 ep_vdbg(ep->dev, "%s %s %s\n", _ep->name,
8066134f
AS
1232 value ? "set" : "clear",
1233 wedged ? "wedge" : "halt");
1da177e4
LT
1234 /* set/clear, then synch memory views with the device */
1235 if (value) {
1236 if (ep->num == 0)
1237 ep->dev->protocol_stall = 1;
1238 else
fae3c158 1239 set_halt(ep);
8066134f
AS
1240 if (wedged)
1241 ep->wedged = 1;
1242 } else {
fae3c158 1243 clear_halt(ep);
2eeb0016 1244 if (ep->dev->quirks & PLX_SUPERSPEED &&
adc82f77
RR
1245 !list_empty(&ep->queue) && ep->td_dma)
1246 restart_dma(ep);
8066134f
AS
1247 ep->wedged = 0;
1248 }
fae3c158 1249 (void) readl(&ep->regs->ep_rsp);
1da177e4 1250 }
fae3c158 1251 spin_unlock_irqrestore(&ep->dev->lock, flags);
1da177e4
LT
1252
1253 return retval;
1254}
1255
fae3c158 1256static int net2280_set_halt(struct usb_ep *_ep, int value)
8066134f
AS
1257{
1258 return net2280_set_halt_and_wedge(_ep, value, 0);
1259}
1260
fae3c158 1261static int net2280_set_wedge(struct usb_ep *_ep)
8066134f
AS
1262{
1263 if (!_ep || _ep->name == ep0name)
1264 return -EINVAL;
1265 return net2280_set_halt_and_wedge(_ep, 1, 1);
1266}
1267
fae3c158 1268static int net2280_fifo_status(struct usb_ep *_ep)
1da177e4
LT
1269{
1270 struct net2280_ep *ep;
1271 u32 avail;
1272
fae3c158 1273 ep = container_of(_ep, struct net2280_ep, ep);
1da177e4
LT
1274 if (!_ep || (!ep->desc && ep->num != 0))
1275 return -ENODEV;
1276 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1277 return -ESHUTDOWN;
1278
3e76fdcb 1279 avail = readl(&ep->regs->ep_avail) & (BIT(12) - 1);
1da177e4
LT
1280 if (avail > ep->fifo_size)
1281 return -EOVERFLOW;
1282 if (ep->is_in)
1283 avail = ep->fifo_size - avail;
1284 return avail;
1285}
1286
fae3c158 1287static void net2280_fifo_flush(struct usb_ep *_ep)
1da177e4
LT
1288{
1289 struct net2280_ep *ep;
1290
fae3c158 1291 ep = container_of(_ep, struct net2280_ep, ep);
1da177e4
LT
1292 if (!_ep || (!ep->desc && ep->num != 0))
1293 return;
1294 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1295 return;
1296
3e76fdcb 1297 writel(BIT(FIFO_FLUSH), &ep->regs->ep_stat);
fae3c158 1298 (void) readl(&ep->regs->ep_rsp);
1da177e4
LT
1299}
1300
901b3d75 1301static const struct usb_ep_ops net2280_ep_ops = {
1da177e4
LT
1302 .enable = net2280_enable,
1303 .disable = net2280_disable,
1304
1305 .alloc_request = net2280_alloc_request,
1306 .free_request = net2280_free_request,
1307
1da177e4
LT
1308 .queue = net2280_queue,
1309 .dequeue = net2280_dequeue,
1310
1311 .set_halt = net2280_set_halt,
8066134f 1312 .set_wedge = net2280_set_wedge,
1da177e4
LT
1313 .fifo_status = net2280_fifo_status,
1314 .fifo_flush = net2280_fifo_flush,
1315};
1316
1317/*-------------------------------------------------------------------------*/
1318
fae3c158 1319static int net2280_get_frame(struct usb_gadget *_gadget)
1da177e4
LT
1320{
1321 struct net2280 *dev;
1322 unsigned long flags;
1323 u16 retval;
1324
1325 if (!_gadget)
1326 return -ENODEV;
fae3c158
RR
1327 dev = container_of(_gadget, struct net2280, gadget);
1328 spin_lock_irqsave(&dev->lock, flags);
1329 retval = get_idx_reg(dev->regs, REG_FRAME) & 0x03ff;
1330 spin_unlock_irqrestore(&dev->lock, flags);
1da177e4
LT
1331 return retval;
1332}
1333
fae3c158 1334static int net2280_wakeup(struct usb_gadget *_gadget)
1da177e4
LT
1335{
1336 struct net2280 *dev;
1337 u32 tmp;
1338 unsigned long flags;
1339
1340 if (!_gadget)
1341 return 0;
fae3c158 1342 dev = container_of(_gadget, struct net2280, gadget);
1da177e4 1343
fae3c158
RR
1344 spin_lock_irqsave(&dev->lock, flags);
1345 tmp = readl(&dev->usb->usbctl);
3e76fdcb
RR
1346 if (tmp & BIT(DEVICE_REMOTE_WAKEUP_ENABLE))
1347 writel(BIT(GENERATE_RESUME), &dev->usb->usbstat);
fae3c158 1348 spin_unlock_irqrestore(&dev->lock, flags);
1da177e4
LT
1349
1350 /* pci writes may still be posted */
1351 return 0;
1352}
1353
fae3c158 1354static int net2280_set_selfpowered(struct usb_gadget *_gadget, int value)
1da177e4
LT
1355{
1356 struct net2280 *dev;
1357 u32 tmp;
1358 unsigned long flags;
1359
1360 if (!_gadget)
1361 return 0;
fae3c158 1362 dev = container_of(_gadget, struct net2280, gadget);
1da177e4 1363
fae3c158
RR
1364 spin_lock_irqsave(&dev->lock, flags);
1365 tmp = readl(&dev->usb->usbctl);
adc82f77 1366 if (value) {
3e76fdcb 1367 tmp |= BIT(SELF_POWERED_STATUS);
adc82f77
RR
1368 dev->selfpowered = 1;
1369 } else {
3e76fdcb 1370 tmp &= ~BIT(SELF_POWERED_STATUS);
adc82f77
RR
1371 dev->selfpowered = 0;
1372 }
fae3c158
RR
1373 writel(tmp, &dev->usb->usbctl);
1374 spin_unlock_irqrestore(&dev->lock, flags);
1da177e4
LT
1375
1376 return 0;
1377}
1378
1379static int net2280_pullup(struct usb_gadget *_gadget, int is_on)
1380{
1381 struct net2280 *dev;
1382 u32 tmp;
1383 unsigned long flags;
1384
1385 if (!_gadget)
1386 return -ENODEV;
fae3c158 1387 dev = container_of(_gadget, struct net2280, gadget);
1da177e4 1388
fae3c158
RR
1389 spin_lock_irqsave(&dev->lock, flags);
1390 tmp = readl(&dev->usb->usbctl);
1da177e4
LT
1391 dev->softconnect = (is_on != 0);
1392 if (is_on)
3e76fdcb 1393 tmp |= BIT(USB_DETECT_ENABLE);
1da177e4 1394 else
3e76fdcb 1395 tmp &= ~BIT(USB_DETECT_ENABLE);
fae3c158
RR
1396 writel(tmp, &dev->usb->usbctl);
1397 spin_unlock_irqrestore(&dev->lock, flags);
1da177e4
LT
1398
1399 return 0;
1400}
1401
4cf5e00b
FB
1402static int net2280_start(struct usb_gadget *_gadget,
1403 struct usb_gadget_driver *driver);
22835b80 1404static int net2280_stop(struct usb_gadget *_gadget);
0f91349b 1405
1da177e4
LT
1406static const struct usb_gadget_ops net2280_ops = {
1407 .get_frame = net2280_get_frame,
1408 .wakeup = net2280_wakeup,
1409 .set_selfpowered = net2280_set_selfpowered,
1410 .pullup = net2280_pullup,
4cf5e00b
FB
1411 .udc_start = net2280_start,
1412 .udc_stop = net2280_stop,
1da177e4
LT
1413};
1414
1415/*-------------------------------------------------------------------------*/
1416
b99b406c 1417#ifdef CONFIG_USB_GADGET_DEBUG_FILES
1da177e4
LT
1418
1419/* FIXME move these into procfs, and use seq_file.
1420 * Sysfs _still_ doesn't behave for arbitrarily sized files,
1421 * and also doesn't help products using this with 2.4 kernels.
1422 */
1423
1424/* "function" sysfs attribute */
ce26bd23
GKH
1425static ssize_t function_show(struct device *_dev, struct device_attribute *attr,
1426 char *buf)
1da177e4 1427{
fae3c158 1428 struct net2280 *dev = dev_get_drvdata(_dev);
1da177e4 1429
fae3c158
RR
1430 if (!dev->driver || !dev->driver->function ||
1431 strlen(dev->driver->function) > PAGE_SIZE)
1da177e4 1432 return 0;
fae3c158 1433 return scnprintf(buf, PAGE_SIZE, "%s\n", dev->driver->function);
1da177e4 1434}
ce26bd23 1435static DEVICE_ATTR_RO(function);
1da177e4 1436
ce26bd23
GKH
1437static ssize_t registers_show(struct device *_dev,
1438 struct device_attribute *attr, char *buf)
1da177e4
LT
1439{
1440 struct net2280 *dev;
1441 char *next;
1442 unsigned size, t;
1443 unsigned long flags;
1444 int i;
1445 u32 t1, t2;
30e69598 1446 const char *s;
1da177e4 1447
fae3c158 1448 dev = dev_get_drvdata(_dev);
1da177e4
LT
1449 next = buf;
1450 size = PAGE_SIZE;
fae3c158 1451 spin_lock_irqsave(&dev->lock, flags);
1da177e4
LT
1452
1453 if (dev->driver)
1454 s = dev->driver->driver.name;
1455 else
1456 s = "(none)";
1457
1458 /* Main Control Registers */
fae3c158 1459 t = scnprintf(next, size, "%s version " DRIVER_VERSION
d588ff58 1460 ", chiprev %04x\n\n"
1da177e4
LT
1461 "devinit %03x fifoctl %08x gadget '%s'\n"
1462 "pci irqenb0 %02x irqenb1 %08x "
1463 "irqstat0 %04x irqstat1 %08x\n",
1464 driver_name, dev->chiprev,
fae3c158
RR
1465 readl(&dev->regs->devinit),
1466 readl(&dev->regs->fifoctl),
1da177e4 1467 s,
fae3c158
RR
1468 readl(&dev->regs->pciirqenb0),
1469 readl(&dev->regs->pciirqenb1),
1470 readl(&dev->regs->irqstat0),
1471 readl(&dev->regs->irqstat1));
1da177e4
LT
1472 size -= t;
1473 next += t;
1474
1475 /* USB Control Registers */
fae3c158
RR
1476 t1 = readl(&dev->usb->usbctl);
1477 t2 = readl(&dev->usb->usbstat);
3e76fdcb
RR
1478 if (t1 & BIT(VBUS_PIN)) {
1479 if (t2 & BIT(HIGH_SPEED))
1da177e4
LT
1480 s = "high speed";
1481 else if (dev->gadget.speed == USB_SPEED_UNKNOWN)
1482 s = "powered";
1483 else
1484 s = "full speed";
1485 /* full speed bit (6) not working?? */
1486 } else
1487 s = "not attached";
fae3c158 1488 t = scnprintf(next, size,
1da177e4
LT
1489 "stdrsp %08x usbctl %08x usbstat %08x "
1490 "addr 0x%02x (%s)\n",
fae3c158
RR
1491 readl(&dev->usb->stdrsp), t1, t2,
1492 readl(&dev->usb->ouraddr), s);
1da177e4
LT
1493 size -= t;
1494 next += t;
1495
1496 /* PCI Master Control Registers */
1497
1498 /* DMA Control Registers */
1499
1500 /* Configurable EP Control Registers */
adc82f77 1501 for (i = 0; i < dev->n_ep; i++) {
1da177e4
LT
1502 struct net2280_ep *ep;
1503
fae3c158 1504 ep = &dev->ep[i];
1da177e4
LT
1505 if (i && !ep->desc)
1506 continue;
1507
adc82f77 1508 t1 = readl(&ep->cfg->ep_cfg);
fae3c158
RR
1509 t2 = readl(&ep->regs->ep_rsp) & 0xff;
1510 t = scnprintf(next, size,
1da177e4
LT
1511 "\n%s\tcfg %05x rsp (%02x) %s%s%s%s%s%s%s%s"
1512 "irqenb %02x\n",
1513 ep->ep.name, t1, t2,
3e76fdcb 1514 (t2 & BIT(CLEAR_NAK_OUT_PACKETS))
1da177e4 1515 ? "NAK " : "",
3e76fdcb 1516 (t2 & BIT(CLEAR_EP_HIDE_STATUS_PHASE))
1da177e4 1517 ? "hide " : "",
3e76fdcb 1518 (t2 & BIT(CLEAR_EP_FORCE_CRC_ERROR))
1da177e4 1519 ? "CRC " : "",
3e76fdcb 1520 (t2 & BIT(CLEAR_INTERRUPT_MODE))
1da177e4 1521 ? "interrupt " : "",
3e76fdcb 1522 (t2 & BIT(CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE))
1da177e4 1523 ? "status " : "",
3e76fdcb 1524 (t2 & BIT(CLEAR_NAK_OUT_PACKETS_MODE))
1da177e4 1525 ? "NAKmode " : "",
3e76fdcb 1526 (t2 & BIT(CLEAR_ENDPOINT_TOGGLE))
1da177e4 1527 ? "DATA1 " : "DATA0 ",
3e76fdcb 1528 (t2 & BIT(CLEAR_ENDPOINT_HALT))
1da177e4 1529 ? "HALT " : "",
fae3c158 1530 readl(&ep->regs->ep_irqenb));
1da177e4
LT
1531 size -= t;
1532 next += t;
1533
fae3c158 1534 t = scnprintf(next, size,
1da177e4
LT
1535 "\tstat %08x avail %04x "
1536 "(ep%d%s-%s)%s\n",
fae3c158
RR
1537 readl(&ep->regs->ep_stat),
1538 readl(&ep->regs->ep_avail),
1539 t1 & 0x0f, DIR_STRING(t1),
1540 type_string(t1 >> 8),
1da177e4
LT
1541 ep->stopped ? "*" : "");
1542 size -= t;
1543 next += t;
1544
1545 if (!ep->dma)
1546 continue;
1547
fae3c158 1548 t = scnprintf(next, size,
1da177e4
LT
1549 " dma\tctl %08x stat %08x count %08x\n"
1550 "\taddr %08x desc %08x\n",
fae3c158
RR
1551 readl(&ep->dma->dmactl),
1552 readl(&ep->dma->dmastat),
1553 readl(&ep->dma->dmacount),
1554 readl(&ep->dma->dmaaddr),
1555 readl(&ep->dma->dmadesc));
1da177e4
LT
1556 size -= t;
1557 next += t;
1558
1559 }
1560
fae3c158 1561 /* Indexed Registers (none yet) */
1da177e4
LT
1562
1563 /* Statistics */
fae3c158 1564 t = scnprintf(next, size, "\nirqs: ");
1da177e4
LT
1565 size -= t;
1566 next += t;
adc82f77 1567 for (i = 0; i < dev->n_ep; i++) {
1da177e4
LT
1568 struct net2280_ep *ep;
1569
fae3c158 1570 ep = &dev->ep[i];
1da177e4
LT
1571 if (i && !ep->irqs)
1572 continue;
fae3c158 1573 t = scnprintf(next, size, " %s/%lu", ep->ep.name, ep->irqs);
1da177e4
LT
1574 size -= t;
1575 next += t;
1576
1577 }
fae3c158 1578 t = scnprintf(next, size, "\n");
1da177e4
LT
1579 size -= t;
1580 next += t;
1581
fae3c158 1582 spin_unlock_irqrestore(&dev->lock, flags);
1da177e4
LT
1583
1584 return PAGE_SIZE - size;
1585}
ce26bd23 1586static DEVICE_ATTR_RO(registers);
1da177e4 1587
ce26bd23
GKH
1588static ssize_t queues_show(struct device *_dev, struct device_attribute *attr,
1589 char *buf)
1da177e4
LT
1590{
1591 struct net2280 *dev;
1592 char *next;
1593 unsigned size;
1594 unsigned long flags;
1595 int i;
1596
fae3c158 1597 dev = dev_get_drvdata(_dev);
1da177e4
LT
1598 next = buf;
1599 size = PAGE_SIZE;
fae3c158 1600 spin_lock_irqsave(&dev->lock, flags);
1da177e4 1601
adc82f77 1602 for (i = 0; i < dev->n_ep; i++) {
fae3c158 1603 struct net2280_ep *ep = &dev->ep[i];
1da177e4
LT
1604 struct net2280_request *req;
1605 int t;
1606
1607 if (i != 0) {
1608 const struct usb_endpoint_descriptor *d;
1609
1610 d = ep->desc;
1611 if (!d)
1612 continue;
1613 t = d->bEndpointAddress;
fae3c158 1614 t = scnprintf(next, size,
1da177e4
LT
1615 "\n%s (ep%d%s-%s) max %04x %s fifo %d\n",
1616 ep->ep.name, t & USB_ENDPOINT_NUMBER_MASK,
1617 (t & USB_DIR_IN) ? "in" : "out",
a27f37a1 1618 type_string(d->bmAttributes),
fae3c158 1619 usb_endpoint_maxp(d) & 0x1fff,
1da177e4
LT
1620 ep->dma ? "dma" : "pio", ep->fifo_size
1621 );
1622 } else /* ep0 should only have one transfer queued */
fae3c158 1623 t = scnprintf(next, size, "ep0 max 64 pio %s\n",
1da177e4
LT
1624 ep->is_in ? "in" : "out");
1625 if (t <= 0 || t > size)
1626 goto done;
1627 size -= t;
1628 next += t;
1629
fae3c158
RR
1630 if (list_empty(&ep->queue)) {
1631 t = scnprintf(next, size, "\t(nothing queued)\n");
1da177e4
LT
1632 if (t <= 0 || t > size)
1633 goto done;
1634 size -= t;
1635 next += t;
1636 continue;
1637 }
fae3c158
RR
1638 list_for_each_entry(req, &ep->queue, queue) {
1639 if (ep->dma && req->td_dma == readl(&ep->dma->dmadesc))
1640 t = scnprintf(next, size,
1da177e4
LT
1641 "\treq %p len %d/%d "
1642 "buf %p (dmacount %08x)\n",
1643 &req->req, req->req.actual,
1644 req->req.length, req->req.buf,
fae3c158 1645 readl(&ep->dma->dmacount));
1da177e4 1646 else
fae3c158 1647 t = scnprintf(next, size,
1da177e4
LT
1648 "\treq %p len %d/%d buf %p\n",
1649 &req->req, req->req.actual,
1650 req->req.length, req->req.buf);
1651 if (t <= 0 || t > size)
1652 goto done;
1653 size -= t;
1654 next += t;
1655
1656 if (ep->dma) {
1657 struct net2280_dma *td;
1658
1659 td = req->td;
fae3c158 1660 t = scnprintf(next, size, "\t td %08x "
1da177e4
LT
1661 " count %08x buf %08x desc %08x\n",
1662 (u32) req->td_dma,
fae3c158
RR
1663 le32_to_cpu(td->dmacount),
1664 le32_to_cpu(td->dmaaddr),
1665 le32_to_cpu(td->dmadesc));
1da177e4
LT
1666 if (t <= 0 || t > size)
1667 goto done;
1668 size -= t;
1669 next += t;
1670 }
1671 }
1672 }
1673
1674done:
fae3c158 1675 spin_unlock_irqrestore(&dev->lock, flags);
1da177e4
LT
1676 return PAGE_SIZE - size;
1677}
ce26bd23 1678static DEVICE_ATTR_RO(queues);
1da177e4
LT
1679
1680
1681#else
1682
fae3c158
RR
1683#define device_create_file(a, b) (0)
1684#define device_remove_file(a, b) do { } while (0)
1da177e4
LT
1685
1686#endif
1687
1688/*-------------------------------------------------------------------------*/
1689
1690/* another driver-specific mode might be a request type doing dma
1691 * to/from another device fifo instead of to/from memory.
1692 */
1693
fae3c158 1694static void set_fifo_mode(struct net2280 *dev, int mode)
1da177e4
LT
1695{
1696 /* keeping high bits preserves BAR2 */
fae3c158 1697 writel((0xffff << PCI_BASE2_RANGE) | mode, &dev->regs->fifoctl);
1da177e4
LT
1698
1699 /* always ep-{a,b,e,f} ... maybe not ep-c or ep-d */
fae3c158
RR
1700 INIT_LIST_HEAD(&dev->gadget.ep_list);
1701 list_add_tail(&dev->ep[1].ep.ep_list, &dev->gadget.ep_list);
1702 list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
1da177e4
LT
1703 switch (mode) {
1704 case 0:
fae3c158
RR
1705 list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list);
1706 list_add_tail(&dev->ep[4].ep.ep_list, &dev->gadget.ep_list);
1707 dev->ep[1].fifo_size = dev->ep[2].fifo_size = 1024;
1da177e4
LT
1708 break;
1709 case 1:
fae3c158 1710 dev->ep[1].fifo_size = dev->ep[2].fifo_size = 2048;
1da177e4
LT
1711 break;
1712 case 2:
fae3c158
RR
1713 list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list);
1714 dev->ep[1].fifo_size = 2048;
1715 dev->ep[2].fifo_size = 1024;
1da177e4
LT
1716 break;
1717 }
1718 /* fifo sizes for ep0, ep-c, ep-d, ep-e, and ep-f never change */
fae3c158
RR
1719 list_add_tail(&dev->ep[5].ep.ep_list, &dev->gadget.ep_list);
1720 list_add_tail(&dev->ep[6].ep.ep_list, &dev->gadget.ep_list);
1da177e4
LT
1721}
1722
adc82f77
RR
1723static void defect7374_disable_data_eps(struct net2280 *dev)
1724{
1725 /*
1726 * For Defect 7374, disable data EPs (and more):
1727 * - This phase undoes the earlier phase of the Defect 7374 workaround,
1728 * returning ep regs back to normal.
1729 */
1730 struct net2280_ep *ep;
1731 int i;
1732 unsigned char ep_sel;
1733 u32 tmp_reg;
1734
1735 for (i = 1; i < 5; i++) {
1736 ep = &dev->ep[i];
1737 writel(0, &ep->cfg->ep_cfg);
1738 }
1739
1740 /* CSROUT, CSRIN, PCIOUT, PCIIN, STATIN, RCIN */
1741 for (i = 0; i < 6; i++)
1742 writel(0, &dev->dep[i].dep_cfg);
1743
1744 for (ep_sel = 0; ep_sel <= 21; ep_sel++) {
1745 /* Select an endpoint for subsequent operations: */
1746 tmp_reg = readl(&dev->plregs->pl_ep_ctrl);
1747 writel(((tmp_reg & ~0x1f) | ep_sel), &dev->plregs->pl_ep_ctrl);
1748
1749 if (ep_sel < 2 || (ep_sel > 9 && ep_sel < 14) ||
1750 ep_sel == 18 || ep_sel == 20)
1751 continue;
1752
1753 /* Change settings on some selected endpoints */
1754 tmp_reg = readl(&dev->plregs->pl_ep_cfg_4);
3e76fdcb 1755 tmp_reg &= ~BIT(NON_CTRL_IN_TOLERATE_BAD_DIR);
adc82f77
RR
1756 writel(tmp_reg, &dev->plregs->pl_ep_cfg_4);
1757 tmp_reg = readl(&dev->plregs->pl_ep_ctrl);
3e76fdcb 1758 tmp_reg |= BIT(EP_INITIALIZED);
adc82f77
RR
1759 writel(tmp_reg, &dev->plregs->pl_ep_ctrl);
1760 }
1761}
1762
1763static void defect7374_enable_data_eps_zero(struct net2280 *dev)
1764{
1765 u32 tmp = 0, tmp_reg;
5517525e 1766 u32 scratch;
adc82f77
RR
1767 int i;
1768 unsigned char ep_sel;
1769
1770 scratch = get_idx_reg(dev->regs, SCRATCH);
5517525e
RR
1771
1772 WARN_ON((scratch & (0xf << DEFECT7374_FSM_FIELD))
1773 == DEFECT7374_FSM_SS_CONTROL_READ);
1774
adc82f77
RR
1775 scratch &= ~(0xf << DEFECT7374_FSM_FIELD);
1776
5517525e
RR
1777 ep_warn(dev, "Applying Defect 7374 workaround in soft mode this time");
1778 ep_warn(dev, "It will be applied again on cold reboot and on SS connect");
adc82f77 1779
5517525e
RR
1780 /* GPEPs: */
1781 tmp = ((0 << ENDPOINT_NUMBER) | BIT(ENDPOINT_DIRECTION) |
1782 (2 << OUT_ENDPOINT_TYPE) | (2 << IN_ENDPOINT_TYPE) |
1783 ((dev->enhanced_mode) ?
1784 BIT(OUT_ENDPOINT_ENABLE) : BIT(ENDPOINT_ENABLE)) |
1785 BIT(IN_ENDPOINT_ENABLE));
adc82f77 1786
5517525e
RR
1787 for (i = 1; i < 5; i++)
1788 writel(tmp, &dev->ep[i].cfg->ep_cfg);
adc82f77 1789
5517525e
RR
1790 /* CSRIN, PCIIN, STATIN, RCIN */
1791 tmp = ((0 << ENDPOINT_NUMBER) | BIT(ENDPOINT_ENABLE));
1792 writel(tmp, &dev->dep[1].dep_cfg);
1793 writel(tmp, &dev->dep[3].dep_cfg);
1794 writel(tmp, &dev->dep[4].dep_cfg);
1795 writel(tmp, &dev->dep[5].dep_cfg);
adc82f77 1796
5517525e
RR
1797 /* Implemented for development and debug.
1798 * Can be refined/tuned later. */
1799 for (ep_sel = 0; ep_sel <= 21; ep_sel++) {
1800 /* Select an endpoint for subsequent operations: */
1801 tmp_reg = readl(&dev->plregs->pl_ep_ctrl);
1802 writel(((tmp_reg & ~0x1f) | ep_sel),
1803 &dev->plregs->pl_ep_ctrl);
1804
1805 if (ep_sel == 1) {
1806 tmp =
1807 (readl(&dev->plregs->pl_ep_ctrl) |
1808 BIT(CLEAR_ACK_ERROR_CODE) | 0);
1809 writel(tmp, &dev->plregs->pl_ep_ctrl);
1810 continue;
adc82f77
RR
1811 }
1812
5517525e
RR
1813 if (ep_sel == 0 || (ep_sel > 9 && ep_sel < 14) ||
1814 ep_sel == 18 || ep_sel == 20)
1815 continue;
1816
1817 tmp = (readl(&dev->plregs->pl_ep_cfg_4) |
1818 BIT(NON_CTRL_IN_TOLERATE_BAD_DIR) | 0);
1819 writel(tmp, &dev->plregs->pl_ep_cfg_4);
1820
1821 tmp = readl(&dev->plregs->pl_ep_ctrl) &
1822 ~BIT(EP_INITIALIZED);
1823 writel(tmp, &dev->plregs->pl_ep_ctrl);
adc82f77 1824
adc82f77 1825 }
5517525e
RR
1826
1827 /* Set FSM to focus on the first Control Read:
1828 * - Tip: Connection speed is known upon the first
1829 * setup request. */
1830 scratch |= DEFECT7374_FSM_WAITING_FOR_CONTROL_READ;
1831 set_idx_reg(dev->regs, SCRATCH, scratch);
1832
adc82f77
RR
1833}
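/*
 * For orientation, the Defect 7374 state kept in the SCRATCH register
 * moves (roughly) through three values used in this driver:
 *
 *	DEFECT7374_FSM_WAITING_FOR_CONTROL_READ - set here; waiting for
 *		the first control read on the next connection;
 *	DEFECT7374_FSM_NON_SS_CONTROL_READ - the connection turned out to
 *		be FS/HS, so the workaround re-arms for the next connect;
 *	DEFECT7374_FSM_SS_CONTROL_READ - the SS data-phase ACK was seen
 *		and the workaround is considered complete.
 *
 * The transitions happen in defect7374_workaround() further below.
 */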
1834
1da177e4
LT
1835/* keeping it simple:
1836 * - one bus driver, initted first;
1837 * - one function driver, initted second
1838 *
1839 * most of the work to support multiple net2280 controllers would
1840 * be to associate this gadget driver (yes?) with all of them, or
1841 * perhaps to bind specific drivers to specific devices.
1842 */
1843
adc82f77 1844static void usb_reset_228x(struct net2280 *dev)
1da177e4
LT
1845{
1846 u32 tmp;
1847
1848 dev->gadget.speed = USB_SPEED_UNKNOWN;
fae3c158 1849 (void) readl(&dev->usb->usbctl);
1da177e4 1850
fae3c158 1851 net2280_led_init(dev);
1da177e4
LT
1852
1853 /* disable automatic responses, and irqs */
fae3c158
RR
1854 writel(0, &dev->usb->stdrsp);
1855 writel(0, &dev->regs->pciirqenb0);
1856 writel(0, &dev->regs->pciirqenb1);
1da177e4
LT
1857
1858 /* clear old dma and irq state */
1859 for (tmp = 0; tmp < 4; tmp++) {
adc82f77 1860 struct net2280_ep *ep = &dev->ep[tmp + 1];
1da177e4 1861 if (ep->dma)
adc82f77 1862 abort_dma(ep);
1da177e4 1863 }
adc82f77 1864
fae3c158 1865 writel(~0, &dev->regs->irqstat0),
3e76fdcb 1866 writel(~(u32)BIT(SUSPEND_REQUEST_INTERRUPT), &dev->regs->irqstat1),
1da177e4
LT
1867
1868 /* reset, and enable pci */
3e76fdcb
RR
1869 tmp = readl(&dev->regs->devinit) |
1870 BIT(PCI_ENABLE) |
1871 BIT(FIFO_SOFT_RESET) |
1872 BIT(USB_SOFT_RESET) |
1873 BIT(M8051_RESET);
fae3c158 1874 writel(tmp, &dev->regs->devinit);
1da177e4
LT
1875
1876 /* standard fifo and endpoint allocations */
fae3c158 1877 set_fifo_mode(dev, (fifo_mode <= 2) ? fifo_mode : 0);
1da177e4
LT
1878}
1879
adc82f77
RR
1880static void usb_reset_338x(struct net2280 *dev)
1881{
1882 u32 tmp;
adc82f77
RR
1883
1884 dev->gadget.speed = USB_SPEED_UNKNOWN;
1885 (void)readl(&dev->usb->usbctl);
1886
1887 net2280_led_init(dev);
1888
5517525e 1889 if (dev->bug7734_patched) {
adc82f77
RR
1890 /* disable automatic responses, and irqs */
1891 writel(0, &dev->usb->stdrsp);
1892 writel(0, &dev->regs->pciirqenb0);
1893 writel(0, &dev->regs->pciirqenb1);
1894 }
1895
1896 /* clear old dma and irq state */
1897 for (tmp = 0; tmp < 4; tmp++) {
1898 struct net2280_ep *ep = &dev->ep[tmp + 1];
1899
1900 if (ep->dma)
1901 abort_dma(ep);
1902 }
1903
1904 writel(~0, &dev->regs->irqstat0), writel(~0, &dev->regs->irqstat1);
1905
5517525e 1906 if (dev->bug7734_patched) {
adc82f77
RR
1907 /* reset, and enable pci */
1908 tmp = readl(&dev->regs->devinit) |
3e76fdcb
RR
1909 BIT(PCI_ENABLE) |
1910 BIT(FIFO_SOFT_RESET) |
1911 BIT(USB_SOFT_RESET) |
1912 BIT(M8051_RESET);
adc82f77
RR
1913
1914 writel(tmp, &dev->regs->devinit);
1915 }
1916
1917 /* always ep-{1,2,3,4} ... maybe not ep-3 or ep-4 */
1918 INIT_LIST_HEAD(&dev->gadget.ep_list);
1919
1920 for (tmp = 1; tmp < dev->n_ep; tmp++)
1921 list_add_tail(&dev->ep[tmp].ep.ep_list, &dev->gadget.ep_list);
1922
1923}
1924
1925static void usb_reset(struct net2280 *dev)
1926{
2eeb0016 1927 if (dev->quirks & PLX_LEGACY)
adc82f77
RR
1928 return usb_reset_228x(dev);
1929 return usb_reset_338x(dev);
1930}
1931
1932static void usb_reinit_228x(struct net2280 *dev)
1da177e4
LT
1933{
1934 u32 tmp;
1da177e4
LT
1935
1936 /* basic endpoint init */
1937 for (tmp = 0; tmp < 7; tmp++) {
fae3c158 1938 struct net2280_ep *ep = &dev->ep[tmp];
1da177e4 1939
fae3c158 1940 ep->ep.name = ep_name[tmp];
1da177e4
LT
1941 ep->dev = dev;
1942 ep->num = tmp;
1943
1944 if (tmp > 0 && tmp <= 4) {
1945 ep->fifo_size = 1024;
d588ff58 1946 ep->dma = &dev->dma[tmp - 1];
1da177e4
LT
1947 } else
1948 ep->fifo_size = 64;
fae3c158 1949 ep->regs = &dev->epregs[tmp];
adc82f77
RR
1950 ep->cfg = &dev->epregs[tmp];
1951 ep_reset_228x(dev->regs, ep);
1da177e4 1952 }
fae3c158
RR
1953 usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 64);
1954 usb_ep_set_maxpacket_limit(&dev->ep[5].ep, 64);
1955 usb_ep_set_maxpacket_limit(&dev->ep[6].ep, 64);
1da177e4 1956
fae3c158
RR
1957 dev->gadget.ep0 = &dev->ep[0].ep;
1958 dev->ep[0].stopped = 0;
1959 INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
1da177e4
LT
1960
1961 /* we want to prevent lowlevel/insecure access from the USB host,
1962 * but erratum 0119 means this enable bit is ignored
1963 */
1964 for (tmp = 0; tmp < 5; tmp++)
fae3c158 1965 writel(EP_DONTUSE, &dev->dep[tmp].dep_cfg);
1da177e4
LT
1966}
1967
adc82f77
RR
1968static void usb_reinit_338x(struct net2280 *dev)
1969{
adc82f77
RR
1970 int i;
1971 u32 tmp, val;
adc82f77
RR
1972 static const u32 ne[9] = { 0, 1, 2, 3, 4, 1, 2, 3, 4 };
1973 static const u32 ep_reg_addr[9] = { 0x00, 0xC0, 0x00, 0xC0, 0x00,
1974 0x00, 0xC0, 0x00, 0xC0 };
1975
adc82f77
RR
1976 /* basic endpoint init */
1977 for (i = 0; i < dev->n_ep; i++) {
1978 struct net2280_ep *ep = &dev->ep[i];
1979
1980 ep->ep.name = ep_name[i];
1981 ep->dev = dev;
1982 ep->num = i;
1983
d588ff58 1984 if (i > 0 && i <= 4)
adc82f77
RR
1985 ep->dma = &dev->dma[i - 1];
1986
1987 if (dev->enhanced_mode) {
1988 ep->cfg = &dev->epregs[ne[i]];
1989 ep->regs = (struct net2280_ep_regs __iomem *)
c43e97b2 1990 (((void __iomem *)&dev->epregs[ne[i]]) +
adc82f77
RR
1991 ep_reg_addr[i]);
1992 ep->fiforegs = &dev->fiforegs[i];
1993 } else {
1994 ep->cfg = &dev->epregs[i];
1995 ep->regs = &dev->epregs[i];
1996 ep->fiforegs = &dev->fiforegs[i];
1997 }
1998
1999 ep->fifo_size = (i != 0) ? 2048 : 512;
2000
2001 ep_reset_338x(dev->regs, ep);
2002 }
2003 usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 512);
2004
2005 dev->gadget.ep0 = &dev->ep[0].ep;
2006 dev->ep[0].stopped = 0;
2007
2008 /* Link layer set up */
5517525e 2009 if (dev->bug7734_patched) {
adc82f77 2010 tmp = readl(&dev->usb_ext->usbctl2) &
3e76fdcb 2011 ~(BIT(U1_ENABLE) | BIT(U2_ENABLE) | BIT(LTM_ENABLE));
adc82f77
RR
2012 writel(tmp, &dev->usb_ext->usbctl2);
2013 }
2014
2015 /* Hardware Defect and Workaround */
2016 val = readl(&dev->ll_lfps_regs->ll_lfps_5);
2017 val &= ~(0xf << TIMER_LFPS_6US);
2018 val |= 0x5 << TIMER_LFPS_6US;
2019 writel(val, &dev->ll_lfps_regs->ll_lfps_5);
2020
2021 val = readl(&dev->ll_lfps_regs->ll_lfps_6);
2022 val &= ~(0xffff << TIMER_LFPS_80US);
2023 val |= 0x0100 << TIMER_LFPS_80US;
2024 writel(val, &dev->ll_lfps_regs->ll_lfps_6);
2025
2026 /*
2027 * AA_AB Errata. Issue 4. Workaround for SuperSpeed USB
2028 * Hot Reset Exit Handshake may Fail in Specific Case using
2029 * Default Register Settings. Workaround for Enumeration test.
2030 */
2031 val = readl(&dev->ll_tsn_regs->ll_tsn_counters_2);
2032 val &= ~(0x1f << HOT_TX_NORESET_TS2);
2033 val |= 0x10 << HOT_TX_NORESET_TS2;
2034 writel(val, &dev->ll_tsn_regs->ll_tsn_counters_2);
2035
2036 val = readl(&dev->ll_tsn_regs->ll_tsn_counters_3);
2037 val &= ~(0x1f << HOT_RX_RESET_TS2);
2038 val |= 0x3 << HOT_RX_RESET_TS2;
2039 writel(val, &dev->ll_tsn_regs->ll_tsn_counters_3);
2040
2041 /*
2042 * Set Recovery Idle to Recover bit:
2043 * - On SS connections, setting Recovery Idle to Recover Fmw improves
2044 * link robustness with various hosts and hubs.
2045 * - It is safe to set for all connection speeds; all chip revisions.
2046 * - R-M-W to leave other bits undisturbed.
2047 * - Reference PLX TT-7372
2048 */
2049 val = readl(&dev->ll_chicken_reg->ll_tsn_chicken_bit);
3e76fdcb 2050 val |= BIT(RECOVERY_IDLE_TO_RECOVER_FMW);
adc82f77
RR
2051 writel(val, &dev->ll_chicken_reg->ll_tsn_chicken_bit);
2052
2053 INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
2054
2055 /* disable dedicated endpoints */
2056 writel(0x0D, &dev->dep[0].dep_cfg);
2057 writel(0x0D, &dev->dep[1].dep_cfg);
2058 writel(0x0E, &dev->dep[2].dep_cfg);
2059 writel(0x0E, &dev->dep[3].dep_cfg);
2060 writel(0x0F, &dev->dep[4].dep_cfg);
2061 writel(0x0C, &dev->dep[5].dep_cfg);
2062}
2063
2064static void usb_reinit(struct net2280 *dev)
2065{
2eeb0016 2066 if (dev->quirks & PLX_LEGACY)
adc82f77
RR
2067 return usb_reinit_228x(dev);
2068 return usb_reinit_338x(dev);
2069}
2070
2071static void ep0_start_228x(struct net2280 *dev)
1da177e4 2072{
3e76fdcb
RR
2073 writel(BIT(CLEAR_EP_HIDE_STATUS_PHASE) |
2074 BIT(CLEAR_NAK_OUT_PACKETS) |
ae8e530a
RR
2075 BIT(CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE),
2076 &dev->epregs[0].ep_rsp);
1da177e4
LT
2077
2078 /*
2079 * hardware optionally handles a bunch of standard requests
2080 * that the API hides from drivers anyway. have it do so.
2081 * endpoint status/features are handled in software, to
2082 * help pass tests for some dubious behavior.
2083 */
3e76fdcb
RR
2084 writel(BIT(SET_TEST_MODE) |
2085 BIT(SET_ADDRESS) |
2086 BIT(DEVICE_SET_CLEAR_DEVICE_REMOTE_WAKEUP) |
2087 BIT(GET_DEVICE_STATUS) |
ae8e530a
RR
2088 BIT(GET_INTERFACE_STATUS),
2089 &dev->usb->stdrsp);
3e76fdcb
RR
2090 writel(BIT(USB_ROOT_PORT_WAKEUP_ENABLE) |
2091 BIT(SELF_POWERED_USB_DEVICE) |
2092 BIT(REMOTE_WAKEUP_SUPPORT) |
2093 (dev->softconnect << USB_DETECT_ENABLE) |
2094 BIT(SELF_POWERED_STATUS),
2095 &dev->usb->usbctl);
1da177e4
LT
2096
2097 /* enable irqs so we can see ep0 and general operation */
3e76fdcb
RR
2098 writel(BIT(SETUP_PACKET_INTERRUPT_ENABLE) |
2099 BIT(ENDPOINT_0_INTERRUPT_ENABLE),
2100 &dev->regs->pciirqenb0);
2101 writel(BIT(PCI_INTERRUPT_ENABLE) |
2102 BIT(PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE) |
2103 BIT(PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE) |
2104 BIT(PCI_RETRY_ABORT_INTERRUPT_ENABLE) |
2105 BIT(VBUS_INTERRUPT_ENABLE) |
2106 BIT(ROOT_PORT_RESET_INTERRUPT_ENABLE) |
2107 BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE),
2108 &dev->regs->pciirqenb1);
1da177e4
LT
2109
2110 /* don't leave any writes posted */
fae3c158 2111 (void) readl(&dev->usb->usbctl);
1da177e4
LT
2112}
2113
adc82f77
RR
2114static void ep0_start_338x(struct net2280 *dev)
2115{
adc82f77 2116
5517525e 2117 if (dev->bug7734_patched)
3e76fdcb
RR
2118 writel(BIT(CLEAR_NAK_OUT_PACKETS_MODE) |
2119 BIT(SET_EP_HIDE_STATUS_PHASE),
adc82f77
RR
2120 &dev->epregs[0].ep_rsp);
2121
2122 /*
2123 * hardware optionally handles a bunch of standard requests
2124 * that the API hides from drivers anyway. have it do so.
2125 * endpoint status/features are handled in software, to
2126 * help pass tests for some dubious behavior.
2127 */
3e76fdcb
RR
2128 writel(BIT(SET_ISOCHRONOUS_DELAY) |
2129 BIT(SET_SEL) |
2130 BIT(SET_TEST_MODE) |
2131 BIT(SET_ADDRESS) |
2132 BIT(GET_INTERFACE_STATUS) |
2133 BIT(GET_DEVICE_STATUS),
adc82f77
RR
2134 &dev->usb->stdrsp);
2135 dev->wakeup_enable = 1;
3e76fdcb 2136 writel(BIT(USB_ROOT_PORT_WAKEUP_ENABLE) |
adc82f77 2137 (dev->softconnect << USB_DETECT_ENABLE) |
3e76fdcb 2138 BIT(DEVICE_REMOTE_WAKEUP_ENABLE),
adc82f77
RR
2139 &dev->usb->usbctl);
2140
2141 /* enable irqs so we can see ep0 and general operation */
3e76fdcb 2142 writel(BIT(SETUP_PACKET_INTERRUPT_ENABLE) |
ae8e530a
RR
2143 BIT(ENDPOINT_0_INTERRUPT_ENABLE),
2144 &dev->regs->pciirqenb0);
3e76fdcb
RR
2145 writel(BIT(PCI_INTERRUPT_ENABLE) |
2146 BIT(ROOT_PORT_RESET_INTERRUPT_ENABLE) |
2147 BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE) |
2148 BIT(VBUS_INTERRUPT_ENABLE),
adc82f77
RR
2149 &dev->regs->pciirqenb1);
2150
2151 /* don't leave any writes posted */
2152 (void)readl(&dev->usb->usbctl);
2153}
2154
2155static void ep0_start(struct net2280 *dev)
2156{
2eeb0016 2157 if (dev->quirks & PLX_LEGACY)
adc82f77
RR
2158 return ep0_start_228x(dev);
2159 return ep0_start_338x(dev);
2160}
2161
1da177e4
LT
2162/* when a driver is successfully registered, it will receive
2163 * control requests including set_configuration(), which enables
2164 * non-control requests. then usb traffic follows until a
2165 * disconnect is reported. then a host may connect again, or
2166 * the driver might get unbound.
2167 */
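/*
 * For illustration only -- a minimal function driver that binds to this
 * UDC might look roughly like the sketch below (my_bind, my_unbind,
 * my_setup and my_disconnect are made-up names, not part of this file):
 *
 *	static struct usb_gadget_driver my_driver = {
 *		.function	= "my-function",
 *		.max_speed	= USB_SPEED_HIGH,
 *		.bind		= my_bind,
 *		.unbind		= my_unbind,
 *		.setup		= my_setup,
 *		.disconnect	= my_disconnect,
 *		.driver		= { .name = "my-function" },
 *	};
 *
 * Registering such a driver with the gadget core (for example via
 * usb_gadget_probe_driver()) eventually reaches net2280_start() below.
 */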
4cf5e00b
FB
2168static int net2280_start(struct usb_gadget *_gadget,
2169 struct usb_gadget_driver *driver)
1da177e4 2170{
4cf5e00b 2171 struct net2280 *dev;
1da177e4
LT
2172 int retval;
2173 unsigned i;
2174
2175 /* insist on high speed support from the driver, since
2176 * (dev->usb->xcvrdiag & FORCE_FULL_SPEED_MODE)
2177 * "must not be used in normal operation"
2178 */
ae8e530a
RR
2179 if (!driver || driver->max_speed < USB_SPEED_HIGH ||
2180 !driver->setup)
1da177e4 2181 return -EINVAL;
4cf5e00b 2182
fae3c158 2183 dev = container_of(_gadget, struct net2280, gadget);
1da177e4 2184
adc82f77 2185 for (i = 0; i < dev->n_ep; i++)
fae3c158 2186 dev->ep[i].irqs = 0;
1da177e4
LT
2187
2188 /* hook up the driver ... */
2189 dev->softconnect = 1;
2190 driver->driver.bus = NULL;
2191 dev->driver = driver;
1da177e4 2192
fae3c158
RR
2193 retval = device_create_file(&dev->pdev->dev, &dev_attr_function);
2194 if (retval)
2195 goto err_unbind;
2196 retval = device_create_file(&dev->pdev->dev, &dev_attr_queues);
2197 if (retval)
2198 goto err_func;
1da177e4 2199
7a74c481 2200 /* enable host detection and ep0; and we're ready
1da177e4
LT
2201 * for set_configuration as well as eventual disconnect.
2202 */
fae3c158 2203 net2280_led_active(dev, 1);
adc82f77 2204
5517525e 2205 if ((dev->quirks & PLX_SUPERSPEED) && !dev->bug7734_patched)
adc82f77
RR
2206 defect7374_enable_data_eps_zero(dev);
2207
fae3c158 2208 ep0_start(dev);
1da177e4 2209
1da177e4
LT
2210 /* pci writes may still be posted */
2211 return 0;
b3899dac
JG
2212
2213err_func:
fae3c158 2214 device_remove_file(&dev->pdev->dev, &dev_attr_function);
b3899dac 2215err_unbind:
b3899dac
JG
2216 dev->driver = NULL;
2217 return retval;
1da177e4 2218}
1da177e4 2219
fae3c158 2220static void stop_activity(struct net2280 *dev, struct usb_gadget_driver *driver)
1da177e4
LT
2221{
2222 int i;
2223
2224 /* don't disconnect if it's not connected */
2225 if (dev->gadget.speed == USB_SPEED_UNKNOWN)
2226 driver = NULL;
2227
2228 /* stop hardware; prevent new request submissions;
2229 * and kill any outstanding requests.
2230 */
fae3c158 2231 usb_reset(dev);
adc82f77 2232 for (i = 0; i < dev->n_ep; i++)
fae3c158 2233 nuke(&dev->ep[i]);
1da177e4 2234
699412d9
FB
2235 /* report disconnect; the driver is already quiesced */
2236 if (driver) {
2237 spin_unlock(&dev->lock);
2238 driver->disconnect(&dev->gadget);
2239 spin_lock(&dev->lock);
2240 }
2241
fae3c158 2242 usb_reinit(dev);
1da177e4
LT
2243}
2244
22835b80 2245static int net2280_stop(struct usb_gadget *_gadget)
1da177e4 2246{
4cf5e00b 2247 struct net2280 *dev;
1da177e4
LT
2248 unsigned long flags;
2249
fae3c158 2250 dev = container_of(_gadget, struct net2280, gadget);
1da177e4 2251
fae3c158 2252 spin_lock_irqsave(&dev->lock, flags);
bfd0ed57 2253 stop_activity(dev, NULL);
fae3c158 2254 spin_unlock_irqrestore(&dev->lock, flags);
1da177e4 2255
fae3c158 2256 net2280_led_active(dev, 0);
2f076077 2257
fae3c158
RR
2258 device_remove_file(&dev->pdev->dev, &dev_attr_function);
2259 device_remove_file(&dev->pdev->dev, &dev_attr_queues);
1da177e4 2260
bfd0ed57 2261 dev->driver = NULL;
84237bfb 2262
1da177e4
LT
2263 return 0;
2264}
1da177e4
LT
2265
2266/*-------------------------------------------------------------------------*/
2267
2268/* handle ep0, ep-e, ep-f with 64 byte packets: packet per irq.
2269 * also works for dma-capable endpoints, in pio mode or just
2270 * to manually advance the queue after short OUT transfers.
2271 */
fae3c158 2272static void handle_ep_small(struct net2280_ep *ep)
1da177e4
LT
2273{
2274 struct net2280_request *req;
2275 u32 t;
2276 /* 0 error, 1 mid-data, 2 done */
2277 int mode = 1;
2278
fae3c158
RR
2279 if (!list_empty(&ep->queue))
2280 req = list_entry(ep->queue.next,
1da177e4
LT
2281 struct net2280_request, queue);
2282 else
2283 req = NULL;
2284
2285 /* ack all, and handle what we care about */
fae3c158 2286 t = readl(&ep->regs->ep_stat);
1da177e4 2287 ep->irqs++;
cb442ee1 2288
e56e69cc 2289 ep_vdbg(ep->dev, "%s ack ep_stat %08x, req %p\n",
fc12c68b 2290 ep->ep.name, t, req ? &req->req : NULL);
cb442ee1 2291
2eeb0016 2292 if (!ep->is_in || (ep->dev->quirks & PLX_2280))
3e76fdcb 2293 writel(t & ~BIT(NAK_OUT_PACKETS), &ep->regs->ep_stat);
950ee4c8
GL
2294 else
2295 /* Added for 2282 */
fae3c158 2296 writel(t, &ep->regs->ep_stat);
1da177e4
LT
2297
2298 /* for ep0, monitor token irqs to catch data stage length errors
2299 * and to synchronize on status.
2300 *
2301 * also, to defer reporting of protocol stalls ... here's where
2302 * data or status first appears; handling stalls here should never
2303 * cause trouble on the host side.
2304 *
2305 * control requests could be slightly faster without token synch for
2306 * status, but status can jam up that way.
2307 */
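/*
 * Put differently (matching the branches below): during an IN control
 * transfer, an OUT/PING token means the host has moved on to the status
 * stage; during an OUT transfer, a stray IN token plays the same role.
 * Extra IN tokens while an IN data stage is still pending are answered
 * with a zero length packet.
 */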
fae3c158 2308 if (unlikely(ep->num == 0)) {
1da177e4
LT
2309 if (ep->is_in) {
2310 /* status; stop NAKing */
3e76fdcb 2311 if (t & BIT(DATA_OUT_PING_TOKEN_INTERRUPT)) {
1da177e4
LT
2312 if (ep->dev->protocol_stall) {
2313 ep->stopped = 1;
fae3c158 2314 set_halt(ep);
1da177e4
LT
2315 }
2316 if (!req)
fae3c158 2317 allow_status(ep);
1da177e4
LT
2318 mode = 2;
2319 /* reply to extra IN data tokens with a zlp */
3e76fdcb 2320 } else if (t & BIT(DATA_IN_TOKEN_INTERRUPT)) {
1da177e4
LT
2321 if (ep->dev->protocol_stall) {
2322 ep->stopped = 1;
fae3c158 2323 set_halt(ep);
1da177e4 2324 mode = 2;
1f26e28d
AS
2325 } else if (ep->responded &&
2326 !req && !ep->stopped)
fae3c158 2327 write_fifo(ep, NULL);
1da177e4
LT
2328 }
2329 } else {
2330 /* status; stop NAKing */
3e76fdcb 2331 if (t & BIT(DATA_IN_TOKEN_INTERRUPT)) {
1da177e4
LT
2332 if (ep->dev->protocol_stall) {
2333 ep->stopped = 1;
fae3c158 2334 set_halt(ep);
1da177e4
LT
2335 }
2336 mode = 2;
2337 /* an extra OUT token is an error */
ae8e530a
RR
2338 } else if (((t & BIT(DATA_OUT_PING_TOKEN_INTERRUPT)) &&
2339 req &&
2340 req->req.actual == req->req.length) ||
2341 (ep->responded && !req)) {
1da177e4 2342 ep->dev->protocol_stall = 1;
fae3c158 2343 set_halt(ep);
1da177e4
LT
2344 ep->stopped = 1;
2345 if (req)
fae3c158 2346 done(ep, req, -EOVERFLOW);
1da177e4
LT
2347 req = NULL;
2348 }
2349 }
2350 }
2351
fae3c158 2352 if (unlikely(!req))
1da177e4
LT
2353 return;
2354
2355 /* manual DMA queue advance after short OUT */
fae3c158 2356 if (likely(ep->dma)) {
3e76fdcb 2357 if (t & BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT)) {
1da177e4
LT
2358 u32 count;
2359 int stopped = ep->stopped;
2360
2361 /* TRANSFERRED works around OUT_DONE erratum 0112.
2362 * we expect (N <= maxpacket) bytes; host wrote M.
2363 * iff (M < N) we won't ever see a DMA interrupt.
2364 */
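/*
 * A worked example of the case above (numbers only illustrative):
 * with maxpacket = 512 and a queued OUT request expecting N = 512
 * bytes, a host that writes only M = 100 bytes produces a short
 * packet.  Since M < N the DMA channel never signals completion, so
 * the loop below polls dmacount/dmadesc to learn how much data really
 * arrived before completing the request.
 */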
2365 ep->stopped = 1;
fae3c158 2366 for (count = 0; ; t = readl(&ep->regs->ep_stat)) {
1da177e4
LT
2367
2368 /* any preceding dma transfers must finish.
2369 * dma handles (M >= N), may empty the queue
2370 */
fae3c158 2371 scan_dma_completions(ep);
ae8e530a
RR
2372 if (unlikely(list_empty(&ep->queue) ||
2373 ep->out_overflow)) {
1da177e4
LT
2374 req = NULL;
2375 break;
2376 }
fae3c158 2377 req = list_entry(ep->queue.next,
1da177e4
LT
2378 struct net2280_request, queue);
2379
2380 /* here either (M < N), a "real" short rx;
2381 * or (M == N) and the queue didn't empty
2382 */
3e76fdcb 2383 if (likely(t & BIT(FIFO_EMPTY))) {
fae3c158 2384 count = readl(&ep->dma->dmacount);
1da177e4 2385 count &= DMA_BYTE_COUNT_MASK;
fae3c158 2386 if (readl(&ep->dma->dmadesc)
1da177e4
LT
2387 != req->td_dma)
2388 req = NULL;
2389 break;
2390 }
2391 udelay(1);
2392 }
2393
2394 /* stop DMA, leave ep NAKing */
3e76fdcb 2395 writel(BIT(DMA_ABORT), &ep->dma->dmastat);
fae3c158 2396 spin_stop_dma(ep->dma);
1da177e4 2397
fae3c158 2398 if (likely(req)) {
1da177e4 2399 req->td->dmacount = 0;
fae3c158
RR
2400 t = readl(&ep->regs->ep_avail);
2401 dma_done(ep, req, count,
901b3d75
DB
2402 (ep->out_overflow || t)
2403 ? -EOVERFLOW : 0);
1da177e4
LT
2404 }
2405
2406 /* also flush to prevent erratum 0106 trouble */
ae8e530a
RR
2407 if (unlikely(ep->out_overflow ||
2408 (ep->dev->chiprev == 0x0100 &&
2409 ep->dev->gadget.speed
2410 == USB_SPEED_FULL))) {
fae3c158 2411 out_flush(ep);
1da177e4
LT
2412 ep->out_overflow = 0;
2413 }
2414
2415 /* (re)start dma if needed, stop NAKing */
2416 ep->stopped = stopped;
fae3c158
RR
2417 if (!list_empty(&ep->queue))
2418 restart_dma(ep);
1da177e4 2419 } else
e56e69cc 2420 ep_dbg(ep->dev, "%s dma ep_stat %08x ??\n",
1da177e4
LT
2421 ep->ep.name, t);
2422 return;
2423
2424 /* data packet(s) received (in the fifo, OUT) */
3e76fdcb 2425 } else if (t & BIT(DATA_PACKET_RECEIVED_INTERRUPT)) {
fae3c158 2426 if (read_fifo(ep, req) && ep->num != 0)
1da177e4
LT
2427 mode = 2;
2428
2429 /* data packet(s) transmitted (IN) */
3e76fdcb 2430 } else if (t & BIT(DATA_PACKET_TRANSMITTED_INTERRUPT)) {
1da177e4
LT
2431 unsigned len;
2432
2433 len = req->req.length - req->req.actual;
2434 if (len > ep->ep.maxpacket)
2435 len = ep->ep.maxpacket;
2436 req->req.actual += len;
2437
2438 /* if we wrote it all, we're usually done */
fae3c158
RR
2439 /* send zlps until the status stage */
2440 if ((req->req.actual == req->req.length) &&
2441 (!req->req.zero || len != ep->ep.maxpacket) && ep->num)
1da177e4 2442 mode = 2;
1da177e4
LT
2443
2444 /* there was nothing to do ... */
2445 } else if (mode == 1)
2446 return;
2447
2448 /* done */
2449 if (mode == 2) {
2450 /* stream endpoints often resubmit/unlink in completion */
fae3c158 2451 done(ep, req, 0);
1da177e4
LT
2452
2453 /* maybe advance queue to next request */
2454 if (ep->num == 0) {
2455 /* NOTE: net2280 could let gadget driver start the
2456 * status stage later. since not all controllers let
2457 * them control that, the api doesn't (yet) allow it.
2458 */
2459 if (!ep->stopped)
fae3c158 2460 allow_status(ep);
1da177e4
LT
2461 req = NULL;
2462 } else {
fae3c158
RR
2463 if (!list_empty(&ep->queue) && !ep->stopped)
2464 req = list_entry(ep->queue.next,
1da177e4
LT
2465 struct net2280_request, queue);
2466 else
2467 req = NULL;
2468 if (req && !ep->is_in)
fae3c158 2469 stop_out_naking(ep);
1da177e4
LT
2470 }
2471 }
2472
2473 /* is there a buffer for the next packet?
2474 * for best streaming performance, make sure there is one.
2475 */
2476 if (req && !ep->stopped) {
2477
2478 /* load IN fifo with next packet (may be zlp) */
3e76fdcb 2479 if (t & BIT(DATA_PACKET_TRANSMITTED_INTERRUPT))
fae3c158 2480 write_fifo(ep, &req->req);
1da177e4
LT
2481 }
2482}
2483
fae3c158 2484static struct net2280_ep *get_ep_by_addr(struct net2280 *dev, u16 wIndex)
1da177e4
LT
2485{
2486 struct net2280_ep *ep;
2487
2488 if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
fae3c158
RR
2489 return &dev->ep[0];
2490 list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
1da177e4
LT
2491 u8 bEndpointAddress;
2492
2493 if (!ep->desc)
2494 continue;
2495 bEndpointAddress = ep->desc->bEndpointAddress;
2496 if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
2497 continue;
2498 if ((wIndex & 0x0f) == (bEndpointAddress & 0x0f))
2499 return ep;
2500 }
2501 return NULL;
2502}
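/*
 * Usage note (sketch): callers hand in the raw wIndex of a control
 * request, e.g. get_ep_by_addr(dev, w_index).  Bit 7 (USB_DIR_IN) must
 * match the endpoint's direction and the low nibble selects the
 * endpoint number; an endpoint number of 0 always maps to ep0.  A NULL
 * return means "no such endpoint" and callers answer with a protocol
 * stall.
 */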
2503
adc82f77
RR
2504static void defect7374_workaround(struct net2280 *dev, struct usb_ctrlrequest r)
2505{
2506 u32 scratch, fsmvalue;
2507 u32 ack_wait_timeout, state;
2508
2509 /* Workaround for Defect 7374 (U1/U2 erroneously rejected): */
2510 scratch = get_idx_reg(dev->regs, SCRATCH);
2511 fsmvalue = scratch & (0xf << DEFECT7374_FSM_FIELD);
2512 scratch &= ~(0xf << DEFECT7374_FSM_FIELD);
2513
2514 if (!((fsmvalue == DEFECT7374_FSM_WAITING_FOR_CONTROL_READ) &&
2515 (r.bRequestType & USB_DIR_IN)))
2516 return;
2517
2518 /* This is the first Control Read for this connection: */
3e76fdcb 2519 if (!(readl(&dev->usb->usbstat) & BIT(SUPER_SPEED_MODE))) {
adc82f77
RR
2520 /*
2521 * Connection is NOT SS:
2522 * - Connection must be FS or HS.
2523 * - This FSM state should allow workaround software to
2524 * run after the next USB connection.
2525 */
2526 scratch |= DEFECT7374_FSM_NON_SS_CONTROL_READ;
5517525e 2527 dev->bug7734_patched = 1;
adc82f77
RR
2528 goto restore_data_eps;
2529 }
2530
2531 /* Connection is SS: */
2532 for (ack_wait_timeout = 0;
2533 ack_wait_timeout < DEFECT_7374_NUMBEROF_MAX_WAIT_LOOPS;
2534 ack_wait_timeout++) {
2535
2536 state = readl(&dev->plregs->pl_ep_status_1)
2537 & (0xff << STATE);
2538 if ((state >= (ACK_GOOD_NORMAL << STATE)) &&
2539 (state <= (ACK_GOOD_MORE_ACKS_TO_COME << STATE))) {
2540 scratch |= DEFECT7374_FSM_SS_CONTROL_READ;
5517525e 2541 dev->bug7734_patched = 1;
adc82f77
RR
2542 break;
2543 }
2544
2545 /*
2546 * We have not yet received host's Data Phase ACK
2547 * - Wait and try again.
2548 */
2549 udelay(DEFECT_7374_PROCESSOR_WAIT_TIME);
2550
2551 continue;
2552 }
2553
2554
2555 if (ack_wait_timeout >= DEFECT_7374_NUMBEROF_MAX_WAIT_LOOPS) {
e56e69cc 2556 ep_err(dev, "FAIL: Defect 7374 workaround waited but failed "
adc82f77 2557 "to detect SS host's data phase ACK.");
e56e69cc 2558 ep_err(dev, "PL_EP_STATUS_1(23:16): expected from 0x11 to 0x16, "
adc82f77
RR
2559 "got 0x%2.2x.\n", state >> STATE);
2560 } else {
e56e69cc 2561 ep_warn(dev, "INFO: Defect 7374 workaround waited about\n"
adc82f77
RR
2562 "%duSec for Control Read Data Phase ACK\n",
2563 DEFECT_7374_PROCESSOR_WAIT_TIME * ack_wait_timeout);
2564 }
2565
2566restore_data_eps:
2567 /*
2568 * Restore data EPs to their pre-workaround settings (disabled,
2569 * initialized, and other details).
2570 */
2571 defect7374_disable_data_eps(dev);
2572
2573 set_idx_reg(dev->regs, SCRATCH, scratch);
2574
2575 return;
2576}
2577
e0cbb046 2578static void ep_clear_seqnum(struct net2280_ep *ep)
adc82f77
RR
2579{
2580 struct net2280 *dev = ep->dev;
2581 u32 val;
2582 static const u32 ep_pl[9] = { 0, 3, 4, 7, 8, 2, 5, 6, 9 };
2583
e0cbb046
RR
2584 val = readl(&dev->plregs->pl_ep_ctrl) & ~0x1f;
2585 val |= ep_pl[ep->num];
2586 writel(val, &dev->plregs->pl_ep_ctrl);
2587 val |= BIT(SEQUENCE_NUMBER_RESET);
2588 writel(val, &dev->plregs->pl_ep_ctrl);
adc82f77 2589
e0cbb046 2590 return;
adc82f77
RR
2591}
2592
adc82f77
RR
2593static void handle_stat0_irqs_superspeed(struct net2280 *dev,
2594 struct net2280_ep *ep, struct usb_ctrlrequest r)
2595{
2596 int tmp = 0;
2597
2598#define w_value le16_to_cpu(r.wValue)
2599#define w_index le16_to_cpu(r.wIndex)
2600#define w_length le16_to_cpu(r.wLength)
2601
2602 switch (r.bRequest) {
2603 struct net2280_ep *e;
2604 u16 status;
2605
2606 case USB_REQ_SET_CONFIGURATION:
2607 dev->addressed_state = !w_value;
2608 goto usb3_delegate;
2609
2610 case USB_REQ_GET_STATUS:
2611 switch (r.bRequestType) {
2612 case (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE):
2613 status = dev->wakeup_enable ? 0x02 : 0x00;
2614 if (dev->selfpowered)
3e76fdcb 2615 status |= BIT(0);
adc82f77
RR
2616 status |= (dev->u1_enable << 2 | dev->u2_enable << 3 |
2617 dev->ltm_enable << 4);
2618 writel(0, &dev->epregs[0].ep_irqenb);
2619 set_fifo_bytecount(ep, sizeof(status));
2620 writel((__force u32) status, &dev->epregs[0].ep_data);
2621 allow_status_338x(ep);
2622 break;
2623
2624 case (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT):
2625 e = get_ep_by_addr(dev, w_index);
2626 if (!e)
2627 goto do_stall3;
2628 status = readl(&e->regs->ep_rsp) &
3e76fdcb 2629 BIT(CLEAR_ENDPOINT_HALT);
adc82f77
RR
2630 writel(0, &dev->epregs[0].ep_irqenb);
2631 set_fifo_bytecount(ep, sizeof(status));
2632 writel((__force u32) status, &dev->epregs[0].ep_data);
2633 allow_status_338x(ep);
2634 break;
2635
2636 default:
2637 goto usb3_delegate;
2638 }
2639 break;
2640
2641 case USB_REQ_CLEAR_FEATURE:
2642 switch (r.bRequestType) {
2643 case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE):
2644 if (!dev->addressed_state) {
2645 switch (w_value) {
2646 case USB_DEVICE_U1_ENABLE:
2647 dev->u1_enable = 0;
2648 writel(readl(&dev->usb_ext->usbctl2) &
3e76fdcb 2649 ~BIT(U1_ENABLE),
adc82f77
RR
2650 &dev->usb_ext->usbctl2);
2651 allow_status_338x(ep);
2652 goto next_endpoints3;
2653
2654 case USB_DEVICE_U2_ENABLE:
2655 dev->u2_enable = 0;
2656 writel(readl(&dev->usb_ext->usbctl2) &
3e76fdcb 2657 ~BIT(U2_ENABLE),
adc82f77
RR
2658 &dev->usb_ext->usbctl2);
2659 allow_status_338x(ep);
2660 goto next_endpoints3;
2661
2662 case USB_DEVICE_LTM_ENABLE:
2663 dev->ltm_enable = 0;
2664 writel(readl(&dev->usb_ext->usbctl2) &
3e76fdcb 2665 ~BIT(LTM_ENABLE),
adc82f77
RR
2666 &dev->usb_ext->usbctl2);
2667 allow_status_338x(ep);
2668 goto next_endpoints3;
2669
2670 default:
2671 break;
2672 }
2673 }
2674 if (w_value == USB_DEVICE_REMOTE_WAKEUP) {
2675 dev->wakeup_enable = 0;
2676 writel(readl(&dev->usb->usbctl) &
3e76fdcb 2677 ~BIT(DEVICE_REMOTE_WAKEUP_ENABLE),
adc82f77
RR
2678 &dev->usb->usbctl);
2679 allow_status_338x(ep);
2680 break;
2681 }
2682 goto usb3_delegate;
2683
2684 case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT):
2685 e = get_ep_by_addr(dev, w_index);
2686 if (!e)
2687 goto do_stall3;
2688 if (w_value != USB_ENDPOINT_HALT)
2689 goto do_stall3;
e56e69cc 2690 ep_vdbg(dev, "%s clear halt\n", e->ep.name);
e0cbb046
RR
2691 /*
2692 * Workaround for SS SeqNum not cleared via
2693 * Endpoint Halt (Clear) bit; select the endpoint.
2694 */
2695 ep_clear_seqnum(e);
2696 clear_halt(e);
adc82f77
RR
2697 if (!list_empty(&e->queue) && e->td_dma)
2698 restart_dma(e);
2699 allow_status(ep);
2700 ep->stopped = 1;
2701 break;
2702
2703 default:
2704 goto usb3_delegate;
2705 }
2706 break;
2707 case USB_REQ_SET_FEATURE:
2708 switch (r.bRequestType) {
2709 case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE):
2710 if (!dev->addressed_state) {
2711 switch (w_value) {
2712 case USB_DEVICE_U1_ENABLE:
2713 dev->u1_enable = 1;
2714 writel(readl(&dev->usb_ext->usbctl2) |
3e76fdcb 2715 BIT(U1_ENABLE),
adc82f77
RR
2716 &dev->usb_ext->usbctl2);
2717 allow_status_338x(ep);
2718 goto next_endpoints3;
2719
2720 case USB_DEVICE_U2_ENABLE:
2721 dev->u2_enable = 1;
2722 writel(readl(&dev->usb_ext->usbctl2) |
3e76fdcb 2723 BIT(U2_ENABLE),
adc82f77
RR
2724 &dev->usb_ext->usbctl2);
2725 allow_status_338x(ep);
2726 goto next_endpoints3;
2727
2728 case USB_DEVICE_LTM_ENABLE:
2729 dev->ltm_enable = 1;
2730 writel(readl(&dev->usb_ext->usbctl2) |
3e76fdcb 2731 BIT(LTM_ENABLE),
adc82f77
RR
2732 &dev->usb_ext->usbctl2);
2733 allow_status_338x(ep);
2734 goto next_endpoints3;
2735 default:
2736 break;
2737 }
2738 }
2739
2740 if (w_value == USB_DEVICE_REMOTE_WAKEUP) {
2741 dev->wakeup_enable = 1;
2742 writel(readl(&dev->usb->usbctl) |
3e76fdcb 2743 BIT(DEVICE_REMOTE_WAKEUP_ENABLE),
adc82f77
RR
2744 &dev->usb->usbctl);
2745 allow_status_338x(ep);
2746 break;
2747 }
2748 goto usb3_delegate;
2749
2750 case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT):
2751 e = get_ep_by_addr(dev, w_index);
2752 if (!e || (w_value != USB_ENDPOINT_HALT))
2753 goto do_stall3;
cf8b1cde
RR
2754 ep->stopped = 1;
2755 if (ep->num == 0)
2756 ep->dev->protocol_stall = 1;
2757 else {
2758 if (ep->dma)
e721c457 2759 abort_dma(ep);
e0cbb046 2760 set_halt(ep);
cf8b1cde 2761 }
adc82f77
RR
2762 allow_status_338x(ep);
2763 break;
2764
2765 default:
2766 goto usb3_delegate;
2767 }
2768
2769 break;
2770 default:
2771
2772usb3_delegate:
e56e69cc 2773 ep_vdbg(dev, "setup %02x.%02x v%04x i%04x l%04x ep_cfg %08x\n",
adc82f77
RR
2774 r.bRequestType, r.bRequest,
2775 w_value, w_index, w_length,
2776 readl(&ep->cfg->ep_cfg));
2777
2778 ep->responded = 0;
2779 spin_unlock(&dev->lock);
2780 tmp = dev->driver->setup(&dev->gadget, &r);
2781 spin_lock(&dev->lock);
2782 }
2783do_stall3:
2784 if (tmp < 0) {
e56e69cc 2785 ep_vdbg(dev, "req %02x.%02x protocol STALL; stat %d\n",
adc82f77
RR
2786 r.bRequestType, r.bRequest, tmp);
2787 dev->protocol_stall = 1;
2788 /* TD 9.9 Halt Endpoint test. TD 9.22 Set feature test */
e0cbb046 2789 set_halt(ep);
adc82f77
RR
2790 }
2791
2792next_endpoints3:
2793
2794#undef w_value
2795#undef w_index
2796#undef w_length
2797
2798 return;
2799}
2800
fae3c158 2801static void handle_stat0_irqs(struct net2280 *dev, u32 stat)
1da177e4
LT
2802{
2803 struct net2280_ep *ep;
2804 u32 num, scratch;
2805
2806 /* most of these don't need individual acks */
3e76fdcb 2807 stat &= ~BIT(INTA_ASSERTED);
1da177e4
LT
2808 if (!stat)
2809 return;
e56e69cc 2810 /* ep_dbg(dev, "irqstat0 %04x\n", stat); */
1da177e4
LT
2811
2812 /* starting a control request? */
3e76fdcb 2813 if (unlikely(stat & BIT(SETUP_PACKET_INTERRUPT))) {
1da177e4 2814 union {
fae3c158 2815 u32 raw[2];
1da177e4
LT
2816 struct usb_ctrlrequest r;
2817 } u;
950ee4c8 2818 int tmp;
1da177e4
LT
2819 struct net2280_request *req;
2820
2821 if (dev->gadget.speed == USB_SPEED_UNKNOWN) {
adc82f77 2822 u32 val = readl(&dev->usb->usbstat);
3e76fdcb 2823 if (val & BIT(SUPER_SPEED)) {
adc82f77
RR
2824 dev->gadget.speed = USB_SPEED_SUPER;
2825 usb_ep_set_maxpacket_limit(&dev->ep[0].ep,
2826 EP0_SS_MAX_PACKET_SIZE);
3e76fdcb 2827 } else if (val & BIT(HIGH_SPEED)) {
1da177e4 2828 dev->gadget.speed = USB_SPEED_HIGH;
adc82f77
RR
2829 usb_ep_set_maxpacket_limit(&dev->ep[0].ep,
2830 EP0_HS_MAX_PACKET_SIZE);
2831 } else {
1da177e4 2832 dev->gadget.speed = USB_SPEED_FULL;
adc82f77
RR
2833 usb_ep_set_maxpacket_limit(&dev->ep[0].ep,
2834 EP0_HS_MAX_PACKET_SIZE);
2835 }
fae3c158 2836 net2280_led_speed(dev, dev->gadget.speed);
e56e69cc 2837 ep_dbg(dev, "%s\n",
fae3c158 2838 usb_speed_string(dev->gadget.speed));
1da177e4
LT
2839 }
2840
fae3c158 2841 ep = &dev->ep[0];
1da177e4
LT
2842 ep->irqs++;
2843
2844 /* make sure any leftover request state is cleared */
3e76fdcb 2845 stat &= ~BIT(ENDPOINT_0_INTERRUPT);
fae3c158
RR
2846 while (!list_empty(&ep->queue)) {
2847 req = list_entry(ep->queue.next,
1da177e4 2848 struct net2280_request, queue);
fae3c158 2849 done(ep, req, (req->req.actual == req->req.length)
1da177e4
LT
2850 ? 0 : -EPROTO);
2851 }
2852 ep->stopped = 0;
2853 dev->protocol_stall = 0;
5d1b6840 2854 if (!(dev->quirks & PLX_SUPERSPEED)) {
2eeb0016 2855 if (ep->dev->quirks & PLX_2280)
3e76fdcb
RR
2856 tmp = BIT(FIFO_OVERFLOW) |
2857 BIT(FIFO_UNDERFLOW);
adc82f77
RR
2858 else
2859 tmp = 0;
2860
3e76fdcb
RR
2861 writel(tmp | BIT(TIMEOUT) |
2862 BIT(USB_STALL_SENT) |
2863 BIT(USB_IN_NAK_SENT) |
2864 BIT(USB_IN_ACK_RCVD) |
2865 BIT(USB_OUT_PING_NAK_SENT) |
2866 BIT(USB_OUT_ACK_SENT) |
2867 BIT(SHORT_PACKET_OUT_DONE_INTERRUPT) |
2868 BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT) |
2869 BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
2870 BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
2871 BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
ae8e530a
RR
2872 BIT(DATA_IN_TOKEN_INTERRUPT),
2873 &ep->regs->ep_stat);
adc82f77
RR
2874 }
2875 u.raw[0] = readl(&dev->usb->setup0123);
2876 u.raw[1] = readl(&dev->usb->setup4567);
901b3d75 2877
fae3c158
RR
2878 cpu_to_le32s(&u.raw[0]);
2879 cpu_to_le32s(&u.raw[1]);
1da177e4 2880
5517525e 2881 if ((dev->quirks & PLX_SUPERSPEED) && !dev->bug7734_patched)
adc82f77
RR
2882 defect7374_workaround(dev, u.r);
2883
950ee4c8
GL
2884 tmp = 0;
2885
01ee7d70
DB
2886#define w_value le16_to_cpu(u.r.wValue)
2887#define w_index le16_to_cpu(u.r.wIndex)
2888#define w_length le16_to_cpu(u.r.wLength)
1da177e4
LT
2889
2890 /* ack the irq */
3e76fdcb
RR
2891 writel(BIT(SETUP_PACKET_INTERRUPT), &dev->regs->irqstat0);
2892 stat ^= BIT(SETUP_PACKET_INTERRUPT);
1da177e4
LT
2893
2894 /* watch control traffic at the token level, and force
2895 * synchronization before letting the status stage happen.
2896 * FIXME ignore tokens we'll NAK, until driver responds.
2897 * that'll mean a lot less irqs for some drivers.
2898 */
2899 ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0;
2900 if (ep->is_in) {
3e76fdcb
RR
2901 scratch = BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
2902 BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
2903 BIT(DATA_IN_TOKEN_INTERRUPT);
fae3c158 2904 stop_out_naking(ep);
1da177e4 2905 } else
3e76fdcb
RR
2906 scratch = BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
2907 BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
2908 BIT(DATA_IN_TOKEN_INTERRUPT);
fae3c158 2909 writel(scratch, &dev->epregs[0].ep_irqenb);
1da177e4
LT
2910
2911 /* we made the hardware handle most lowlevel requests;
2912 * everything else goes uplevel to the gadget code.
2913 */
1f26e28d 2914 ep->responded = 1;
adc82f77
RR
2915
2916 if (dev->gadget.speed == USB_SPEED_SUPER) {
2917 handle_stat0_irqs_superspeed(dev, ep, u.r);
2918 goto next_endpoints;
2919 }
2920
1da177e4
LT
2921 switch (u.r.bRequest) {
2922 case USB_REQ_GET_STATUS: {
2923 struct net2280_ep *e;
320f3459 2924 __le32 status;
1da177e4
LT
2925
2926 /* hw handles device and interface status */
2927 if (u.r.bRequestType != (USB_DIR_IN|USB_RECIP_ENDPOINT))
2928 goto delegate;
fae3c158
RR
2929 e = get_ep_by_addr(dev, w_index);
2930 if (!e || w_length > 2)
1da177e4
LT
2931 goto do_stall;
2932
3e76fdcb 2933 if (readl(&e->regs->ep_rsp) & BIT(SET_ENDPOINT_HALT))
fae3c158 2934 status = cpu_to_le32(1);
1da177e4 2935 else
fae3c158 2936 status = cpu_to_le32(0);
1da177e4
LT
2937
2938 /* don't bother with a request object! */
fae3c158
RR
2939 writel(0, &dev->epregs[0].ep_irqenb);
2940 set_fifo_bytecount(ep, w_length);
2941 writel((__force u32)status, &dev->epregs[0].ep_data);
2942 allow_status(ep);
e56e69cc 2943 ep_vdbg(dev, "%s stat %02x\n", ep->ep.name, status);
1da177e4
LT
2944 goto next_endpoints;
2945 }
2946 break;
2947 case USB_REQ_CLEAR_FEATURE: {
2948 struct net2280_ep *e;
2949
2950 /* hw handles device features */
2951 if (u.r.bRequestType != USB_RECIP_ENDPOINT)
2952 goto delegate;
ae8e530a 2953 if (w_value != USB_ENDPOINT_HALT || w_length != 0)
1da177e4 2954 goto do_stall;
fae3c158
RR
2955 e = get_ep_by_addr(dev, w_index);
2956 if (!e)
1da177e4 2957 goto do_stall;
8066134f 2958 if (e->wedged) {
e56e69cc 2959 ep_vdbg(dev, "%s wedged, halt not cleared\n",
8066134f
AS
2960 ep->ep.name);
2961 } else {
e56e69cc 2962 ep_vdbg(dev, "%s clear halt\n", e->ep.name);
8066134f 2963 clear_halt(e);
2eeb0016 2964 if ((ep->dev->quirks & PLX_SUPERSPEED) &&
adc82f77
RR
2965 !list_empty(&e->queue) && e->td_dma)
2966 restart_dma(e);
8066134f 2967 }
fae3c158 2968 allow_status(ep);
1da177e4
LT
2969 goto next_endpoints;
2970 }
2971 break;
2972 case USB_REQ_SET_FEATURE: {
2973 struct net2280_ep *e;
2974
2975 /* hw handles device features */
2976 if (u.r.bRequestType != USB_RECIP_ENDPOINT)
2977 goto delegate;
ae8e530a 2978 if (w_value != USB_ENDPOINT_HALT || w_length != 0)
1da177e4 2979 goto do_stall;
fae3c158
RR
2980 e = get_ep_by_addr(dev, w_index);
2981 if (!e)
1da177e4 2982 goto do_stall;
8066134f
AS
2983 if (e->ep.name == ep0name)
2984 goto do_stall;
fae3c158 2985 set_halt(e);
2eeb0016 2986 if ((dev->quirks & PLX_SUPERSPEED) && e->dma)
adc82f77 2987 abort_dma(e);
fae3c158 2988 allow_status(ep);
e56e69cc 2989 ep_vdbg(dev, "%s set halt\n", ep->ep.name);
1da177e4
LT
2990 goto next_endpoints;
2991 }
2992 break;
2993 default:
2994delegate:
e56e69cc 2995 ep_vdbg(dev, "setup %02x.%02x v%04x i%04x l%04x "
1da177e4
LT
2996 "ep_cfg %08x\n",
2997 u.r.bRequestType, u.r.bRequest,
320f3459 2998 w_value, w_index, w_length,
adc82f77 2999 readl(&ep->cfg->ep_cfg));
1f26e28d 3000 ep->responded = 0;
fae3c158
RR
3001 spin_unlock(&dev->lock);
3002 tmp = dev->driver->setup(&dev->gadget, &u.r);
3003 spin_lock(&dev->lock);
1da177e4
LT
3004 }
3005
3006 /* stall ep0 on error */
3007 if (tmp < 0) {
3008do_stall:
e56e69cc 3009 ep_vdbg(dev, "req %02x.%02x protocol STALL; stat %d\n",
1da177e4
LT
3010 u.r.bRequestType, u.r.bRequest, tmp);
3011 dev->protocol_stall = 1;
3012 }
3013
3014 /* some in/out token irq should follow; maybe stall then.
3015 * driver must queue a request (even zlp) or halt ep0
3016 * before the host times out.
3017 */
3018 }
3019
320f3459
DB
3020#undef w_value
3021#undef w_index
3022#undef w_length
3023
1da177e4
LT
3024next_endpoints:
3025 /* endpoint data irq ? */
3026 scratch = stat & 0x7f;
3027 stat &= ~0x7f;
3028 for (num = 0; scratch; num++) {
3029 u32 t;
3030
3031 /* do this endpoint's FIFO and queue need tending? */
3e76fdcb 3032 t = BIT(num);
1da177e4
LT
3033 if ((scratch & t) == 0)
3034 continue;
3035 scratch ^= t;
3036
fae3c158
RR
3037 ep = &dev->ep[num];
3038 handle_ep_small(ep);
1da177e4
LT
3039 }
3040
3041 if (stat)
e56e69cc 3042 ep_dbg(dev, "unhandled irqstat0 %08x\n", stat);
1da177e4
LT
3043}
3044
3e76fdcb
RR
3045#define DMA_INTERRUPTS (BIT(DMA_D_INTERRUPT) | \
3046 BIT(DMA_C_INTERRUPT) | \
3047 BIT(DMA_B_INTERRUPT) | \
3048 BIT(DMA_A_INTERRUPT))
1da177e4 3049#define PCI_ERROR_INTERRUPTS ( \
3e76fdcb
RR
3050 BIT(PCI_MASTER_ABORT_RECEIVED_INTERRUPT) | \
3051 BIT(PCI_TARGET_ABORT_RECEIVED_INTERRUPT) | \
3052 BIT(PCI_RETRY_ABORT_INTERRUPT))
1da177e4 3053
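/*
 * Layout note for the masks above: in irqstat1 the per-channel DMA
 * completion bits for ep-a..ep-d start at bit 9, which is why
 * handle_stat1_irqs() below shifts the masked status right by 9 and
 * maps channel n to dev->ep[n + 1].
 */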
fae3c158 3054static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
1da177e4
LT
3055{
3056 struct net2280_ep *ep;
3057 u32 tmp, num, mask, scratch;
3058
3059 /* after disconnect there's nothing else to do! */
3e76fdcb
RR
3060 tmp = BIT(VBUS_INTERRUPT) | BIT(ROOT_PORT_RESET_INTERRUPT);
3061 mask = BIT(SUPER_SPEED) | BIT(HIGH_SPEED) | BIT(FULL_SPEED);
1da177e4
LT
3062
3063 /* VBUS disconnect is indicated by VBUS_PIN and VBUS_INTERRUPT set.
fb914ebf 3064 * Root Port Reset is indicated by ROOT_PORT_RESET_INTERRUPT set and
901b3d75 3065 * both HIGH_SPEED and FULL_SPEED clear (as ROOT_PORT_RESET_INTERRUPT
1da177e4
LT
3066 * only indicates a change in the reset state).
3067 */
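/*
 * Summarizing the checks below:
 *   - VBUS_INTERRUPT set while VBUS_PIN reads 0 means a disconnect;
 *   - ROOT_PORT_RESET_INTERRUPT set with no speed bits means a bus reset.
 * Both are ignored until a speed has been negotiated, since VBUS can
 * bounce and an initial reset always occurs.
 */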
3068 if (stat & tmp) {
b611e424
AS
3069 bool reset = false;
3070 bool disconnect = false;
3071
3072 /*
3073 * Ignore disconnects and resets if the speed hasn't been set.
3074 * VBUS can bounce and there's always an initial reset.
3075 */
fae3c158 3076 writel(tmp, &dev->regs->irqstat1);
b611e424
AS
3077 if (dev->gadget.speed != USB_SPEED_UNKNOWN) {
3078 if ((stat & BIT(VBUS_INTERRUPT)) &&
3079 (readl(&dev->usb->usbctl) &
3080 BIT(VBUS_PIN)) == 0) {
3081 disconnect = true;
3082 ep_dbg(dev, "disconnect %s\n",
3083 dev->driver->driver.name);
3084 } else if ((stat & BIT(ROOT_PORT_RESET_INTERRUPT)) &&
3085 (readl(&dev->usb->usbstat) & mask)
3086 == 0) {
3087 reset = true;
3088 ep_dbg(dev, "reset %s\n",
3089 dev->driver->driver.name);
3090 }
3091
3092 if (disconnect || reset) {
3093 stop_activity(dev, dev->driver);
3094 ep0_start(dev);
3095 spin_unlock(&dev->lock);
3096 if (reset)
3097 usb_gadget_udc_reset
3098 (&dev->gadget, dev->driver);
3099 else
3100 (dev->driver->disconnect)
3101 (&dev->gadget);
3102 spin_lock(&dev->lock);
3103 return;
3104 }
1da177e4
LT
3105 }
3106 stat &= ~tmp;
3107
3108 /* vBUS can bounce ... one of many reasons to ignore the
3109 * notion of hotplug events on bus connect/disconnect!
3110 */
3111 if (!stat)
3112 return;
3113 }
3114
3115 /* NOTE: chip stays in PCI D0 state for now, but it could
3116 * enter D1 to save more power
3117 */
3e76fdcb 3118 tmp = BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT);
1da177e4 3119 if (stat & tmp) {
fae3c158 3120 writel(tmp, &dev->regs->irqstat1);
3e76fdcb 3121 if (stat & BIT(SUSPEND_REQUEST_INTERRUPT)) {
1da177e4 3122 if (dev->driver->suspend)
fae3c158 3123 dev->driver->suspend(&dev->gadget);
1da177e4 3124 if (!enable_suspend)
3e76fdcb 3125 stat &= ~BIT(SUSPEND_REQUEST_INTERRUPT);
1da177e4
LT
3126 } else {
3127 if (dev->driver->resume)
fae3c158 3128 dev->driver->resume(&dev->gadget);
1da177e4
LT
3129 /* at high speed, note erratum 0133 */
3130 }
3131 stat &= ~tmp;
3132 }
3133
3134 /* clear any other status/irqs */
3135 if (stat)
fae3c158 3136 writel(stat, &dev->regs->irqstat1);
1da177e4
LT
3137
3138 /* some status we can just ignore */
2eeb0016 3139 if (dev->quirks & PLX_2280)
3e76fdcb
RR
3140 stat &= ~(BIT(CONTROL_STATUS_INTERRUPT) |
3141 BIT(SUSPEND_REQUEST_INTERRUPT) |
3142 BIT(RESUME_INTERRUPT) |
3143 BIT(SOF_INTERRUPT));
950ee4c8 3144 else
3e76fdcb
RR
3145 stat &= ~(BIT(CONTROL_STATUS_INTERRUPT) |
3146 BIT(RESUME_INTERRUPT) |
3147 BIT(SOF_DOWN_INTERRUPT) |
3148 BIT(SOF_INTERRUPT));
950ee4c8 3149
1da177e4
LT
3150 if (!stat)
3151 return;
e56e69cc 3152 /* ep_dbg(dev, "irqstat1 %08x\n", stat);*/
1da177e4
LT
3153
3154 /* DMA status, for ep-{a,b,c,d} */
3155 scratch = stat & DMA_INTERRUPTS;
3156 stat &= ~DMA_INTERRUPTS;
3157 scratch >>= 9;
3158 for (num = 0; scratch; num++) {
3159 struct net2280_dma_regs __iomem *dma;
3160
3e76fdcb 3161 tmp = BIT(num);
1da177e4
LT
3162 if ((tmp & scratch) == 0)
3163 continue;
3164 scratch ^= tmp;
3165
fae3c158 3166 ep = &dev->ep[num + 1];
1da177e4
LT
3167 dma = ep->dma;
3168
3169 if (!dma)
3170 continue;
3171
3172 /* clear ep's dma status */
fae3c158
RR
3173 tmp = readl(&dma->dmastat);
3174 writel(tmp, &dma->dmastat);
1da177e4 3175
adc82f77 3176 /* dma sync */
2eeb0016 3177 if (dev->quirks & PLX_SUPERSPEED) {
adc82f77
RR
3178 u32 r_dmacount = readl(&dma->dmacount);
3179 if (!ep->is_in && (r_dmacount & 0x00FFFFFF) &&
3e76fdcb 3180 (tmp & BIT(DMA_TRANSACTION_DONE_INTERRUPT)))
adc82f77
RR
3181 continue;
3182 }
3183
90664198
RR
3184 if (!(tmp & BIT(DMA_TRANSACTION_DONE_INTERRUPT))) {
3185 ep_dbg(ep->dev, "%s no xact done? %08x\n",
3186 ep->ep.name, tmp);
3187 continue;
1da177e4 3188 }
90664198 3189 stop_dma(ep->dma);
1da177e4
LT
3190
3191 /* OUT transfers terminate when the data from the
3192 * host is in our memory. Process whatever's done.
3193 * On this path, we know transfer's last packet wasn't
3194 * less than req->length. NAK_OUT_PACKETS may be set,
3195 * or the FIFO may already be holding new packets.
3196 *
3197 * IN transfers can linger in the FIFO for a very
3198 * long time ... we ignore that for now, accounting
3199 * precisely (like PIO does) needs per-packet irqs
3200 */
fae3c158 3201 scan_dma_completions(ep);
1da177e4
LT
3202
3203 /* disable dma on inactive queues; else maybe restart */
90664198 3204 if (!list_empty(&ep->queue)) {
fae3c158 3205 tmp = readl(&dma->dmactl);
90664198 3206 restart_dma(ep);
1da177e4
LT
3207 }
3208 ep->irqs++;
3209 }
3210
3211 /* NOTE: there are other PCI errors we might usefully notice.
3212 * if they appear very often, here's where to try recovering.
3213 */
3214 if (stat & PCI_ERROR_INTERRUPTS) {
e56e69cc 3215 ep_err(dev, "pci dma error; stat %08x\n", stat);
1da177e4
LT
3216 stat &= ~PCI_ERROR_INTERRUPTS;
3217 /* these are fatal errors, but "maybe" they won't
3218 * happen again ...
3219 */
fae3c158
RR
3220 stop_activity(dev, dev->driver);
3221 ep0_start(dev);
1da177e4
LT
3222 stat = 0;
3223 }
3224
3225 if (stat)
e56e69cc 3226 ep_dbg(dev, "unhandled irqstat1 %08x\n", stat);
1da177e4
LT
3227}
3228
fae3c158 3229static irqreturn_t net2280_irq(int irq, void *_dev)
1da177e4
LT
3230{
3231 struct net2280 *dev = _dev;
3232
658ad5e0 3233 /* shared interrupt, not ours */
2eeb0016 3234 if ((dev->quirks & PLX_LEGACY) &&
3e76fdcb 3235 (!(readl(&dev->regs->irqstat0) & BIT(INTA_ASSERTED))))
658ad5e0
AS
3236 return IRQ_NONE;
3237
fae3c158 3238 spin_lock(&dev->lock);
1da177e4
LT
3239
3240 /* handle disconnect, dma, and more */
fae3c158 3241 handle_stat1_irqs(dev, readl(&dev->regs->irqstat1));
1da177e4
LT
3242
3243 /* control requests and PIO */
fae3c158 3244 handle_stat0_irqs(dev, readl(&dev->regs->irqstat0));
1da177e4 3245
2eeb0016 3246 if (dev->quirks & PLX_SUPERSPEED) {
adc82f77
RR
3247 /* re-enable interrupt to trigger any possible new interrupt */
3248 u32 pciirqenb1 = readl(&dev->regs->pciirqenb1);
3249 writel(pciirqenb1 & 0x7FFFFFFF, &dev->regs->pciirqenb1);
3250 writel(pciirqenb1, &dev->regs->pciirqenb1);
3251 }
3252
fae3c158 3253 spin_unlock(&dev->lock);
1da177e4
LT
3254
3255 return IRQ_HANDLED;
3256}
3257
3258/*-------------------------------------------------------------------------*/
3259
fae3c158 3260static void gadget_release(struct device *_dev)
1da177e4 3261{
fae3c158 3262 struct net2280 *dev = dev_get_drvdata(_dev);
1da177e4 3263
fae3c158 3264 kfree(dev);
1da177e4
LT
3265}
3266
3267/* tear down the binding between this driver and the pci device */
3268
fae3c158 3269static void net2280_remove(struct pci_dev *pdev)
1da177e4 3270{
fae3c158 3271 struct net2280 *dev = pci_get_drvdata(pdev);
1da177e4 3272
0f91349b
SAS
3273 usb_del_gadget_udc(&dev->gadget);
3274
6bea476c 3275 BUG_ON(dev->driver);
1da177e4
LT
3276
3277 /* then clean up the resources we allocated during probe() */
fae3c158 3278 net2280_led_shutdown(dev);
1da177e4
LT
3279 if (dev->requests) {
3280 int i;
3281 for (i = 1; i < 5; i++) {
fae3c158 3282 if (!dev->ep[i].dummy)
1da177e4 3283 continue;
fae3c158
RR
3284 pci_pool_free(dev->requests, dev->ep[i].dummy,
3285 dev->ep[i].td_dma);
1da177e4 3286 }
fae3c158 3287 pci_pool_destroy(dev->requests);
1da177e4
LT
3288 }
3289 if (dev->got_irq)
fae3c158 3290 free_irq(pdev->irq, dev);
9c864c23 3291 if (dev->quirks & PLX_SUPERSPEED)
adc82f77 3292 pci_disable_msi(pdev);
1da177e4 3293 if (dev->regs)
fae3c158 3294 iounmap(dev->regs);
1da177e4 3295 if (dev->region)
fae3c158
RR
3296 release_mem_region(pci_resource_start(pdev, 0),
3297 pci_resource_len(pdev, 0));
1da177e4 3298 if (dev->enabled)
fae3c158
RR
3299 pci_disable_device(pdev);
3300 device_remove_file(&pdev->dev, &dev_attr_registers);
1da177e4 3301
e56e69cc 3302 ep_info(dev, "unbind\n");
1da177e4
LT
3303}
3304
3305/* wrap this driver around the specified device, but
3306 * don't respond over USB until a gadget driver binds to us.
3307 */
3308
fae3c158 3309static int net2280_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1da177e4
LT
3310{
3311 struct net2280 *dev;
3312 unsigned long resource, len;
3313 void __iomem *base = NULL;
3314 int retval, i;
1da177e4 3315
1da177e4 3316 /* alloc, and start init */
fae3c158
RR
3317 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
3318 if (dev == NULL) {
1da177e4
LT
3319 retval = -ENOMEM;
3320 goto done;
3321 }
3322
fae3c158
RR
3323 pci_set_drvdata(pdev, dev);
3324 spin_lock_init(&dev->lock);
2eeb0016 3325 dev->quirks = id->driver_data;
1da177e4
LT
3326 dev->pdev = pdev;
3327 dev->gadget.ops = &net2280_ops;
2eeb0016 3328 dev->gadget.max_speed = (dev->quirks & PLX_SUPERSPEED) ?
adc82f77 3329 USB_SPEED_SUPER : USB_SPEED_HIGH;
1da177e4
LT
3330
3331 /* the "gadget" abstracts/virtualizes the controller */
1da177e4
LT
3332 dev->gadget.name = driver_name;
3333
3334 /* now all the pci goodies ... */
fae3c158
RR
3335 if (pci_enable_device(pdev) < 0) {
3336 retval = -ENODEV;
1da177e4
LT
3337 goto done;
3338 }
3339 dev->enabled = 1;
3340
3341 /* BAR 0 holds all the registers
3342 * BAR 1 is 8051 memory; unused here (note erratum 0103)
3343 * BAR 2 is fifo memory; unused here
3344 */
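/*
 * Rough BAR 0 offset map, as carved up by the assignments below (and,
 * for 338x parts, by the PLX_SUPERSPEED block further down):
 *   0x0000 core regs, 0x0080 usb, 0x0100 pci, 0x0180 dma,
 *   0x0200 dedicated eps, 0x0300 ep regs; 338x extras at 0x00b4,
 *   0x0500, 0x0700, 0x0748, 0x077c, 0x079c and 0x0800.
 */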
fae3c158
RR
3345 resource = pci_resource_start(pdev, 0);
3346 len = pci_resource_len(pdev, 0);
3347 if (!request_mem_region(resource, len, driver_name)) {
e56e69cc 3348 ep_dbg(dev, "controller already in use\n");
1da177e4
LT
3349 retval = -EBUSY;
3350 goto done;
3351 }
3352 dev->region = 1;
3353
901b3d75
DB
3354 /* FIXME provide firmware download interface to put
3355 * 8051 code into the chip, e.g. to turn on PCI PM.
3356 */
3357
fae3c158 3358 base = ioremap_nocache(resource, len);
1da177e4 3359 if (base == NULL) {
e56e69cc 3360 ep_dbg(dev, "can't map memory\n");
1da177e4
LT
3361 retval = -EFAULT;
3362 goto done;
3363 }
3364 dev->regs = (struct net2280_regs __iomem *) base;
3365 dev->usb = (struct net2280_usb_regs __iomem *) (base + 0x0080);
3366 dev->pci = (struct net2280_pci_regs __iomem *) (base + 0x0100);
3367 dev->dma = (struct net2280_dma_regs __iomem *) (base + 0x0180);
3368 dev->dep = (struct net2280_dep_regs __iomem *) (base + 0x0200);
3369 dev->epregs = (struct net2280_ep_regs __iomem *) (base + 0x0300);
3370
2eeb0016 3371 if (dev->quirks & PLX_SUPERSPEED) {
adc82f77
RR
3372 u32 fsmvalue;
3373 u32 usbstat;
3374 dev->usb_ext = (struct usb338x_usb_ext_regs __iomem *)
3375 (base + 0x00b4);
3376 dev->fiforegs = (struct usb338x_fifo_regs __iomem *)
3377 (base + 0x0500);
3378 dev->llregs = (struct usb338x_ll_regs __iomem *)
3379 (base + 0x0700);
3380 dev->ll_lfps_regs = (struct usb338x_ll_lfps_regs __iomem *)
3381 (base + 0x0748);
3382 dev->ll_tsn_regs = (struct usb338x_ll_tsn_regs __iomem *)
3383 (base + 0x077c);
3384 dev->ll_chicken_reg = (struct usb338x_ll_chi_regs __iomem *)
3385 (base + 0x079c);
3386 dev->plregs = (struct usb338x_pl_regs __iomem *)
3387 (base + 0x0800);
3388 usbstat = readl(&dev->usb->usbstat);
fae3c158 3389 dev->enhanced_mode = !!(usbstat & BIT(11));
adc82f77
RR
3390 dev->n_ep = (dev->enhanced_mode) ? 9 : 5;
3391 /* put into initial config, link up all endpoints */
3392 fsmvalue = get_idx_reg(dev->regs, SCRATCH) &
3393 (0xf << DEFECT7374_FSM_FIELD);
3394 /* See if firmware needs to set up for workaround: */
5517525e
RR
3395 if (fsmvalue == DEFECT7374_FSM_SS_CONTROL_READ) {
3396 dev->bug7734_patched = 1;
adc82f77 3397 writel(0, &dev->usb->usbctl);
5517525e
RR
3398 } else
3399 dev->bug7734_patched = 0;
3400 } else {
adc82f77
RR
3401 dev->enhanced_mode = 0;
3402 dev->n_ep = 7;
3403 /* put into initial config, link up all endpoints */
3404 writel(0, &dev->usb->usbctl);
3405 }
3406
fae3c158
RR
3407 usb_reset(dev);
3408 usb_reinit(dev);
1da177e4
LT
3409
3410 /* irq setup after old hardware is cleaned up */
3411 if (!pdev->irq) {
e56e69cc 3412 ep_err(dev, "No IRQ. Check PCI setup!\n");
1da177e4
LT
3413 retval = -ENODEV;
3414 goto done;
3415 }
c6387a48 3416
9c864c23 3417 if (dev->quirks & PLX_SUPERSPEED)
adc82f77 3418 if (pci_enable_msi(pdev))
e56e69cc 3419 ep_err(dev, "Failed to enable MSI mode\n");
adc82f77 3420
fae3c158
RR
3421 if (request_irq(pdev->irq, net2280_irq, IRQF_SHARED,
3422 driver_name, dev)) {
e56e69cc 3423 ep_err(dev, "request interrupt %d failed\n", pdev->irq);
1da177e4
LT
3424 retval = -EBUSY;
3425 goto done;
3426 }
3427 dev->got_irq = 1;
3428
3429 /* DMA setup */
3430 /* NOTE: we know only the 32 LSBs of dma addresses may be nonzero */
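/*
 * Orientation note (a rough sketch, not a spec): each DMA-capable
 * endpoint (ep-a..ep-d) gets one "dummy" descriptor from the pool
 * created below.  Its dmacount stays 0, i.e. never VALID, so the
 * channel never treats it as a live transfer; the request-queueing
 * code elsewhere in the driver uses it as a placeholder when chaining
 * real descriptors.
 */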
fae3c158
RR
3431 dev->requests = pci_pool_create("requests", pdev,
3432 sizeof(struct net2280_dma),
1da177e4
LT
3433 0 /* no alignment requirements */,
3434 0 /* or page-crossing issues */);
3435 if (!dev->requests) {
e56e69cc 3436 ep_dbg(dev, "can't get request pool\n");
1da177e4
LT
3437 retval = -ENOMEM;
3438 goto done;
3439 }
3440 for (i = 1; i < 5; i++) {
3441 struct net2280_dma *td;
3442
fae3c158
RR
3443 td = pci_pool_alloc(dev->requests, GFP_KERNEL,
3444 &dev->ep[i].td_dma);
1da177e4 3445 if (!td) {
e56e69cc 3446 ep_dbg(dev, "can't get dummy %d\n", i);
1da177e4
LT
3447 retval = -ENOMEM;
3448 goto done;
3449 }
3450 td->dmacount = 0; /* not VALID */
1da177e4 3451 td->dmadesc = td->dmaaddr;
fae3c158 3452 dev->ep[i].dummy = td;
1da177e4
LT
3453 }
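 /* Each of the four DMA-capable endpoints (ep-a..ep-d) gets one "dummy"
  * descriptor from the pool. Left with dmacount == 0 it is never VALID,
  * and with dmadesc pointing back at its own dmaaddr it serves as an
  * always-invalid, self-linked tail on which the DMA channel can safely
  * park once the real descriptor chain has been consumed.
  */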
3454
3455 /* enable lower-overhead pci memory bursts during DMA */
2eeb0016 3456 if (dev->quirks & PLX_LEGACY)
3e76fdcb
RR
3457 writel(BIT(DMA_MEMORY_WRITE_AND_INVALIDATE_ENABLE) |
3458 /*
3459 * 256 write retries may not be enough...
3460 BIT(PCI_RETRY_ABORT_ENABLE) |
3461 */
3462 BIT(DMA_READ_MULTIPLE_ENABLE) |
3463 BIT(DMA_READ_LINE_ENABLE),
3464 &dev->pci->pcimstctl);
1da177e4 3465 /* erratum 0115 shouldn't appear: Linux inits PCI_LATENCY_TIMER */
fae3c158
RR
3466 pci_set_master(pdev);
3467 pci_try_set_mwi(pdev);
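 /* pci_set_master() enables bus mastering, which the DMA engines need,
  * and pci_try_set_mwi() turns on Memory-Write-and-Invalidate when the
  * cacheline size allows it; together with the pcimstctl bits above this
  * lets the controller use the more efficient PCI burst commands.
  */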
1da177e4
LT
3468
3469 /* ... also flushes any posted pci writes */
fae3c158 3470 dev->chiprev = get_idx_reg(dev->regs, REG_CHIPREV) & 0xffff;
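 /* (a read from the device cannot complete until earlier posted writes
  *  to it have drained, which is why this read-back doubles as a flush)
  */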
1da177e4
LT
3471
3472 /* done */
e56e69cc
RR
3473 ep_info(dev, "%s\n", driver_desc);
3474 ep_info(dev, "irq %d, pci mem %p, chip rev %04x\n",
c6387a48 3475 pdev->irq, base, dev->chiprev);
d588ff58 3476 ep_info(dev, "version: " DRIVER_VERSION "; %s\n",
adc82f77 3477 dev->enhanced_mode ? "enhanced mode" : "legacy mode");
fae3c158
RR
3478 retval = device_create_file(&pdev->dev, &dev_attr_registers);
3479 if (retval)
3480 goto done;
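 /* dev_attr_registers adds a "registers" file under the PCI device in
  * sysfs; its show routine dumps controller state for debugging.
  */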
1da177e4 3481
2901df68
FB
3482 retval = usb_add_gadget_udc_release(&pdev->dev, &dev->gadget,
3483 gadget_release);
0f91349b
SAS
3484 if (retval)
3485 goto done;
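 /* Registering with the UDC core is what makes this controller available
  * for gadget drivers to bind to; gadget_release() is the release hook
  * invoked when the gadget device is finally dropped.
  */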
1da177e4
LT
3486 return 0;
3487
3488done:
3489 if (dev)
fae3c158 3490 net2280_remove(pdev);
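 /* All partial-init failures funnel through here; net2280_remove() tears
  * down only what was actually set up, guided by flags such as
  * dev->region and dev->got_irq recorded above.
  */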
1da177e4
LT
3491 return retval;
3492}
3493
2d61bde7
AS
3494/* make sure the board is quiescent; otherwise it will continue
3495 * generating IRQs across the upcoming reboot.
3496 */
3497
fae3c158 3498static void net2280_shutdown(struct pci_dev *pdev)
2d61bde7 3499{
fae3c158 3500 struct net2280 *dev = pci_get_drvdata(pdev);
2d61bde7
AS
3501
3502 /* disable IRQs */
fae3c158
RR
3503 writel(0, &dev->regs->pciirqenb0);
3504 writel(0, &dev->regs->pciirqenb1);
2d61bde7
AS
3505
3506 /* disable the pullup so the host will think we're gone */
fae3c158 3507 writel(0, &dev->usb->usbctl);
2f076077 3508
2d61bde7
AS
3509}
3510
1da177e4
LT
3511
3512/*-------------------------------------------------------------------------*/
3513
fae3c158 3514static const struct pci_device_id pci_ids[] = { {
901b3d75
DB
3515 .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
3516 .class_mask = ~0,
c2db8a8a 3517 .vendor = PCI_VENDOR_ID_PLX_LEGACY,
1da177e4
LT
3518 .device = 0x2280,
3519 .subvendor = PCI_ANY_ID,
3520 .subdevice = PCI_ANY_ID,
2eeb0016 3521 .driver_data = PLX_LEGACY | PLX_2280,
ae8e530a 3522 }, {
901b3d75
DB
3523 .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
3524 .class_mask = ~0,
c2db8a8a 3525 .vendor = PCI_VENDOR_ID_PLX_LEGACY,
950ee4c8
GL
3526 .device = 0x2282,
3527 .subvendor = PCI_ANY_ID,
3528 .subdevice = PCI_ANY_ID,
2eeb0016 3529 .driver_data = PLX_LEGACY,
ae8e530a 3530 },
adc82f77 3531 {
ae8e530a
RR
3532 .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
3533 .class_mask = ~0,
3534 .vendor = PCI_VENDOR_ID_PLX,
3535 .device = 0x3380,
3536 .subvendor = PCI_ANY_ID,
3537 .subdevice = PCI_ANY_ID,
2eeb0016 3538 .driver_data = PLX_SUPERSPEED,
adc82f77
RR
3539 },
3540 {
ae8e530a
RR
3541 .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
3542 .class_mask = ~0,
3543 .vendor = PCI_VENDOR_ID_PLX,
3544 .device = 0x3382,
3545 .subvendor = PCI_ANY_ID,
3546 .subdevice = PCI_ANY_ID,
2eeb0016 3547 .driver_data = PLX_SUPERSPEED,
adc82f77
RR
3548 },
3549{ /* end: all zeroes */ }
1da177e4 3550};
fae3c158 3551MODULE_DEVICE_TABLE(pci, pci_ids);
1da177e4
LT
3552
3553/* pci driver glue; this is a "new style" PCI driver module */
3554static struct pci_driver net2280_pci_driver = {
3555 .name = (char *) driver_name,
3556 .id_table = pci_ids,
3557
3558 .probe = net2280_probe,
3559 .remove = net2280_remove,
2d61bde7 3560 .shutdown = net2280_shutdown,
1da177e4
LT
3561
3562 /* FIXME add power management support */
3563};
3564
9a028e46
RR
3565module_pci_driver(net2280_pci_driver);
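/* module_pci_driver() expands to the usual module_init()/module_exit()
 * pair that simply registers and unregisters net2280_pci_driver with
 * the PCI core.
 */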
3566
fae3c158
RR
3567MODULE_DESCRIPTION(DRIVER_DESC);
3568MODULE_AUTHOR("David Brownell");
3569MODULE_LICENSE("GPL");