1 /*
2 * Driver for PLX NET2272 USB device controller
3 *
4 * Copyright (C) 2005-2006 PLX Technology, Inc.
5 * Copyright (C) 2006-2011 Analog Devices, Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22 #include <linux/delay.h>
23 #include <linux/device.h>
24 #include <linux/errno.h>
25 #include <linux/gpio.h>
26 #include <linux/init.h>
27 #include <linux/interrupt.h>
28 #include <linux/io.h>
29 #include <linux/ioport.h>
30 #include <linux/kernel.h>
31 #include <linux/list.h>
32 #include <linux/module.h>
33 #include <linux/moduleparam.h>
34 #include <linux/pci.h>
35 #include <linux/platform_device.h>
36 #include <linux/prefetch.h>
37 #include <linux/sched.h>
38 #include <linux/slab.h>
39 #include <linux/timer.h>
40 #include <linux/usb.h>
41 #include <linux/usb/ch9.h>
42 #include <linux/usb/gadget.h>
43
44 #include <asm/byteorder.h>
45 #include <asm/system.h>
46 #include <asm/unaligned.h>
47
48 #include "net2272.h"
49
50 #define DRIVER_DESC "PLX NET2272 USB Peripheral Controller"
51
52 static const char driver_name[] = "net2272";
53 static const char driver_vers[] = "2006 October 17/mainline";
54 static const char driver_desc[] = DRIVER_DESC;
55
56 static const char ep0name[] = "ep0";
57 static const char * const ep_name[] = {
58 ep0name,
59 "ep-a", "ep-b", "ep-c",
60 };
61
62 #define DMA_ADDR_INVALID (~(dma_addr_t)0)
63 #ifdef CONFIG_USB_GADGET_NET2272_DMA
64 /*
65 * use_dma: the NET2272 can use an external DMA controller.
66 * Note that since there is no generic DMA API, some functions,
67 * notably request_dma, start_dma, and cancel_dma, will need to be
68 * modified for your platform's particular dma controller.
69 *
70 * If use_dma is disabled, pio will be used instead.
71 */
72 static bool use_dma = 0;
73 module_param(use_dma, bool, 0644);
74
75 /*
76 * dma_ep: selects the endpoint for use with dma (1=ep-a, 2=ep-b)
77 * The NET2272 can only use dma for a single endpoint at a time.
78 * At some point this could be modified to allow either endpoint
79 * to take control of dma as it becomes available.
80 *
81 * Note that DMA should not be used on OUT endpoints unless it can
82 * be guaranteed that no short packets will arrive on an IN endpoint
83 * while the DMA operation is pending. Otherwise the OUT DMA will
84 * terminate prematurely (See NET2272 Errata 630-0213-0101)
85 */
86 static ushort dma_ep = 1;
87 module_param(dma_ep, ushort, 0644);
88
89 /*
90 * dma_mode: net2272 dma mode setting (see LOCCTL1 definition):
91 * mode 0 == Slow DREQ mode
92 * mode 1 == Fast DREQ mode
93 * mode 2 == Burst mode
94 */
95 static ushort dma_mode = 2;
96 module_param(dma_mode, ushort, 0644);
97 #else
98 #define use_dma 0
99 #define dma_ep 1
100 #define dma_mode 2
101 #endif
102
103 /*
104 * fifo_mode: net2272 buffer configuration:
105 * mode 0 == ep-{a,b,c} 512db each
106 * mode 1 == ep-a 1k, ep-{b,c} 512db
107 * mode 2 == ep-a 1k, ep-b 1k, ep-c 512db
108 * mode 3 == ep-a 1k, ep-b disabled, ep-c 512db
109 */
110 static ushort fifo_mode = 0;
111 module_param(fifo_mode, ushort, 0644);
112
113 /*
114 * enable_suspend: When enabled, the driver will respond to
115 * USB suspend requests by powering down the NET2272. Otherwise,
116 * USB suspend requests will be ignored. This is acceptable for
117 * self-powered devices. For bus-powered devices set this to 1.
118 */
119 static ushort enable_suspend = 0;
120 module_param(enable_suspend, ushort, 0644);
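
/*
 * Illustrative usage (an assumption, not part of the original source): with
 * the driver built as a module and CONFIG_USB_GADGET_NET2272_DMA enabled,
 * the parameters above could be given at load time, for example:
 *
 *   modprobe net2272 use_dma=1 dma_ep=1 dma_mode=2 fifo_mode=1
 *
 * The 0644 permissions also expose them under /sys/module/net2272/parameters.
 */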
121
122 static void assert_out_naking(struct net2272_ep *ep, const char *where)
123 {
124 u8 tmp;
125
126 #ifndef DEBUG
127 return;
128 #endif
129
130 tmp = net2272_ep_read(ep, EP_STAT0);
131 if ((tmp & (1 << NAK_OUT_PACKETS)) == 0) {
132 dev_dbg(ep->dev->dev, "%s %s %02x !NAK\n",
133 ep->ep.name, where, tmp);
134 net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
135 }
136 }
137 #define ASSERT_OUT_NAKING(ep) assert_out_naking(ep, __func__)
138
139 static void stop_out_naking(struct net2272_ep *ep)
140 {
141 u8 tmp = net2272_ep_read(ep, EP_STAT0);
142
143 if ((tmp & (1 << NAK_OUT_PACKETS)) != 0)
144 net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
145 }
146
147 #define PIPEDIR(bAddress) (usb_pipein(bAddress) ? "in" : "out")
148
149 static char *type_string(u8 bmAttributes)
150 {
151 switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) {
152 case USB_ENDPOINT_XFER_BULK: return "bulk";
153 case USB_ENDPOINT_XFER_ISOC: return "iso";
154 case USB_ENDPOINT_XFER_INT: return "intr";
155 default: return "control";
156 }
157 }
158
159 static char *buf_state_string(unsigned state)
160 {
161 switch (state) {
162 case BUFF_FREE: return "free";
163 case BUFF_VALID: return "valid";
164 case BUFF_LCL: return "local";
165 case BUFF_USB: return "usb";
166 default: return "unknown";
167 }
168 }
169
170 static char *dma_mode_string(void)
171 {
172 if (!use_dma)
173 return "PIO";
174 switch (dma_mode) {
175 case 0: return "SLOW DREQ";
176 case 1: return "FAST DREQ";
177 case 2: return "BURST";
178 default: return "invalid";
179 }
180 }
181
182 static void net2272_dequeue_all(struct net2272_ep *);
183 static int net2272_kick_dma(struct net2272_ep *, struct net2272_request *);
184 static int net2272_fifo_status(struct usb_ep *);
185
186 static struct usb_ep_ops net2272_ep_ops;
187
188 /*---------------------------------------------------------------------------*/
189
190 static int
191 net2272_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
192 {
193 struct net2272 *dev;
194 struct net2272_ep *ep;
195 u32 max;
196 u8 tmp;
197 unsigned long flags;
198
199 ep = container_of(_ep, struct net2272_ep, ep);
200 if (!_ep || !desc || ep->desc || _ep->name == ep0name
201 || desc->bDescriptorType != USB_DT_ENDPOINT)
202 return -EINVAL;
203 dev = ep->dev;
204 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
205 return -ESHUTDOWN;
206
207 max = usb_endpoint_maxp(desc) & 0x1fff;
208
209 spin_lock_irqsave(&dev->lock, flags);
210 _ep->maxpacket = max & 0x7fff;
211 ep->desc = desc;
212
213 /* net2272_ep_reset() has already been called */
214 ep->stopped = 0;
215 ep->wedged = 0;
216
217 /* set speed-dependent max packet */
218 net2272_ep_write(ep, EP_MAXPKT0, max & 0xff);
219 net2272_ep_write(ep, EP_MAXPKT1, (max & 0xff00) >> 8);
220
221 /* set type, direction, address; reset fifo counters */
222 net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
223 tmp = usb_endpoint_type(desc);
224 if (usb_endpoint_xfer_bulk(desc)) {
225 /* catch some particularly blatant driver bugs */
226 if ((dev->gadget.speed == USB_SPEED_HIGH && max != 512) ||
227 (dev->gadget.speed == USB_SPEED_FULL && max > 64)) {
228 spin_unlock_irqrestore(&dev->lock, flags);
229 return -ERANGE;
230 }
231 }
232 ep->is_iso = usb_endpoint_xfer_isoc(desc) ? 1 : 0;
233 tmp <<= ENDPOINT_TYPE;
234 tmp |= ((desc->bEndpointAddress & 0x0f) << ENDPOINT_NUMBER);
235 tmp |= usb_endpoint_dir_in(desc) << ENDPOINT_DIRECTION;
236 tmp |= (1 << ENDPOINT_ENABLE);
237
238 /* for OUT transfers, block the rx fifo until a read is posted */
239 ep->is_in = usb_endpoint_dir_in(desc);
240 if (!ep->is_in)
241 net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
242
243 net2272_ep_write(ep, EP_CFG, tmp);
244
245 /* enable irqs */
246 tmp = (1 << ep->num) | net2272_read(dev, IRQENB0);
247 net2272_write(dev, IRQENB0, tmp);
248
249 tmp = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
250 | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
251 | net2272_ep_read(ep, EP_IRQENB);
252 net2272_ep_write(ep, EP_IRQENB, tmp);
253
254 tmp = desc->bEndpointAddress;
255 dev_dbg(dev->dev, "enabled %s (ep%d%s-%s) max %04x cfg %02x\n",
256 _ep->name, tmp & 0x0f, PIPEDIR(tmp),
257 type_string(desc->bmAttributes), max,
258 net2272_ep_read(ep, EP_CFG));
259
260 spin_unlock_irqrestore(&dev->lock, flags);
261 return 0;
262 }
263
264 static void net2272_ep_reset(struct net2272_ep *ep)
265 {
266 u8 tmp;
267
268 ep->desc = NULL;
269 INIT_LIST_HEAD(&ep->queue);
270
271 ep->ep.maxpacket = ~0;
272 ep->ep.ops = &net2272_ep_ops;
273
274 /* disable irqs, endpoint */
275 net2272_ep_write(ep, EP_IRQENB, 0);
276
277 /* init to our chosen defaults, notably so that we NAK OUT
278 * packets until the driver queues a read.
279 */
280 tmp = (1 << NAK_OUT_PACKETS_MODE) | (1 << ALT_NAK_OUT_PACKETS);
281 net2272_ep_write(ep, EP_RSPSET, tmp);
282
283 tmp = (1 << INTERRUPT_MODE) | (1 << HIDE_STATUS_PHASE);
284 if (ep->num != 0)
285 tmp |= (1 << ENDPOINT_TOGGLE) | (1 << ENDPOINT_HALT);
286
287 net2272_ep_write(ep, EP_RSPCLR, tmp);
288
289 /* scrub most status bits, and flush any fifo state */
290 net2272_ep_write(ep, EP_STAT0,
291 (1 << DATA_IN_TOKEN_INTERRUPT)
292 | (1 << DATA_OUT_TOKEN_INTERRUPT)
293 | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
294 | (1 << DATA_PACKET_RECEIVED_INTERRUPT)
295 | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT));
296
297 net2272_ep_write(ep, EP_STAT1,
298 (1 << TIMEOUT)
299 | (1 << USB_OUT_ACK_SENT)
300 | (1 << USB_OUT_NAK_SENT)
301 | (1 << USB_IN_ACK_RCVD)
302 | (1 << USB_IN_NAK_SENT)
303 | (1 << USB_STALL_SENT)
304 | (1 << LOCAL_OUT_ZLP)
305 | (1 << BUFFER_FLUSH));
306
307 /* fifo size is handled separately */
308 }
309
310 static int net2272_disable(struct usb_ep *_ep)
311 {
312 struct net2272_ep *ep;
313 unsigned long flags;
314
315 ep = container_of(_ep, struct net2272_ep, ep);
316 if (!_ep || !ep->desc || _ep->name == ep0name)
317 return -EINVAL;
318
319 spin_lock_irqsave(&ep->dev->lock, flags);
320 net2272_dequeue_all(ep);
321 net2272_ep_reset(ep);
322
323 dev_vdbg(ep->dev->dev, "disabled %s\n", _ep->name);
324
325 spin_unlock_irqrestore(&ep->dev->lock, flags);
326 return 0;
327 }
328
329 /*---------------------------------------------------------------------------*/
330
331 static struct usb_request *
332 net2272_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
333 {
334 struct net2272_ep *ep;
335 struct net2272_request *req;
336
337 if (!_ep)
338 return NULL;
339 ep = container_of(_ep, struct net2272_ep, ep);
340
341 req = kzalloc(sizeof(*req), gfp_flags);
342 if (!req)
343 return NULL;
344
345 req->req.dma = DMA_ADDR_INVALID;
346 INIT_LIST_HEAD(&req->queue);
347
348 return &req->req;
349 }
350
351 static void
352 net2272_free_request(struct usb_ep *_ep, struct usb_request *_req)
353 {
354 struct net2272_ep *ep;
355 struct net2272_request *req;
356
357 ep = container_of(_ep, struct net2272_ep, ep);
358 if (!_ep || !_req)
359 return;
360
361 req = container_of(_req, struct net2272_request, req);
362 WARN_ON(!list_empty(&req->queue));
363 kfree(req);
364 }
365
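/* Retire a request: unlink it from the endpoint queue, record its final
 * status, unmap any DMA mapping, and invoke the gadget driver's completion
 * callback with the device lock temporarily dropped. */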
366 static void
367 net2272_done(struct net2272_ep *ep, struct net2272_request *req, int status)
368 {
369 struct net2272 *dev;
370 unsigned stopped = ep->stopped;
371
372 if (ep->num == 0) {
373 if (ep->dev->protocol_stall) {
374 ep->stopped = 1;
375 set_halt(ep);
376 }
377 allow_status(ep);
378 }
379
380 list_del_init(&req->queue);
381
382 if (req->req.status == -EINPROGRESS)
383 req->req.status = status;
384 else
385 status = req->req.status;
386
387 dev = ep->dev;
388 if (use_dma && ep->dma)
389 usb_gadget_unmap_request(&dev->gadget, &req->req,
390 ep->is_in);
391
392 if (status && status != -ESHUTDOWN)
393 dev_vdbg(dev->dev, "complete %s req %p stat %d len %u/%u buf %p\n",
394 ep->ep.name, &req->req, status,
395 req->req.actual, req->req.length, req->req.buf);
396
397 /* don't modify queue heads during completion callback */
398 ep->stopped = 1;
399 spin_unlock(&dev->lock);
400 req->req.complete(&ep->ep, &req->req);
401 spin_lock(&dev->lock);
402 ep->stopped = stopped;
403 }
404
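/* Copy up to 'max' bytes of one packet from the request buffer into the
 * endpoint FIFO using 16-bit writes; a trailing odd byte is written with the
 * chip temporarily switched to 8-bit mode.  Returns the number of bytes
 * written. */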
405 static int
406 net2272_write_packet(struct net2272_ep *ep, u8 *buf,
407 struct net2272_request *req, unsigned max)
408 {
409 u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA);
410 u16 *bufp;
411 unsigned length, count;
412 u8 tmp;
413
414 length = min(req->req.length - req->req.actual, max);
415 req->req.actual += length;
416
417 dev_vdbg(ep->dev->dev, "write packet %s req %p max %u len %u avail %u\n",
418 ep->ep.name, req, max, length,
419 (net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0));
420
421 count = length;
422 bufp = (u16 *)buf;
423
424 while (likely(count >= 2)) {
425 /* no byte-swap required; chip endian set during init */
426 writew(*bufp++, ep_data);
427 count -= 2;
428 }
429 buf = (u8 *)bufp;
430
431 /* write final byte by placing the NET2272 into 8-bit mode */
432 if (unlikely(count)) {
433 tmp = net2272_read(ep->dev, LOCCTL);
434 net2272_write(ep->dev, LOCCTL, tmp & ~(1 << DATA_WIDTH));
435 writeb(*buf, ep_data);
436 net2272_write(ep->dev, LOCCTL, tmp);
437 }
438 return length;
439 }
440
441 /* returns: 0: still running, 1: completed, negative: errno */
442 static int
443 net2272_write_fifo(struct net2272_ep *ep, struct net2272_request *req)
444 {
445 u8 *buf;
446 unsigned count, max;
447 int status;
448
449 dev_vdbg(ep->dev->dev, "write_fifo %s actual %d len %d\n",
450 ep->ep.name, req->req.actual, req->req.length);
451
452 /*
453 * Keep loading the endpoint until the final packet is loaded,
454 * or the endpoint buffer is full.
455 */
456 top:
457 /*
458 * Clear interrupt status
459 * - Packet Transmitted interrupt will become set again when the
460 * host successfully takes another packet
461 */
462 net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT));
463 while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_FULL))) {
464 buf = req->req.buf + req->req.actual;
465 prefetch(buf);
466
467 /* force pagesel */
468 net2272_ep_read(ep, EP_STAT0);
469
470 max = (net2272_ep_read(ep, EP_AVAIL1) << 8) |
471 (net2272_ep_read(ep, EP_AVAIL0));
472
473 if (max < ep->ep.maxpacket)
474 max = (net2272_ep_read(ep, EP_AVAIL1) << 8)
475 | (net2272_ep_read(ep, EP_AVAIL0));
476
477 count = net2272_write_packet(ep, buf, req, max);
478 /* see if we are done */
479 if (req->req.length == req->req.actual) {
480 /* validate short or zlp packet */
481 if (count < ep->ep.maxpacket)
482 set_fifo_bytecount(ep, 0);
483 net2272_done(ep, req, 0);
484
485 if (!list_empty(&ep->queue)) {
486 req = list_entry(ep->queue.next,
487 struct net2272_request,
488 queue);
489 status = net2272_kick_dma(ep, req);
490
491 if (status < 0)
492 if ((net2272_ep_read(ep, EP_STAT0)
493 & (1 << BUFFER_EMPTY)))
494 goto top;
495 }
496 return 1;
497 }
498 net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT));
499 }
500 return 0;
501 }
502
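/* Discard whatever is sitting in an OUT endpoint's FIFO: clear the OUT-token
 * and packet-received status bits and flush the buffer (the endpoint is
 * expected to be NAKing OUT packets at this point). */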
503 static void
504 net2272_out_flush(struct net2272_ep *ep)
505 {
506 ASSERT_OUT_NAKING(ep);
507
508 net2272_ep_write(ep, EP_STAT0, (1 << DATA_OUT_TOKEN_INTERRUPT)
509 | (1 << DATA_PACKET_RECEIVED_INTERRUPT));
510 net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
511 }
512
513 static int
514 net2272_read_packet(struct net2272_ep *ep, u8 *buf,
515 struct net2272_request *req, unsigned avail)
516 {
517 u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA);
518 unsigned is_short;
519 u16 *bufp;
520
521 req->req.actual += avail;
522
523 dev_vdbg(ep->dev->dev, "read packet %s req %p len %u avail %u\n",
524 ep->ep.name, req, avail,
525 (net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0));
526
527 is_short = (avail < ep->ep.maxpacket);
528
529 if (unlikely(avail == 0)) {
530 /* remove any zlp from the buffer */
531 (void)readw(ep_data);
532 return is_short;
533 }
534
535 /* Ensure we get the final byte */
536 if (unlikely(avail % 2))
537 avail++;
538 bufp = (u16 *)buf;
539
540 do {
541 *bufp++ = readw(ep_data);
542 avail -= 2;
543 } while (avail);
544
545 /*
546 * To avoid a false endpoint-available race condition, EP_STAT0 must be
547 * read twice in the case of a short transfer
548 */
549 if (net2272_ep_read(ep, EP_STAT0) & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT))
550 net2272_ep_read(ep, EP_STAT0);
551
552 return is_short;
553 }
554
555 static int
556 net2272_read_fifo(struct net2272_ep *ep, struct net2272_request *req)
557 {
558 u8 *buf;
559 unsigned is_short;
560 int count;
561 int tmp;
562 int cleanup = 0;
563 int status = -1;
564
565 dev_vdbg(ep->dev->dev, "read_fifo %s actual %d len %d\n",
566 ep->ep.name, req->req.actual, req->req.length);
567
568 top:
569 do {
570 buf = req->req.buf + req->req.actual;
571 prefetchw(buf);
572
573 count = (net2272_ep_read(ep, EP_AVAIL1) << 8)
574 | net2272_ep_read(ep, EP_AVAIL0);
575
576 net2272_ep_write(ep, EP_STAT0,
577 (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT) |
578 (1 << DATA_PACKET_RECEIVED_INTERRUPT));
579
580 tmp = req->req.length - req->req.actual;
581
582 if (count > tmp) {
583 if ((tmp % ep->ep.maxpacket) != 0) {
584 dev_err(ep->dev->dev,
585 "%s out fifo %d bytes, expected %d\n",
586 ep->ep.name, count, tmp);
587 cleanup = 1;
588 }
589 count = (tmp > 0) ? tmp : 0;
590 }
591
592 is_short = net2272_read_packet(ep, buf, req, count);
593
594 /* completion */
595 if (unlikely(cleanup || is_short ||
596 ((req->req.actual == req->req.length)
597 && !req->req.zero))) {
598
599 if (cleanup) {
600 net2272_out_flush(ep);
601 net2272_done(ep, req, -EOVERFLOW);
602 } else
603 net2272_done(ep, req, 0);
604
605 /* re-initialize endpoint transfer registers
606 * otherwise they may result in erroneous pre-validation
607 * for subsequent control reads
608 */
609 if (unlikely(ep->num == 0)) {
610 net2272_ep_write(ep, EP_TRANSFER2, 0);
611 net2272_ep_write(ep, EP_TRANSFER1, 0);
612 net2272_ep_write(ep, EP_TRANSFER0, 0);
613 }
614
615 if (!list_empty(&ep->queue)) {
616 req = list_entry(ep->queue.next,
617 struct net2272_request, queue);
618 status = net2272_kick_dma(ep, req);
619 if ((status < 0) &&
620 !(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY)))
621 goto top;
622 }
623 return 1;
624 }
625 } while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY)));
626
627 return 0;
628 }
629
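/* Advance PIO for the request at the head of the endpoint queue: write the
 * next chunk into the FIFO for IN endpoints, or drain the FIFO for OUT
 * endpoints. */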
630 static void
631 net2272_pio_advance(struct net2272_ep *ep)
632 {
633 struct net2272_request *req;
634
635 if (unlikely(list_empty(&ep->queue)))
636 return;
637
638 req = list_entry(ep->queue.next, struct net2272_request, queue);
639 (ep->is_in ? net2272_write_fifo : net2272_read_fifo)(ep, req);
640 }
641
642 /* returns 0 on success, else negative errno */
643 static int
644 net2272_request_dma(struct net2272 *dev, unsigned ep, u32 buf,
645 unsigned len, unsigned dir)
646 {
647 dev_vdbg(dev->dev, "request_dma ep %d buf %08x len %d dir %d\n",
648 ep, buf, len, dir);
649
650 /* The NET2272 only supports a single dma channel */
651 if (dev->dma_busy)
652 return -EBUSY;
653 /*
654 * EP_TRANSFER (used to determine the number of bytes received
655 * in an OUT transfer) is 24 bits wide; don't ask for more than that.
656 */
657 if ((dir == 1) && (len > 0x1000000))
658 return -EINVAL;
659
660 dev->dma_busy = 1;
661
662 /* initialize platform's dma */
663 #ifdef CONFIG_PCI
664 /* NET2272 addr, buffer addr, length, etc. */
665 switch (dev->dev_id) {
666 case PCI_DEVICE_ID_RDK1:
667 /* Setup PLX 9054 DMA mode */
668 writel((1 << LOCAL_BUS_WIDTH) |
669 (1 << TA_READY_INPUT_ENABLE) |
670 (0 << LOCAL_BURST_ENABLE) |
671 (1 << DONE_INTERRUPT_ENABLE) |
672 (1 << LOCAL_ADDRESSING_MODE) |
673 (1 << DEMAND_MODE) |
674 (1 << DMA_EOT_ENABLE) |
675 (1 << FAST_SLOW_TERMINATE_MODE_SELECT) |
676 (1 << DMA_CHANNEL_INTERRUPT_SELECT),
677 dev->rdk1.plx9054_base_addr + DMAMODE0);
678
679 writel(0x100000, dev->rdk1.plx9054_base_addr + DMALADR0);
680 writel(buf, dev->rdk1.plx9054_base_addr + DMAPADR0);
681 writel(len, dev->rdk1.plx9054_base_addr + DMASIZ0);
682 writel((dir << DIRECTION_OF_TRANSFER) |
683 (1 << INTERRUPT_AFTER_TERMINAL_COUNT),
684 dev->rdk1.plx9054_base_addr + DMADPR0);
685 writel((1 << LOCAL_DMA_CHANNEL_0_INTERRUPT_ENABLE) |
686 readl(dev->rdk1.plx9054_base_addr + INTCSR),
687 dev->rdk1.plx9054_base_addr + INTCSR);
688
689 break;
690 }
691 #endif
692
693 net2272_write(dev, DMAREQ,
694 (0 << DMA_BUFFER_VALID) |
695 (1 << DMA_REQUEST_ENABLE) |
696 (1 << DMA_CONTROL_DACK) |
697 (dev->dma_eot_polarity << EOT_POLARITY) |
698 (dev->dma_dack_polarity << DACK_POLARITY) |
699 (dev->dma_dreq_polarity << DREQ_POLARITY) |
700 ((ep >> 1) << DMA_ENDPOINT_SELECT));
701
702 (void) net2272_read(dev, SCRATCH);
703
704 return 0;
705 }
706
707 static void
708 net2272_start_dma(struct net2272 *dev)
709 {
710 /* start platform's dma controller */
711 #ifdef CONFIG_PCI
712 switch (dev->dev_id) {
713 case PCI_DEVICE_ID_RDK1:
714 writeb((1 << CHANNEL_ENABLE) | (1 << CHANNEL_START),
715 dev->rdk1.plx9054_base_addr + DMACSR0);
716 break;
717 }
718 #endif
719 }
720
721 /* returns 0 on success, else negative errno */
722 static int
723 net2272_kick_dma(struct net2272_ep *ep, struct net2272_request *req)
724 {
725 unsigned size;
726 u8 tmp;
727
728 if (!use_dma || (ep->num < 1) || (ep->num > 2) || !ep->dma)
729 return -EINVAL;
730
731 /* don't use dma for odd-length transfers
732 * otherwise, we'd need to deal with the last byte with pio
733 */
734 if (req->req.length & 1)
735 return -EINVAL;
736
737 dev_vdbg(ep->dev->dev, "kick_dma %s req %p dma %08llx\n",
738 ep->ep.name, req, (unsigned long long) req->req.dma);
739
740 net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
741
742 /* The NET2272 can only use DMA on one endpoint at a time */
743 if (ep->dev->dma_busy)
744 return -EBUSY;
745
746 /* Make sure we only DMA an even number of bytes (we'll use
747 * pio to complete the transfer)
748 */
749 size = req->req.length;
750 size &= ~1;
751
752 /* device-to-host transfer */
753 if (ep->is_in) {
754 /* initialize platform's dma controller */
755 if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 0))
756 /* unable to obtain DMA channel; return error and use pio mode */
757 return -EBUSY;
758 req->req.actual += size;
759
760 /* host-to-device transfer */
761 } else {
762 tmp = net2272_ep_read(ep, EP_STAT0);
763
764 /* initialize platform's dma controller */
765 if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 1))
766 /* unable to obtain DMA channel; return error and use pio mode */
767 return -EBUSY;
768
769 if (!(tmp & (1 << BUFFER_EMPTY)))
770 ep->not_empty = 1;
771 else
772 ep->not_empty = 0;
773
774
775 /* allow the endpoint's buffer to fill */
776 net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
777
778 /* this transfer completed and data's already in the fifo
779 * return error so pio gets used.
780 */
781 if (tmp & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)) {
782
783 /* deassert dreq */
784 net2272_write(ep->dev, DMAREQ,
785 (0 << DMA_BUFFER_VALID) |
786 (0 << DMA_REQUEST_ENABLE) |
787 (1 << DMA_CONTROL_DACK) |
788 (ep->dev->dma_eot_polarity << EOT_POLARITY) |
789 (ep->dev->dma_dack_polarity << DACK_POLARITY) |
790 (ep->dev->dma_dreq_polarity << DREQ_POLARITY) |
791 ((ep->num >> 1) << DMA_ENDPOINT_SELECT));
792
793 return -EBUSY;
794 }
795 }
796
797 /* Don't use per-packet interrupts: use dma interrupts only */
798 net2272_ep_write(ep, EP_IRQENB, 0);
799
800 net2272_start_dma(ep->dev);
801
802 return 0;
803 }
804
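/* Abort any in-flight transfer on the platform's DMA controller (the RDK1's
 * PLX 9054 in the PCI case) and mark the NET2272's single DMA channel as
 * free again. */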
805 static void net2272_cancel_dma(struct net2272 *dev)
806 {
807 #ifdef CONFIG_PCI
808 switch (dev->dev_id) {
809 case PCI_DEVICE_ID_RDK1:
810 writeb(0, dev->rdk1.plx9054_base_addr + DMACSR0);
811 writeb(1 << CHANNEL_ABORT, dev->rdk1.plx9054_base_addr + DMACSR0);
812 while (!(readb(dev->rdk1.plx9054_base_addr + DMACSR0) &
813 (1 << CHANNEL_DONE)))
814 continue; /* wait for dma to stabilize */
815
816 /* dma abort generates an interrupt */
817 writeb(1 << CHANNEL_CLEAR_INTERRUPT,
818 dev->rdk1.plx9054_base_addr + DMACSR0);
819 break;
820 }
821 #endif
822
823 dev->dma_busy = 0;
824 }
825
826 /*---------------------------------------------------------------------------*/
827
828 static int
829 net2272_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
830 {
831 struct net2272_request *req;
832 struct net2272_ep *ep;
833 struct net2272 *dev;
834 unsigned long flags;
835 int status = -1;
836 u8 s;
837
838 req = container_of(_req, struct net2272_request, req);
839 if (!_req || !_req->complete || !_req->buf
840 || !list_empty(&req->queue))
841 return -EINVAL;
842 ep = container_of(_ep, struct net2272_ep, ep);
843 if (!_ep || (!ep->desc && ep->num != 0))
844 return -EINVAL;
845 dev = ep->dev;
846 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
847 return -ESHUTDOWN;
848
849 /* set up dma mapping in case the caller didn't */
850 if (use_dma && ep->dma) {
851 status = usb_gadget_map_request(&dev->gadget, _req,
852 ep->is_in);
853 if (status)
854 return status;
855 }
856
857 dev_vdbg(dev->dev, "%s queue req %p, len %d buf %p dma %08llx %s\n",
858 _ep->name, _req, _req->length, _req->buf,
859 (unsigned long long) _req->dma, _req->zero ? "zero" : "!zero");
860
861 spin_lock_irqsave(&dev->lock, flags);
862
863 _req->status = -EINPROGRESS;
864 _req->actual = 0;
865
866 /* kickstart this i/o queue? */
867 if (list_empty(&ep->queue) && !ep->stopped) {
868 /* maybe there's no control data, just status ack */
869 if (ep->num == 0 && _req->length == 0) {
870 net2272_done(ep, req, 0);
871 dev_vdbg(dev->dev, "%s status ack\n", ep->ep.name);
872 goto done;
873 }
874
875 /* Return zlp, don't let it block subsequent packets */
876 s = net2272_ep_read(ep, EP_STAT0);
877 if (s & (1 << BUFFER_EMPTY)) {
878 /* Buffer is empty; check for a blocking zlp and handle it */
879 if ((s & (1 << NAK_OUT_PACKETS)) &&
880 net2272_ep_read(ep, EP_STAT1) & (1 << LOCAL_OUT_ZLP)) {
881 dev_dbg(dev->dev, "WARNING: returning ZLP short packet termination!\n");
882 /*
883 * Request is going to terminate with a short packet ...
884 * hope the client is ready for it!
885 */
886 status = net2272_read_fifo(ep, req);
887 /* clear short packet naking */
888 net2272_ep_write(ep, EP_STAT0, (1 << NAK_OUT_PACKETS));
889 goto done;
890 }
891 }
892
893 /* try dma first */
894 status = net2272_kick_dma(ep, req);
895
896 if (status < 0) {
897 /* dma failed (most likely in use by another endpoint)
898 * fallback to pio
899 */
900 status = 0;
901
902 if (ep->is_in)
903 status = net2272_write_fifo(ep, req);
904 else {
905 s = net2272_ep_read(ep, EP_STAT0);
906 if ((s & (1 << BUFFER_EMPTY)) == 0)
907 status = net2272_read_fifo(ep, req);
908 }
909
910 if (unlikely(status != 0)) {
911 if (status > 0)
912 status = 0;
913 req = NULL;
914 }
915 }
916 }
917 if (likely(req != 0))
918 list_add_tail(&req->queue, &ep->queue);
919
920 if (likely(!list_empty(&ep->queue)))
921 net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
922 done:
923 spin_unlock_irqrestore(&dev->lock, flags);
924
925 return 0;
926 }
927
928 /* dequeue ALL requests */
929 static void
930 net2272_dequeue_all(struct net2272_ep *ep)
931 {
932 struct net2272_request *req;
933
934 /* called with spinlock held */
935 ep->stopped = 1;
936
937 while (!list_empty(&ep->queue)) {
938 req = list_entry(ep->queue.next,
939 struct net2272_request,
940 queue);
941 net2272_done(ep, req, -ESHUTDOWN);
942 }
943 }
944
945 /* dequeue JUST ONE request */
946 static int
947 net2272_dequeue(struct usb_ep *_ep, struct usb_request *_req)
948 {
949 struct net2272_ep *ep;
950 struct net2272_request *req;
951 unsigned long flags;
952 int stopped;
953
954 ep = container_of(_ep, struct net2272_ep, ep);
955 if (!_ep || (!ep->desc && ep->num != 0) || !_req)
956 return -EINVAL;
957
958 spin_lock_irqsave(&ep->dev->lock, flags);
959 stopped = ep->stopped;
960 ep->stopped = 1;
961
962 /* make sure it's still queued on this endpoint */
963 list_for_each_entry(req, &ep->queue, queue) {
964 if (&req->req == _req)
965 break;
966 }
967 if (&req->req != _req) {
968 spin_unlock_irqrestore(&ep->dev->lock, flags);
969 return -EINVAL;
970 }
971
972 /* queue head may be partially complete */
973 if (ep->queue.next == &req->queue) {
974 dev_dbg(ep->dev->dev, "unlink (%s) pio\n", _ep->name);
975 net2272_done(ep, req, -ECONNRESET);
976 }
977 req = NULL;
978 ep->stopped = stopped;
979
980 spin_unlock_irqrestore(&ep->dev->lock, flags);
981 return 0;
982 }
983
984 /*---------------------------------------------------------------------------*/
985
986 static int
987 net2272_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
988 {
989 struct net2272_ep *ep;
990 unsigned long flags;
991 int ret = 0;
992
993 ep = container_of(_ep, struct net2272_ep, ep);
994 if (!_ep || (!ep->desc && ep->num != 0))
995 return -EINVAL;
996 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
997 return -ESHUTDOWN;
998 if (ep->desc /* not ep0 */ && usb_endpoint_xfer_isoc(ep->desc))
999 return -EINVAL;
1000
1001 spin_lock_irqsave(&ep->dev->lock, flags);
1002 if (!list_empty(&ep->queue))
1003 ret = -EAGAIN;
1004 else if (ep->is_in && value && net2272_fifo_status(_ep) != 0)
1005 ret = -EAGAIN;
1006 else {
1007 dev_vdbg(ep->dev->dev, "%s %s %s\n", _ep->name,
1008 value ? "set" : "clear",
1009 wedged ? "wedge" : "halt");
1010 /* set/clear */
1011 if (value) {
1012 if (ep->num == 0)
1013 ep->dev->protocol_stall = 1;
1014 else
1015 set_halt(ep);
1016 if (wedged)
1017 ep->wedged = 1;
1018 } else {
1019 clear_halt(ep);
1020 ep->wedged = 0;
1021 }
1022 }
1023 spin_unlock_irqrestore(&ep->dev->lock, flags);
1024
1025 return ret;
1026 }
1027
1028 static int
1029 net2272_set_halt(struct usb_ep *_ep, int value)
1030 {
1031 return net2272_set_halt_and_wedge(_ep, value, 0);
1032 }
1033
1034 static int
1035 net2272_set_wedge(struct usb_ep *_ep)
1036 {
1037 if (!_ep || _ep->name == ep0name)
1038 return -EINVAL;
1039 return net2272_set_halt_and_wedge(_ep, 1, 1);
1040 }
1041
1042 static int
1043 net2272_fifo_status(struct usb_ep *_ep)
1044 {
1045 struct net2272_ep *ep;
1046 u16 avail;
1047
1048 ep = container_of(_ep, struct net2272_ep, ep);
1049 if (!_ep || (!ep->desc && ep->num != 0))
1050 return -ENODEV;
1051 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1052 return -ESHUTDOWN;
1053
1054 avail = net2272_ep_read(ep, EP_AVAIL1) << 8;
1055 avail |= net2272_ep_read(ep, EP_AVAIL0);
1056 if (avail > ep->fifo_size)
1057 return -EOVERFLOW;
1058 if (ep->is_in)
1059 avail = ep->fifo_size - avail;
1060 return avail;
1061 }
1062
1063 static void
1064 net2272_fifo_flush(struct usb_ep *_ep)
1065 {
1066 struct net2272_ep *ep;
1067
1068 ep = container_of(_ep, struct net2272_ep, ep);
1069 if (!_ep || (!ep->desc && ep->num != 0))
1070 return;
1071 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1072 return;
1073
1074 net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
1075 }
1076
1077 static struct usb_ep_ops net2272_ep_ops = {
1078 .enable = net2272_enable,
1079 .disable = net2272_disable,
1080
1081 .alloc_request = net2272_alloc_request,
1082 .free_request = net2272_free_request,
1083
1084 .queue = net2272_queue,
1085 .dequeue = net2272_dequeue,
1086
1087 .set_halt = net2272_set_halt,
1088 .set_wedge = net2272_set_wedge,
1089 .fifo_status = net2272_fifo_status,
1090 .fifo_flush = net2272_fifo_flush,
1091 };
1092
1093 /*---------------------------------------------------------------------------*/
1094
1095 static int
1096 net2272_get_frame(struct usb_gadget *_gadget)
1097 {
1098 struct net2272 *dev;
1099 unsigned long flags;
1100 u16 ret;
1101
1102 if (!_gadget)
1103 return -ENODEV;
1104 dev = container_of(_gadget, struct net2272, gadget);
1105 spin_lock_irqsave(&dev->lock, flags);
1106
1107 ret = net2272_read(dev, FRAME1) << 8;
1108 ret |= net2272_read(dev, FRAME0);
1109
1110 spin_unlock_irqrestore(&dev->lock, flags);
1111 return ret;
1112 }
1113
1114 static int
1115 net2272_wakeup(struct usb_gadget *_gadget)
1116 {
1117 struct net2272 *dev;
1118 u8 tmp;
1119 unsigned long flags;
1120
1121 if (!_gadget)
1122 return 0;
1123 dev = container_of(_gadget, struct net2272, gadget);
1124
1125 spin_lock_irqsave(&dev->lock, flags);
1126 tmp = net2272_read(dev, USBCTL0);
1127 if (tmp & (1 << IO_WAKEUP_ENABLE))
1128 net2272_write(dev, USBCTL1, (1 << GENERATE_RESUME));
1129
1130 spin_unlock_irqrestore(&dev->lock, flags);
1131
1132 return 0;
1133 }
1134
1135 static int
1136 net2272_set_selfpowered(struct usb_gadget *_gadget, int value)
1137 {
1138 struct net2272 *dev;
1139
1140 if (!_gadget)
1141 return -ENODEV;
1142 dev = container_of(_gadget, struct net2272, gadget);
1143
1144 dev->is_selfpowered = value;
1145
1146 return 0;
1147 }
1148
1149 static int
1150 net2272_pullup(struct usb_gadget *_gadget, int is_on)
1151 {
1152 struct net2272 *dev;
1153 u8 tmp;
1154 unsigned long flags;
1155
1156 if (!_gadget)
1157 return -ENODEV;
1158 dev = container_of(_gadget, struct net2272, gadget);
1159
1160 spin_lock_irqsave(&dev->lock, flags);
1161 tmp = net2272_read(dev, USBCTL0);
1162 dev->softconnect = (is_on != 0);
1163 if (is_on)
1164 tmp |= (1 << USB_DETECT_ENABLE);
1165 else
1166 tmp &= ~(1 << USB_DETECT_ENABLE);
1167 net2272_write(dev, USBCTL0, tmp);
1168 spin_unlock_irqrestore(&dev->lock, flags);
1169
1170 return 0;
1171 }
1172
1173 static int net2272_start(struct usb_gadget *_gadget,
1174 struct usb_gadget_driver *driver);
1175 static int net2272_stop(struct usb_gadget *_gadget,
1176 struct usb_gadget_driver *driver);
1177
1178 static const struct usb_gadget_ops net2272_ops = {
1179 .get_frame = net2272_get_frame,
1180 .wakeup = net2272_wakeup,
1181 .set_selfpowered = net2272_set_selfpowered,
1182 .pullup = net2272_pullup,
1183 .udc_start = net2272_start,
1184 .udc_stop = net2272_stop,
1185 };
1186
1187 /*---------------------------------------------------------------------------*/
1188
1189 static ssize_t
1190 net2272_show_registers(struct device *_dev, struct device_attribute *attr, char *buf)
1191 {
1192 struct net2272 *dev;
1193 char *next;
1194 unsigned size, t;
1195 unsigned long flags;
1196 u8 t1, t2;
1197 int i;
1198 const char *s;
1199
1200 dev = dev_get_drvdata(_dev);
1201 next = buf;
1202 size = PAGE_SIZE;
1203 spin_lock_irqsave(&dev->lock, flags);
1204
1205 if (dev->driver)
1206 s = dev->driver->driver.name;
1207 else
1208 s = "(none)";
1209
1210 /* Main Control Registers */
1211 t = scnprintf(next, size, "%s version %s,"
1212 "chiprev %02x, locctl %02x\n"
1213 "irqenb0 %02x irqenb1 %02x "
1214 "irqstat0 %02x irqstat1 %02x\n",
1215 driver_name, driver_vers, dev->chiprev,
1216 net2272_read(dev, LOCCTL),
1217 net2272_read(dev, IRQENB0),
1218 net2272_read(dev, IRQENB1),
1219 net2272_read(dev, IRQSTAT0),
1220 net2272_read(dev, IRQSTAT1));
1221 size -= t;
1222 next += t;
1223
1224 /* DMA */
1225 t1 = net2272_read(dev, DMAREQ);
1226 t = scnprintf(next, size, "\ndmareq %02x: %s %s%s%s%s\n",
1227 t1, ep_name[(t1 & 0x01) + 1],
1228 t1 & (1 << DMA_CONTROL_DACK) ? "dack " : "",
1229 t1 & (1 << DMA_REQUEST_ENABLE) ? "reqenb " : "",
1230 t1 & (1 << DMA_REQUEST) ? "req " : "",
1231 t1 & (1 << DMA_BUFFER_VALID) ? "valid " : "");
1232 size -= t;
1233 next += t;
1234
1235 /* USB Control Registers */
1236 t1 = net2272_read(dev, USBCTL1);
1237 if (t1 & (1 << VBUS_PIN)) {
1238 if (t1 & (1 << USB_HIGH_SPEED))
1239 s = "high speed";
1240 else if (dev->gadget.speed == USB_SPEED_UNKNOWN)
1241 s = "powered";
1242 else
1243 s = "full speed";
1244 } else
1245 s = "not attached";
1246 t = scnprintf(next, size,
1247 "usbctl0 %02x usbctl1 %02x addr 0x%02x (%s)\n",
1248 net2272_read(dev, USBCTL0), t1,
1249 net2272_read(dev, OURADDR), s);
1250 size -= t;
1251 next += t;
1252
1253 /* Endpoint Registers */
1254 for (i = 0; i < 4; ++i) {
1255 struct net2272_ep *ep;
1256
1257 ep = &dev->ep[i];
1258 if (i && !ep->desc)
1259 continue;
1260
1261 t1 = net2272_ep_read(ep, EP_CFG);
1262 t2 = net2272_ep_read(ep, EP_RSPSET);
1263 t = scnprintf(next, size,
1264 "\n%s\tcfg %02x rsp (%02x) %s%s%s%s%s%s%s%s"
1265 "irqenb %02x\n",
1266 ep->ep.name, t1, t2,
1267 (t2 & (1 << ALT_NAK_OUT_PACKETS)) ? "NAK " : "",
1268 (t2 & (1 << HIDE_STATUS_PHASE)) ? "hide " : "",
1269 (t2 & (1 << AUTOVALIDATE)) ? "auto " : "",
1270 (t2 & (1 << INTERRUPT_MODE)) ? "interrupt " : "",
1271 (t2 & (1 << CONTROL_STATUS_PHASE_HANDSHAKE)) ? "status " : "",
1272 (t2 & (1 << NAK_OUT_PACKETS_MODE)) ? "NAKmode " : "",
1273 (t2 & (1 << ENDPOINT_TOGGLE)) ? "DATA1 " : "DATA0 ",
1274 (t2 & (1 << ENDPOINT_HALT)) ? "HALT " : "",
1275 net2272_ep_read(ep, EP_IRQENB));
1276 size -= t;
1277 next += t;
1278
1279 t = scnprintf(next, size,
1280 "\tstat0 %02x stat1 %02x avail %04x "
1281 "(ep%d%s-%s)%s\n",
1282 net2272_ep_read(ep, EP_STAT0),
1283 net2272_ep_read(ep, EP_STAT1),
1284 (net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0),
1285 t1 & 0x0f,
1286 ep->is_in ? "in" : "out",
1287 type_string(t1 >> 5),
1288 ep->stopped ? "*" : "");
1289 size -= t;
1290 next += t;
1291
1292 t = scnprintf(next, size,
1293 "\tep_transfer %06x\n",
1294 ((net2272_ep_read(ep, EP_TRANSFER2) & 0xff) << 16) |
1295 ((net2272_ep_read(ep, EP_TRANSFER1) & 0xff) << 8) |
1296 ((net2272_ep_read(ep, EP_TRANSFER0) & 0xff)));
1297 size -= t;
1298 next += t;
1299
1300 t1 = net2272_ep_read(ep, EP_BUFF_STATES) & 0x03;
1301 t2 = (net2272_ep_read(ep, EP_BUFF_STATES) >> 2) & 0x03;
1302 t = scnprintf(next, size,
1303 "\tbuf-a %s buf-b %s\n",
1304 buf_state_string(t1),
1305 buf_state_string(t2));
1306 size -= t;
1307 next += t;
1308 }
1309
1310 spin_unlock_irqrestore(&dev->lock, flags);
1311
1312 return PAGE_SIZE - size;
1313 }
1314 static DEVICE_ATTR(registers, S_IRUGO, net2272_show_registers, NULL);
1315
1316 /*---------------------------------------------------------------------------*/
1317
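/* Program the FIFO allocation (LOCCTL bits 7:6) and rebuild the gadget's
 * ep_list to match; ep-b is left off the list in fifo mode 3. */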
1318 static void
1319 net2272_set_fifo_mode(struct net2272 *dev, int mode)
1320 {
1321 u8 tmp;
1322
1323 tmp = net2272_read(dev, LOCCTL) & 0x3f;
1324 tmp |= (mode << 6);
1325 net2272_write(dev, LOCCTL, tmp);
1326
1327 INIT_LIST_HEAD(&dev->gadget.ep_list);
1328
1329 /* always ep-a, ep-c ... maybe not ep-b */
1330 list_add_tail(&dev->ep[1].ep.ep_list, &dev->gadget.ep_list);
1331
1332 switch (mode) {
1333 case 0:
1334 list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
1335 dev->ep[1].fifo_size = dev->ep[2].fifo_size = 512;
1336 break;
1337 case 1:
1338 list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
1339 dev->ep[1].fifo_size = 1024;
1340 dev->ep[2].fifo_size = 512;
1341 break;
1342 case 2:
1343 list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
1344 dev->ep[1].fifo_size = dev->ep[2].fifo_size = 1024;
1345 break;
1346 case 3:
1347 dev->ep[1].fifo_size = 1024;
1348 break;
1349 }
1350
1351 /* ep-c is always 2 512 byte buffers */
1352 list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list);
1353 dev->ep[3].fifo_size = 512;
1354 }
1355
1356 /*---------------------------------------------------------------------------*/
1357
1358 static void
1359 net2272_usb_reset(struct net2272 *dev)
1360 {
1361 dev->gadget.speed = USB_SPEED_UNKNOWN;
1362
1363 net2272_cancel_dma(dev);
1364
1365 net2272_write(dev, IRQENB0, 0);
1366 net2272_write(dev, IRQENB1, 0);
1367
1368 /* clear irq state */
1369 net2272_write(dev, IRQSTAT0, 0xff);
1370 net2272_write(dev, IRQSTAT1, ~(1 << SUSPEND_REQUEST_INTERRUPT));
1371
1372 net2272_write(dev, DMAREQ,
1373 (0 << DMA_BUFFER_VALID) |
1374 (0 << DMA_REQUEST_ENABLE) |
1375 (1 << DMA_CONTROL_DACK) |
1376 (dev->dma_eot_polarity << EOT_POLARITY) |
1377 (dev->dma_dack_polarity << DACK_POLARITY) |
1378 (dev->dma_dreq_polarity << DREQ_POLARITY) |
1379 ((dma_ep >> 1) << DMA_ENDPOINT_SELECT));
1380
1381 net2272_cancel_dma(dev);
1382 net2272_set_fifo_mode(dev, (fifo_mode <= 3) ? fifo_mode : 0);
1383
1384 /* Set the NET2272 ep fifo data width to 16-bit mode. For correct byte swapping,
1385 * note that the higher-level gadget drivers are expected to convert data to little endian;
1386 * enable byte swap for your local bus/cpu if needed by setting BYTE_SWAP in LOCCTL here.
1387 */
1388 net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) | (1 << DATA_WIDTH));
1389 net2272_write(dev, LOCCTL1, (dma_mode << DMA_MODE));
1390 }
1391
1392 static void
1393 net2272_usb_reinit(struct net2272 *dev)
1394 {
1395 int i;
1396
1397 /* basic endpoint init */
1398 for (i = 0; i < 4; ++i) {
1399 struct net2272_ep *ep = &dev->ep[i];
1400
1401 ep->ep.name = ep_name[i];
1402 ep->dev = dev;
1403 ep->num = i;
1404 ep->not_empty = 0;
1405
1406 if (use_dma && ep->num == dma_ep)
1407 ep->dma = 1;
1408
1409 if (i > 0 && i <= 3)
1410 ep->fifo_size = 512;
1411 else
1412 ep->fifo_size = 64;
1413 net2272_ep_reset(ep);
1414 }
1415 dev->ep[0].ep.maxpacket = 64;
1416
1417 dev->gadget.ep0 = &dev->ep[0].ep;
1418 dev->ep[0].stopped = 0;
1419 INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
1420 }
1421
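/* Prepare ep0 for control traffic: set its default handshake behaviour,
 * enable host detection and wakeup in USBCTL0, and unmask the setup-packet,
 * ep0, DMA-done, VBUS, root-port-reset and suspend-change interrupts. */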
1422 static void
1423 net2272_ep0_start(struct net2272 *dev)
1424 {
1425 struct net2272_ep *ep0 = &dev->ep[0];
1426
1427 net2272_ep_write(ep0, EP_RSPSET,
1428 (1 << NAK_OUT_PACKETS_MODE) |
1429 (1 << ALT_NAK_OUT_PACKETS));
1430 net2272_ep_write(ep0, EP_RSPCLR,
1431 (1 << HIDE_STATUS_PHASE) |
1432 (1 << CONTROL_STATUS_PHASE_HANDSHAKE));
1433 net2272_write(dev, USBCTL0,
1434 (dev->softconnect << USB_DETECT_ENABLE) |
1435 (1 << USB_ROOT_PORT_WAKEUP_ENABLE) |
1436 (1 << IO_WAKEUP_ENABLE));
1437 net2272_write(dev, IRQENB0,
1438 (1 << SETUP_PACKET_INTERRUPT_ENABLE) |
1439 (1 << ENDPOINT_0_INTERRUPT_ENABLE) |
1440 (1 << DMA_DONE_INTERRUPT_ENABLE));
1441 net2272_write(dev, IRQENB1,
1442 (1 << VBUS_INTERRUPT_ENABLE) |
1443 (1 << ROOT_PORT_RESET_INTERRUPT_ENABLE) |
1444 (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE));
1445 }
1446
1447 /* when a driver is successfully registered, it will receive
1448 * control requests including set_configuration(), which enables
1449 * non-control requests. then usb traffic follows until a
1450 * disconnect is reported. then a host may connect again, or
1451 * the driver might get unbound.
1452 */
1453 static int net2272_start(struct usb_gadget *_gadget,
1454 struct usb_gadget_driver *driver)
1455 {
1456 struct net2272 *dev;
1457 unsigned i;
1458
1459 if (!driver || !driver->unbind || !driver->setup ||
1460 driver->max_speed != USB_SPEED_HIGH)
1461 return -EINVAL;
1462
1463 dev = container_of(_gadget, struct net2272, gadget);
1464
1465 for (i = 0; i < 4; ++i)
1466 dev->ep[i].irqs = 0;
1467 /* hook up the driver ... */
1468 dev->softconnect = 1;
1469 driver->driver.bus = NULL;
1470 dev->driver = driver;
1471 dev->gadget.dev.driver = &driver->driver;
1472
1473 /* ... then enable host detection and ep0; and we're ready
1474 * for set_configuration as well as eventual disconnect.
1475 */
1476 net2272_ep0_start(dev);
1477
1478 dev_dbg(dev->dev, "%s ready\n", driver->driver.name);
1479
1480 return 0;
1481 }
1482
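/* Common shutdown path for bus reset, disconnect and udc_stop: quiesce the
 * chip, complete every queued request with -ESHUTDOWN, and reinitialize
 * endpoint state.  Called with the device lock held. */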
1483 static void
1484 stop_activity(struct net2272 *dev, struct usb_gadget_driver *driver)
1485 {
1486 int i;
1487
1488 /* don't disconnect if it's not connected */
1489 if (dev->gadget.speed == USB_SPEED_UNKNOWN)
1490 driver = NULL;
1491
1492 /* stop hardware; prevent new request submissions;
1493 * and kill any outstanding requests.
1494 */
1495 net2272_usb_reset(dev);
1496 for (i = 0; i < 4; ++i)
1497 net2272_dequeue_all(&dev->ep[i]);
1498
1499 net2272_usb_reinit(dev);
1500 }
1501
1502 static int net2272_stop(struct usb_gadget *_gadget,
1503 struct usb_gadget_driver *driver)
1504 {
1505 struct net2272 *dev;
1506 unsigned long flags;
1507
1508 dev = container_of(_gadget, struct net2272, gadget);
1509
1510 spin_lock_irqsave(&dev->lock, flags);
1511 stop_activity(dev, driver);
1512 spin_unlock_irqrestore(&dev->lock, flags);
1513
1514 dev->gadget.dev.driver = NULL;
1515 dev->driver = NULL;
1516
1517 dev_dbg(dev->dev, "unregistered driver '%s'\n", driver->driver.name);
1518 return 0;
1519 }
1520
1521 /*---------------------------------------------------------------------------*/
1522 /* handle ep-a/ep-b dma completions */
1523 static void
1524 net2272_handle_dma(struct net2272_ep *ep)
1525 {
1526 struct net2272_request *req;
1527 unsigned len;
1528 int status;
1529
1530 if (!list_empty(&ep->queue))
1531 req = list_entry(ep->queue.next,
1532 struct net2272_request, queue);
1533 else
1534 req = NULL;
1535
1536 dev_vdbg(ep->dev->dev, "handle_dma %s req %p\n", ep->ep.name, req);
1537
1538 /* Ensure DREQ is de-asserted */
1539 net2272_write(ep->dev, DMAREQ,
1540 (0 << DMA_BUFFER_VALID)
1541 | (0 << DMA_REQUEST_ENABLE)
1542 | (1 << DMA_CONTROL_DACK)
1543 | (ep->dev->dma_eot_polarity << EOT_POLARITY)
1544 | (ep->dev->dma_dack_polarity << DACK_POLARITY)
1545 | (ep->dev->dma_dreq_polarity << DREQ_POLARITY)
1546 | ((ep->dma >> 1) << DMA_ENDPOINT_SELECT));
1547
1548 ep->dev->dma_busy = 0;
1549
1550 net2272_ep_write(ep, EP_IRQENB,
1551 (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
1552 | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
1553 | net2272_ep_read(ep, EP_IRQENB));
1554
1555 /* device-to-host transfer completed */
1556 if (ep->is_in) {
1557 /* validate a short packet or zlp if necessary */
1558 if ((req->req.length % ep->ep.maxpacket != 0) ||
1559 req->req.zero)
1560 set_fifo_bytecount(ep, 0);
1561
1562 net2272_done(ep, req, 0);
1563 if (!list_empty(&ep->queue)) {
1564 req = list_entry(ep->queue.next,
1565 struct net2272_request, queue);
1566 status = net2272_kick_dma(ep, req);
1567 if (status < 0)
1568 net2272_pio_advance(ep);
1569 }
1570
1571 /* host-to-device transfer completed */
1572 } else {
1573 /* terminated with a short packet? */
1574 if (net2272_read(ep->dev, IRQSTAT0) &
1575 (1 << DMA_DONE_INTERRUPT)) {
1576 /* abort system dma */
1577 net2272_cancel_dma(ep->dev);
1578 }
1579
1580 /* EP_TRANSFER will contain the number of bytes
1581 * actually received.
1582 * NOTE: There is no overflow detection on EP_TRANSFER:
1583 * We can't deal with transfers larger than 2^24 bytes!
1584 */
1585 len = (net2272_ep_read(ep, EP_TRANSFER2) << 16)
1586 | (net2272_ep_read(ep, EP_TRANSFER1) << 8)
1587 | (net2272_ep_read(ep, EP_TRANSFER0));
1588
1589 if (ep->not_empty)
1590 len += 4;
1591
1592 req->req.actual += len;
1593
1594 /* get any remaining data */
1595 net2272_pio_advance(ep);
1596 }
1597 }
1598
1599 /*---------------------------------------------------------------------------*/
1600
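/* Service a data-endpoint interrupt: acknowledge the latched EP_STAT0/EP_STAT1
 * bits and advance the PIO queue in whichever direction the endpoint moves
 * data. */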
1601 static void
1602 net2272_handle_ep(struct net2272_ep *ep)
1603 {
1604 struct net2272_request *req;
1605 u8 stat0, stat1;
1606
1607 if (!list_empty(&ep->queue))
1608 req = list_entry(ep->queue.next,
1609 struct net2272_request, queue);
1610 else
1611 req = NULL;
1612
1613 /* ack all, and handle what we care about */
1614 stat0 = net2272_ep_read(ep, EP_STAT0);
1615 stat1 = net2272_ep_read(ep, EP_STAT1);
1616 ep->irqs++;
1617
1618 dev_vdbg(ep->dev->dev, "%s ack ep_stat0 %02x, ep_stat1 %02x, req %p\n",
1619 ep->ep.name, stat0, stat1, req ? &req->req : 0);
1620
1621 net2272_ep_write(ep, EP_STAT0, stat0 &
1622 ~((1 << NAK_OUT_PACKETS)
1623 | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)));
1624 net2272_ep_write(ep, EP_STAT1, stat1);
1625
1626 /* data packet(s) received (in the fifo, OUT)
1627 * direction must be validated, otherwise control read status phase
1628 * could be interpreted as a valid packet
1629 */
1630 if (!ep->is_in && (stat0 & (1 << DATA_PACKET_RECEIVED_INTERRUPT)))
1631 net2272_pio_advance(ep);
1632 /* data packet(s) transmitted (IN) */
1633 else if (stat0 & (1 << DATA_PACKET_TRANSMITTED_INTERRUPT))
1634 net2272_pio_advance(ep);
1635 }
1636
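/* Translate the wIndex of a control request into the matching enabled
 * endpoint, or NULL if none matches; endpoint 0 matches either direction. */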
1637 static struct net2272_ep *
1638 net2272_get_ep_by_addr(struct net2272 *dev, u16 wIndex)
1639 {
1640 struct net2272_ep *ep;
1641
1642 if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
1643 return &dev->ep[0];
1644
1645 list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
1646 u8 bEndpointAddress;
1647
1648 if (!ep->desc)
1649 continue;
1650 bEndpointAddress = ep->desc->bEndpointAddress;
1651 if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
1652 continue;
1653 if ((wIndex & 0x0f) == (bEndpointAddress & 0x0f))
1654 return ep;
1655 }
1656 return NULL;
1657 }
1658
1659 /*
1660 * USB Test Packet:
1661 * JKJKJKJK * 9
1662 * JJKKJJKK * 8
1663 * JJJJKKKK * 8
1664 * JJJJJJJKKKKKKK * 8
1665 * JJJJJJJK * 8
1666 * {JKKKKKKK * 10}, JK
1667 */
1668 static const u8 net2272_test_packet[] = {
1669 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1670 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
1671 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE,
1672 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
1673 0x7F, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD,
1674 0xFC, 0x7E, 0xBF, 0xDF, 0xEF, 0xF7, 0xFD, 0x7E
1675 };
1676
1677 static void
1678 net2272_set_test_mode(struct net2272 *dev, int mode)
1679 {
1680 int i;
1681
1682 /* Disable all net2272 interrupts:
1683 * Nothing but a power cycle should stop the test.
1684 */
1685 net2272_write(dev, IRQENB0, 0x00);
1686 net2272_write(dev, IRQENB1, 0x00);
1687
1688 /* Force transceiver to high-speed */
1689 net2272_write(dev, XCVRDIAG, 1 << FORCE_HIGH_SPEED);
1690
1691 net2272_write(dev, PAGESEL, 0);
1692 net2272_write(dev, EP_STAT0, 1 << DATA_PACKET_TRANSMITTED_INTERRUPT);
1693 net2272_write(dev, EP_RSPCLR,
1694 (1 << CONTROL_STATUS_PHASE_HANDSHAKE)
1695 | (1 << HIDE_STATUS_PHASE));
1696 net2272_write(dev, EP_CFG, 1 << ENDPOINT_DIRECTION);
1697 net2272_write(dev, EP_STAT1, 1 << BUFFER_FLUSH);
1698
1699 /* wait for status phase to complete */
1700 while (!(net2272_read(dev, EP_STAT0) &
1701 (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)))
1702 ;
1703
1704 /* Enable test mode */
1705 net2272_write(dev, USBTEST, mode);
1706
1707 /* load test packet */
1708 if (mode == TEST_PACKET) {
1709 /* switch to 8 bit mode */
1710 net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) &
1711 ~(1 << DATA_WIDTH));
1712
1713 for (i = 0; i < sizeof(net2272_test_packet); ++i)
1714 net2272_write(dev, EP_DATA, net2272_test_packet[i]);
1715
1716 /* Validate test packet */
1717 net2272_write(dev, EP_TRANSFER0, 0);
1718 }
1719 }
1720
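/* Handle IRQSTAT0 sources: a new SETUP packet (including the standard
 * requests the driver answers on its own), the DMA-done interrupt, and the
 * per-endpoint data interrupts. */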
1721 static void
1722 net2272_handle_stat0_irqs(struct net2272 *dev, u8 stat)
1723 {
1724 struct net2272_ep *ep;
1725 u8 num, scratch;
1726
1727 /* starting a control request? */
1728 if (unlikely(stat & (1 << SETUP_PACKET_INTERRUPT))) {
1729 union {
1730 u8 raw[8];
1731 struct usb_ctrlrequest r;
1732 } u;
1733 int tmp = 0;
1734 struct net2272_request *req;
1735
1736 if (dev->gadget.speed == USB_SPEED_UNKNOWN) {
1737 if (net2272_read(dev, USBCTL1) & (1 << USB_HIGH_SPEED))
1738 dev->gadget.speed = USB_SPEED_HIGH;
1739 else
1740 dev->gadget.speed = USB_SPEED_FULL;
1741 dev_dbg(dev->dev, "%s\n",
1742 usb_speed_string(dev->gadget.speed));
1743 }
1744
1745 ep = &dev->ep[0];
1746 ep->irqs++;
1747
1748 /* make sure any leftover interrupt state is cleared */
1749 stat &= ~(1 << ENDPOINT_0_INTERRUPT);
1750 while (!list_empty(&ep->queue)) {
1751 req = list_entry(ep->queue.next,
1752 struct net2272_request, queue);
1753 net2272_done(ep, req,
1754 (req->req.actual == req->req.length) ? 0 : -EPROTO);
1755 }
1756 ep->stopped = 0;
1757 dev->protocol_stall = 0;
1758 net2272_ep_write(ep, EP_STAT0,
1759 (1 << DATA_IN_TOKEN_INTERRUPT)
1760 | (1 << DATA_OUT_TOKEN_INTERRUPT)
1761 | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
1762 | (1 << DATA_PACKET_RECEIVED_INTERRUPT)
1763 | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT));
1764 net2272_ep_write(ep, EP_STAT1,
1765 (1 << TIMEOUT)
1766 | (1 << USB_OUT_ACK_SENT)
1767 | (1 << USB_OUT_NAK_SENT)
1768 | (1 << USB_IN_ACK_RCVD)
1769 | (1 << USB_IN_NAK_SENT)
1770 | (1 << USB_STALL_SENT)
1771 | (1 << LOCAL_OUT_ZLP));
1772
1773 /*
1774 * Ensure Control Read pre-validation setting is beyond maximum size
1775 * - Control Writes can leave non-zero values in EP_TRANSFER. If
1776 * an EP0 transfer following the Control Write is a Control Read,
1777 * the NET2272 sees the non-zero EP_TRANSFER as an unexpected
1778 * pre-validation count.
1779 * - Setting EP_TRANSFER beyond the maximum EP0 transfer size ensures
1780 * the pre-validation count cannot cause an unexpected validation
1781 */
1782 net2272_write(dev, PAGESEL, 0);
1783 net2272_write(dev, EP_TRANSFER2, 0xff);
1784 net2272_write(dev, EP_TRANSFER1, 0xff);
1785 net2272_write(dev, EP_TRANSFER0, 0xff);
1786
1787 u.raw[0] = net2272_read(dev, SETUP0);
1788 u.raw[1] = net2272_read(dev, SETUP1);
1789 u.raw[2] = net2272_read(dev, SETUP2);
1790 u.raw[3] = net2272_read(dev, SETUP3);
1791 u.raw[4] = net2272_read(dev, SETUP4);
1792 u.raw[5] = net2272_read(dev, SETUP5);
1793 u.raw[6] = net2272_read(dev, SETUP6);
1794 u.raw[7] = net2272_read(dev, SETUP7);
1795 /*
1796 * If you have a big endian cpu make sure le16_to_cpus
1797 * performs the proper byte swapping here...
1798 */
1799 le16_to_cpus(&u.r.wValue);
1800 le16_to_cpus(&u.r.wIndex);
1801 le16_to_cpus(&u.r.wLength);
1802
1803 /* ack the irq */
1804 net2272_write(dev, IRQSTAT0, 1 << SETUP_PACKET_INTERRUPT);
1805 stat ^= (1 << SETUP_PACKET_INTERRUPT);
1806
1807 /* watch control traffic at the token level, and force
1808 * synchronization before letting the status phase happen.
1809 */
1810 ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0;
1811 if (ep->is_in) {
1812 scratch = (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
1813 | (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE)
1814 | (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE);
1815 stop_out_naking(ep);
1816 } else
1817 scratch = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
1818 | (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE)
1819 | (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE);
1820 net2272_ep_write(ep, EP_IRQENB, scratch);
1821
1822 if ((u.r.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD)
1823 goto delegate;
1824 switch (u.r.bRequest) {
1825 case USB_REQ_GET_STATUS: {
1826 struct net2272_ep *e;
1827 u16 status = 0;
1828
1829 switch (u.r.bRequestType & USB_RECIP_MASK) {
1830 case USB_RECIP_ENDPOINT:
1831 e = net2272_get_ep_by_addr(dev, u.r.wIndex);
1832 if (!e || u.r.wLength > 2)
1833 goto do_stall;
1834 if (net2272_ep_read(e, EP_RSPSET) & (1 << ENDPOINT_HALT))
1835 status = __constant_cpu_to_le16(1);
1836 else
1837 status = __constant_cpu_to_le16(0);
1838
1839 /* don't bother with a request object! */
1840 net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
1841 writew(status, net2272_reg_addr(dev, EP_DATA));
1842 set_fifo_bytecount(&dev->ep[0], 0);
1843 allow_status(ep);
1844 dev_vdbg(dev->dev, "%s stat %02x\n",
1845 ep->ep.name, status);
1846 goto next_endpoints;
1847 case USB_RECIP_DEVICE:
1848 if (u.r.wLength > 2)
1849 goto do_stall;
1850 if (dev->is_selfpowered)
1851 status = (1 << USB_DEVICE_SELF_POWERED);
1852
1853 /* don't bother with a request object! */
1854 net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
1855 writew(status, net2272_reg_addr(dev, EP_DATA));
1856 set_fifo_bytecount(&dev->ep[0], 0);
1857 allow_status(ep);
1858 dev_vdbg(dev->dev, "device stat %02x\n", status);
1859 goto next_endpoints;
1860 case USB_RECIP_INTERFACE:
1861 if (u.r.wLength > 2)
1862 goto do_stall;
1863
1864 /* don't bother with a request object! */
1865 net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
1866 writew(status, net2272_reg_addr(dev, EP_DATA));
1867 set_fifo_bytecount(&dev->ep[0], 0);
1868 allow_status(ep);
1869 dev_vdbg(dev->dev, "interface status %02x\n", status);
1870 goto next_endpoints;
1871 }
1872
1873 break;
1874 }
1875 case USB_REQ_CLEAR_FEATURE: {
1876 struct net2272_ep *e;
1877
1878 if (u.r.bRequestType != USB_RECIP_ENDPOINT)
1879 goto delegate;
1880 if (u.r.wValue != USB_ENDPOINT_HALT ||
1881 u.r.wLength != 0)
1882 goto do_stall;
1883 e = net2272_get_ep_by_addr(dev, u.r.wIndex);
1884 if (!e)
1885 goto do_stall;
1886 if (e->wedged) {
1887 dev_vdbg(dev->dev, "%s wedged, halt not cleared\n",
1888 ep->ep.name);
1889 } else {
1890 dev_vdbg(dev->dev, "%s clear halt\n", ep->ep.name);
1891 clear_halt(e);
1892 }
1893 allow_status(ep);
1894 goto next_endpoints;
1895 }
1896 case USB_REQ_SET_FEATURE: {
1897 struct net2272_ep *e;
1898
1899 if (u.r.bRequestType == USB_RECIP_DEVICE) {
1900 if (u.r.wIndex != NORMAL_OPERATION)
1901 net2272_set_test_mode(dev, (u.r.wIndex >> 8));
1902 allow_status(ep);
1903 dev_vdbg(dev->dev, "test mode: %d\n", u.r.wIndex);
1904 goto next_endpoints;
1905 } else if (u.r.bRequestType != USB_RECIP_ENDPOINT)
1906 goto delegate;
1907 if (u.r.wValue != USB_ENDPOINT_HALT ||
1908 u.r.wLength != 0)
1909 goto do_stall;
1910 e = net2272_get_ep_by_addr(dev, u.r.wIndex);
1911 if (!e)
1912 goto do_stall;
1913 set_halt(e);
1914 allow_status(ep);
1915 dev_vdbg(dev->dev, "%s set halt\n", ep->ep.name);
1916 goto next_endpoints;
1917 }
1918 case USB_REQ_SET_ADDRESS: {
1919 net2272_write(dev, OURADDR, u.r.wValue & 0xff);
1920 allow_status(ep);
1921 break;
1922 }
1923 default:
1924 delegate:
1925 dev_vdbg(dev->dev, "setup %02x.%02x v%04x i%04x "
1926 "ep_cfg %08x\n",
1927 u.r.bRequestType, u.r.bRequest,
1928 u.r.wValue, u.r.wIndex,
1929 net2272_ep_read(ep, EP_CFG));
1930 spin_unlock(&dev->lock);
1931 tmp = dev->driver->setup(&dev->gadget, &u.r);
1932 spin_lock(&dev->lock);
1933 }
1934
1935 /* stall ep0 on error */
1936 if (tmp < 0) {
1937 do_stall:
1938 dev_vdbg(dev->dev, "req %02x.%02x protocol STALL; stat %d\n",
1939 u.r.bRequestType, u.r.bRequest, tmp);
1940 dev->protocol_stall = 1;
1941 }
1942 /* endpoint dma irq? */
1943 } else if (stat & (1 << DMA_DONE_INTERRUPT)) {
1944 net2272_cancel_dma(dev);
1945 net2272_write(dev, IRQSTAT0, 1 << DMA_DONE_INTERRUPT);
1946 stat &= ~(1 << DMA_DONE_INTERRUPT);
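/* DMAREQ's endpoint-select bit says which endpoint owned the transfer: ep-b (dev->ep[2]) if set, ep-a (dev->ep[1]) if clear */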
1947 num = (net2272_read(dev, DMAREQ) & (1 << DMA_ENDPOINT_SELECT))
1948 ? 2 : 1;
1949
1950 ep = &dev->ep[num];
1951 net2272_handle_dma(ep);
1952 }
1953
1954 next_endpoints:
1955 /* endpoint data irq? */
1956 scratch = stat & 0x0f;
1957 stat &= ~0x0f;
1958 for (num = 0; scratch; num++) {
1959 u8 t;
1960
1961 /* does this endpoint's FIFO and queue need tending? */
1962 t = 1 << num;
1963 if ((scratch & t) == 0)
1964 continue;
1965 scratch ^= t;
1966
1967 ep = &dev->ep[num];
1968 net2272_handle_ep(ep);
1969 }
1970
1971 /* some interrupts we can just ignore */
1972 stat &= ~(1 << SOF_INTERRUPT);
1973
1974 if (stat)
1975 dev_dbg(dev->dev, "unhandled irqstat0 %02x\n", stat);
1976 }
1977
1978 static void
1979 net2272_handle_stat1_irqs(struct net2272 *dev, u8 stat)
1980 {
1981 u8 tmp, mask;
1982
1983 /* after disconnect there's nothing else to do! */
1984 tmp = (1 << VBUS_INTERRUPT) | (1 << ROOT_PORT_RESET_INTERRUPT);
1985 mask = (1 << USB_HIGH_SPEED) | (1 << USB_FULL_SPEED);
1986
1987 if (stat & tmp) {
1988 net2272_write(dev, IRQSTAT1, tmp);
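/* treat as a disconnect: root-port reset with neither high- nor full-speed status latched, or VBUS gone, while a gadget was connected */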
1989 if ((((stat & (1 << ROOT_PORT_RESET_INTERRUPT)) &&
1990 ((net2272_read(dev, USBCTL1) & mask) == 0))
1991 || ((net2272_read(dev, USBCTL1) & (1 << VBUS_PIN))
1992 == 0))
1993 && (dev->gadget.speed != USB_SPEED_UNKNOWN)) {
1994 dev_dbg(dev->dev, "disconnect %s\n",
1995 dev->driver->driver.name);
1996 stop_activity(dev, dev->driver);
1997 net2272_ep0_start(dev);
1998 return;
1999 }
2000 stat &= ~tmp;
2001
2002 if (!stat)
2003 return;
2004 }
2005
2006 tmp = (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT);
2007 if (stat & tmp) {
2008 net2272_write(dev, IRQSTAT1, tmp);
2009 if (stat & (1 << SUSPEND_REQUEST_INTERRUPT)) {
2010 if (dev->driver->suspend)
2011 dev->driver->suspend(&dev->gadget);
2012 if (!enable_suspend) {
2013 stat &= ~(1 << SUSPEND_REQUEST_INTERRUPT);
2014 dev_dbg(dev->dev, "Suspend disabled, ignoring\n");
2015 }
2016 } else {
2017 if (dev->driver->resume)
2018 dev->driver->resume(&dev->gadget);
2019 }
2020 stat &= ~tmp;
2021 }
2022
2023 /* clear any other status/irqs */
2024 if (stat)
2025 net2272_write(dev, IRQSTAT1, stat);
2026
2027 /* some status we can just ignore */
2028 stat &= ~((1 << CONTROL_STATUS_INTERRUPT)
2029 | (1 << SUSPEND_REQUEST_INTERRUPT)
2030 | (1 << RESUME_INTERRUPT));
2031 if (!stat)
2032 return;
2033 else
2034 dev_dbg(dev->dev, "unhandled irqstat1 %02x\n", stat);
2035 }
2036
2037 static irqreturn_t net2272_irq(int irq, void *_dev)
2038 {
2039 struct net2272 *dev = _dev;
2040 #if defined(PLX_PCI_RDK) || defined(PLX_PCI_RDK2)
2041 u32 intcsr;
2042 #endif
2043 #if defined(PLX_PCI_RDK)
2044 u8 dmareq;
2045 #endif
2046 spin_lock(&dev->lock);
2047 #if defined(PLX_PCI_RDK)
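/* RDK1: NET2272 interrupts arrive via the PLX 9054 bridge; mask PCI interrupt delivery while servicing the chip, then re-enable it */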
2048 intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR);
2049
2050 if ((intcsr & LOCAL_INTERRUPT_TEST) == LOCAL_INTERRUPT_TEST) {
2051 writel(intcsr & ~(1 << PCI_INTERRUPT_ENABLE),
2052 dev->rdk1.plx9054_base_addr + INTCSR);
2053 net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1));
2054 net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0));
2055 intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR);
2056 writel(intcsr | (1 << PCI_INTERRUPT_ENABLE),
2057 dev->rdk1.plx9054_base_addr + INTCSR);
2058 }
2059 if ((intcsr & DMA_CHANNEL_0_TEST) == DMA_CHANNEL_0_TEST) {
2060 writeb((1 << CHANNEL_CLEAR_INTERRUPT) | (0 << CHANNEL_ENABLE),
2061 dev->rdk1.plx9054_base_addr + DMACSR0);
2062
2063 dmareq = net2272_read(dev, DMAREQ);
2064 if (dmareq & 0x01)
2065 net2272_handle_dma(&dev->ep[2]);
2066 else
2067 net2272_handle_dma(&dev->ep[1]);
2068 }
2069 #endif
2070 #if defined(PLX_PCI_RDK2)
2071 /* see if PCI int for us by checking irqstat */
2072 intcsr = readl(dev->rdk2.fpga_base_addr + RDK2_IRQSTAT);
2073 if (!(intcsr & (1 << NET2272_PCI_IRQ)))
2074 return IRQ_NONE;
2075 /* check dma interrupts */
2076 #endif
2077 /* Platform/device interrupt handler */
2078 #if !defined(PLX_PCI_RDK)
2079 net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1));
2080 net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0));
2081 #endif
2082 spin_unlock(&dev->lock);
2083
2084 return IRQ_HANDLED;
2085 }
2086
2087 static int net2272_present(struct net2272 *dev)
2088 {
2089 /*
2090 * Quick test to see if CPU can communicate properly with the NET2272.
2091 * Verifies the connection by exercising a write/read register and a
2092 * read-only register.
2093 *
2094 * This routine is strongly recommended, especially during early bring-up
2095 * of new hardware; however, for designs that do not apply Power On System
2096 * Tests (POST) it may be discarded (or perhaps minimized).
2097 */
2098 unsigned int ii;
2099 u8 val, refval;
2100
2101 /* Verify the NET2272 SCRATCH register can be written and read back */
2102 refval = net2272_read(dev, SCRATCH);
2103 for (ii = 0; ii < 0x100; ii += 7) {
2104 net2272_write(dev, SCRATCH, ii);
2105 val = net2272_read(dev, SCRATCH);
2106 if (val != ii) {
2107 dev_dbg(dev->dev,
2108 "%s: write/read SCRATCH register test failed: "
2109 "wrote:0x%2.2x, read:0x%2.2x\n",
2110 __func__, ii, val);
2111 return -EINVAL;
2112 }
2113 }
2114 /* To be nice, we write the original SCRATCH value back: */
2115 net2272_write(dev, SCRATCH, refval);
2116
2117 /* Verify NET2272 CHIPREV register is read-only: */
2118 refval = net2272_read(dev, CHIPREV_2272);
2119 for (ii = 0; ii < 0x100; ii += 7) {
2120 net2272_write(dev, CHIPREV_2272, ii);
2121 val = net2272_read(dev, CHIPREV_2272);
2122 if (val != refval) {
2123 dev_dbg(dev->dev,
2124 "%s: write/read CHIPREV register test failed: "
2125 "wrote 0x%2.2x, read:0x%2.2x expected:0x%2.2x\n",
2126 __func__, ii, val, refval);
2127 return -EINVAL;
2128 }
2129 }
2130
2131 /*
2132 * Verify NET2272's "NET2270 legacy revision" register
2133 * - NET2272 has two revision registers. The NET2270 legacy revision
2134 * register should read the same value, regardless of the NET2272
2135 * silicon revision. The legacy register applies to NET2270
2136 * firmware being applied to the NET2272.
2137 */
2138 val = net2272_read(dev, CHIPREV_LEGACY);
2139 if (val != NET2270_LEGACY_REV) {
2140 /*
2141 * Unexpected legacy revision value
2142 * - Perhaps the chip is a NET2270?
2143 */
2144 dev_dbg(dev->dev,
2145 "%s: WARNING: UNEXPECTED NET2272 LEGACY REGISTER VALUE:\n"
2146 " - CHIPREV_LEGACY: expected 0x%2.2x, got:0x%2.2x. (Not NET2272?)\n",
2147 __func__, NET2270_LEGACY_REV, val);
2148 return -EINVAL;
2149 }
2150
2151 /*
2152 * Verify NET2272 silicon revision
2153 * - This revision register is appropriate for the silicon version
2154 * of the NET2272
2155 */
2156 val = net2272_read(dev, CHIPREV_2272);
2157 switch (val) {
2158 case CHIPREV_NET2272_R1:
2159 /*
2160 * NET2272 Rev 1 has DMA related errata:
2161 * - Newer silicon (Rev 1A or better) required
2162 */
2163 dev_dbg(dev->dev,
2164 "%s: Rev 1 detected: newer silicon recommended for DMA support\n",
2165 __func__);
2166 break;
2167 case CHIPREV_NET2272_R1A:
2168 break;
2169 default:
2170 /* NET2272 silicon version *may* not work with this firmware */
2171 dev_dbg(dev->dev,
2172 "%s: unexpected silicon revision register value: "
2173 " CHIPREV_2272: 0x%2.2x\n",
2174 __func__, val);
2175 /*
2176 * Return Success, even though the chip rev is not an expected value
2177 * - Older, pre-built firmware can attempt to operate on newer silicon
2178 * - Often, new silicon is perfectly compatible
2179 */
2180 }
2181
2182 /* Success: NET2272 checks out OK */
2183 return 0;
2184 }
2185
2186 static void
2187 net2272_gadget_release(struct device *_dev)
2188 {
2189 struct net2272 *dev = dev_get_drvdata(_dev);
2190 kfree(dev);
2191 }
2192
2193 /*---------------------------------------------------------------------------*/
2194
2195 static void __devexit
2196 net2272_remove(struct net2272 *dev)
2197 {
2198 usb_del_gadget_udc(&dev->gadget);
2199
2200 /* start with the driver above us */
2201 if (dev->driver) {
2202 /* should have been done already by driver model core */
2203 dev_warn(dev->dev, "pci remove, driver '%s' is still registered\n",
2204 dev->driver->driver.name);
2205 usb_gadget_unregister_driver(dev->driver);
2206 }
2207
2208 free_irq(dev->irq, dev);
2209 iounmap(dev->base_addr);
2210
2211 device_unregister(&dev->gadget.dev);
2212 device_remove_file(dev->dev, &dev_attr_registers);
2213
2214 dev_info(dev->dev, "unbind\n");
2215 }
2216
2217 static struct net2272 * __devinit
2218 net2272_probe_init(struct device *dev, unsigned int irq)
2219 {
2220 struct net2272 *ret;
2221
2222 if (!irq) {
2223 dev_dbg(dev, "No IRQ!\n");
2224 return ERR_PTR(-ENODEV);
2225 }
2226
2227 /* alloc, and start init */
2228 ret = kzalloc(sizeof(*ret), GFP_KERNEL);
2229 if (!ret)
2230 return ERR_PTR(-ENOMEM);
2231
2232 spin_lock_init(&ret->lock);
2233 ret->irq = irq;
2234 ret->dev = dev;
2235 ret->gadget.ops = &net2272_ops;
2236 ret->gadget.max_speed = USB_SPEED_HIGH;
2237
2238 /* the "gadget" abstracts/virtualizes the controller */
2239 dev_set_name(&ret->gadget.dev, "gadget");
2240 ret->gadget.dev.parent = dev;
2241 ret->gadget.dev.dma_mask = dev->dma_mask;
2242 ret->gadget.dev.release = net2272_gadget_release;
2243 ret->gadget.name = driver_name;
2244
2245 return ret;
2246 }
2247
2248 static int __devinit
2249 net2272_probe_fin(struct net2272 *dev, unsigned int irqflags)
2250 {
2251 int ret;
2252
2253 /* See if the chip is actually there */
2254 if (net2272_present(dev)) {
2255 dev_warn(dev->dev, "2272 not found!\n");
2256 ret = -ENODEV;
2257 goto err;
2258 }
2259
2260 net2272_usb_reset(dev);
2261 net2272_usb_reinit(dev);
2262
2263 ret = request_irq(dev->irq, net2272_irq, irqflags, driver_name, dev);
2264 if (ret) {
2265 dev_err(dev->dev, "request interrupt %i failed\n", dev->irq);
2266 goto err;
2267 }
2268
2269 dev->chiprev = net2272_read(dev, CHIPREV_2272);
2270
2271 /* done */
2272 dev_info(dev->dev, "%s\n", driver_desc);
2273 dev_info(dev->dev, "irq %i, mem %p, chip rev %04x, dma %s\n",
2274 dev->irq, dev->base_addr, dev->chiprev,
2275 dma_mode_string());
2276 dev_info(dev->dev, "version: %s\n", driver_vers);
2277
2278 ret = device_register(&dev->gadget.dev);
2279 if (ret)
2280 goto err_irq;
2281 ret = device_create_file(dev->dev, &dev_attr_registers);
2282 if (ret)
2283 goto err_dev_reg;
2284
2285 ret = usb_add_gadget_udc(dev->dev, &dev->gadget);
2286 if (ret)
2287 goto err_add_udc;
2288
2289 return 0;
2290
2291 err_add_udc:
2292 device_remove_file(dev->dev, &dev_attr_registers);
2293 err_dev_reg:
2294 device_unregister(&dev->gadget.dev);
2295 err_irq:
2296 free_irq(dev->irq, dev);
2297 err:
2298 return ret;
2299 }
2300
2301 #ifdef CONFIG_PCI
2302
2303 /*
2304 * wrap this driver around the specified device, but
2305 * don't respond over USB until a gadget driver binds to us
2306 */
2307
2308 static int __devinit
2309 net2272_rdk1_probe(struct pci_dev *pdev, struct net2272 *dev)
2310 {
2311 unsigned long resource, len, tmp;
2312 void __iomem *mem_mapped_addr[4];
2313 int ret, i;
2314
2315 /*
2316 * BAR 0 holds PLX 9054 config registers
2317 * BAR 1 is i/o memory; unused here
2318 * BAR 2 holds EPLD config registers
2319 * BAR 3 holds NET2272 registers
2320 */
2321
2322 /* Find and map all address spaces */
2323 for (i = 0; i < 4; ++i) {
2324 if (i == 1)
2325 continue; /* BAR1 unused */
2326
2327 resource = pci_resource_start(pdev, i);
2328 len = pci_resource_len(pdev, i);
2329
2330 if (!request_mem_region(resource, len, driver_name)) {
2331 dev_dbg(dev->dev, "controller already in use\n");
2332 ret = -EBUSY;
2333 goto err;
2334 }
2335
2336 mem_mapped_addr[i] = ioremap_nocache(resource, len);
2337 if (mem_mapped_addr[i] == NULL) {
2338 release_mem_region(resource, len);
2339 dev_dbg(dev->dev, "can't map memory\n");
2340 ret = -EFAULT;
2341 goto err;
2342 }
2343 }
2344
2345 dev->rdk1.plx9054_base_addr = mem_mapped_addr[0];
2346 dev->rdk1.epld_base_addr = mem_mapped_addr[2];
2347 dev->base_addr = mem_mapped_addr[3];
2348
2349 /* Set PLX 9054 bus width (16 bits) */
2350 tmp = readl(dev->rdk1.plx9054_base_addr + LBRD1);
2351 writel((tmp & ~(3 << MEMORY_SPACE_LOCAL_BUS_WIDTH)) | W16_BIT,
2352 dev->rdk1.plx9054_base_addr + LBRD1);
2353
2354 /* Enable PLX 9054 Interrupts */
2355 writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) |
2356 (1 << PCI_INTERRUPT_ENABLE) |
2357 (1 << LOCAL_INTERRUPT_INPUT_ENABLE),
2358 dev->rdk1.plx9054_base_addr + INTCSR);
2359
2360 writeb((1 << CHANNEL_CLEAR_INTERRUPT) | (0 << CHANNEL_ENABLE),
2361 dev->rdk1.plx9054_base_addr + DMACSR0);
2362
2363 /* configure the EPLD and assert NET2272 reset (released below) */
2364 writeb((1 << EPLD_DMA_ENABLE) |
2365 (1 << DMA_CTL_DACK) |
2366 (1 << DMA_TIMEOUT_ENABLE) |
2367 (1 << USER) |
2368 (0 << MPX_MODE) |
2369 (1 << BUSWIDTH) |
2370 (1 << NET2272_RESET),
2371 dev->base_addr + EPLD_IO_CONTROL_REGISTER);
2372
2373 mb();
2374 writeb(readb(dev->base_addr + EPLD_IO_CONTROL_REGISTER) &
2375 ~(1 << NET2272_RESET),
2376 dev->base_addr + EPLD_IO_CONTROL_REGISTER);
2377 udelay(200);
2378
2379 return 0;
2380
2381 err:
2382 while (--i >= 0) {
2383 iounmap(mem_mapped_addr[i]);
2384 release_mem_region(pci_resource_start(pdev, i),
2385 pci_resource_len(pdev, i));
2386 }
2387
2388 return ret;
2389 }
2390
2391 static int __devinit
2392 net2272_rdk2_probe(struct pci_dev *pdev, struct net2272 *dev)
2393 {
2394 unsigned long resource, len;
2395 void __iomem *mem_mapped_addr[2];
2396 int ret, i;
2397
2398 /*
2399 * BAR 0 holds FPGA config registers
2400 * BAR 1 holds NET2272 registers
2401 */
2402
2403 /* Find and map all address spaces; BARs 2-3 are unused on the RDK2 */
2404 for (i = 0; i < 2; ++i) {
2405 resource = pci_resource_start(pdev, i);
2406 len = pci_resource_len(pdev, i);
2407
2408 if (!request_mem_region(resource, len, driver_name)) {
2409 dev_dbg(dev->dev, "controller already in use\n");
2410 ret = -EBUSY;
2411 goto err;
2412 }
2413
2414 mem_mapped_addr[i] = ioremap_nocache(resource, len);
2415 if (mem_mapped_addr[i] == NULL) {
2416 release_mem_region(resource, len);
2417 dev_dbg(dev->dev, "can't map memory\n");
2418 ret = -EFAULT;
2419 goto err;
2420 }
2421 }
2422
2423 dev->rdk2.fpga_base_addr = mem_mapped_addr[0];
2424 dev->base_addr = mem_mapped_addr[1];
2425
2426 mb();
2427 /* Set 2272 bus width (16 bits) and reset */
2428 writel((1 << CHIP_RESET), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK);
2429 udelay(200);
2430 writel((1 << BUS_WIDTH), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK);
2431 /* Print fpga version number */
2432 dev_info(dev->dev, "RDK2 FPGA version %08x\n",
2433 readl(dev->rdk2.fpga_base_addr + RDK2_FPGAREV));
2434 /* Enable FPGA Interrupts */
2435 writel((1 << NET2272_PCI_IRQ), dev->rdk2.fpga_base_addr + RDK2_IRQENB);
2436
2437 return 0;
2438
2439 err:
2440 while (--i >= 0) {
2441 iounmap(mem_mapped_addr[i]);
2442 release_mem_region(pci_resource_start(pdev, i),
2443 pci_resource_len(pdev, i));
2444 }
2445
2446 return ret;
2447 }
2448
2449 static int __devinit
2450 net2272_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2451 {
2452 struct net2272 *dev;
2453 int ret;
2454
2455 dev = net2272_probe_init(&pdev->dev, pdev->irq);
2456 if (IS_ERR(dev))
2457 return PTR_ERR(dev);
2458 dev->dev_id = pdev->device;
2459
2460 if (pci_enable_device(pdev) < 0) {
2461 ret = -ENODEV;
2462 goto err_free;
2463 }
2464
2465 pci_set_master(pdev);
2466
2467 switch (pdev->device) {
2468 case PCI_DEVICE_ID_RDK1: ret = net2272_rdk1_probe(pdev, dev); break;
2469 case PCI_DEVICE_ID_RDK2: ret = net2272_rdk2_probe(pdev, dev); break;
2470 default: BUG();
2471 }
2472 if (ret)
2473 goto err_pci;
2474
2475 ret = net2272_probe_fin(dev, 0);
2476 if (ret)
2477 goto err_pci;
2478
2479 pci_set_drvdata(pdev, dev);
2480
2481 return 0;
2482
2483 err_pci:
2484 pci_disable_device(pdev);
2485 err_free:
2486 kfree(dev);
2487
2488 return ret;
2489 }
2490
2491 static void __devexit
2492 net2272_rdk1_remove(struct pci_dev *pdev, struct net2272 *dev)
2493 {
2494 int i;
2495
2496 /* disable PLX 9054 interrupts */
2497 writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) &
2498 ~(1 << PCI_INTERRUPT_ENABLE),
2499 dev->rdk1.plx9054_base_addr + INTCSR);
2500
2501 /* clean up resources allocated during probe() */
2502 iounmap(dev->rdk1.plx9054_base_addr);
2503 iounmap(dev->rdk1.epld_base_addr);
2504
2505 for (i = 0; i < 4; ++i) {
2506 if (i == 1)
2507 continue; /* BAR1 unused */
2508 release_mem_region(pci_resource_start(pdev, i),
2509 pci_resource_len(pdev, i));
2510 }
2511 }
2512
2513 static void __devexit
2514 net2272_rdk2_remove(struct pci_dev *pdev, struct net2272 *dev)
2515 {
2516 int i;
2517
2518 /* disable fpga interrupts
2519 writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) &
2520 ~(1 << PCI_INTERRUPT_ENABLE),
2521 dev->rdk1.plx9054_base_addr + INTCSR);
2522 */
2523
2524 /* clean up resources allocated during probe() */
2525 iounmap(dev->rdk2.fpga_base_addr);
2526
2527 for (i = 0; i < 2; ++i)
2528 release_mem_region(pci_resource_start(pdev, i),
2529 pci_resource_len(pdev, i));
2530 }
2531
2532 static void __devexit
2533 net2272_pci_remove(struct pci_dev *pdev)
2534 {
2535 struct net2272 *dev = pci_get_drvdata(pdev);
2536
2537 net2272_remove(dev);
2538
2539 switch (pdev->device) {
2540 case PCI_DEVICE_ID_RDK1: net2272_rdk1_remove(pdev, dev); break;
2541 case PCI_DEVICE_ID_RDK2: net2272_rdk2_remove(pdev, dev); break;
2542 default: BUG();
2543 }
2544
2545 pci_disable_device(pdev);
2546
2547 kfree(dev);
2548 }
2549
2550 /* Table of matching PCI IDs */
2551 static struct pci_device_id __devinitdata pci_ids[] = {
2552 { /* RDK 1 card */
2553 .class = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe),
2554 .class_mask = 0,
2555 .vendor = PCI_VENDOR_ID_PLX,
2556 .device = PCI_DEVICE_ID_RDK1,
2557 .subvendor = PCI_ANY_ID,
2558 .subdevice = PCI_ANY_ID,
2559 },
2560 { /* RDK 2 card */
2561 .class = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe),
2562 .class_mask = 0,
2563 .vendor = PCI_VENDOR_ID_PLX,
2564 .device = PCI_DEVICE_ID_RDK2,
2565 .subvendor = PCI_ANY_ID,
2566 .subdevice = PCI_ANY_ID,
2567 },
2568 { }
2569 };
2570 MODULE_DEVICE_TABLE(pci, pci_ids);
2571
2572 static struct pci_driver net2272_pci_driver = {
2573 .name = driver_name,
2574 .id_table = pci_ids,
2575
2576 .probe = net2272_pci_probe,
2577 .remove = __devexit_p(net2272_pci_remove),
2578 };
2579
2580 static int net2272_pci_register(void)
2581 {
2582 return pci_register_driver(&net2272_pci_driver);
2583 }
2584
2585 static void net2272_pci_unregister(void)
2586 {
2587 pci_unregister_driver(&net2272_pci_driver);
2588 }
2589
2590 #else
2591 static inline int net2272_pci_register(void) { return 0; }
2592 static inline void net2272_pci_unregister(void) { }
2593 #endif
2594
2595 /*---------------------------------------------------------------------------*/
2596
2597 static int __devinit
2598 net2272_plat_probe(struct platform_device *pdev)
2599 {
2600 struct net2272 *dev;
2601 int ret;
2602 unsigned int irqflags;
2603 resource_size_t base, len;
2604 struct resource *iomem, *iomem_bus, *irq_res;
2605
2606 irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2607 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2608 iomem_bus = platform_get_resource(pdev, IORESOURCE_BUS, 0);
2609 if (!irq_res || !iomem) {
2610 dev_err(&pdev->dev, "must provide irq/base addr\n");
2611 return -EINVAL;
2612 }
2613
2614 dev = net2272_probe_init(&pdev->dev, irq_res->start);
2615 if (IS_ERR(dev))
2616 return PTR_ERR(dev);
2617
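/* translate the IRQ resource trigger flags into IRQF_TRIGGER_* flags for request_irq() */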
2618 irqflags = 0;
2619 if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE)
2620 irqflags |= IRQF_TRIGGER_RISING;
2621 if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE)
2622 irqflags |= IRQF_TRIGGER_FALLING;
2623 if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL)
2624 irqflags |= IRQF_TRIGGER_HIGH;
2625 if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL)
2626 irqflags |= IRQF_TRIGGER_LOW;
2627
2628 base = iomem->start;
2629 len = resource_size(iomem);
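/* optional bus resource: register address shift used by net2272_reg_addr() for boards with offset address lines */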
2630 if (iomem_bus)
2631 dev->base_shift = iomem_bus->start;
2632
2633 if (!request_mem_region(base, len, driver_name)) {
2634 dev_dbg(dev->dev, "get request memory region!\n");
2635 ret = -EBUSY;
2636 goto err;
2637 }
2638 dev->base_addr = ioremap_nocache(base, len);
2639 if (!dev->base_addr) {
2640 dev_dbg(dev->dev, "can't map memory\n");
2641 ret = -EFAULT;
2642 goto err_req;
2643 }
2644
2645 ret = net2272_probe_fin(dev, irqflags);
2646 if (ret)
2647 goto err_io;
2648
2649 platform_set_drvdata(pdev, dev);
2650 dev_info(&pdev->dev, "running in 16-bit, %sbyte swap local bus mode\n",
2651 (net2272_read(dev, LOCCTL) & (1 << BYTE_SWAP)) ? "" : "no ");
2652
2653 return 0;
2654
2655 err_io:
2656 iounmap(dev->base_addr);
2657 err_req:
2658 release_mem_region(base, len);
2659 err:
2660 return ret;
2661 }
2662
2663 static int __devexit
2664 net2272_plat_remove(struct platform_device *pdev)
2665 {
2666 struct net2272 *dev = platform_get_drvdata(pdev);
2667
2668 net2272_remove(dev);
2669
2670 release_mem_region(pdev->resource[0].start,
2671 resource_size(&pdev->resource[0]));
2672
2673 kfree(dev);
2674
2675 return 0;
2676 }
2677
2678 static struct platform_driver net2272_plat_driver = {
2679 .probe = net2272_plat_probe,
2680 .remove = __devexit_p(net2272_plat_remove),
2681 .driver = {
2682 .name = driver_name,
2683 .owner = THIS_MODULE,
2684 },
2685 /* FIXME .suspend, .resume */
2686 };
2687 MODULE_ALIAS("platform:net2272");
2688
2689 static int __init net2272_init(void)
2690 {
2691 int ret;
2692
2693 ret = net2272_pci_register();
2694 if (ret)
2695 return ret;
2696 ret = platform_driver_register(&net2272_plat_driver);
2697 if (ret)
2698 goto err_pci;
2699 return ret;
2700
2701 err_pci:
2702 net2272_pci_unregister();
2703 return ret;
2704 }
2705 module_init(net2272_init);
2706
2707 static void __exit net2272_cleanup(void)
2708 {
2709 net2272_pci_unregister();
2710 platform_driver_unregister(&net2272_plat_driver);
2711 }
2712 module_exit(net2272_cleanup);
2713
2714 MODULE_DESCRIPTION(DRIVER_DESC);
2715 MODULE_AUTHOR("PLX Technology, Inc.");
2716 MODULE_LICENSE("GPL");