2 * Driver for PLX NET2272 USB device controller
4 * Copyright (C) 2005-2006 PLX Technology, Inc.
5 * Copyright (C) 2006-2011 Analog Devices, Inc.
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 #include <linux/delay.h>
23 #include <linux/device.h>
24 #include <linux/errno.h>
25 #include <linux/gpio.h>
26 #include <linux/init.h>
27 #include <linux/interrupt.h>
29 #include <linux/ioport.h>
30 #include <linux/irq.h>
31 #include <linux/kernel.h>
32 #include <linux/list.h>
33 #include <linux/module.h>
34 #include <linux/moduleparam.h>
35 #include <linux/pci.h>
36 #include <linux/platform_device.h>
37 #include <linux/sched.h>
38 #include <linux/slab.h>
39 #include <linux/timer.h>
40 #include <linux/usb.h>
41 #include <linux/usb/ch9.h>
42 #include <linux/usb/gadget.h>
44 #include <asm/byteorder.h>
45 #include <asm/system.h>
46 #include <asm/unaligned.h>
50 #define DRIVER_DESC "PLX NET2272 USB Peripheral Controller"
52 static const char driver_name
[] = "net2272";
53 static const char driver_vers
[] = "2006 October 17/mainline";
54 static const char driver_desc
[] = DRIVER_DESC
;
56 static const char ep0name
[] = "ep0";
57 static const char * const ep_name
[] = {
59 "ep-a", "ep-b", "ep-c",
62 #define DMA_ADDR_INVALID (~(dma_addr_t)0)
63 #ifdef CONFIG_USB_GADGET_NET2272_DMA
65 * use_dma: the NET2272 can use an external DMA controller.
66 * Note that since there is no generic DMA api, some functions,
67 * notably request_dma, start_dma, and cancel_dma will need to be
68 * modified for your platform's particular dma controller.
70 * If use_dma is disabled, pio will be used instead.
72 static int use_dma
= 0;
73 module_param(use_dma
, bool, 0644);
76 * dma_ep: selects the endpoint for use with dma (1=ep-a, 2=ep-b)
77 * The NET2272 can only use dma for a single endpoint at a time.
78 * At some point this could be modified to allow either endpoint
79 * to take control of dma as it becomes available.
81 * Note that DMA should not be used on OUT endpoints unless it can
82 * be guaranteed that no short packets will arrive on an IN endpoint
83 * while the DMA operation is pending. Otherwise the OUT DMA will
84 * terminate prematurely (See NET2272 Errata 630-0213-0101)
86 static ushort dma_ep
= 1;
87 module_param(dma_ep
, ushort
, 0644);
90 * dma_mode: net2272 dma mode setting (see LOCCTL1 definition):
91 * mode 0 == Slow DREQ mode
92 * mode 1 == Fast DREQ mode
93 * mode 2 == Burst mode
95 static ushort dma_mode
= 2;
96 module_param(dma_mode
, ushort
, 0644);
104 * fifo_mode: net2272 buffer configuration:
105 * mode 0 == ep-{a,b,c} 512db each
106 * mode 1 == ep-a 1k, ep-{b,c} 512db
107 * mode 2 == ep-a 1k, ep-b 1k, ep-c 512db
108 * mode 3 == ep-a 1k, ep-b disabled, ep-c 512db
110 static ushort fifo_mode
= 0;
111 module_param(fifo_mode
, ushort
, 0644);
114 * enable_suspend: When enabled, the driver will respond to
115 * USB suspend requests by powering down the NET2272. Otherwise,
116 * USB suspend requests will be ignored. This is acceptable for
117 * self-powered devices. For bus powered devices set this to 1.
119 static ushort enable_suspend
= 0;
120 module_param(enable_suspend
, ushort
, 0644);
122 static void assert_out_naking(struct net2272_ep
*ep
, const char *where
)
130 tmp
= net2272_ep_read(ep
, EP_STAT0
);
131 if ((tmp
& (1 << NAK_OUT_PACKETS
)) == 0) {
132 dev_dbg(ep
->dev
->dev
, "%s %s %02x !NAK\n",
133 ep
->ep
.name
, where
, tmp
);
134 net2272_ep_write(ep
, EP_RSPSET
, 1 << ALT_NAK_OUT_PACKETS
);
137 #define ASSERT_OUT_NAKING(ep) assert_out_naking(ep, __func__)
139 static void stop_out_naking(struct net2272_ep
*ep
)
141 u8 tmp
= net2272_ep_read(ep
, EP_STAT0
);
143 if ((tmp
& (1 << NAK_OUT_PACKETS
)) != 0)
144 net2272_ep_write(ep
, EP_RSPCLR
, 1 << ALT_NAK_OUT_PACKETS
);
147 #define PIPEDIR(bAddress) (usb_pipein(bAddress) ? "in" : "out")
149 static char *type_string(u8 bmAttributes
)
151 switch ((bmAttributes
) & USB_ENDPOINT_XFERTYPE_MASK
) {
152 case USB_ENDPOINT_XFER_BULK
: return "bulk";
153 case USB_ENDPOINT_XFER_ISOC
: return "iso";
154 case USB_ENDPOINT_XFER_INT
: return "intr";
155 default: return "control";
159 static char *buf_state_string(unsigned state
)
162 case BUFF_FREE
: return "free";
163 case BUFF_VALID
: return "valid";
164 case BUFF_LCL
: return "local";
165 case BUFF_USB
: return "usb";
166 default: return "unknown";
170 static char *dma_mode_string(void)
175 case 0: return "SLOW DREQ";
176 case 1: return "FAST DREQ";
177 case 2: return "BURST";
178 default: return "invalid";
182 static void net2272_dequeue_all(struct net2272_ep
*);
183 static int net2272_kick_dma(struct net2272_ep
*, struct net2272_request
*);
184 static int net2272_fifo_status(struct usb_ep
*);
186 static struct usb_ep_ops net2272_ep_ops
;
188 /*---------------------------------------------------------------------------*/
191 net2272_enable(struct usb_ep
*_ep
, const struct usb_endpoint_descriptor
*desc
)
194 struct net2272_ep
*ep
;
199 ep
= container_of(_ep
, struct net2272_ep
, ep
);
200 if (!_ep
|| !desc
|| ep
->desc
|| _ep
->name
== ep0name
201 || desc
->bDescriptorType
!= USB_DT_ENDPOINT
)
204 if (!dev
->driver
|| dev
->gadget
.speed
== USB_SPEED_UNKNOWN
)
207 max
= le16_to_cpu(desc
->wMaxPacketSize
) & 0x1fff;
209 spin_lock_irqsave(&dev
->lock
, flags
);
210 _ep
->maxpacket
= max
& 0x7fff;
213 /* net2272_ep_reset() has already been called */
217 /* set speed-dependent max packet */
218 net2272_ep_write(ep
, EP_MAXPKT0
, max
& 0xff);
219 net2272_ep_write(ep
, EP_MAXPKT1
, (max
& 0xff00) >> 8);
221 /* set type, direction, address; reset fifo counters */
222 net2272_ep_write(ep
, EP_STAT1
, 1 << BUFFER_FLUSH
);
223 tmp
= usb_endpoint_type(desc
);
224 if (usb_endpoint_xfer_bulk(desc
)) {
225 /* catch some particularly blatant driver bugs */
226 if ((dev
->gadget
.speed
== USB_SPEED_HIGH
&& max
!= 512) ||
227 (dev
->gadget
.speed
== USB_SPEED_FULL
&& max
> 64)) {
228 spin_unlock_irqrestore(&dev
->lock
, flags
);
232 ep
->is_iso
= usb_endpoint_xfer_isoc(desc
) ? 1 : 0;
233 tmp
<<= ENDPOINT_TYPE
;
234 tmp
|= ((desc
->bEndpointAddress
& 0x0f) << ENDPOINT_NUMBER
);
235 tmp
|= usb_endpoint_dir_in(desc
) << ENDPOINT_DIRECTION
;
236 tmp
|= (1 << ENDPOINT_ENABLE
);
238 /* for OUT transfers, block the rx fifo until a read is posted */
239 ep
->is_in
= usb_endpoint_dir_in(desc
);
241 net2272_ep_write(ep
, EP_RSPSET
, 1 << ALT_NAK_OUT_PACKETS
);
243 net2272_ep_write(ep
, EP_CFG
, tmp
);
246 tmp
= (1 << ep
->num
) | net2272_read(dev
, IRQENB0
);
247 net2272_write(dev
, IRQENB0
, tmp
);
249 tmp
= (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE
)
250 | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE
)
251 | net2272_ep_read(ep
, EP_IRQENB
);
252 net2272_ep_write(ep
, EP_IRQENB
, tmp
);
254 tmp
= desc
->bEndpointAddress
;
255 dev_dbg(dev
->dev
, "enabled %s (ep%d%s-%s) max %04x cfg %02x\n",
256 _ep
->name
, tmp
& 0x0f, PIPEDIR(tmp
),
257 type_string(desc
->bmAttributes
), max
,
258 net2272_ep_read(ep
, EP_CFG
));
260 spin_unlock_irqrestore(&dev
->lock
, flags
);
264 static void net2272_ep_reset(struct net2272_ep
*ep
)
269 INIT_LIST_HEAD(&ep
->queue
);
271 ep
->ep
.maxpacket
= ~0;
272 ep
->ep
.ops
= &net2272_ep_ops
;
274 /* disable irqs, endpoint */
275 net2272_ep_write(ep
, EP_IRQENB
, 0);
277 /* init to our chosen defaults, notably so that we NAK OUT
278 * packets until the driver queues a read.
280 tmp
= (1 << NAK_OUT_PACKETS_MODE
) | (1 << ALT_NAK_OUT_PACKETS
);
281 net2272_ep_write(ep
, EP_RSPSET
, tmp
);
283 tmp
= (1 << INTERRUPT_MODE
) | (1 << HIDE_STATUS_PHASE
);
285 tmp
|= (1 << ENDPOINT_TOGGLE
) | (1 << ENDPOINT_HALT
);
287 net2272_ep_write(ep
, EP_RSPCLR
, tmp
);
289 /* scrub most status bits, and flush any fifo state */
290 net2272_ep_write(ep
, EP_STAT0
,
291 (1 << DATA_IN_TOKEN_INTERRUPT
)
292 | (1 << DATA_OUT_TOKEN_INTERRUPT
)
293 | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT
)
294 | (1 << DATA_PACKET_RECEIVED_INTERRUPT
)
295 | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT
));
297 net2272_ep_write(ep
, EP_STAT1
,
299 | (1 << USB_OUT_ACK_SENT
)
300 | (1 << USB_OUT_NAK_SENT
)
301 | (1 << USB_IN_ACK_RCVD
)
302 | (1 << USB_IN_NAK_SENT
)
303 | (1 << USB_STALL_SENT
)
304 | (1 << LOCAL_OUT_ZLP
)
305 | (1 << BUFFER_FLUSH
));
307 /* fifo size is handled separately */
310 static int net2272_disable(struct usb_ep
*_ep
)
312 struct net2272_ep
*ep
;
315 ep
= container_of(_ep
, struct net2272_ep
, ep
);
316 if (!_ep
|| !ep
->desc
|| _ep
->name
== ep0name
)
319 spin_lock_irqsave(&ep
->dev
->lock
, flags
);
320 net2272_dequeue_all(ep
);
321 net2272_ep_reset(ep
);
323 dev_vdbg(ep
->dev
->dev
, "disabled %s\n", _ep
->name
);
325 spin_unlock_irqrestore(&ep
->dev
->lock
, flags
);
329 /*---------------------------------------------------------------------------*/
331 static struct usb_request
*
332 net2272_alloc_request(struct usb_ep
*_ep
, gfp_t gfp_flags
)
334 struct net2272_ep
*ep
;
335 struct net2272_request
*req
;
339 ep
= container_of(_ep
, struct net2272_ep
, ep
);
341 req
= kzalloc(sizeof(*req
), gfp_flags
);
345 req
->req
.dma
= DMA_ADDR_INVALID
;
346 INIT_LIST_HEAD(&req
->queue
);
352 net2272_free_request(struct usb_ep
*_ep
, struct usb_request
*_req
)
354 struct net2272_ep
*ep
;
355 struct net2272_request
*req
;
357 ep
= container_of(_ep
, struct net2272_ep
, ep
);
361 req
= container_of(_req
, struct net2272_request
, req
);
362 WARN_ON(!list_empty(&req
->queue
));
367 net2272_done(struct net2272_ep
*ep
, struct net2272_request
*req
, int status
)
370 unsigned stopped
= ep
->stopped
;
373 if (ep
->dev
->protocol_stall
) {
380 list_del_init(&req
->queue
);
382 if (req
->req
.status
== -EINPROGRESS
)
383 req
->req
.status
= status
;
385 status
= req
->req
.status
;
388 if (use_dma
&& req
->mapped
) {
389 dma_unmap_single(dev
->dev
, req
->req
.dma
, req
->req
.length
,
390 ep
->is_in
? DMA_TO_DEVICE
: DMA_FROM_DEVICE
);
391 req
->req
.dma
= DMA_ADDR_INVALID
;
395 if (status
&& status
!= -ESHUTDOWN
)
396 dev_vdbg(dev
->dev
, "complete %s req %p stat %d len %u/%u buf %p\n",
397 ep
->ep
.name
, &req
->req
, status
,
398 req
->req
.actual
, req
->req
.length
, req
->req
.buf
);
400 /* don't modify queue heads during completion callback */
402 spin_unlock(&dev
->lock
);
403 req
->req
.complete(&ep
->ep
, &req
->req
);
404 spin_lock(&dev
->lock
);
405 ep
->stopped
= stopped
;
409 net2272_write_packet(struct net2272_ep
*ep
, u8
*buf
,
410 struct net2272_request
*req
, unsigned max
)
412 u16 __iomem
*ep_data
= net2272_reg_addr(ep
->dev
, EP_DATA
);
414 unsigned length
, count
;
417 length
= min(req
->req
.length
- req
->req
.actual
, max
);
418 req
->req
.actual
+= length
;
420 dev_vdbg(ep
->dev
->dev
, "write packet %s req %p max %u len %u avail %u\n",
421 ep
->ep
.name
, req
, max
, length
,
422 (net2272_ep_read(ep
, EP_AVAIL1
) << 8) | net2272_ep_read(ep
, EP_AVAIL0
));
427 while (likely(count
>= 2)) {
428 /* no byte-swap required; chip endian set during init */
429 writew(*bufp
++, ep_data
);
434 /* write final byte by placing the NET2272 into 8-bit mode */
435 if (unlikely(count
)) {
436 tmp
= net2272_read(ep
->dev
, LOCCTL
);
437 net2272_write(ep
->dev
, LOCCTL
, tmp
& ~(1 << DATA_WIDTH
));
438 writeb(*buf
, ep_data
);
439 net2272_write(ep
->dev
, LOCCTL
, tmp
);
444 /* returns: 0: still running, 1: completed, negative: errno */
446 net2272_write_fifo(struct net2272_ep
*ep
, struct net2272_request
*req
)
452 dev_vdbg(ep
->dev
->dev
, "write_fifo %s actual %d len %d\n",
453 ep
->ep
.name
, req
->req
.actual
, req
->req
.length
);
456 * Keep loading the endpoint until the final packet is loaded,
457 * or the endpoint buffer is full.
461 * Clear interrupt status
462 * - Packet Transmitted interrupt will become set again when the
463 * host successfully takes another packet
465 net2272_ep_write(ep
, EP_STAT0
, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT
));
466 while (!(net2272_ep_read(ep
, EP_STAT0
) & (1 << BUFFER_FULL
))) {
467 buf
= req
->req
.buf
+ req
->req
.actual
;
471 net2272_ep_read(ep
, EP_STAT0
);
473 max
= (net2272_ep_read(ep
, EP_AVAIL1
) << 8) |
474 (net2272_ep_read(ep
, EP_AVAIL0
));
476 if (max
< ep
->ep
.maxpacket
)
477 max
= (net2272_ep_read(ep
, EP_AVAIL1
) << 8)
478 | (net2272_ep_read(ep
, EP_AVAIL0
));
480 count
= net2272_write_packet(ep
, buf
, req
, max
);
481 /* see if we are done */
482 if (req
->req
.length
== req
->req
.actual
) {
483 /* validate short or zlp packet */
484 if (count
< ep
->ep
.maxpacket
)
485 set_fifo_bytecount(ep
, 0);
486 net2272_done(ep
, req
, 0);
488 if (!list_empty(&ep
->queue
)) {
489 req
= list_entry(ep
->queue
.next
,
490 struct net2272_request
,
492 status
= net2272_kick_dma(ep
, req
);
495 if ((net2272_ep_read(ep
, EP_STAT0
)
496 & (1 << BUFFER_EMPTY
)))
501 net2272_ep_write(ep
, EP_STAT0
, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT
));
507 net2272_out_flush(struct net2272_ep
*ep
)
509 ASSERT_OUT_NAKING(ep
);
511 net2272_ep_write(ep
, EP_STAT0
, (1 << DATA_OUT_TOKEN_INTERRUPT
)
512 | (1 << DATA_PACKET_RECEIVED_INTERRUPT
));
513 net2272_ep_write(ep
, EP_STAT1
, 1 << BUFFER_FLUSH
);
517 net2272_read_packet(struct net2272_ep
*ep
, u8
*buf
,
518 struct net2272_request
*req
, unsigned avail
)
520 u16 __iomem
*ep_data
= net2272_reg_addr(ep
->dev
, EP_DATA
);
524 req
->req
.actual
+= avail
;
526 dev_vdbg(ep
->dev
->dev
, "read packet %s req %p len %u avail %u\n",
527 ep
->ep
.name
, req
, avail
,
528 (net2272_ep_read(ep
, EP_AVAIL1
) << 8) | net2272_ep_read(ep
, EP_AVAIL0
));
530 is_short
= (avail
< ep
->ep
.maxpacket
);
532 if (unlikely(avail
== 0)) {
533 /* remove any zlp from the buffer */
534 (void)readw(ep_data
);
538 /* Ensure we get the final byte */
539 if (unlikely(avail
% 2))
544 *bufp
++ = readw(ep_data
);
549 * To avoid false endpoint available race condition must read
550 * ep stat0 twice in the case of a short transfer
552 if (net2272_ep_read(ep
, EP_STAT0
) & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT
))
553 net2272_ep_read(ep
, EP_STAT0
);
559 net2272_read_fifo(struct net2272_ep
*ep
, struct net2272_request
*req
)
568 dev_vdbg(ep
->dev
->dev
, "read_fifo %s actual %d len %d\n",
569 ep
->ep
.name
, req
->req
.actual
, req
->req
.length
);
573 buf
= req
->req
.buf
+ req
->req
.actual
;
576 count
= (net2272_ep_read(ep
, EP_AVAIL1
) << 8)
577 | net2272_ep_read(ep
, EP_AVAIL0
);
579 net2272_ep_write(ep
, EP_STAT0
,
580 (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT
) |
581 (1 << DATA_PACKET_RECEIVED_INTERRUPT
));
583 tmp
= req
->req
.length
- req
->req
.actual
;
586 if ((tmp
% ep
->ep
.maxpacket
) != 0) {
587 dev_err(ep
->dev
->dev
,
588 "%s out fifo %d bytes, expected %d\n",
589 ep
->ep
.name
, count
, tmp
);
592 count
= (tmp
> 0) ? tmp
: 0;
595 is_short
= net2272_read_packet(ep
, buf
, req
, count
);
598 if (unlikely(cleanup
|| is_short
||
599 ((req
->req
.actual
== req
->req
.length
)
600 && !req
->req
.zero
))) {
603 net2272_out_flush(ep
);
604 net2272_done(ep
, req
, -EOVERFLOW
);
606 net2272_done(ep
, req
, 0);
608 /* re-initialize endpoint transfer registers
609 * otherwise they may result in erroneous pre-validation
610 * for subsequent control reads
612 if (unlikely(ep
->num
== 0)) {
613 net2272_ep_write(ep
, EP_TRANSFER2
, 0);
614 net2272_ep_write(ep
, EP_TRANSFER1
, 0);
615 net2272_ep_write(ep
, EP_TRANSFER0
, 0);
618 if (!list_empty(&ep
->queue
)) {
619 req
= list_entry(ep
->queue
.next
,
620 struct net2272_request
, queue
);
621 status
= net2272_kick_dma(ep
, req
);
623 !(net2272_ep_read(ep
, EP_STAT0
) & (1 << BUFFER_EMPTY
)))
628 } while (!(net2272_ep_read(ep
, EP_STAT0
) & (1 << BUFFER_EMPTY
)));
634 net2272_pio_advance(struct net2272_ep
*ep
)
636 struct net2272_request
*req
;
638 if (unlikely(list_empty(&ep
->queue
)))
641 req
= list_entry(ep
->queue
.next
, struct net2272_request
, queue
);
642 (ep
->is_in
? net2272_write_fifo
: net2272_read_fifo
)(ep
, req
);
645 /* returns 0 on success, else negative errno */
647 net2272_request_dma(struct net2272
*dev
, unsigned ep
, u32 buf
,
648 unsigned len
, unsigned dir
)
650 dev_vdbg(dev
->dev
, "request_dma ep %d buf %08x len %d dir %d\n",
653 /* The NET2272 only supports a single dma channel */
657 * EP_TRANSFER (used to determine the number of bytes received
658 * in an OUT transfer) is 24 bits wide; don't ask for more than that.
660 if ((dir
== 1) && (len
> 0x1000000))
665 /* initialize platform's dma */
667 /* NET2272 addr, buffer addr, length, etc. */
668 switch (dev
->dev_id
) {
669 case PCI_DEVICE_ID_RDK1
:
670 /* Setup PLX 9054 DMA mode */
671 writel((1 << LOCAL_BUS_WIDTH
) |
672 (1 << TA_READY_INPUT_ENABLE
) |
673 (0 << LOCAL_BURST_ENABLE
) |
674 (1 << DONE_INTERRUPT_ENABLE
) |
675 (1 << LOCAL_ADDRESSING_MODE
) |
677 (1 << DMA_EOT_ENABLE
) |
678 (1 << FAST_SLOW_TERMINATE_MODE_SELECT
) |
679 (1 << DMA_CHANNEL_INTERRUPT_SELECT
),
680 dev
->rdk1
.plx9054_base_addr
+ DMAMODE0
);
682 writel(0x100000, dev
->rdk1
.plx9054_base_addr
+ DMALADR0
);
683 writel(buf
, dev
->rdk1
.plx9054_base_addr
+ DMAPADR0
);
684 writel(len
, dev
->rdk1
.plx9054_base_addr
+ DMASIZ0
);
685 writel((dir
<< DIRECTION_OF_TRANSFER
) |
686 (1 << INTERRUPT_AFTER_TERMINAL_COUNT
),
687 dev
->rdk1
.plx9054_base_addr
+ DMADPR0
);
688 writel((1 << LOCAL_DMA_CHANNEL_0_INTERRUPT_ENABLE
) |
689 readl(dev
->rdk1
.plx9054_base_addr
+ INTCSR
),
690 dev
->rdk1
.plx9054_base_addr
+ INTCSR
);
696 net2272_write(dev
, DMAREQ
,
697 (0 << DMA_BUFFER_VALID
) |
698 (1 << DMA_REQUEST_ENABLE
) |
699 (1 << DMA_CONTROL_DACK
) |
700 (dev
->dma_eot_polarity
<< EOT_POLARITY
) |
701 (dev
->dma_dack_polarity
<< DACK_POLARITY
) |
702 (dev
->dma_dreq_polarity
<< DREQ_POLARITY
) |
703 ((ep
>> 1) << DMA_ENDPOINT_SELECT
));
705 (void) net2272_read(dev
, SCRATCH
);
711 net2272_start_dma(struct net2272
*dev
)
713 /* start platform's dma controller */
715 switch (dev
->dev_id
) {
716 case PCI_DEVICE_ID_RDK1
:
717 writeb((1 << CHANNEL_ENABLE
) | (1 << CHANNEL_START
),
718 dev
->rdk1
.plx9054_base_addr
+ DMACSR0
);
724 /* returns 0 on success, else negative errno */
726 net2272_kick_dma(struct net2272_ep
*ep
, struct net2272_request
*req
)
731 if (!use_dma
|| (ep
->num
< 1) || (ep
->num
> 2) || !ep
->dma
)
734 /* don't use dma for odd-length transfers
735 * otherwise, we'd need to deal with the last byte with pio
737 if (req
->req
.length
& 1)
740 dev_vdbg(ep
->dev
->dev
, "kick_dma %s req %p dma %08x\n",
741 ep
->ep
.name
, req
, req
->req
.dma
);
743 net2272_ep_write(ep
, EP_RSPSET
, 1 << ALT_NAK_OUT_PACKETS
);
745 /* The NET2272 can only use DMA on one endpoint at a time */
746 if (ep
->dev
->dma_busy
)
749 /* Make sure we only DMA an even number of bytes (we'll use
750 * pio to complete the transfer)
752 size
= req
->req
.length
;
755 /* device-to-host transfer */
757 /* initialize platform's dma controller */
758 if (net2272_request_dma(ep
->dev
, ep
->num
, req
->req
.dma
, size
, 0))
759 /* unable to obtain DMA channel; return error and use pio mode */
761 req
->req
.actual
+= size
;
763 /* host-to-device transfer */
765 tmp
= net2272_ep_read(ep
, EP_STAT0
);
767 /* initialize platform's dma controller */
768 if (net2272_request_dma(ep
->dev
, ep
->num
, req
->req
.dma
, size
, 1))
769 /* unable to obtain DMA channel; return error and use pio mode */
772 if (!(tmp
& (1 << BUFFER_EMPTY
)))
778 /* allow the endpoint's buffer to fill */
779 net2272_ep_write(ep
, EP_RSPCLR
, 1 << ALT_NAK_OUT_PACKETS
);
781 /* this transfer completed and data's already in the fifo
782 * return error so pio gets used.
784 if (tmp
& (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT
)) {
787 net2272_write(ep
->dev
, DMAREQ
,
788 (0 << DMA_BUFFER_VALID
) |
789 (0 << DMA_REQUEST_ENABLE
) |
790 (1 << DMA_CONTROL_DACK
) |
791 (ep
->dev
->dma_eot_polarity
<< EOT_POLARITY
) |
792 (ep
->dev
->dma_dack_polarity
<< DACK_POLARITY
) |
793 (ep
->dev
->dma_dreq_polarity
<< DREQ_POLARITY
) |
794 ((ep
->num
>> 1) << DMA_ENDPOINT_SELECT
));
800 /* Don't use per-packet interrupts: use dma interrupts only */
801 net2272_ep_write(ep
, EP_IRQENB
, 0);
803 net2272_start_dma(ep
->dev
);
808 static void net2272_cancel_dma(struct net2272
*dev
)
811 switch (dev
->dev_id
) {
812 case PCI_DEVICE_ID_RDK1
:
813 writeb(0, dev
->rdk1
.plx9054_base_addr
+ DMACSR0
);
814 writeb(1 << CHANNEL_ABORT
, dev
->rdk1
.plx9054_base_addr
+ DMACSR0
);
815 while (!(readb(dev
->rdk1
.plx9054_base_addr
+ DMACSR0
) &
816 (1 << CHANNEL_DONE
)))
817 continue; /* wait for dma to stabilize */
819 /* dma abort generates an interrupt */
820 writeb(1 << CHANNEL_CLEAR_INTERRUPT
,
821 dev
->rdk1
.plx9054_base_addr
+ DMACSR0
);
829 /*---------------------------------------------------------------------------*/
832 net2272_queue(struct usb_ep
*_ep
, struct usb_request
*_req
, gfp_t gfp_flags
)
834 struct net2272_request
*req
;
835 struct net2272_ep
*ep
;
841 req
= container_of(_req
, struct net2272_request
, req
);
842 if (!_req
|| !_req
->complete
|| !_req
->buf
843 || !list_empty(&req
->queue
))
845 ep
= container_of(_ep
, struct net2272_ep
, ep
);
846 if (!_ep
|| (!ep
->desc
&& ep
->num
!= 0))
849 if (!dev
->driver
|| dev
->gadget
.speed
== USB_SPEED_UNKNOWN
)
852 /* set up dma mapping in case the caller didn't */
853 if (use_dma
&& ep
->dma
&& _req
->dma
== DMA_ADDR_INVALID
) {
854 _req
->dma
= dma_map_single(dev
->dev
, _req
->buf
, _req
->length
,
855 ep
->is_in
? DMA_TO_DEVICE
: DMA_FROM_DEVICE
);
859 dev_vdbg(dev
->dev
, "%s queue req %p, len %d buf %p dma %08x %s\n",
860 _ep
->name
, _req
, _req
->length
, _req
->buf
,
861 _req
->dma
, _req
->zero
? "zero" : "!zero");
863 spin_lock_irqsave(&dev
->lock
, flags
);
865 _req
->status
= -EINPROGRESS
;
868 /* kickstart this i/o queue? */
869 if (list_empty(&ep
->queue
) && !ep
->stopped
) {
870 /* maybe there's no control data, just status ack */
871 if (ep
->num
== 0 && _req
->length
== 0) {
872 net2272_done(ep
, req
, 0);
873 dev_vdbg(dev
->dev
, "%s status ack\n", ep
->ep
.name
);
877 /* Return zlp, don't let it block subsequent packets */
878 s
= net2272_ep_read(ep
, EP_STAT0
);
879 if (s
& (1 << BUFFER_EMPTY
)) {
880 /* Buffer is empty check for a blocking zlp, handle it */
881 if ((s
& (1 << NAK_OUT_PACKETS
)) &&
882 net2272_ep_read(ep
, EP_STAT1
) & (1 << LOCAL_OUT_ZLP
)) {
883 dev_dbg(dev
->dev
, "WARNING: returning ZLP short packet termination!\n");
885 * Request is going to terminate with a short packet ...
886 * hope the client is ready for it!
888 status
= net2272_read_fifo(ep
, req
);
889 /* clear short packet naking */
890 net2272_ep_write(ep
, EP_STAT0
, (1 << NAK_OUT_PACKETS
));
896 status
= net2272_kick_dma(ep
, req
);
899 /* dma failed (most likely in use by another endpoint)
905 status
= net2272_write_fifo(ep
, req
);
907 s
= net2272_ep_read(ep
, EP_STAT0
);
908 if ((s
& (1 << BUFFER_EMPTY
)) == 0)
909 status
= net2272_read_fifo(ep
, req
);
912 if (unlikely(status
!= 0)) {
919 if (likely(req
!= 0))
920 list_add_tail(&req
->queue
, &ep
->queue
);
922 if (likely(!list_empty(&ep
->queue
)))
923 net2272_ep_write(ep
, EP_RSPCLR
, 1 << ALT_NAK_OUT_PACKETS
);
925 spin_unlock_irqrestore(&dev
->lock
, flags
);
930 /* dequeue ALL requests */
932 net2272_dequeue_all(struct net2272_ep
*ep
)
934 struct net2272_request
*req
;
936 /* called with spinlock held */
939 while (!list_empty(&ep
->queue
)) {
940 req
= list_entry(ep
->queue
.next
,
941 struct net2272_request
,
943 net2272_done(ep
, req
, -ESHUTDOWN
);
947 /* dequeue JUST ONE request */
949 net2272_dequeue(struct usb_ep
*_ep
, struct usb_request
*_req
)
951 struct net2272_ep
*ep
;
952 struct net2272_request
*req
;
956 ep
= container_of(_ep
, struct net2272_ep
, ep
);
957 if (!_ep
|| (!ep
->desc
&& ep
->num
!= 0) || !_req
)
960 spin_lock_irqsave(&ep
->dev
->lock
, flags
);
961 stopped
= ep
->stopped
;
964 /* make sure it's still queued on this endpoint */
965 list_for_each_entry(req
, &ep
->queue
, queue
) {
966 if (&req
->req
== _req
)
969 if (&req
->req
!= _req
) {
970 spin_unlock_irqrestore(&ep
->dev
->lock
, flags
);
974 /* queue head may be partially complete */
975 if (ep
->queue
.next
== &req
->queue
) {
976 dev_dbg(ep
->dev
->dev
, "unlink (%s) pio\n", _ep
->name
);
977 net2272_done(ep
, req
, -ECONNRESET
);
980 ep
->stopped
= stopped
;
982 spin_unlock_irqrestore(&ep
->dev
->lock
, flags
);
986 /*---------------------------------------------------------------------------*/
989 net2272_set_halt_and_wedge(struct usb_ep
*_ep
, int value
, int wedged
)
991 struct net2272_ep
*ep
;
995 ep
= container_of(_ep
, struct net2272_ep
, ep
);
996 if (!_ep
|| (!ep
->desc
&& ep
->num
!= 0))
998 if (!ep
->dev
->driver
|| ep
->dev
->gadget
.speed
== USB_SPEED_UNKNOWN
)
1000 if (ep
->desc
/* not ep0 */ && usb_endpoint_xfer_isoc(ep
->desc
))
1003 spin_lock_irqsave(&ep
->dev
->lock
, flags
);
1004 if (!list_empty(&ep
->queue
))
1006 else if (ep
->is_in
&& value
&& net2272_fifo_status(_ep
) != 0)
1009 dev_vdbg(ep
->dev
->dev
, "%s %s %s\n", _ep
->name
,
1010 value
? "set" : "clear",
1011 wedged
? "wedge" : "halt");
1015 ep
->dev
->protocol_stall
= 1;
1025 spin_unlock_irqrestore(&ep
->dev
->lock
, flags
);
1031 net2272_set_halt(struct usb_ep
*_ep
, int value
)
1033 return net2272_set_halt_and_wedge(_ep
, value
, 0);
1037 net2272_set_wedge(struct usb_ep
*_ep
)
1039 if (!_ep
|| _ep
->name
== ep0name
)
1041 return net2272_set_halt_and_wedge(_ep
, 1, 1);
1045 net2272_fifo_status(struct usb_ep
*_ep
)
1047 struct net2272_ep
*ep
;
1050 ep
= container_of(_ep
, struct net2272_ep
, ep
);
1051 if (!_ep
|| (!ep
->desc
&& ep
->num
!= 0))
1053 if (!ep
->dev
->driver
|| ep
->dev
->gadget
.speed
== USB_SPEED_UNKNOWN
)
1056 avail
= net2272_ep_read(ep
, EP_AVAIL1
) << 8;
1057 avail
|= net2272_ep_read(ep
, EP_AVAIL0
);
1058 if (avail
> ep
->fifo_size
)
1061 avail
= ep
->fifo_size
- avail
;
1066 net2272_fifo_flush(struct usb_ep
*_ep
)
1068 struct net2272_ep
*ep
;
1070 ep
= container_of(_ep
, struct net2272_ep
, ep
);
1071 if (!_ep
|| (!ep
->desc
&& ep
->num
!= 0))
1073 if (!ep
->dev
->driver
|| ep
->dev
->gadget
.speed
== USB_SPEED_UNKNOWN
)
1076 net2272_ep_write(ep
, EP_STAT1
, 1 << BUFFER_FLUSH
);
1079 static struct usb_ep_ops net2272_ep_ops
= {
1080 .enable
= net2272_enable
,
1081 .disable
= net2272_disable
,
1083 .alloc_request
= net2272_alloc_request
,
1084 .free_request
= net2272_free_request
,
1086 .queue
= net2272_queue
,
1087 .dequeue
= net2272_dequeue
,
1089 .set_halt
= net2272_set_halt
,
1090 .set_wedge
= net2272_set_wedge
,
1091 .fifo_status
= net2272_fifo_status
,
1092 .fifo_flush
= net2272_fifo_flush
,
1095 /*---------------------------------------------------------------------------*/
1098 net2272_get_frame(struct usb_gadget
*_gadget
)
1100 struct net2272
*dev
;
1101 unsigned long flags
;
1106 dev
= container_of(_gadget
, struct net2272
, gadget
);
1107 spin_lock_irqsave(&dev
->lock
, flags
);
1109 ret
= net2272_read(dev
, FRAME1
) << 8;
1110 ret
|= net2272_read(dev
, FRAME0
);
1112 spin_unlock_irqrestore(&dev
->lock
, flags
);
1117 net2272_wakeup(struct usb_gadget
*_gadget
)
1119 struct net2272
*dev
;
1121 unsigned long flags
;
1125 dev
= container_of(_gadget
, struct net2272
, gadget
);
1127 spin_lock_irqsave(&dev
->lock
, flags
);
1128 tmp
= net2272_read(dev
, USBCTL0
);
1129 if (tmp
& (1 << IO_WAKEUP_ENABLE
))
1130 net2272_write(dev
, USBCTL1
, (1 << GENERATE_RESUME
));
1132 spin_unlock_irqrestore(&dev
->lock
, flags
);
1138 net2272_set_selfpowered(struct usb_gadget
*_gadget
, int value
)
1140 struct net2272
*dev
;
1144 dev
= container_of(_gadget
, struct net2272
, gadget
);
1146 dev
->is_selfpowered
= value
;
1152 net2272_pullup(struct usb_gadget
*_gadget
, int is_on
)
1154 struct net2272
*dev
;
1156 unsigned long flags
;
1160 dev
= container_of(_gadget
, struct net2272
, gadget
);
1162 spin_lock_irqsave(&dev
->lock
, flags
);
1163 tmp
= net2272_read(dev
, USBCTL0
);
1164 dev
->softconnect
= (is_on
!= 0);
1166 tmp
|= (1 << USB_DETECT_ENABLE
);
1168 tmp
&= ~(1 << USB_DETECT_ENABLE
);
1169 net2272_write(dev
, USBCTL0
, tmp
);
1170 spin_unlock_irqrestore(&dev
->lock
, flags
);
1175 static const struct usb_gadget_ops net2272_ops
= {
1176 .get_frame
= net2272_get_frame
,
1177 .wakeup
= net2272_wakeup
,
1178 .set_selfpowered
= net2272_set_selfpowered
,
1179 .pullup
= net2272_pullup
1182 /*---------------------------------------------------------------------------*/
1185 net2272_show_registers(struct device
*_dev
, struct device_attribute
*attr
, char *buf
)
1187 struct net2272
*dev
;
1190 unsigned long flags
;
1195 dev
= dev_get_drvdata(_dev
);
1198 spin_lock_irqsave(&dev
->lock
, flags
);
1201 s
= dev
->driver
->driver
.name
;
1205 /* Main Control Registers */
1206 t
= scnprintf(next
, size
, "%s version %s,"
1207 "chiprev %02x, locctl %02x\n"
1208 "irqenb0 %02x irqenb1 %02x "
1209 "irqstat0 %02x irqstat1 %02x\n",
1210 driver_name
, driver_vers
, dev
->chiprev
,
1211 net2272_read(dev
, LOCCTL
),
1212 net2272_read(dev
, IRQENB0
),
1213 net2272_read(dev
, IRQENB1
),
1214 net2272_read(dev
, IRQSTAT0
),
1215 net2272_read(dev
, IRQSTAT1
));
1220 t1
= net2272_read(dev
, DMAREQ
);
1221 t
= scnprintf(next
, size
, "\ndmareq %02x: %s %s%s%s%s\n",
1222 t1
, ep_name
[(t1
& 0x01) + 1],
1223 t1
& (1 << DMA_CONTROL_DACK
) ? "dack " : "",
1224 t1
& (1 << DMA_REQUEST_ENABLE
) ? "reqenb " : "",
1225 t1
& (1 << DMA_REQUEST
) ? "req " : "",
1226 t1
& (1 << DMA_BUFFER_VALID
) ? "valid " : "");
1230 /* USB Control Registers */
1231 t1
= net2272_read(dev
, USBCTL1
);
1232 if (t1
& (1 << VBUS_PIN
)) {
1233 if (t1
& (1 << USB_HIGH_SPEED
))
1235 else if (dev
->gadget
.speed
== USB_SPEED_UNKNOWN
)
1241 t
= scnprintf(next
, size
,
1242 "usbctl0 %02x usbctl1 %02x addr 0x%02x (%s)\n",
1243 net2272_read(dev
, USBCTL0
), t1
,
1244 net2272_read(dev
, OURADDR
), s
);
1248 /* Endpoint Registers */
1249 for (i
= 0; i
< 4; ++i
) {
1250 struct net2272_ep
*ep
;
1256 t1
= net2272_ep_read(ep
, EP_CFG
);
1257 t2
= net2272_ep_read(ep
, EP_RSPSET
);
1258 t
= scnprintf(next
, size
,
1259 "\n%s\tcfg %02x rsp (%02x) %s%s%s%s%s%s%s%s"
1261 ep
->ep
.name
, t1
, t2
,
1262 (t2
& (1 << ALT_NAK_OUT_PACKETS
)) ? "NAK " : "",
1263 (t2
& (1 << HIDE_STATUS_PHASE
)) ? "hide " : "",
1264 (t2
& (1 << AUTOVALIDATE
)) ? "auto " : "",
1265 (t2
& (1 << INTERRUPT_MODE
)) ? "interrupt " : "",
1266 (t2
& (1 << CONTROL_STATUS_PHASE_HANDSHAKE
)) ? "status " : "",
1267 (t2
& (1 << NAK_OUT_PACKETS_MODE
)) ? "NAKmode " : "",
1268 (t2
& (1 << ENDPOINT_TOGGLE
)) ? "DATA1 " : "DATA0 ",
1269 (t2
& (1 << ENDPOINT_HALT
)) ? "HALT " : "",
1270 net2272_ep_read(ep
, EP_IRQENB
));
1274 t
= scnprintf(next
, size
,
1275 "\tstat0 %02x stat1 %02x avail %04x "
1277 net2272_ep_read(ep
, EP_STAT0
),
1278 net2272_ep_read(ep
, EP_STAT1
),
1279 (net2272_ep_read(ep
, EP_AVAIL1
) << 8) | net2272_ep_read(ep
, EP_AVAIL0
),
1281 ep
->is_in
? "in" : "out",
1282 type_string(t1
>> 5),
1283 ep
->stopped
? "*" : "");
1287 t
= scnprintf(next
, size
,
1288 "\tep_transfer %06x\n",
1289 ((net2272_ep_read(ep
, EP_TRANSFER2
) & 0xff) << 16) |
1290 ((net2272_ep_read(ep
, EP_TRANSFER1
) & 0xff) << 8) |
1291 ((net2272_ep_read(ep
, EP_TRANSFER0
) & 0xff)));
1295 t1
= net2272_ep_read(ep
, EP_BUFF_STATES
) & 0x03;
1296 t2
= (net2272_ep_read(ep
, EP_BUFF_STATES
) >> 2) & 0x03;
1297 t
= scnprintf(next
, size
,
1298 "\tbuf-a %s buf-b %s\n",
1299 buf_state_string(t1
),
1300 buf_state_string(t2
));
1305 spin_unlock_irqrestore(&dev
->lock
, flags
);
1307 return PAGE_SIZE
- size
;
1309 static DEVICE_ATTR(registers
, S_IRUGO
, net2272_show_registers
, NULL
);
1311 /*---------------------------------------------------------------------------*/
1314 net2272_set_fifo_mode(struct net2272
*dev
, int mode
)
1318 tmp
= net2272_read(dev
, LOCCTL
) & 0x3f;
1320 net2272_write(dev
, LOCCTL
, tmp
);
1322 INIT_LIST_HEAD(&dev
->gadget
.ep_list
);
1324 /* always ep-a, ep-c ... maybe not ep-b */
1325 list_add_tail(&dev
->ep
[1].ep
.ep_list
, &dev
->gadget
.ep_list
);
1329 list_add_tail(&dev
->ep
[2].ep
.ep_list
, &dev
->gadget
.ep_list
);
1330 dev
->ep
[1].fifo_size
= dev
->ep
[2].fifo_size
= 512;
1333 list_add_tail(&dev
->ep
[2].ep
.ep_list
, &dev
->gadget
.ep_list
);
1334 dev
->ep
[1].fifo_size
= 1024;
1335 dev
->ep
[2].fifo_size
= 512;
1338 list_add_tail(&dev
->ep
[2].ep
.ep_list
, &dev
->gadget
.ep_list
);
1339 dev
->ep
[1].fifo_size
= dev
->ep
[2].fifo_size
= 1024;
1342 dev
->ep
[1].fifo_size
= 1024;
1346 /* ep-c is always 2 512 byte buffers */
1347 list_add_tail(&dev
->ep
[3].ep
.ep_list
, &dev
->gadget
.ep_list
);
1348 dev
->ep
[3].fifo_size
= 512;
1351 /*---------------------------------------------------------------------------*/
1353 static struct net2272
*the_controller
;
1356 net2272_usb_reset(struct net2272
*dev
)
1358 dev
->gadget
.speed
= USB_SPEED_UNKNOWN
;
1360 net2272_cancel_dma(dev
);
1362 net2272_write(dev
, IRQENB0
, 0);
1363 net2272_write(dev
, IRQENB1
, 0);
1365 /* clear irq state */
1366 net2272_write(dev
, IRQSTAT0
, 0xff);
1367 net2272_write(dev
, IRQSTAT1
, ~(1 << SUSPEND_REQUEST_INTERRUPT
));
1369 net2272_write(dev
, DMAREQ
,
1370 (0 << DMA_BUFFER_VALID
) |
1371 (0 << DMA_REQUEST_ENABLE
) |
1372 (1 << DMA_CONTROL_DACK
) |
1373 (dev
->dma_eot_polarity
<< EOT_POLARITY
) |
1374 (dev
->dma_dack_polarity
<< DACK_POLARITY
) |
1375 (dev
->dma_dreq_polarity
<< DREQ_POLARITY
) |
1376 ((dma_ep
>> 1) << DMA_ENDPOINT_SELECT
));
1378 net2272_cancel_dma(dev
);
1379 net2272_set_fifo_mode(dev
, (fifo_mode
<= 3) ? fifo_mode
: 0);
1381 /* Set the NET2272 ep fifo data width to 16-bit mode and for correct byte swapping
1382 * note that the higher level gadget drivers are expected to convert data to little endian.
1383 * Enable byte swap for your local bus/cpu if needed by setting BYTE_SWAP in LOCCTL here
1385 net2272_write(dev
, LOCCTL
, net2272_read(dev
, LOCCTL
) | (1 << DATA_WIDTH
));
1386 net2272_write(dev
, LOCCTL1
, (dma_mode
<< DMA_MODE
));
1390 net2272_usb_reinit(struct net2272
*dev
)
1394 /* basic endpoint init */
1395 for (i
= 0; i
< 4; ++i
) {
1396 struct net2272_ep
*ep
= &dev
->ep
[i
];
1398 ep
->ep
.name
= ep_name
[i
];
1403 if (use_dma
&& ep
->num
== dma_ep
)
1406 if (i
> 0 && i
<= 3)
1407 ep
->fifo_size
= 512;
1410 net2272_ep_reset(ep
);
1412 dev
->ep
[0].ep
.maxpacket
= 64;
1414 dev
->gadget
.ep0
= &dev
->ep
[0].ep
;
1415 dev
->ep
[0].stopped
= 0;
1416 INIT_LIST_HEAD(&dev
->gadget
.ep0
->ep_list
);
1420 net2272_ep0_start(struct net2272
*dev
)
1422 struct net2272_ep
*ep0
= &dev
->ep
[0];
1424 net2272_ep_write(ep0
, EP_RSPSET
,
1425 (1 << NAK_OUT_PACKETS_MODE
) |
1426 (1 << ALT_NAK_OUT_PACKETS
));
1427 net2272_ep_write(ep0
, EP_RSPCLR
,
1428 (1 << HIDE_STATUS_PHASE
) |
1429 (1 << CONTROL_STATUS_PHASE_HANDSHAKE
));
1430 net2272_write(dev
, USBCTL0
,
1431 (dev
->softconnect
<< USB_DETECT_ENABLE
) |
1432 (1 << USB_ROOT_PORT_WAKEUP_ENABLE
) |
1433 (1 << IO_WAKEUP_ENABLE
));
1434 net2272_write(dev
, IRQENB0
,
1435 (1 << SETUP_PACKET_INTERRUPT_ENABLE
) |
1436 (1 << ENDPOINT_0_INTERRUPT_ENABLE
) |
1437 (1 << DMA_DONE_INTERRUPT_ENABLE
));
1438 net2272_write(dev
, IRQENB1
,
1439 (1 << VBUS_INTERRUPT_ENABLE
) |
1440 (1 << ROOT_PORT_RESET_INTERRUPT_ENABLE
) |
1441 (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE
));
1444 /* when a driver is successfully registered, it will receive
1445 * control requests including set_configuration(), which enables
1446 * non-control requests. then usb traffic follows until a
1447 * disconnect is reported. then a host may connect again, or
1448 * the driver might get unbound.
1450 int usb_gadget_probe_driver(struct usb_gadget_driver
*driver
,
1451 int (*bind
)(struct usb_gadget
*))
1453 struct net2272
*dev
= the_controller
;
1457 if (!driver
|| !bind
|| !driver
->unbind
|| !driver
->setup
||
1458 driver
->speed
!= USB_SPEED_HIGH
)
1465 for (i
= 0; i
< 4; ++i
)
1466 dev
->ep
[i
].irqs
= 0;
1467 /* hook up the driver ... */
1468 dev
->softconnect
= 1;
1469 driver
->driver
.bus
= NULL
;
1470 dev
->driver
= driver
;
1471 dev
->gadget
.dev
.driver
= &driver
->driver
;
1472 ret
= bind(&dev
->gadget
);
1474 dev_dbg(dev
->dev
, "bind to driver %s --> %d\n",
1475 driver
->driver
.name
, ret
);
1477 dev
->gadget
.dev
.driver
= NULL
;
1481 /* ... then enable host detection and ep0; and we're ready
1482 * for set_configuration as well as eventual disconnect.
1484 net2272_ep0_start(dev
);
1486 dev_dbg(dev
->dev
, "%s ready\n", driver
->driver
.name
);
1490 EXPORT_SYMBOL(usb_gadget_probe_driver
);
1493 stop_activity(struct net2272
*dev
, struct usb_gadget_driver
*driver
)
1497 /* don't disconnect if it's not connected */
1498 if (dev
->gadget
.speed
== USB_SPEED_UNKNOWN
)
1501 /* stop hardware; prevent new request submissions;
1502 * and kill any outstanding requests.
1504 net2272_usb_reset(dev
);
1505 for (i
= 0; i
< 4; ++i
)
1506 net2272_dequeue_all(&dev
->ep
[i
]);
1508 /* report disconnect; the driver is already quiesced */
1510 spin_unlock(&dev
->lock
);
1511 driver
->disconnect(&dev
->gadget
);
1512 spin_lock(&dev
->lock
);
1515 net2272_usb_reinit(dev
);
1518 int usb_gadget_unregister_driver(struct usb_gadget_driver
*driver
)
1520 struct net2272
*dev
= the_controller
;
1521 unsigned long flags
;
1525 if (!driver
|| driver
!= dev
->driver
)
1528 spin_lock_irqsave(&dev
->lock
, flags
);
1529 stop_activity(dev
, driver
);
1530 spin_unlock_irqrestore(&dev
->lock
, flags
);
1532 net2272_pullup(&dev
->gadget
, 0);
1534 driver
->unbind(&dev
->gadget
);
1535 dev
->gadget
.dev
.driver
= NULL
;
1538 dev_dbg(dev
->dev
, "unregistered driver '%s'\n", driver
->driver
.name
);
1541 EXPORT_SYMBOL(usb_gadget_unregister_driver
);
1543 /*---------------------------------------------------------------------------*/
1544 /* handle ep-a/ep-b dma completions */
1546 net2272_handle_dma(struct net2272_ep
*ep
)
1548 struct net2272_request
*req
;
1552 if (!list_empty(&ep
->queue
))
1553 req
= list_entry(ep
->queue
.next
,
1554 struct net2272_request
, queue
);
1558 dev_vdbg(ep
->dev
->dev
, "handle_dma %s req %p\n", ep
->ep
.name
, req
);
1560 /* Ensure DREQ is de-asserted */
1561 net2272_write(ep
->dev
, DMAREQ
,
1562 (0 << DMA_BUFFER_VALID
)
1563 | (0 << DMA_REQUEST_ENABLE
)
1564 | (1 << DMA_CONTROL_DACK
)
1565 | (ep
->dev
->dma_eot_polarity
<< EOT_POLARITY
)
1566 | (ep
->dev
->dma_dack_polarity
<< DACK_POLARITY
)
1567 | (ep
->dev
->dma_dreq_polarity
<< DREQ_POLARITY
)
1568 | ((ep
->dma
>> 1) << DMA_ENDPOINT_SELECT
));
1570 ep
->dev
->dma_busy
= 0;
1572 net2272_ep_write(ep
, EP_IRQENB
,
1573 (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE
)
1574 | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE
)
1575 | net2272_ep_read(ep
, EP_IRQENB
));
1577 /* device-to-host transfer completed */
1579 /* validate a short packet or zlp if necessary */
1580 if ((req
->req
.length
% ep
->ep
.maxpacket
!= 0) ||
1582 set_fifo_bytecount(ep
, 0);
1584 net2272_done(ep
, req
, 0);
1585 if (!list_empty(&ep
->queue
)) {
1586 req
= list_entry(ep
->queue
.next
,
1587 struct net2272_request
, queue
);
1588 status
= net2272_kick_dma(ep
, req
);
1590 net2272_pio_advance(ep
);
1593 /* host-to-device transfer completed */
1595 /* terminated with a short packet? */
1596 if (net2272_read(ep
->dev
, IRQSTAT0
) &
1597 (1 << DMA_DONE_INTERRUPT
)) {
1598 /* abort system dma */
1599 net2272_cancel_dma(ep
->dev
);
1602 /* EP_TRANSFER will contain the number of bytes
1603 * actually received.
1604 * NOTE: There is no overflow detection on EP_TRANSFER:
1605 * We can't deal with transfers larger than 2^24 bytes!
1607 len
= (net2272_ep_read(ep
, EP_TRANSFER2
) << 16)
1608 | (net2272_ep_read(ep
, EP_TRANSFER1
) << 8)
1609 | (net2272_ep_read(ep
, EP_TRANSFER0
));
1614 req
->req
.actual
+= len
;
1616 /* get any remaining data */
1617 net2272_pio_advance(ep
);
1621 /*---------------------------------------------------------------------------*/
1624 net2272_handle_ep(struct net2272_ep
*ep
)
1626 struct net2272_request
*req
;
1629 if (!list_empty(&ep
->queue
))
1630 req
= list_entry(ep
->queue
.next
,
1631 struct net2272_request
, queue
);
1635 /* ack all, and handle what we care about */
1636 stat0
= net2272_ep_read(ep
, EP_STAT0
);
1637 stat1
= net2272_ep_read(ep
, EP_STAT1
);
1640 dev_vdbg(ep
->dev
->dev
, "%s ack ep_stat0 %02x, ep_stat1 %02x, req %p\n",
1641 ep
->ep
.name
, stat0
, stat1
, req
? &req
->req
: 0);
1643 net2272_ep_write(ep
, EP_STAT0
, stat0
&
1644 ~((1 << NAK_OUT_PACKETS
)
1645 | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT
)));
1646 net2272_ep_write(ep
, EP_STAT1
, stat1
);
1648 /* data packet(s) received (in the fifo, OUT)
1649 * direction must be validated, otherwise control read status phase
1650 * could be interpreted as a valid packet
1652 if (!ep
->is_in
&& (stat0
& (1 << DATA_PACKET_RECEIVED_INTERRUPT
)))
1653 net2272_pio_advance(ep
);
1654 /* data packet(s) transmitted (IN) */
1655 else if (stat0
& (1 << DATA_PACKET_TRANSMITTED_INTERRUPT
))
1656 net2272_pio_advance(ep
);
1659 static struct net2272_ep
*
1660 net2272_get_ep_by_addr(struct net2272
*dev
, u16 wIndex
)
1662 struct net2272_ep
*ep
;
1664 if ((wIndex
& USB_ENDPOINT_NUMBER_MASK
) == 0)
1667 list_for_each_entry(ep
, &dev
->gadget
.ep_list
, ep
.ep_list
) {
1668 u8 bEndpointAddress
;
1672 bEndpointAddress
= ep
->desc
->bEndpointAddress
;
1673 if ((wIndex
^ bEndpointAddress
) & USB_DIR_IN
)
1675 if ((wIndex
& 0x0f) == (bEndpointAddress
& 0x0f))
1686 * JJJJJJJKKKKKKK * 8
1688 * {JKKKKKKK * 10}, JK
1690 static const u8 net2272_test_packet
[] = {
1691 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1692 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
1693 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE,
1694 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
1695 0x7F, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD,
1696 0xFC, 0x7E, 0xBF, 0xDF, 0xEF, 0xF7, 0xFD, 0x7E
1700 net2272_set_test_mode(struct net2272
*dev
, int mode
)
1704 /* Disable all net2272 interrupts:
1705 * Nothing but a power cycle should stop the test.
1707 net2272_write(dev
, IRQENB0
, 0x00);
1708 net2272_write(dev
, IRQENB1
, 0x00);
1710 /* Force tranceiver to high-speed */
1711 net2272_write(dev
, XCVRDIAG
, 1 << FORCE_HIGH_SPEED
);
1713 net2272_write(dev
, PAGESEL
, 0);
1714 net2272_write(dev
, EP_STAT0
, 1 << DATA_PACKET_TRANSMITTED_INTERRUPT
);
1715 net2272_write(dev
, EP_RSPCLR
,
1716 (1 << CONTROL_STATUS_PHASE_HANDSHAKE
)
1717 | (1 << HIDE_STATUS_PHASE
));
1718 net2272_write(dev
, EP_CFG
, 1 << ENDPOINT_DIRECTION
);
1719 net2272_write(dev
, EP_STAT1
, 1 << BUFFER_FLUSH
);
1721 /* wait for status phase to complete */
1722 while (!(net2272_read(dev
, EP_STAT0
) &
1723 (1 << DATA_PACKET_TRANSMITTED_INTERRUPT
)))
1726 /* Enable test mode */
1727 net2272_write(dev
, USBTEST
, mode
);
1729 /* load test packet */
1730 if (mode
== TEST_PACKET
) {
1731 /* switch to 8 bit mode */
1732 net2272_write(dev
, LOCCTL
, net2272_read(dev
, LOCCTL
) &
1733 ~(1 << DATA_WIDTH
));
1735 for (i
= 0; i
< sizeof(net2272_test_packet
); ++i
)
1736 net2272_write(dev
, EP_DATA
, net2272_test_packet
[i
]);
1738 /* Validate test packet */
1739 net2272_write(dev
, EP_TRANSFER0
, 0);
1744 net2272_handle_stat0_irqs(struct net2272
*dev
, u8 stat
)
1746 struct net2272_ep
*ep
;
1749 /* starting a control request? */
1750 if (unlikely(stat
& (1 << SETUP_PACKET_INTERRUPT
))) {
1753 struct usb_ctrlrequest r
;
1756 struct net2272_request
*req
;
1758 if (dev
->gadget
.speed
== USB_SPEED_UNKNOWN
) {
1759 if (net2272_read(dev
, USBCTL1
) & (1 << USB_HIGH_SPEED
))
1760 dev
->gadget
.speed
= USB_SPEED_HIGH
;
1762 dev
->gadget
.speed
= USB_SPEED_FULL
;
1763 dev_dbg(dev
->dev
, "%s speed\n",
1764 (dev
->gadget
.speed
== USB_SPEED_HIGH
) ? "high" : "full");
1770 /* make sure any leftover interrupt state is cleared */
1771 stat
&= ~(1 << ENDPOINT_0_INTERRUPT
);
1772 while (!list_empty(&ep
->queue
)) {
1773 req
= list_entry(ep
->queue
.next
,
1774 struct net2272_request
, queue
);
1775 net2272_done(ep
, req
,
1776 (req
->req
.actual
== req
->req
.length
) ? 0 : -EPROTO
);
1779 dev
->protocol_stall
= 0;
1780 net2272_ep_write(ep
, EP_STAT0
,
1781 (1 << DATA_IN_TOKEN_INTERRUPT
)
1782 | (1 << DATA_OUT_TOKEN_INTERRUPT
)
1783 | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT
)
1784 | (1 << DATA_PACKET_RECEIVED_INTERRUPT
)
1785 | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT
));
1786 net2272_ep_write(ep
, EP_STAT1
,
1788 | (1 << USB_OUT_ACK_SENT
)
1789 | (1 << USB_OUT_NAK_SENT
)
1790 | (1 << USB_IN_ACK_RCVD
)
1791 | (1 << USB_IN_NAK_SENT
)
1792 | (1 << USB_STALL_SENT
)
1793 | (1 << LOCAL_OUT_ZLP
));
1796 * Ensure Control Read pre-validation setting is beyond maximum size
1797 * - Control Writes can leave non-zero values in EP_TRANSFER. If
1798 * an EP0 transfer following the Control Write is a Control Read,
1799 * the NET2272 sees the non-zero EP_TRANSFER as an unexpected
1800 * pre-validation count.
1801 * - Setting EP_TRANSFER beyond the maximum EP0 transfer size ensures
1802 * the pre-validation count cannot cause an unexpected validatation
1804 net2272_write(dev
, PAGESEL
, 0);
1805 net2272_write(dev
, EP_TRANSFER2
, 0xff);
1806 net2272_write(dev
, EP_TRANSFER1
, 0xff);
1807 net2272_write(dev
, EP_TRANSFER0
, 0xff);
1809 u
.raw
[0] = net2272_read(dev
, SETUP0
);
1810 u
.raw
[1] = net2272_read(dev
, SETUP1
);
1811 u
.raw
[2] = net2272_read(dev
, SETUP2
);
1812 u
.raw
[3] = net2272_read(dev
, SETUP3
);
1813 u
.raw
[4] = net2272_read(dev
, SETUP4
);
1814 u
.raw
[5] = net2272_read(dev
, SETUP5
);
1815 u
.raw
[6] = net2272_read(dev
, SETUP6
);
1816 u
.raw
[7] = net2272_read(dev
, SETUP7
);
1818 * If you have a big endian cpu make sure le16_to_cpus
1819 * performs the proper byte swapping here...
1821 le16_to_cpus(&u
.r
.wValue
);
1822 le16_to_cpus(&u
.r
.wIndex
);
1823 le16_to_cpus(&u
.r
.wLength
);
1826 net2272_write(dev
, IRQSTAT0
, 1 << SETUP_PACKET_INTERRUPT
);
1827 stat
^= (1 << SETUP_PACKET_INTERRUPT
);
1829 /* watch control traffic at the token level, and force
1830 * synchronization before letting the status phase happen.
1832 ep
->is_in
= (u
.r
.bRequestType
& USB_DIR_IN
) != 0;
1834 scratch
= (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE
)
1835 | (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE
)
1836 | (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE
);
1837 stop_out_naking(ep
);
1839 scratch
= (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE
)
1840 | (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE
)
1841 | (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE
);
1842 net2272_ep_write(ep
, EP_IRQENB
, scratch
);
1844 if ((u
.r
.bRequestType
& USB_TYPE_MASK
) != USB_TYPE_STANDARD
)
1846 switch (u
.r
.bRequest
) {
1847 case USB_REQ_GET_STATUS
: {
1848 struct net2272_ep
*e
;
1851 switch (u
.r
.bRequestType
& USB_RECIP_MASK
) {
1852 case USB_RECIP_ENDPOINT
:
1853 e
= net2272_get_ep_by_addr(dev
, u
.r
.wIndex
);
1854 if (!e
|| u
.r
.wLength
> 2)
1856 if (net2272_ep_read(e
, EP_RSPSET
) & (1 << ENDPOINT_HALT
))
1857 status
= __constant_cpu_to_le16(1);
1859 status
= __constant_cpu_to_le16(0);
1861 /* don't bother with a request object! */
1862 net2272_ep_write(&dev
->ep
[0], EP_IRQENB
, 0);
1863 writew(status
, net2272_reg_addr(dev
, EP_DATA
));
1864 set_fifo_bytecount(&dev
->ep
[0], 0);
1866 dev_vdbg(dev
->dev
, "%s stat %02x\n",
1867 ep
->ep
.name
, status
);
1868 goto next_endpoints
;
1869 case USB_RECIP_DEVICE
:
1870 if (u
.r
.wLength
> 2)
1872 if (dev
->is_selfpowered
)
1873 status
= (1 << USB_DEVICE_SELF_POWERED
);
1875 /* don't bother with a request object! */
1876 net2272_ep_write(&dev
->ep
[0], EP_IRQENB
, 0);
1877 writew(status
, net2272_reg_addr(dev
, EP_DATA
));
1878 set_fifo_bytecount(&dev
->ep
[0], 0);
1880 dev_vdbg(dev
->dev
, "device stat %02x\n", status
);
1881 goto next_endpoints
;
1882 case USB_RECIP_INTERFACE
:
1883 if (u
.r
.wLength
> 2)
1886 /* don't bother with a request object! */
1887 net2272_ep_write(&dev
->ep
[0], EP_IRQENB
, 0);
1888 writew(status
, net2272_reg_addr(dev
, EP_DATA
));
1889 set_fifo_bytecount(&dev
->ep
[0], 0);
1891 dev_vdbg(dev
->dev
, "interface status %02x\n", status
);
1892 goto next_endpoints
;
1897 case USB_REQ_CLEAR_FEATURE
: {
1898 struct net2272_ep
*e
;
1900 if (u
.r
.bRequestType
!= USB_RECIP_ENDPOINT
)
1902 if (u
.r
.wValue
!= USB_ENDPOINT_HALT
||
1905 e
= net2272_get_ep_by_addr(dev
, u
.r
.wIndex
);
1909 dev_vdbg(dev
->dev
, "%s wedged, halt not cleared\n",
1912 dev_vdbg(dev
->dev
, "%s clear halt\n", ep
->ep
.name
);
1916 goto next_endpoints
;
1918 case USB_REQ_SET_FEATURE
: {
1919 struct net2272_ep
*e
;
1921 if (u
.r
.bRequestType
== USB_RECIP_DEVICE
) {
1922 if (u
.r
.wIndex
!= NORMAL_OPERATION
)
1923 net2272_set_test_mode(dev
, (u
.r
.wIndex
>> 8));
1925 dev_vdbg(dev
->dev
, "test mode: %d\n", u
.r
.wIndex
);
1926 goto next_endpoints
;
1927 } else if (u
.r
.bRequestType
!= USB_RECIP_ENDPOINT
)
1929 if (u
.r
.wValue
!= USB_ENDPOINT_HALT
||
1932 e
= net2272_get_ep_by_addr(dev
, u
.r
.wIndex
);
1937 dev_vdbg(dev
->dev
, "%s set halt\n", ep
->ep
.name
);
1938 goto next_endpoints
;
1940 case USB_REQ_SET_ADDRESS
: {
1941 net2272_write(dev
, OURADDR
, u
.r
.wValue
& 0xff);
1947 dev_vdbg(dev
->dev
, "setup %02x.%02x v%04x i%04x "
1949 u
.r
.bRequestType
, u
.r
.bRequest
,
1950 u
.r
.wValue
, u
.r
.wIndex
,
1951 net2272_ep_read(ep
, EP_CFG
));
1952 spin_unlock(&dev
->lock
);
1953 tmp
= dev
->driver
->setup(&dev
->gadget
, &u
.r
);
1954 spin_lock(&dev
->lock
);
1957 /* stall ep0 on error */
1960 dev_vdbg(dev
->dev
, "req %02x.%02x protocol STALL; stat %d\n",
1961 u
.r
.bRequestType
, u
.r
.bRequest
, tmp
);
1962 dev
->protocol_stall
= 1;
1964 /* endpoint dma irq? */
1965 } else if (stat
& (1 << DMA_DONE_INTERRUPT
)) {
1966 net2272_cancel_dma(dev
);
1967 net2272_write(dev
, IRQSTAT0
, 1 << DMA_DONE_INTERRUPT
);
1968 stat
&= ~(1 << DMA_DONE_INTERRUPT
);
1969 num
= (net2272_read(dev
, DMAREQ
) & (1 << DMA_ENDPOINT_SELECT
))
1973 net2272_handle_dma(ep
);
1977 /* endpoint data irq? */
1978 scratch
= stat
& 0x0f;
1980 for (num
= 0; scratch
; num
++) {
1983 /* does this endpoint's FIFO and queue need tending? */
1985 if ((scratch
& t
) == 0)
1990 net2272_handle_ep(ep
);
1993 /* some interrupts we can just ignore */
1994 stat
&= ~(1 << SOF_INTERRUPT
);
1997 dev_dbg(dev
->dev
, "unhandled irqstat0 %02x\n", stat
);
2001 net2272_handle_stat1_irqs(struct net2272
*dev
, u8 stat
)
2005 /* after disconnect there's nothing else to do! */
2006 tmp
= (1 << VBUS_INTERRUPT
) | (1 << ROOT_PORT_RESET_INTERRUPT
);
2007 mask
= (1 << USB_HIGH_SPEED
) | (1 << USB_FULL_SPEED
);
2010 net2272_write(dev
, IRQSTAT1
, tmp
);
2011 if ((((stat
& (1 << ROOT_PORT_RESET_INTERRUPT
)) &&
2012 ((net2272_read(dev
, USBCTL1
) & mask
) == 0))
2013 || ((net2272_read(dev
, USBCTL1
) & (1 << VBUS_PIN
))
2015 && (dev
->gadget
.speed
!= USB_SPEED_UNKNOWN
)) {
2016 dev_dbg(dev
->dev
, "disconnect %s\n",
2017 dev
->driver
->driver
.name
);
2018 stop_activity(dev
, dev
->driver
);
2019 net2272_ep0_start(dev
);
2028 tmp
= (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT
);
2030 net2272_write(dev
, IRQSTAT1
, tmp
);
2031 if (stat
& (1 << SUSPEND_REQUEST_INTERRUPT
)) {
2032 if (dev
->driver
->suspend
)
2033 dev
->driver
->suspend(&dev
->gadget
);
2034 if (!enable_suspend
) {
2035 stat
&= ~(1 << SUSPEND_REQUEST_INTERRUPT
);
2036 dev_dbg(dev
->dev
, "Suspend disabled, ignoring\n");
2039 if (dev
->driver
->resume
)
2040 dev
->driver
->resume(&dev
->gadget
);
2045 /* clear any other status/irqs */
2047 net2272_write(dev
, IRQSTAT1
, stat
);
2049 /* some status we can just ignore */
2050 stat
&= ~((1 << CONTROL_STATUS_INTERRUPT
)
2051 | (1 << SUSPEND_REQUEST_INTERRUPT
)
2052 | (1 << RESUME_INTERRUPT
));
2056 dev_dbg(dev
->dev
, "unhandled irqstat1 %02x\n", stat
);
2059 static irqreturn_t
net2272_irq(int irq
, void *_dev
)
2061 struct net2272
*dev
= _dev
;
2062 #if defined(PLX_PCI_RDK) || defined(PLX_PCI_RDK2)
2065 #if defined(PLX_PCI_RDK)
2068 spin_lock(&dev
->lock
);
2069 #if defined(PLX_PCI_RDK)
2070 intcsr
= readl(dev
->rdk1
.plx9054_base_addr
+ INTCSR
);
2072 if ((intcsr
& LOCAL_INTERRUPT_TEST
) == LOCAL_INTERRUPT_TEST
) {
2073 writel(intcsr
& ~(1 << PCI_INTERRUPT_ENABLE
),
2074 dev
->rdk1
.plx9054_base_addr
+ INTCSR
);
2075 net2272_handle_stat1_irqs(dev
, net2272_read(dev
, IRQSTAT1
));
2076 net2272_handle_stat0_irqs(dev
, net2272_read(dev
, IRQSTAT0
));
2077 intcsr
= readl(dev
->rdk1
.plx9054_base_addr
+ INTCSR
);
2078 writel(intcsr
| (1 << PCI_INTERRUPT_ENABLE
),
2079 dev
->rdk1
.plx9054_base_addr
+ INTCSR
);
2081 if ((intcsr
& DMA_CHANNEL_0_TEST
) == DMA_CHANNEL_0_TEST
) {
2082 writeb((1 << CHANNEL_CLEAR_INTERRUPT
| (0 << CHANNEL_ENABLE
)),
2083 dev
->rdk1
.plx9054_base_addr
+ DMACSR0
);
2085 dmareq
= net2272_read(dev
, DMAREQ
);
2087 net2272_handle_dma(&dev
->ep
[2]);
2089 net2272_handle_dma(&dev
->ep
[1]);
2092 #if defined(PLX_PCI_RDK2)
2093 /* see if PCI int for us by checking irqstat */
2094 intcsr
= readl(dev
->rdk2
.fpga_base_addr
+ RDK2_IRQSTAT
);
2095 if (!intcsr
& (1 << NET2272_PCI_IRQ
))
2097 /* check dma interrupts */
2099 /* Platform/devcice interrupt handler */
2100 #if !defined(PLX_PCI_RDK)
2101 net2272_handle_stat1_irqs(dev
, net2272_read(dev
, IRQSTAT1
));
2102 net2272_handle_stat0_irqs(dev
, net2272_read(dev
, IRQSTAT0
));
2104 spin_unlock(&dev
->lock
);
2109 static int net2272_present(struct net2272
*dev
)
2112 * Quick test to see if CPU can communicate properly with the NET2272.
2113 * Verifies connection using writes and reads to write/read and
2114 * read-only registers.
2116 * This routine is strongly recommended especially during early bring-up
2117 * of new hardware, however for designs that do not apply Power On System
2118 * Tests (POST) it may discarded (or perhaps minimized).
2123 /* Verify NET2272 write/read SCRATCH register can write and read */
2124 refval
= net2272_read(dev
, SCRATCH
);
2125 for (ii
= 0; ii
< 0x100; ii
+= 7) {
2126 net2272_write(dev
, SCRATCH
, ii
);
2127 val
= net2272_read(dev
, SCRATCH
);
2130 "%s: write/read SCRATCH register test failed: "
2131 "wrote:0x%2.2x, read:0x%2.2x\n",
2136 /* To be nice, we write the original SCRATCH value back: */
2137 net2272_write(dev
, SCRATCH
, refval
);
2139 /* Verify NET2272 CHIPREV register is read-only: */
2140 refval
= net2272_read(dev
, CHIPREV_2272
);
2141 for (ii
= 0; ii
< 0x100; ii
+= 7) {
2142 net2272_write(dev
, CHIPREV_2272
, ii
);
2143 val
= net2272_read(dev
, CHIPREV_2272
);
2144 if (val
!= refval
) {
2146 "%s: write/read CHIPREV register test failed: "
2147 "wrote 0x%2.2x, read:0x%2.2x expected:0x%2.2x\n",
2148 __func__
, ii
, val
, refval
);
2154 * Verify NET2272's "NET2270 legacy revision" register
2155 * - NET2272 has two revision registers. The NET2270 legacy revision
2156 * register should read the same value, regardless of the NET2272
2157 * silicon revision. The legacy register applies to NET2270
2158 * firmware being applied to the NET2272.
2160 val
= net2272_read(dev
, CHIPREV_LEGACY
);
2161 if (val
!= NET2270_LEGACY_REV
) {
2163 * Unexpected legacy revision value
2164 * - Perhaps the chip is a NET2270?
2167 "%s: WARNING: UNEXPECTED NET2272 LEGACY REGISTER VALUE:\n"
2168 " - CHIPREV_LEGACY: expected 0x%2.2x, got:0x%2.2x. (Not NET2272?)\n",
2169 __func__
, NET2270_LEGACY_REV
, val
);
2174 * Verify NET2272 silicon revision
2175 * - This revision register is appropriate for the silicon version
2178 val
= net2272_read(dev
, CHIPREV_2272
);
2180 case CHIPREV_NET2272_R1
:
2182 * NET2272 Rev 1 has DMA related errata:
2183 * - Newer silicon (Rev 1A or better) required
2186 "%s: Rev 1 detected: newer silicon recommended for DMA support\n",
2189 case CHIPREV_NET2272_R1A
:
2192 /* NET2272 silicon version *may* not work with this firmware */
2194 "%s: unexpected silicon revision register value: "
2195 " CHIPREV_2272: 0x%2.2x\n",
2198 * Return Success, even though the chip rev is not an expected value
2199 * - Older, pre-built firmware can attempt to operate on newer silicon
2200 * - Often, new silicon is perfectly compatible
2204 /* Success: NET2272 checks out OK */
2209 net2272_gadget_release(struct device
*_dev
)
2211 struct net2272
*dev
= dev_get_drvdata(_dev
);
2215 /*---------------------------------------------------------------------------*/
2217 static void __devexit
2218 net2272_remove(struct net2272
*dev
)
2220 /* start with the driver above us */
2222 /* should have been done already by driver model core */
2223 dev_warn(dev
->dev
, "pci remove, driver '%s' is still registered\n",
2224 dev
->driver
->driver
.name
);
2225 usb_gadget_unregister_driver(dev
->driver
);
2228 free_irq(dev
->irq
, dev
);
2229 iounmap(dev
->base_addr
);
2231 device_unregister(&dev
->gadget
.dev
);
2232 device_remove_file(dev
->dev
, &dev_attr_registers
);
2234 dev_info(dev
->dev
, "unbind\n");
2235 the_controller
= NULL
;
2238 static struct net2272
* __devinit
2239 net2272_probe_init(struct device
*dev
, unsigned int irq
)
2241 struct net2272
*ret
;
2243 if (the_controller
) {
2244 dev_warn(dev
, "ignoring\n");
2245 return ERR_PTR(-EBUSY
);
2249 dev_dbg(dev
, "No IRQ!\n");
2250 return ERR_PTR(-ENODEV
);
2253 /* alloc, and start init */
2254 ret
= kzalloc(sizeof(*ret
), GFP_KERNEL
);
2256 return ERR_PTR(-ENOMEM
);
2258 spin_lock_init(&ret
->lock
);
2261 ret
->gadget
.ops
= &net2272_ops
;
2262 ret
->gadget
.is_dualspeed
= 1;
2264 /* the "gadget" abstracts/virtualizes the controller */
2265 dev_set_name(&ret
->gadget
.dev
, "gadget");
2266 ret
->gadget
.dev
.parent
= dev
;
2267 ret
->gadget
.dev
.dma_mask
= dev
->dma_mask
;
2268 ret
->gadget
.dev
.release
= net2272_gadget_release
;
2269 ret
->gadget
.name
= driver_name
;
2274 static int __devinit
2275 net2272_probe_fin(struct net2272
*dev
, unsigned int irqflags
)
2279 /* See if there... */
2280 if (net2272_present(dev
)) {
2281 dev_warn(dev
->dev
, "2272 not found!\n");
2286 net2272_usb_reset(dev
);
2287 net2272_usb_reinit(dev
);
2289 ret
= request_irq(dev
->irq
, net2272_irq
, irqflags
, driver_name
, dev
);
2291 dev_err(dev
->dev
, "request interrupt %i failed\n", dev
->irq
);
2295 dev
->chiprev
= net2272_read(dev
, CHIPREV_2272
);
2298 dev_info(dev
->dev
, "%s\n", driver_desc
);
2299 dev_info(dev
->dev
, "irq %i, mem %p, chip rev %04x, dma %s\n",
2300 dev
->irq
, dev
->base_addr
, dev
->chiprev
,
2302 dev_info(dev
->dev
, "version: %s\n", driver_vers
);
2304 the_controller
= dev
;
2306 ret
= device_register(&dev
->gadget
.dev
);
2309 ret
= device_create_file(dev
->dev
, &dev_attr_registers
);
2316 device_unregister(&dev
->gadget
.dev
);
2318 free_irq(dev
->irq
, dev
);
2326 * wrap this driver around the specified device, but
2327 * don't respond over USB until a gadget driver binds to us
2330 static int __devinit
2331 net2272_rdk1_probe(struct pci_dev
*pdev
, struct net2272
*dev
)
2333 unsigned long resource
, len
, tmp
;
2334 void __iomem
*mem_mapped_addr
[4];
2338 * BAR 0 holds PLX 9054 config registers
2339 * BAR 1 is i/o memory; unused here
2340 * BAR 2 holds EPLD config registers
2341 * BAR 3 holds NET2272 registers
2344 /* Find and map all address spaces */
2345 for (i
= 0; i
< 4; ++i
) {
2347 continue; /* BAR1 unused */
2349 resource
= pci_resource_start(pdev
, i
);
2350 len
= pci_resource_len(pdev
, i
);
2352 if (!request_mem_region(resource
, len
, driver_name
)) {
2353 dev_dbg(dev
->dev
, "controller already in use\n");
2358 mem_mapped_addr
[i
] = ioremap_nocache(resource
, len
);
2359 if (mem_mapped_addr
[i
] == NULL
) {
2360 release_mem_region(resource
, len
);
2361 dev_dbg(dev
->dev
, "can't map memory\n");
2367 dev
->rdk1
.plx9054_base_addr
= mem_mapped_addr
[0];
2368 dev
->rdk1
.epld_base_addr
= mem_mapped_addr
[2];
2369 dev
->base_addr
= mem_mapped_addr
[3];
2371 /* Set PLX 9054 bus width (16 bits) */
2372 tmp
= readl(dev
->rdk1
.plx9054_base_addr
+ LBRD1
);
2373 writel((tmp
& ~(3 << MEMORY_SPACE_LOCAL_BUS_WIDTH
)) | W16_BIT
,
2374 dev
->rdk1
.plx9054_base_addr
+ LBRD1
);
2376 /* Enable PLX 9054 Interrupts */
2377 writel(readl(dev
->rdk1
.plx9054_base_addr
+ INTCSR
) |
2378 (1 << PCI_INTERRUPT_ENABLE
) |
2379 (1 << LOCAL_INTERRUPT_INPUT_ENABLE
),
2380 dev
->rdk1
.plx9054_base_addr
+ INTCSR
);
2382 writeb((1 << CHANNEL_CLEAR_INTERRUPT
| (0 << CHANNEL_ENABLE
)),
2383 dev
->rdk1
.plx9054_base_addr
+ DMACSR0
);
2386 writeb((1 << EPLD_DMA_ENABLE
) |
2387 (1 << DMA_CTL_DACK
) |
2388 (1 << DMA_TIMEOUT_ENABLE
) |
2392 (1 << NET2272_RESET
),
2393 dev
->base_addr
+ EPLD_IO_CONTROL_REGISTER
);
2396 writeb(readb(dev
->base_addr
+ EPLD_IO_CONTROL_REGISTER
) &
2397 ~(1 << NET2272_RESET
),
2398 dev
->base_addr
+ EPLD_IO_CONTROL_REGISTER
);
2405 iounmap(mem_mapped_addr
[i
]);
2406 release_mem_region(pci_resource_start(pdev
, i
),
2407 pci_resource_len(pdev
, i
));
2413 static int __devinit
2414 net2272_rdk2_probe(struct pci_dev
*pdev
, struct net2272
*dev
)
2416 unsigned long resource
, len
;
2417 void __iomem
*mem_mapped_addr
[2];
2421 * BAR 0 holds FGPA config registers
2422 * BAR 1 holds NET2272 registers
2425 /* Find and map all address spaces, bar2-3 unused in rdk 2 */
2426 for (i
= 0; i
< 2; ++i
) {
2427 resource
= pci_resource_start(pdev
, i
);
2428 len
= pci_resource_len(pdev
, i
);
2430 if (!request_mem_region(resource
, len
, driver_name
)) {
2431 dev_dbg(dev
->dev
, "controller already in use\n");
2436 mem_mapped_addr
[i
] = ioremap_nocache(resource
, len
);
2437 if (mem_mapped_addr
[i
] == NULL
) {
2438 release_mem_region(resource
, len
);
2439 dev_dbg(dev
->dev
, "can't map memory\n");
2445 dev
->rdk2
.fpga_base_addr
= mem_mapped_addr
[0];
2446 dev
->base_addr
= mem_mapped_addr
[1];
2449 /* Set 2272 bus width (16 bits) and reset */
2450 writel((1 << CHIP_RESET
), dev
->rdk2
.fpga_base_addr
+ RDK2_LOCCTLRDK
);
2452 writel((1 << BUS_WIDTH
), dev
->rdk2
.fpga_base_addr
+ RDK2_LOCCTLRDK
);
2453 /* Print fpga version number */
2454 dev_info(dev
->dev
, "RDK2 FPGA version %08x\n",
2455 readl(dev
->rdk2
.fpga_base_addr
+ RDK2_FPGAREV
));
2456 /* Enable FPGA Interrupts */
2457 writel((1 << NET2272_PCI_IRQ
), dev
->rdk2
.fpga_base_addr
+ RDK2_IRQENB
);
2463 iounmap(mem_mapped_addr
[i
]);
2464 release_mem_region(pci_resource_start(pdev
, i
),
2465 pci_resource_len(pdev
, i
));
2471 static int __devinit
2472 net2272_pci_probe(struct pci_dev
*pdev
, const struct pci_device_id
*id
)
2474 struct net2272
*dev
;
2477 dev
= net2272_probe_init(&pdev
->dev
, pdev
->irq
);
2479 return PTR_ERR(dev
);
2480 dev
->dev_id
= pdev
->device
;
2482 if (pci_enable_device(pdev
) < 0) {
2487 pci_set_master(pdev
);
2489 switch (pdev
->device
) {
2490 case PCI_DEVICE_ID_RDK1
: ret
= net2272_rdk1_probe(pdev
, dev
); break;
2491 case PCI_DEVICE_ID_RDK2
: ret
= net2272_rdk2_probe(pdev
, dev
); break;
2497 ret
= net2272_probe_fin(dev
, 0);
2501 pci_set_drvdata(pdev
, dev
);
2506 pci_disable_device(pdev
);
2513 static void __devexit
2514 net2272_rdk1_remove(struct pci_dev
*pdev
, struct net2272
*dev
)
2518 /* disable PLX 9054 interrupts */
2519 writel(readl(dev
->rdk1
.plx9054_base_addr
+ INTCSR
) &
2520 ~(1 << PCI_INTERRUPT_ENABLE
),
2521 dev
->rdk1
.plx9054_base_addr
+ INTCSR
);
2523 /* clean up resources allocated during probe() */
2524 iounmap(dev
->rdk1
.plx9054_base_addr
);
2525 iounmap(dev
->rdk1
.epld_base_addr
);
2527 for (i
= 0; i
< 4; ++i
) {
2529 continue; /* BAR1 unused */
2530 release_mem_region(pci_resource_start(pdev
, i
),
2531 pci_resource_len(pdev
, i
));
2535 static void __devexit
2536 net2272_rdk2_remove(struct pci_dev
*pdev
, struct net2272
*dev
)
2540 /* disable fpga interrupts
2541 writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) &
2542 ~(1 << PCI_INTERRUPT_ENABLE),
2543 dev->rdk1.plx9054_base_addr + INTCSR);
2546 /* clean up resources allocated during probe() */
2547 iounmap(dev
->rdk2
.fpga_base_addr
);
2549 for (i
= 0; i
< 2; ++i
)
2550 release_mem_region(pci_resource_start(pdev
, i
),
2551 pci_resource_len(pdev
, i
));
2554 static void __devexit
2555 net2272_pci_remove(struct pci_dev
*pdev
)
2557 struct net2272
*dev
= pci_get_drvdata(pdev
);
2559 net2272_remove(dev
);
2561 switch (pdev
->device
) {
2562 case PCI_DEVICE_ID_RDK1
: net2272_rdk1_remove(pdev
, dev
); break;
2563 case PCI_DEVICE_ID_RDK2
: net2272_rdk2_remove(pdev
, dev
); break;
2567 pci_disable_device(pdev
);
2572 /* Table of matching PCI IDs */
2573 static struct pci_device_id __devinitdata pci_ids
[] = {
2575 .class = ((PCI_CLASS_BRIDGE_OTHER
<< 8) | 0xfe),
2577 .vendor
= PCI_VENDOR_ID_PLX
,
2578 .device
= PCI_DEVICE_ID_RDK1
,
2579 .subvendor
= PCI_ANY_ID
,
2580 .subdevice
= PCI_ANY_ID
,
2583 .class = ((PCI_CLASS_BRIDGE_OTHER
<< 8) | 0xfe),
2585 .vendor
= PCI_VENDOR_ID_PLX
,
2586 .device
= PCI_DEVICE_ID_RDK2
,
2587 .subvendor
= PCI_ANY_ID
,
2588 .subdevice
= PCI_ANY_ID
,
2592 MODULE_DEVICE_TABLE(pci
, pci_ids
);
2594 static struct pci_driver net2272_pci_driver
= {
2595 .name
= driver_name
,
2596 .id_table
= pci_ids
,
2598 .probe
= net2272_pci_probe
,
2599 .remove
= __devexit_p(net2272_pci_remove
),
/*
 * No-PCI build (the #else branch of the CONFIG_PCI conditional): stub the
 * registration calls so net2272_init()/net2272_cleanup() still compile.
 * NOTE(review): these expand to the constant 1, not 0 — callers must not
 * treat a positive value as failure.
 */
# define pci_register_driver(x) 1
# define pci_unregister_driver(x) 1
2607 /*---------------------------------------------------------------------------*/
2609 static int __devinit
2610 net2272_plat_probe(struct platform_device
*pdev
)
2612 struct net2272
*dev
;
2614 unsigned int irqflags
;
2615 resource_size_t base
, len
;
2616 struct resource
*iomem
, *iomem_bus
, *irq_res
;
2618 irq_res
= platform_get_resource(pdev
, IORESOURCE_IRQ
, 0);
2619 iomem
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
2620 iomem_bus
= platform_get_resource(pdev
, IORESOURCE_BUS
, 0);
2621 if (!irq_res
|| !iomem
) {
2622 dev_err(&pdev
->dev
, "must provide irq/base addr");
2626 dev
= net2272_probe_init(&pdev
->dev
, irq_res
->start
);
2628 return PTR_ERR(dev
);
2631 if (irq_res
->flags
& IORESOURCE_IRQ_HIGHEDGE
)
2632 irqflags
|= IRQF_TRIGGER_RISING
;
2633 if (irq_res
->flags
& IORESOURCE_IRQ_LOWEDGE
)
2634 irqflags
|= IRQF_TRIGGER_FALLING
;
2635 if (irq_res
->flags
& IORESOURCE_IRQ_HIGHLEVEL
)
2636 irqflags
|= IRQF_TRIGGER_HIGH
;
2637 if (irq_res
->flags
& IORESOURCE_IRQ_LOWLEVEL
)
2638 irqflags
|= IRQF_TRIGGER_LOW
;
2640 base
= iomem
->start
;
2641 len
= resource_size(iomem
);
2643 dev
->base_shift
= iomem_bus
->start
;
2645 if (!request_mem_region(base
, len
, driver_name
)) {
2646 dev_dbg(dev
->dev
, "get request memory region!\n");
2650 dev
->base_addr
= ioremap_nocache(base
, len
);
2651 if (!dev
->base_addr
) {
2652 dev_dbg(dev
->dev
, "can't map memory\n");
2657 ret
= net2272_probe_fin(dev
, IRQF_TRIGGER_LOW
);
2661 platform_set_drvdata(pdev
, dev
);
2662 dev_info(&pdev
->dev
, "running in 16-bit, %sbyte swap local bus mode\n",
2663 (net2272_read(dev
, LOCCTL
) & (1 << BYTE_SWAP
)) ? "" : "no ");
2665 the_controller
= dev
;
2670 iounmap(dev
->base_addr
);
2672 release_mem_region(base
, len
);
2677 static int __devexit
2678 net2272_plat_remove(struct platform_device
*pdev
)
2680 struct net2272
*dev
= platform_get_drvdata(pdev
);
2682 net2272_remove(dev
);
2684 release_mem_region(pdev
->resource
[0].start
,
2685 resource_size(&pdev
->resource
[0]));
2692 static struct platform_driver net2272_plat_driver
= {
2693 .probe
= net2272_plat_probe
,
2694 .remove
= __devexit_p(net2272_plat_remove
),
2696 .name
= driver_name
,
2697 .owner
= THIS_MODULE
,
2699 /* FIXME .suspend, .resume */
2702 static int __init
net2272_init(void)
2704 return pci_register_driver(&net2272_pci_driver
) &
2705 platform_driver_register(&net2272_plat_driver
);
2707 module_init(net2272_init
);
2709 static void __exit
net2272_cleanup(void)
2711 pci_unregister_driver(&net2272_pci_driver
);
2712 platform_driver_unregister(&net2272_plat_driver
);
2714 module_exit(net2272_cleanup
);
2716 MODULE_DESCRIPTION(DRIVER_DESC
);
2717 MODULE_AUTHOR("PLX Technology, Inc.");
2718 MODULE_LICENSE("GPL");