/*
 * amd5536.c -- AMD 5536 UDC high/full speed USB device controller
 *
 * Copyright (C) 2005-2007 AMD (http://www.amd.com)
 * Author: Thomas Dahlmann
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 * The AMD5536 UDC is part of the x86 southbridge AMD Geode CS5536.
 * It is a high-speed, DMA-capable USB device controller. Besides ep0,
 * it provides 4 IN and 4 OUT endpoints (bulk or interrupt type).
 *
 * Make sure that the UDC is assigned to port 4 by BIOS settings (the port
 * can also be used as a host port) and that the UOC bits PAD_EN and APU
 * are set (should be done by BIOS init).
 *
 * UDC DMA requires 32-bit aligned buffers, so DMA with the ether gadget
 * does not work without updating NET_IP_ALIGN. Alternatively, PIO mode
 * (module param "use_dma=0") can be used with the ether gadget.
 */

/* debug control */
/* #define UDC_VERBOSE */

/* Driver strings */
#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
#define UDC_DRIVER_VERSION_STRING	"01.00.0206 - $Revision: #3 $"

/* system */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/ioctl.h>
#include <linux/fs.h>
#include <linux/dmapool.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/irq.h>

#include <asm/byteorder.h>
#include <asm/system.h>
#include <asm/unaligned.h>

/* gadget stack */
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

/* udc specific */
#include "amd5536udc.h"


static void udc_tasklet_disconnect(unsigned long);
static void empty_req_queue(struct udc_ep *);
static int udc_probe(struct udc *dev);
static void udc_basic_init(struct udc *dev);
static void udc_setup_endpoints(struct udc *dev);
static void udc_soft_reset(struct udc *dev);
static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep);
static void udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq);
static int udc_free_dma_chain(struct udc *dev, struct udc_request *req);
static int udc_create_dma_chain(struct udc_ep *ep, struct udc_request *req,
				unsigned long buf_len, gfp_t gfp_flags);
static int udc_remote_wakeup(struct udc *dev);
static int udc_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id);
static void udc_pci_remove(struct pci_dev *pdev);

/* description */
static const char mod_desc[] = UDC_MOD_DESCRIPTION;
static const char name[] = "amd5536udc";

/* structure to hold endpoint function pointers */
static const struct usb_ep_ops udc_ep_ops;

/* received setup data */
static union udc_setup_data setup_data;

/* pointer to device object */
static struct udc *udc;

/* irq spin lock for soft reset */
static DEFINE_SPINLOCK(udc_irq_spinlock);
/* stall spin lock */
static DEFINE_SPINLOCK(udc_stall_spinlock);

/*
 * slave mode: pending bytes in rx fifo after nyet,
 * used if EPIN irq came but no req was available
 */
static unsigned int udc_rxfifo_pending;

/* count soft resets after suspend to avoid loop */
static int soft_reset_occured;
static int soft_reset_after_usbreset_occured;

/* timer */
static struct timer_list udc_timer;
static int stop_timer;

/*
 * set_rde -- Is used to control enabling of RX DMA. The problem is
 * that the UDC has only one bit (RDE) to enable/disable RX DMA for
 * all OUT endpoints. So we have to handle race conditions like
 * when OUT data reaches the fifo but no request was queued yet.
 * This cannot be solved by leaving the RX DMA disabled until a
 * request gets queued, because there may be other OUT packets
 * in the FIFO (important for not blocking control traffic).
 * The value of set_rde controls the corresponding timer.
 *
 * set_rde -1 == not used, means it is allowed to be set to 0 or 1
 * set_rde  0 == do not touch RDE, do not start the RDE timer
 * set_rde  1 == timer function will look whether FIFO has data
 * set_rde  2 == set by timer function to enable RX DMA on next call
 */
static int set_rde = -1;

static DECLARE_COMPLETION(on_exit);
static struct timer_list udc_pollstall_timer;
static int stop_pollstall_timer;
static DECLARE_COMPLETION(on_pollstall_exit);

/* tasklet for usb disconnect */
static DECLARE_TASKLET(disconnect_tasklet, udc_tasklet_disconnect,
		(unsigned long) &udc);


/* endpoint names used for print */
static const char ep0_string[] = "ep0in";
static const char *ep_string[] = {
	ep0_string,
	"ep1in-int", "ep2in-bulk", "ep3in-bulk", "ep4in-bulk", "ep5in-bulk",
	"ep6in-bulk", "ep7in-bulk", "ep8in-bulk", "ep9in-bulk", "ep10in-bulk",
	"ep11in-bulk", "ep12in-bulk", "ep13in-bulk", "ep14in-bulk",
	"ep15in-bulk", "ep0out", "ep1out-bulk", "ep2out-bulk", "ep3out-bulk",
	"ep4out-bulk", "ep5out-bulk", "ep6out-bulk", "ep7out-bulk",
	"ep8out-bulk", "ep9out-bulk", "ep10out-bulk", "ep11out-bulk",
	"ep12out-bulk", "ep13out-bulk", "ep14out-bulk", "ep15out-bulk"
};

/* DMA usage flag */
static int use_dma = 1;
/* packet per buffer dma */
static int use_dma_ppb = 1;
/* with per descr. update */
static int use_dma_ppb_du;
/* buffer fill mode */
static int use_dma_bufferfill_mode;
/* full speed only mode */
static int use_fullspeed;
/* tx buffer size for high speed */
static unsigned long hs_tx_buf = UDC_EPIN_BUFF_SIZE;

/* module parameters */
module_param(use_dma, bool, S_IRUGO);
MODULE_PARM_DESC(use_dma, "true for DMA");
module_param(use_dma_ppb, bool, S_IRUGO);
MODULE_PARM_DESC(use_dma_ppb, "true for DMA in packet per buffer mode");
module_param(use_dma_ppb_du, bool, S_IRUGO);
MODULE_PARM_DESC(use_dma_ppb_du,
	"true for DMA in packet per buffer mode with descriptor update");
module_param(use_fullspeed, bool, S_IRUGO);
MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only");

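/*
 * Usage note (illustrative): all four parameters are read-only module
 * options (S_IRUGO), so they are chosen at load time, e.g.
 *
 *	modprobe amd5536udc use_dma=0
 *
 * to fall back to the PIO/FIFO mode mentioned in the header comment above.
 */
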
/*---------------------------------------------------------------------------*/
/* Prints UDC device registers and endpoint irq registers */
static void print_regs(struct udc *dev)
{
	DBG(dev, "------- Device registers -------\n");
	DBG(dev, "dev config     = %08x\n", readl(&dev->regs->cfg));
	DBG(dev, "dev control    = %08x\n", readl(&dev->regs->ctl));
	DBG(dev, "dev status     = %08x\n", readl(&dev->regs->sts));
	DBG(dev, "\n");
	DBG(dev, "dev int's      = %08x\n", readl(&dev->regs->irqsts));
	DBG(dev, "dev intmask    = %08x\n", readl(&dev->regs->irqmsk));
	DBG(dev, "\n");
	DBG(dev, "dev ep int's   = %08x\n", readl(&dev->regs->ep_irqsts));
	DBG(dev, "dev ep intmask = %08x\n", readl(&dev->regs->ep_irqmsk));
	DBG(dev, "\n");
	DBG(dev, "USE DMA        = %d\n", use_dma);
	if (use_dma && use_dma_ppb && !use_dma_ppb_du) {
		DBG(dev, "DMA mode       = PPBNDU (packet per buffer "
			"WITHOUT desc. update)\n");
		dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "PPBNDU");
	} else if (use_dma && use_dma_ppb && use_dma_ppb_du) {
		DBG(dev, "DMA mode       = PPBDU (packet per buffer "
			"WITH desc. update)\n");
		dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "PPBDU");
	}
	if (use_dma && use_dma_bufferfill_mode) {
		DBG(dev, "DMA mode       = BF (buffer fill mode)\n");
		dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "BF");
	}
	if (!use_dma) {
		dev_info(&dev->pdev->dev, "FIFO mode\n");
	}
	DBG(dev, "-------------------------------------------------------\n");
}

/* Masks unused interrupts */
static int udc_mask_unused_interrupts(struct udc *dev)
{
	u32 tmp;

	/* mask all dev interrupts */
	tmp =	AMD_BIT(UDC_DEVINT_SVC) |
		AMD_BIT(UDC_DEVINT_ENUM) |
		AMD_BIT(UDC_DEVINT_US) |
		AMD_BIT(UDC_DEVINT_UR) |
		AMD_BIT(UDC_DEVINT_ES) |
		AMD_BIT(UDC_DEVINT_SI) |
		AMD_BIT(UDC_DEVINT_SOF) |
		AMD_BIT(UDC_DEVINT_SC);
	writel(tmp, &dev->regs->irqmsk);

	/* mask all ep interrupts */
	writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqmsk);

	return 0;
}

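/*
 * A note on the AMD_* helpers (defined in amd5536udc.h) used throughout
 * this file: AMD_BIT(n) selects single bit n, AMD_UNMASK_BIT(n) is its
 * bitwise complement, and AMD_ADDBITS()/AMD_GETBITS() insert/extract
 * multi-bit fields. For the irq mask registers a 1 bit masks (disables)
 * the interrupt, so the function above masks device interrupts by OR-ing
 * individual AMD_BIT() values, while the enable functions below AND with
 * AMD_UNMASK_BIT() to clear mask bits.
 */
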
/* Enables endpoint 0 interrupts */
static int udc_enable_ep0_interrupts(struct udc *dev)
{
	u32 tmp;

	DBG(dev, "udc_enable_ep0_interrupts()\n");

	/* read irq mask */
	tmp = readl(&dev->regs->ep_irqmsk);
	/* enable ep0 irq's */
	tmp &= AMD_UNMASK_BIT(UDC_EPINT_IN_EP0)
		& AMD_UNMASK_BIT(UDC_EPINT_OUT_EP0);
	writel(tmp, &dev->regs->ep_irqmsk);

	return 0;
}

/* Enables device interrupts for SET_INTF and SET_CONFIG */
static int udc_enable_dev_setup_interrupts(struct udc *dev)
{
	u32 tmp;

	DBG(dev, "enable device interrupts for setup data\n");

	/* read irq mask */
	tmp = readl(&dev->regs->irqmsk);

	/* enable SET_INTERFACE, SET_CONFIG and other needed irq's */
	tmp &= AMD_UNMASK_BIT(UDC_DEVINT_SI)
		& AMD_UNMASK_BIT(UDC_DEVINT_SC)
		& AMD_UNMASK_BIT(UDC_DEVINT_UR)
		& AMD_UNMASK_BIT(UDC_DEVINT_SVC)
		& AMD_UNMASK_BIT(UDC_DEVINT_ENUM);
	writel(tmp, &dev->regs->irqmsk);

	return 0;
}

/* Calculates fifo start of endpoint based on preceding endpoints */
static int udc_set_txfifo_addr(struct udc_ep *ep)
{
	struct udc *dev;
	u32 tmp;
	int i;

	if (!ep || !(ep->in))
		return -EINVAL;

	dev = ep->dev;
	ep->txfifo = dev->txfifo;

	/* traverse ep's */
	for (i = 0; i < ep->num; i++) {
		if (dev->ep[i].regs) {
			/* read fifo size */
			tmp = readl(&dev->ep[i].regs->bufin_framenum);
			tmp = AMD_GETBITS(tmp, UDC_EPIN_BUFF_SIZE);
			ep->txfifo += tmp;
		}
	}
	return 0;
}

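/*
 * Illustration (hypothetical sizes): the TX fifo is a single region that
 * is carved up in endpoint order, so if ep1in and ep2in were programmed
 * with buffer sizes of 256 and 128 entries in bufin_framenum, ep3in's
 * txfifo base computed above would be dev->txfifo advanced by 256 + 128
 * entries.
 */
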
/* CNAK pending field: bit0 = ep0in, bit16 = ep0out */
static u32 cnak_pending;

static void UDC_QUEUE_CNAK(struct udc_ep *ep, unsigned num)
{
	if (readl(&ep->regs->ctl) & AMD_BIT(UDC_EPCTL_NAK)) {
		DBG(ep->dev, "NAK could not be cleared for ep%d\n", num);
		cnak_pending |= 1 << (num);
		ep->naking = 1;
	} else
		cnak_pending = cnak_pending & (~(1 << (num)));
}


/* Enables endpoint, is called by gadget driver */
static int
udc_ep_enable(struct usb_ep *usbep, const struct usb_endpoint_descriptor *desc)
{
	struct udc_ep *ep;
	struct udc *dev;
	u32 tmp;
	unsigned long iflags;
	u8 udc_csr_epix;
	unsigned maxpacket;

	if (!usbep
			|| usbep->name == ep0_string
			|| !desc
			|| desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;

	ep = container_of(usbep, struct udc_ep, ep);
	dev = ep->dev;

	DBG(dev, "udc_ep_enable() ep %d\n", ep->num);

	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	spin_lock_irqsave(&dev->lock, iflags);
	ep->desc = desc;

	ep->halted = 0;

	/* set traffic type */
	tmp = readl(&dev->ep[ep->num].regs->ctl);
	tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_EPCTL_ET);
	writel(tmp, &dev->ep[ep->num].regs->ctl);

	/* set max packet size */
	maxpacket = le16_to_cpu(desc->wMaxPacketSize);
	tmp = readl(&dev->ep[ep->num].regs->bufout_maxpkt);
	tmp = AMD_ADDBITS(tmp, maxpacket, UDC_EP_MAX_PKT_SIZE);
	ep->ep.maxpacket = maxpacket;
	writel(tmp, &dev->ep[ep->num].regs->bufout_maxpkt);

	/* IN ep */
	if (ep->in) {

		/* ep ix in UDC CSR register space */
		udc_csr_epix = ep->num;

		/* set buffer size (tx fifo entries) */
		tmp = readl(&dev->ep[ep->num].regs->bufin_framenum);
		/* double buffering: fifo size = 2 x max packet size */
		tmp = AMD_ADDBITS(
				tmp,
				maxpacket * UDC_EPIN_BUFF_SIZE_MULT
					/ UDC_DWORD_BYTES,
				UDC_EPIN_BUFF_SIZE);
		writel(tmp, &dev->ep[ep->num].regs->bufin_framenum);

		/* calc. tx fifo base addr */
		udc_set_txfifo_addr(ep);

		/* flush fifo */
		tmp = readl(&ep->regs->ctl);
		tmp |= AMD_BIT(UDC_EPCTL_F);
		writel(tmp, &ep->regs->ctl);

	/* OUT ep */
	} else {
		/* ep ix in UDC CSR register space */
		udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;

		/* set max packet size UDC CSR */
		tmp = readl(&dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);
		tmp = AMD_ADDBITS(tmp, maxpacket,
					UDC_CSR_NE_MAX_PKT);
		writel(tmp, &dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);

		if (use_dma && !ep->in) {
			/* alloc and init BNA dummy request */
			ep->bna_dummy_req = udc_alloc_bna_dummy(ep);
			ep->bna_occurred = 0;
		}

		if (ep->num != UDC_EP0OUT_IX)
			dev->data_ep_enabled = 1;
	}

	/* set ep values */
	tmp = readl(&dev->csr->ne[udc_csr_epix]);
	/* max packet */
	tmp = AMD_ADDBITS(tmp, maxpacket, UDC_CSR_NE_MAX_PKT);
	/* ep number */
	tmp = AMD_ADDBITS(tmp, desc->bEndpointAddress, UDC_CSR_NE_NUM);
	/* ep direction */
	tmp = AMD_ADDBITS(tmp, ep->in, UDC_CSR_NE_DIR);
	/* ep type */
	tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_CSR_NE_TYPE);
	/* ep config */
	tmp = AMD_ADDBITS(tmp, ep->dev->cur_config, UDC_CSR_NE_CFG);
	/* ep interface */
	tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf, UDC_CSR_NE_INTF);
	/* ep alt */
	tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt, UDC_CSR_NE_ALT);
	/* write reg */
	writel(tmp, &dev->csr->ne[udc_csr_epix]);

	/* enable ep irq */
	tmp = readl(&dev->regs->ep_irqmsk);
	tmp &= AMD_UNMASK_BIT(ep->num);
	writel(tmp, &dev->regs->ep_irqmsk);

	/*
	 * clear NAK by writing CNAK
	 * avoid BNA for OUT DMA, don't clear NAK until DMA desc. written
	 */
	if (!use_dma || ep->in) {
		tmp = readl(&ep->regs->ctl);
		tmp |= AMD_BIT(UDC_EPCTL_CNAK);
		writel(tmp, &ep->regs->ctl);
		ep->naking = 0;
		UDC_QUEUE_CNAK(ep, ep->num);
	}
	tmp = desc->bEndpointAddress;
	DBG(dev, "%s enabled\n", usbep->name);

	spin_unlock_irqrestore(&dev->lock, iflags);
	return 0;
}

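/*
 * Worked example for the IN fifo sizing above (assuming UDC_DWORD_BYTES
 * is 4 and UDC_EPIN_BUFF_SIZE_MULT is 2 for the double buffering noted
 * in the comment): a high speed bulk endpoint with maxpacket = 512 gets
 * a buffer size field of 512 * 2 / 4 = 256 dwords.
 */
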
/* Resets endpoint */
static void ep_init(struct udc_regs __iomem *regs, struct udc_ep *ep)
{
	u32 tmp;

	VDBG(ep->dev, "ep-%d reset\n", ep->num);
	ep->desc = NULL;
	ep->ep.ops = &udc_ep_ops;
	INIT_LIST_HEAD(&ep->queue);

	ep->ep.maxpacket = (u16) ~0;
	/* set NAK */
	tmp = readl(&ep->regs->ctl);
	tmp |= AMD_BIT(UDC_EPCTL_SNAK);
	writel(tmp, &ep->regs->ctl);
	ep->naking = 1;

	/* disable interrupt */
	tmp = readl(&regs->ep_irqmsk);
	tmp |= AMD_BIT(ep->num);
	writel(tmp, &regs->ep_irqmsk);

	if (ep->in) {
		/* unset P and IN bit of potential former DMA */
		tmp = readl(&ep->regs->ctl);
		tmp &= AMD_UNMASK_BIT(UDC_EPCTL_P);
		writel(tmp, &ep->regs->ctl);

		tmp = readl(&ep->regs->sts);
		tmp |= AMD_BIT(UDC_EPSTS_IN);
		writel(tmp, &ep->regs->sts);

		/* flush the fifo */
		tmp = readl(&ep->regs->ctl);
		tmp |= AMD_BIT(UDC_EPCTL_F);
		writel(tmp, &ep->regs->ctl);

	}
	/* reset desc pointer */
	writel(0, &ep->regs->desptr);
}

/* Disables endpoint, is called by gadget driver */
static int udc_ep_disable(struct usb_ep *usbep)
{
	struct udc_ep *ep = NULL;
	unsigned long iflags;

	if (!usbep)
		return -EINVAL;

	ep = container_of(usbep, struct udc_ep, ep);
	if (usbep->name == ep0_string || !ep->desc)
		return -EINVAL;

	DBG(ep->dev, "Disable ep-%d\n", ep->num);

	spin_lock_irqsave(&ep->dev->lock, iflags);
	udc_free_request(&ep->ep, &ep->bna_dummy_req->req);
	empty_req_queue(ep);
	ep_init(ep->dev->regs, ep);
	spin_unlock_irqrestore(&ep->dev->lock, iflags);

	return 0;
}

/* Allocates request packet, called by gadget driver */
static struct usb_request *
udc_alloc_request(struct usb_ep *usbep, gfp_t gfp)
{
	struct udc_request *req;
	struct udc_data_dma *dma_desc;
	struct udc_ep *ep;

	if (!usbep)
		return NULL;

	ep = container_of(usbep, struct udc_ep, ep);

	VDBG(ep->dev, "udc_alloc_req(): ep%d\n", ep->num);
	req = kzalloc(sizeof(struct udc_request), gfp);
	if (!req)
		return NULL;

	req->req.dma = DMA_DONT_USE;
	INIT_LIST_HEAD(&req->queue);

	if (ep->dma) {
		/* ep0 in requests are allocated from data pool here */
		dma_desc = pci_pool_alloc(ep->dev->data_requests, gfp,
						&req->td_phys);
		if (!dma_desc) {
			kfree(req);
			return NULL;
		}

		VDBG(ep->dev, "udc_alloc_req: req = %p dma_desc = %p, "
				"td_phys = %lx\n",
				req, dma_desc,
				(unsigned long)req->td_phys);
		/* prevent from using desc. - set HOST BUSY */
		dma_desc->status = AMD_ADDBITS(dma_desc->status,
						UDC_DMA_STP_STS_BS_HOST_BUSY,
						UDC_DMA_STP_STS_BS);
		dma_desc->bufptr = cpu_to_le32(DMA_DONT_USE);
		req->td_data = dma_desc;
		req->td_data_last = NULL;
		req->chain_len = 1;
	}

	return &req->req;
}

/* Frees request packet, called by gadget driver */
static void
udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq)
{
	struct udc_ep *ep;
	struct udc_request *req;

	if (!usbep || !usbreq)
		return;

	ep = container_of(usbep, struct udc_ep, ep);
	req = container_of(usbreq, struct udc_request, req);
	VDBG(ep->dev, "free_req req=%p\n", req);
	BUG_ON(!list_empty(&req->queue));
	if (req->td_data) {
		VDBG(ep->dev, "req->td_data=%p\n", req->td_data);

		/* free dma chain if created */
		if (req->chain_len > 1) {
			udc_free_dma_chain(ep->dev, req);
		}

		pci_pool_free(ep->dev->data_requests, req->td_data,
							req->td_phys);
	}
	kfree(req);
}

/* Init BNA dummy descriptor for HOST BUSY and pointing to itself */
static void udc_init_bna_dummy(struct udc_request *req)
{
	if (req) {
		/* set last bit */
		req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);
		/* set next pointer to itself */
		req->td_data->next = req->td_phys;
		/* set HOST BUSY */
		req->td_data->status
			= AMD_ADDBITS(req->td_data->status,
					UDC_DMA_STP_STS_BS_DMA_DONE,
					UDC_DMA_STP_STS_BS);
#ifdef UDC_VERBOSE
		pr_debug("bna desc = %p, sts = %08x\n",
			req->td_data, req->td_data->status);
#endif
	}
}

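/*
 * Background (interpretation): BNA ("buffer not available") is signalled
 * when the DMA engine finds no usable descriptor for OUT data. The dummy
 * descriptor prepared above is marked as last and chained to itself,
 * giving the engine a safe place to park until a real descriptor is
 * written in udc_queue() (see the bna_occurred handling there).
 */
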
/* Allocate BNA dummy descriptor */
static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep)
{
	struct udc_request *req = NULL;
	struct usb_request *_req = NULL;

	/* alloc the dummy request */
	_req = udc_alloc_request(&ep->ep, GFP_ATOMIC);
	if (_req) {
		req = container_of(_req, struct udc_request, req);
		ep->bna_dummy_req = req;
		udc_init_bna_dummy(req);
	}
	return req;
}

/* Write data to TX fifo for IN packets */
static void
udc_txfifo_write(struct udc_ep *ep, struct usb_request *req)
{
	u8 *req_buf;
	u32 *buf;
	int i, j;
	unsigned bytes = 0;
	unsigned remaining = 0;

	if (!req || !ep)
		return;

	req_buf = req->buf + req->actual;
	prefetch(req_buf);
	remaining = req->length - req->actual;

	buf = (u32 *) req_buf;

	bytes = ep->ep.maxpacket;
	if (bytes > remaining)
		bytes = remaining;

	/* dwords first */
	for (i = 0; i < bytes / UDC_DWORD_BYTES; i++) {
		writel(*(buf + i), ep->txfifo);
	}

	/* remaining bytes must be written by byte access */
	for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
		writeb((u8)(*(buf + i) >> (j << UDC_BITS_PER_BYTE_SHIFT)),
							ep->txfifo);
	}

	/* dummy write confirm */
	writel(0, &ep->regs->confirm);
}

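/*
 * Example of the dword/byte split above: with 13 bytes to send, the first
 * loop issues 13 / 4 = 3 dword writes (12 bytes) and the second loop
 * writes the remaining 13 % 4 = 1 byte, extracted from the final partial
 * dword by shifting.
 */
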
/* Read dwords from RX fifo for OUT transfers */
static int udc_rxfifo_read_dwords(struct udc *dev, u32 *buf, int dwords)
{
	int i;

	VDBG(dev, "udc_read_dwords(): %d dwords\n", dwords);

	for (i = 0; i < dwords; i++) {
		*(buf + i) = readl(dev->rxfifo);
	}
	return 0;
}

/* Read bytes from RX fifo for OUT transfers */
static int udc_rxfifo_read_bytes(struct udc *dev, u8 *buf, int bytes)
{
	int i, j;
	u32 tmp;

	VDBG(dev, "udc_read_bytes(): %d bytes\n", bytes);

	/* dwords first */
	for (i = 0; i < bytes / UDC_DWORD_BYTES; i++) {
		*((u32 *)(buf + (i << 2))) = readl(dev->rxfifo);
	}

	/* remaining bytes must be read by byte access */
	if (bytes % UDC_DWORD_BYTES) {
		tmp = readl(dev->rxfifo);
		for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
			*(buf + (i << 2) + j) = (u8)(tmp & UDC_BYTE_MASK);
			tmp = tmp >> UDC_BITS_PER_BYTE;
		}
	}

	return 0;
}

/* Read data from RX fifo for OUT transfers */
static int
udc_rxfifo_read(struct udc_ep *ep, struct udc_request *req)
{
	u8 *buf;
	unsigned buf_space;
	unsigned bytes = 0;
	unsigned finished = 0;

	/* received number of bytes */
	bytes = readl(&ep->regs->sts);
	bytes = AMD_GETBITS(bytes, UDC_EPSTS_RX_PKT_SIZE);

	buf_space = req->req.length - req->req.actual;
	buf = req->req.buf + req->req.actual;
	if (bytes > buf_space) {
		if ((buf_space % ep->ep.maxpacket) != 0) {
			DBG(ep->dev,
				"%s: rx %d bytes, rx-buf space = %d bytes\n",
				ep->ep.name, bytes, buf_space);
			req->req.status = -EOVERFLOW;
		}
		bytes = buf_space;
	}
	req->req.actual += bytes;

	/* last packet ? */
	if (((bytes % ep->ep.maxpacket) != 0) || (!bytes)
		|| ((req->req.actual == req->req.length) && !req->req.zero))
		finished = 1;

	/* read rx fifo bytes */
	VDBG(ep->dev, "ep %s: rxfifo read %d bytes\n", ep->ep.name, bytes);
	udc_rxfifo_read_bytes(ep->dev, buf, bytes);

	return finished;
}

/* create/re-init a DMA descriptor or a DMA descriptor chain */
static int prep_dma(struct udc_ep *ep, struct udc_request *req, gfp_t gfp)
{
	int retval = 0;
	u32 tmp;

	VDBG(ep->dev, "prep_dma\n");
	VDBG(ep->dev, "prep_dma ep%d req->td_data=%p\n",
			ep->num, req->td_data);

	/* set buffer pointer */
	req->td_data->bufptr = req->req.dma;

	/* set last bit */
	req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);

	/* build/re-init dma chain if maxpkt scatter mode, not for EP0 */
	if (use_dma_ppb) {

		retval = udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
		if (retval != 0) {
			if (retval == -ENOMEM)
				DBG(ep->dev, "Out of DMA memory\n");
			return retval;
		}
		if (ep->in) {
			if (req->req.length == ep->ep.maxpacket) {
				/* write tx bytes */
				req->td_data->status =
					AMD_ADDBITS(req->td_data->status,
						ep->ep.maxpacket,
						UDC_DMA_IN_STS_TXBYTES);

			}
		}

	}

	if (ep->in) {
		VDBG(ep->dev, "IN: use_dma_ppb=%d req->req.len=%d "
				"maxpacket=%d ep%d\n",
				use_dma_ppb, req->req.length,
				ep->ep.maxpacket, ep->num);
		/*
		 * if bytes < max packet then tx bytes must
		 * be written in packet per buffer mode
		 */
		if (!use_dma_ppb || req->req.length < ep->ep.maxpacket
				|| ep->num == UDC_EP0OUT_IX
				|| ep->num == UDC_EP0IN_IX) {
			/* write tx bytes */
			req->td_data->status =
				AMD_ADDBITS(req->td_data->status,
						req->req.length,
						UDC_DMA_IN_STS_TXBYTES);
			/* reset frame num */
			req->td_data->status =
				AMD_ADDBITS(req->td_data->status,
						0,
						UDC_DMA_IN_STS_FRAMENUM);
		}
		/* set HOST BUSY */
		req->td_data->status =
			AMD_ADDBITS(req->td_data->status,
				UDC_DMA_STP_STS_BS_HOST_BUSY,
				UDC_DMA_STP_STS_BS);
	} else {
		VDBG(ep->dev, "OUT set host ready\n");
		/* set HOST READY */
		req->td_data->status =
			AMD_ADDBITS(req->td_data->status,
				UDC_DMA_STP_STS_BS_HOST_READY,
				UDC_DMA_STP_STS_BS);

		/* clear NAK by writing CNAK */
		if (ep->naking) {
			tmp = readl(&ep->regs->ctl);
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &ep->regs->ctl);
			ep->naking = 0;
			UDC_QUEUE_CNAK(ep, ep->num);
		}

	}

	return retval;
}

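/*
 * Ownership note: the BS (buffer status) field passes a descriptor back
 * and forth between driver and DMA engine. HOST_READY hands it to the
 * hardware, HOST_BUSY keeps it on the driver side. IN descriptors are
 * therefore left HOST_BUSY here and only flipped to HOST_READY in
 * udc_queue(), right before the descriptor pointer register is written.
 */
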
/* Completes request packet ... caller MUST hold lock */
static void
complete_req(struct udc_ep *ep, struct udc_request *req, int sts)
__releases(ep->dev->lock)
__acquires(ep->dev->lock)
{
	struct udc *dev;
	unsigned halted;

	VDBG(ep->dev, "complete_req(): ep%d\n", ep->num);

	dev = ep->dev;
	/* unmap DMA */
	if (req->dma_mapping) {
		if (ep->in)
			pci_unmap_single(dev->pdev,
					req->req.dma,
					req->req.length,
					PCI_DMA_TODEVICE);
		else
			pci_unmap_single(dev->pdev,
					req->req.dma,
					req->req.length,
					PCI_DMA_FROMDEVICE);
		req->dma_mapping = 0;
		req->req.dma = DMA_DONT_USE;
	}

	halted = ep->halted;
	ep->halted = 1;

	/* set new status if pending */
	if (req->req.status == -EINPROGRESS)
		req->req.status = sts;

	/* remove from ep queue */
	list_del_init(&req->queue);

	VDBG(ep->dev, "req %p => complete %d bytes at %s with sts %d\n",
		&req->req, req->req.length, ep->ep.name, sts);

	spin_unlock(&dev->lock);
	req->req.complete(&ep->ep, &req->req);
	spin_lock(&dev->lock);
	ep->halted = halted;
}

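/*
 * The unlock/complete/lock sequence above (matching the __releases and
 * __acquires annotations) is needed because the gadget's completion
 * callback may call back into this driver, e.g. to queue the next
 * request; invoking it with dev->lock held could deadlock.
 */
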
/* frees pci pool descriptors of a DMA chain */
static int udc_free_dma_chain(struct udc *dev, struct udc_request *req)
{

	int ret_val = 0;
	struct udc_data_dma *td;
	struct udc_data_dma *td_last = NULL;
	unsigned int i;

	DBG(dev, "free chain req = %p\n", req);

	/* do not free first desc., will be done by free for request */
	td_last = req->td_data;
	td = phys_to_virt(td_last->next);

	for (i = 1; i < req->chain_len; i++) {

		pci_pool_free(dev->data_requests, td,
				(dma_addr_t) td_last->next);
		td_last = td;
		td = phys_to_virt(td_last->next);
	}

	return ret_val;
}

/* Iterates to the end of a DMA chain and returns last descriptor */
static struct udc_data_dma *udc_get_last_dma_desc(struct udc_request *req)
{
	struct udc_data_dma *td;

	td = req->td_data;
	while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L))) {
		td = phys_to_virt(td->next);
	}

	return td;

}

/* Iterates to the end of a DMA chain and counts bytes received */
static u32 udc_get_ppbdu_rxbytes(struct udc_request *req)
{
	struct udc_data_dma *td;
	u32 count;

	td = req->td_data;
	/* received number of bytes */
	count = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_RXBYTES);

	while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L))) {
		td = phys_to_virt(td->next);
		/* received number of bytes */
		if (td) {
			count += AMD_GETBITS(td->status,
				UDC_DMA_OUT_STS_RXBYTES);
		}
	}

	return count;

}

/* Creates or re-inits a DMA chain */
static int udc_create_dma_chain(
	struct udc_ep *ep,
	struct udc_request *req,
	unsigned long buf_len, gfp_t gfp_flags
)
{
	unsigned long bytes = req->req.length;
	unsigned int i;
	dma_addr_t dma_addr;
	struct udc_data_dma *td = NULL;
	struct udc_data_dma *last = NULL;
	unsigned long txbytes;
	unsigned create_new_chain = 0;
	unsigned len;

	VDBG(ep->dev, "udc_create_dma_chain: bytes=%ld buf_len=%ld\n",
			bytes, buf_len);
	dma_addr = DMA_DONT_USE;

	/* unset L bit in first desc for OUT */
	if (!ep->in) {
		req->td_data->status &= AMD_CLEAR_BIT(UDC_DMA_IN_STS_L);
	}

	/* alloc only new desc's if not already available */
	len = req->req.length / ep->ep.maxpacket;
	if (req->req.length % ep->ep.maxpacket) {
		len++;
	}

	if (len > req->chain_len) {
		/* shorter chain already allocated before */
		if (req->chain_len > 1) {
			udc_free_dma_chain(ep->dev, req);
		}
		req->chain_len = len;
		create_new_chain = 1;
	}

	td = req->td_data;
	/* gen. required number of descriptors and buffers */
	for (i = buf_len; i < bytes; i += buf_len) {
		/* create or determine next desc. */
		if (create_new_chain) {

			td = pci_pool_alloc(ep->dev->data_requests,
					gfp_flags, &dma_addr);
			if (!td)
				return -ENOMEM;

			td->status = 0;
		} else if (i == buf_len) {
			/* first td */
			td = (struct udc_data_dma *) phys_to_virt(
						req->td_data->next);
			td->status = 0;
		} else {
			td = (struct udc_data_dma *) phys_to_virt(last->next);
			td->status = 0;
		}


		if (td)
			td->bufptr = req->req.dma + i; /* assign buffer */
		else
			break;

		/* short packet ? */
		if ((bytes - i) >= buf_len) {
			txbytes = buf_len;
		} else {
			/* short packet */
			txbytes = bytes - i;
		}

		/* link td and assign tx bytes */
		if (i == buf_len) {
			if (create_new_chain) {
				req->td_data->next = dma_addr;
			} else {
				/* req->td_data->next = virt_to_phys(td); */
			}
			/* write tx bytes */
			if (ep->in) {
				/* first desc */
				req->td_data->status =
					AMD_ADDBITS(req->td_data->status,
							ep->ep.maxpacket,
							UDC_DMA_IN_STS_TXBYTES);
				/* second desc */
				td->status = AMD_ADDBITS(td->status,
							txbytes,
							UDC_DMA_IN_STS_TXBYTES);
			}
		} else {
			if (create_new_chain) {
				last->next = dma_addr;
			} else {
				/* last->next = virt_to_phys(td); */
			}
			if (ep->in) {
				/* write tx bytes */
				td->status = AMD_ADDBITS(td->status,
							txbytes,
							UDC_DMA_IN_STS_TXBYTES);
			}
		}
		last = td;
	}
	/* set last bit */
	if (td) {
		td->status |= AMD_BIT(UDC_DMA_IN_STS_L);
		/* last desc. points to itself */
		req->td_data_last = td;
	}

	return 0;
}

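/*
 * Worked example (illustrative numbers): an OUT request of 1500 bytes on
 * an endpoint with maxpacket 512 yields len = 1500 / 512 = 2 plus 1 for
 * the remainder, i.e. a chain of 3 descriptors. The loop above then sets
 * up the 2nd and 3rd descriptors with bufptr at req->req.dma + 512 and
 * + 1024, and the last descriptor gets the L bit.
 */
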
/* Enabling RX DMA */
static void udc_set_rde(struct udc *dev)
{
	u32 tmp;

	VDBG(dev, "udc_set_rde()\n");
	/* stop RDE timer */
	if (timer_pending(&udc_timer)) {
		set_rde = 0;
		mod_timer(&udc_timer, jiffies - 1);
	}
	/* set RDE */
	tmp = readl(&dev->regs->ctl);
	tmp |= AMD_BIT(UDC_DEVCTL_RDE);
	writel(tmp, &dev->regs->ctl);
}

/* Queues a request packet, called by gadget driver */
static int
udc_queue(struct usb_ep *usbep, struct usb_request *usbreq, gfp_t gfp)
{
	int retval = 0;
	u8 open_rxfifo = 0;
	unsigned long iflags;
	struct udc_ep *ep;
	struct udc_request *req;
	struct udc *dev;
	u32 tmp;

	/* check the inputs */
	req = container_of(usbreq, struct udc_request, req);

	if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf
			|| !list_empty(&req->queue))
		return -EINVAL;

	ep = container_of(usbep, struct udc_ep, ep);
	if (!ep->desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
		return -EINVAL;

	VDBG(ep->dev, "udc_queue(): ep%d-in=%d\n", ep->num, ep->in);
	dev = ep->dev;

	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	/* map dma (usually done before) */
	if (ep->dma && usbreq->length != 0
			&& (usbreq->dma == DMA_DONT_USE || usbreq->dma == 0)) {
		VDBG(dev, "DMA map req %p\n", req);
		if (ep->in)
			usbreq->dma = pci_map_single(dev->pdev,
						usbreq->buf,
						usbreq->length,
						PCI_DMA_TODEVICE);
		else
			usbreq->dma = pci_map_single(dev->pdev,
						usbreq->buf,
						usbreq->length,
						PCI_DMA_FROMDEVICE);
		req->dma_mapping = 1;
	}

	VDBG(dev, "%s queue req %p, len %d req->td_data=%p buf %p\n",
			usbep->name, usbreq, usbreq->length,
			req->td_data, usbreq->buf);

	spin_lock_irqsave(&dev->lock, iflags);
	usbreq->actual = 0;
	usbreq->status = -EINPROGRESS;
	req->dma_done = 0;

	/* on empty queue just do first transfer */
	if (list_empty(&ep->queue)) {
		/* zlp */
		if (usbreq->length == 0) {
			/* IN zlp's are handled by hardware */
			complete_req(ep, req, 0);
			VDBG(dev, "%s: zlp\n", ep->ep.name);
			/*
			 * if set_config or set_intf is waiting for ack by zlp
			 * then set CSR_DONE
			 */
			if (dev->set_cfg_not_acked) {
				tmp = readl(&dev->regs->ctl);
				tmp |= AMD_BIT(UDC_DEVCTL_CSR_DONE);
				writel(tmp, &dev->regs->ctl);
				dev->set_cfg_not_acked = 0;
			}
			/* setup command is ACK'ed now by zlp */
			if (dev->waiting_zlp_ack_ep0in) {
				/* clear NAK by writing CNAK in EP0_IN */
				tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
				tmp |= AMD_BIT(UDC_EPCTL_CNAK);
				writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
				dev->ep[UDC_EP0IN_IX].naking = 0;
				UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX],
							UDC_EP0IN_IX);
				dev->waiting_zlp_ack_ep0in = 0;
			}
			goto finished;
		}
		if (ep->dma) {
			retval = prep_dma(ep, req, gfp);
			if (retval != 0)
				goto finished;
			/* write desc pointer to enable DMA */
			if (ep->in) {
				/* set HOST READY */
				req->td_data->status =
					AMD_ADDBITS(req->td_data->status,
						UDC_DMA_IN_STS_BS_HOST_READY,
						UDC_DMA_IN_STS_BS);
			}

			/* disable RX DMA while descriptor update is pending */
			if (!ep->in) {
				/* stop RDE timer */
				if (timer_pending(&udc_timer)) {
					set_rde = 0;
					mod_timer(&udc_timer, jiffies - 1);
				}
				/* clear RDE */
				tmp = readl(&dev->regs->ctl);
				tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
				writel(tmp, &dev->regs->ctl);
				open_rxfifo = 1;

				/*
				 * if BNA occurred then let BNA dummy desc.
				 * point to current desc.
				 */
				if (ep->bna_occurred) {
					VDBG(dev, "copy to BNA dummy desc.\n");
					memcpy(ep->bna_dummy_req->td_data,
						req->td_data,
						sizeof(struct udc_data_dma));
				}
			}
			/* write desc pointer */
			writel(req->td_phys, &ep->regs->desptr);

			/* clear NAK by writing CNAK */
			if (ep->naking) {
				tmp = readl(&ep->regs->ctl);
				tmp |= AMD_BIT(UDC_EPCTL_CNAK);
				writel(tmp, &ep->regs->ctl);
				ep->naking = 0;
				UDC_QUEUE_CNAK(ep, ep->num);
			}

			if (ep->in) {
				/* enable ep irq */
				tmp = readl(&dev->regs->ep_irqmsk);
				tmp &= AMD_UNMASK_BIT(ep->num);
				writel(tmp, &dev->regs->ep_irqmsk);
			}
		} else if (ep->in) {
			/* enable ep irq */
			tmp = readl(&dev->regs->ep_irqmsk);
			tmp &= AMD_UNMASK_BIT(ep->num);
			writel(tmp, &dev->regs->ep_irqmsk);
		}

	} else if (ep->dma) {

		/*
		 * prep_dma is not used for OUT ep's here; this is not
		 * possible for PPB modes, because of chain creation reasons
		 */
		if (ep->in) {
			retval = prep_dma(ep, req, gfp);
			if (retval != 0)
				goto finished;
		}
	}
	VDBG(dev, "list_add\n");
	/* add request to ep queue */
	if (req) {

		list_add_tail(&req->queue, &ep->queue);

		/* open rxfifo if out data queued */
		if (open_rxfifo) {
			/* enable DMA */
			req->dma_going = 1;
			udc_set_rde(dev);
			if (ep->num != UDC_EP0OUT_IX)
				dev->data_ep_queued = 1;
		}
		/* stop OUT naking */
		if (!ep->in) {
			if (!use_dma && udc_rxfifo_pending) {
				DBG(dev, "udc_queue(): pending bytes in "
					"rxfifo after nyet\n");
				/*
				 * read pending bytes after nyet:
				 * referring to isr
				 */
				if (udc_rxfifo_read(ep, req)) {
					/* finish */
					complete_req(ep, req, 0);
				}
				udc_rxfifo_pending = 0;

			}
		}
	}

finished:
	spin_unlock_irqrestore(&dev->lock, iflags);
	return retval;
}

/* Empty request queue of an endpoint; caller holds spinlock */
static void empty_req_queue(struct udc_ep *ep)
{
	struct udc_request *req;

	ep->halted = 1;
	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next,
			struct udc_request,
			queue);
		complete_req(ep, req, -ESHUTDOWN);
	}
}

/* Dequeues a request packet, called by gadget driver */
static int udc_dequeue(struct usb_ep *usbep, struct usb_request *usbreq)
{
	struct udc_ep *ep;
	struct udc_request *req;
	unsigned halted;
	unsigned long iflags;

	ep = container_of(usbep, struct udc_ep, ep);
	if (!usbep || !usbreq || (!ep->desc && (ep->num != 0
				&& ep->num != UDC_EP0OUT_IX)))
		return -EINVAL;

	req = container_of(usbreq, struct udc_request, req);

	spin_lock_irqsave(&ep->dev->lock, iflags);
	halted = ep->halted;
	ep->halted = 1;
	/* request in processing or next one */
	if (ep->queue.next == &req->queue) {
		if (ep->dma && req->dma_going) {
			if (ep->in)
				ep->cancel_transfer = 1;
			else {
				u32 tmp;
				u32 dma_sts;
				/* stop potential receive DMA */
				tmp = readl(&udc->regs->ctl);
				writel(tmp & AMD_UNMASK_BIT(UDC_DEVCTL_RDE),
							&udc->regs->ctl);
				/*
				 * Cancel transfer later in ISR
				 * if descriptor was touched.
				 */
				dma_sts = AMD_GETBITS(req->td_data->status,
							UDC_DMA_OUT_STS_BS);
				if (dma_sts != UDC_DMA_OUT_STS_BS_HOST_READY)
					ep->cancel_transfer = 1;
				else {
					udc_init_bna_dummy(ep->req);
					writel(ep->bna_dummy_req->td_phys,
						&ep->regs->desptr);
				}
				writel(tmp, &udc->regs->ctl);
			}
		}
	}
	complete_req(ep, req, -ECONNRESET);
	ep->halted = halted;

	spin_unlock_irqrestore(&ep->dev->lock, iflags);
	return 0;
}

/* Halt or clear halt of endpoint */
static int
udc_set_halt(struct usb_ep *usbep, int halt)
{
	struct udc_ep *ep;
	u32 tmp;
	unsigned long iflags;
	int retval = 0;

	if (!usbep)
		return -EINVAL;

	pr_debug("set_halt %s: halt=%d\n", usbep->name, halt);

	ep = container_of(usbep, struct udc_ep, ep);
	if (!ep->desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
		return -EINVAL;
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	spin_lock_irqsave(&udc_stall_spinlock, iflags);
	/* halt or clear halt */
	if (halt) {
		if (ep->num == 0)
			ep->dev->stall_ep0in = 1;
		else {
			/*
			 * set STALL
			 * rxfifo empty not taken into account
			 */
			tmp = readl(&ep->regs->ctl);
			tmp |= AMD_BIT(UDC_EPCTL_S);
			writel(tmp, &ep->regs->ctl);
			ep->halted = 1;

			/* setup poll timer */
			if (!timer_pending(&udc_pollstall_timer)) {
				udc_pollstall_timer.expires = jiffies +
					HZ * UDC_POLLSTALL_TIMER_USECONDS
					/ (1000 * 1000);
				if (!stop_pollstall_timer) {
					DBG(ep->dev, "start polltimer\n");
					add_timer(&udc_pollstall_timer);
				}
			}
		}
	} else {
		/* ep is halted by set_halt() before */
		if (ep->halted) {
			tmp = readl(&ep->regs->ctl);
			/* clear stall bit */
			tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
			/* clear NAK by writing CNAK */
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &ep->regs->ctl);
			ep->halted = 0;
			UDC_QUEUE_CNAK(ep, ep->num);
		}
	}
	spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
	return retval;
}

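/*
 * Timer note: the pollstall expiry above converts a microsecond constant
 * into jiffies via HZ * UDC_POLLSTALL_TIMER_USECONDS / (1000 * 1000);
 * for small constants and low HZ the integer division can come out as 0,
 * which simply means the timer fires on the next tick.
 */
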
/* gadget interface */
static const struct usb_ep_ops udc_ep_ops = {
	.enable		= udc_ep_enable,
	.disable	= udc_ep_disable,

	.alloc_request	= udc_alloc_request,
	.free_request	= udc_free_request,

	.queue		= udc_queue,
	.dequeue	= udc_dequeue,

	.set_halt	= udc_set_halt,
	/* fifo ops not implemented */
};

/*-------------------------------------------------------------------------*/

/* Get frame counter (not implemented) */
static int udc_get_frame(struct usb_gadget *gadget)
{
	return -EOPNOTSUPP;
}

/* Remote wakeup gadget interface */
static int udc_wakeup(struct usb_gadget *gadget)
{
	struct udc *dev;

	if (!gadget)
		return -EINVAL;
	dev = container_of(gadget, struct udc, gadget);
	udc_remote_wakeup(dev);

	return 0;
}

/* gadget operations */
static const struct usb_gadget_ops udc_ops = {
	.wakeup		= udc_wakeup,
	.get_frame	= udc_get_frame,
};

/* Sets up endpoint parameters, adds endpoints to linked list */
static void make_ep_lists(struct udc *dev)
{
	/* make gadget ep lists */
	INIT_LIST_HEAD(&dev->gadget.ep_list);
	list_add_tail(&dev->ep[UDC_EPIN_STATUS_IX].ep.ep_list,
						&dev->gadget.ep_list);
	list_add_tail(&dev->ep[UDC_EPIN_IX].ep.ep_list,
						&dev->gadget.ep_list);
	list_add_tail(&dev->ep[UDC_EPOUT_IX].ep.ep_list,
						&dev->gadget.ep_list);

	/* fifo config */
	dev->ep[UDC_EPIN_STATUS_IX].fifo_depth = UDC_EPIN_SMALLINT_BUFF_SIZE;
	if (dev->gadget.speed == USB_SPEED_FULL)
		dev->ep[UDC_EPIN_IX].fifo_depth = UDC_FS_EPIN_BUFF_SIZE;
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		dev->ep[UDC_EPIN_IX].fifo_depth = hs_tx_buf;
	dev->ep[UDC_EPOUT_IX].fifo_depth = UDC_RXFIFO_SIZE;
}

/* init registers at driver load time */
static int startup_registers(struct udc *dev)
{
	u32 tmp;

	/* init controller by soft reset */
	udc_soft_reset(dev);

	/* mask not needed interrupts */
	udc_mask_unused_interrupts(dev);

	/* put into initial config */
	udc_basic_init(dev);
	/* link up all endpoints */
	udc_setup_endpoints(dev);

	/* program speed */
	tmp = readl(&dev->regs->cfg);
	if (use_fullspeed) {
		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
	} else {
		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_HS, UDC_DEVCFG_SPD);
	}
	writel(tmp, &dev->regs->cfg);

	return 0;
}

/* Inits UDC context */
static void udc_basic_init(struct udc *dev)
{
	u32 tmp;

	DBG(dev, "udc_basic_init()\n");

	dev->gadget.speed = USB_SPEED_UNKNOWN;

	/* stop RDE timer */
	if (timer_pending(&udc_timer)) {
		set_rde = 0;
		mod_timer(&udc_timer, jiffies - 1);
	}
	/* stop poll stall timer */
	if (timer_pending(&udc_pollstall_timer)) {
		mod_timer(&udc_pollstall_timer, jiffies - 1);
	}
	/* disable DMA */
	tmp = readl(&dev->regs->ctl);
	tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
	tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_TDE);
	writel(tmp, &dev->regs->ctl);

	/* enable dynamic CSR programming */
	tmp = readl(&dev->regs->cfg);
	tmp |= AMD_BIT(UDC_DEVCFG_CSR_PRG);
	/* set self powered */
	tmp |= AMD_BIT(UDC_DEVCFG_SP);
	/* set remote wakeupable */
	tmp |= AMD_BIT(UDC_DEVCFG_RWKP);
	writel(tmp, &dev->regs->cfg);

	make_ep_lists(dev);

	dev->data_ep_enabled = 0;
	dev->data_ep_queued = 0;
}

/* Sets initial endpoint parameters */
static void udc_setup_endpoints(struct udc *dev)
{
	struct udc_ep *ep;
	u32 tmp;
	u32 reg;

	DBG(dev, "udc_setup_endpoints()\n");

	/* read enum speed */
	tmp = readl(&dev->regs->sts);
	tmp = AMD_GETBITS(tmp, UDC_DEVSTS_ENUM_SPEED);
	if (tmp == UDC_DEVSTS_ENUM_SPEED_HIGH) {
		dev->gadget.speed = USB_SPEED_HIGH;
	} else if (tmp == UDC_DEVSTS_ENUM_SPEED_FULL) {
		dev->gadget.speed = USB_SPEED_FULL;
	}

	/* set basic ep parameters */
	for (tmp = 0; tmp < UDC_EP_NUM; tmp++) {
		ep = &dev->ep[tmp];
		ep->dev = dev;
		ep->ep.name = ep_string[tmp];
		ep->num = tmp;
		/* txfifo size is calculated at enable time */
		ep->txfifo = dev->txfifo;

		/* fifo size */
		if (tmp < UDC_EPIN_NUM) {
			ep->fifo_depth = UDC_TXFIFO_SIZE;
			ep->in = 1;
		} else {
			ep->fifo_depth = UDC_RXFIFO_SIZE;
			ep->in = 0;

		}
		ep->regs = &dev->ep_regs[tmp];
		/*
		 * ep will be reset only if ep was not enabled before to avoid
		 * disabling ep interrupts when ENUM interrupt occurs but ep is
		 * not enabled by gadget driver
		 */
		if (!ep->desc) {
			ep_init(dev->regs, ep);
		}

		if (use_dma) {
			/*
			 * ep->dma is not really used, just to indicate that
			 * DMA is active: remove this
			 * dma regs = dev control regs
			 */
			ep->dma = &dev->regs->ctl;

			/* nak OUT endpoints until enable - not for ep0 */
			if (tmp != UDC_EP0IN_IX && tmp != UDC_EP0OUT_IX
						&& tmp > UDC_EPIN_NUM) {
				/* set NAK */
				reg = readl(&dev->ep[tmp].regs->ctl);
				reg |= AMD_BIT(UDC_EPCTL_SNAK);
				writel(reg, &dev->ep[tmp].regs->ctl);
				dev->ep[tmp].naking = 1;

			}
		}
	}
	/* EP0 max packet */
	if (dev->gadget.speed == USB_SPEED_FULL) {
		dev->ep[UDC_EP0IN_IX].ep.maxpacket = UDC_FS_EP0IN_MAX_PKT_SIZE;
		dev->ep[UDC_EP0OUT_IX].ep.maxpacket =
						UDC_FS_EP0OUT_MAX_PKT_SIZE;
	} else if (dev->gadget.speed == USB_SPEED_HIGH) {
		dev->ep[UDC_EP0IN_IX].ep.maxpacket = UDC_EP0IN_MAX_PKT_SIZE;
		dev->ep[UDC_EP0OUT_IX].ep.maxpacket = UDC_EP0OUT_MAX_PKT_SIZE;
	}

	/*
	 * with suspend bug workaround, ep0 params for gadget driver
	 * are set at gadget driver bind() call
	 */
	dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
	dev->ep[UDC_EP0IN_IX].halted = 0;
	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);

	/* init cfg/alt/int */
	dev->cur_config = 0;
	dev->cur_intf = 0;
	dev->cur_alt = 0;
}

/* Bringup after Connect event, initial bringup to be ready for ep0 events */
static void usb_connect(struct udc *dev)
{

	dev_info(&dev->pdev->dev, "USB Connect\n");

	dev->connected = 1;

	/* put into initial config */
	udc_basic_init(dev);

	/* enable device setup interrupts */
	udc_enable_dev_setup_interrupts(dev);
}

/*
 * Calls gadget with disconnect event and resets the UDC and makes
 * initial bringup to be ready for ep0 events
 */
static void usb_disconnect(struct udc *dev)
{

	dev_info(&dev->pdev->dev, "USB Disconnect\n");

	dev->connected = 0;

	/* mask interrupts */
	udc_mask_unused_interrupts(dev);

	/* REVISIT there doesn't seem to be a point to having this
	 * talk to a tasklet ... do it directly, we already hold
	 * the spinlock needed to process the disconnect.
	 */

	tasklet_schedule(&disconnect_tasklet);
}

1661 | /* Tasklet for disconnect to be outside of interrupt context */ | |
1662 | static void udc_tasklet_disconnect(unsigned long par) | |
1663 | { | |
1664 | struct udc *dev = (struct udc *)(*((struct udc **) par)); | |
1665 | u32 tmp; | |
1666 | ||
1667 | DBG(dev, "Tasklet disconnect\n"); | |
1668 | spin_lock_irq(&dev->lock); | |
1669 | ||
1670 | if (dev->driver) { | |
1671 | spin_unlock(&dev->lock); | |
1672 | dev->driver->disconnect(&dev->gadget); | |
1673 | spin_lock(&dev->lock); | |
1674 | ||
1675 | /* empty queues */ | |
1676 | for (tmp = 0; tmp < UDC_EP_NUM; tmp++) { | |
1677 | empty_req_queue(&dev->ep[tmp]); | |
1678 | } | |
1679 | ||
1680 | } | |
1681 | ||
1682 | /* disable ep0 */ | |
1683 | ep_init(dev->regs, | |
1684 | &dev->ep[UDC_EP0IN_IX]); | |
1685 | ||
1686 | ||
1687 | if (!soft_reset_occured) { | |
1688 | /* init controller by soft reset */ | |
1689 | udc_soft_reset(dev); | |
1690 | soft_reset_occured++; | |
1691 | } | |
1692 | ||
1693 | /* re-enable dev interrupts */ | |
1694 | udc_enable_dev_setup_interrupts(dev); | |
1695 | /* back to full speed ? */ | |
1696 | if (use_fullspeed) { | |
1697 | tmp = readl(&dev->regs->cfg); | |
1698 | tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD); | |
1699 | writel(tmp, &dev->regs->cfg); | |
1700 | } | |
1701 | ||
1702 | spin_unlock_irq(&dev->lock); | |
1703 | } | |
1704 | ||
1705 | /* Reset the UDC core */ | |
1706 | static void udc_soft_reset(struct udc *dev) | |
1707 | { | |
1708 | unsigned long flags; | |
1709 | ||
1710 | DBG(dev, "Soft reset\n"); | |
1711 | /* | |
1712 | * clear possibly pending interrupts, because their | |
1713 | * status is lost after soft reset; | |
1714 | * ep interrupt status is reset first | |
1715 | */ | |
1716 | writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqsts); | |
1717 | /* device int. status reset */ | |
1718 | writel(UDC_DEV_MSK_DISABLE, &dev->regs->irqsts); | |
1719 | ||
1720 | spin_lock_irqsave(&udc_irq_spinlock, flags); | |
1721 | writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg); | |
1722 | readl(&dev->regs->cfg); | |
1723 | spin_unlock_irqrestore(&udc_irq_spinlock, flags); | |
1724 | ||
1725 | } | |
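| ||
| /* | |
| * The AMD_* helpers used throughout this file are bitfield macros | |
| * from amd5536udc.h; a minimal sketch of their expected semantics, | |
| * assuming per-field <F>_OFS/<F>_MASK constants: | |
| * | |
| *   AMD_BIT(ofs)          (1 << (ofs))                single-bit mask | |
| *   AMD_CLEAR_BIT(ofs)    ~AMD_BIT(ofs)               AND-mask to clear | |
| *   AMD_UNMASK_BIT(ofs)   ~AMD_BIT(ofs)               unmask an irq bit | |
| *   AMD_GETBITS(v, F)     ((v) & F##_MASK) >> F##_OFS | |
| *   AMD_ADDBITS(v, x, F)  ((v) & ~F##_MASK) | (((x) << F##_OFS) & F##_MASK) | |
| */ | |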
1726 | ||
1727 | /* RDE timer callback to set RDE bit */ | |
1728 | static void udc_timer_function(unsigned long v) | |
1729 | { | |
1730 | u32 tmp; | |
1731 | ||
1732 | spin_lock_irq(&udc_irq_spinlock); | |
1733 | ||
1734 | if (set_rde > 0) { | |
1735 | /* | |
1736 | * conditionally open the fifo if it was already filled | |
1737 | * on the last timer call | |
1738 | */ | |
1739 | if (set_rde > 1) { | |
1740 | /* set RDE to receive setup data */ | |
1741 | tmp = readl(&udc->regs->ctl); | |
1742 | tmp |= AMD_BIT(UDC_DEVCTL_RDE); | |
1743 | writel(tmp, &udc->regs->ctl); | |
1744 | set_rde = -1; | |
1745 | } else if (readl(&udc->regs->sts) | |
1746 | & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) { | |
1747 | /* | |
1748 | * if fifo empty setup polling, do not just | |
1749 | * open the fifo | |
1750 | */ | |
1751 | udc_timer.expires = jiffies + HZ/UDC_RDE_TIMER_DIV; | |
1752 | if (!stop_timer) { | |
1753 | add_timer(&udc_timer); | |
1754 | } | |
1755 | } else { | |
1756 | /* | |
1757 | * fifo contains data now: set up the timer to open | |
1758 | * the fifo when it expires, so that setup packets can | |
1759 | * still be received; when data packets get queued by | |
1760 | * the gadget layer, the timer is forced to expire with | |
1761 | * set_rde=0 (RDE is set in udc_queue()) | |
1762 | */ | |
1763 | set_rde++; | |
1765 | udc_timer.expires = jiffies + HZ*UDC_RDE_TIMER_SECONDS; | |
1766 | if (!stop_timer) { | |
1767 | add_timer(&udc_timer); | |
1768 | } | |
1769 | } | |
1770 | ||
1771 | } else | |
1772 | set_rde = -1; /* RDE was set by udc_queue() */ | |
1773 | spin_unlock_irq(&udc_irq_spinlock); | |
1774 | if (stop_timer) | |
1775 | complete(&on_exit); | |
1776 | ||
1777 | } | |
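| ||
| /* | |
| * set_rde acts as a small state variable for the RDE timer above: | |
| *   -1: idle, RDE already set (by this timer or by udc_queue()) | |
| *    0: RDE was set by udc_queue(); the next expiry just resets to -1 | |
| *    1: timer armed; fifo still empty on expiry -> re-poll shortly | |
| *   >1: fifo held data on the previous expiry -> open it (set RDE) now | |
| */ | |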
1778 | ||
1779 | /* Handle halt state, used in stall poll timer */ | |
1780 | static void udc_handle_halt_state(struct udc_ep *ep) | |
1781 | { | |
1782 | u32 tmp; | |
1783 | /* handle stall state only while ep is halted */ | |
1784 | if (ep->halted == 1) { | |
1785 | tmp = readl(&ep->regs->ctl); | |
1786 | /* STALL cleared ? */ | |
1787 | if (!(tmp & AMD_BIT(UDC_EPCTL_S))) { | |
1788 | /* | |
1789 | * FIXME: MSC spec requires that the stall remains | |
1790 | * even on receipt of CLEAR_FEATURE HALT. So | |
1791 | * we would set STALL again here to be compliant. | |
1792 | * But with current mass storage drivers this does | |
1793 | * not work (would produce endless host retries). | |
1794 | * So we clear halt on CLEAR_FEATURE. | |
1795 | * | |
1796 | DBG(ep->dev, "ep %d: set STALL again\n", ep->num); | |
1797 | tmp |= AMD_BIT(UDC_EPCTL_S); | |
1798 | writel(tmp, &ep->regs->ctl);*/ | |
1799 | ||
1800 | /* clear NAK by writing CNAK */ | |
1801 | tmp |= AMD_BIT(UDC_EPCTL_CNAK); | |
1802 | writel(tmp, &ep->regs->ctl); | |
1803 | ep->halted = 0; | |
1804 | UDC_QUEUE_CNAK(ep, ep->num); | |
1805 | } | |
1806 | } | |
1807 | } | |
1808 | ||
1809 | /* Stall timer callback to poll S bit and set it again after */ | |
1810 | static void udc_pollstall_timer_function(unsigned long v) | |
1811 | { | |
1812 | struct udc_ep *ep; | |
1813 | int halted = 0; | |
1814 | ||
1815 | spin_lock_irq(&udc_stall_spinlock); | |
1816 | /* | |
1817 | * only one IN and one OUT endpoint are handled | |
1818 | * IN poll stall | |
1819 | */ | |
1820 | ep = &udc->ep[UDC_EPIN_IX]; | |
1821 | udc_handle_halt_state(ep); | |
1822 | if (ep->halted) | |
1823 | halted = 1; | |
1824 | /* OUT poll stall */ | |
1825 | ep = &udc->ep[UDC_EPOUT_IX]; | |
1826 | udc_handle_halt_state(ep); | |
1827 | if (ep->halted) | |
1828 | halted = 1; | |
1829 | ||
1830 | /* setup timer again when still halted */ | |
1831 | if (!stop_pollstall_timer && halted) { | |
1832 | udc_pollstall_timer.expires = jiffies + | |
1833 | HZ * UDC_POLLSTALL_TIMER_USECONDS | |
1834 | / (1000 * 1000); | |
1835 | add_timer(&udc_pollstall_timer); | |
1836 | } | |
1837 | spin_unlock_irq(&udc_stall_spinlock); | |
1838 | ||
1839 | if (stop_pollstall_timer) | |
1840 | complete(&on_pollstall_exit); | |
1841 | } | |
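| ||
| /* | |
| * The pollstall period above converts microseconds to jiffies by | |
| * hand: expires = jiffies + HZ * UDC_POLLSTALL_TIMER_USECONDS / 1000000. | |
| * As a worked example, if UDC_POLLSTALL_TIMER_USECONDS were 500000 | |
| * (an assumed value), the stall state would be re-polled about twice | |
| * per second. | |
| */ | |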
1842 | ||
1843 | /* Inits endpoint 0 so that SETUP packets are processed */ | |
1844 | static void activate_control_endpoints(struct udc *dev) | |
1845 | { | |
1846 | u32 tmp; | |
1847 | ||
1848 | DBG(dev, "activate_control_endpoints\n"); | |
1849 | ||
1850 | /* flush fifo */ | |
1851 | tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl); | |
1852 | tmp |= AMD_BIT(UDC_EPCTL_F); | |
1853 | writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl); | |
1854 | ||
1855 | /* set ep0 directions */ | |
1856 | dev->ep[UDC_EP0IN_IX].in = 1; | |
1857 | dev->ep[UDC_EP0OUT_IX].in = 0; | |
1858 | ||
1859 | /* set buffer size (tx fifo entries) of EP0_IN */ | |
1860 | tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufin_framenum); | |
1861 | if (dev->gadget.speed == USB_SPEED_FULL) | |
1862 | tmp = AMD_ADDBITS(tmp, UDC_FS_EPIN0_BUFF_SIZE, | |
1863 | UDC_EPIN_BUFF_SIZE); | |
1864 | else if (dev->gadget.speed == USB_SPEED_HIGH) | |
1865 | tmp = AMD_ADDBITS(tmp, UDC_EPIN0_BUFF_SIZE, | |
1866 | UDC_EPIN_BUFF_SIZE); | |
1867 | writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufin_framenum); | |
1868 | ||
1869 | /* set max packet size of EP0_IN */ | |
1870 | tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt); | |
1871 | if (dev->gadget.speed == USB_SPEED_FULL) | |
1872 | tmp = AMD_ADDBITS(tmp, UDC_FS_EP0IN_MAX_PKT_SIZE, | |
1873 | UDC_EP_MAX_PKT_SIZE); | |
1874 | else if (dev->gadget.speed == USB_SPEED_HIGH) | |
1875 | tmp = AMD_ADDBITS(tmp, UDC_EP0IN_MAX_PKT_SIZE, | |
1876 | UDC_EP_MAX_PKT_SIZE); | |
1877 | writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt); | |
1878 | ||
1879 | /* set max packet size of EP0_OUT */ | |
1880 | tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt); | |
1881 | if (dev->gadget.speed == USB_SPEED_FULL) | |
1882 | tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE, | |
1883 | UDC_EP_MAX_PKT_SIZE); | |
1884 | else if (dev->gadget.speed == USB_SPEED_HIGH) | |
1885 | tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE, | |
1886 | UDC_EP_MAX_PKT_SIZE); | |
1887 | writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt); | |
1888 | ||
1889 | /* set max packet size of EP0 in UDC CSR */ | |
1890 | tmp = readl(&dev->csr->ne[0]); | |
1891 | if (dev->gadget.speed == USB_SPEED_FULL) | |
1892 | tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE, | |
1893 | UDC_CSR_NE_MAX_PKT); | |
1894 | else if (dev->gadget.speed == USB_SPEED_HIGH) | |
1895 | tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE, | |
1896 | UDC_CSR_NE_MAX_PKT); | |
1897 | writel(tmp, &dev->csr->ne[0]); | |
1898 | ||
1899 | if (use_dma) { | |
1900 | dev->ep[UDC_EP0OUT_IX].td->status |= | |
1901 | AMD_BIT(UDC_DMA_OUT_STS_L); | |
1902 | /* write dma desc address */ | |
1903 | writel(dev->ep[UDC_EP0OUT_IX].td_stp_dma, | |
1904 | &dev->ep[UDC_EP0OUT_IX].regs->subptr); | |
1905 | writel(dev->ep[UDC_EP0OUT_IX].td_phys, | |
1906 | &dev->ep[UDC_EP0OUT_IX].regs->desptr); | |
1907 | /* stop RDE timer */ | |
1908 | if (timer_pending(&udc_timer)) { | |
1909 | set_rde = 0; | |
1910 | mod_timer(&udc_timer, jiffies - 1); | |
1911 | } | |
1912 | /* stop pollstall timer */ | |
1913 | if (timer_pending(&udc_pollstall_timer)) { | |
1914 | mod_timer(&udc_pollstall_timer, jiffies - 1); | |
1915 | } | |
1916 | /* enable DMA */ | |
1917 | tmp = readl(&dev->regs->ctl); | |
1918 | tmp |= AMD_BIT(UDC_DEVCTL_MODE) | |
1919 | | AMD_BIT(UDC_DEVCTL_RDE) | |
1920 | | AMD_BIT(UDC_DEVCTL_TDE); | |
1921 | if (use_dma_bufferfill_mode) { | |
1922 | tmp |= AMD_BIT(UDC_DEVCTL_BF); | |
1923 | } else if (use_dma_ppb_du) { | |
1924 | tmp |= AMD_BIT(UDC_DEVCTL_DU); | |
1925 | } | |
1926 | writel(tmp, &dev->regs->ctl); | |
1927 | } | |
1928 | ||
1929 | /* clear NAK by writing CNAK for EP0IN */ | |
1930 | tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl); | |
1931 | tmp |= AMD_BIT(UDC_EPCTL_CNAK); | |
1932 | writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl); | |
1933 | dev->ep[UDC_EP0IN_IX].naking = 0; | |
1934 | UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX); | |
1935 | ||
1936 | /* clear NAK by writing CNAK for EP0OUT */ | |
1937 | tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl); | |
1938 | tmp |= AMD_BIT(UDC_EPCTL_CNAK); | |
1939 | writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl); | |
1940 | dev->ep[UDC_EP0OUT_IX].naking = 0; | |
1941 | UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX); | |
1942 | } | |
1943 | ||
1944 | /* Make endpoint 0 ready for control traffic */ | |
1945 | static int setup_ep0(struct udc *dev) | |
1946 | { | |
1947 | activate_control_endpoints(dev); | |
1948 | /* enable ep0 interrupts */ | |
1949 | udc_enable_ep0_interrupts(dev); | |
1950 | /* enable device setup interrupts */ | |
1951 | udc_enable_dev_setup_interrupts(dev); | |
1952 | ||
1953 | return 0; | |
1954 | } | |
1955 | ||
1956 | /* Called by gadget driver to register itself */ | |
b0fca50f UKK |
1957 | int usb_gadget_probe_driver(struct usb_gadget_driver *driver, |
1958 | int (*bind)(struct usb_gadget *)) | |
55d402d8 TD |
1959 | { |
1960 | struct udc *dev = udc; | |
1961 | int retval; | |
1962 | u32 tmp; | |
1963 | ||
b0fca50f | 1964 | if (!driver || !bind || !driver->setup |
55d402d8 TD |
1965 | || driver->speed != USB_SPEED_HIGH) |
1966 | return -EINVAL; | |
1967 | if (!dev) | |
1968 | return -ENODEV; | |
1969 | if (dev->driver) | |
1970 | return -EBUSY; | |
1971 | ||
1972 | driver->driver.bus = NULL; | |
1973 | dev->driver = driver; | |
1974 | dev->gadget.dev.driver = &driver->driver; | |
1975 | ||
b0fca50f | 1976 | retval = bind(&dev->gadget); |
55d402d8 TD |
1977 | |
1978 | /* Some gadget drivers use both ep0 directions. | |
1979 | * NOTE: to gadget driver, ep0 is just one endpoint... | |
1980 | */ | |
1981 | dev->ep[UDC_EP0OUT_IX].ep.driver_data = | |
1982 | dev->ep[UDC_EP0IN_IX].ep.driver_data; | |
1983 | ||
1984 | if (retval) { | |
1985 | DBG(dev, "binding to %s returning %d\n", | |
1986 | driver->driver.name, retval); | |
1987 | dev->driver = NULL; | |
1988 | dev->gadget.dev.driver = NULL; | |
1989 | return retval; | |
1990 | } | |
1991 | ||
1992 | /* get ready for ep0 traffic */ | |
1993 | setup_ep0(dev); | |
1994 | ||
1995 | /* clear SD */ | |
1996 | tmp = readl(&dev->regs->ctl); | |
1997 | tmp = tmp & AMD_CLEAR_BIT(UDC_DEVCTL_SD); | |
1998 | writel(tmp, &dev->regs->ctl); | |
1999 | ||
2000 | usb_connect(dev); | |
2001 | ||
2002 | return 0; | |
2003 | } | |
b0fca50f | 2004 | EXPORT_SYMBOL(usb_gadget_probe_driver); |
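| ||
| /* | |
| * For reference, a gadget driver registers itself through this hook | |
| * roughly as follows (my_driver and my_bind are placeholder names): | |
| * | |
| *   retval = usb_gadget_probe_driver(&my_driver, my_bind); | |
| * | |
| * where my_driver.speed must be USB_SPEED_HIGH and my_driver.setup | |
| * must be non-NULL, or -EINVAL is returned by the checks above. | |
| */ | |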
55d402d8 TD |
2005 | |
2006 | /* shutdown requests and disconnect from gadget */ | |
2007 | static void | |
2008 | shutdown(struct udc *dev, struct usb_gadget_driver *driver) | |
2009 | __releases(dev->lock) | |
2010 | __acquires(dev->lock) | |
2011 | { | |
2012 | int tmp; | |
2013 | ||
55d402d8 TD |
2014 | if (dev->gadget.speed != USB_SPEED_UNKNOWN) { |
2015 | spin_unlock(&dev->lock); | |
2016 | driver->disconnect(&dev->gadget); | |
2017 | spin_lock(&dev->lock); | |
2018 | } | |
c5deb832 TD |
2019 | |
2020 | /* empty queues and init hardware */ | |
2021 | udc_basic_init(dev); | |
2022 | for (tmp = 0; tmp < UDC_EP_NUM; tmp++) | |
2023 | empty_req_queue(&dev->ep[tmp]); | |
2024 | ||
55d402d8 TD |
2025 | udc_setup_endpoints(dev); |
2026 | } | |
2027 | ||
2028 | /* Called by gadget driver to unregister itself */ | |
2029 | int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) | |
2030 | { | |
2031 | struct udc *dev = udc; | |
2032 | unsigned long flags; | |
2033 | u32 tmp; | |
2034 | ||
2035 | if (!dev) | |
2036 | return -ENODEV; | |
2037 | if (!driver || driver != dev->driver || !driver->unbind) | |
2038 | return -EINVAL; | |
2039 | ||
2040 | spin_lock_irqsave(&dev->lock, flags); | |
2041 | udc_mask_unused_interrupts(dev); | |
2042 | shutdown(dev, driver); | |
2043 | spin_unlock_irqrestore(&dev->lock, flags); | |
2044 | ||
2045 | driver->unbind(&dev->gadget); | |
eb0be47d | 2046 | dev->gadget.dev.driver = NULL; |
55d402d8 TD |
2047 | dev->driver = NULL; |
2048 | ||
2049 | /* set SD */ | |
2050 | tmp = readl(&dev->regs->ctl); | |
2051 | tmp |= AMD_BIT(UDC_DEVCTL_SD); | |
2052 | writel(tmp, &dev->regs->ctl); | |
2053 | ||
2054 | ||
2055 | DBG(dev, "%s: unregistered\n", driver->driver.name); | |
2056 | ||
2057 | return 0; | |
2058 | } | |
2059 | EXPORT_SYMBOL(usb_gadget_unregister_driver); | |
2060 | ||
2061 | ||
2062 | /* Clear pending NAK bits */ | |
2063 | static void udc_process_cnak_queue(struct udc *dev) | |
2064 | { | |
2065 | u32 tmp; | |
2066 | u32 reg; | |
2067 | ||
2068 | /* check IN endpoints */ | |
2069 | DBG(dev, "CNAK pending queue processing\n"); | |
2070 | for (tmp = 0; tmp < UDC_EPIN_NUM_USED; tmp++) { | |
2071 | if (cnak_pending & (1 << tmp)) { | |
2072 | DBG(dev, "CNAK pending for ep%d\n", tmp); | |
2073 | /* clear NAK by writing CNAK */ | |
2074 | reg = readl(&dev->ep[tmp].regs->ctl); | |
2075 | reg |= AMD_BIT(UDC_EPCTL_CNAK); | |
2076 | writel(reg, &dev->ep[tmp].regs->ctl); | |
2077 | dev->ep[tmp].naking = 0; | |
2078 | UDC_QUEUE_CNAK(&dev->ep[tmp], dev->ep[tmp].num); | |
2079 | } | |
2080 | } | |
2081 | /* ... and ep0out */ | |
2082 | if (cnak_pending & (1 << UDC_EP0OUT_IX)) { | |
2083 | DBG(dev, "CNAK pending for ep%d\n", UDC_EP0OUT_IX); | |
2084 | /* clear NAK by writing CNAK */ | |
2085 | reg = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl); | |
2086 | reg |= AMD_BIT(UDC_EPCTL_CNAK); | |
2087 | writel(reg, &dev->ep[UDC_EP0OUT_IX].regs->ctl); | |
2088 | dev->ep[UDC_EP0OUT_IX].naking = 0; | |
2089 | UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], | |
2090 | dev->ep[UDC_EP0OUT_IX].num); | |
2091 | } | |
2092 | } | |
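| ||
| /* | |
| * cnak_pending is a bitmask with one bit per endpoint index; the | |
| * UDC_QUEUE_CNAK() calls above presumably set a bit whenever a CNAK | |
| * write did not take effect (the ep is still naking), so the ISRs | |
| * retry here, e.g. by testing cnak_pending & (1 << ep_ix), once the | |
| * rxfifo is empty. | |
| */ | |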
2093 | ||
2094 | /* Enabling RX DMA after setup packet */ | |
2095 | static void udc_ep0_set_rde(struct udc *dev) | |
2096 | { | |
2097 | if (use_dma) { | |
2098 | /* | |
2099 | * only enable RX DMA when no data endpoint is enabled | |
2100 | * or data is queued | |
2101 | */ | |
2102 | if (!dev->data_ep_enabled || dev->data_ep_queued) { | |
2103 | udc_set_rde(dev); | |
2104 | } else { | |
2105 | /* | |
2106 | * set up timer for enabling RDE (so RXFIFO DMA | |
2107 | * is not enabled too early for data endpoints) | |
2108 | */ | |
2109 | if (set_rde != 0 && !timer_pending(&udc_timer)) { | |
2110 | udc_timer.expires = | |
2111 | jiffies + HZ/UDC_RDE_TIMER_DIV; | |
2112 | set_rde = 1; | |
2113 | if (!stop_timer) { | |
2114 | add_timer(&udc_timer); | |
2115 | } | |
2116 | } | |
2117 | } | |
2118 | } | |
2119 | } | |
2120 | ||
2121 | ||
2122 | /* Interrupt handler for data OUT traffic */ | |
2123 | static irqreturn_t udc_data_out_isr(struct udc *dev, int ep_ix) | |
2124 | { | |
2125 | irqreturn_t ret_val = IRQ_NONE; | |
2126 | u32 tmp; | |
2127 | struct udc_ep *ep; | |
2128 | struct udc_request *req; | |
2129 | unsigned int count; | |
2130 | struct udc_data_dma *td = NULL; | |
2131 | unsigned dma_done; | |
2132 | ||
2133 | VDBG(dev, "ep%d irq\n", ep_ix); | |
2134 | ep = &dev->ep[ep_ix]; | |
2135 | ||
2136 | tmp = readl(&ep->regs->sts); | |
2137 | if (use_dma) { | |
2138 | /* BNA event ? */ | |
2139 | if (tmp & AMD_BIT(UDC_EPSTS_BNA)) { | |
25985edc | 2140 | DBG(dev, "BNA ep%dout occurred - DESPTR = %x \n", |
55d402d8 TD |
2141 | ep->num, readl(&ep->regs->desptr)); |
2142 | /* clear BNA */ | |
2143 | writel(tmp | AMD_BIT(UDC_EPSTS_BNA), &ep->regs->sts); | |
2144 | if (!ep->cancel_transfer) | |
2145 | ep->bna_occurred = 1; | |
2146 | else | |
2147 | ep->cancel_transfer = 0; | |
2148 | ret_val = IRQ_HANDLED; | |
2149 | goto finished; | |
2150 | } | |
2151 | } | |
2152 | /* HE event ? */ | |
2153 | if (tmp & AMD_BIT(UDC_EPSTS_HE)) { | |
25985edc | 2154 | dev_err(&dev->pdev->dev, "HE ep%dout occurred\n", ep->num); |
55d402d8 TD |
2155 | |
2156 | /* clear HE */ | |
2157 | writel(tmp | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts); | |
2158 | ret_val = IRQ_HANDLED; | |
2159 | goto finished; | |
2160 | } | |
2161 | ||
2162 | if (!list_empty(&ep->queue)) { | |
2163 | ||
2164 | /* next request */ | |
2165 | req = list_entry(ep->queue.next, | |
2166 | struct udc_request, queue); | |
2167 | } else { | |
2168 | req = NULL; | |
2169 | udc_rxfifo_pending = 1; | |
2170 | } | |
2171 | VDBG(dev, "req = %p\n", req); | |
2172 | /* fifo mode */ | |
2173 | if (!use_dma) { | |
2174 | ||
2175 | /* read fifo */ | |
2176 | if (req && udc_rxfifo_read(ep, req)) { | |
2177 | ret_val = IRQ_HANDLED; | |
2178 | ||
2179 | /* finish */ | |
2180 | complete_req(ep, req, 0); | |
2181 | /* next request */ | |
2182 | if (!list_empty(&ep->queue) && !ep->halted) { | |
2183 | req = list_entry(ep->queue.next, | |
2184 | struct udc_request, queue); | |
2185 | } else | |
2186 | req = NULL; | |
2187 | } | |
2188 | ||
2189 | /* DMA */ | |
2190 | } else if (!ep->cancel_transfer && req != NULL) { | |
2191 | ret_val = IRQ_HANDLED; | |
2192 | ||
2193 | /* check for DMA done */ | |
2194 | if (!use_dma_ppb) { | |
2195 | dma_done = AMD_GETBITS(req->td_data->status, | |
2196 | UDC_DMA_OUT_STS_BS); | |
2197 | /* packet per buffer mode - rx bytes */ | |
2198 | } else { | |
2199 | /* | |
2200 | * if BNA occurred then recover desc. from | |
2201 | * BNA dummy desc. | |
2202 | */ | |
2203 | if (ep->bna_occurred) { | |
2204 | VDBG(dev, "Recover desc. from BNA dummy\n"); | |
2205 | memcpy(req->td_data, ep->bna_dummy_req->td_data, | |
2206 | sizeof(struct udc_data_dma)); | |
2207 | ep->bna_occurred = 0; | |
2208 | udc_init_bna_dummy(ep->req); | |
2209 | } | |
2210 | td = udc_get_last_dma_desc(req); | |
2211 | dma_done = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_BS); | |
2212 | } | |
2213 | if (dma_done == UDC_DMA_OUT_STS_BS_DMA_DONE) { | |
2214 | /* buffer fill mode - rx bytes */ | |
2215 | if (!use_dma_ppb) { | |
2216 | /* received number bytes */ | |
2217 | count = AMD_GETBITS(req->td_data->status, | |
2218 | UDC_DMA_OUT_STS_RXBYTES); | |
2219 | VDBG(dev, "rx bytes=%u\n", count); | |
2220 | /* packet per buffer mode - rx bytes */ | |
2221 | } else { | |
2222 | VDBG(dev, "req->td_data=%p\n", req->td_data); | |
2223 | VDBG(dev, "last desc = %p\n", td); | |
2224 | /* received number bytes */ | |
2225 | if (use_dma_ppb_du) { | |
2226 | /* every desc. counts bytes */ | |
2227 | count = udc_get_ppbdu_rxbytes(req); | |
2228 | } else { | |
2229 | /* last desc. counts bytes */ | |
2230 | count = AMD_GETBITS(td->status, | |
2231 | UDC_DMA_OUT_STS_RXBYTES); | |
2232 | if (!count && req->req.length | |
2233 | == UDC_DMA_MAXPACKET) { | |
2234 | /* | |
2235 | * on 64k packets the RXBYTES | |
2236 | * field is zero | |
2237 | */ | |
2238 | count = UDC_DMA_MAXPACKET; | |
2239 | } | |
2240 | } | |
2241 | VDBG(dev, "last desc rx bytes=%u\n", count); | |
2242 | } | |
2243 | ||
2244 | tmp = req->req.length - req->req.actual; | |
2245 | if (count > tmp) { | |
2246 | if ((tmp % ep->ep.maxpacket) != 0) { | |
2247 | DBG(dev, "%s: rx %db, space=%db\n", | |
2248 | ep->ep.name, count, tmp); | |
2249 | req->req.status = -EOVERFLOW; | |
2250 | } | |
2251 | count = tmp; | |
2252 | } | |
2253 | req->req.actual += count; | |
2254 | req->dma_going = 0; | |
2255 | /* complete request */ | |
2256 | complete_req(ep, req, 0); | |
2257 | ||
2258 | /* next request */ | |
2259 | if (!list_empty(&ep->queue) && !ep->halted) { | |
2260 | req = list_entry(ep->queue.next, | |
2261 | struct udc_request, | |
2262 | queue); | |
2263 | /* | |
2264 | * DMA may already have been started by udc_queue(), | |
2265 | * called from the gadget driver's completion | |
2266 | * routine. This happens when the queue | |
2267 | * holds only one request. | |
2268 | */ | |
2269 | if (req->dma_going == 0) { | |
2270 | /* next dma */ | |
2271 | if (prep_dma(ep, req, GFP_ATOMIC) != 0) | |
2272 | goto finished; | |
2273 | /* write desc pointer */ | |
2274 | writel(req->td_phys, | |
2275 | &ep->regs->desptr); | |
2276 | req->dma_going = 1; | |
2277 | /* enable DMA */ | |
2278 | udc_set_rde(dev); | |
2279 | } | |
2280 | } else { | |
2281 | /* | |
2282 | * implant BNA dummy descriptor to allow | |
2283 | * RXFIFO opening by RDE | |
2284 | */ | |
2285 | if (ep->bna_dummy_req) { | |
2286 | /* write desc pointer */ | |
2287 | writel(ep->bna_dummy_req->td_phys, | |
2288 | &ep->regs->desptr); | |
2289 | ep->bna_occurred = 0; | |
2290 | } | |
2291 | ||
2292 | /* | |
2293 | * schedule timer for setting RDE if queue | |
2294 | * remains empty to allow ep0 packets pass | |
2295 | * through | |
2296 | */ | |
2297 | if (set_rde != 0 | |
2298 | && !timer_pending(&udc_timer)) { | |
2299 | udc_timer.expires = | |
2300 | jiffies | |
2301 | + HZ*UDC_RDE_TIMER_SECONDS; | |
2302 | set_rde = 1; | |
2303 | if (!stop_timer) { | |
2304 | add_timer(&udc_timer); | |
2305 | } | |
2306 | } | |
2307 | if (ep->num != UDC_EP0OUT_IX) | |
2308 | dev->data_ep_queued = 0; | |
2309 | } | |
2310 | ||
2311 | } else { | |
2312 | /* | |
2313 | * RX DMA must be reenabled for each desc in PPBDU mode | |
2314 | * and must be enabled for PPBNDU mode in case of BNA | |
2315 | */ | |
2316 | udc_set_rde(dev); | |
2317 | } | |
2318 | ||
2319 | } else if (ep->cancel_transfer) { | |
2320 | ret_val = IRQ_HANDLED; | |
2321 | ep->cancel_transfer = 0; | |
2322 | } | |
2323 | ||
2324 | /* check pending CNAKS */ | |
2325 | if (cnak_pending) { | |
2326 | /* process CNAKs only when the rxfifo is empty */ | |
2327 | if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) { | |
2328 | udc_process_cnak_queue(dev); | |
2329 | } | |
2330 | } | |
2331 | ||
2332 | /* clear OUT bits in ep status */ | |
2333 | writel(UDC_EPSTS_OUT_CLEAR, &ep->regs->sts); | |
2334 | finished: | |
2335 | return ret_val; | |
2336 | } | |
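| ||
| /* | |
| * Summary of the OUT DMA variants handled above: in buffer-fill mode | |
| * (use_dma_ppb == 0) a single descriptor covers the whole request and | |
| * its RXBYTES field gives the byte count; in packet-per-buffer mode | |
| * the last descriptor of the chain is checked for DMA done, and the | |
| * count comes from every descriptor (PPB with descriptor update) or | |
| * from the last one only, where RXBYTES == 0 stands for a full 64k | |
| * UDC_DMA_MAXPACKET transfer. | |
| */ | |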
2337 | ||
2338 | /* Interrupt handler for data IN traffic */ | |
2339 | static irqreturn_t udc_data_in_isr(struct udc *dev, int ep_ix) | |
2340 | { | |
2341 | irqreturn_t ret_val = IRQ_NONE; | |
2342 | u32 tmp; | |
2343 | u32 epsts; | |
2344 | struct udc_ep *ep; | |
2345 | struct udc_request *req; | |
2346 | struct udc_data_dma *td; | |
2347 | unsigned dma_done; | |
2348 | unsigned len; | |
2349 | ||
2350 | ep = &dev->ep[ep_ix]; | |
2351 | ||
2352 | epsts = readl(&ep->regs->sts); | |
2353 | if (use_dma) { | |
2354 | /* BNA ? */ | |
2355 | if (epsts & AMD_BIT(UDC_EPSTS_BNA)) { | |
2356 | dev_err(&dev->pdev->dev, | |
25985edc | 2357 | "BNA ep%din occurred - DESPTR = %08lx \n", |
55d402d8 TD |
2358 | ep->num, |
2359 | (unsigned long) readl(&ep->regs->desptr)); | |
2360 | ||
2361 | /* clear BNA */ | |
2362 | writel(epsts, &ep->regs->sts); | |
2363 | ret_val = IRQ_HANDLED; | |
2364 | goto finished; | |
2365 | } | |
2366 | } | |
2367 | /* HE event ? */ | |
2368 | if (epsts & AMD_BIT(UDC_EPSTS_HE)) { | |
2369 | dev_err(&dev->pdev->dev, | |
25985edc | 2370 | "HE ep%dn occurred - DESPTR = %08lx \n", |
55d402d8 TD |
2371 | ep->num, (unsigned long) readl(&ep->regs->desptr)); |
2372 | ||
2373 | /* clear HE */ | |
2374 | writel(epsts | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts); | |
2375 | ret_val = IRQ_HANDLED; | |
2376 | goto finished; | |
2377 | } | |
2378 | ||
2379 | /* DMA completion */ | |
2380 | if (epsts & AMD_BIT(UDC_EPSTS_TDC)) { | |
2381 | VDBG(dev, "TDC set- completion\n"); | |
2382 | ret_val = IRQ_HANDLED; | |
2383 | if (!ep->cancel_transfer && !list_empty(&ep->queue)) { | |
2384 | req = list_entry(ep->queue.next, | |
2385 | struct udc_request, queue); | |
058e698b | 2386 | /* |
25985edc | 2387 | * length bytes transferred |
058e698b JL |
2388 | * check dma done of last desc. in PPBDU mode |
2389 | */ | |
2390 | if (use_dma_ppb_du) { | |
2391 | td = udc_get_last_dma_desc(req); | |
2392 | if (td) { | |
2393 | dma_done = | |
2394 | AMD_GETBITS(td->status, | |
2395 | UDC_DMA_IN_STS_BS); | |
2396 | /* don't care about DMA done status */ | |
55d402d8 TD |
2397 | req->req.actual = req->req.length; |
2398 | } | |
058e698b JL |
2399 | } else { |
2400 | /* assume all bytes transferred */ | |
2401 | req->req.actual = req->req.length; | |
2402 | } | |
55d402d8 | 2403 | |
058e698b JL |
2404 | if (req->req.actual == req->req.length) { |
2405 | /* complete req */ | |
2406 | complete_req(ep, req, 0); | |
2407 | req->dma_going = 0; | |
2408 | /* further request available ? */ | |
2409 | if (list_empty(&ep->queue)) { | |
2410 | /* disable interrupt */ | |
2411 | tmp = readl(&dev->regs->ep_irqmsk); | |
2412 | tmp |= AMD_BIT(ep->num); | |
2413 | writel(tmp, &dev->regs->ep_irqmsk); | |
55d402d8 TD |
2414 | } |
2415 | } | |
2416 | } | |
2417 | ep->cancel_transfer = 0; | |
2418 | ||
2419 | } | |
2420 | /* | |
2421 | * status reg has IN bit set and TDC not set: if TDC was handled, | |
2422 | * IN must not be handled as well (UDC defect?) | |
2423 | */ | |
2424 | if ((epsts & AMD_BIT(UDC_EPSTS_IN)) | |
2425 | && !(epsts & AMD_BIT(UDC_EPSTS_TDC))) { | |
2426 | ret_val = IRQ_HANDLED; | |
2427 | if (!list_empty(&ep->queue)) { | |
2428 | /* next request */ | |
2429 | req = list_entry(ep->queue.next, | |
2430 | struct udc_request, queue); | |
2431 | /* FIFO mode */ | |
2432 | if (!use_dma) { | |
2433 | /* write fifo */ | |
2434 | udc_txfifo_write(ep, &req->req); | |
2435 | len = req->req.length - req->req.actual; | |
2436 | if (len > ep->ep.maxpacket) | |
2437 | len = ep->ep.maxpacket; | |
2438 | req->req.actual += len; | |
2439 | if (req->req.actual == req->req.length | |
2440 | || (len != ep->ep.maxpacket)) { | |
2441 | /* complete req */ | |
2442 | complete_req(ep, req, 0); | |
2443 | } | |
2444 | /* DMA */ | |
2445 | } else if (req && !req->dma_going) { | |
2446 | VDBG(dev, "IN DMA : req=%p req->td_data=%p\n", | |
2447 | req, req->td_data); | |
2448 | if (req->td_data) { | |
2449 | ||
2450 | req->dma_going = 1; | |
2451 | ||
2452 | /* | |
2453 | * unset L bit of first desc. | |
2454 | * for chain | |
2455 | */ | |
2456 | if (use_dma_ppb && req->req.length > | |
2457 | ep->ep.maxpacket) { | |
2458 | req->td_data->status &= | |
2459 | AMD_CLEAR_BIT( | |
2460 | UDC_DMA_IN_STS_L); | |
2461 | } | |
2462 | ||
2463 | /* write desc pointer */ | |
2464 | writel(req->td_phys, &ep->regs->desptr); | |
2465 | ||
2466 | /* set HOST READY */ | |
2467 | req->td_data->status = | |
2468 | AMD_ADDBITS( | |
2469 | req->td_data->status, | |
2470 | UDC_DMA_IN_STS_BS_HOST_READY, | |
2471 | UDC_DMA_IN_STS_BS); | |
2472 | ||
2473 | /* set poll demand bit */ | |
2474 | tmp = readl(&ep->regs->ctl); | |
2475 | tmp |= AMD_BIT(UDC_EPCTL_P); | |
2476 | writel(tmp, &ep->regs->ctl); | |
2477 | } | |
2478 | } | |
2479 | ||
c5deb832 TD |
2480 | } else if (!use_dma && ep->in) { |
2481 | /* disable interrupt */ | |
2482 | tmp = readl( | |
2483 | &dev->regs->ep_irqmsk); | |
2484 | tmp |= AMD_BIT(ep->num); | |
2485 | writel(tmp, | |
2486 | &dev->regs->ep_irqmsk); | |
55d402d8 TD |
2487 | } |
2488 | } | |
2489 | /* clear status bits */ | |
2490 | writel(epsts, &ep->regs->sts); | |
2491 | ||
2492 | finished: | |
2493 | return ret_val; | |
2494 | ||
2495 | } | |
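| ||
| /* | |
| * For IN traffic the two status bits are handled strictly in order: | |
| * TDC (DMA transfer done) first, then IN (fifo/descriptor demand); | |
| * per the UDC-defect note above, an IN event is ignored whenever TDC | |
| * is set in the same status read. | |
| */ | |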
2496 | ||
2497 | /* Interrupt handler for Control OUT traffic */ | |
2498 | static irqreturn_t udc_control_out_isr(struct udc *dev) | |
2499 | __releases(dev->lock) | |
2500 | __acquires(dev->lock) | |
2501 | { | |
2502 | irqreturn_t ret_val = IRQ_NONE; | |
2503 | u32 tmp; | |
2504 | int setup_supported; | |
2505 | u32 count; | |
2506 | int set = 0; | |
2507 | struct udc_ep *ep; | |
2508 | struct udc_ep *ep_tmp; | |
2509 | ||
2510 | ep = &dev->ep[UDC_EP0OUT_IX]; | |
2511 | ||
2512 | /* clear irq */ | |
2513 | writel(AMD_BIT(UDC_EPINT_OUT_EP0), &dev->regs->ep_irqsts); | |
2514 | ||
2515 | tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts); | |
2516 | /* check BNA and clear if set */ | |
2517 | if (tmp & AMD_BIT(UDC_EPSTS_BNA)) { | |
2518 | VDBG(dev, "ep0: BNA set\n"); | |
2519 | writel(AMD_BIT(UDC_EPSTS_BNA), | |
2520 | &dev->ep[UDC_EP0OUT_IX].regs->sts); | |
2521 | ep->bna_occurred = 1; | |
2522 | ret_val = IRQ_HANDLED; | |
2523 | goto finished; | |
2524 | } | |
2525 | ||
2526 | /* type of data: SETUP or DATA 0 bytes */ | |
2527 | tmp = AMD_GETBITS(tmp, UDC_EPSTS_OUT); | |
2528 | VDBG(dev, "data_typ = %x\n", tmp); | |
2529 | ||
2530 | /* setup data */ | |
2531 | if (tmp == UDC_EPSTS_OUT_SETUP) { | |
2532 | ret_val = IRQ_HANDLED; | |
2533 | ||
2534 | ep->dev->stall_ep0in = 0; | |
2535 | dev->waiting_zlp_ack_ep0in = 0; | |
2536 | ||
2537 | /* set NAK for EP0_IN */ | |
2538 | tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl); | |
2539 | tmp |= AMD_BIT(UDC_EPCTL_SNAK); | |
2540 | writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl); | |
2541 | dev->ep[UDC_EP0IN_IX].naking = 1; | |
2542 | /* get setup data */ | |
2543 | if (use_dma) { | |
2544 | ||
2545 | /* clear OUT bits in ep status */ | |
2546 | writel(UDC_EPSTS_OUT_CLEAR, | |
2547 | &dev->ep[UDC_EP0OUT_IX].regs->sts); | |
2548 | ||
2549 | setup_data.data[0] = | |
2550 | dev->ep[UDC_EP0OUT_IX].td_stp->data12; | |
2551 | setup_data.data[1] = | |
2552 | dev->ep[UDC_EP0OUT_IX].td_stp->data34; | |
2553 | /* set HOST READY */ | |
2554 | dev->ep[UDC_EP0OUT_IX].td_stp->status = | |
2555 | UDC_DMA_STP_STS_BS_HOST_READY; | |
2556 | } else { | |
2557 | /* read fifo */ | |
2558 | udc_rxfifo_read_dwords(dev, setup_data.data, 2); | |
2559 | } | |
2560 | ||
2561 | /* determine direction of control data */ | |
2562 | if ((setup_data.request.bRequestType & USB_DIR_IN) != 0) { | |
2563 | dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep; | |
2564 | /* enable RDE */ | |
2565 | udc_ep0_set_rde(dev); | |
2566 | set = 0; | |
2567 | } else { | |
2568 | dev->gadget.ep0 = &dev->ep[UDC_EP0OUT_IX].ep; | |
2569 | /* | |
2570 | * implant BNA dummy descriptor to allow RXFIFO opening | |
2571 | * by RDE | |
2572 | */ | |
2573 | if (ep->bna_dummy_req) { | |
2574 | /* write desc pointer */ | |
2575 | writel(ep->bna_dummy_req->td_phys, | |
2576 | &dev->ep[UDC_EP0OUT_IX].regs->desptr); | |
2577 | ep->bna_occurred = 0; | |
2578 | } | |
2579 | ||
2580 | set = 1; | |
2581 | dev->ep[UDC_EP0OUT_IX].naking = 1; | |
2582 | /* | |
2583 | * set up timer for enabling RDE (so RXFIFO DMA | |
2584 | * is not enabled too early for data) | |
2585 | */ | |
2586 | set_rde = 1; | |
2587 | if (!timer_pending(&udc_timer)) { | |
2588 | udc_timer.expires = jiffies + | |
2589 | HZ/UDC_RDE_TIMER_DIV; | |
2590 | if (!stop_timer) { | |
2591 | add_timer(&udc_timer); | |
2592 | } | |
2593 | } | |
2594 | } | |
2595 | ||
2596 | /* | |
2597 | * mass storage reset must be processed here because | |
2598 | * the next packet may be a CLEAR_FEATURE HALT which would not | |
2599 | * clear the stall bit when no STALL handshake was received | |
2600 | * before (autostall can cause this) | |
2601 | */ | |
2602 | if (setup_data.data[0] == UDC_MSCRES_DWORD0 | |
2603 | && setup_data.data[1] == UDC_MSCRES_DWORD1) { | |
2604 | DBG(dev, "MSC Reset\n"); | |
2605 | /* | |
2606 | * clear stall bits | |
2607 | * only one IN and one OUT endpoint are handled | |
2608 | */ | |
2609 | ep_tmp = &udc->ep[UDC_EPIN_IX]; | |
2610 | udc_set_halt(&ep_tmp->ep, 0); | |
2611 | ep_tmp = &udc->ep[UDC_EPOUT_IX]; | |
2612 | udc_set_halt(&ep_tmp->ep, 0); | |
2613 | } | |
2614 | ||
2615 | /* call gadget with setup data received */ | |
2616 | spin_unlock(&dev->lock); | |
2617 | setup_supported = dev->driver->setup(&dev->gadget, | |
2618 | &setup_data.request); | |
2619 | spin_lock(&dev->lock); | |
2620 | ||
2621 | tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl); | |
2622 | /* ep0 in returns data (not zlp) on IN phase */ | |
2623 | if (setup_supported >= 0 && setup_supported < | |
2624 | UDC_EP0IN_MAXPACKET) { | |
2625 | /* clear NAK by writing CNAK in EP0_IN */ | |
2626 | tmp |= AMD_BIT(UDC_EPCTL_CNAK); | |
2627 | writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl); | |
2628 | dev->ep[UDC_EP0IN_IX].naking = 0; | |
2629 | UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX); | |
2630 | ||
2631 | /* if unsupported request then stall */ | |
2632 | } else if (setup_supported < 0) { | |
2633 | tmp |= AMD_BIT(UDC_EPCTL_S); | |
2634 | writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl); | |
2635 | } else | |
2636 | dev->waiting_zlp_ack_ep0in = 1; | |
2637 | ||
2638 | ||
2639 | /* clear NAK by writing CNAK in EP0_OUT */ | |
2640 | if (!set) { | |
2641 | tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl); | |
2642 | tmp |= AMD_BIT(UDC_EPCTL_CNAK); | |
2643 | writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl); | |
2644 | dev->ep[UDC_EP0OUT_IX].naking = 0; | |
2645 | UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX); | |
2646 | } | |
2647 | ||
2648 | if (!use_dma) { | |
2649 | /* clear OUT bits in ep status */ | |
2650 | writel(UDC_EPSTS_OUT_CLEAR, | |
2651 | &dev->ep[UDC_EP0OUT_IX].regs->sts); | |
2652 | } | |
2653 | ||
2654 | /* data packet 0 bytes */ | |
2655 | } else if (tmp == UDC_EPSTS_OUT_DATA) { | |
2656 | /* clear OUT bits in ep status */ | |
2657 | writel(UDC_EPSTS_OUT_CLEAR, &dev->ep[UDC_EP0OUT_IX].regs->sts); | |
2658 | ||
2659 | /* get setup data: only 0 packet */ | |
2660 | if (use_dma) { | |
2661 | /* no req if 0 packet, just reactivate */ | |
2662 | if (list_empty(&dev->ep[UDC_EP0OUT_IX].queue)) { | |
2663 | VDBG(dev, "ZLP\n"); | |
2664 | ||
2665 | /* set HOST READY */ | |
2666 | dev->ep[UDC_EP0OUT_IX].td->status = | |
2667 | AMD_ADDBITS( | |
2668 | dev->ep[UDC_EP0OUT_IX].td->status, | |
2669 | UDC_DMA_OUT_STS_BS_HOST_READY, | |
2670 | UDC_DMA_OUT_STS_BS); | |
2671 | /* enable RDE */ | |
2672 | udc_ep0_set_rde(dev); | |
2673 | ret_val = IRQ_HANDLED; | |
2674 | ||
2675 | } else { | |
2676 | /* control write */ | |
2677 | ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX); | |
2678 | /* re-program desc. pointer for possible ZLPs */ | |
2679 | writel(dev->ep[UDC_EP0OUT_IX].td_phys, | |
2680 | &dev->ep[UDC_EP0OUT_IX].regs->desptr); | |
2681 | /* enable RDE */ | |
2682 | udc_ep0_set_rde(dev); | |
2683 | } | |
2684 | } else { | |
2685 | ||
2686 | /* received number bytes */ | |
2687 | count = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts); | |
2688 | count = AMD_GETBITS(count, UDC_EPSTS_RX_PKT_SIZE); | |
2689 | /* out data in fifo mode does not work, ignore the count */ | |
2690 | count = 0; | |
2691 | ||
2692 | /* 0 packet or real data ? */ | |
2693 | if (count != 0) { | |
2694 | ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX); | |
2695 | } else { | |
2696 | /* dummy read confirm */ | |
2697 | readl(&dev->ep[UDC_EP0OUT_IX].regs->confirm); | |
2698 | ret_val = IRQ_HANDLED; | |
2699 | } | |
2700 | } | |
2701 | } | |
2702 | ||
2703 | /* check pending CNAKS */ | |
2704 | if (cnak_pending) { | |
2705 | /* process CNAKs only when the rxfifo is empty */ | |
2706 | if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) { | |
2707 | udc_process_cnak_queue(dev); | |
2708 | } | |
2709 | } | |
2710 | ||
2711 | finished: | |
2712 | return ret_val; | |
2713 | } | |
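| ||
| /* | |
| * Control OUT flow above in short: NAK ep0in, fetch the eight setup | |
| * bytes (from the DMA setup descriptor or from the rxfifo), hand them | |
| * to dev->driver->setup(), then either CNAK ep0in for the IN data | |
| * phase, stall ep0in on error, or wait for the zero-length ACK. | |
| */ | |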
2714 | ||
2715 | /* Interrupt handler for Control IN traffic */ | |
2716 | static irqreturn_t udc_control_in_isr(struct udc *dev) | |
2717 | { | |
2718 | irqreturn_t ret_val = IRQ_NONE; | |
2719 | u32 tmp; | |
2720 | struct udc_ep *ep; | |
2721 | struct udc_request *req; | |
2722 | unsigned len; | |
2723 | ||
2724 | ep = &dev->ep[UDC_EP0IN_IX]; | |
2725 | ||
2726 | /* clear irq */ | |
2727 | writel(AMD_BIT(UDC_EPINT_IN_EP0), &dev->regs->ep_irqsts); | |
2728 | ||
2729 | tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->sts); | |
2730 | /* DMA completion */ | |
2731 | if (tmp & AMD_BIT(UDC_EPSTS_TDC)) { | |
2732 | VDBG(dev, "isr: TDC clear \n"); | |
2733 | ret_val = IRQ_HANDLED; | |
2734 | ||
2735 | /* clear TDC bit */ | |
2736 | writel(AMD_BIT(UDC_EPSTS_TDC), | |
2737 | &dev->ep[UDC_EP0IN_IX].regs->sts); | |
2738 | ||
2739 | /* status reg has IN bit set ? */ | |
2740 | } else if (tmp & AMD_BIT(UDC_EPSTS_IN)) { | |
2741 | ret_val = IRQ_HANDLED; | |
2742 | ||
2743 | if (ep->dma) { | |
2744 | /* clear IN bit */ | |
2745 | writel(AMD_BIT(UDC_EPSTS_IN), | |
2746 | &dev->ep[UDC_EP0IN_IX].regs->sts); | |
2747 | } | |
2748 | if (dev->stall_ep0in) { | |
2749 | DBG(dev, "stall ep0in\n"); | |
2750 | /* halt ep0in */ | |
2751 | tmp = readl(&ep->regs->ctl); | |
2752 | tmp |= AMD_BIT(UDC_EPCTL_S); | |
2753 | writel(tmp, &ep->regs->ctl); | |
2754 | } else { | |
2755 | if (!list_empty(&ep->queue)) { | |
2756 | /* next request */ | |
2757 | req = list_entry(ep->queue.next, | |
2758 | struct udc_request, queue); | |
2759 | ||
2760 | if (ep->dma) { | |
2761 | /* write desc pointer */ | |
2762 | writel(req->td_phys, &ep->regs->desptr); | |
2763 | /* set HOST READY */ | |
2764 | req->td_data->status = | |
2765 | AMD_ADDBITS( | |
2766 | req->td_data->status, | |
2767 | UDC_DMA_STP_STS_BS_HOST_READY, | |
2768 | UDC_DMA_STP_STS_BS); | |
2769 | ||
2770 | /* set poll demand bit */ | |
2771 | tmp = | |
2772 | readl(&dev->ep[UDC_EP0IN_IX].regs->ctl); | |
2773 | tmp |= AMD_BIT(UDC_EPCTL_P); | |
2774 | writel(tmp, | |
2775 | &dev->ep[UDC_EP0IN_IX].regs->ctl); | |
2776 | ||
2777 | /* all bytes will be transferred */ | |
2778 | req->req.actual = req->req.length; | |
2779 | ||
2780 | /* complete req */ | |
2781 | complete_req(ep, req, 0); | |
2782 | ||
2783 | } else { | |
2784 | /* write fifo */ | |
2785 | udc_txfifo_write(ep, &req->req); | |
2786 | ||
25985edc | 2787 | /* length bytes transferred */ |
55d402d8 TD |
2788 | len = req->req.length - req->req.actual; |
2789 | if (len > ep->ep.maxpacket) | |
2790 | len = ep->ep.maxpacket; | |
2791 | ||
2792 | req->req.actual += len; | |
2793 | if (req->req.actual == req->req.length | |
2794 | || (len != ep->ep.maxpacket)) { | |
2795 | /* complete req */ | |
2796 | complete_req(ep, req, 0); | |
2797 | } | |
2798 | } | |
2799 | ||
2800 | } | |
2801 | } | |
2802 | ep->halted = 0; | |
2803 | dev->stall_ep0in = 0; | |
2804 | if (!ep->dma) { | |
2805 | /* clear IN bit */ | |
2806 | writel(AMD_BIT(UDC_EPSTS_IN), | |
2807 | &dev->ep[UDC_EP0IN_IX].regs->sts); | |
2808 | } | |
2809 | } | |
2810 | ||
2811 | return ret_val; | |
2812 | } | |
2813 | ||
2814 | ||
2815 | /* Interrupt handler for global device events */ | |
2816 | static irqreturn_t udc_dev_isr(struct udc *dev, u32 dev_irq) | |
2817 | __releases(dev->lock) | |
2818 | __acquires(dev->lock) | |
2819 | { | |
2820 | irqreturn_t ret_val = IRQ_NONE; | |
2821 | u32 tmp; | |
2822 | u32 cfg; | |
2823 | struct udc_ep *ep; | |
2824 | u16 i; | |
2825 | u8 udc_csr_epix; | |
2826 | ||
2827 | /* SET_CONFIG irq ? */ | |
2828 | if (dev_irq & AMD_BIT(UDC_DEVINT_SC)) { | |
2829 | ret_val = IRQ_HANDLED; | |
2830 | ||
2831 | /* read config value */ | |
2832 | tmp = readl(&dev->regs->sts); | |
2833 | cfg = AMD_GETBITS(tmp, UDC_DEVSTS_CFG); | |
2834 | DBG(dev, "SET_CONFIG interrupt: config=%d\n", cfg); | |
2835 | dev->cur_config = cfg; | |
2836 | dev->set_cfg_not_acked = 1; | |
2837 | ||
2838 | /* make usb request for gadget driver */ | |
2839 | memset(&setup_data, 0 , sizeof(union udc_setup_data)); | |
2840 | setup_data.request.bRequest = USB_REQ_SET_CONFIGURATION; | |
fd05e720 | 2841 | setup_data.request.wValue = cpu_to_le16(dev->cur_config); |
55d402d8 TD |
2842 | |
2843 | /* program the NE registers */ | |
2844 | for (i = 0; i < UDC_EP_NUM; i++) { | |
2845 | ep = &dev->ep[i]; | |
2846 | if (ep->in) { | |
2847 | ||
2848 | /* ep ix in UDC CSR register space */ | |
2849 | udc_csr_epix = ep->num; | |
2850 | ||
2851 | ||
2852 | /* OUT ep */ | |
2853 | } else { | |
2854 | /* ep ix in UDC CSR register space */ | |
2855 | udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS; | |
2856 | } | |
2857 | ||
2858 | tmp = readl(&dev->csr->ne[udc_csr_epix]); | |
2859 | /* ep cfg */ | |
2860 | tmp = AMD_ADDBITS(tmp, ep->dev->cur_config, | |
2861 | UDC_CSR_NE_CFG); | |
2862 | /* write reg */ | |
2863 | writel(tmp, &dev->csr->ne[udc_csr_epix]); | |
2864 | ||
2865 | /* clear stall bits */ | |
2866 | ep->halted = 0; | |
2867 | tmp = readl(&ep->regs->ctl); | |
2868 | tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S); | |
2869 | writel(tmp, &ep->regs->ctl); | |
2870 | } | |
2871 | /* call gadget zero with setup data received */ | |
2872 | spin_unlock(&dev->lock); | |
2873 | tmp = dev->driver->setup(&dev->gadget, &setup_data.request); | |
2874 | spin_lock(&dev->lock); | |
2875 | ||
2876 | } /* SET_INTERFACE ? */ | |
2877 | if (dev_irq & AMD_BIT(UDC_DEVINT_SI)) { | |
2878 | ret_val = IRQ_HANDLED; | |
2879 | ||
2880 | dev->set_cfg_not_acked = 1; | |
2881 | /* read interface and alt setting values */ | |
2882 | tmp = readl(&dev->regs->sts); | |
2883 | dev->cur_alt = AMD_GETBITS(tmp, UDC_DEVSTS_ALT); | |
2884 | dev->cur_intf = AMD_GETBITS(tmp, UDC_DEVSTS_INTF); | |
2885 | ||
2886 | /* make usb request for gadget driver */ | |
2887 | memset(&setup_data, 0 , sizeof(union udc_setup_data)); | |
2888 | setup_data.request.bRequest = USB_REQ_SET_INTERFACE; | |
2889 | setup_data.request.bRequestType = USB_RECIP_INTERFACE; | |
fd05e720 AV |
2890 | setup_data.request.wValue = cpu_to_le16(dev->cur_alt); |
2891 | setup_data.request.wIndex = cpu_to_le16(dev->cur_intf); | |
55d402d8 TD |
2892 | |
2893 | DBG(dev, "SET_INTERFACE interrupt: alt=%d intf=%d\n", | |
2894 | dev->cur_alt, dev->cur_intf); | |
2895 | ||
2896 | /* program the NE registers */ | |
2897 | for (i = 0; i < UDC_EP_NUM; i++) { | |
2898 | ep = &dev->ep[i]; | |
2899 | if (ep->in) { | |
2900 | ||
2901 | /* ep ix in UDC CSR register space */ | |
2902 | udc_csr_epix = ep->num; | |
2903 | ||
2904 | ||
2905 | /* OUT ep */ | |
2906 | } else { | |
2907 | /* ep ix in UDC CSR register space */ | |
2908 | udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS; | |
2909 | } | |
2910 | ||
2911 | /* UDC CSR reg */ | |
2912 | /* set ep values */ | |
2913 | tmp = readl(&dev->csr->ne[udc_csr_epix]); | |
2914 | /* ep interface */ | |
2915 | tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf, | |
2916 | UDC_CSR_NE_INTF); | |
2918 | /* ep alt */ | |
2919 | tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt, | |
2920 | UDC_CSR_NE_ALT); | |
2921 | /* write reg */ | |
2922 | writel(tmp, &dev->csr->ne[udc_csr_epix]); | |
2923 | ||
2924 | /* clear stall bits */ | |
2925 | ep->halted = 0; | |
2926 | tmp = readl(&ep->regs->ctl); | |
2927 | tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S); | |
2928 | writel(tmp, &ep->regs->ctl); | |
2929 | } | |
2930 | ||
2931 | /* call gadget zero with setup data received */ | |
2932 | spin_unlock(&dev->lock); | |
2933 | tmp = dev->driver->setup(&dev->gadget, &setup_data.request); | |
2934 | spin_lock(&dev->lock); | |
2935 | ||
2936 | } /* USB reset */ | |
2937 | if (dev_irq & AMD_BIT(UDC_DEVINT_UR)) { | |
2938 | DBG(dev, "USB Reset interrupt\n"); | |
2939 | ret_val = IRQ_HANDLED; | |
2940 | ||
2941 | /* allow soft reset when suspend occurs */ | |
2942 | soft_reset_occured = 0; | |
2943 | ||
2944 | dev->waiting_zlp_ack_ep0in = 0; | |
2945 | dev->set_cfg_not_acked = 0; | |
2946 | ||
2947 | /* mask not needed interrupts */ | |
2948 | udc_mask_unused_interrupts(dev); | |
2949 | ||
2950 | /* call gadget to resume and reset configs etc. */ | |
2951 | spin_unlock(&dev->lock); | |
2952 | if (dev->sys_suspended && dev->driver->resume) { | |
2953 | dev->driver->resume(&dev->gadget); | |
2954 | dev->sys_suspended = 0; | |
2955 | } | |
2956 | dev->driver->disconnect(&dev->gadget); | |
2957 | spin_lock(&dev->lock); | |
2958 | ||
2959 | /* disable ep0 to empty req queue */ | |
2960 | empty_req_queue(&dev->ep[UDC_EP0IN_IX]); | |
2961 | ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]); | |
2962 | ||
2963 | /* soft reset when rxfifo not empty */ | |
2964 | tmp = readl(&dev->regs->sts); | |
2965 | if (!(tmp & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) | |
2966 | && !soft_reset_after_usbreset_occured) { | |
2967 | udc_soft_reset(dev); | |
2968 | soft_reset_after_usbreset_occured++; | |
2969 | } | |
2970 | ||
2971 | /* | |
2972 | * DMA reset to kill potential old DMA hw hang, | |
2973 | * POLL bit is already reset by ep_init() through | |
2974 | * disconnect() | |
2975 | */ | |
2976 | DBG(dev, "DMA machine reset\n"); | |
2977 | tmp = readl(&dev->regs->cfg); | |
2978 | writel(tmp | AMD_BIT(UDC_DEVCFG_DMARST), &dev->regs->cfg); | |
2979 | writel(tmp, &dev->regs->cfg); | |
2980 | ||
2981 | /* put into initial config */ | |
2982 | udc_basic_init(dev); | |
2983 | ||
2984 | /* enable device setup interrupts */ | |
2985 | udc_enable_dev_setup_interrupts(dev); | |
2986 | ||
2987 | /* enable suspend interrupt */ | |
2988 | tmp = readl(&dev->regs->irqmsk); | |
2989 | tmp &= AMD_UNMASK_BIT(UDC_DEVINT_US); | |
2990 | writel(tmp, &dev->regs->irqmsk); | |
2991 | ||
2992 | } /* USB suspend */ | |
2993 | if (dev_irq & AMD_BIT(UDC_DEVINT_US)) { | |
2994 | DBG(dev, "USB Suspend interrupt\n"); | |
2995 | ret_val = IRQ_HANDLED; | |
2996 | if (dev->driver->suspend) { | |
2997 | spin_unlock(&dev->lock); | |
2998 | dev->sys_suspended = 1; | |
2999 | dev->driver->suspend(&dev->gadget); | |
3000 | spin_lock(&dev->lock); | |
3001 | } | |
3002 | } /* new speed ? */ | |
3003 | if (dev_irq & AMD_BIT(UDC_DEVINT_ENUM)) { | |
3004 | DBG(dev, "ENUM interrupt\n"); | |
3005 | ret_val = IRQ_HANDLED; | |
3006 | soft_reset_after_usbreset_occured = 0; | |
3007 | ||
3008 | /* disable ep0 to empty req queue */ | |
3009 | empty_req_queue(&dev->ep[UDC_EP0IN_IX]); | |
3010 | ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]); | |
3011 | ||
3012 | /* link up all endpoints */ | |
3013 | udc_setup_endpoints(dev); | |
3014 | if (dev->gadget.speed == USB_SPEED_HIGH) { | |
3015 | dev_info(&dev->pdev->dev, "Connect: speed = %s\n", | |
3016 | "high"); | |
3017 | } else if (dev->gadget.speed == USB_SPEED_FULL) { | |
3018 | dev_info(&dev->pdev->dev, "Connect: speed = %s\n", | |
3019 | "full"); | |
3020 | } | |
3021 | ||
3022 | /* init ep 0 */ | |
3023 | activate_control_endpoints(dev); | |
3024 | ||
3025 | /* enable ep0 interrupts */ | |
3026 | udc_enable_ep0_interrupts(dev); | |
3027 | } | |
3028 | /* session valid change interrupt */ | |
3029 | if (dev_irq & AMD_BIT(UDC_DEVINT_SVC)) { | |
3030 | DBG(dev, "USB SVC interrupt\n"); | |
3031 | ret_val = IRQ_HANDLED; | |
3032 | ||
3033 | /* check that session is not valid to detect disconnect */ | |
3034 | tmp = readl(&dev->regs->sts); | |
3035 | if (!(tmp & AMD_BIT(UDC_DEVSTS_SESSVLD))) { | |
3036 | /* disable suspend interrupt */ | |
3037 | tmp = readl(&dev->regs->irqmsk); | |
3038 | tmp |= AMD_BIT(UDC_DEVINT_US); | |
3039 | writel(tmp, &dev->regs->irqmsk); | |
3040 | DBG(dev, "USB Disconnect (session valid low)\n"); | |
3041 | /* cleanup on disconnect */ | |
3042 | usb_disconnect(udc); | |
3043 | } | |
3044 | ||
3045 | } | |
3046 | ||
3047 | return ret_val; | |
3048 | } | |
3049 | ||
3050 | /* Interrupt Service Routine, see Linux Kernel Doc for parameters */ | |
3051 | static irqreturn_t udc_irq(int irq, void *pdev) | |
3052 | { | |
3053 | struct udc *dev = pdev; | |
3054 | u32 reg; | |
3055 | u16 i; | |
3056 | u32 ep_irq; | |
3057 | irqreturn_t ret_val = IRQ_NONE; | |
3058 | ||
3059 | spin_lock(&dev->lock); | |
3060 | ||
3061 | /* check for ep irq */ | |
3062 | reg = readl(&dev->regs->ep_irqsts); | |
3063 | if (reg) { | |
3064 | if (reg & AMD_BIT(UDC_EPINT_OUT_EP0)) | |
3065 | ret_val |= udc_control_out_isr(dev); | |
3066 | if (reg & AMD_BIT(UDC_EPINT_IN_EP0)) | |
3067 | ret_val |= udc_control_in_isr(dev); | |
3068 | ||
3069 | /* | |
3070 | * data endpoint | |
3071 | * iterate ep's | |
3072 | */ | |
3073 | for (i = 1; i < UDC_EP_NUM; i++) { | |
3074 | ep_irq = 1 << i; | |
3075 | if (!(reg & ep_irq) || i == UDC_EPINT_OUT_EP0) | |
3076 | continue; | |
3077 | ||
3078 | /* clear irq status */ | |
3079 | writel(ep_irq, &dev->regs->ep_irqsts); | |
3080 | ||
3081 | /* irq for out ep ? */ | |
3082 | if (i > UDC_EPIN_NUM) | |
3083 | ret_val |= udc_data_out_isr(dev, i); | |
3084 | else | |
3085 | ret_val |= udc_data_in_isr(dev, i); | |
3086 | } | |
3087 | ||
3088 | } | |
3089 | ||
3090 | ||
3091 | /* check for dev irq */ | |
3092 | reg = readl(&dev->regs->irqsts); | |
3093 | if (reg) { | |
3094 | /* clear irq */ | |
3095 | writel(reg, &dev->regs->irqsts); | |
3096 | ret_val |= udc_dev_isr(dev, reg); | |
3097 | } | |
3098 | ||
3099 | ||
3100 | spin_unlock(&dev->lock); | |
3101 | return ret_val; | |
3102 | } | |
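| ||
| /* | |
| * Dispatch order in udc_irq(): ep0's bits (UDC_EPINT_IN_EP0 and | |
| * UDC_EPINT_OUT_EP0) are handled first, then bit i of ep_irqsts maps | |
| * to data endpoint i, indices above UDC_EPIN_NUM being OUT eps; both | |
| * irq status registers are acknowledged by writing the bits back: | |
| * | |
| *   ep_irq = 1 << i;                          mask for endpoint i | |
| *   writel(ep_irq, &dev->regs->ep_irqsts);    write-to-clear ack | |
| */ | |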
3103 | ||
3104 | /* Tears down device */ | |
3105 | static void gadget_release(struct device *pdev) | |
3106 | { | |
3107 | struct amd5536udc *dev = dev_get_drvdata(pdev); | |
3108 | kfree(dev); | |
3109 | } | |
3110 | ||
3111 | /* Cleanup on device remove */ | |
3112 | static void udc_remove(struct udc *dev) | |
3113 | { | |
3114 | /* remove timer */ | |
3115 | stop_timer++; | |
3116 | if (timer_pending(&udc_timer)) | |
3117 | wait_for_completion(&on_exit); | |
3118 | if (udc_timer.data) | |
3119 | del_timer_sync(&udc_timer); | |
3120 | /* remove pollstall timer */ | |
3121 | stop_pollstall_timer++; | |
3122 | if (timer_pending(&udc_pollstall_timer)) | |
3123 | wait_for_completion(&on_pollstall_exit); | |
3124 | if (udc_pollstall_timer.data) | |
3125 | del_timer_sync(&udc_pollstall_timer); | |
3126 | udc = NULL; | |
3127 | } | |
3128 | ||
3129 | /* Reset all pci context */ | |
3130 | static void udc_pci_remove(struct pci_dev *pdev) | |
3131 | { | |
3132 | struct udc *dev; | |
3133 | ||
3134 | dev = pci_get_drvdata(pdev); | |
3135 | ||
3136 | /* gadget driver must not be registered */ | |
3137 | BUG_ON(dev->driver != NULL); | |
3138 | ||
3139 | /* dma pool cleanup */ | |
3140 | if (dev->data_requests) | |
3141 | pci_pool_destroy(dev->data_requests); | |
3142 | ||
3143 | if (dev->stp_requests) { | |
3144 | /* cleanup DMA desc's for ep0in */ | |
3145 | pci_pool_free(dev->stp_requests, | |
3146 | dev->ep[UDC_EP0OUT_IX].td_stp, | |
3147 | dev->ep[UDC_EP0OUT_IX].td_stp_dma); | |
3148 | pci_pool_free(dev->stp_requests, | |
3149 | dev->ep[UDC_EP0OUT_IX].td, | |
3150 | dev->ep[UDC_EP0OUT_IX].td_phys); | |
3151 | ||
3152 | pci_pool_destroy(dev->stp_requests); | |
3153 | } | |
3154 | ||
3155 | /* reset controller */ | |
3156 | writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg); | |
3157 | if (dev->irq_registered) | |
3158 | free_irq(pdev->irq, dev); | |
3159 | if (dev->regs) | |
3160 | iounmap(dev->regs); | |
3161 | if (dev->mem_region) | |
3162 | release_mem_region(pci_resource_start(pdev, 0), | |
3163 | pci_resource_len(pdev, 0)); | |
3164 | if (dev->active) | |
3165 | pci_disable_device(pdev); | |
3166 | ||
3167 | device_unregister(&dev->gadget.dev); | |
3168 | pci_set_drvdata(pdev, NULL); | |
3169 | ||
3170 | udc_remove(dev); | |
3171 | } | |
3172 | ||
3173 | /* create dma pools on init */ | |
3174 | static int init_dma_pools(struct udc *dev) | |
3175 | { | |
3176 | struct udc_stp_dma *td_stp; | |
3177 | struct udc_data_dma *td_data; | |
3178 | int retval; | |
3179 | ||
3180 | /* ensure consistent DMA mode settings */ | |
3181 | if (use_dma_ppb) { | |
3182 | use_dma_bufferfill_mode = 0; | |
3183 | } else { | |
3184 | use_dma_ppb_du = 0; | |
3185 | use_dma_bufferfill_mode = 1; | |
3186 | } | |
3187 | ||
3188 | /* DMA setup */ | |
3189 | dev->data_requests = dma_pool_create("data_requests", NULL, | |
3190 | sizeof(struct udc_data_dma), 0, 0); | |
3191 | if (!dev->data_requests) { | |
3192 | DBG(dev, "can't get request data pool\n"); | |
3193 | retval = -ENOMEM; | |
3194 | goto finished; | |
3195 | } | |
3196 | ||
3197 | /* EP0 in dma regs = dev control regs */ | |
3198 | dev->ep[UDC_EP0IN_IX].dma = &dev->regs->ctl; | |
3199 | ||
3200 | /* dma desc for setup data */ | |
3201 | dev->stp_requests = dma_pool_create("setup requests", NULL, | |
3202 | sizeof(struct udc_stp_dma), 0, 0); | |
3203 | if (!dev->stp_requests) { | |
3204 | DBG(dev, "can't get stp request pool\n"); | |
3205 | retval = -ENOMEM; | |
3206 | goto finished; | |
3207 | } | |
3208 | /* setup */ | |
3209 | td_stp = dma_pool_alloc(dev->stp_requests, GFP_KERNEL, | |
3210 | &dev->ep[UDC_EP0OUT_IX].td_stp_dma); | |
3211 | if (td_stp == NULL) { | |
3212 | retval = -ENOMEM; | |
3213 | goto finished; | |
3214 | } | |
3215 | dev->ep[UDC_EP0OUT_IX].td_stp = td_stp; | |
3216 | ||
3217 | /* data: 0 packets !? */ | |
3218 | td_data = dma_pool_alloc(dev->stp_requests, GFP_KERNEL, | |
3219 | &dev->ep[UDC_EP0OUT_IX].td_phys); | |
3220 | if (td_data == NULL) { | |
3221 | retval = -ENOMEM; | |
3222 | goto finished; | |
3223 | } | |
3224 | dev->ep[UDC_EP0OUT_IX].td = td_data; | |
3225 | return 0; | |
3226 | ||
3227 | finished: | |
3228 | return retval; | |
3229 | } | |
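| ||
| /* | |
| * Note that ep0out's data descriptor (td) is allocated from | |
| * stp_requests, the setup pool, not from data_requests; | |
| * udc_pci_remove() frees it from that same pool, so the two sides | |
| * must stay in sync. | |
| */ | |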
3230 | ||
3231 | /* Called by pci bus driver to init pci context */ | |
3232 | static int udc_pci_probe( | |
3233 | struct pci_dev *pdev, | |
3234 | const struct pci_device_id *id | |
3235 | ) | |
3236 | { | |
3237 | struct udc *dev; | |
3238 | unsigned long resource; | |
3239 | unsigned long len; | |
3240 | int retval = 0; | |
3241 | ||
3242 | /* one udc only */ | |
3243 | if (udc) { | |
3244 | dev_dbg(&pdev->dev, "already probed\n"); | |
3245 | return -EBUSY; | |
3246 | } | |
3247 | ||
3248 | /* init */ | |
3249 | dev = kzalloc(sizeof(struct udc), GFP_KERNEL); | |
3250 | if (!dev) { | |
3251 | retval = -ENOMEM; | |
3252 | goto finished; | |
3253 | } | |
55d402d8 TD |
3254 | |
3255 | /* pci setup */ | |
3256 | if (pci_enable_device(pdev) < 0) { | |
73d79aab | 3257 | kfree(dev); |
af3d305c | 3258 | dev = NULL; |
55d402d8 TD |
3259 | retval = -ENODEV; |
3260 | goto finished; | |
3261 | } | |
3262 | dev->active = 1; | |
3263 | ||
3264 | /* PCI resource allocation */ | |
3265 | resource = pci_resource_start(pdev, 0); | |
3266 | len = pci_resource_len(pdev, 0); | |
3267 | ||
3268 | if (!request_mem_region(resource, len, name)) { | |
3269 | dev_dbg(&pdev->dev, "pci device used already\n"); | |
73d79aab | 3270 | kfree(dev); |
af3d305c | 3271 | dev = NULL; |
55d402d8 TD |
3272 | retval = -EBUSY; |
3273 | goto finished; | |
3274 | } | |
3275 | dev->mem_region = 1; | |
3276 | ||
3277 | dev->virt_addr = ioremap_nocache(resource, len); | |
3278 | if (dev->virt_addr == NULL) { | |
3279 | dev_dbg(&pdev->dev, "start address cannot be mapped\n"); | |
73d79aab | 3280 | kfree(dev); |
af3d305c | 3281 | dev = NULL; |
55d402d8 TD |
3282 | retval = -EFAULT; |
3283 | goto finished; | |
3284 | } | |
3285 | ||
3286 | if (!pdev->irq) { | |
3287 | dev_err(&dev->pdev->dev, "irq not set\n"); | |
73d79aab | 3288 | kfree(dev); |
af3d305c | 3289 | dev = NULL; |
55d402d8 TD |
3290 | retval = -ENODEV; |
3291 | goto finished; | |
3292 | } | |
3293 | ||
c5deb832 TD |
3294 | spin_lock_init(&dev->lock); |
3295 | /* udc csr registers base */ | |
3296 | dev->csr = dev->virt_addr + UDC_CSR_ADDR; | |
3297 | /* dev registers base */ | |
3298 | dev->regs = dev->virt_addr + UDC_DEVCFG_ADDR; | |
3299 | /* ep registers base */ | |
3300 | dev->ep_regs = dev->virt_addr + UDC_EPREGS_ADDR; | |
3301 | /* fifo's base */ | |
3302 | dev->rxfifo = (u32 __iomem *)(dev->virt_addr + UDC_RXFIFO_ADDR); | |
3303 | dev->txfifo = (u32 __iomem *)(dev->virt_addr + UDC_TXFIFO_ADDR); | |
3304 | ||
55d402d8 TD |
3305 | if (request_irq(pdev->irq, udc_irq, IRQF_SHARED, name, dev) != 0) { |
3306 | dev_dbg(&dev->pdev->dev, "request_irq(%d) fail\n", pdev->irq); | |
73d79aab | 3307 | kfree(dev); |
af3d305c | 3308 | dev = NULL; |
55d402d8 TD |
3309 | retval = -EBUSY; |
3310 | goto finished; | |
3311 | } | |
3312 | dev->irq_registered = 1; | |
3313 | ||
3314 | pci_set_drvdata(pdev, dev); | |
3315 | ||
1d3ee41e AK |
3316 | /* chip revision for HS AMD5536 */ |
3317 | dev->chiprev = pdev->revision; | |
55d402d8 TD |
3318 | |
3319 | pci_set_master(pdev); | |
51745281 | 3320 | pci_try_set_mwi(pdev); |
55d402d8 | 3321 | |
55d402d8 TD |
	/* init dma pools */
	if (use_dma) {
		retval = init_dma_pools(dev);
		if (retval != 0)
			goto finished;
	}

	dev->phys_addr = resource;
	dev->irq = pdev->irq;
	dev->pdev = pdev;
	dev->gadget.dev.parent = &pdev->dev;
	dev->gadget.dev.dma_mask = pdev->dev.dma_mask;

	/* general probing */
	if (udc_probe(dev) == 0)
		return 0;

finished:
	if (dev)
		udc_pci_remove(pdev);
	return retval;
}

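/*
 * Error-path convention in udc_pci_probe() above: anything acquired before
 * the failure point is released inline and dev is freed and set to NULL, so
 * the finished label only invokes udc_pci_remove() for failures (such as
 * init_dma_pools() or udc_probe()) that happen after drvdata is set.
 * udc_pci_remove() can presumably then tear down via the dev->active,
 * dev->mem_region and dev->irq_registered flags recorded along the way.
 */
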
/* general probe */
static int udc_probe(struct udc *dev)
{
	char tmp[128];
	u32 reg;
	int retval;

	/* mark timers as not initialized */
	udc_timer.data = 0;
	udc_pollstall_timer.data = 0;

	/* device struct setup */
	dev->gadget.ops = &udc_ops;

	dev_set_name(&dev->gadget.dev, "gadget");
	dev->gadget.dev.release = gadget_release;
	dev->gadget.name = name;
	dev->gadget.is_dualspeed = 1;

	/* init registers, interrupts, ... */
	startup_registers(dev);

	dev_info(&dev->pdev->dev, "%s\n", mod_desc);

	snprintf(tmp, sizeof tmp, "%d", dev->irq);
	dev_info(&dev->pdev->dev,
		"irq %s, pci mem %08lx, chip rev %02x (Geode5536 %s)\n",
		tmp, dev->phys_addr, dev->chiprev,
		(dev->chiprev == UDC_HSA0_REV) ? "A0" : "B1");
	strcpy(tmp, UDC_DRIVER_VERSION_STRING);
	if (dev->chiprev == UDC_HSA0_REV) {
		dev_err(&dev->pdev->dev, "chip revision is A0; too old\n");
		retval = -ENODEV;
		goto finished;
	}
	dev_info(&dev->pdev->dev,
		"driver version: %s (for Geode5536 B1)\n", tmp);
	udc = dev;

	retval = device_register(&dev->gadget.dev);
	if (retval) {
		put_device(&dev->gadget.dev);
		goto finished;
	}

	/* timer init */
	init_timer(&udc_timer);
	udc_timer.function = udc_timer_function;
	udc_timer.data = 1;
	/* timer pollstall init */
	init_timer(&udc_pollstall_timer);
	udc_pollstall_timer.function = udc_pollstall_timer_function;
	udc_pollstall_timer.data = 1;

	/* set SD (soft disconnect) */
	reg = readl(&dev->regs->ctl);
	reg |= AMD_BIT(UDC_DEVCTL_SD);
	writel(reg, &dev->regs->ctl);

	/* print dev register info */
	print_regs(dev);

	return 0;

finished:
	return retval;
}

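/*
 * Note on the SD bit set at the end of udc_probe(): with soft disconnect
 * asserted the UDC stays invisible on the bus; presumably it is cleared
 * again elsewhere in this file once a gadget driver binds, at which point
 * the host first enumerates the device.
 */
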
/* Initiates a remote wakeup */
static int udc_remote_wakeup(struct udc *dev)
{
	unsigned long flags;
	u32 tmp;

	DBG(dev, "UDC initiates remote wakeup\n");

	spin_lock_irqsave(&dev->lock, flags);

	tmp = readl(&dev->regs->ctl);
	tmp |= AMD_BIT(UDC_DEVCTL_RES);
	writel(tmp, &dev->regs->ctl);
	tmp &= AMD_CLEAR_BIT(UDC_DEVCTL_RES);
	writel(tmp, &dev->regs->ctl);

	spin_unlock_irqrestore(&dev->lock, flags);
	return 0;
}

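/*
 * udc_remote_wakeup() pulses the RES (resume) bit in the device control
 * register: setting and then clearing it makes the controller drive resume
 * signalling upstream. Assuming udc_ops wires its .wakeup hook to this
 * path, a bound gadget driver reaches it through the gadget core, e.g.
 * (sketch only):
 *
 *	int ret = usb_gadget_wakeup(&udc->gadget);
 *
 * where usb_gadget_wakeup() simply invokes the .wakeup op.
 */
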
/* PCI device parameters */
static const struct pci_device_id pci_id[] = {
	{
		PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x2096),
		.class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
		.class_mask = 0xffffffff,
	},
	{},
};
MODULE_DEVICE_TABLE(pci, pci_id);

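/*
 * The id table above matches only the UDC function of the chip: AMD vendor
 * ID, device ID 0x2096, further restricted by class to PCI_CLASS_SERIAL_USB
 * with programming interface 0xfe, i.e. the controller operating in device
 * mode rather than as a host controller.
 */
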
/* PCI functions */
static struct pci_driver udc_pci_driver = {
	.name = (char *) name,
	.id_table = pci_id,
	.probe = udc_pci_probe,
	.remove = udc_pci_remove,
};

/* Inits driver */
static int __init init(void)
{
	return pci_register_driver(&udc_pci_driver);
}
module_init(init);

/* Cleans driver */
static void __exit cleanup(void)
{
	pci_unregister_driver(&udc_pci_driver);
}
module_exit(cleanup);

MODULE_DESCRIPTION(UDC_MOD_DESCRIPTION);
MODULE_AUTHOR("Thomas Dahlmann");
MODULE_LICENSE("GPL");