1 /*
2 * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 */
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/pci.h>
12 #include <linux/delay.h>
13 #include <linux/errno.h>
14 #include <linux/list.h>
15 #include <linux/interrupt.h>
16 #include <linux/usb/ch9.h>
17 #include <linux/usb/gadget.h>
18
19 /* Address offset of Registers */
20 #define UDC_EP_REG_SHIFT 0x20 /* Offset to next EP */
21
22 #define UDC_EPCTL_ADDR 0x00 /* Endpoint control */
23 #define UDC_EPSTS_ADDR 0x04 /* Endpoint status */
24 #define UDC_BUFIN_FRAMENUM_ADDR 0x08 /* buffer size in / frame number out */
25 #define UDC_BUFOUT_MAXPKT_ADDR 0x0C /* buffer size out / maxpkt in */
26 #define UDC_SUBPTR_ADDR 0x10 /* setup buffer pointer */
27 #define UDC_DESPTR_ADDR 0x14 /* Data descriptor pointer */
28 #define UDC_CONFIRM_ADDR 0x18 /* Write/Read confirmation */
29
30 #define UDC_DEVCFG_ADDR 0x400 /* Device configuration */
31 #define UDC_DEVCTL_ADDR 0x404 /* Device control */
32 #define UDC_DEVSTS_ADDR 0x408 /* Device status */
33 #define UDC_DEVIRQSTS_ADDR 0x40C /* Device irq status */
34 #define UDC_DEVIRQMSK_ADDR 0x410 /* Device irq mask */
35 #define UDC_EPIRQSTS_ADDR 0x414 /* Endpoint irq status */
36 #define UDC_EPIRQMSK_ADDR 0x418 /* Endpoint irq mask */
37 #define UDC_DEVLPM_ADDR 0x41C /* LPM control / status */
38 #define UDC_CSR_BUSY_ADDR 0x4f0 /* UDC_CSR_BUSY Status register */
39 #define UDC_SRST_ADDR 0x4fc /* SOFT RESET register */
40 #define UDC_CSR_ADDR 0x500 /* USB_DEVICE endpoint register */
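
/*
 * Note on the register map above: the per-endpoint registers
 * (UDC_EPCTL_ADDR .. UDC_CONFIRM_ADDR) repeat every UDC_EP_REG_SHIFT
 * (0x20) bytes and are reached by adding ep->offset_addr in the
 * pch_udc_ep_readl()/pch_udc_ep_writel() helpers below, while the
 * device-global registers start at UDC_DEVCFG_ADDR (0x400).
 */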
41
42 /* Endpoint control register */
43 /* Bit position */
44 #define UDC_EPCTL_MRXFLUSH (1 << 12)
45 #define UDC_EPCTL_RRDY (1 << 9)
46 #define UDC_EPCTL_CNAK (1 << 8)
47 #define UDC_EPCTL_SNAK (1 << 7)
48 #define UDC_EPCTL_NAK (1 << 6)
49 #define UDC_EPCTL_P (1 << 3)
50 #define UDC_EPCTL_F (1 << 1)
51 #define UDC_EPCTL_S (1 << 0)
52 #define UDC_EPCTL_ET_SHIFT 4
53 /* Mask pattern */
54 #define UDC_EPCTL_ET_MASK 0x00000030
55 /* Value for ET field */
56 #define UDC_EPCTL_ET_CONTROL 0
57 #define UDC_EPCTL_ET_ISO 1
58 #define UDC_EPCTL_ET_BULK 2
59 #define UDC_EPCTL_ET_INTERRUPT 3
60
61 /* Endpoint status register */
62 /* Bit position */
63 #define UDC_EPSTS_XFERDONE (1 << 27)
64 #define UDC_EPSTS_RSS (1 << 26)
65 #define UDC_EPSTS_RCS (1 << 25)
66 #define UDC_EPSTS_TXEMPTY (1 << 24)
67 #define UDC_EPSTS_TDC (1 << 10)
68 #define UDC_EPSTS_HE (1 << 9)
69 #define UDC_EPSTS_MRXFIFO_EMP (1 << 8)
70 #define UDC_EPSTS_BNA (1 << 7)
71 #define UDC_EPSTS_IN (1 << 6)
72 #define UDC_EPSTS_OUT_SHIFT 4
73 /* Mask pattern */
74 #define UDC_EPSTS_OUT_MASK 0x00000030
75 #define UDC_EPSTS_ALL_CLR_MASK 0x1F0006F0
76 /* Value for OUT field */
77 #define UDC_EPSTS_OUT_SETUP 2
78 #define UDC_EPSTS_OUT_DATA 1
79
80 /* Device configuration register */
81 /* Bit position */
82 #define UDC_DEVCFG_CSR_PRG (1 << 17)
83 #define UDC_DEVCFG_SP (1 << 3)
84 /* SPD Value */
85 #define UDC_DEVCFG_SPD_HS 0x0
86 #define UDC_DEVCFG_SPD_FS 0x1
87 #define UDC_DEVCFG_SPD_LS 0x2
88
89 /* Device control register */
90 /* Bit position */
91 #define UDC_DEVCTL_THLEN_SHIFT 24
92 #define UDC_DEVCTL_BRLEN_SHIFT 16
93 #define UDC_DEVCTL_CSR_DONE (1 << 13)
94 #define UDC_DEVCTL_SD (1 << 10)
95 #define UDC_DEVCTL_MODE (1 << 9)
96 #define UDC_DEVCTL_BREN (1 << 8)
97 #define UDC_DEVCTL_THE (1 << 7)
98 #define UDC_DEVCTL_DU (1 << 4)
99 #define UDC_DEVCTL_TDE (1 << 3)
100 #define UDC_DEVCTL_RDE (1 << 2)
101 #define UDC_DEVCTL_RES (1 << 0)
102
103 /* Device status register */
104 /* Bit position */
105 #define UDC_DEVSTS_TS_SHIFT 18
106 #define UDC_DEVSTS_ENUM_SPEED_SHIFT 13
107 #define UDC_DEVSTS_ALT_SHIFT 8
108 #define UDC_DEVSTS_INTF_SHIFT 4
109 #define UDC_DEVSTS_CFG_SHIFT 0
110 /* Mask pattern */
111 #define UDC_DEVSTS_TS_MASK 0xfffc0000
112 #define UDC_DEVSTS_ENUM_SPEED_MASK 0x00006000
113 #define UDC_DEVSTS_ALT_MASK 0x00000f00
114 #define UDC_DEVSTS_INTF_MASK 0x000000f0
115 #define UDC_DEVSTS_CFG_MASK 0x0000000f
116 /* value for maximum speed for SPEED field */
117 #define UDC_DEVSTS_ENUM_SPEED_FULL 1
118 #define UDC_DEVSTS_ENUM_SPEED_HIGH 0
119 #define UDC_DEVSTS_ENUM_SPEED_LOW 2
120 #define UDC_DEVSTS_ENUM_SPEED_FULLX 3
121
122 /* Device irq register */
123 /* Bit position */
124 #define UDC_DEVINT_RWKP (1 << 7)
125 #define UDC_DEVINT_ENUM (1 << 6)
126 #define UDC_DEVINT_SOF (1 << 5)
127 #define UDC_DEVINT_US (1 << 4)
128 #define UDC_DEVINT_UR (1 << 3)
129 #define UDC_DEVINT_ES (1 << 2)
130 #define UDC_DEVINT_SI (1 << 1)
131 #define UDC_DEVINT_SC (1 << 0)
132 /* Mask pattern */
133 #define UDC_DEVINT_MSK 0x7f
134
135 /* Endpoint irq register */
136 /* Bit position */
137 #define UDC_EPINT_IN_SHIFT 0
138 #define UDC_EPINT_OUT_SHIFT 16
139 #define UDC_EPINT_IN_EP0 (1 << 0)
140 #define UDC_EPINT_OUT_EP0 (1 << 16)
141 /* Mask pattern */
142 #define UDC_EPINT_MSK_DISABLE_ALL 0xffffffff
143
144 /* UDC_CSR_BUSY Status register */
145 /* Bit position */
146 #define UDC_CSR_BUSY (1 << 0)
147
148 /* SOFT RESET register */
149 /* Bit position */
150 #define UDC_PSRST (1 << 1)
151 #define UDC_SRST (1 << 0)
152
153 /* USB_DEVICE endpoint register */
154 /* Bit position */
155 #define UDC_CSR_NE_NUM_SHIFT 0
156 #define UDC_CSR_NE_DIR_SHIFT 4
157 #define UDC_CSR_NE_TYPE_SHIFT 5
158 #define UDC_CSR_NE_CFG_SHIFT 7
159 #define UDC_CSR_NE_INTF_SHIFT 11
160 #define UDC_CSR_NE_ALT_SHIFT 15
161 #define UDC_CSR_NE_MAX_PKT_SHIFT 19
162 /* Mask pattern */
163 #define UDC_CSR_NE_NUM_MASK 0x0000000f
164 #define UDC_CSR_NE_DIR_MASK 0x00000010
165 #define UDC_CSR_NE_TYPE_MASK 0x00000060
166 #define UDC_CSR_NE_CFG_MASK 0x00000780
167 #define UDC_CSR_NE_INTF_MASK 0x00007800
168 #define UDC_CSR_NE_ALT_MASK 0x00078000
169 #define UDC_CSR_NE_MAX_PKT_MASK 0x3ff80000
170
171 #define PCH_UDC_CSR(ep) (UDC_CSR_ADDR + ep*4)
172 #define PCH_UDC_EPINT(in, num)\
173 (1 << (num + (in ? UDC_EPINT_IN_SHIFT : UDC_EPINT_OUT_SHIFT)))
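
/*
 * Illustrative use of PCH_UDC_EPINT(): IN endpoints occupy bits 0..15
 * and OUT endpoints bits 16..31 of the endpoint irq registers, so
 * PCH_UDC_EPINT(1, 2) == (1 << 2) selects IN EP2 while
 * PCH_UDC_EPINT(0, 2) == (1 << 18) selects OUT EP2.
 */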
174
175 /* Index of endpoint */
176 #define UDC_EP0IN_IDX 0
177 #define UDC_EP0OUT_IDX 1
178 #define UDC_EPIN_IDX(ep) (ep * 2)
179 #define UDC_EPOUT_IDX(ep) (ep * 2 + 1)
180 #define PCH_UDC_EP0 0
181 #define PCH_UDC_EP1 1
182 #define PCH_UDC_EP2 2
183 #define PCH_UDC_EP3 3
184
185 /* Number of endpoint */
186 #define PCH_UDC_EP_NUM 32 /* Total number of EPs (16 IN,16 OUT) */
187 #define PCH_UDC_USED_EP_NUM 4 /* EP number of EP's really used */
188 /* Length Value */
189 #define PCH_UDC_BRLEN 0x0F /* Burst length */
190 #define PCH_UDC_THLEN 0x1F /* Threshold length */
191 /* Value of EP Buffer Size */
192 #define UDC_EP0IN_BUFF_SIZE 16
193 #define UDC_EPIN_BUFF_SIZE 256
194 #define UDC_EP0OUT_BUFF_SIZE 16
195 #define UDC_EPOUT_BUFF_SIZE 256
196 /* Value of EP maximum packet size */
197 #define UDC_EP0IN_MAX_PKT_SIZE 64
198 #define UDC_EP0OUT_MAX_PKT_SIZE 64
199 #define UDC_BULK_MAX_PKT_SIZE 512
200
201 /* DMA */
202 #define DMA_DIR_RX 1 /* DMA for data receive */
203 #define DMA_DIR_TX 2 /* DMA for data transmit */
204 #define DMA_ADDR_INVALID (~(dma_addr_t)0)
205 #define UDC_DMA_MAXPACKET 65536 /* maximum packet size for DMA */
206
207 /**
208 * struct pch_udc_data_dma_desc - Structure to hold DMA descriptor information
209 * for data
210 * @status: Status quadlet
211 * @reserved: Reserved
212 * @dataptr: Buffer pointer
213 * @next: Next descriptor
214 */
215 struct pch_udc_data_dma_desc {
216 u32 status;
217 u32 reserved;
218 u32 dataptr;
219 u32 next;
220 };
221
222 /**
223 * struct pch_udc_stp_dma_desc - Structure to hold DMA descriptor information
224 * for control data
225 * @status: Status
226 * @reserved: Reserved
227 * @request: setup packet data as a struct usb_ctrlrequest
228 * (replaces the raw data12/data34 setup words)
229 */
230 struct pch_udc_stp_dma_desc {
231 u32 status;
232 u32 reserved;
233 struct usb_ctrlrequest request;
234 } __attribute((packed));
235
236 /* DMA status definitions */
237 /* Buffer status */
238 #define PCH_UDC_BUFF_STS 0xC0000000
239 #define PCH_UDC_BS_HST_RDY 0x00000000
240 #define PCH_UDC_BS_DMA_BSY 0x40000000
241 #define PCH_UDC_BS_DMA_DONE 0x80000000
242 #define PCH_UDC_BS_HST_BSY 0xC0000000
243 /* Rx/Tx Status */
244 #define PCH_UDC_RXTX_STS 0x30000000
245 #define PCH_UDC_RTS_SUCC 0x00000000
246 #define PCH_UDC_RTS_DESERR 0x10000000
247 #define PCH_UDC_RTS_BUFERR 0x30000000
248 /* Last Descriptor Indication */
249 #define PCH_UDC_DMA_LAST 0x08000000
250 /* Number of Rx/Tx Bytes Mask */
251 #define PCH_UDC_RXTX_BYTES 0x0000ffff
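
/*
 * The status quadlet of a DMA descriptor packs the buffer ownership
 * state (PCH_UDC_BUFF_STS), the Rx/Tx result (PCH_UDC_RXTX_STS), the
 * last-descriptor flag and the byte count; e.g. a host-ready descriptor
 * for a 512 byte buffer is written as (PCH_UDC_BS_HST_RDY | 512).
 * See pch_udc_create_dma_chain() below.
 */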
252
253 /**
254 * struct pch_udc_cfg_data - Structure to hold current configuration
255 * and interface information
256 * @cur_cfg: current configuration in use
257 * @cur_intf: current interface in use
258 * @cur_alt: current alt interface in use
259 */
260 struct pch_udc_cfg_data {
261 u16 cur_cfg;
262 u16 cur_intf;
263 u16 cur_alt;
264 };
265
266 /**
267 * struct pch_udc_ep - Structure holding a PCH USB device Endpoint information
268 * @ep: embedded usb endpoint structure
269 * @td_stp_phys: DMA address of the setup descriptor
270 * @td_data_phys: DMA address of the data descriptor
271 * @td_stp: setup DMA descriptor
272 * @td_data: data DMA descriptor
273 * @dev: reference to device struct
274 * @offset_addr: offset address of ep register
275 * @desc: endpoint descriptor in use for this ep
276 * @queue: queue for requests
277 * @num: endpoint number
278 * @in: endpoint is IN
279 * @halted: endpoint halted?
280 * @epsts: Endpoint status
281 */
282 struct pch_udc_ep {
283 struct usb_ep ep;
284 dma_addr_t td_stp_phys;
285 dma_addr_t td_data_phys;
286 struct pch_udc_stp_dma_desc *td_stp;
287 struct pch_udc_data_dma_desc *td_data;
288 struct pch_udc_dev *dev;
289 unsigned long offset_addr;
290 const struct usb_endpoint_descriptor *desc;
291 struct list_head queue;
292 unsigned num:5,
293 in:1,
294 halted:1;
295 unsigned long epsts;
296 };
297
298 /**
299 * struct pch_udc_dev - Structure holding complete information
300 * of the PCH USB device
301 * @gadget: gadget driver data
302 * @driver: reference to gadget driver bound
303 * @pdev: reference to the PCI device
304 * @ep: array of endpoints
305 * @lock: protects all state
306 * @active: enabled the PCI device
307 * @stall: stall requested
308 * @prot_stall: protocol stall requested
309 * @irq_registered: irq registered with system
310 * @mem_region: device memory mapped
311 * @registered: driver registered with system
312 * @suspended: driver in suspended state
313 * @connected: gadget driver associated
314 * @set_cfg_not_acked: pending acknowledgement for setup
315 * @waiting_zlp_ack: pending acknowledgement for ZLP
316 * @data_requests: DMA pool for data requests
317 * @stp_requests: DMA pool for setup requests
318 * @dma_addr: DMA address for received data
319 * @ep0out_buf: Buffer for DMA
320 * @setup_data: Received setup data
321 * @phys_addr: physical address of device memory
322 * @base_addr: base address of mapped device memory
323 * @irq: IRQ line for the device
324 * @cfg_data: current cfg, intf, and alt in use
325 */
326 struct pch_udc_dev {
327 struct usb_gadget gadget;
328 struct usb_gadget_driver *driver;
329 struct pci_dev *pdev;
330 struct pch_udc_ep ep[PCH_UDC_EP_NUM];
331 spinlock_t lock; /* protects all state */
332 unsigned active:1,
333 stall:1,
334 prot_stall:1,
335 irq_registered:1,
336 mem_region:1,
337 registered:1,
338 suspended:1,
339 connected:1,
340 set_cfg_not_acked:1,
341 waiting_zlp_ack:1;
342 struct pci_pool *data_requests;
343 struct pci_pool *stp_requests;
344 dma_addr_t dma_addr;
345 void *ep0out_buf;
346 struct usb_ctrlrequest setup_data;
347 unsigned long phys_addr;
348 void __iomem *base_addr;
349 unsigned irq;
350 struct pch_udc_cfg_data cfg_data;
351 };
352
353 #define PCH_UDC_PCI_BAR 1
354 #define PCI_DEVICE_ID_INTEL_EG20T_UDC 0x8808
355 #define PCI_VENDOR_ID_ROHM 0x10DB
356 #define PCI_DEVICE_ID_ML7213_IOH_UDC 0x801D
357 #define PCI_DEVICE_ID_ML7831_IOH_UDC 0x8808
358
359 static const char ep0_string[] = "ep0in";
360 static DEFINE_SPINLOCK(udc_stall_spinlock); /* stall spin lock */
361 struct pch_udc_dev *pch_udc; /* pointer to device object */
362 static bool speed_fs;
363 module_param_named(speed_fs, speed_fs, bool, S_IRUGO);
364 MODULE_PARM_DESC(speed_fs, "true for Full speed operation");
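
/*
 * Usage example (assuming the driver is built as pch_udc.ko): loading
 * it with "modprobe pch_udc speed_fs=1" restricts the controller to
 * full speed; without the parameter pch_udc_init() configures high
 * speed (UDC_DEVCFG_SPD_HS).
 */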
365
366 /**
367 * struct pch_udc_request - Structure holding a PCH USB device request packet
368 * @req: embedded ep request
369 * @td_data_phys: phys. address
370 * @td_data: first dma desc. of chain
371 * @td_data_last: last dma desc. of chain
372 * @queue: associated queue
373 * @dma_going: DMA in progress for request
374 * @dma_mapped: DMA memory mapped for request
375 * @dma_done: DMA completed for request
376 * @chain_len: chain length
377 * @buf: Buffer memory for align adjustment
378 * @dma: DMA memory for align adjustment
379 */
380 struct pch_udc_request {
381 struct usb_request req;
382 dma_addr_t td_data_phys;
383 struct pch_udc_data_dma_desc *td_data;
384 struct pch_udc_data_dma_desc *td_data_last;
385 struct list_head queue;
386 unsigned dma_going:1,
387 dma_mapped:1,
388 dma_done:1;
389 unsigned chain_len;
390 void *buf;
391 dma_addr_t dma;
392 };
393
394 static inline u32 pch_udc_readl(struct pch_udc_dev *dev, unsigned long reg)
395 {
396 return ioread32(dev->base_addr + reg);
397 }
398
399 static inline void pch_udc_writel(struct pch_udc_dev *dev,
400 unsigned long val, unsigned long reg)
401 {
402 iowrite32(val, dev->base_addr + reg);
403 }
404
405 static inline void pch_udc_bit_set(struct pch_udc_dev *dev,
406 unsigned long reg,
407 unsigned long bitmask)
408 {
409 pch_udc_writel(dev, pch_udc_readl(dev, reg) | bitmask, reg);
410 }
411
412 static inline void pch_udc_bit_clr(struct pch_udc_dev *dev,
413 unsigned long reg,
414 unsigned long bitmask)
415 {
416 pch_udc_writel(dev, pch_udc_readl(dev, reg) & ~(bitmask), reg);
417 }
418
419 static inline u32 pch_udc_ep_readl(struct pch_udc_ep *ep, unsigned long reg)
420 {
421 return ioread32(ep->dev->base_addr + ep->offset_addr + reg);
422 }
423
424 static inline void pch_udc_ep_writel(struct pch_udc_ep *ep,
425 unsigned long val, unsigned long reg)
426 {
427 iowrite32(val, ep->dev->base_addr + ep->offset_addr + reg);
428 }
429
430 static inline void pch_udc_ep_bit_set(struct pch_udc_ep *ep,
431 unsigned long reg,
432 unsigned long bitmask)
433 {
434 pch_udc_ep_writel(ep, pch_udc_ep_readl(ep, reg) | bitmask, reg);
435 }
436
437 static inline void pch_udc_ep_bit_clr(struct pch_udc_ep *ep,
438 unsigned long reg,
439 unsigned long bitmask)
440 {
441 pch_udc_ep_writel(ep, pch_udc_ep_readl(ep, reg) & ~(bitmask), reg);
442 }
443
444 /**
445 * pch_udc_csr_busy() - Wait till idle.
446 * @dev: Reference to pch_udc_dev structure
447 */
448 static void pch_udc_csr_busy(struct pch_udc_dev *dev)
449 {
450 unsigned int count = 200;
451
452 /* Wait till idle */
453 while ((pch_udc_readl(dev, UDC_CSR_BUSY_ADDR) & UDC_CSR_BUSY)
454 && --count)
455 cpu_relax();
456 if (!count)
457 dev_err(&dev->pdev->dev, "%s: wait error\n", __func__);
458 }
459
460 /**
461 * pch_udc_write_csr() - Write the command and status registers.
462 * @dev: Reference to pch_udc_dev structure
463 * @val: value to be written to CSR register
464 * @ep: index of the endpoint whose CSR register is written
465 */
466 static void pch_udc_write_csr(struct pch_udc_dev *dev, unsigned long val,
467 unsigned int ep)
468 {
469 unsigned long reg = PCH_UDC_CSR(ep);
470
471 pch_udc_csr_busy(dev); /* Wait till idle */
472 pch_udc_writel(dev, val, reg);
473 pch_udc_csr_busy(dev); /* Wait till idle */
474 }
475
476 /**
477 * pch_udc_read_csr() - Read the command and status registers.
478 * @dev: Reference to pch_udc_dev structure
479 * @ep: index of the endpoint whose CSR register is read
480 *
481 * Return codes: content of CSR register
482 */
483 static u32 pch_udc_read_csr(struct pch_udc_dev *dev, unsigned int ep)
484 {
485 unsigned long reg = PCH_UDC_CSR(ep);
486
487 pch_udc_csr_busy(dev); /* Wait till idle */
488 pch_udc_readl(dev, reg); /* Dummy read */
489 pch_udc_csr_busy(dev); /* Wait till idle */
490 return pch_udc_readl(dev, reg);
491 }
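
/*
 * CSR access protocol as implemented above: every access to a
 * UDC_CSR_ADDR register is bracketed by pch_udc_csr_busy(), and a read
 * additionally issues a dummy read before the read whose value is used.
 */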
492
493 /**
494 * pch_udc_rmt_wakeup() - Initiate for remote wakeup
495 * @dev: Reference to pch_udc_dev structure
496 */
497 static inline void pch_udc_rmt_wakeup(struct pch_udc_dev *dev)
498 {
499 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
500 mdelay(1);
501 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
502 }
503
504 /**
505 * pch_udc_get_frame() - Get the current frame from device status register
506 * @dev: Reference to pch_udc_dev structure
507 * Return: the current frame number
508 */
509 static inline int pch_udc_get_frame(struct pch_udc_dev *dev)
510 {
511 u32 frame = pch_udc_readl(dev, UDC_DEVSTS_ADDR);
512 return (frame & UDC_DEVSTS_TS_MASK) >> UDC_DEVSTS_TS_SHIFT;
513 }
514
515 /**
516 * pch_udc_clear_selfpowered() - Clear the self power control
517 * @dev: Reference to pch_udc_regs structure
518 */
519 static inline void pch_udc_clear_selfpowered(struct pch_udc_dev *dev)
520 {
521 pch_udc_bit_clr(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_SP);
522 }
523
524 /**
525 * pch_udc_set_selfpowered() - Set the self power control
526 * @dev: Reference to pch_udc_regs structure
527 */
528 static inline void pch_udc_set_selfpowered(struct pch_udc_dev *dev)
529 {
530 pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_SP);
531 }
532
533 /**
534 * pch_udc_set_disconnect() - Set the disconnect status.
535 * @dev: Reference to pch_udc_regs structure
536 */
537 static inline void pch_udc_set_disconnect(struct pch_udc_dev *dev)
538 {
539 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
540 }
541
542 /**
543 * pch_udc_clear_disconnect() - Clear the disconnect status.
544 * @dev: Reference to pch_udc_regs structure
545 */
546 static void pch_udc_clear_disconnect(struct pch_udc_dev *dev)
547 {
548 /* Clear the disconnect */
549 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
550 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
551 mdelay(1);
552 /* Resume USB signalling */
553 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
554 }
555
556 /**
557 * pch_udc_vbus_session() - Set or clear the disconnect status.
558 * @dev: Reference to pch_udc_regs structure
559 * @is_active: Parameter specifying the action
560 * 0: indicating VBUS power is ending
561 * !0: indicating VBUS power is starting
562 */
563 static inline void pch_udc_vbus_session(struct pch_udc_dev *dev,
564 int is_active)
565 {
566 if (is_active)
567 pch_udc_clear_disconnect(dev);
568 else
569 pch_udc_set_disconnect(dev);
570 }
571
572 /**
573 * pch_udc_ep_set_stall() - Set the stall of endpoint
574 * @ep: Reference to structure of type pch_udc_ep_regs
575 */
576 static void pch_udc_ep_set_stall(struct pch_udc_ep *ep)
577 {
578 if (ep->in) {
579 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F);
580 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
581 } else {
582 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
583 }
584 }
585
586 /**
587 * pch_udc_ep_clear_stall() - Clear the stall of endpoint
588 * @ep: Reference to structure of type pch_udc_ep_regs
589 */
590 static inline void pch_udc_ep_clear_stall(struct pch_udc_ep *ep)
591 {
592 /* Clear the stall */
593 pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
594 /* Clear NAK by writing CNAK */
595 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_CNAK);
596 }
597
598 /**
599 * pch_udc_ep_set_trfr_type() - Set the transfer type of endpoint
600 * @ep: Reference to structure of type pch_udc_ep_regs
601 * @type: Type of endpoint
602 */
603 static inline void pch_udc_ep_set_trfr_type(struct pch_udc_ep *ep,
604 u8 type)
605 {
606 pch_udc_ep_writel(ep, ((type << UDC_EPCTL_ET_SHIFT) &
607 UDC_EPCTL_ET_MASK), UDC_EPCTL_ADDR);
608 }
609
610 /**
611 * pch_udc_ep_set_bufsz() - Set the buffer size for the endpoint
612 * @ep: Reference to structure of type pch_udc_ep_regs
613 * @buf_size: The buffer word size
614 */
615 static void pch_udc_ep_set_bufsz(struct pch_udc_ep *ep,
616 u32 buf_size, u32 ep_in)
617 {
618 u32 data;
619 if (ep_in) {
620 data = pch_udc_ep_readl(ep, UDC_BUFIN_FRAMENUM_ADDR);
621 data = (data & 0xffff0000) | (buf_size & 0xffff);
622 pch_udc_ep_writel(ep, data, UDC_BUFIN_FRAMENUM_ADDR);
623 } else {
624 data = pch_udc_ep_readl(ep, UDC_BUFOUT_MAXPKT_ADDR);
625 data = (buf_size << 16) | (data & 0xffff);
626 pch_udc_ep_writel(ep, data, UDC_BUFOUT_MAXPKT_ADDR);
627 }
628 }
629
630 /**
631 * pch_udc_ep_set_maxpkt() - Set the Max packet size for the endpoint
632 * @ep: Reference to structure of type pch_udc_ep_regs
633 * @pkt_size: The packet byte size
634 */
635 static void pch_udc_ep_set_maxpkt(struct pch_udc_ep *ep, u32 pkt_size)
636 {
637 u32 data = pch_udc_ep_readl(ep, UDC_BUFOUT_MAXPKT_ADDR);
638 data = (data & 0xffff0000) | (pkt_size & 0xffff);
639 pch_udc_ep_writel(ep, data, UDC_BUFOUT_MAXPKT_ADDR);
640 }
641
642 /**
643 * pch_udc_ep_set_subptr() - Set the Setup buffer pointer for the endpoint
644 * @ep: Reference to structure of type pch_udc_ep_regs
645 * @addr: DMA address of the setup buffer
646 */
647 static inline void pch_udc_ep_set_subptr(struct pch_udc_ep *ep, u32 addr)
648 {
649 pch_udc_ep_writel(ep, addr, UDC_SUBPTR_ADDR);
650 }
651
652 /**
653 * pch_udc_ep_set_ddptr() - Set the Data descriptor pointer for the endpoint
654 * @ep: Reference to structure of type pch_udc_ep_regs
655 * @addr: DMA address of the data descriptor
656 */
657 static inline void pch_udc_ep_set_ddptr(struct pch_udc_ep *ep, u32 addr)
658 {
659 pch_udc_ep_writel(ep, addr, UDC_DESPTR_ADDR);
660 }
661
662 /**
663 * pch_udc_ep_set_pd() - Set the poll demand bit for the endpoint
664 * @ep: Reference to structure of type pch_udc_ep_regs
665 */
666 static inline void pch_udc_ep_set_pd(struct pch_udc_ep *ep)
667 {
668 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_P);
669 }
670
671 /**
672 * pch_udc_ep_set_rrdy() - Set the receive ready bit for the endpoint
673 * @ep: Reference to structure of type pch_udc_ep_regs
674 */
675 static inline void pch_udc_ep_set_rrdy(struct pch_udc_ep *ep)
676 {
677 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_RRDY);
678 }
679
680 /**
681 * pch_udc_ep_clear_rrdy() - Clear the receive ready bit for the endpoint
682 * @ep: Reference to structure of type pch_udc_ep_regs
683 */
684 static inline void pch_udc_ep_clear_rrdy(struct pch_udc_ep *ep)
685 {
686 pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_RRDY);
687 }
688
689 /**
690 * pch_udc_set_dma() - Set the 'TDE' or RDE bit of device control
691 * register depending on the direction specified
692 * @dev: Reference to structure of type pch_udc_regs
693 * @dir: whether Tx or Rx
694 * DMA_DIR_RX: Receive
695 * DMA_DIR_TX: Transmit
696 */
697 static inline void pch_udc_set_dma(struct pch_udc_dev *dev, int dir)
698 {
699 if (dir == DMA_DIR_RX)
700 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RDE);
701 else if (dir == DMA_DIR_TX)
702 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_TDE);
703 }
704
705 /**
706 * pch_udc_clear_dma() - Clear the 'TDE' or RDE bit of device control
707 * register depending on the direction specified
708 * @dev: Reference to structure of type pch_udc_regs
709 * @dir: Whether Tx or Rx
710 * DMA_DIR_RX: Receive
711 * DMA_DIR_TX: Transmit
712 */
713 static inline void pch_udc_clear_dma(struct pch_udc_dev *dev, int dir)
714 {
715 if (dir == DMA_DIR_RX)
716 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RDE);
717 else if (dir == DMA_DIR_TX)
718 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_TDE);
719 }
720
721 /**
722 * pch_udc_set_csr_done() - Set the device control register
723 * CSR done field (bit 13)
724 * @dev: reference to structure of type pch_udc_regs
725 */
726 static inline void pch_udc_set_csr_done(struct pch_udc_dev *dev)
727 {
728 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_CSR_DONE);
729 }
730
731 /**
732 * pch_udc_disable_interrupts() - Disables the specified interrupts
733 * @dev: Reference to structure of type pch_udc_regs
734 * @mask: Mask to disable interrupts
735 */
736 static inline void pch_udc_disable_interrupts(struct pch_udc_dev *dev,
737 u32 mask)
738 {
739 pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, mask);
740 }
741
742 /**
743 * pch_udc_enable_interrupts() - Enable the specified interrupts
744 * @dev: Reference to structure of type pch_udc_regs
745 * @mask: Mask to enable interrupts
746 */
747 static inline void pch_udc_enable_interrupts(struct pch_udc_dev *dev,
748 u32 mask)
749 {
750 pch_udc_bit_clr(dev, UDC_DEVIRQMSK_ADDR, mask);
751 }
752
753 /**
754 * pch_udc_disable_ep_interrupts() - Disable endpoint interrupts
755 * @dev: Reference to structure of type pch_udc_regs
756 * @mask: Mask to disable interrupts
757 */
758 static inline void pch_udc_disable_ep_interrupts(struct pch_udc_dev *dev,
759 u32 mask)
760 {
761 pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, mask);
762 }
763
764 /**
765 * pch_udc_enable_ep_interrupts() - Enable endpoint interrupts
766 * @dev: Reference to structure of type pch_udc_regs
767 * @mask: Mask to enable interrupts
768 */
769 static inline void pch_udc_enable_ep_interrupts(struct pch_udc_dev *dev,
770 u32 mask)
771 {
772 pch_udc_bit_clr(dev, UDC_EPIRQMSK_ADDR, mask);
773 }
774
775 /**
776 * pch_udc_read_device_interrupts() - Read the device interrupts
777 * @dev: Reference to structure of type pch_udc_regs
778 * Return: The device interrupts
779 */
780 static inline u32 pch_udc_read_device_interrupts(struct pch_udc_dev *dev)
781 {
782 return pch_udc_readl(dev, UDC_DEVIRQSTS_ADDR);
783 }
784
785 /**
786 * pch_udc_write_device_interrupts() - Write device interrupts
787 * @dev: Reference to structure of type pch_udc_regs
788 * @val: The value to be written to interrupt register
789 */
790 static inline void pch_udc_write_device_interrupts(struct pch_udc_dev *dev,
791 u32 val)
792 {
793 pch_udc_writel(dev, val, UDC_DEVIRQSTS_ADDR);
794 }
795
796 /**
797 * pch_udc_read_ep_interrupts() - Read the endpoint interrupts
798 * @dev: Reference to structure of type pch_udc_regs
799 * Return: The endpoint interrupts
800 */
801 static inline u32 pch_udc_read_ep_interrupts(struct pch_udc_dev *dev)
802 {
803 return pch_udc_readl(dev, UDC_EPIRQSTS_ADDR);
804 }
805
806 /**
807 * pch_udc_write_ep_interrupts() - Clear endpoint interrupts
808 * @dev: Reference to structure of type pch_udc_regs
809 * @val: The value to be written to interrupt register
810 */
811 static inline void pch_udc_write_ep_interrupts(struct pch_udc_dev *dev,
812 u32 val)
813 {
814 pch_udc_writel(dev, val, UDC_EPIRQSTS_ADDR);
815 }
816
817 /**
818 * pch_udc_read_device_status() - Read the device status
819 * @dev: Reference to structure of type pch_udc_regs
820 * Return: The device status
821 */
822 static inline u32 pch_udc_read_device_status(struct pch_udc_dev *dev)
823 {
824 return pch_udc_readl(dev, UDC_DEVSTS_ADDR);
825 }
826
827 /**
828 * pch_udc_read_ep_control() - Read the endpoint control
829 * @ep: Reference to structure of type pch_udc_ep_regs
830 * Return: The endpoint control register value
831 */
832 static inline u32 pch_udc_read_ep_control(struct pch_udc_ep *ep)
833 {
834 return pch_udc_ep_readl(ep, UDC_EPCTL_ADDR);
835 }
836
837 /**
838 * pch_udc_clear_ep_control() - Clear the endpoint control register
839 * @ep: Reference to structure of type pch_udc_ep_regs
840 * The register is cleared by writing zero
841 */
842 static inline void pch_udc_clear_ep_control(struct pch_udc_ep *ep)
843 {
844 return pch_udc_ep_writel(ep, 0, UDC_EPCTL_ADDR);
845 }
846
847 /**
848 * pch_udc_read_ep_status() - Read the endpoint status
849 * @ep: Reference to structure of type pch_udc_ep_regs
850 * Return: The endpoint status
851 */
852 static inline u32 pch_udc_read_ep_status(struct pch_udc_ep *ep)
853 {
854 return pch_udc_ep_readl(ep, UDC_EPSTS_ADDR);
855 }
856
857 /**
858 * pch_udc_clear_ep_status() - Clear the endpoint status
859 * @ep: Reference to structure of type pch_udc_ep_regs
860 * @stat: Endpoint status
861 */
862 static inline void pch_udc_clear_ep_status(struct pch_udc_ep *ep,
863 u32 stat)
864 {
865 return pch_udc_ep_writel(ep, stat, UDC_EPSTS_ADDR);
866 }
867
868 /**
869 * pch_udc_ep_set_nak() - Set the bit 7 (SNAK field)
870 * of the endpoint control register
871 * @ep: Reference to structure of type pch_udc_ep_regs
872 */
873 static inline void pch_udc_ep_set_nak(struct pch_udc_ep *ep)
874 {
875 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_SNAK);
876 }
877
878 /**
879 * pch_udc_ep_clear_nak() - Set the bit 8 (CNAK field)
880 * of the endpoint control register
881 * @ep: reference to structure of type pch_udc_ep_regs
882 */
883 static void pch_udc_ep_clear_nak(struct pch_udc_ep *ep)
884 {
885 unsigned int loopcnt = 0;
886 struct pch_udc_dev *dev = ep->dev;
887
888 if (!(pch_udc_ep_readl(ep, UDC_EPCTL_ADDR) & UDC_EPCTL_NAK))
889 return;
890 if (!ep->in) {
891 loopcnt = 10000;
892 while (!(pch_udc_read_ep_status(ep) & UDC_EPSTS_MRXFIFO_EMP) &&
893 --loopcnt)
894 udelay(5);
895 if (!loopcnt)
896 dev_err(&dev->pdev->dev, "%s: RxFIFO not Empty\n",
897 __func__);
898 }
899 loopcnt = 10000;
900 while ((pch_udc_read_ep_control(ep) & UDC_EPCTL_NAK) && --loopcnt) {
901 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_CNAK);
902 udelay(5);
903 }
904 if (!loopcnt)
905 dev_err(&dev->pdev->dev, "%s: Clear NAK not set for ep%d%s\n",
906 __func__, ep->num, (ep->in ? "in" : "out"));
907 }
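
/*
 * pch_udc_ep_clear_nak() polls with a bounded udelay() loop rather than
 * sleeping because it is called under dev->lock (see for example
 * pch_udc_pcd_queue() below); for OUT endpoints it first waits for the
 * receive FIFO to drain before issuing CNAK.
 */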
908
909 /**
910 * pch_udc_ep_fifo_flush() - Flush the endpoint fifo
911 * @ep: reference to structure of type pch_udc_ep_regs
912 * @dir: direction of endpoint
913 * 0: endpoint is OUT
914 * !0: endpoint is IN
915 */
916 static void pch_udc_ep_fifo_flush(struct pch_udc_ep *ep, int dir)
917 {
918 if (dir) { /* IN ep */
919 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F);
920 return;
921 }
922 }
923
924 /**
925 * pch_udc_ep_enable() - This API enables the endpoint
926 * @ep: Reference to structure of type pch_udc_ep
927 * @desc: endpoint descriptor
928 */
929 static void pch_udc_ep_enable(struct pch_udc_ep *ep,
930 struct pch_udc_cfg_data *cfg,
931 const struct usb_endpoint_descriptor *desc)
932 {
933 u32 val = 0;
934 u32 buff_size = 0;
935
936 pch_udc_ep_set_trfr_type(ep, desc->bmAttributes);
937 if (ep->in)
938 buff_size = UDC_EPIN_BUFF_SIZE;
939 else
940 buff_size = UDC_EPOUT_BUFF_SIZE;
941 pch_udc_ep_set_bufsz(ep, buff_size, ep->in);
942 pch_udc_ep_set_maxpkt(ep, usb_endpoint_maxp(desc));
943 pch_udc_ep_set_nak(ep);
944 pch_udc_ep_fifo_flush(ep, ep->in);
945 /* Configure the endpoint */
946 val = ep->num << UDC_CSR_NE_NUM_SHIFT | ep->in << UDC_CSR_NE_DIR_SHIFT |
947 ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) <<
948 UDC_CSR_NE_TYPE_SHIFT) |
949 (cfg->cur_cfg << UDC_CSR_NE_CFG_SHIFT) |
950 (cfg->cur_intf << UDC_CSR_NE_INTF_SHIFT) |
951 (cfg->cur_alt << UDC_CSR_NE_ALT_SHIFT) |
952 usb_endpoint_maxp(desc) << UDC_CSR_NE_MAX_PKT_SHIFT;
953
954 if (ep->in)
955 pch_udc_write_csr(ep->dev, val, UDC_EPIN_IDX(ep->num));
956 else
957 pch_udc_write_csr(ep->dev, val, UDC_EPOUT_IDX(ep->num));
958 }
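
/*
 * Worked example of the CSR_NE word built above (illustrative values):
 * a bulk IN endpoint 1 with 512 byte max packet in configuration 1,
 * interface 0, alternate 0 yields
 *   (1 << UDC_CSR_NE_NUM_SHIFT) | (1 << UDC_CSR_NE_DIR_SHIFT) |
 *   (2 << UDC_CSR_NE_TYPE_SHIFT) | (1 << UDC_CSR_NE_CFG_SHIFT) |
 *   (512 << UDC_CSR_NE_MAX_PKT_SHIFT) == 0x100000d1.
 */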
959
960 /**
961 * pch_udc_ep_disable() - This API disables the endpoint
962 * @ep: Reference to structure of type pch_udc_ep
963 */
964 static void pch_udc_ep_disable(struct pch_udc_ep *ep)
965 {
966 if (ep->in) {
967 /* flush the fifo */
968 pch_udc_ep_writel(ep, UDC_EPCTL_F, UDC_EPCTL_ADDR);
969 /* set NAK */
970 pch_udc_ep_writel(ep, UDC_EPCTL_SNAK, UDC_EPCTL_ADDR);
971 pch_udc_ep_bit_set(ep, UDC_EPSTS_ADDR, UDC_EPSTS_IN);
972 } else {
973 /* set NAK */
974 pch_udc_ep_writel(ep, UDC_EPCTL_SNAK, UDC_EPCTL_ADDR);
975 }
976 /* reset desc pointer */
977 pch_udc_ep_writel(ep, 0, UDC_DESPTR_ADDR);
978 }
979
980 /**
981 * pch_udc_wait_ep_stall() - Wait for the endpoint stall bit to clear.
982 * @ep: Reference to structure of type pch_udc_ep
983 */
984 static void pch_udc_wait_ep_stall(struct pch_udc_ep *ep)
985 {
986 unsigned int count = 10000;
987
988 /* Wait till idle */
989 while ((pch_udc_read_ep_control(ep) & UDC_EPCTL_S) && --count)
990 udelay(5);
991 if (!count)
992 dev_err(&ep->dev->pdev->dev, "%s: wait error\n", __func__);
993 }
994
995 /**
996 * pch_udc_init() - This API initializes usb device controller
997 * @dev: Reference to pch_udc_dev structure
998 */
999 static void pch_udc_init(struct pch_udc_dev *dev)
1000 {
1001 if (NULL == dev) {
1002 pr_err("%s: Invalid address\n", __func__);
1003 return;
1004 }
1005 /* Soft Reset and Reset PHY */
1006 pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
1007 pch_udc_writel(dev, UDC_SRST | UDC_PSRST, UDC_SRST_ADDR);
1008 mdelay(1);
1009 pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
1010 pch_udc_writel(dev, 0x00, UDC_SRST_ADDR);
1011 mdelay(1);
1012 /* mask and clear all device interrupts */
1013 pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, UDC_DEVINT_MSK);
1014 pch_udc_bit_set(dev, UDC_DEVIRQSTS_ADDR, UDC_DEVINT_MSK);
1015
1016 /* mask and clear all ep interrupts */
1017 pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
1018 pch_udc_bit_set(dev, UDC_EPIRQSTS_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
1019
1020 /* enable dynamic CSR programming, self powered and device speed */
1021 if (speed_fs)
1022 pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_CSR_PRG |
1023 UDC_DEVCFG_SP | UDC_DEVCFG_SPD_FS);
1024 else /* default: high speed */
1025 pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_CSR_PRG |
1026 UDC_DEVCFG_SP | UDC_DEVCFG_SPD_HS);
1027 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR,
1028 (PCH_UDC_THLEN << UDC_DEVCTL_THLEN_SHIFT) |
1029 (PCH_UDC_BRLEN << UDC_DEVCTL_BRLEN_SHIFT) |
1030 UDC_DEVCTL_MODE | UDC_DEVCTL_BREN |
1031 UDC_DEVCTL_THE);
1032 }
1033
1034 /**
1035 * pch_udc_exit() - This API shuts down the USB device controller
1036 * @dev: Reference to pch_udc_regs structure
1037 */
1038 static void pch_udc_exit(struct pch_udc_dev *dev)
1039 {
1040 /* mask all device interrupts */
1041 pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, UDC_DEVINT_MSK);
1042 /* mask all ep interrupts */
1043 pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
1044 /* put device in disconnected state */
1045 pch_udc_set_disconnect(dev);
1046 }
1047
1048 /**
1049 * pch_udc_pcd_get_frame() - This API is invoked to get the current frame number
1050 * @gadget: Reference to the gadget driver
1051 *
1052 * Return codes:
1053 * the current frame number: Success
1054 * -EINVAL: If the gadget passed is NULL
1055 */
1056 static int pch_udc_pcd_get_frame(struct usb_gadget *gadget)
1057 {
1058 struct pch_udc_dev *dev;
1059
1060 if (!gadget)
1061 return -EINVAL;
1062 dev = container_of(gadget, struct pch_udc_dev, gadget);
1063 return pch_udc_get_frame(dev);
1064 }
1065
1066 /**
1067 * pch_udc_pcd_wakeup() - This API is invoked to initiate a remote wakeup
1068 * @gadget: Reference to the gadget driver
1069 *
1070 * Return codes:
1071 * 0: Success
1072 * -EINVAL: If the gadget passed is NULL
1073 */
1074 static int pch_udc_pcd_wakeup(struct usb_gadget *gadget)
1075 {
1076 struct pch_udc_dev *dev;
1077 unsigned long flags;
1078
1079 if (!gadget)
1080 return -EINVAL;
1081 dev = container_of(gadget, struct pch_udc_dev, gadget);
1082 spin_lock_irqsave(&dev->lock, flags);
1083 pch_udc_rmt_wakeup(dev);
1084 spin_unlock_irqrestore(&dev->lock, flags);
1085 return 0;
1086 }
1087
1088 /**
1089 * pch_udc_pcd_selfpowered() - This API is invoked to specify whether the device
1090 * is self powered or not
1091 * @gadget: Reference to the gadget driver
1092 * @value: Specifies self powered or not
1093 *
1094 * Return codes:
1095 * 0: Success
1096 * -EINVAL: If the gadget passed is NULL
1097 */
1098 static int pch_udc_pcd_selfpowered(struct usb_gadget *gadget, int value)
1099 {
1100 struct pch_udc_dev *dev;
1101
1102 if (!gadget)
1103 return -EINVAL;
1104 dev = container_of(gadget, struct pch_udc_dev, gadget);
1105 if (value)
1106 pch_udc_set_selfpowered(dev);
1107 else
1108 pch_udc_clear_selfpowered(dev);
1109 return 0;
1110 }
1111
1112 /**
1113 * pch_udc_pcd_pullup() - This API is invoked to make the device
1114 * visible/invisible to the host
1115 * @gadget: Reference to the gadget driver
1116 * @is_on: Specifies whether the pull up is made active or inactive
1117 *
1118 * Return codes:
1119 * 0: Success
1120 * -EINVAL: If the gadget passed is NULL
1121 */
1122 static int pch_udc_pcd_pullup(struct usb_gadget *gadget, int is_on)
1123 {
1124 struct pch_udc_dev *dev;
1125
1126 if (!gadget)
1127 return -EINVAL;
1128 dev = container_of(gadget, struct pch_udc_dev, gadget);
1129 pch_udc_vbus_session(dev, is_on);
1130 return 0;
1131 }
1132
1133 /**
1134 * pch_udc_pcd_vbus_session() - This API is used by a driver for an external
1135 * transceiver (or GPIO) that
1136 * detects a VBUS power session starting/ending
1137 * @gadget: Reference to the gadget driver
1138 * @is_active: specifies whether the session is starting or ending
1139 *
1140 * Return codes:
1141 * 0: Success
1142 * -EINVAL: If the gadget passed is NULL
1143 */
1144 static int pch_udc_pcd_vbus_session(struct usb_gadget *gadget, int is_active)
1145 {
1146 struct pch_udc_dev *dev;
1147
1148 if (!gadget)
1149 return -EINVAL;
1150 dev = container_of(gadget, struct pch_udc_dev, gadget);
1151 pch_udc_vbus_session(dev, is_active);
1152 return 0;
1153 }
1154
1155 /**
1156 * pch_udc_pcd_vbus_draw() - This API is used by gadget drivers during
1157 * SET_CONFIGURATION calls to
1158 * specify how much power the device can consume
1159 * @gadget: Reference to the gadget driver
1160 * @mA: specifies the current limit in 2mA unit
1161 *
1162 * Return codes:
1163 * -EINVAL: If the gadget passed is NULL
1164 * -EOPNOTSUPP:
1165 */
1166 static int pch_udc_pcd_vbus_draw(struct usb_gadget *gadget, unsigned int mA)
1167 {
1168 return -EOPNOTSUPP;
1169 }
1170
1171 static int pch_udc_start(struct usb_gadget_driver *driver,
1172 int (*bind)(struct usb_gadget *));
1173 static int pch_udc_stop(struct usb_gadget_driver *driver);
1174 static const struct usb_gadget_ops pch_udc_ops = {
1175 .get_frame = pch_udc_pcd_get_frame,
1176 .wakeup = pch_udc_pcd_wakeup,
1177 .set_selfpowered = pch_udc_pcd_selfpowered,
1178 .pullup = pch_udc_pcd_pullup,
1179 .vbus_session = pch_udc_pcd_vbus_session,
1180 .vbus_draw = pch_udc_pcd_vbus_draw,
1181 .start = pch_udc_start,
1182 .stop = pch_udc_stop,
1183 };
1184
1185 /**
1186 * complete_req() - This API is invoked from the driver when processing
1187 * of a request is complete
1188 * @ep: Reference to the endpoint structure
1189 * @req: Reference to the request structure
1190 * @status: Indicates the success/failure of completion
1191 */
1192 static void complete_req(struct pch_udc_ep *ep, struct pch_udc_request *req,
1193 int status)
1194 {
1195 struct pch_udc_dev *dev;
1196 unsigned halted = ep->halted;
1197
1198 list_del_init(&req->queue);
1199
1200 /* set new status if pending */
1201 if (req->req.status == -EINPROGRESS)
1202 req->req.status = status;
1203 else
1204 status = req->req.status;
1205
1206 dev = ep->dev;
1207 if (req->dma_mapped) {
1208 if (req->dma == DMA_ADDR_INVALID) {
1209 if (ep->in)
1210 dma_unmap_single(&dev->pdev->dev, req->req.dma,
1211 req->req.length,
1212 DMA_TO_DEVICE);
1213 else
1214 dma_unmap_single(&dev->pdev->dev, req->req.dma,
1215 req->req.length,
1216 DMA_FROM_DEVICE);
1217 req->req.dma = DMA_ADDR_INVALID;
1218 } else {
1219 if (ep->in)
1220 dma_unmap_single(&dev->pdev->dev, req->dma,
1221 req->req.length,
1222 DMA_TO_DEVICE);
1223 else {
1224 dma_unmap_single(&dev->pdev->dev, req->dma,
1225 req->req.length,
1226 DMA_FROM_DEVICE);
1227 memcpy(req->req.buf, req->buf, req->req.length);
1228 }
1229 kfree(req->buf);
1230 req->dma = DMA_ADDR_INVALID;
1231 }
1232 req->dma_mapped = 0;
1233 }
1234 ep->halted = 1;
1235 spin_unlock(&dev->lock);
1236 if (!ep->in)
1237 pch_udc_ep_clear_rrdy(ep);
1238 req->req.complete(&ep->ep, &req->req);
1239 spin_lock(&dev->lock);
1240 ep->halted = halted;
1241 }
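
/*
 * Locking note: complete_req() is called with dev->lock held and drops
 * the lock only around the gadget's ->complete() callback, re-acquiring
 * it before restoring ep->halted.
 */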
1242
1243 /**
1244 * empty_req_queue() - This API empties the request queue of an endpoint
1245 * @ep: Reference to the endpoint structure
1246 */
1247 static void empty_req_queue(struct pch_udc_ep *ep)
1248 {
1249 struct pch_udc_request *req;
1250
1251 ep->halted = 1;
1252 while (!list_empty(&ep->queue)) {
1253 req = list_entry(ep->queue.next, struct pch_udc_request, queue);
1254 complete_req(ep, req, -ESHUTDOWN); /* Remove from list */
1255 }
1256 }
1257
1258 /**
1259 * pch_udc_free_dma_chain() - This function frees the DMA chain created
1260 * for the request
1261 * @dev: Reference to the driver structure
1262 * @req: Reference to the request to be freed
1263 *
1264 * Return codes:
1265 * 0: Success
1266 */
1267 static void pch_udc_free_dma_chain(struct pch_udc_dev *dev,
1268 struct pch_udc_request *req)
1269 {
1270 struct pch_udc_data_dma_desc *td = req->td_data;
1271 unsigned i = req->chain_len;
1272
1273 dma_addr_t addr2;
1274 dma_addr_t addr = (dma_addr_t)td->next;
1275 td->next = 0x00;
1276 for (; i > 1; --i) {
1277 /* do not free first desc., will be done by free for request */
1278 td = phys_to_virt(addr);
1279 addr2 = (dma_addr_t)td->next;
1280 pci_pool_free(dev->data_requests, td, addr);
1281 td->next = 0x00;
1282 addr = addr2;
1283 }
1284 req->chain_len = 1;
1285 }
1286
1287 /**
1288 * pch_udc_create_dma_chain() - This function creates or reinitializes
1289 * a DMA chain
1290 * @ep: Reference to the endpoint structure
1291 * @req: Reference to the request
1292 * @buf_len: The buffer length
1293 * @gfp_flags: Flags to be used while mapping the data buffer
1294 *
1295 * Return codes:
1296 * 0: success,
1297 * -ENOMEM: pci_pool_alloc invocation fails
1298 */
1299 static int pch_udc_create_dma_chain(struct pch_udc_ep *ep,
1300 struct pch_udc_request *req,
1301 unsigned long buf_len,
1302 gfp_t gfp_flags)
1303 {
1304 struct pch_udc_data_dma_desc *td = req->td_data, *last;
1305 unsigned long bytes = req->req.length, i = 0;
1306 dma_addr_t dma_addr;
1307 unsigned len = 1;
1308
1309 if (req->chain_len > 1)
1310 pch_udc_free_dma_chain(ep->dev, req);
1311
1312 if (req->dma == DMA_ADDR_INVALID)
1313 td->dataptr = req->req.dma;
1314 else
1315 td->dataptr = req->dma;
1316
1317 td->status = PCH_UDC_BS_HST_BSY;
1318 for (; ; bytes -= buf_len, ++len) {
1319 td->status = PCH_UDC_BS_HST_BSY | min(buf_len, bytes);
1320 if (bytes <= buf_len)
1321 break;
1322 last = td;
1323 td = pci_pool_alloc(ep->dev->data_requests, gfp_flags,
1324 &dma_addr);
1325 if (!td)
1326 goto nomem;
1327 i += buf_len;
1328 td->dataptr = req->td_data->dataptr + i;
1329 last->next = dma_addr;
1330 }
1331
1332 req->td_data_last = td;
1333 td->status |= PCH_UDC_DMA_LAST;
1334 td->next = req->td_data_phys;
1335 req->chain_len = len;
1336 return 0;
1337
1338 nomem:
1339 if (len > 1) {
1340 req->chain_len = len;
1341 pch_udc_free_dma_chain(ep->dev, req);
1342 }
1343 req->chain_len = 1;
1344 return -ENOMEM;
1345 }
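
/*
 * Resulting chain layout (sketch): each descriptor covers at most
 * buf_len bytes of the mapped buffer, the final descriptor carries
 * PCH_UDC_DMA_LAST and its next pointer wraps back to
 * req->td_data_phys, and req->chain_len records the number of
 * descriptors so the chain can be freed later.
 */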
1346
1347 /**
1348 * prepare_dma() - This function creates and initializes the DMA chain
1349 * for the request
1350 * @ep: Reference to the endpoint structure
1351 * @req: Reference to the request
1352 * @gfp: Flag to be used while mapping the data buffer
1353 *
1354 * Return codes:
1355 * 0: Success
1356 * Other than 0: linux error number on failure
1357 */
1358 static int prepare_dma(struct pch_udc_ep *ep, struct pch_udc_request *req,
1359 gfp_t gfp)
1360 {
1361 int retval;
1362
1363 /* Allocate and create a DMA chain */
1364 retval = pch_udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
1365 if (retval) {
1366 pr_err("%s: could not create DMA chain:%d\n", __func__, retval);
1367 return retval;
1368 }
1369 if (ep->in)
1370 req->td_data->status = (req->td_data->status &
1371 ~PCH_UDC_BUFF_STS) | PCH_UDC_BS_HST_RDY;
1372 return 0;
1373 }
1374
1375 /**
1376 * process_zlp() - This function processes zero length packets
1377 * from the gadget driver
1378 * @ep: Reference to the endpoint structure
1379 * @req: Reference to the request
1380 */
1381 static void process_zlp(struct pch_udc_ep *ep, struct pch_udc_request *req)
1382 {
1383 struct pch_udc_dev *dev = ep->dev;
1384
1385 /* IN zlp's are handled by hardware */
1386 complete_req(ep, req, 0);
1387
1388 /* if set_config or set_intf is waiting for ack by zlp
1389 * then set CSR_DONE
1390 */
1391 if (dev->set_cfg_not_acked) {
1392 pch_udc_set_csr_done(dev);
1393 dev->set_cfg_not_acked = 0;
1394 }
1395 /* setup command is ACK'ed now by zlp */
1396 if (!dev->stall && dev->waiting_zlp_ack) {
1397 pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
1398 dev->waiting_zlp_ack = 0;
1399 }
1400 }
1401
1402 /**
1403 * pch_udc_start_rxrequest() - This function starts a receive request.
1404 * @ep: Reference to the endpoint structure
1405 * @req: Reference to the request structure
1406 */
1407 static void pch_udc_start_rxrequest(struct pch_udc_ep *ep,
1408 struct pch_udc_request *req)
1409 {
1410 struct pch_udc_data_dma_desc *td_data;
1411
1412 pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
1413 td_data = req->td_data;
1414 /* Set the status bits for all descriptors */
1415 while (1) {
1416 td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
1417 PCH_UDC_BS_HST_RDY;
1418 if ((td_data->status & PCH_UDC_DMA_LAST) == PCH_UDC_DMA_LAST)
1419 break;
1420 td_data = phys_to_virt(td_data->next);
1421 }
1422 /* Write the descriptor pointer */
1423 pch_udc_ep_set_ddptr(ep, req->td_data_phys);
1424 req->dma_going = 1;
1425 pch_udc_enable_ep_interrupts(ep->dev, UDC_EPINT_OUT_EP0 << ep->num);
1426 pch_udc_set_dma(ep->dev, DMA_DIR_RX);
1427 pch_udc_ep_clear_nak(ep);
1428 pch_udc_ep_set_rrdy(ep);
1429 }
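
/*
 * Arming sequence used above for an OUT transfer: RX DMA is gated off,
 * every descriptor in the chain is marked host-ready
 * (PCH_UDC_BS_HST_RDY), the descriptor pointer is programmed, and only
 * then are the endpoint interrupt, RX DMA, CNAK and RRDY enabled.
 */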
1430
1431 /**
1432 * pch_udc_pcd_ep_enable() - This API enables the endpoint. It is called
1433 * from gadget driver
1434 * @usbep: Reference to the USB endpoint structure
1435 * @desc: Reference to the USB endpoint descriptor structure
1436 *
1437 * Return codes:
1438 * 0: Success
1439 * -EINVAL:
1440 * -ESHUTDOWN:
1441 */
1442 static int pch_udc_pcd_ep_enable(struct usb_ep *usbep,
1443 const struct usb_endpoint_descriptor *desc)
1444 {
1445 struct pch_udc_ep *ep;
1446 struct pch_udc_dev *dev;
1447 unsigned long iflags;
1448
1449 if (!usbep || (usbep->name == ep0_string) || !desc ||
1450 (desc->bDescriptorType != USB_DT_ENDPOINT) || !desc->wMaxPacketSize)
1451 return -EINVAL;
1452
1453 ep = container_of(usbep, struct pch_udc_ep, ep);
1454 dev = ep->dev;
1455 if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
1456 return -ESHUTDOWN;
1457 spin_lock_irqsave(&dev->lock, iflags);
1458 ep->desc = desc;
1459 ep->halted = 0;
1460 pch_udc_ep_enable(ep, &ep->dev->cfg_data, desc);
1461 ep->ep.maxpacket = usb_endpoint_maxp(desc);
1462 pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
1463 spin_unlock_irqrestore(&dev->lock, iflags);
1464 return 0;
1465 }
1466
1467 /**
1468 * pch_udc_pcd_ep_disable() - This API disables endpoint and is called
1469 * from gadget driver
1470 * @usbep: Reference to the USB endpoint structure
1471 *
1472 * Return codes:
1473 * 0: Success
1474 * -EINVAL:
1475 */
1476 static int pch_udc_pcd_ep_disable(struct usb_ep *usbep)
1477 {
1478 struct pch_udc_ep *ep;
1479 struct pch_udc_dev *dev;
1480 unsigned long iflags;
1481
1482 if (!usbep)
1483 return -EINVAL;
1484
1485 ep = container_of(usbep, struct pch_udc_ep, ep);
1486 dev = ep->dev;
1487 if ((usbep->name == ep0_string) || !ep->desc)
1488 return -EINVAL;
1489
1490 spin_lock_irqsave(&ep->dev->lock, iflags);
1491 empty_req_queue(ep);
1492 ep->halted = 1;
1493 pch_udc_ep_disable(ep);
1494 pch_udc_disable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
1495 ep->desc = NULL;
1496 INIT_LIST_HEAD(&ep->queue);
1497 spin_unlock_irqrestore(&ep->dev->lock, iflags);
1498 return 0;
1499 }
1500
1501 /**
1502 * pch_udc_alloc_request() - This function allocates request structure.
1503 * It is called by gadget driver
1504 * @usbep: Reference to the USB endpoint structure
1505 * @gfp: Flag to be used while allocating memory
1506 *
1507 * Return codes:
1508 * NULL: Failure
1509 * Allocated address: Success
1510 */
1511 static struct usb_request *pch_udc_alloc_request(struct usb_ep *usbep,
1512 gfp_t gfp)
1513 {
1514 struct pch_udc_request *req;
1515 struct pch_udc_ep *ep;
1516 struct pch_udc_data_dma_desc *dma_desc;
1517 struct pch_udc_dev *dev;
1518
1519 if (!usbep)
1520 return NULL;
1521 ep = container_of(usbep, struct pch_udc_ep, ep);
1522 dev = ep->dev;
1523 req = kzalloc(sizeof *req, gfp);
1524 if (!req)
1525 return NULL;
1526 req->req.dma = DMA_ADDR_INVALID;
1527 req->dma = DMA_ADDR_INVALID;
1528 INIT_LIST_HEAD(&req->queue);
1529 if (!ep->dev->dma_addr)
1530 return &req->req;
1531 /* ep0 in requests are allocated from data pool here */
1532 dma_desc = pci_pool_alloc(ep->dev->data_requests, gfp,
1533 &req->td_data_phys);
1534 if (NULL == dma_desc) {
1535 kfree(req);
1536 return NULL;
1537 }
1538 /* prevent from using desc. - set HOST BUSY */
1539 dma_desc->status |= PCH_UDC_BS_HST_BSY;
1540 dma_desc->dataptr = __constant_cpu_to_le32(DMA_ADDR_INVALID);
1541 req->td_data = dma_desc;
1542 req->td_data_last = dma_desc;
1543 req->chain_len = 1;
1544 return &req->req;
1545 }
1546
1547 /**
1548 * pch_udc_free_request() - This function frees request structure.
1549 * It is called by gadget driver
1550 * @usbep: Reference to the USB endpoint structure
1551 * @usbreq: Reference to the USB request
1552 */
1553 static void pch_udc_free_request(struct usb_ep *usbep,
1554 struct usb_request *usbreq)
1555 {
1556 struct pch_udc_ep *ep;
1557 struct pch_udc_request *req;
1558 struct pch_udc_dev *dev;
1559
1560 if (!usbep || !usbreq)
1561 return;
1562 ep = container_of(usbep, struct pch_udc_ep, ep);
1563 req = container_of(usbreq, struct pch_udc_request, req);
1564 dev = ep->dev;
1565 if (!list_empty(&req->queue))
1566 dev_err(&dev->pdev->dev, "%s: %s req=0x%p queue not empty\n",
1567 __func__, usbep->name, req);
1568 if (req->td_data != NULL) {
1569 if (req->chain_len > 1)
1570 pch_udc_free_dma_chain(ep->dev, req);
1571 pci_pool_free(ep->dev->data_requests, req->td_data,
1572 req->td_data_phys);
1573 }
1574 kfree(req);
1575 }
1576
1577 /**
1578 * pch_udc_pcd_queue() - This function queues a request packet. It is called
1579 * by gadget driver
1580 * @usbep: Reference to the USB endpoint structure
1581 * @usbreq: Reference to the USB request
1582 * @gfp: Flag to be used while mapping the data buffer
1583 *
1584 * Return codes:
1585 * 0: Success
1586 * linux error number: Failure
1587 */
1588 static int pch_udc_pcd_queue(struct usb_ep *usbep, struct usb_request *usbreq,
1589 gfp_t gfp)
1590 {
1591 int retval = 0;
1592 struct pch_udc_ep *ep;
1593 struct pch_udc_dev *dev;
1594 struct pch_udc_request *req;
1595 unsigned long iflags;
1596
1597 if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf)
1598 return -EINVAL;
1599 ep = container_of(usbep, struct pch_udc_ep, ep);
1600 dev = ep->dev;
1601 if (!ep->desc && ep->num)
1602 return -EINVAL;
1603 req = container_of(usbreq, struct pch_udc_request, req);
1604 if (!list_empty(&req->queue))
1605 return -EINVAL;
1606 if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
1607 return -ESHUTDOWN;
1608 spin_lock_irqsave(&dev->lock, iflags);
1609 /* map the buffer for dma */
1610 if (usbreq->length &&
1611 ((usbreq->dma == DMA_ADDR_INVALID) || !usbreq->dma)) {
1612 if (!((unsigned long)(usbreq->buf) & 0x03)) {
1613 if (ep->in)
1614 usbreq->dma = dma_map_single(&dev->pdev->dev,
1615 usbreq->buf,
1616 usbreq->length,
1617 DMA_TO_DEVICE);
1618 else
1619 usbreq->dma = dma_map_single(&dev->pdev->dev,
1620 usbreq->buf,
1621 usbreq->length,
1622 DMA_FROM_DEVICE);
1623 } else {
1624 req->buf = kzalloc(usbreq->length, GFP_ATOMIC);
1625 if (!req->buf) {
1626 retval = -ENOMEM;
1627 goto probe_end;
1628 }
1629 if (ep->in) {
1630 memcpy(req->buf, usbreq->buf, usbreq->length);
1631 req->dma = dma_map_single(&dev->pdev->dev,
1632 req->buf,
1633 usbreq->length,
1634 DMA_TO_DEVICE);
1635 } else
1636 req->dma = dma_map_single(&dev->pdev->dev,
1637 req->buf,
1638 usbreq->length,
1639 DMA_FROM_DEVICE);
1640 }
1641 req->dma_mapped = 1;
1642 }
1643 if (usbreq->length > 0) {
1644 retval = prepare_dma(ep, req, GFP_ATOMIC);
1645 if (retval)
1646 goto probe_end;
1647 }
1648 usbreq->actual = 0;
1649 usbreq->status = -EINPROGRESS;
1650 req->dma_done = 0;
1651 if (list_empty(&ep->queue) && !ep->halted) {
1652 /* no pending transfer, so start this req */
1653 if (!usbreq->length) {
1654 process_zlp(ep, req);
1655 retval = 0;
1656 goto probe_end;
1657 }
1658 if (!ep->in) {
1659 pch_udc_start_rxrequest(ep, req);
1660 } else {
1661 /*
1662 * For IN trfr the descriptors will be programmed and
1663 * P bit will be set when
1664 * we get an IN token
1665 */
1666 pch_udc_wait_ep_stall(ep);
1667 pch_udc_ep_clear_nak(ep);
1668 pch_udc_enable_ep_interrupts(ep->dev, (1 << ep->num));
1669 }
1670 }
1671 /* Now add this request to the ep's pending requests */
1672 if (req != NULL)
1673 list_add_tail(&req->queue, &ep->queue);
1674
1675 probe_end:
1676 spin_unlock_irqrestore(&dev->lock, iflags);
1677 return retval;
1678 }
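
/*
 * DMA mapping policy in pch_udc_pcd_queue(): 4-byte aligned request
 * buffers are mapped directly with dma_map_single(); unaligned buffers
 * are bounced through a kzalloc'd req->buf, copied in here for IN
 * transfers and copied back to req->req.buf in complete_req() for OUT.
 */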
1679
1680 /**
1681 * pch_udc_pcd_dequeue() - This function de-queues a request packet.
1682 * It is called by gadget driver
1683 * @usbep: Reference to the USB endpoint structure
1684 * @usbreq: Reference to the USB request
1685 *
1686 * Return codes:
1687 * 0: Success
1688 * linux error number: Failure
1689 */
1690 static int pch_udc_pcd_dequeue(struct usb_ep *usbep,
1691 struct usb_request *usbreq)
1692 {
1693 struct pch_udc_ep *ep;
1694 struct pch_udc_request *req;
1695 struct pch_udc_dev *dev;
1696 unsigned long flags;
1697 int ret = -EINVAL;
1698
1699 ep = container_of(usbep, struct pch_udc_ep, ep);
1700 dev = ep->dev;
1701 if (!usbep || !usbreq || (!ep->desc && ep->num))
1702 return ret;
1703 req = container_of(usbreq, struct pch_udc_request, req);
1704 spin_lock_irqsave(&ep->dev->lock, flags);
1705 /* make sure it's still queued on this endpoint */
1706 list_for_each_entry(req, &ep->queue, queue) {
1707 if (&req->req == usbreq) {
1708 pch_udc_ep_set_nak(ep);
1709 if (!list_empty(&req->queue))
1710 complete_req(ep, req, -ECONNRESET);
1711 ret = 0;
1712 break;
1713 }
1714 }
1715 spin_unlock_irqrestore(&ep->dev->lock, flags);
1716 return ret;
1717 }
1718
1719 /**
1720 * pch_udc_pcd_set_halt() - This function sets or clears the endpoint halt
1721 * feature
1722 * @usbep: Reference to the USB endpoint structure
1723 * @halt: Specifies whether to set or clear the feature
1724 *
1725 * Return codes:
1726 * 0: Success
1727 * linux error number: Failure
1728 */
1729 static int pch_udc_pcd_set_halt(struct usb_ep *usbep, int halt)
1730 {
1731 struct pch_udc_ep *ep;
1732 struct pch_udc_dev *dev;
1733 unsigned long iflags;
1734 int ret;
1735
1736 if (!usbep)
1737 return -EINVAL;
1738 ep = container_of(usbep, struct pch_udc_ep, ep);
1739 dev = ep->dev;
1740 if (!ep->desc && !ep->num)
1741 return -EINVAL;
1742 if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN))
1743 return -ESHUTDOWN;
1744 spin_lock_irqsave(&udc_stall_spinlock, iflags);
1745 if (list_empty(&ep->queue)) {
1746 if (halt) {
1747 if (ep->num == PCH_UDC_EP0)
1748 ep->dev->stall = 1;
1749 pch_udc_ep_set_stall(ep);
1750 pch_udc_enable_ep_interrupts(ep->dev,
1751 PCH_UDC_EPINT(ep->in,
1752 ep->num));
1753 } else {
1754 pch_udc_ep_clear_stall(ep);
1755 }
1756 ret = 0;
1757 } else {
1758 ret = -EAGAIN;
1759 }
1760 spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
1761 return ret;
1762 }
1763
1764 /**
1765 * pch_udc_pcd_set_wedge() - This function sets the endpoint halt
1766 * feature and wedges (protocol-stalls) the endpoint
1767 * @usbep: Reference to the USB endpoint structure
1768 *
1769 *
1770 * Return codes:
1771 * 0: Success
1772 * linux error number: Failure
1773 */
1774 static int pch_udc_pcd_set_wedge(struct usb_ep *usbep)
1775 {
1776 struct pch_udc_ep *ep;
1777 struct pch_udc_dev *dev;
1778 unsigned long iflags;
1779 int ret;
1780
1781 if (!usbep)
1782 return -EINVAL;
1783 ep = container_of(usbep, struct pch_udc_ep, ep);
1784 dev = ep->dev;
1785 if (!ep->desc && !ep->num)
1786 return -EINVAL;
1787 if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN))
1788 return -ESHUTDOWN;
1789 spin_lock_irqsave(&udc_stall_spinlock, iflags);
1790 if (!list_empty(&ep->queue)) {
1791 ret = -EAGAIN;
1792 } else {
1793 if (ep->num == PCH_UDC_EP0)
1794 ep->dev->stall = 1;
1795 pch_udc_ep_set_stall(ep);
1796 pch_udc_enable_ep_interrupts(ep->dev,
1797 PCH_UDC_EPINT(ep->in, ep->num));
1798 ep->dev->prot_stall = 1;
1799 ret = 0;
1800 }
1801 spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
1802 return ret;
1803 }
1804
1805 /**
1806 * pch_udc_pcd_fifo_flush() - This function flushes the FIFO of the specified endpoint
1807 * @usbep: Reference to the USB endpoint structure
1808 */
1809 static void pch_udc_pcd_fifo_flush(struct usb_ep *usbep)
1810 {
1811 struct pch_udc_ep *ep;
1812
1813 if (!usbep)
1814 return;
1815
1816 ep = container_of(usbep, struct pch_udc_ep, ep);
1817 if (ep->desc || !ep->num)
1818 pch_udc_ep_fifo_flush(ep, ep->in);
1819 }
1820
1821 static const struct usb_ep_ops pch_udc_ep_ops = {
1822 .enable = pch_udc_pcd_ep_enable,
1823 .disable = pch_udc_pcd_ep_disable,
1824 .alloc_request = pch_udc_alloc_request,
1825 .free_request = pch_udc_free_request,
1826 .queue = pch_udc_pcd_queue,
1827 .dequeue = pch_udc_pcd_dequeue,
1828 .set_halt = pch_udc_pcd_set_halt,
1829 .set_wedge = pch_udc_pcd_set_wedge,
1830 .fifo_status = NULL,
1831 .fifo_flush = pch_udc_pcd_fifo_flush,
1832 };
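
/*
 * Illustrative only (not part of this driver): gadget/function drivers do
 * not call the pch_udc_pcd_* operations above directly; they reach them
 * through the generic usb_ep wrappers from <linux/usb/gadget.h>.  A rough
 * sketch, with my_complete() standing in for a hypothetical completion
 * callback:
 *
 *	struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
 *	req->buf      = buf;
 *	req->length   = len;
 *	req->complete = my_complete;
 *	ret = usb_ep_queue(ep, req, GFP_ATOMIC);  // -> pch_udc_pcd_queue()
 *	...
 *	usb_ep_dequeue(ep, req);                  // -> pch_udc_pcd_dequeue()
 *	usb_ep_set_halt(ep);                      // -> pch_udc_pcd_set_halt()
 *	usb_ep_fifo_flush(ep);                    // -> pch_udc_pcd_fifo_flush()
 */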
1833
1834 /**
1835 * pch_udc_init_setup_buff() - This function initializes the SETUP buffer
1836  * @td_stp: Reference to the SETUP buffer structure
1837 */
1838 static void pch_udc_init_setup_buff(struct pch_udc_stp_dma_desc *td_stp)
1839 {
1840 static u32 pky_marker;
1841
1842 if (!td_stp)
1843 return;
1844 td_stp->reserved = ++pky_marker;
1845 memset(&td_stp->request, 0xFF, sizeof td_stp->request);
1846 td_stp->status = PCH_UDC_BS_HST_RDY;
1847 }
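
/*
 * Note: the 0xFF fill of the 8-byte SETUP area and the incrementing marker
 * in the reserved field presumably act as debug aids, making it visible
 * whether the hardware actually rewrote the buffer for the next SETUP
 * packet or stale data is being read back; the descriptor is then handed
 * back to the device by setting its buffer status to PCH_UDC_BS_HST_RDY.
 */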
1848
1849 /**
1850  * pch_udc_start_next_txrequest() - This function starts
1851  * the next queued transmit (IN) request
1852 * @ep: Reference to the endpoint structure
1853 */
1854 static void pch_udc_start_next_txrequest(struct pch_udc_ep *ep)
1855 {
1856 struct pch_udc_request *req;
1857 struct pch_udc_data_dma_desc *td_data;
1858
1859 if (pch_udc_read_ep_control(ep) & UDC_EPCTL_P)
1860 return;
1861
1862 if (list_empty(&ep->queue))
1863 return;
1864
1865 /* next request */
1866 req = list_entry(ep->queue.next, struct pch_udc_request, queue);
1867 if (req->dma_going)
1868 return;
1869 if (!req->td_data)
1870 return;
1871 pch_udc_wait_ep_stall(ep);
1872 req->dma_going = 1;
1873 pch_udc_ep_set_ddptr(ep, 0);
1874 td_data = req->td_data;
1875 while (1) {
1876 td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
1877 PCH_UDC_BS_HST_RDY;
1878 if ((td_data->status & PCH_UDC_DMA_LAST) == PCH_UDC_DMA_LAST)
1879 break;
1880 td_data = phys_to_virt(td_data->next);
1881 }
1882 pch_udc_ep_set_ddptr(ep, req->td_data_phys);
1883 pch_udc_set_dma(ep->dev, DMA_DIR_TX);
1884 pch_udc_ep_set_pd(ep);
1885 pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
1886 pch_udc_ep_clear_nak(ep);
1887 }
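
/*
 * Arming sequence, as coded above: every data descriptor in the request's
 * chain is marked PCH_UDC_BS_HST_RDY, the chain head is written to the
 * endpoint's descriptor pointer, TX DMA is enabled, the per-endpoint P
 * (poll demand) bit is set and NAK is cleared, so the host's next IN
 * token is serviced from the freshly armed chain.
 */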
1888
1889 /**
1890 * pch_udc_complete_transfer() - This function completes a transfer
1891 * @ep: Reference to the endpoint structure
1892 */
1893 static void pch_udc_complete_transfer(struct pch_udc_ep *ep)
1894 {
1895 struct pch_udc_request *req;
1896 struct pch_udc_dev *dev = ep->dev;
1897
1898 if (list_empty(&ep->queue))
1899 return;
1900 req = list_entry(ep->queue.next, struct pch_udc_request, queue);
1901 if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
1902 PCH_UDC_BS_DMA_DONE)
1903 return;
1904 if ((req->td_data_last->status & PCH_UDC_RXTX_STS) !=
1905 PCH_UDC_RTS_SUCC) {
1906 dev_err(&dev->pdev->dev, "Invalid RXTX status (0x%08x) "
1907 "epstatus=0x%08x\n",
1908 (req->td_data_last->status & PCH_UDC_RXTX_STS),
1909 (int)(ep->epsts));
1910 return;
1911 }
1912
1913 req->req.actual = req->req.length;
1914 req->td_data_last->status = PCH_UDC_BS_HST_BSY | PCH_UDC_DMA_LAST;
1915 req->td_data->status = PCH_UDC_BS_HST_BSY | PCH_UDC_DMA_LAST;
1916 complete_req(ep, req, 0);
1917 req->dma_going = 0;
1918 if (!list_empty(&ep->queue)) {
1919 pch_udc_wait_ep_stall(ep);
1920 pch_udc_ep_clear_nak(ep);
1921 pch_udc_enable_ep_interrupts(ep->dev,
1922 PCH_UDC_EPINT(ep->in, ep->num));
1923 } else {
1924 pch_udc_disable_ep_interrupts(ep->dev,
1925 PCH_UDC_EPINT(ep->in, ep->num));
1926 }
1927 }
1928
1929 /**
1930  * pch_udc_complete_receiver() - This function completes a receive (OUT) transfer
1931 * @ep: Reference to the endpoint structure
1932 */
1933 static void pch_udc_complete_receiver(struct pch_udc_ep *ep)
1934 {
1935 struct pch_udc_request *req;
1936 struct pch_udc_dev *dev = ep->dev;
1937 unsigned int count;
1938 struct pch_udc_data_dma_desc *td;
1939 dma_addr_t addr;
1940
1941 if (list_empty(&ep->queue))
1942 return;
1943 /* next request */
1944 req = list_entry(ep->queue.next, struct pch_udc_request, queue);
1945 pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
1946 pch_udc_ep_set_ddptr(ep, 0);
1947 if ((req->td_data_last->status & PCH_UDC_BUFF_STS) ==
1948 PCH_UDC_BS_DMA_DONE)
1949 td = req->td_data_last;
1950 else
1951 td = req->td_data;
1952
1953 while (1) {
1954 if ((td->status & PCH_UDC_RXTX_STS) != PCH_UDC_RTS_SUCC) {
1955 dev_err(&dev->pdev->dev, "Invalid RXTX status=0x%08x "
1956 "epstatus=0x%08x\n",
1957 				(td->status & PCH_UDC_RXTX_STS),
1958 (int)(ep->epsts));
1959 return;
1960 }
1961 if ((td->status & PCH_UDC_BUFF_STS) == PCH_UDC_BS_DMA_DONE)
1962 			if (td->status & PCH_UDC_DMA_LAST) {
1963 count = td->status & PCH_UDC_RXTX_BYTES;
1964 break;
1965 }
1966 if (td == req->td_data_last) {
1967 dev_err(&dev->pdev->dev, "Not complete RX descriptor");
1968 return;
1969 }
1970 addr = (dma_addr_t)td->next;
1971 td = phys_to_virt(addr);
1972 }
1973 /* on 64k packets the RXBYTES field is zero */
1974 if (!count && (req->req.length == UDC_DMA_MAXPACKET))
1975 count = UDC_DMA_MAXPACKET;
1976 req->td_data->status |= PCH_UDC_DMA_LAST;
1977 td->status |= PCH_UDC_BS_HST_BSY;
1978
1979 req->dma_going = 0;
1980 req->req.actual = count;
1981 complete_req(ep, req, 0);
1982 	/* If there are new/failed requests, try them now */
1983 if (!list_empty(&ep->queue)) {
1984 req = list_entry(ep->queue.next, struct pch_udc_request, queue);
1985 pch_udc_start_rxrequest(ep, req);
1986 }
1987 }
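
/*
 * The receive completion above walks the descriptor chain until it finds
 * a descriptor that is both DMA-done and marked PCH_UDC_DMA_LAST, then
 * takes the byte count from its RXTX_BYTES field.  Because that field
 * cannot represent a full 64 KiB, a zero count on a request of exactly
 * UDC_DMA_MAXPACKET bytes is treated as a 64 KiB transfer.
 */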
1988
1989 /**
1990  * pch_udc_svc_data_in() - This function processes endpoint interrupts
1991 * for IN endpoints
1992 * @dev: Reference to the device structure
1993 * @ep_num: Endpoint that generated the interrupt
1994 */
1995 static void pch_udc_svc_data_in(struct pch_udc_dev *dev, int ep_num)
1996 {
1997 u32 epsts;
1998 struct pch_udc_ep *ep;
1999
2000 ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
2001 epsts = ep->epsts;
2002 ep->epsts = 0;
2003
2004 if (!(epsts & (UDC_EPSTS_IN | UDC_EPSTS_BNA | UDC_EPSTS_HE |
2005 UDC_EPSTS_TDC | UDC_EPSTS_RCS | UDC_EPSTS_TXEMPTY |
2006 UDC_EPSTS_RSS | UDC_EPSTS_XFERDONE)))
2007 return;
2008 if ((epsts & UDC_EPSTS_BNA))
2009 return;
2010 if (epsts & UDC_EPSTS_HE)
2011 return;
2012 if (epsts & UDC_EPSTS_RSS) {
2013 pch_udc_ep_set_stall(ep);
2014 pch_udc_enable_ep_interrupts(ep->dev,
2015 PCH_UDC_EPINT(ep->in, ep->num));
2016 }
2017 if (epsts & UDC_EPSTS_RCS) {
2018 if (!dev->prot_stall) {
2019 pch_udc_ep_clear_stall(ep);
2020 } else {
2021 pch_udc_ep_set_stall(ep);
2022 pch_udc_enable_ep_interrupts(ep->dev,
2023 PCH_UDC_EPINT(ep->in, ep->num));
2024 }
2025 }
2026 if (epsts & UDC_EPSTS_TDC)
2027 pch_udc_complete_transfer(ep);
2028 /* On IN interrupt, provide data if we have any */
2029 if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_RSS) &&
2030 !(epsts & UDC_EPSTS_TDC) && !(epsts & UDC_EPSTS_TXEMPTY))
2031 pch_udc_start_next_txrequest(ep);
2032 }
2033
2034 /**
2035 * pch_udc_svc_data_out() - Handles interrupts from OUT endpoint
2036 * @dev: Reference to the device structure
2037 * @ep_num: Endpoint that generated the interrupt
2038 */
2039 static void pch_udc_svc_data_out(struct pch_udc_dev *dev, int ep_num)
2040 {
2041 u32 epsts;
2042 struct pch_udc_ep *ep;
2043 struct pch_udc_request *req = NULL;
2044
2045 ep = &dev->ep[UDC_EPOUT_IDX(ep_num)];
2046 epsts = ep->epsts;
2047 ep->epsts = 0;
2048
2049 if ((epsts & UDC_EPSTS_BNA) && (!list_empty(&ep->queue))) {
2050 /* next request */
2051 req = list_entry(ep->queue.next, struct pch_udc_request,
2052 queue);
2053 if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
2054 PCH_UDC_BS_DMA_DONE) {
2055 if (!req->dma_going)
2056 pch_udc_start_rxrequest(ep, req);
2057 return;
2058 }
2059 }
2060 if (epsts & UDC_EPSTS_HE)
2061 return;
2062 if (epsts & UDC_EPSTS_RSS) {
2063 pch_udc_ep_set_stall(ep);
2064 pch_udc_enable_ep_interrupts(ep->dev,
2065 PCH_UDC_EPINT(ep->in, ep->num));
2066 }
2067 if (epsts & UDC_EPSTS_RCS) {
2068 if (!dev->prot_stall) {
2069 pch_udc_ep_clear_stall(ep);
2070 } else {
2071 pch_udc_ep_set_stall(ep);
2072 pch_udc_enable_ep_interrupts(ep->dev,
2073 PCH_UDC_EPINT(ep->in, ep->num));
2074 }
2075 }
2076 if (((epsts & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
2077 UDC_EPSTS_OUT_DATA) {
2078 if (ep->dev->prot_stall == 1) {
2079 pch_udc_ep_set_stall(ep);
2080 pch_udc_enable_ep_interrupts(ep->dev,
2081 PCH_UDC_EPINT(ep->in, ep->num));
2082 } else {
2083 pch_udc_complete_receiver(ep);
2084 }
2085 }
2086 if (list_empty(&ep->queue))
2087 pch_udc_set_dma(dev, DMA_DIR_RX);
2088 }
2089
2090 /**
2091  * pch_udc_svc_control_in() - Handles Control IN endpoint interrupts
2092 * @dev: Reference to the device structure
2093 */
2094 static void pch_udc_svc_control_in(struct pch_udc_dev *dev)
2095 {
2096 u32 epsts;
2097 struct pch_udc_ep *ep;
2098 struct pch_udc_ep *ep_out;
2099
2100 ep = &dev->ep[UDC_EP0IN_IDX];
2101 ep_out = &dev->ep[UDC_EP0OUT_IDX];
2102 epsts = ep->epsts;
2103 ep->epsts = 0;
2104
2105 if (!(epsts & (UDC_EPSTS_IN | UDC_EPSTS_BNA | UDC_EPSTS_HE |
2106 UDC_EPSTS_TDC | UDC_EPSTS_RCS | UDC_EPSTS_TXEMPTY |
2107 UDC_EPSTS_XFERDONE)))
2108 return;
2109 if ((epsts & UDC_EPSTS_BNA))
2110 return;
2111 if (epsts & UDC_EPSTS_HE)
2112 return;
2113 if ((epsts & UDC_EPSTS_TDC) && (!dev->stall)) {
2114 pch_udc_complete_transfer(ep);
2115 pch_udc_clear_dma(dev, DMA_DIR_RX);
2116 ep_out->td_data->status = (ep_out->td_data->status &
2117 ~PCH_UDC_BUFF_STS) |
2118 PCH_UDC_BS_HST_RDY;
2119 pch_udc_ep_clear_nak(ep_out);
2120 pch_udc_set_dma(dev, DMA_DIR_RX);
2121 pch_udc_ep_set_rrdy(ep_out);
2122 }
2123 /* On IN interrupt, provide data if we have any */
2124 if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_TDC) &&
2125 !(epsts & UDC_EPSTS_TXEMPTY))
2126 pch_udc_start_next_txrequest(ep);
2127 }
2128
2129 /**
2130  * pch_udc_svc_control_out() - Routine that handles Control
2131 * OUT endpoint interrupts
2132 * @dev: Reference to the device structure
2133 */
2134 static void pch_udc_svc_control_out(struct pch_udc_dev *dev)
2135 {
2136 u32 stat;
2137 int setup_supported;
2138 struct pch_udc_ep *ep;
2139
2140 ep = &dev->ep[UDC_EP0OUT_IDX];
2141 stat = ep->epsts;
2142 ep->epsts = 0;
2143
2144 /* If setup data */
2145 if (((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
2146 UDC_EPSTS_OUT_SETUP) {
2147 dev->stall = 0;
2148 dev->ep[UDC_EP0IN_IDX].halted = 0;
2149 dev->ep[UDC_EP0OUT_IDX].halted = 0;
2150 dev->setup_data = ep->td_stp->request;
2151 pch_udc_init_setup_buff(ep->td_stp);
2152 pch_udc_clear_dma(dev, DMA_DIR_RX);
2153 pch_udc_ep_fifo_flush(&(dev->ep[UDC_EP0IN_IDX]),
2154 dev->ep[UDC_EP0IN_IDX].in);
2155 if ((dev->setup_data.bRequestType & USB_DIR_IN))
2156 dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
2157 else /* OUT */
2158 dev->gadget.ep0 = &ep->ep;
2159 spin_unlock(&dev->lock);
2160 		/* Bulk-Only Mass Storage Reset (bmRequestType 0x21, bRequest 0xFF) */
2161 if ((dev->setup_data.bRequestType == 0x21) &&
2162 (dev->setup_data.bRequest == 0xFF))
2163 dev->prot_stall = 0;
2164 /* call gadget with setup data received */
2165 setup_supported = dev->driver->setup(&dev->gadget,
2166 &dev->setup_data);
2167 spin_lock(&dev->lock);
2168
2169 if (dev->setup_data.bRequestType & USB_DIR_IN) {
2170 ep->td_data->status = (ep->td_data->status &
2171 ~PCH_UDC_BUFF_STS) |
2172 PCH_UDC_BS_HST_RDY;
2173 pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
2174 }
2175 /* ep0 in returns data on IN phase */
2176 if (setup_supported >= 0 && setup_supported <
2177 UDC_EP0IN_MAX_PKT_SIZE) {
2178 pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
2179 /* Gadget would have queued a request when
2180 * we called the setup */
2181 if (!(dev->setup_data.bRequestType & USB_DIR_IN)) {
2182 pch_udc_set_dma(dev, DMA_DIR_RX);
2183 pch_udc_ep_clear_nak(ep);
2184 }
2185 } else if (setup_supported < 0) {
2186 /* if unsupported request, then stall */
2187 pch_udc_ep_set_stall(&(dev->ep[UDC_EP0IN_IDX]));
2188 pch_udc_enable_ep_interrupts(ep->dev,
2189 PCH_UDC_EPINT(ep->in, ep->num));
2190 dev->stall = 0;
2191 pch_udc_set_dma(dev, DMA_DIR_RX);
2192 } else {
2193 dev->waiting_zlp_ack = 1;
2194 }
2195 } else if ((((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
2196 UDC_EPSTS_OUT_DATA) && !dev->stall) {
2197 pch_udc_clear_dma(dev, DMA_DIR_RX);
2198 pch_udc_ep_set_ddptr(ep, 0);
2199 if (!list_empty(&ep->queue)) {
2200 ep->epsts = stat;
2201 pch_udc_svc_data_out(dev, PCH_UDC_EP0);
2202 }
2203 pch_udc_set_dma(dev, DMA_DIR_RX);
2204 }
2205 pch_udc_ep_set_rrdy(ep);
2206 }
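
/*
 * EP0 OUT flow as implemented above: a SETUP packet is copied out of the
 * setup descriptor into dev->setup_data, the descriptor is re-armed, and
 * dev->lock is dropped around the gadget driver's ->setup() callback.
 * The callback's return value then selects the follow-up: a value from 0
 * up to (but not including) UDC_EP0IN_MAX_PKT_SIZE clears NAK so the
 * data/status phase proceeds, a negative value stalls ep0 IN, and any
 * other value makes the driver wait for a zero-length-packet
 * acknowledgement (waiting_zlp_ack).
 */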
2207
2208
2209 /**
2210  * pch_udc_postsvc_epinters() - This function enables endpoint interrupts
2211 * and clears NAK status
2212 * @dev: Reference to the device structure
2213 * @ep_num: End point number
2214 */
2215 static void pch_udc_postsvc_epinters(struct pch_udc_dev *dev, int ep_num)
2216 {
2217 struct pch_udc_ep *ep;
2218 struct pch_udc_request *req;
2219
2220 ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
2221 if (!list_empty(&ep->queue)) {
2222 req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2223 pch_udc_enable_ep_interrupts(ep->dev,
2224 PCH_UDC_EPINT(ep->in, ep->num));
2225 pch_udc_ep_clear_nak(ep);
2226 }
2227 }
2228
2229 /**
2230  * pch_udc_read_all_epstatus() - This function reads the status of all endpoints
2231 * @dev: Reference to the device structure
2232 * @ep_intr: Status of endpoint interrupt
2233 */
2234 static void pch_udc_read_all_epstatus(struct pch_udc_dev *dev, u32 ep_intr)
2235 {
2236 int i;
2237 struct pch_udc_ep *ep;
2238
2239 for (i = 0; i < PCH_UDC_USED_EP_NUM; i++) {
2240 /* IN */
2241 if (ep_intr & (0x1 << i)) {
2242 ep = &dev->ep[UDC_EPIN_IDX(i)];
2243 ep->epsts = pch_udc_read_ep_status(ep);
2244 pch_udc_clear_ep_status(ep, ep->epsts);
2245 }
2246 /* OUT */
2247 if (ep_intr & (0x10000 << i)) {
2248 ep = &dev->ep[UDC_EPOUT_IDX(i)];
2249 ep->epsts = pch_udc_read_ep_status(ep);
2250 pch_udc_clear_ep_status(ep, ep->epsts);
2251 }
2252 }
2253 }
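
/*
 * Endpoint interrupt layout, as used above and in pch_udc_isr(): bit i
 * (0x1 << i) reports IN endpoint i and bit 16+i (0x10000 << i) reports
 * OUT endpoint i, so one 32-bit status word covers both directions for
 * all used endpoints.
 */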
2254
2255 /**
2256 * pch_udc_activate_control_ep() - This function enables the control endpoints
2257 * for traffic after a reset
2258 * @dev: Reference to the device structure
2259 */
2260 static void pch_udc_activate_control_ep(struct pch_udc_dev *dev)
2261 {
2262 struct pch_udc_ep *ep;
2263 u32 val;
2264
2265 /* Setup the IN endpoint */
2266 ep = &dev->ep[UDC_EP0IN_IDX];
2267 pch_udc_clear_ep_control(ep);
2268 pch_udc_ep_fifo_flush(ep, ep->in);
2269 pch_udc_ep_set_bufsz(ep, UDC_EP0IN_BUFF_SIZE, ep->in);
2270 pch_udc_ep_set_maxpkt(ep, UDC_EP0IN_MAX_PKT_SIZE);
2271 /* Initialize the IN EP Descriptor */
2272 ep->td_data = NULL;
2273 ep->td_stp = NULL;
2274 ep->td_data_phys = 0;
2275 ep->td_stp_phys = 0;
2276
2277 /* Setup the OUT endpoint */
2278 ep = &dev->ep[UDC_EP0OUT_IDX];
2279 pch_udc_clear_ep_control(ep);
2280 pch_udc_ep_fifo_flush(ep, ep->in);
2281 pch_udc_ep_set_bufsz(ep, UDC_EP0OUT_BUFF_SIZE, ep->in);
2282 pch_udc_ep_set_maxpkt(ep, UDC_EP0OUT_MAX_PKT_SIZE);
2283 val = UDC_EP0OUT_MAX_PKT_SIZE << UDC_CSR_NE_MAX_PKT_SHIFT;
2284 pch_udc_write_csr(ep->dev, val, UDC_EP0OUT_IDX);
2285
2286 /* Initialize the SETUP buffer */
2287 pch_udc_init_setup_buff(ep->td_stp);
2288 /* Write the pointer address of dma descriptor */
2289 pch_udc_ep_set_subptr(ep, ep->td_stp_phys);
2290 /* Write the pointer address of Setup descriptor */
2291 pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
2292
2293 /* Initialize the dma descriptor */
2294 ep->td_data->status = PCH_UDC_DMA_LAST;
2295 ep->td_data->dataptr = dev->dma_addr;
2296 ep->td_data->next = ep->td_data_phys;
2297
2298 pch_udc_ep_clear_nak(ep);
2299 }
2300
2301
2302 /**
2303 * pch_udc_svc_ur_interrupt() - This function handles a USB reset interrupt
2304 * @dev: Reference to driver structure
2305 */
2306 static void pch_udc_svc_ur_interrupt(struct pch_udc_dev *dev)
2307 {
2308 struct pch_udc_ep *ep;
2309 int i;
2310
2311 pch_udc_clear_dma(dev, DMA_DIR_TX);
2312 pch_udc_clear_dma(dev, DMA_DIR_RX);
2313 /* Mask all endpoint interrupts */
2314 pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
2315 /* clear all endpoint interrupts */
2316 pch_udc_write_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
2317
2318 for (i = 0; i < PCH_UDC_EP_NUM; i++) {
2319 ep = &dev->ep[i];
2320 pch_udc_clear_ep_status(ep, UDC_EPSTS_ALL_CLR_MASK);
2321 pch_udc_clear_ep_control(ep);
2322 pch_udc_ep_set_ddptr(ep, 0);
2323 pch_udc_write_csr(ep->dev, 0x00, i);
2324 }
2325 dev->stall = 0;
2326 dev->prot_stall = 0;
2327 dev->waiting_zlp_ack = 0;
2328 dev->set_cfg_not_acked = 0;
2329
2330 	/* disable each ep: set NAK, flush the FIFO and empty its request queue */
2331 for (i = 0; i < (PCH_UDC_USED_EP_NUM*2); i++) {
2332 ep = &dev->ep[i];
2333 pch_udc_ep_set_nak(ep);
2334 pch_udc_ep_fifo_flush(ep, ep->in);
2335 /* Complete request queue */
2336 empty_req_queue(ep);
2337 }
2338 if (dev->driver && dev->driver->disconnect)
2339 dev->driver->disconnect(&dev->gadget);
2340 }
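
/*
 * Reset handling above: DMA in both directions is stopped, all endpoint
 * interrupts are masked and acknowledged, every endpoint's control,
 * status, descriptor pointer and CSR are cleared, pending requests are
 * completed via empty_req_queue(), and finally the bound gadget driver
 * is told about the bus reset through its ->disconnect() callback.
 */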
2341
2342 /**
2343 * pch_udc_svc_enum_interrupt() - This function handles a USB speed enumeration
2344 * done interrupt
2345 * @dev: Reference to driver structure
2346 */
2347 static void pch_udc_svc_enum_interrupt(struct pch_udc_dev *dev)
2348 {
2349 u32 dev_stat, dev_speed;
2350 u32 speed = USB_SPEED_FULL;
2351
2352 dev_stat = pch_udc_read_device_status(dev);
2353 dev_speed = (dev_stat & UDC_DEVSTS_ENUM_SPEED_MASK) >>
2354 UDC_DEVSTS_ENUM_SPEED_SHIFT;
2355 switch (dev_speed) {
2356 case UDC_DEVSTS_ENUM_SPEED_HIGH:
2357 speed = USB_SPEED_HIGH;
2358 break;
2359 case UDC_DEVSTS_ENUM_SPEED_FULL:
2360 speed = USB_SPEED_FULL;
2361 break;
2362 case UDC_DEVSTS_ENUM_SPEED_LOW:
2363 speed = USB_SPEED_LOW;
2364 break;
2365 default:
2366 BUG();
2367 }
2368 dev->gadget.speed = speed;
2369 pch_udc_activate_control_ep(dev);
2370 pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0 | UDC_EPINT_OUT_EP0);
2371 pch_udc_set_dma(dev, DMA_DIR_TX);
2372 pch_udc_set_dma(dev, DMA_DIR_RX);
2373 pch_udc_ep_set_rrdy(&(dev->ep[UDC_EP0OUT_IDX]));
2374 }
2375
2376 /**
2377 * pch_udc_svc_intf_interrupt() - This function handles a set interface
2378 * interrupt
2379 * @dev: Reference to driver structure
2380 */
2381 static void pch_udc_svc_intf_interrupt(struct pch_udc_dev *dev)
2382 {
2383 u32 reg, dev_stat = 0;
2384 int i, ret;
2385
2386 dev_stat = pch_udc_read_device_status(dev);
2387 dev->cfg_data.cur_intf = (dev_stat & UDC_DEVSTS_INTF_MASK) >>
2388 UDC_DEVSTS_INTF_SHIFT;
2389 dev->cfg_data.cur_alt = (dev_stat & UDC_DEVSTS_ALT_MASK) >>
2390 UDC_DEVSTS_ALT_SHIFT;
2391 dev->set_cfg_not_acked = 1;
2392 /* Construct the usb request for gadget driver and inform it */
2393 memset(&dev->setup_data, 0 , sizeof dev->setup_data);
2394 dev->setup_data.bRequest = USB_REQ_SET_INTERFACE;
2395 dev->setup_data.bRequestType = USB_RECIP_INTERFACE;
2396 dev->setup_data.wValue = cpu_to_le16(dev->cfg_data.cur_alt);
2397 dev->setup_data.wIndex = cpu_to_le16(dev->cfg_data.cur_intf);
2398 	/* program the Endpoint Cfg registers */
2399 /* Only one end point cfg register */
2400 reg = pch_udc_read_csr(dev, UDC_EP0OUT_IDX);
2401 reg = (reg & ~UDC_CSR_NE_INTF_MASK) |
2402 (dev->cfg_data.cur_intf << UDC_CSR_NE_INTF_SHIFT);
2403 reg = (reg & ~UDC_CSR_NE_ALT_MASK) |
2404 (dev->cfg_data.cur_alt << UDC_CSR_NE_ALT_SHIFT);
2405 pch_udc_write_csr(dev, reg, UDC_EP0OUT_IDX);
2406 for (i = 0; i < PCH_UDC_USED_EP_NUM * 2; i++) {
2407 /* clear stall bits */
2408 pch_udc_ep_clear_stall(&(dev->ep[i]));
2409 dev->ep[i].halted = 0;
2410 }
2411 dev->stall = 0;
2412 spin_unlock(&dev->lock);
2413 ret = dev->driver->setup(&dev->gadget, &dev->setup_data);
2414 spin_lock(&dev->lock);
2415 }
2416
2417 /**
2418 * pch_udc_svc_cfg_interrupt() - This function handles a set configuration
2419 * interrupt
2420 * @dev: Reference to driver structure
2421 */
2422 static void pch_udc_svc_cfg_interrupt(struct pch_udc_dev *dev)
2423 {
2424 int i, ret;
2425 u32 reg, dev_stat = 0;
2426
2427 dev_stat = pch_udc_read_device_status(dev);
2428 dev->set_cfg_not_acked = 1;
2429 dev->cfg_data.cur_cfg = (dev_stat & UDC_DEVSTS_CFG_MASK) >>
2430 UDC_DEVSTS_CFG_SHIFT;
2431 /* make usb request for gadget driver */
2432 memset(&dev->setup_data, 0 , sizeof dev->setup_data);
2433 dev->setup_data.bRequest = USB_REQ_SET_CONFIGURATION;
2434 dev->setup_data.wValue = cpu_to_le16(dev->cfg_data.cur_cfg);
2435 /* program the NE registers */
2436 /* Only one end point cfg register */
2437 reg = pch_udc_read_csr(dev, UDC_EP0OUT_IDX);
2438 reg = (reg & ~UDC_CSR_NE_CFG_MASK) |
2439 (dev->cfg_data.cur_cfg << UDC_CSR_NE_CFG_SHIFT);
2440 pch_udc_write_csr(dev, reg, UDC_EP0OUT_IDX);
2441 for (i = 0; i < PCH_UDC_USED_EP_NUM * 2; i++) {
2442 /* clear stall bits */
2443 pch_udc_ep_clear_stall(&(dev->ep[i]));
2444 dev->ep[i].halted = 0;
2445 }
2446 dev->stall = 0;
2447
2448 /* call gadget zero with setup data received */
2449 spin_unlock(&dev->lock);
2450 ret = dev->driver->setup(&dev->gadget, &dev->setup_data);
2451 spin_lock(&dev->lock);
2452 }
2453
2454 /**
2455 * pch_udc_dev_isr() - This function services device interrupts
2456 * by invoking appropriate routines.
2457 * @dev: Reference to the device structure
2458 * @dev_intr: The Device interrupt status.
2459 */
2460 static void pch_udc_dev_isr(struct pch_udc_dev *dev, u32 dev_intr)
2461 {
2462 /* USB Reset Interrupt */
2463 if (dev_intr & UDC_DEVINT_UR)
2464 pch_udc_svc_ur_interrupt(dev);
2465 /* Enumeration Done Interrupt */
2466 if (dev_intr & UDC_DEVINT_ENUM)
2467 pch_udc_svc_enum_interrupt(dev);
2468 /* Set Interface Interrupt */
2469 if (dev_intr & UDC_DEVINT_SI)
2470 pch_udc_svc_intf_interrupt(dev);
2471 /* Set Config Interrupt */
2472 if (dev_intr & UDC_DEVINT_SC)
2473 pch_udc_svc_cfg_interrupt(dev);
2474 /* USB Suspend interrupt */
2475 if (dev_intr & UDC_DEVINT_US)
2476 dev_dbg(&dev->pdev->dev, "USB_SUSPEND\n");
2477 /* Clear the SOF interrupt, if enabled */
2478 if (dev_intr & UDC_DEVINT_SOF)
2479 dev_dbg(&dev->pdev->dev, "SOF\n");
2480 /* ES interrupt, IDLE > 3ms on the USB */
2481 if (dev_intr & UDC_DEVINT_ES)
2482 dev_dbg(&dev->pdev->dev, "ES\n");
2483 /* RWKP interrupt */
2484 if (dev_intr & UDC_DEVINT_RWKP)
2485 dev_dbg(&dev->pdev->dev, "RWKP\n");
2486 }
2487
2488 /**
2489 * pch_udc_isr() - This function handles interrupts from the PCH USB Device
2490 * @irq: Interrupt request number
2491  * @pdev: Reference to the device structure
2492 */
2493 static irqreturn_t pch_udc_isr(int irq, void *pdev)
2494 {
2495 struct pch_udc_dev *dev = (struct pch_udc_dev *) pdev;
2496 u32 dev_intr, ep_intr;
2497 int i;
2498
2499 dev_intr = pch_udc_read_device_interrupts(dev);
2500 ep_intr = pch_udc_read_ep_interrupts(dev);
2501
2502 if (dev_intr)
2503 /* Clear device interrupts */
2504 pch_udc_write_device_interrupts(dev, dev_intr);
2505 if (ep_intr)
2506 /* Clear ep interrupts */
2507 pch_udc_write_ep_interrupts(dev, ep_intr);
2508 if (!dev_intr && !ep_intr)
2509 return IRQ_NONE;
2510 spin_lock(&dev->lock);
2511 if (dev_intr)
2512 pch_udc_dev_isr(dev, dev_intr);
2513 if (ep_intr) {
2514 pch_udc_read_all_epstatus(dev, ep_intr);
2515 /* Process Control In interrupts, if present */
2516 if (ep_intr & UDC_EPINT_IN_EP0) {
2517 pch_udc_svc_control_in(dev);
2518 pch_udc_postsvc_epinters(dev, 0);
2519 }
2520 /* Process Control Out interrupts, if present */
2521 if (ep_intr & UDC_EPINT_OUT_EP0)
2522 pch_udc_svc_control_out(dev);
2523 /* Process data in end point interrupts */
2524 for (i = 1; i < PCH_UDC_USED_EP_NUM; i++) {
2525 if (ep_intr & (1 << i)) {
2526 pch_udc_svc_data_in(dev, i);
2527 pch_udc_postsvc_epinters(dev, i);
2528 }
2529 }
2530 /* Process data out end point interrupts */
2531 for (i = UDC_EPINT_OUT_SHIFT + 1; i < (UDC_EPINT_OUT_SHIFT +
2532 PCH_UDC_USED_EP_NUM); i++)
2533 if (ep_intr & (1 << i))
2534 pch_udc_svc_data_out(dev, i -
2535 UDC_EPINT_OUT_SHIFT);
2536 }
2537 spin_unlock(&dev->lock);
2538 return IRQ_HANDLED;
2539 }
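
/*
 * Interrupt entry above: both status registers are read and acknowledged
 * before dev->lock is taken; if neither reports anything the handler
 * returns IRQ_NONE so the shared interrupt line can be passed on.  Device
 * events are dispatched first, then EP0 IN/OUT, then the data endpoints
 * in ascending order.
 */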
2540
2541 /**
2542 * pch_udc_setup_ep0() - This function enables control endpoint for traffic
2543 * @dev: Reference to the device structure
2544 */
2545 static void pch_udc_setup_ep0(struct pch_udc_dev *dev)
2546 {
2547 /* enable ep0 interrupts */
2548 pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0 |
2549 UDC_EPINT_OUT_EP0);
2550 /* enable device interrupts */
2551 pch_udc_enable_interrupts(dev, UDC_DEVINT_UR | UDC_DEVINT_US |
2552 UDC_DEVINT_ES | UDC_DEVINT_ENUM |
2553 UDC_DEVINT_SI | UDC_DEVINT_SC);
2554 }
2555
2556 /**
2557 * gadget_release() - Free the gadget driver private data
2558  * @pdev: Reference to the gadget's struct device
2559 */
2560 static void gadget_release(struct device *pdev)
2561 {
2562 struct pch_udc_dev *dev = dev_get_drvdata(pdev);
2563
2564 kfree(dev);
2565 }
2566
2567 /**
2568 * pch_udc_pcd_reinit() - This API initializes the endpoint structures
2569 * @dev: Reference to the driver structure
2570 */
2571 static void pch_udc_pcd_reinit(struct pch_udc_dev *dev)
2572 {
2573 const char *const ep_string[] = {
2574 ep0_string, "ep0out", "ep1in", "ep1out", "ep2in", "ep2out",
2575 "ep3in", "ep3out", "ep4in", "ep4out", "ep5in", "ep5out",
2576 "ep6in", "ep6out", "ep7in", "ep7out", "ep8in", "ep8out",
2577 "ep9in", "ep9out", "ep10in", "ep10out", "ep11in", "ep11out",
2578 "ep12in", "ep12out", "ep13in", "ep13out", "ep14in", "ep14out",
2579 "ep15in", "ep15out",
2580 };
2581 int i;
2582
2583 dev->gadget.speed = USB_SPEED_UNKNOWN;
2584 INIT_LIST_HEAD(&dev->gadget.ep_list);
2585
2586 /* Initialize the endpoints structures */
2587 memset(dev->ep, 0, sizeof dev->ep);
2588 for (i = 0; i < PCH_UDC_EP_NUM; i++) {
2589 struct pch_udc_ep *ep = &dev->ep[i];
2590 ep->dev = dev;
2591 ep->halted = 1;
2592 ep->num = i / 2;
2593 ep->in = ~i & 1;
2594 ep->ep.name = ep_string[i];
2595 ep->ep.ops = &pch_udc_ep_ops;
2596 if (ep->in)
2597 ep->offset_addr = ep->num * UDC_EP_REG_SHIFT;
2598 else
2599 ep->offset_addr = (UDC_EPINT_OUT_SHIFT + ep->num) *
2600 UDC_EP_REG_SHIFT;
2601 /* need to set ep->ep.maxpacket and set Default Configuration?*/
2602 ep->ep.maxpacket = UDC_BULK_MAX_PKT_SIZE;
2603 list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
2604 INIT_LIST_HEAD(&ep->queue);
2605 }
2606 dev->ep[UDC_EP0IN_IDX].ep.maxpacket = UDC_EP0IN_MAX_PKT_SIZE;
2607 dev->ep[UDC_EP0OUT_IDX].ep.maxpacket = UDC_EP0OUT_MAX_PKT_SIZE;
2608
2609 /* remove ep0 in and out from the list. They have own pointer */
2610 list_del_init(&dev->ep[UDC_EP0IN_IDX].ep.ep_list);
2611 list_del_init(&dev->ep[UDC_EP0OUT_IDX].ep.ep_list);
2612
2613 dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
2614 INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
2615 }
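
/*
 * Resulting layout of dev->ep[]: even indices are the IN half and odd
 * indices the OUT half of the same endpoint number (index i maps to
 * endpoint i/2), mirroring the register layout selected through
 * ep->offset_addr.  ep0 IN and ep0 OUT are taken back off the gadget's
 * ep_list because they are reached through dev->gadget.ep0 instead.
 */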
2616
2617 /**
2618 * pch_udc_pcd_init() - This API initializes the driver structure
2619 * @dev: Reference to the driver structure
2620 *
2621 * Return codes:
2622 * 0: Success
2623 */
2624 static int pch_udc_pcd_init(struct pch_udc_dev *dev)
2625 {
2626 pch_udc_init(dev);
2627 pch_udc_pcd_reinit(dev);
2628 return 0;
2629 }
2630
2631 /**
2632 * init_dma_pools() - create dma pools during initialization
2633  * @dev: Reference to the driver structure
2634 */
2635 static int init_dma_pools(struct pch_udc_dev *dev)
2636 {
2637 struct pch_udc_stp_dma_desc *td_stp;
2638 struct pch_udc_data_dma_desc *td_data;
2639
2640 /* DMA setup */
2641 dev->data_requests = pci_pool_create("data_requests", dev->pdev,
2642 sizeof(struct pch_udc_data_dma_desc), 0, 0);
2643 if (!dev->data_requests) {
2644 dev_err(&dev->pdev->dev, "%s: can't get request data pool\n",
2645 __func__);
2646 return -ENOMEM;
2647 }
2648
2649 /* dma desc for setup data */
2650 dev->stp_requests = pci_pool_create("setup requests", dev->pdev,
2651 sizeof(struct pch_udc_stp_dma_desc), 0, 0);
2652 if (!dev->stp_requests) {
2653 dev_err(&dev->pdev->dev, "%s: can't get setup request pool\n",
2654 __func__);
2655 return -ENOMEM;
2656 }
2657 /* setup */
2658 td_stp = pci_pool_alloc(dev->stp_requests, GFP_KERNEL,
2659 &dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
2660 if (!td_stp) {
2661 dev_err(&dev->pdev->dev,
2662 "%s: can't allocate setup dma descriptor\n", __func__);
2663 return -ENOMEM;
2664 }
2665 dev->ep[UDC_EP0OUT_IDX].td_stp = td_stp;
2666
2667 	/* data descriptor for the ep0 OUT data stage */
2668 td_data = pci_pool_alloc(dev->data_requests, GFP_KERNEL,
2669 &dev->ep[UDC_EP0OUT_IDX].td_data_phys);
2670 if (!td_data) {
2671 dev_err(&dev->pdev->dev,
2672 "%s: can't allocate data dma descriptor\n", __func__);
2673 return -ENOMEM;
2674 }
2675 dev->ep[UDC_EP0OUT_IDX].td_data = td_data;
2676 dev->ep[UDC_EP0IN_IDX].td_stp = NULL;
2677 dev->ep[UDC_EP0IN_IDX].td_stp_phys = 0;
2678 dev->ep[UDC_EP0IN_IDX].td_data = NULL;
2679 dev->ep[UDC_EP0IN_IDX].td_data_phys = 0;
2680
2681 dev->ep0out_buf = kzalloc(UDC_EP0OUT_BUFF_SIZE * 4, GFP_KERNEL);
2682 if (!dev->ep0out_buf)
2683 return -ENOMEM;
2684 dev->dma_addr = dma_map_single(&dev->pdev->dev, dev->ep0out_buf,
2685 UDC_EP0OUT_BUFF_SIZE * 4,
2686 DMA_FROM_DEVICE);
2687 return 0;
2688 }
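
/*
 * DMA resources created here: one pci_pool per descriptor type, a single
 * setup descriptor and a single data descriptor for ep0 OUT, plus a
 * 4 * UDC_EP0OUT_BUFF_SIZE bounce buffer mapped DMA_FROM_DEVICE.  The bus
 * address of that buffer (dev->dma_addr) is later written into the ep0
 * OUT data descriptor by pch_udc_activate_control_ep().  Note that the
 * result of dma_map_single() is not checked with dma_mapping_error().
 */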
2689
2690 static int pch_udc_start(struct usb_gadget_driver *driver,
2691 int (*bind)(struct usb_gadget *))
2692 {
2693 struct pch_udc_dev *dev = pch_udc;
2694 int retval;
2695
2696 	if (!dev)
2697 		return -ENODEV;
2698 
2699 	if (!driver || (driver->max_speed == USB_SPEED_UNKNOWN) || !bind ||
2700 	    !driver->setup || !driver->unbind || !driver->disconnect) {
2701 		dev_err(&dev->pdev->dev,
2702 			"%s: invalid driver parameter\n", __func__);
2703 		return -EINVAL;
2704 	}
2705
2706 if (dev->driver) {
2707 dev_err(&dev->pdev->dev, "%s: already bound\n", __func__);
2708 return -EBUSY;
2709 }
2710 driver->driver.bus = NULL;
2711 dev->driver = driver;
2712 dev->gadget.dev.driver = &driver->driver;
2713
2714 /* Invoke the bind routine of the gadget driver */
2715 retval = bind(&dev->gadget);
2716
2717 if (retval) {
2718 dev_err(&dev->pdev->dev, "%s: binding to %s returning %d\n",
2719 __func__, driver->driver.name, retval);
2720 dev->driver = NULL;
2721 dev->gadget.dev.driver = NULL;
2722 return retval;
2723 }
2724 /* get ready for ep0 traffic */
2725 pch_udc_setup_ep0(dev);
2726
2727 /* clear SD */
2728 pch_udc_clear_disconnect(dev);
2729
2730 dev->connected = 1;
2731 return 0;
2732 }
2733
2734 static int pch_udc_stop(struct usb_gadget_driver *driver)
2735 {
2736 struct pch_udc_dev *dev = pch_udc;
2737
2738 if (!dev)
2739 return -ENODEV;
2740
2741 if (!driver || (driver != dev->driver)) {
2742 dev_err(&dev->pdev->dev,
2743 "%s: invalid driver parameter\n", __func__);
2744 return -EINVAL;
2745 }
2746
2747 pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
2748
2749 /* Assures that there are no pending requests with this driver */
2750 driver->disconnect(&dev->gadget);
2751 driver->unbind(&dev->gadget);
2752 dev->gadget.dev.driver = NULL;
2753 dev->driver = NULL;
2754 dev->connected = 0;
2755
2756 /* set SD */
2757 pch_udc_set_disconnect(dev);
2758 return 0;
2759 }
2760
2761 static void pch_udc_shutdown(struct pci_dev *pdev)
2762 {
2763 struct pch_udc_dev *dev = pci_get_drvdata(pdev);
2764
2765 pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
2766 pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
2767
2768 /* disable the pullup so the host will think we're gone */
2769 pch_udc_set_disconnect(dev);
2770 }
2771
2772 static void pch_udc_remove(struct pci_dev *pdev)
2773 {
2774 struct pch_udc_dev *dev = pci_get_drvdata(pdev);
2775
2776 usb_del_gadget_udc(&dev->gadget);
2777
2778 /* gadget driver must not be registered */
2779 if (dev->driver)
2780 dev_err(&pdev->dev,
2781 "%s: gadget driver still bound!!!\n", __func__);
2782 /* dma pool cleanup */
2783 if (dev->data_requests)
2784 pci_pool_destroy(dev->data_requests);
2785
2786 if (dev->stp_requests) {
2787 /* cleanup DMA desc's for ep0in */
2788 if (dev->ep[UDC_EP0OUT_IDX].td_stp) {
2789 pci_pool_free(dev->stp_requests,
2790 dev->ep[UDC_EP0OUT_IDX].td_stp,
2791 dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
2792 }
2793 if (dev->ep[UDC_EP0OUT_IDX].td_data) {
2794 pci_pool_free(dev->stp_requests,
2795 dev->ep[UDC_EP0OUT_IDX].td_data,
2796 dev->ep[UDC_EP0OUT_IDX].td_data_phys);
2797 }
2798 pci_pool_destroy(dev->stp_requests);
2799 }
2800
2801 if (dev->dma_addr)
2802 dma_unmap_single(&dev->pdev->dev, dev->dma_addr,
2803 UDC_EP0OUT_BUFF_SIZE * 4, DMA_FROM_DEVICE);
2804 kfree(dev->ep0out_buf);
2805
2806 pch_udc_exit(dev);
2807
2808 if (dev->irq_registered)
2809 free_irq(pdev->irq, dev);
2810 if (dev->base_addr)
2811 iounmap(dev->base_addr);
2812 if (dev->mem_region)
2813 release_mem_region(dev->phys_addr,
2814 pci_resource_len(pdev, PCH_UDC_PCI_BAR));
2815 if (dev->active)
2816 pci_disable_device(pdev);
2817 if (dev->registered)
2818 device_unregister(&dev->gadget.dev);
2819 kfree(dev);
2820 pci_set_drvdata(pdev, NULL);
2821 }
2822
2823 #ifdef CONFIG_PM
2824 static int pch_udc_suspend(struct pci_dev *pdev, pm_message_t state)
2825 {
2826 struct pch_udc_dev *dev = pci_get_drvdata(pdev);
2827
2828 pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
2829 pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
2830
2831 pci_disable_device(pdev);
2832 pci_enable_wake(pdev, PCI_D3hot, 0);
2833
2834 if (pci_save_state(pdev)) {
2835 dev_err(&pdev->dev,
2836 "%s: could not save PCI config state\n", __func__);
2837 return -ENOMEM;
2838 }
2839 pci_set_power_state(pdev, pci_choose_state(pdev, state));
2840 return 0;
2841 }
2842
2843 static int pch_udc_resume(struct pci_dev *pdev)
2844 {
2845 int ret;
2846
2847 pci_set_power_state(pdev, PCI_D0);
2848 pci_restore_state(pdev);
2849 ret = pci_enable_device(pdev);
2850 if (ret) {
2851 dev_err(&pdev->dev, "%s: pci_enable_device failed\n", __func__);
2852 return ret;
2853 }
2854 pci_enable_wake(pdev, PCI_D3hot, 0);
2855 return 0;
2856 }
2857 #else
2858 #define pch_udc_suspend NULL
2859 #define pch_udc_resume NULL
2860 #endif /* CONFIG_PM */
2861
2862 static int pch_udc_probe(struct pci_dev *pdev,
2863 const struct pci_device_id *id)
2864 {
2865 unsigned long resource;
2866 unsigned long len;
2867 int retval;
2868 struct pch_udc_dev *dev;
2869
2870 /* one udc only */
2871 if (pch_udc) {
2872 pr_err("%s: already probed\n", __func__);
2873 return -EBUSY;
2874 }
2875 /* init */
2876 dev = kzalloc(sizeof *dev, GFP_KERNEL);
2877 if (!dev) {
2878 pr_err("%s: no memory for device structure\n", __func__);
2879 return -ENOMEM;
2880 }
2881 /* pci setup */
2882 if (pci_enable_device(pdev) < 0) {
2883 kfree(dev);
2884 pr_err("%s: pci_enable_device failed\n", __func__);
2885 return -ENODEV;
2886 }
2887 dev->active = 1;
2888 pci_set_drvdata(pdev, dev);
2889
2890 /* PCI resource allocation */
2891 resource = pci_resource_start(pdev, 1);
2892 len = pci_resource_len(pdev, 1);
2893
2894 if (!request_mem_region(resource, len, KBUILD_MODNAME)) {
2895 dev_err(&pdev->dev, "%s: pci device used already\n", __func__);
2896 retval = -EBUSY;
2897 goto finished;
2898 }
2899 dev->phys_addr = resource;
2900 dev->mem_region = 1;
2901
2902 dev->base_addr = ioremap_nocache(resource, len);
2903 if (!dev->base_addr) {
2904 pr_err("%s: device memory cannot be mapped\n", __func__);
2905 retval = -ENOMEM;
2906 goto finished;
2907 }
2908 if (!pdev->irq) {
2909 dev_err(&pdev->dev, "%s: irq not set\n", __func__);
2910 retval = -ENODEV;
2911 goto finished;
2912 }
2913 pch_udc = dev;
2914 /* initialize the hardware */
2915 if (pch_udc_pcd_init(dev))
2916 goto finished;
2917 if (request_irq(pdev->irq, pch_udc_isr, IRQF_SHARED, KBUILD_MODNAME,
2918 dev)) {
2919 dev_err(&pdev->dev, "%s: request_irq(%d) fail\n", __func__,
2920 pdev->irq);
2921 retval = -ENODEV;
2922 goto finished;
2923 }
2924 dev->irq = pdev->irq;
2925 dev->irq_registered = 1;
2926
2927 pci_set_master(pdev);
2928 pci_try_set_mwi(pdev);
2929
2930 /* device struct setup */
2931 spin_lock_init(&dev->lock);
2932 dev->pdev = pdev;
2933 dev->gadget.ops = &pch_udc_ops;
2934
2935 retval = init_dma_pools(dev);
2936 if (retval)
2937 goto finished;
2938
2939 dev_set_name(&dev->gadget.dev, "gadget");
2940 dev->gadget.dev.parent = &pdev->dev;
2941 dev->gadget.dev.dma_mask = pdev->dev.dma_mask;
2942 dev->gadget.dev.release = gadget_release;
2943 dev->gadget.name = KBUILD_MODNAME;
2944 dev->gadget.max_speed = USB_SPEED_HIGH;
2945
2946 retval = device_register(&dev->gadget.dev);
2947 if (retval)
2948 goto finished;
2949 dev->registered = 1;
2950
2951 /* Put the device in disconnected state till a driver is bound */
2952 pch_udc_set_disconnect(dev);
2953 retval = usb_add_gadget_udc(&pdev->dev, &dev->gadget);
2954 if (retval)
2955 goto finished;
2956 return 0;
2957
2958 finished:
2959 pch_udc_remove(pdev);
2960 return retval;
2961 }
2962
2963 static DEFINE_PCI_DEVICE_TABLE(pch_udc_pcidev_id) = {
2964 {
2965 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EG20T_UDC),
2966 .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
2967 .class_mask = 0xffffffff,
2968 },
2969 {
2970 PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7213_IOH_UDC),
2971 .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
2972 .class_mask = 0xffffffff,
2973 },
2974 {
2975 PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7831_IOH_UDC),
2976 .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
2977 .class_mask = 0xffffffff,
2978 },
2979 { 0 },
2980 };
2981
2982 MODULE_DEVICE_TABLE(pci, pch_udc_pcidev_id);
2983
2984
2985 static struct pci_driver pch_udc_driver = {
2986 .name = KBUILD_MODNAME,
2987 .id_table = pch_udc_pcidev_id,
2988 .probe = pch_udc_probe,
2989 .remove = pch_udc_remove,
2990 .suspend = pch_udc_suspend,
2991 .resume = pch_udc_resume,
2992 .shutdown = pch_udc_shutdown,
2993 };
2994
2995 static int __init pch_udc_pci_init(void)
2996 {
2997 return pci_register_driver(&pch_udc_driver);
2998 }
2999 module_init(pch_udc_pci_init);
3000
3001 static void __exit pch_udc_pci_exit(void)
3002 {
3003 pci_unregister_driver(&pch_udc_driver);
3004 }
3005 module_exit(pch_udc_pci_exit);
3006
3007 MODULE_DESCRIPTION("Intel EG20T USB Device Controller");
3008 MODULE_AUTHOR("LAPIS Semiconductor, <tomoya-linux@dsn.lapis-semi.com>");
3009 MODULE_LICENSE("GPL");