]>
Commit | Line | Data |
---|---|---|
b92a78e5 RG |
1 | /* |
2 | * Copyright (c) 2008 Rodolfo Giometti <giometti@linux.it> | |
3 | * Copyright (c) 2008 Eurotech S.p.A. <info@eurtech.it> | |
4 | * | |
5 | * This code is *strongly* based on EHCI-HCD code by David Brownell since | |
6 | * the chip is a quasi-EHCI compatible. | |
7 | * | |
8 | * This program is free software; you can redistribute it and/or modify it | |
9 | * under the terms of the GNU General Public License as published by the | |
10 | * Free Software Foundation; either version 2 of the License, or (at your | |
11 | * option) any later version. | |
12 | * | |
13 | * This program is distributed in the hope that it will be useful, but | |
14 | * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | |
15 | * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
16 | * for more details. | |
17 | * | |
18 | * You should have received a copy of the GNU General Public License | |
19 | * along with this program; if not, write to the Free Software Foundation, | |
20 | * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | |
21 | */ | |
22 | ||
23 | #include <linux/module.h> | |
24 | #include <linux/pci.h> | |
25 | #include <linux/dmapool.h> | |
26 | #include <linux/kernel.h> | |
27 | #include <linux/delay.h> | |
28 | #include <linux/ioport.h> | |
29 | #include <linux/sched.h> | |
30 | #include <linux/slab.h> | |
31 | #include <linux/errno.h> | |
32 | #include <linux/init.h> | |
33 | #include <linux/timer.h> | |
34 | #include <linux/list.h> | |
35 | #include <linux/interrupt.h> | |
b92a78e5 | 36 | #include <linux/usb.h> |
27729aad | 37 | #include <linux/usb/hcd.h> |
b92a78e5 RG |
38 | #include <linux/moduleparam.h> |
39 | #include <linux/dma-mapping.h> | |
40 | #include <linux/io.h> | |
41 | ||
b92a78e5 RG |
42 | #include <asm/irq.h> |
43 | #include <asm/system.h> | |
44 | #include <asm/unaligned.h> | |
45 | ||
46 | #include <linux/irq.h> | |
47 | #include <linux/platform_device.h> | |
48 | ||
49 | #include "oxu210hp.h" | |
50 | ||
51 | #define DRIVER_VERSION "0.0.50" | |
52 | ||
53 | /* | |
54 | * Main defines | |
55 | */ | |
56 | ||
57 | #define oxu_dbg(oxu, fmt, args...) \ | |
58 | dev_dbg(oxu_to_hcd(oxu)->self.controller , fmt , ## args) | |
59 | #define oxu_err(oxu, fmt, args...) \ | |
60 | dev_err(oxu_to_hcd(oxu)->self.controller , fmt , ## args) | |
61 | #define oxu_info(oxu, fmt, args...) \ | |
62 | dev_info(oxu_to_hcd(oxu)->self.controller , fmt , ## args) | |
63 | ||
64 | static inline struct usb_hcd *oxu_to_hcd(struct oxu_hcd *oxu) | |
65 | { | |
66 | return container_of((void *) oxu, struct usb_hcd, hcd_priv); | |
67 | } | |
68 | ||
69 | static inline struct oxu_hcd *hcd_to_oxu(struct usb_hcd *hcd) | |
70 | { | |
71 | return (struct oxu_hcd *) (hcd->hcd_priv); | |
72 | } | |
73 | ||
74 | /* | |
75 | * Debug stuff | |
76 | */ | |
77 | ||
78 | #undef OXU_URB_TRACE | |
79 | #undef OXU_VERBOSE_DEBUG | |
80 | ||
81 | #ifdef OXU_VERBOSE_DEBUG | |
82 | #define oxu_vdbg oxu_dbg | |
83 | #else | |
84 | #define oxu_vdbg(oxu, fmt, args...) /* Nop */ | |
85 | #endif | |
86 | ||
87 | #ifdef DEBUG | |
88 | ||
89 | static int __attribute__((__unused__)) | |
90 | dbg_status_buf(char *buf, unsigned len, const char *label, u32 status) | |
91 | { | |
92 | return scnprintf(buf, len, "%s%sstatus %04x%s%s%s%s%s%s%s%s%s%s", | |
93 | label, label[0] ? " " : "", status, | |
94 | (status & STS_ASS) ? " Async" : "", | |
95 | (status & STS_PSS) ? " Periodic" : "", | |
96 | (status & STS_RECL) ? " Recl" : "", | |
97 | (status & STS_HALT) ? " Halt" : "", | |
98 | (status & STS_IAA) ? " IAA" : "", | |
99 | (status & STS_FATAL) ? " FATAL" : "", | |
100 | (status & STS_FLR) ? " FLR" : "", | |
101 | (status & STS_PCD) ? " PCD" : "", | |
102 | (status & STS_ERR) ? " ERR" : "", | |
103 | (status & STS_INT) ? " INT" : "" | |
104 | ); | |
105 | } | |
106 | ||
107 | static int __attribute__((__unused__)) | |
108 | dbg_intr_buf(char *buf, unsigned len, const char *label, u32 enable) | |
109 | { | |
110 | return scnprintf(buf, len, "%s%sintrenable %02x%s%s%s%s%s%s", | |
111 | label, label[0] ? " " : "", enable, | |
112 | (enable & STS_IAA) ? " IAA" : "", | |
113 | (enable & STS_FATAL) ? " FATAL" : "", | |
114 | (enable & STS_FLR) ? " FLR" : "", | |
115 | (enable & STS_PCD) ? " PCD" : "", | |
116 | (enable & STS_ERR) ? " ERR" : "", | |
117 | (enable & STS_INT) ? " INT" : "" | |
118 | ); | |
119 | } | |
120 | ||
121 | static const char *const fls_strings[] = | |
122 | { "1024", "512", "256", "??" }; | |
123 | ||
124 | static int dbg_command_buf(char *buf, unsigned len, | |
125 | const char *label, u32 command) | |
126 | { | |
127 | return scnprintf(buf, len, | |
128 | "%s%scommand %06x %s=%d ithresh=%d%s%s%s%s period=%s%s %s", | |
129 | label, label[0] ? " " : "", command, | |
130 | (command & CMD_PARK) ? "park" : "(park)", | |
131 | CMD_PARK_CNT(command), | |
132 | (command >> 16) & 0x3f, | |
133 | (command & CMD_LRESET) ? " LReset" : "", | |
134 | (command & CMD_IAAD) ? " IAAD" : "", | |
135 | (command & CMD_ASE) ? " Async" : "", | |
136 | (command & CMD_PSE) ? " Periodic" : "", | |
137 | fls_strings[(command >> 2) & 0x3], | |
138 | (command & CMD_RESET) ? " Reset" : "", | |
139 | (command & CMD_RUN) ? "RUN" : "HALT" | |
140 | ); | |
141 | } | |
142 | ||
143 | static int dbg_port_buf(char *buf, unsigned len, const char *label, | |
144 | int port, u32 status) | |
145 | { | |
146 | char *sig; | |
147 | ||
148 | /* signaling state */ | |
149 | switch (status & (3 << 10)) { | |
150 | case 0 << 10: | |
151 | sig = "se0"; | |
152 | break; | |
153 | case 1 << 10: | |
154 | sig = "k"; /* low speed */ | |
155 | break; | |
156 | case 2 << 10: | |
157 | sig = "j"; | |
158 | break; | |
159 | default: | |
160 | sig = "?"; | |
161 | break; | |
162 | } | |
163 | ||
164 | return scnprintf(buf, len, | |
165 | "%s%sport %d status %06x%s%s sig=%s%s%s%s%s%s%s%s%s%s", | |
166 | label, label[0] ? " " : "", port, status, | |
167 | (status & PORT_POWER) ? " POWER" : "", | |
168 | (status & PORT_OWNER) ? " OWNER" : "", | |
169 | sig, | |
170 | (status & PORT_RESET) ? " RESET" : "", | |
171 | (status & PORT_SUSPEND) ? " SUSPEND" : "", | |
172 | (status & PORT_RESUME) ? " RESUME" : "", | |
173 | (status & PORT_OCC) ? " OCC" : "", | |
174 | (status & PORT_OC) ? " OC" : "", | |
175 | (status & PORT_PEC) ? " PEC" : "", | |
176 | (status & PORT_PE) ? " PE" : "", | |
177 | (status & PORT_CSC) ? " CSC" : "", | |
178 | (status & PORT_CONNECT) ? " CONNECT" : "" | |
179 | ); | |
180 | } | |
181 | ||
182 | #else | |
183 | ||
184 | static inline int __attribute__((__unused__)) | |
185 | dbg_status_buf(char *buf, unsigned len, const char *label, u32 status) | |
186 | { return 0; } | |
187 | ||
188 | static inline int __attribute__((__unused__)) | |
189 | dbg_command_buf(char *buf, unsigned len, const char *label, u32 command) | |
190 | { return 0; } | |
191 | ||
192 | static inline int __attribute__((__unused__)) | |
193 | dbg_intr_buf(char *buf, unsigned len, const char *label, u32 enable) | |
194 | { return 0; } | |
195 | ||
196 | static inline int __attribute__((__unused__)) | |
197 | dbg_port_buf(char *buf, unsigned len, const char *label, int port, u32 status) | |
198 | { return 0; } | |
199 | ||
200 | #endif /* DEBUG */ | |
201 | ||
202 | /* functions have the "wrong" filename when they're output... */ | |
203 | #define dbg_status(oxu, label, status) { \ | |
204 | char _buf[80]; \ | |
205 | dbg_status_buf(_buf, sizeof _buf, label, status); \ | |
206 | oxu_dbg(oxu, "%s\n", _buf); \ | |
207 | } | |
208 | ||
209 | #define dbg_cmd(oxu, label, command) { \ | |
210 | char _buf[80]; \ | |
211 | dbg_command_buf(_buf, sizeof _buf, label, command); \ | |
212 | oxu_dbg(oxu, "%s\n", _buf); \ | |
213 | } | |
214 | ||
215 | #define dbg_port(oxu, label, port, status) { \ | |
216 | char _buf[80]; \ | |
217 | dbg_port_buf(_buf, sizeof _buf, label, port, status); \ | |
218 | oxu_dbg(oxu, "%s\n", _buf); \ | |
219 | } | |
220 | ||
221 | /* | |
222 | * Module parameters | |
223 | */ | |
224 | ||
225 | /* Initial IRQ latency: faster than hw default */ | |
226 | static int log2_irq_thresh; /* 0 to 6 */ | |
227 | module_param(log2_irq_thresh, int, S_IRUGO); | |
228 | MODULE_PARM_DESC(log2_irq_thresh, "log2 IRQ latency, 1-64 microframes"); | |
229 | ||
230 | /* Initial park setting: slower than hw default */ | |
231 | static unsigned park; | |
232 | module_param(park, uint, S_IRUGO); | |
233 | MODULE_PARM_DESC(park, "park setting; 1-3 back-to-back async packets"); | |
234 | ||
235 | /* For flakey hardware, ignore overcurrent indicators */ | |
236 | static int ignore_oc; | |
237 | module_param(ignore_oc, bool, S_IRUGO); | |
238 | MODULE_PARM_DESC(ignore_oc, "ignore bogus hardware overcurrent indications"); | |
239 | ||
240 | ||
241 | static void ehci_work(struct oxu_hcd *oxu); | |
242 | static int oxu_hub_control(struct usb_hcd *hcd, | |
243 | u16 typeReq, u16 wValue, u16 wIndex, | |
244 | char *buf, u16 wLength); | |
245 | ||
246 | /* | |
247 | * Local functions | |
248 | */ | |
249 | ||
250 | /* Low level read/write registers functions */ | |
251 | static inline u32 oxu_readl(void *base, u32 reg) | |
252 | { | |
253 | return readl(base + reg); | |
254 | } | |
255 | ||
256 | static inline void oxu_writel(void *base, u32 reg, u32 val) | |
257 | { | |
258 | writel(val, base + reg); | |
259 | } | |
260 | ||
261 | static inline void timer_action_done(struct oxu_hcd *oxu, | |
262 | enum ehci_timer_action action) | |
263 | { | |
264 | clear_bit(action, &oxu->actions); | |
265 | } | |
266 | ||
267 | static inline void timer_action(struct oxu_hcd *oxu, | |
268 | enum ehci_timer_action action) | |
269 | { | |
270 | if (!test_and_set_bit(action, &oxu->actions)) { | |
271 | unsigned long t; | |
272 | ||
273 | switch (action) { | |
274 | case TIMER_IAA_WATCHDOG: | |
275 | t = EHCI_IAA_JIFFIES; | |
276 | break; | |
277 | case TIMER_IO_WATCHDOG: | |
278 | t = EHCI_IO_JIFFIES; | |
279 | break; | |
280 | case TIMER_ASYNC_OFF: | |
281 | t = EHCI_ASYNC_JIFFIES; | |
282 | break; | |
283 | case TIMER_ASYNC_SHRINK: | |
284 | default: | |
285 | t = EHCI_SHRINK_JIFFIES; | |
286 | break; | |
287 | } | |
288 | t += jiffies; | |
289 | /* all timings except IAA watchdog can be overridden. | |
290 | * async queue SHRINK often precedes IAA. while it's ready | |
291 | * to go OFF neither can matter, and afterwards the IO | |
292 | * watchdog stops unless there's still periodic traffic. | |
293 | */ | |
294 | if (action != TIMER_IAA_WATCHDOG | |
295 | && t > oxu->watchdog.expires | |
296 | && timer_pending(&oxu->watchdog)) | |
297 | return; | |
298 | mod_timer(&oxu->watchdog, t); | |
299 | } | |
300 | } | |
301 | ||
302 | /* | |
303 | * handshake - spin reading hc until handshake completes or fails | |
304 | * @ptr: address of hc register to be read | |
305 | * @mask: bits to look at in result of read | |
306 | * @done: value of those bits when handshake succeeds | |
307 | * @usec: timeout in microseconds | |
308 | * | |
309 | * Returns negative errno, or zero on success | |
310 | * | |
311 | * Success happens when the "mask" bits have the specified value (hardware | |
312 | * handshake done). There are two failure modes: "usec" have passed (major | |
313 | * hardware flakeout), or the register reads as all-ones (hardware removed). | |
314 | * | |
315 | * That last failure should_only happen in cases like physical cardbus eject | |
316 | * before driver shutdown. But it also seems to be caused by bugs in cardbus | |
317 | * bridge shutdown: shutting down the bridge before the devices using it. | |
318 | */ | |
319 | static int handshake(struct oxu_hcd *oxu, void __iomem *ptr, | |
320 | u32 mask, u32 done, int usec) | |
321 | { | |
322 | u32 result; | |
323 | ||
324 | do { | |
325 | result = readl(ptr); | |
326 | if (result == ~(u32)0) /* card removed */ | |
327 | return -ENODEV; | |
328 | result &= mask; | |
329 | if (result == done) | |
330 | return 0; | |
331 | udelay(1); | |
332 | usec--; | |
333 | } while (usec > 0); | |
334 | return -ETIMEDOUT; | |
335 | } | |
336 | ||
337 | /* Force HC to halt state from unknown (EHCI spec section 2.3) */ | |
338 | static int ehci_halt(struct oxu_hcd *oxu) | |
339 | { | |
340 | u32 temp = readl(&oxu->regs->status); | |
341 | ||
342 | /* disable any irqs left enabled by previous code */ | |
343 | writel(0, &oxu->regs->intr_enable); | |
344 | ||
345 | if ((temp & STS_HALT) != 0) | |
346 | return 0; | |
347 | ||
348 | temp = readl(&oxu->regs->command); | |
349 | temp &= ~CMD_RUN; | |
350 | writel(temp, &oxu->regs->command); | |
351 | return handshake(oxu, &oxu->regs->status, | |
352 | STS_HALT, STS_HALT, 16 * 125); | |
353 | } | |
354 | ||
355 | /* Put TDI/ARC silicon into EHCI mode */ | |
356 | static void tdi_reset(struct oxu_hcd *oxu) | |
357 | { | |
358 | u32 __iomem *reg_ptr; | |
359 | u32 tmp; | |
360 | ||
361 | reg_ptr = (u32 __iomem *)(((u8 __iomem *)oxu->regs) + 0x68); | |
362 | tmp = readl(reg_ptr); | |
363 | tmp |= 0x3; | |
364 | writel(tmp, reg_ptr); | |
365 | } | |
366 | ||
367 | /* Reset a non-running (STS_HALT == 1) controller */ | |
368 | static int ehci_reset(struct oxu_hcd *oxu) | |
369 | { | |
370 | int retval; | |
371 | u32 command = readl(&oxu->regs->command); | |
372 | ||
373 | command |= CMD_RESET; | |
374 | dbg_cmd(oxu, "reset", command); | |
375 | writel(command, &oxu->regs->command); | |
376 | oxu_to_hcd(oxu)->state = HC_STATE_HALT; | |
377 | oxu->next_statechange = jiffies; | |
378 | retval = handshake(oxu, &oxu->regs->command, | |
379 | CMD_RESET, 0, 250 * 1000); | |
380 | ||
381 | if (retval) | |
382 | return retval; | |
383 | ||
384 | tdi_reset(oxu); | |
385 | ||
386 | return retval; | |
387 | } | |
388 | ||
389 | /* Idle the controller (from running) */ | |
390 | static void ehci_quiesce(struct oxu_hcd *oxu) | |
391 | { | |
392 | u32 temp; | |
393 | ||
394 | #ifdef DEBUG | |
395 | if (!HC_IS_RUNNING(oxu_to_hcd(oxu)->state)) | |
396 | BUG(); | |
397 | #endif | |
398 | ||
399 | /* wait for any schedule enables/disables to take effect */ | |
400 | temp = readl(&oxu->regs->command) << 10; | |
401 | temp &= STS_ASS | STS_PSS; | |
402 | if (handshake(oxu, &oxu->regs->status, STS_ASS | STS_PSS, | |
403 | temp, 16 * 125) != 0) { | |
404 | oxu_to_hcd(oxu)->state = HC_STATE_HALT; | |
405 | return; | |
406 | } | |
407 | ||
408 | /* then disable anything that's still active */ | |
409 | temp = readl(&oxu->regs->command); | |
410 | temp &= ~(CMD_ASE | CMD_IAAD | CMD_PSE); | |
411 | writel(temp, &oxu->regs->command); | |
412 | ||
413 | /* hardware can take 16 microframes to turn off ... */ | |
414 | if (handshake(oxu, &oxu->regs->status, STS_ASS | STS_PSS, | |
415 | 0, 16 * 125) != 0) { | |
416 | oxu_to_hcd(oxu)->state = HC_STATE_HALT; | |
417 | return; | |
418 | } | |
419 | } | |
420 | ||
421 | static int check_reset_complete(struct oxu_hcd *oxu, int index, | |
422 | u32 __iomem *status_reg, int port_status) | |
423 | { | |
424 | if (!(port_status & PORT_CONNECT)) { | |
425 | oxu->reset_done[index] = 0; | |
426 | return port_status; | |
427 | } | |
428 | ||
429 | /* if reset finished and it's still not enabled -- handoff */ | |
430 | if (!(port_status & PORT_PE)) { | |
431 | oxu_dbg(oxu, "Failed to enable port %d on root hub TT\n", | |
432 | index+1); | |
433 | return port_status; | |
434 | } else | |
435 | oxu_dbg(oxu, "port %d high speed\n", index + 1); | |
436 | ||
437 | return port_status; | |
438 | } | |
439 | ||
440 | static void ehci_hub_descriptor(struct oxu_hcd *oxu, | |
441 | struct usb_hub_descriptor *desc) | |
442 | { | |
443 | int ports = HCS_N_PORTS(oxu->hcs_params); | |
444 | u16 temp; | |
445 | ||
446 | desc->bDescriptorType = 0x29; | |
447 | desc->bPwrOn2PwrGood = 10; /* oxu 1.0, 2.3.9 says 20ms max */ | |
448 | desc->bHubContrCurrent = 0; | |
449 | ||
450 | desc->bNbrPorts = ports; | |
451 | temp = 1 + (ports / 8); | |
452 | desc->bDescLength = 7 + 2 * temp; | |
453 | ||
da13051c SS |
454 | /* ports removable, and usb 1.0 legacy PortPwrCtrlMask */ |
455 | memset(&desc->DeviceRemovable[0], 0, temp); | |
456 | memset(&desc->DeviceRemovable[temp], 0xff, temp); | |
b92a78e5 RG |
457 | |
458 | temp = 0x0008; /* per-port overcurrent reporting */ | |
459 | if (HCS_PPC(oxu->hcs_params)) | |
460 | temp |= 0x0001; /* per-port power control */ | |
461 | else | |
462 | temp |= 0x0002; /* no power switching */ | |
463 | desc->wHubCharacteristics = (__force __u16)cpu_to_le16(temp); | |
464 | } | |
465 | ||
466 | ||
467 | /* Allocate an OXU210HP on-chip memory data buffer | |
468 | * | |
469 | * An on-chip memory data buffer is required for each OXU210HP USB transfer. | |
470 | * Each transfer descriptor has one or more on-chip memory data buffers. | |
471 | * | |
472 | * Data buffers are allocated from a fix sized pool of data blocks. | |
473 | * To minimise fragmentation and give reasonable memory utlisation, | |
474 | * data buffers are allocated with sizes the power of 2 multiples of | |
475 | * the block size, starting on an address a multiple of the allocated size. | |
476 | * | |
477 | * FIXME: callers of this function require a buffer to be allocated for | |
478 | * len=0. This is a waste of on-chip memory and should be fix. Then this | |
479 | * function should be changed to not allocate a buffer for len=0. | |
480 | */ | |
481 | static int oxu_buf_alloc(struct oxu_hcd *oxu, struct ehci_qtd *qtd, int len) | |
482 | { | |
483 | int n_blocks; /* minium blocks needed to hold len */ | |
484 | int a_blocks; /* blocks allocated */ | |
485 | int i, j; | |
486 | ||
487 | /* Don't allocte bigger than supported */ | |
488 | if (len > BUFFER_SIZE * BUFFER_NUM) { | |
489 | oxu_err(oxu, "buffer too big (%d)\n", len); | |
490 | return -ENOMEM; | |
491 | } | |
492 | ||
493 | spin_lock(&oxu->mem_lock); | |
494 | ||
495 | /* Number of blocks needed to hold len */ | |
496 | n_blocks = (len + BUFFER_SIZE - 1) / BUFFER_SIZE; | |
497 | ||
498 | /* Round the number of blocks up to the power of 2 */ | |
499 | for (a_blocks = 1; a_blocks < n_blocks; a_blocks <<= 1) | |
500 | ; | |
501 | ||
502 | /* Find a suitable available data buffer */ | |
503 | for (i = 0; i < BUFFER_NUM; | |
504 | i += max(a_blocks, (int)oxu->db_used[i])) { | |
505 | ||
506 | /* Check all the required blocks are available */ | |
507 | for (j = 0; j < a_blocks; j++) | |
508 | if (oxu->db_used[i + j]) | |
509 | break; | |
510 | ||
511 | if (j != a_blocks) | |
512 | continue; | |
513 | ||
514 | /* Allocate blocks found! */ | |
515 | qtd->buffer = (void *) &oxu->mem->db_pool[i]; | |
516 | qtd->buffer_dma = virt_to_phys(qtd->buffer); | |
517 | ||
518 | qtd->qtd_buffer_len = BUFFER_SIZE * a_blocks; | |
519 | oxu->db_used[i] = a_blocks; | |
520 | ||
521 | spin_unlock(&oxu->mem_lock); | |
522 | ||
523 | return 0; | |
524 | } | |
525 | ||
526 | /* Failed */ | |
527 | ||
528 | spin_unlock(&oxu->mem_lock); | |
529 | ||
530 | return -ENOMEM; | |
531 | } | |
532 | ||
533 | static void oxu_buf_free(struct oxu_hcd *oxu, struct ehci_qtd *qtd) | |
534 | { | |
535 | int index; | |
536 | ||
537 | spin_lock(&oxu->mem_lock); | |
538 | ||
539 | index = (qtd->buffer - (void *) &oxu->mem->db_pool[0]) | |
540 | / BUFFER_SIZE; | |
541 | oxu->db_used[index] = 0; | |
542 | qtd->qtd_buffer_len = 0; | |
543 | qtd->buffer_dma = 0; | |
544 | qtd->buffer = NULL; | |
545 | ||
546 | spin_unlock(&oxu->mem_lock); | |
b92a78e5 RG |
547 | } |
548 | ||
549 | static inline void ehci_qtd_init(struct ehci_qtd *qtd, dma_addr_t dma) | |
550 | { | |
551 | memset(qtd, 0, sizeof *qtd); | |
552 | qtd->qtd_dma = dma; | |
553 | qtd->hw_token = cpu_to_le32(QTD_STS_HALT); | |
554 | qtd->hw_next = EHCI_LIST_END; | |
555 | qtd->hw_alt_next = EHCI_LIST_END; | |
556 | INIT_LIST_HEAD(&qtd->qtd_list); | |
557 | } | |
558 | ||
559 | static inline void oxu_qtd_free(struct oxu_hcd *oxu, struct ehci_qtd *qtd) | |
560 | { | |
561 | int index; | |
562 | ||
563 | if (qtd->buffer) | |
564 | oxu_buf_free(oxu, qtd); | |
565 | ||
566 | spin_lock(&oxu->mem_lock); | |
567 | ||
568 | index = qtd - &oxu->mem->qtd_pool[0]; | |
569 | oxu->qtd_used[index] = 0; | |
570 | ||
571 | spin_unlock(&oxu->mem_lock); | |
b92a78e5 RG |
572 | } |
573 | ||
574 | static struct ehci_qtd *ehci_qtd_alloc(struct oxu_hcd *oxu) | |
575 | { | |
576 | int i; | |
577 | struct ehci_qtd *qtd = NULL; | |
578 | ||
579 | spin_lock(&oxu->mem_lock); | |
580 | ||
581 | for (i = 0; i < QTD_NUM; i++) | |
582 | if (!oxu->qtd_used[i]) | |
583 | break; | |
584 | ||
585 | if (i < QTD_NUM) { | |
586 | qtd = (struct ehci_qtd *) &oxu->mem->qtd_pool[i]; | |
587 | memset(qtd, 0, sizeof *qtd); | |
588 | ||
589 | qtd->hw_token = cpu_to_le32(QTD_STS_HALT); | |
590 | qtd->hw_next = EHCI_LIST_END; | |
591 | qtd->hw_alt_next = EHCI_LIST_END; | |
592 | INIT_LIST_HEAD(&qtd->qtd_list); | |
593 | ||
594 | qtd->qtd_dma = virt_to_phys(qtd); | |
595 | ||
596 | oxu->qtd_used[i] = 1; | |
597 | } | |
598 | ||
599 | spin_unlock(&oxu->mem_lock); | |
600 | ||
601 | return qtd; | |
602 | } | |
603 | ||
604 | static void oxu_qh_free(struct oxu_hcd *oxu, struct ehci_qh *qh) | |
605 | { | |
606 | int index; | |
607 | ||
608 | spin_lock(&oxu->mem_lock); | |
609 | ||
610 | index = qh - &oxu->mem->qh_pool[0]; | |
611 | oxu->qh_used[index] = 0; | |
612 | ||
613 | spin_unlock(&oxu->mem_lock); | |
b92a78e5 RG |
614 | } |
615 | ||
616 | static void qh_destroy(struct kref *kref) | |
617 | { | |
618 | struct ehci_qh *qh = container_of(kref, struct ehci_qh, kref); | |
619 | struct oxu_hcd *oxu = qh->oxu; | |
620 | ||
621 | /* clean qtds first, and know this is not linked */ | |
622 | if (!list_empty(&qh->qtd_list) || qh->qh_next.ptr) { | |
623 | oxu_dbg(oxu, "unused qh not empty!\n"); | |
624 | BUG(); | |
625 | } | |
626 | if (qh->dummy) | |
627 | oxu_qtd_free(oxu, qh->dummy); | |
628 | oxu_qh_free(oxu, qh); | |
629 | } | |
630 | ||
631 | static struct ehci_qh *oxu_qh_alloc(struct oxu_hcd *oxu) | |
632 | { | |
633 | int i; | |
634 | struct ehci_qh *qh = NULL; | |
635 | ||
636 | spin_lock(&oxu->mem_lock); | |
637 | ||
638 | for (i = 0; i < QHEAD_NUM; i++) | |
639 | if (!oxu->qh_used[i]) | |
640 | break; | |
641 | ||
642 | if (i < QHEAD_NUM) { | |
643 | qh = (struct ehci_qh *) &oxu->mem->qh_pool[i]; | |
644 | memset(qh, 0, sizeof *qh); | |
645 | ||
646 | kref_init(&qh->kref); | |
647 | qh->oxu = oxu; | |
648 | qh->qh_dma = virt_to_phys(qh); | |
649 | INIT_LIST_HEAD(&qh->qtd_list); | |
650 | ||
651 | /* dummy td enables safe urb queuing */ | |
652 | qh->dummy = ehci_qtd_alloc(oxu); | |
653 | if (qh->dummy == NULL) { | |
654 | oxu_dbg(oxu, "no dummy td\n"); | |
655 | oxu->qh_used[i] = 0; | |
82a5eeb9 DC |
656 | qh = NULL; |
657 | goto unlock; | |
b92a78e5 RG |
658 | } |
659 | ||
660 | oxu->qh_used[i] = 1; | |
661 | } | |
82a5eeb9 | 662 | unlock: |
b92a78e5 RG |
663 | spin_unlock(&oxu->mem_lock); |
664 | ||
665 | return qh; | |
666 | } | |
667 | ||
668 | /* to share a qh (cpu threads, or hc) */ | |
669 | static inline struct ehci_qh *qh_get(struct ehci_qh *qh) | |
670 | { | |
671 | kref_get(&qh->kref); | |
672 | return qh; | |
673 | } | |
674 | ||
675 | static inline void qh_put(struct ehci_qh *qh) | |
676 | { | |
677 | kref_put(&qh->kref, qh_destroy); | |
678 | } | |
679 | ||
680 | static void oxu_murb_free(struct oxu_hcd *oxu, struct oxu_murb *murb) | |
681 | { | |
682 | int index; | |
683 | ||
684 | spin_lock(&oxu->mem_lock); | |
685 | ||
686 | index = murb - &oxu->murb_pool[0]; | |
687 | oxu->murb_used[index] = 0; | |
688 | ||
689 | spin_unlock(&oxu->mem_lock); | |
b92a78e5 RG |
690 | } |
691 | ||
692 | static struct oxu_murb *oxu_murb_alloc(struct oxu_hcd *oxu) | |
693 | ||
694 | { | |
695 | int i; | |
696 | struct oxu_murb *murb = NULL; | |
697 | ||
698 | spin_lock(&oxu->mem_lock); | |
699 | ||
700 | for (i = 0; i < MURB_NUM; i++) | |
701 | if (!oxu->murb_used[i]) | |
702 | break; | |
703 | ||
704 | if (i < MURB_NUM) { | |
705 | murb = &(oxu->murb_pool)[i]; | |
706 | ||
707 | oxu->murb_used[i] = 1; | |
708 | } | |
709 | ||
710 | spin_unlock(&oxu->mem_lock); | |
711 | ||
712 | return murb; | |
713 | } | |
714 | ||
715 | /* The queue heads and transfer descriptors are managed from pools tied | |
716 | * to each of the "per device" structures. | |
717 | * This is the initialisation and cleanup code. | |
718 | */ | |
719 | static void ehci_mem_cleanup(struct oxu_hcd *oxu) | |
720 | { | |
721 | kfree(oxu->murb_pool); | |
722 | oxu->murb_pool = NULL; | |
723 | ||
724 | if (oxu->async) | |
725 | qh_put(oxu->async); | |
726 | oxu->async = NULL; | |
727 | ||
728 | del_timer(&oxu->urb_timer); | |
729 | ||
730 | oxu->periodic = NULL; | |
731 | ||
732 | /* shadow periodic table */ | |
733 | kfree(oxu->pshadow); | |
734 | oxu->pshadow = NULL; | |
735 | } | |
736 | ||
737 | /* Remember to add cleanup code (above) if you add anything here. | |
738 | */ | |
739 | static int ehci_mem_init(struct oxu_hcd *oxu, gfp_t flags) | |
740 | { | |
741 | int i; | |
742 | ||
743 | for (i = 0; i < oxu->periodic_size; i++) | |
744 | oxu->mem->frame_list[i] = EHCI_LIST_END; | |
745 | for (i = 0; i < QHEAD_NUM; i++) | |
746 | oxu->qh_used[i] = 0; | |
747 | for (i = 0; i < QTD_NUM; i++) | |
748 | oxu->qtd_used[i] = 0; | |
749 | ||
750 | oxu->murb_pool = kcalloc(MURB_NUM, sizeof(struct oxu_murb), flags); | |
751 | if (!oxu->murb_pool) | |
752 | goto fail; | |
753 | ||
754 | for (i = 0; i < MURB_NUM; i++) | |
755 | oxu->murb_used[i] = 0; | |
756 | ||
757 | oxu->async = oxu_qh_alloc(oxu); | |
758 | if (!oxu->async) | |
759 | goto fail; | |
760 | ||
761 | oxu->periodic = (__le32 *) &oxu->mem->frame_list; | |
762 | oxu->periodic_dma = virt_to_phys(oxu->periodic); | |
763 | ||
764 | for (i = 0; i < oxu->periodic_size; i++) | |
765 | oxu->periodic[i] = EHCI_LIST_END; | |
766 | ||
767 | /* software shadow of hardware table */ | |
768 | oxu->pshadow = kcalloc(oxu->periodic_size, sizeof(void *), flags); | |
769 | if (oxu->pshadow != NULL) | |
770 | return 0; | |
771 | ||
772 | fail: | |
773 | oxu_dbg(oxu, "couldn't init memory\n"); | |
774 | ehci_mem_cleanup(oxu); | |
775 | return -ENOMEM; | |
776 | } | |
777 | ||
778 | /* Fill a qtd, returning how much of the buffer we were able to queue up. | |
779 | */ | |
780 | static int qtd_fill(struct ehci_qtd *qtd, dma_addr_t buf, size_t len, | |
781 | int token, int maxpacket) | |
782 | { | |
783 | int i, count; | |
784 | u64 addr = buf; | |
785 | ||
786 | /* one buffer entry per 4K ... first might be short or unaligned */ | |
787 | qtd->hw_buf[0] = cpu_to_le32((u32)addr); | |
788 | qtd->hw_buf_hi[0] = cpu_to_le32((u32)(addr >> 32)); | |
789 | count = 0x1000 - (buf & 0x0fff); /* rest of that page */ | |
790 | if (likely(len < count)) /* ... iff needed */ | |
791 | count = len; | |
792 | else { | |
793 | buf += 0x1000; | |
794 | buf &= ~0x0fff; | |
795 | ||
796 | /* per-qtd limit: from 16K to 20K (best alignment) */ | |
797 | for (i = 1; count < len && i < 5; i++) { | |
798 | addr = buf; | |
799 | qtd->hw_buf[i] = cpu_to_le32((u32)addr); | |
800 | qtd->hw_buf_hi[i] = cpu_to_le32((u32)(addr >> 32)); | |
801 | buf += 0x1000; | |
802 | if ((count + 0x1000) < len) | |
803 | count += 0x1000; | |
804 | else | |
805 | count = len; | |
806 | } | |
807 | ||
808 | /* short packets may only terminate transfers */ | |
809 | if (count != len) | |
810 | count -= (count % maxpacket); | |
811 | } | |
812 | qtd->hw_token = cpu_to_le32((count << 16) | token); | |
813 | qtd->length = count; | |
814 | ||
815 | return count; | |
816 | } | |
817 | ||
818 | static inline void qh_update(struct oxu_hcd *oxu, | |
819 | struct ehci_qh *qh, struct ehci_qtd *qtd) | |
820 | { | |
821 | /* writes to an active overlay are unsafe */ | |
822 | BUG_ON(qh->qh_state != QH_STATE_IDLE); | |
823 | ||
824 | qh->hw_qtd_next = QTD_NEXT(qtd->qtd_dma); | |
825 | qh->hw_alt_next = EHCI_LIST_END; | |
826 | ||
827 | /* Except for control endpoints, we make hardware maintain data | |
828 | * toggle (like OHCI) ... here (re)initialize the toggle in the QH, | |
829 | * and set the pseudo-toggle in udev. Only usb_clear_halt() will | |
830 | * ever clear it. | |
831 | */ | |
832 | if (!(qh->hw_info1 & cpu_to_le32(1 << 14))) { | |
833 | unsigned is_out, epnum; | |
834 | ||
835 | is_out = !(qtd->hw_token & cpu_to_le32(1 << 8)); | |
836 | epnum = (le32_to_cpup(&qh->hw_info1) >> 8) & 0x0f; | |
837 | if (unlikely(!usb_gettoggle(qh->dev, epnum, is_out))) { | |
551509d2 | 838 | qh->hw_token &= ~cpu_to_le32(QTD_TOGGLE); |
b92a78e5 RG |
839 | usb_settoggle(qh->dev, epnum, is_out, 1); |
840 | } | |
841 | } | |
842 | ||
843 | /* HC must see latest qtd and qh data before we clear ACTIVE+HALT */ | |
844 | wmb(); | |
551509d2 | 845 | qh->hw_token &= cpu_to_le32(QTD_TOGGLE | QTD_STS_PING); |
b92a78e5 RG |
846 | } |
847 | ||
848 | /* If it weren't for a common silicon quirk (writing the dummy into the qh | |
849 | * overlay, so qh->hw_token wrongly becomes inactive/halted), only fault | |
850 | * recovery (including urb dequeue) would need software changes to a QH... | |
851 | */ | |
852 | static void qh_refresh(struct oxu_hcd *oxu, struct ehci_qh *qh) | |
853 | { | |
854 | struct ehci_qtd *qtd; | |
855 | ||
856 | if (list_empty(&qh->qtd_list)) | |
857 | qtd = qh->dummy; | |
858 | else { | |
859 | qtd = list_entry(qh->qtd_list.next, | |
860 | struct ehci_qtd, qtd_list); | |
861 | /* first qtd may already be partially processed */ | |
862 | if (cpu_to_le32(qtd->qtd_dma) == qh->hw_current) | |
863 | qtd = NULL; | |
864 | } | |
865 | ||
866 | if (qtd) | |
867 | qh_update(oxu, qh, qtd); | |
868 | } | |
869 | ||
870 | static void qtd_copy_status(struct oxu_hcd *oxu, struct urb *urb, | |
871 | size_t length, u32 token) | |
872 | { | |
873 | /* count IN/OUT bytes, not SETUP (even short packets) */ | |
874 | if (likely(QTD_PID(token) != 2)) | |
875 | urb->actual_length += length - QTD_LENGTH(token); | |
876 | ||
877 | /* don't modify error codes */ | |
878 | if (unlikely(urb->status != -EINPROGRESS)) | |
879 | return; | |
880 | ||
881 | /* force cleanup after short read; not always an error */ | |
882 | if (unlikely(IS_SHORT_READ(token))) | |
883 | urb->status = -EREMOTEIO; | |
884 | ||
885 | /* serious "can't proceed" faults reported by the hardware */ | |
886 | if (token & QTD_STS_HALT) { | |
887 | if (token & QTD_STS_BABBLE) { | |
888 | /* FIXME "must" disable babbling device's port too */ | |
889 | urb->status = -EOVERFLOW; | |
890 | } else if (token & QTD_STS_MMF) { | |
891 | /* fs/ls interrupt xfer missed the complete-split */ | |
892 | urb->status = -EPROTO; | |
893 | } else if (token & QTD_STS_DBE) { | |
894 | urb->status = (QTD_PID(token) == 1) /* IN ? */ | |
895 | ? -ENOSR /* hc couldn't read data */ | |
896 | : -ECOMM; /* hc couldn't write data */ | |
897 | } else if (token & QTD_STS_XACT) { | |
898 | /* timeout, bad crc, wrong PID, etc; retried */ | |
899 | if (QTD_CERR(token)) | |
900 | urb->status = -EPIPE; | |
901 | else { | |
902 | oxu_dbg(oxu, "devpath %s ep%d%s 3strikes\n", | |
903 | urb->dev->devpath, | |
904 | usb_pipeendpoint(urb->pipe), | |
905 | usb_pipein(urb->pipe) ? "in" : "out"); | |
906 | urb->status = -EPROTO; | |
907 | } | |
908 | /* CERR nonzero + no errors + halt --> stall */ | |
909 | } else if (QTD_CERR(token)) | |
910 | urb->status = -EPIPE; | |
911 | else /* unknown */ | |
912 | urb->status = -EPROTO; | |
913 | ||
914 | oxu_vdbg(oxu, "dev%d ep%d%s qtd token %08x --> status %d\n", | |
915 | usb_pipedevice(urb->pipe), | |
916 | usb_pipeendpoint(urb->pipe), | |
917 | usb_pipein(urb->pipe) ? "in" : "out", | |
918 | token, urb->status); | |
919 | } | |
920 | } | |
921 | ||
922 | static void ehci_urb_done(struct oxu_hcd *oxu, struct urb *urb) | |
923 | __releases(oxu->lock) | |
924 | __acquires(oxu->lock) | |
925 | { | |
926 | if (likely(urb->hcpriv != NULL)) { | |
927 | struct ehci_qh *qh = (struct ehci_qh *) urb->hcpriv; | |
928 | ||
929 | /* S-mask in a QH means it's an interrupt urb */ | |
551509d2 | 930 | if ((qh->hw_info2 & cpu_to_le32(QH_SMASK)) != 0) { |
b92a78e5 RG |
931 | |
932 | /* ... update hc-wide periodic stats (for usbfs) */ | |
933 | oxu_to_hcd(oxu)->self.bandwidth_int_reqs--; | |
934 | } | |
935 | qh_put(qh); | |
936 | } | |
937 | ||
938 | urb->hcpriv = NULL; | |
939 | switch (urb->status) { | |
940 | case -EINPROGRESS: /* success */ | |
941 | urb->status = 0; | |
942 | default: /* fault */ | |
943 | break; | |
944 | case -EREMOTEIO: /* fault or normal */ | |
945 | if (!(urb->transfer_flags & URB_SHORT_NOT_OK)) | |
946 | urb->status = 0; | |
947 | break; | |
948 | case -ECONNRESET: /* canceled */ | |
949 | case -ENOENT: | |
950 | break; | |
951 | } | |
952 | ||
953 | #ifdef OXU_URB_TRACE | |
954 | oxu_dbg(oxu, "%s %s urb %p ep%d%s status %d len %d/%d\n", | |
955 | __func__, urb->dev->devpath, urb, | |
956 | usb_pipeendpoint(urb->pipe), | |
957 | usb_pipein(urb->pipe) ? "in" : "out", | |
958 | urb->status, | |
959 | urb->actual_length, urb->transfer_buffer_length); | |
960 | #endif | |
961 | ||
962 | /* complete() can reenter this HCD */ | |
963 | spin_unlock(&oxu->lock); | |
964 | usb_hcd_giveback_urb(oxu_to_hcd(oxu), urb, urb->status); | |
965 | spin_lock(&oxu->lock); | |
966 | } | |
967 | ||
968 | static void start_unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh); | |
969 | static void unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh); | |
970 | ||
971 | static void intr_deschedule(struct oxu_hcd *oxu, struct ehci_qh *qh); | |
972 | static int qh_schedule(struct oxu_hcd *oxu, struct ehci_qh *qh); | |
973 | ||
551509d2 | 974 | #define HALT_BIT cpu_to_le32(QTD_STS_HALT) |
b92a78e5 RG |
975 | |
976 | /* Process and free completed qtds for a qh, returning URBs to drivers. | |
977 | * Chases up to qh->hw_current. Returns number of completions called, | |
978 | * indicating how much "real" work we did. | |
979 | */ | |
980 | static unsigned qh_completions(struct oxu_hcd *oxu, struct ehci_qh *qh) | |
981 | { | |
982 | struct ehci_qtd *last = NULL, *end = qh->dummy; | |
983 | struct list_head *entry, *tmp; | |
984 | int stopped; | |
985 | unsigned count = 0; | |
986 | int do_status = 0; | |
987 | u8 state; | |
988 | struct oxu_murb *murb = NULL; | |
989 | ||
990 | if (unlikely(list_empty(&qh->qtd_list))) | |
991 | return count; | |
992 | ||
993 | /* completions (or tasks on other cpus) must never clobber HALT | |
994 | * till we've gone through and cleaned everything up, even when | |
995 | * they add urbs to this qh's queue or mark them for unlinking. | |
996 | * | |
997 | * NOTE: unlinking expects to be done in queue order. | |
998 | */ | |
999 | state = qh->qh_state; | |
1000 | qh->qh_state = QH_STATE_COMPLETING; | |
1001 | stopped = (state == QH_STATE_IDLE); | |
1002 | ||
1003 | /* remove de-activated QTDs from front of queue. | |
1004 | * after faults (including short reads), cleanup this urb | |
1005 | * then let the queue advance. | |
1006 | * if queue is stopped, handles unlinks. | |
1007 | */ | |
1008 | list_for_each_safe(entry, tmp, &qh->qtd_list) { | |
1009 | struct ehci_qtd *qtd; | |
1010 | struct urb *urb; | |
1011 | u32 token = 0; | |
1012 | ||
1013 | qtd = list_entry(entry, struct ehci_qtd, qtd_list); | |
1014 | urb = qtd->urb; | |
1015 | ||
1016 | /* Clean up any state from previous QTD ...*/ | |
1017 | if (last) { | |
1018 | if (likely(last->urb != urb)) { | |
1019 | if (last->urb->complete == NULL) { | |
1020 | murb = (struct oxu_murb *) last->urb; | |
1021 | last->urb = murb->main; | |
1022 | if (murb->last) { | |
1023 | ehci_urb_done(oxu, last->urb); | |
1024 | count++; | |
1025 | } | |
1026 | oxu_murb_free(oxu, murb); | |
1027 | } else { | |
1028 | ehci_urb_done(oxu, last->urb); | |
1029 | count++; | |
1030 | } | |
1031 | } | |
1032 | oxu_qtd_free(oxu, last); | |
1033 | last = NULL; | |
1034 | } | |
1035 | ||
1036 | /* ignore urbs submitted during completions we reported */ | |
1037 | if (qtd == end) | |
1038 | break; | |
1039 | ||
1040 | /* hardware copies qtd out of qh overlay */ | |
1041 | rmb(); | |
1042 | token = le32_to_cpu(qtd->hw_token); | |
1043 | ||
1044 | /* always clean up qtds the hc de-activated */ | |
1045 | if ((token & QTD_STS_ACTIVE) == 0) { | |
1046 | ||
1047 | if ((token & QTD_STS_HALT) != 0) { | |
1048 | stopped = 1; | |
1049 | ||
1050 | /* magic dummy for some short reads; qh won't advance. | |
1051 | * that silicon quirk can kick in with this dummy too. | |
1052 | */ | |
1053 | } else if (IS_SHORT_READ(token) && | |
1054 | !(qtd->hw_alt_next & EHCI_LIST_END)) { | |
1055 | stopped = 1; | |
1056 | goto halt; | |
1057 | } | |
1058 | ||
1059 | /* stop scanning when we reach qtds the hc is using */ | |
1060 | } else if (likely(!stopped && | |
1061 | HC_IS_RUNNING(oxu_to_hcd(oxu)->state))) { | |
1062 | break; | |
1063 | ||
1064 | } else { | |
1065 | stopped = 1; | |
1066 | ||
1067 | if (unlikely(!HC_IS_RUNNING(oxu_to_hcd(oxu)->state))) | |
1068 | urb->status = -ESHUTDOWN; | |
1069 | ||
1070 | /* ignore active urbs unless some previous qtd | |
1071 | * for the urb faulted (including short read) or | |
1072 | * its urb was canceled. we may patch qh or qtds. | |
1073 | */ | |
1074 | if (likely(urb->status == -EINPROGRESS)) | |
1075 | continue; | |
1076 | ||
1077 | /* issue status after short control reads */ | |
1078 | if (unlikely(do_status != 0) | |
1079 | && QTD_PID(token) == 0 /* OUT */) { | |
1080 | do_status = 0; | |
1081 | continue; | |
1082 | } | |
1083 | ||
1084 | /* token in overlay may be most current */ | |
1085 | if (state == QH_STATE_IDLE | |
1086 | && cpu_to_le32(qtd->qtd_dma) | |
1087 | == qh->hw_current) | |
1088 | token = le32_to_cpu(qh->hw_token); | |
1089 | ||
1090 | /* force halt for unlinked or blocked qh, so we'll | |
1091 | * patch the qh later and so that completions can't | |
1092 | * activate it while we "know" it's stopped. | |
1093 | */ | |
1094 | if ((HALT_BIT & qh->hw_token) == 0) { | |
1095 | halt: | |
1096 | qh->hw_token |= HALT_BIT; | |
1097 | wmb(); | |
1098 | } | |
1099 | } | |
1100 | ||
1101 | /* Remove it from the queue */ | |
1102 | qtd_copy_status(oxu, urb->complete ? | |
1103 | urb : ((struct oxu_murb *) urb)->main, | |
1104 | qtd->length, token); | |
1105 | if ((usb_pipein(qtd->urb->pipe)) && | |
1106 | (NULL != qtd->transfer_buffer)) | |
1107 | memcpy(qtd->transfer_buffer, qtd->buffer, qtd->length); | |
1108 | do_status = (urb->status == -EREMOTEIO) | |
1109 | && usb_pipecontrol(urb->pipe); | |
1110 | ||
1111 | if (stopped && qtd->qtd_list.prev != &qh->qtd_list) { | |
1112 | last = list_entry(qtd->qtd_list.prev, | |
1113 | struct ehci_qtd, qtd_list); | |
1114 | last->hw_next = qtd->hw_next; | |
1115 | } | |
1116 | list_del(&qtd->qtd_list); | |
1117 | last = qtd; | |
1118 | } | |
1119 | ||
1120 | /* last urb's completion might still need calling */ | |
1121 | if (likely(last != NULL)) { | |
1122 | if (last->urb->complete == NULL) { | |
1123 | murb = (struct oxu_murb *) last->urb; | |
1124 | last->urb = murb->main; | |
1125 | if (murb->last) { | |
1126 | ehci_urb_done(oxu, last->urb); | |
1127 | count++; | |
1128 | } | |
1129 | oxu_murb_free(oxu, murb); | |
1130 | } else { | |
1131 | ehci_urb_done(oxu, last->urb); | |
1132 | count++; | |
1133 | } | |
1134 | oxu_qtd_free(oxu, last); | |
1135 | } | |
1136 | ||
1137 | /* restore original state; caller must unlink or relink */ | |
1138 | qh->qh_state = state; | |
1139 | ||
1140 | /* be sure the hardware's done with the qh before refreshing | |
1141 | * it after fault cleanup, or recovering from silicon wrongly | |
1142 | * overlaying the dummy qtd (which reduces DMA chatter). | |
1143 | */ | |
1144 | if (stopped != 0 || qh->hw_qtd_next == EHCI_LIST_END) { | |
1145 | switch (state) { | |
1146 | case QH_STATE_IDLE: | |
1147 | qh_refresh(oxu, qh); | |
1148 | break; | |
1149 | case QH_STATE_LINKED: | |
1150 | /* should be rare for periodic transfers, | |
1151 | * except maybe high bandwidth ... | |
1152 | */ | |
551509d2 | 1153 | if ((cpu_to_le32(QH_SMASK) |
b92a78e5 RG |
1154 | & qh->hw_info2) != 0) { |
1155 | intr_deschedule(oxu, qh); | |
1156 | (void) qh_schedule(oxu, qh); | |
1157 | } else | |
1158 | unlink_async(oxu, qh); | |
1159 | break; | |
1160 | /* otherwise, unlink already started */ | |
1161 | } | |
1162 | } | |
1163 | ||
1164 | return count; | |
1165 | } | |
1166 | ||
1167 | /* High bandwidth multiplier, as encoded in highspeed endpoint descriptors */ | |
1168 | #define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03)) | |
1169 | /* ... and packet size, for any kind of endpoint descriptor */ | |
1170 | #define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff) | |
1171 | ||
1172 | /* Reverse of qh_urb_transaction: free a list of TDs. | |
1173 | * used for cleanup after errors, before HC sees an URB's TDs. | |
1174 | */ | |
1175 | static void qtd_list_free(struct oxu_hcd *oxu, | |
1176 | struct urb *urb, struct list_head *qtd_list) | |
1177 | { | |
1178 | struct list_head *entry, *temp; | |
1179 | ||
1180 | list_for_each_safe(entry, temp, qtd_list) { | |
1181 | struct ehci_qtd *qtd; | |
1182 | ||
1183 | qtd = list_entry(entry, struct ehci_qtd, qtd_list); | |
1184 | list_del(&qtd->qtd_list); | |
1185 | oxu_qtd_free(oxu, qtd); | |
1186 | } | |
1187 | } | |
1188 | ||
1189 | /* Create a list of filled qtds for this URB; won't link into qh. | |
1190 | */ | |
1191 | static struct list_head *qh_urb_transaction(struct oxu_hcd *oxu, | |
1192 | struct urb *urb, | |
1193 | struct list_head *head, | |
1194 | gfp_t flags) | |
1195 | { | |
1196 | struct ehci_qtd *qtd, *qtd_prev; | |
1197 | dma_addr_t buf; | |
1198 | int len, maxpacket; | |
1199 | int is_input; | |
1200 | u32 token; | |
1201 | void *transfer_buf = NULL; | |
1202 | int ret; | |
1203 | ||
1204 | /* | |
1205 | * URBs map to sequences of QTDs: one logical transaction | |
1206 | */ | |
1207 | qtd = ehci_qtd_alloc(oxu); | |
1208 | if (unlikely(!qtd)) | |
1209 | return NULL; | |
1210 | list_add_tail(&qtd->qtd_list, head); | |
1211 | qtd->urb = urb; | |
1212 | ||
1213 | token = QTD_STS_ACTIVE; | |
1214 | token |= (EHCI_TUNE_CERR << 10); | |
1215 | /* for split transactions, SplitXState initialized to zero */ | |
1216 | ||
1217 | len = urb->transfer_buffer_length; | |
1218 | is_input = usb_pipein(urb->pipe); | |
1219 | if (!urb->transfer_buffer && urb->transfer_buffer_length && is_input) | |
1220 | urb->transfer_buffer = phys_to_virt(urb->transfer_dma); | |
1221 | ||
1222 | if (usb_pipecontrol(urb->pipe)) { | |
1223 | /* SETUP pid */ | |
1224 | ret = oxu_buf_alloc(oxu, qtd, sizeof(struct usb_ctrlrequest)); | |
1225 | if (ret) | |
1226 | goto cleanup; | |
1227 | ||
1228 | qtd_fill(qtd, qtd->buffer_dma, sizeof(struct usb_ctrlrequest), | |
1229 | token | (2 /* "setup" */ << 8), 8); | |
1230 | memcpy(qtd->buffer, qtd->urb->setup_packet, | |
1231 | sizeof(struct usb_ctrlrequest)); | |
1232 | ||
1233 | /* ... and always at least one more pid */ | |
1234 | token ^= QTD_TOGGLE; | |
1235 | qtd_prev = qtd; | |
1236 | qtd = ehci_qtd_alloc(oxu); | |
1237 | if (unlikely(!qtd)) | |
1238 | goto cleanup; | |
1239 | qtd->urb = urb; | |
1240 | qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma); | |
1241 | list_add_tail(&qtd->qtd_list, head); | |
1242 | ||
1243 | /* for zero length DATA stages, STATUS is always IN */ | |
1244 | if (len == 0) | |
1245 | token |= (1 /* "in" */ << 8); | |
1246 | } | |
1247 | ||
1248 | /* | |
1249 | * Data transfer stage: buffer setup | |
1250 | */ | |
1251 | ||
1252 | ret = oxu_buf_alloc(oxu, qtd, len); | |
1253 | if (ret) | |
1254 | goto cleanup; | |
1255 | ||
1256 | buf = qtd->buffer_dma; | |
1257 | transfer_buf = urb->transfer_buffer; | |
1258 | ||
1259 | if (!is_input) | |
1260 | memcpy(qtd->buffer, qtd->urb->transfer_buffer, len); | |
1261 | ||
1262 | if (is_input) | |
1263 | token |= (1 /* "in" */ << 8); | |
1264 | /* else it's already initted to "out" pid (0 << 8) */ | |
1265 | ||
1266 | maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input)); | |
1267 | ||
1268 | /* | |
1269 | * buffer gets wrapped in one or more qtds; | |
1270 | * last one may be "short" (including zero len) | |
1271 | * and may serve as a control status ack | |
1272 | */ | |
1273 | for (;;) { | |
1274 | int this_qtd_len; | |
1275 | ||
1276 | this_qtd_len = qtd_fill(qtd, buf, len, token, maxpacket); | |
1277 | qtd->transfer_buffer = transfer_buf; | |
1278 | len -= this_qtd_len; | |
1279 | buf += this_qtd_len; | |
1280 | transfer_buf += this_qtd_len; | |
1281 | if (is_input) | |
1282 | qtd->hw_alt_next = oxu->async->hw_alt_next; | |
1283 | ||
1284 | /* qh makes control packets use qtd toggle; maybe switch it */ | |
1285 | if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0) | |
1286 | token ^= QTD_TOGGLE; | |
1287 | ||
1288 | if (likely(len <= 0)) | |
1289 | break; | |
1290 | ||
1291 | qtd_prev = qtd; | |
1292 | qtd = ehci_qtd_alloc(oxu); | |
1293 | if (unlikely(!qtd)) | |
1294 | goto cleanup; | |
1295 | if (likely(len > 0)) { | |
1296 | ret = oxu_buf_alloc(oxu, qtd, len); | |
1297 | if (ret) | |
1298 | goto cleanup; | |
1299 | } | |
1300 | qtd->urb = urb; | |
1301 | qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma); | |
1302 | list_add_tail(&qtd->qtd_list, head); | |
1303 | } | |
1304 | ||
1305 | /* unless the bulk/interrupt caller wants a chance to clean | |
1306 | * up after short reads, hc should advance qh past this urb | |
1307 | */ | |
1308 | if (likely((urb->transfer_flags & URB_SHORT_NOT_OK) == 0 | |
1309 | || usb_pipecontrol(urb->pipe))) | |
1310 | qtd->hw_alt_next = EHCI_LIST_END; | |
1311 | ||
1312 | /* | |
1313 | * control requests may need a terminating data "status" ack; | |
1314 | * bulk ones may need a terminating short packet (zero length). | |
1315 | */ | |
1316 | if (likely(urb->transfer_buffer_length != 0)) { | |
1317 | int one_more = 0; | |
1318 | ||
1319 | if (usb_pipecontrol(urb->pipe)) { | |
1320 | one_more = 1; | |
1321 | token ^= 0x0100; /* "in" <--> "out" */ | |
1322 | token |= QTD_TOGGLE; /* force DATA1 */ | |
1323 | } else if (usb_pipebulk(urb->pipe) | |
1324 | && (urb->transfer_flags & URB_ZERO_PACKET) | |
1325 | && !(urb->transfer_buffer_length % maxpacket)) { | |
1326 | one_more = 1; | |
1327 | } | |
1328 | if (one_more) { | |
1329 | qtd_prev = qtd; | |
1330 | qtd = ehci_qtd_alloc(oxu); | |
1331 | if (unlikely(!qtd)) | |
1332 | goto cleanup; | |
1333 | qtd->urb = urb; | |
1334 | qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma); | |
1335 | list_add_tail(&qtd->qtd_list, head); | |
1336 | ||
1337 | /* never any data in such packets */ | |
1338 | qtd_fill(qtd, 0, 0, token, 0); | |
1339 | } | |
1340 | } | |
1341 | ||
1342 | /* by default, enable interrupt on urb completion */ | |
551509d2 | 1343 | qtd->hw_token |= cpu_to_le32(QTD_IOC); |
b92a78e5 RG |
1344 | return head; |
1345 | ||
1346 | cleanup: | |
1347 | qtd_list_free(oxu, urb, head); | |
1348 | return NULL; | |
1349 | } | |
1350 | ||
1351 | /* Each QH holds a qtd list; a QH is used for everything except iso. | |
1352 | * | |
1353 | * For interrupt urbs, the scheduler must set the microframe scheduling | |
1354 | * mask(s) each time the QH gets scheduled. For highspeed, that's | |
1355 | * just one microframe in the s-mask. For split interrupt transactions | |
1356 | * there are additional complications: c-mask, maybe FSTNs. | |
1357 | */ | |
1358 | static struct ehci_qh *qh_make(struct oxu_hcd *oxu, | |
1359 | struct urb *urb, gfp_t flags) | |
1360 | { | |
1361 | struct ehci_qh *qh = oxu_qh_alloc(oxu); | |
1362 | u32 info1 = 0, info2 = 0; | |
1363 | int is_input, type; | |
1364 | int maxp = 0; | |
1365 | ||
1366 | if (!qh) | |
1367 | return qh; | |
1368 | ||
1369 | /* | |
1370 | * init endpoint/device data for this QH | |
1371 | */ | |
1372 | info1 |= usb_pipeendpoint(urb->pipe) << 8; | |
1373 | info1 |= usb_pipedevice(urb->pipe) << 0; | |
1374 | ||
1375 | is_input = usb_pipein(urb->pipe); | |
1376 | type = usb_pipetype(urb->pipe); | |
1377 | maxp = usb_maxpacket(urb->dev, urb->pipe, !is_input); | |
1378 | ||
1379 | /* Compute interrupt scheduling parameters just once, and save. | |
1380 | * - allowing for high bandwidth, how many nsec/uframe are used? | |
1381 | * - split transactions need a second CSPLIT uframe; same question | |
1382 | * - splits also need a schedule gap (for full/low speed I/O) | |
1383 | * - qh has a polling interval | |
1384 | * | |
1385 | * For control/bulk requests, the HC or TT handles these. | |
1386 | */ | |
1387 | if (type == PIPE_INTERRUPT) { | |
1388 | qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH, | |
1389 | is_input, 0, | |
1390 | hb_mult(maxp) * max_packet(maxp))); | |
1391 | qh->start = NO_FRAME; | |
1392 | ||
1393 | if (urb->dev->speed == USB_SPEED_HIGH) { | |
1394 | qh->c_usecs = 0; | |
1395 | qh->gap_uf = 0; | |
1396 | ||
1397 | qh->period = urb->interval >> 3; | |
1398 | if (qh->period == 0 && urb->interval != 1) { | |
1399 | /* NOTE interval 2 or 4 uframes could work. | |
1400 | * But interval 1 scheduling is simpler, and | |
1401 | * includes high bandwidth. | |
1402 | */ | |
1403 | dbg("intr period %d uframes, NYET!", | |
1404 | urb->interval); | |
1405 | goto done; | |
1406 | } | |
1407 | } else { | |
1408 | struct usb_tt *tt = urb->dev->tt; | |
1409 | int think_time; | |
1410 | ||
1411 | /* gap is f(FS/LS transfer times) */ | |
1412 | qh->gap_uf = 1 + usb_calc_bus_time(urb->dev->speed, | |
1413 | is_input, 0, maxp) / (125 * 1000); | |
1414 | ||
1415 | /* FIXME this just approximates SPLIT/CSPLIT times */ | |
1416 | if (is_input) { /* SPLIT, gap, CSPLIT+DATA */ | |
1417 | qh->c_usecs = qh->usecs + HS_USECS(0); | |
1418 | qh->usecs = HS_USECS(1); | |
1419 | } else { /* SPLIT+DATA, gap, CSPLIT */ | |
1420 | qh->usecs += HS_USECS(1); | |
1421 | qh->c_usecs = HS_USECS(0); | |
1422 | } | |
1423 | ||
1424 | think_time = tt ? tt->think_time : 0; | |
1425 | qh->tt_usecs = NS_TO_US(think_time + | |
1426 | usb_calc_bus_time(urb->dev->speed, | |
1427 | is_input, 0, max_packet(maxp))); | |
1428 | qh->period = urb->interval; | |
1429 | } | |
1430 | } | |
1431 | ||
1432 | /* support for tt scheduling, and access to toggles */ | |
1433 | qh->dev = urb->dev; | |
1434 | ||
1435 | /* using TT? */ | |
1436 | switch (urb->dev->speed) { | |
1437 | case USB_SPEED_LOW: | |
1438 | info1 |= (1 << 12); /* EPS "low" */ | |
1439 | /* FALL THROUGH */ | |
1440 | ||
1441 | case USB_SPEED_FULL: | |
1442 | /* EPS 0 means "full" */ | |
1443 | if (type != PIPE_INTERRUPT) | |
1444 | info1 |= (EHCI_TUNE_RL_TT << 28); | |
1445 | if (type == PIPE_CONTROL) { | |
1446 | info1 |= (1 << 27); /* for TT */ | |
1447 | info1 |= 1 << 14; /* toggle from qtd */ | |
1448 | } | |
1449 | info1 |= maxp << 16; | |
1450 | ||
1451 | info2 |= (EHCI_TUNE_MULT_TT << 30); | |
1452 | info2 |= urb->dev->ttport << 23; | |
1453 | ||
1454 | /* NOTE: if (PIPE_INTERRUPT) { scheduler sets c-mask } */ | |
1455 | ||
1456 | break; | |
1457 | ||
1458 | case USB_SPEED_HIGH: /* no TT involved */ | |
1459 | info1 |= (2 << 12); /* EPS "high" */ | |
1460 | if (type == PIPE_CONTROL) { | |
1461 | info1 |= (EHCI_TUNE_RL_HS << 28); | |
1462 | info1 |= 64 << 16; /* usb2 fixed maxpacket */ | |
1463 | info1 |= 1 << 14; /* toggle from qtd */ | |
1464 | info2 |= (EHCI_TUNE_MULT_HS << 30); | |
1465 | } else if (type == PIPE_BULK) { | |
1466 | info1 |= (EHCI_TUNE_RL_HS << 28); | |
1467 | info1 |= 512 << 16; /* usb2 fixed maxpacket */ | |
1468 | info2 |= (EHCI_TUNE_MULT_HS << 30); | |
1469 | } else { /* PIPE_INTERRUPT */ | |
1470 | info1 |= max_packet(maxp) << 16; | |
1471 | info2 |= hb_mult(maxp) << 30; | |
1472 | } | |
1473 | break; | |
1474 | default: | |
1475 | dbg("bogus dev %p speed %d", urb->dev, urb->dev->speed); | |
1476 | done: | |
1477 | qh_put(qh); | |
1478 | return NULL; | |
1479 | } | |
1480 | ||
1481 | /* NOTE: if (PIPE_INTERRUPT) { scheduler sets s-mask } */ | |
1482 | ||
1483 | /* init as live, toggle clear, advance to dummy */ | |
1484 | qh->qh_state = QH_STATE_IDLE; | |
1485 | qh->hw_info1 = cpu_to_le32(info1); | |
1486 | qh->hw_info2 = cpu_to_le32(info2); | |
1487 | usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), !is_input, 1); | |
1488 | qh_refresh(oxu, qh); | |
1489 | return qh; | |
1490 | } | |
1491 | ||
1492 | /* Move qh (and its qtds) onto async queue; maybe enable queue. | |
1493 | */ | |
1494 | static void qh_link_async(struct oxu_hcd *oxu, struct ehci_qh *qh) | |
1495 | { | |
1496 | __le32 dma = QH_NEXT(qh->qh_dma); | |
1497 | struct ehci_qh *head; | |
1498 | ||
1499 | /* (re)start the async schedule? */ | |
1500 | head = oxu->async; | |
1501 | timer_action_done(oxu, TIMER_ASYNC_OFF); | |
1502 | if (!head->qh_next.qh) { | |
1503 | u32 cmd = readl(&oxu->regs->command); | |
1504 | ||
1505 | if (!(cmd & CMD_ASE)) { | |
1506 | /* in case a clear of CMD_ASE didn't take yet */ | |
1507 | (void)handshake(oxu, &oxu->regs->status, | |
1508 | STS_ASS, 0, 150); | |
1509 | cmd |= CMD_ASE | CMD_RUN; | |
1510 | writel(cmd, &oxu->regs->command); | |
1511 | oxu_to_hcd(oxu)->state = HC_STATE_RUNNING; | |
1512 | /* posted write need not be known to HC yet ... */ | |
1513 | } | |
1514 | } | |
1515 | ||
1516 | /* clear halt and/or toggle; and maybe recover from silicon quirk */ | |
1517 | if (qh->qh_state == QH_STATE_IDLE) | |
1518 | qh_refresh(oxu, qh); | |
1519 | ||
1520 | /* splice right after start */ | |
1521 | qh->qh_next = head->qh_next; | |
1522 | qh->hw_next = head->hw_next; | |
1523 | wmb(); | |
1524 | ||
1525 | head->qh_next.qh = qh; | |
1526 | head->hw_next = dma; | |
1527 | ||
1528 | qh->qh_state = QH_STATE_LINKED; | |
1529 | /* qtd completions reported later by interrupt */ | |
1530 | } | |
1531 | ||
551509d2 | 1532 | #define QH_ADDR_MASK cpu_to_le32(0x7f) |
b92a78e5 RG |
1533 | |
1534 | /* | |
1535 | * For control/bulk/interrupt, return QH with these TDs appended. | |
1536 | * Allocates and initializes the QH if necessary. | |
1537 | * Returns null if it can't allocate a QH it needs to. | |
1538 | * If the QH has TDs (urbs) already, that's great. | |
1539 | */ | |
1540 | static struct ehci_qh *qh_append_tds(struct oxu_hcd *oxu, | |
1541 | struct urb *urb, struct list_head *qtd_list, | |
1542 | int epnum, void **ptr) | |
1543 | { | |
1544 | struct ehci_qh *qh = NULL; | |
1545 | ||
1546 | qh = (struct ehci_qh *) *ptr; | |
1547 | if (unlikely(qh == NULL)) { | |
1548 | /* can't sleep here, we have oxu->lock... */ | |
1549 | qh = qh_make(oxu, urb, GFP_ATOMIC); | |
1550 | *ptr = qh; | |
1551 | } | |
1552 | if (likely(qh != NULL)) { | |
1553 | struct ehci_qtd *qtd; | |
1554 | ||
1555 | if (unlikely(list_empty(qtd_list))) | |
1556 | qtd = NULL; | |
1557 | else | |
1558 | qtd = list_entry(qtd_list->next, struct ehci_qtd, | |
1559 | qtd_list); | |
1560 | ||
1561 | /* control qh may need patching ... */ | |
1562 | if (unlikely(epnum == 0)) { | |
1563 | ||
1564 | /* usb_reset_device() briefly reverts to address 0 */ | |
1565 | if (usb_pipedevice(urb->pipe) == 0) | |
1566 | qh->hw_info1 &= ~QH_ADDR_MASK; | |
1567 | } | |
1568 | ||
1569 | /* just one way to queue requests: swap with the dummy qtd. | |
1570 | * only hc or qh_refresh() ever modify the overlay. | |
1571 | */ | |
1572 | if (likely(qtd != NULL)) { | |
1573 | struct ehci_qtd *dummy; | |
1574 | dma_addr_t dma; | |
1575 | __le32 token; | |
1576 | ||
1577 | /* to avoid racing the HC, use the dummy td instead of | |
1578 | * the first td of our list (becomes new dummy). both | |
1579 | * tds stay deactivated until we're done, when the | |
1580 | * HC is allowed to fetch the old dummy (4.10.2). | |
1581 | */ | |
1582 | token = qtd->hw_token; | |
1583 | qtd->hw_token = HALT_BIT; | |
1584 | wmb(); | |
1585 | dummy = qh->dummy; | |
1586 | ||
1587 | dma = dummy->qtd_dma; | |
1588 | *dummy = *qtd; | |
1589 | dummy->qtd_dma = dma; | |
1590 | ||
1591 | list_del(&qtd->qtd_list); | |
1592 | list_add(&dummy->qtd_list, qtd_list); | |
1593 | list_splice(qtd_list, qh->qtd_list.prev); | |
1594 | ||
1595 | ehci_qtd_init(qtd, qtd->qtd_dma); | |
1596 | qh->dummy = qtd; | |
1597 | ||
1598 | /* hc must see the new dummy at list end */ | |
1599 | dma = qtd->qtd_dma; | |
1600 | qtd = list_entry(qh->qtd_list.prev, | |
1601 | struct ehci_qtd, qtd_list); | |
1602 | qtd->hw_next = QTD_NEXT(dma); | |
1603 | ||
1604 | /* let the hc process these next qtds */ | |
1605 | dummy->hw_token = (token & ~(0x80)); | |
1606 | wmb(); | |
1607 | dummy->hw_token = token; | |
1608 | ||
1609 | urb->hcpriv = qh_get(qh); | |
1610 | } | |
1611 | } | |
1612 | return qh; | |
1613 | } | |
1614 | ||
1615 | static int submit_async(struct oxu_hcd *oxu, struct urb *urb, | |
1616 | struct list_head *qtd_list, gfp_t mem_flags) | |
1617 | { | |
1618 | struct ehci_qtd *qtd; | |
1619 | int epnum; | |
1620 | unsigned long flags; | |
1621 | struct ehci_qh *qh = NULL; | |
1622 | int rc = 0; | |
1623 | ||
1624 | qtd = list_entry(qtd_list->next, struct ehci_qtd, qtd_list); | |
1625 | epnum = urb->ep->desc.bEndpointAddress; | |
1626 | ||
1627 | #ifdef OXU_URB_TRACE | |
1628 | oxu_dbg(oxu, "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n", | |
1629 | __func__, urb->dev->devpath, urb, | |
1630 | epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out", | |
1631 | urb->transfer_buffer_length, | |
1632 | qtd, urb->ep->hcpriv); | |
1633 | #endif | |
1634 | ||
1635 | spin_lock_irqsave(&oxu->lock, flags); | |
541c7d43 | 1636 | if (unlikely(!HCD_HW_ACCESSIBLE(oxu_to_hcd(oxu)))) { |
b92a78e5 RG |
1637 | rc = -ESHUTDOWN; |
1638 | goto done; | |
1639 | } | |
1640 | ||
1641 | qh = qh_append_tds(oxu, urb, qtd_list, epnum, &urb->ep->hcpriv); | |
1642 | if (unlikely(qh == NULL)) { | |
1643 | rc = -ENOMEM; | |
1644 | goto done; | |
1645 | } | |
1646 | ||
1647 | /* Control/bulk operations through TTs don't need scheduling, | |
1648 | * the HC and TT handle it when the TT has a buffer ready. | |
1649 | */ | |
1650 | if (likely(qh->qh_state == QH_STATE_IDLE)) | |
1651 | qh_link_async(oxu, qh_get(qh)); | |
1652 | done: | |
1653 | spin_unlock_irqrestore(&oxu->lock, flags); | |
1654 | if (unlikely(qh == NULL)) | |
1655 | qtd_list_free(oxu, urb, qtd_list); | |
1656 | return rc; | |
1657 | } | |
1658 | ||
1659 | /* The async qh for the qtds being reclaimed are now unlinked from the HC */ | |
1660 | ||
1661 | static void end_unlink_async(struct oxu_hcd *oxu) | |
1662 | { | |
1663 | struct ehci_qh *qh = oxu->reclaim; | |
1664 | struct ehci_qh *next; | |
1665 | ||
1666 | timer_action_done(oxu, TIMER_IAA_WATCHDOG); | |
1667 | ||
1668 | qh->qh_state = QH_STATE_IDLE; | |
1669 | qh->qh_next.qh = NULL; | |
1670 | qh_put(qh); /* refcount from reclaim */ | |
1671 | ||
1672 | /* other unlink(s) may be pending (in QH_STATE_UNLINK_WAIT) */ | |
1673 | next = qh->reclaim; | |
1674 | oxu->reclaim = next; | |
1675 | oxu->reclaim_ready = 0; | |
1676 | qh->reclaim = NULL; | |
1677 | ||
1678 | qh_completions(oxu, qh); | |
1679 | ||
1680 | if (!list_empty(&qh->qtd_list) | |
1681 | && HC_IS_RUNNING(oxu_to_hcd(oxu)->state)) | |
1682 | qh_link_async(oxu, qh); | |
1683 | else { | |
1684 | qh_put(qh); /* refcount from async list */ | |
1685 | ||
1686 | /* it's not free to turn the async schedule on/off; leave it | |
1687 | * active but idle for a while once it empties. | |
1688 | */ | |
1689 | if (HC_IS_RUNNING(oxu_to_hcd(oxu)->state) | |
1690 | && oxu->async->qh_next.qh == NULL) | |
1691 | timer_action(oxu, TIMER_ASYNC_OFF); | |
1692 | } | |
1693 | ||
1694 | if (next) { | |
1695 | oxu->reclaim = NULL; | |
1696 | start_unlink_async(oxu, next); | |
1697 | } | |
1698 | } | |
1699 | ||
1700 | /* makes sure the async qh will become idle */ | |
1701 | /* caller must own oxu->lock */ | |
1702 | ||
1703 | static void start_unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh) | |
1704 | { | |
1705 | int cmd = readl(&oxu->regs->command); | |
1706 | struct ehci_qh *prev; | |
1707 | ||
1708 | #ifdef DEBUG | |
1709 | assert_spin_locked(&oxu->lock); | |
1710 | if (oxu->reclaim || (qh->qh_state != QH_STATE_LINKED | |
1711 | && qh->qh_state != QH_STATE_UNLINK_WAIT)) | |
1712 | BUG(); | |
1713 | #endif | |
1714 | ||
1715 | /* stop async schedule right now? */ | |
1716 | if (unlikely(qh == oxu->async)) { | |
1717 | /* can't get here without STS_ASS set */ | |
1718 | if (oxu_to_hcd(oxu)->state != HC_STATE_HALT | |
1719 | && !oxu->reclaim) { | |
1720 | /* ... and CMD_IAAD clear */ | |
1721 | writel(cmd & ~CMD_ASE, &oxu->regs->command); | |
1722 | wmb(); | |
1723 | /* handshake later, if we need to */ | |
1724 | timer_action_done(oxu, TIMER_ASYNC_OFF); | |
1725 | } | |
1726 | return; | |
1727 | } | |
1728 | ||
1729 | qh->qh_state = QH_STATE_UNLINK; | |
1730 | oxu->reclaim = qh = qh_get(qh); | |
1731 | ||
1732 | prev = oxu->async; | |
1733 | while (prev->qh_next.qh != qh) | |
1734 | prev = prev->qh_next.qh; | |
1735 | ||
1736 | prev->hw_next = qh->hw_next; | |
1737 | prev->qh_next = qh->qh_next; | |
1738 | wmb(); | |
1739 | ||
1740 | if (unlikely(oxu_to_hcd(oxu)->state == HC_STATE_HALT)) { | |
1741 | /* if (unlikely(qh->reclaim != 0)) | |
1742 | * this will recurse, probably not much | |
1743 | */ | |
1744 | end_unlink_async(oxu); | |
1745 | return; | |
1746 | } | |
1747 | ||
1748 | oxu->reclaim_ready = 0; | |
1749 | cmd |= CMD_IAAD; | |
1750 | writel(cmd, &oxu->regs->command); | |
1751 | (void) readl(&oxu->regs->command); | |
1752 | timer_action(oxu, TIMER_IAA_WATCHDOG); | |
1753 | } | |
1754 | ||
1755 | static void scan_async(struct oxu_hcd *oxu) | |
1756 | { | |
1757 | struct ehci_qh *qh; | |
1758 | enum ehci_timer_action action = TIMER_IO_WATCHDOG; | |
1759 | ||
1760 | if (!++(oxu->stamp)) | |
1761 | oxu->stamp++; | |
1762 | timer_action_done(oxu, TIMER_ASYNC_SHRINK); | |
1763 | rescan: | |
1764 | qh = oxu->async->qh_next.qh; | |
1765 | if (likely(qh != NULL)) { | |
1766 | do { | |
1767 | /* clean any finished work for this qh */ | |
1768 | if (!list_empty(&qh->qtd_list) | |
1769 | && qh->stamp != oxu->stamp) { | |
1770 | int temp; | |
1771 | ||
1772 | /* unlinks could happen here; completion | |
1773 | * reporting drops the lock. rescan using | |
1774 | * the latest schedule, but don't rescan | |
1775 | * qhs we already finished (no looping). | |
1776 | */ | |
1777 | qh = qh_get(qh); | |
1778 | qh->stamp = oxu->stamp; | |
1779 | temp = qh_completions(oxu, qh); | |
1780 | qh_put(qh); | |
1781 | if (temp != 0) | |
1782 | goto rescan; | |
1783 | } | |
1784 | ||
1785 | /* unlink idle entries, reducing HC PCI usage as well | |
1786 | * as HCD schedule-scanning costs. delay for any qh | |
1787 | * we just scanned, there's a not-unusual case that it | |
1788 | * doesn't stay idle for long. | |
1789 | * (plus, avoids some kind of re-activation race.) | |
1790 | */ | |
1791 | if (list_empty(&qh->qtd_list)) { | |
1792 | if (qh->stamp == oxu->stamp) | |
1793 | action = TIMER_ASYNC_SHRINK; | |
1794 | else if (!oxu->reclaim | |
1795 | && qh->qh_state == QH_STATE_LINKED) | |
1796 | start_unlink_async(oxu, qh); | |
1797 | } | |
1798 | ||
1799 | qh = qh->qh_next.qh; | |
1800 | } while (qh); | |
1801 | } | |
1802 | if (action == TIMER_ASYNC_SHRINK) | |
1803 | timer_action(oxu, TIMER_ASYNC_SHRINK); | |
1804 | } | |
1805 | ||
1806 | /* | |
1807 | * periodic_next_shadow - return "next" pointer on shadow list | |
1808 | * @periodic: host pointer to qh/itd/sitd | |
1809 | * @tag: hardware tag for type of this record | |
1810 | */ | |
1811 | static union ehci_shadow *periodic_next_shadow(union ehci_shadow *periodic, | |
1812 | __le32 tag) | |
1813 | { | |
1814 | switch (tag) { | |
1815 | default: | |
1816 | case Q_TYPE_QH: | |
1817 | return &periodic->qh->qh_next; | |
1818 | } | |
1819 | } | |
1820 | ||
1821 | /* caller must hold oxu->lock */ | |
1822 | static void periodic_unlink(struct oxu_hcd *oxu, unsigned frame, void *ptr) | |
1823 | { | |
1824 | union ehci_shadow *prev_p = &oxu->pshadow[frame]; | |
1825 | __le32 *hw_p = &oxu->periodic[frame]; | |
1826 | union ehci_shadow here = *prev_p; | |
1827 | ||
1828 | /* find predecessor of "ptr"; hw and shadow lists are in sync */ | |
1829 | while (here.ptr && here.ptr != ptr) { | |
1830 | prev_p = periodic_next_shadow(prev_p, Q_NEXT_TYPE(*hw_p)); | |
1831 | hw_p = here.hw_next; | |
1832 | here = *prev_p; | |
1833 | } | |
1834 | /* an interrupt entry (at list end) could have been shared */ | |
1835 | if (!here.ptr) | |
1836 | return; | |
1837 | ||
1838 | /* update shadow and hardware lists ... the old "next" pointers | |
1839 | * from ptr may still be in use, the caller updates them. | |
1840 | */ | |
1841 | *prev_p = *periodic_next_shadow(&here, Q_NEXT_TYPE(*hw_p)); | |
1842 | *hw_p = *here.hw_next; | |
1843 | } | |
1844 | ||
1845 | /* how many of the uframe's 125 usecs are allocated? */ | |
1846 | static unsigned short periodic_usecs(struct oxu_hcd *oxu, | |
1847 | unsigned frame, unsigned uframe) | |
1848 | { | |
1849 | __le32 *hw_p = &oxu->periodic[frame]; | |
1850 | union ehci_shadow *q = &oxu->pshadow[frame]; | |
1851 | unsigned usecs = 0; | |
1852 | ||
1853 | while (q->ptr) { | |
1854 | switch (Q_NEXT_TYPE(*hw_p)) { | |
1855 | case Q_TYPE_QH: | |
1856 | default: | |
1857 | /* is it in the S-mask? */ | |
1858 | if (q->qh->hw_info2 & cpu_to_le32(1 << uframe)) | |
1859 | usecs += q->qh->usecs; | |
1860 | /* ... or C-mask? */ | |
1861 | if (q->qh->hw_info2 & cpu_to_le32(1 << (8 + uframe))) | |
1862 | usecs += q->qh->c_usecs; | |
1863 | hw_p = &q->qh->hw_next; | |
1864 | q = &q->qh->qh_next; | |
1865 | break; | |
1866 | } | |
1867 | } | |
1868 | #ifdef DEBUG | |
1869 | if (usecs > 100) | |
1870 | oxu_err(oxu, "uframe %d sched overrun: %d usecs\n", | |
1871 | frame * 8 + uframe, usecs); | |
1872 | #endif | |
1873 | return usecs; | |
1874 | } | |
1875 | ||
1876 | static int enable_periodic(struct oxu_hcd *oxu) | |
1877 | { | |
1878 | u32 cmd; | |
1879 | int status; | |
1880 | ||
1881 | /* did clearing PSE did take effect yet? | |
1882 | * takes effect only at frame boundaries... | |
1883 | */ | |
1884 | status = handshake(oxu, &oxu->regs->status, STS_PSS, 0, 9 * 125); | |
1885 | if (status != 0) { | |
1886 | oxu_to_hcd(oxu)->state = HC_STATE_HALT; | |
1887 | return status; | |
1888 | } | |
1889 | ||
1890 | cmd = readl(&oxu->regs->command) | CMD_PSE; | |
1891 | writel(cmd, &oxu->regs->command); | |
1892 | /* posted write ... PSS happens later */ | |
1893 | oxu_to_hcd(oxu)->state = HC_STATE_RUNNING; | |
1894 | ||
1895 | /* make sure ehci_work scans these */ | |
1896 | oxu->next_uframe = readl(&oxu->regs->frame_index) | |
1897 | % (oxu->periodic_size << 3); | |
1898 | return 0; | |
1899 | } | |
1900 | ||
1901 | static int disable_periodic(struct oxu_hcd *oxu) | |
1902 | { | |
1903 | u32 cmd; | |
1904 | int status; | |
1905 | ||
1906 | /* did setting PSE not take effect yet? | |
1907 | * takes effect only at frame boundaries... | |
1908 | */ | |
1909 | status = handshake(oxu, &oxu->regs->status, STS_PSS, STS_PSS, 9 * 125); | |
1910 | if (status != 0) { | |
1911 | oxu_to_hcd(oxu)->state = HC_STATE_HALT; | |
1912 | return status; | |
1913 | } | |
1914 | ||
1915 | cmd = readl(&oxu->regs->command) & ~CMD_PSE; | |
1916 | writel(cmd, &oxu->regs->command); | |
1917 | /* posted write ... */ | |
1918 | ||
1919 | oxu->next_uframe = -1; | |
1920 | return 0; | |
1921 | } | |
1922 | ||
1923 | /* periodic schedule slots have iso tds (normal or split) first, then a | |
1924 | * sparse tree for active interrupt transfers. | |
1925 | * | |
1926 | * this just links in a qh; caller guarantees uframe masks are set right. | |
1927 | * no FSTN support (yet; oxu 0.96+) | |
1928 | */ | |
1929 | static int qh_link_periodic(struct oxu_hcd *oxu, struct ehci_qh *qh) | |
1930 | { | |
1931 | unsigned i; | |
1932 | unsigned period = qh->period; | |
1933 | ||
1934 | dev_dbg(&qh->dev->dev, | |
1935 | "link qh%d-%04x/%p start %d [%d/%d us]\n", | |
1936 | period, le32_to_cpup(&qh->hw_info2) & (QH_CMASK | QH_SMASK), | |
1937 | qh, qh->start, qh->usecs, qh->c_usecs); | |
1938 | ||
1939 | /* high bandwidth, or otherwise every microframe */ | |
1940 | if (period == 0) | |
1941 | period = 1; | |
1942 | ||
1943 | for (i = qh->start; i < oxu->periodic_size; i += period) { | |
1944 | union ehci_shadow *prev = &oxu->pshadow[i]; | |
1945 | __le32 *hw_p = &oxu->periodic[i]; | |
1946 | union ehci_shadow here = *prev; | |
1947 | __le32 type = 0; | |
1948 | ||
1949 | /* skip the iso nodes at list head */ | |
1950 | while (here.ptr) { | |
1951 | type = Q_NEXT_TYPE(*hw_p); | |
1952 | if (type == Q_TYPE_QH) | |
1953 | break; | |
1954 | prev = periodic_next_shadow(prev, type); | |
1955 | hw_p = &here.qh->hw_next; | |
1956 | here = *prev; | |
1957 | } | |
1958 | ||
1959 | /* sorting each branch by period (slow-->fast) | |
1960 | * enables sharing interior tree nodes | |
1961 | */ | |
1962 | while (here.ptr && qh != here.qh) { | |
1963 | if (qh->period > here.qh->period) | |
1964 | break; | |
1965 | prev = &here.qh->qh_next; | |
1966 | hw_p = &here.qh->hw_next; | |
1967 | here = *prev; | |
1968 | } | |
1969 | /* link in this qh, unless some earlier pass did that */ | |
1970 | if (qh != here.qh) { | |
1971 | qh->qh_next = here; | |
1972 | if (here.qh) | |
1973 | qh->hw_next = *hw_p; | |
1974 | wmb(); | |
1975 | prev->qh = qh; | |
1976 | *hw_p = QH_NEXT(qh->qh_dma); | |
1977 | } | |
1978 | } | |
1979 | qh->qh_state = QH_STATE_LINKED; | |
1980 | qh_get(qh); | |
1981 | ||
1982 | /* update per-qh bandwidth for usbfs */ | |
1983 | oxu_to_hcd(oxu)->self.bandwidth_allocated += qh->period | |
1984 | ? ((qh->usecs + qh->c_usecs) / qh->period) | |
1985 | : (qh->usecs * 8); | |
1986 | ||
1987 | /* maybe enable periodic schedule processing */ | |
1988 | if (!oxu->periodic_sched++) | |
1989 | return enable_periodic(oxu); | |
1990 | ||
1991 | return 0; | |
1992 | } | |
1993 | ||
1994 | static void qh_unlink_periodic(struct oxu_hcd *oxu, struct ehci_qh *qh) | |
1995 | { | |
1996 | unsigned i; | |
1997 | unsigned period; | |
1998 | ||
1999 | /* FIXME: | |
2000 | * IF this isn't high speed | |
2001 | * and this qh is active in the current uframe | |
2002 | * (and overlay token SplitXstate is false?) | |
2003 | * THEN | |
551509d2 | 2004 | * qh->hw_info1 |= cpu_to_le32(1 << 7 "ignore"); |
b92a78e5 RG |
2005 | */ |
2006 | ||
2007 | /* high bandwidth, or otherwise part of every microframe */ | |
2008 | period = qh->period; | |
2009 | if (period == 0) | |
2010 | period = 1; | |
2011 | ||
2012 | for (i = qh->start; i < oxu->periodic_size; i += period) | |
2013 | periodic_unlink(oxu, i, qh); | |
2014 | ||
2015 | /* update per-qh bandwidth for usbfs */ | |
2016 | oxu_to_hcd(oxu)->self.bandwidth_allocated -= qh->period | |
2017 | ? ((qh->usecs + qh->c_usecs) / qh->period) | |
2018 | : (qh->usecs * 8); | |
2019 | ||
2020 | dev_dbg(&qh->dev->dev, | |
2021 | "unlink qh%d-%04x/%p start %d [%d/%d us]\n", | |
2022 | qh->period, | |
2023 | le32_to_cpup(&qh->hw_info2) & (QH_CMASK | QH_SMASK), | |
2024 | qh, qh->start, qh->usecs, qh->c_usecs); | |
2025 | ||
2026 | /* qh->qh_next still "live" to HC */ | |
2027 | qh->qh_state = QH_STATE_UNLINK; | |
2028 | qh->qh_next.ptr = NULL; | |
2029 | qh_put(qh); | |
2030 | ||
2031 | /* maybe turn off periodic schedule */ | |
2032 | oxu->periodic_sched--; | |
2033 | if (!oxu->periodic_sched) | |
2034 | (void) disable_periodic(oxu); | |
2035 | } | |
2036 | ||
2037 | static void intr_deschedule(struct oxu_hcd *oxu, struct ehci_qh *qh) | |
2038 | { | |
2039 | unsigned wait; | |
2040 | ||
2041 | qh_unlink_periodic(oxu, qh); | |
2042 | ||
2043 | /* simple/paranoid: always delay, expecting the HC needs to read | |
2044 | * qh->hw_next or finish a writeback after SPLIT/CSPLIT ... and | |
2045 | * expect khubd to clean up after any CSPLITs we won't issue. | |
2046 | * active high speed queues may need bigger delays... | |
2047 | */ | |
2048 | if (list_empty(&qh->qtd_list) | |
551509d2 | 2049 | || (cpu_to_le32(QH_CMASK) & qh->hw_info2) != 0) |
b92a78e5 RG |
2050 | wait = 2; |
2051 | else | |
2052 | wait = 55; /* worst case: 3 * 1024 */ | |
2053 | ||
2054 | udelay(wait); | |
2055 | qh->qh_state = QH_STATE_IDLE; | |
2056 | qh->hw_next = EHCI_LIST_END; | |
2057 | wmb(); | |
2058 | } | |
2059 | ||
2060 | static int check_period(struct oxu_hcd *oxu, | |
2061 | unsigned frame, unsigned uframe, | |
2062 | unsigned period, unsigned usecs) | |
2063 | { | |
2064 | int claimed; | |
2065 | ||
2066 | /* complete split running into next frame? | |
2067 | * given FSTN support, we could sometimes check... | |
2068 | */ | |
2069 | if (uframe >= 8) | |
2070 | return 0; | |
2071 | ||
2072 | /* | |
2073 | * 80% periodic == 100 usec/uframe available | |
2074 | * convert "usecs we need" to "max already claimed" | |
2075 | */ | |
2076 | usecs = 100 - usecs; | |
2077 | ||
2078 | /* we "know" 2 and 4 uframe intervals were rejected; so | |
2079 | * for period 0, check _every_ microframe in the schedule. | |
2080 | */ | |
2081 | if (unlikely(period == 0)) { | |
2082 | do { | |
2083 | for (uframe = 0; uframe < 7; uframe++) { | |
2084 | claimed = periodic_usecs(oxu, frame, uframe); | |
2085 | if (claimed > usecs) | |
2086 | return 0; | |
2087 | } | |
2088 | } while ((frame += 1) < oxu->periodic_size); | |
2089 | ||
2090 | /* just check the specified uframe, at that period */ | |
2091 | } else { | |
2092 | do { | |
2093 | claimed = periodic_usecs(oxu, frame, uframe); | |
2094 | if (claimed > usecs) | |
2095 | return 0; | |
2096 | } while ((frame += period) < oxu->periodic_size); | |
2097 | } | |
2098 | ||
2099 | return 1; | |
2100 | } | |
2101 | ||
2102 | static int check_intr_schedule(struct oxu_hcd *oxu, | |
2103 | unsigned frame, unsigned uframe, | |
2104 | const struct ehci_qh *qh, __le32 *c_maskp) | |
2105 | { | |
2106 | int retval = -ENOSPC; | |
2107 | ||
2108 | if (qh->c_usecs && uframe >= 6) /* FSTN territory? */ | |
2109 | goto done; | |
2110 | ||
2111 | if (!check_period(oxu, frame, uframe, qh->period, qh->usecs)) | |
2112 | goto done; | |
2113 | if (!qh->c_usecs) { | |
2114 | retval = 0; | |
2115 | *c_maskp = 0; | |
2116 | goto done; | |
2117 | } | |
2118 | ||
2119 | done: | |
2120 | return retval; | |
2121 | } | |
2122 | ||
2123 | /* "first fit" scheduling policy used the first time through, | |
2124 | * or when the previous schedule slot can't be re-used. | |
2125 | */ | |
2126 | static int qh_schedule(struct oxu_hcd *oxu, struct ehci_qh *qh) | |
2127 | { | |
2128 | int status; | |
2129 | unsigned uframe; | |
2130 | __le32 c_mask; | |
2131 | unsigned frame; /* 0..(qh->period - 1), or NO_FRAME */ | |
2132 | ||
2133 | qh_refresh(oxu, qh); | |
2134 | qh->hw_next = EHCI_LIST_END; | |
2135 | frame = qh->start; | |
2136 | ||
2137 | /* reuse the previous schedule slots, if we can */ | |
2138 | if (frame < qh->period) { | |
2139 | uframe = ffs(le32_to_cpup(&qh->hw_info2) & QH_SMASK); | |
2140 | status = check_intr_schedule(oxu, frame, --uframe, | |
2141 | qh, &c_mask); | |
2142 | } else { | |
2143 | uframe = 0; | |
2144 | c_mask = 0; | |
2145 | status = -ENOSPC; | |
2146 | } | |
2147 | ||
2148 | /* else scan the schedule to find a group of slots such that all | |
2149 | * uframes have enough periodic bandwidth available. | |
2150 | */ | |
2151 | if (status) { | |
2152 | /* "normal" case, uframing flexible except with splits */ | |
2153 | if (qh->period) { | |
2154 | frame = qh->period - 1; | |
2155 | do { | |
2156 | for (uframe = 0; uframe < 8; uframe++) { | |
2157 | status = check_intr_schedule(oxu, | |
2158 | frame, uframe, qh, | |
2159 | &c_mask); | |
2160 | if (status == 0) | |
2161 | break; | |
2162 | } | |
2163 | } while (status && frame--); | |
2164 | ||
2165 | /* qh->period == 0 means every uframe */ | |
2166 | } else { | |
2167 | frame = 0; | |
2168 | status = check_intr_schedule(oxu, 0, 0, qh, &c_mask); | |
2169 | } | |
2170 | if (status) | |
2171 | goto done; | |
2172 | qh->start = frame; | |
2173 | ||
2174 | /* reset S-frame and (maybe) C-frame masks */ | |
551509d2 | 2175 | qh->hw_info2 &= cpu_to_le32(~(QH_CMASK | QH_SMASK)); |
b92a78e5 RG |
2176 | qh->hw_info2 |= qh->period |
2177 | ? cpu_to_le32(1 << uframe) | |
551509d2 | 2178 | : cpu_to_le32(QH_SMASK); |
b92a78e5 RG |
2179 | qh->hw_info2 |= c_mask; |
2180 | } else | |
2181 | oxu_dbg(oxu, "reused qh %p schedule\n", qh); | |
2182 | ||
2183 | /* stuff into the periodic schedule */ | |
2184 | status = qh_link_periodic(oxu, qh); | |
2185 | done: | |
2186 | return status; | |
2187 | } | |
2188 | ||
2189 | static int intr_submit(struct oxu_hcd *oxu, struct urb *urb, | |
2190 | struct list_head *qtd_list, gfp_t mem_flags) | |
2191 | { | |
2192 | unsigned epnum; | |
2193 | unsigned long flags; | |
2194 | struct ehci_qh *qh; | |
2195 | int status = 0; | |
2196 | struct list_head empty; | |
2197 | ||
2198 | /* get endpoint and transfer/schedule data */ | |
2199 | epnum = urb->ep->desc.bEndpointAddress; | |
2200 | ||
2201 | spin_lock_irqsave(&oxu->lock, flags); | |
2202 | ||
541c7d43 | 2203 | if (unlikely(!HCD_HW_ACCESSIBLE(oxu_to_hcd(oxu)))) { |
b92a78e5 RG |
2204 | status = -ESHUTDOWN; |
2205 | goto done; | |
2206 | } | |
2207 | ||
2208 | /* get qh and force any scheduling errors */ | |
2209 | INIT_LIST_HEAD(&empty); | |
2210 | qh = qh_append_tds(oxu, urb, &empty, epnum, &urb->ep->hcpriv); | |
2211 | if (qh == NULL) { | |
2212 | status = -ENOMEM; | |
2213 | goto done; | |
2214 | } | |
2215 | if (qh->qh_state == QH_STATE_IDLE) { | |
2216 | status = qh_schedule(oxu, qh); | |
2217 | if (status != 0) | |
2218 | goto done; | |
2219 | } | |
2220 | ||
2221 | /* then queue the urb's tds to the qh */ | |
2222 | qh = qh_append_tds(oxu, urb, qtd_list, epnum, &urb->ep->hcpriv); | |
2223 | BUG_ON(qh == NULL); | |
2224 | ||
2225 | /* ... update usbfs periodic stats */ | |
2226 | oxu_to_hcd(oxu)->self.bandwidth_int_reqs++; | |
2227 | ||
2228 | done: | |
2229 | spin_unlock_irqrestore(&oxu->lock, flags); | |
2230 | if (status) | |
2231 | qtd_list_free(oxu, urb, qtd_list); | |
2232 | ||
2233 | return status; | |
2234 | } | |
2235 | ||
2236 | static inline int itd_submit(struct oxu_hcd *oxu, struct urb *urb, | |
2237 | gfp_t mem_flags) | |
2238 | { | |
2239 | oxu_dbg(oxu, "iso support is missing!\n"); | |
2240 | return -ENOSYS; | |
2241 | } | |
2242 | ||
2243 | static inline int sitd_submit(struct oxu_hcd *oxu, struct urb *urb, | |
2244 | gfp_t mem_flags) | |
2245 | { | |
2246 | oxu_dbg(oxu, "split iso support is missing!\n"); | |
2247 | return -ENOSYS; | |
2248 | } | |
2249 | ||
2250 | static void scan_periodic(struct oxu_hcd *oxu) | |
2251 | { | |
2252 | unsigned frame, clock, now_uframe, mod; | |
2253 | unsigned modified; | |
2254 | ||
2255 | mod = oxu->periodic_size << 3; | |
2256 | ||
2257 | /* | |
2258 | * When running, scan from last scan point up to "now" | |
2259 | * else clean up by scanning everything that's left. | |
2260 | * Touches as few pages as possible: cache-friendly. | |
2261 | */ | |
2262 | now_uframe = oxu->next_uframe; | |
2263 | if (HC_IS_RUNNING(oxu_to_hcd(oxu)->state)) | |
2264 | clock = readl(&oxu->regs->frame_index); | |
2265 | else | |
2266 | clock = now_uframe + mod - 1; | |
2267 | clock %= mod; | |
2268 | ||
2269 | for (;;) { | |
2270 | union ehci_shadow q, *q_p; | |
2271 | __le32 type, *hw_p; | |
2272 | unsigned uframes; | |
2273 | ||
2274 | /* don't scan past the live uframe */ | |
2275 | frame = now_uframe >> 3; | |
2276 | if (frame == (clock >> 3)) | |
2277 | uframes = now_uframe & 0x07; | |
2278 | else { | |
2279 | /* safe to scan the whole frame at once */ | |
2280 | now_uframe |= 0x07; | |
2281 | uframes = 8; | |
2282 | } | |
2283 | ||
2284 | restart: | |
2285 | /* scan each element in frame's queue for completions */ | |
2286 | q_p = &oxu->pshadow[frame]; | |
2287 | hw_p = &oxu->periodic[frame]; | |
2288 | q.ptr = q_p->ptr; | |
2289 | type = Q_NEXT_TYPE(*hw_p); | |
2290 | modified = 0; | |
2291 | ||
2292 | while (q.ptr != NULL) { | |
2293 | union ehci_shadow temp; | |
2294 | int live; | |
2295 | ||
2296 | live = HC_IS_RUNNING(oxu_to_hcd(oxu)->state); | |
2297 | switch (type) { | |
2298 | case Q_TYPE_QH: | |
2299 | /* handle any completions */ | |
2300 | temp.qh = qh_get(q.qh); | |
2301 | type = Q_NEXT_TYPE(q.qh->hw_next); | |
2302 | q = q.qh->qh_next; | |
2303 | modified = qh_completions(oxu, temp.qh); | |
2304 | if (unlikely(list_empty(&temp.qh->qtd_list))) | |
2305 | intr_deschedule(oxu, temp.qh); | |
2306 | qh_put(temp.qh); | |
2307 | break; | |
2308 | default: | |
2309 | dbg("corrupt type %d frame %d shadow %p", | |
2310 | type, frame, q.ptr); | |
2311 | q.ptr = NULL; | |
2312 | } | |
2313 | ||
2314 | /* assume completion callbacks modify the queue */ | |
2315 | if (unlikely(modified)) | |
2316 | goto restart; | |
2317 | } | |
2318 | ||
2319 | /* Stop when we catch up to the HC */ | |
2320 | ||
2321 | /* FIXME: this assumes we won't get lapped when | |
2322 | * latencies climb; that should be rare, but... | |
2323 | * detect it, and just go all the way around. | |
2324 | * FLR might help detect this case, so long as latencies | |
2325 | * don't exceed periodic_size msec (default 1.024 sec). | |
2326 | */ | |
2327 | ||
2328 | /* FIXME: likewise assumes HC doesn't halt mid-scan */ | |
2329 | ||
2330 | if (now_uframe == clock) { | |
2331 | unsigned now; | |
2332 | ||
2333 | if (!HC_IS_RUNNING(oxu_to_hcd(oxu)->state)) | |
2334 | break; | |
2335 | oxu->next_uframe = now_uframe; | |
2336 | now = readl(&oxu->regs->frame_index) % mod; | |
2337 | if (now_uframe == now) | |
2338 | break; | |
2339 | ||
2340 | /* rescan the rest of this frame, then ... */ | |
2341 | clock = now; | |
2342 | } else { | |
2343 | now_uframe++; | |
2344 | now_uframe %= mod; | |
2345 | } | |
2346 | } | |
2347 | } | |
2348 | ||
2349 | /* On some systems, leaving remote wakeup enabled prevents system shutdown. | |
2350 | * The firmware seems to think that powering off is a wakeup event! | |
2351 | * This routine turns off remote wakeup and everything else, on all ports. | |
2352 | */ | |
2353 | static void ehci_turn_off_all_ports(struct oxu_hcd *oxu) | |
2354 | { | |
2355 | int port = HCS_N_PORTS(oxu->hcs_params); | |
2356 | ||
2357 | while (port--) | |
2358 | writel(PORT_RWC_BITS, &oxu->regs->port_status[port]); | |
2359 | } | |
2360 | ||
2361 | static void ehci_port_power(struct oxu_hcd *oxu, int is_on) | |
2362 | { | |
2363 | unsigned port; | |
2364 | ||
2365 | if (!HCS_PPC(oxu->hcs_params)) | |
2366 | return; | |
2367 | ||
2368 | oxu_dbg(oxu, "...power%s ports...\n", is_on ? "up" : "down"); | |
2369 | for (port = HCS_N_PORTS(oxu->hcs_params); port > 0; ) | |
2370 | (void) oxu_hub_control(oxu_to_hcd(oxu), | |
2371 | is_on ? SetPortFeature : ClearPortFeature, | |
2372 | USB_PORT_FEAT_POWER, | |
2373 | port--, NULL, 0); | |
2374 | msleep(20); | |
2375 | } | |
2376 | ||
2377 | /* Called from some interrupts, timers, and so on. | |
2378 | * It calls driver completion functions, after dropping oxu->lock. | |
2379 | */ | |
2380 | static void ehci_work(struct oxu_hcd *oxu) | |
2381 | { | |
2382 | timer_action_done(oxu, TIMER_IO_WATCHDOG); | |
2383 | if (oxu->reclaim_ready) | |
2384 | end_unlink_async(oxu); | |
2385 | ||
2386 | /* another CPU may drop oxu->lock during a schedule scan while | |
2387 | * it reports urb completions. this flag guards against bogus | |
2388 | * attempts at re-entrant schedule scanning. | |
2389 | */ | |
2390 | if (oxu->scanning) | |
2391 | return; | |
2392 | oxu->scanning = 1; | |
2393 | scan_async(oxu); | |
2394 | if (oxu->next_uframe != -1) | |
2395 | scan_periodic(oxu); | |
2396 | oxu->scanning = 0; | |
2397 | ||
2398 | /* the IO watchdog guards against hardware or driver bugs that | |
2399 | * misplace IRQs, and should let us run completely without IRQs. | |
2400 | * such lossage has been observed on both VT6202 and VT8235. | |
2401 | */ | |
2402 | if (HC_IS_RUNNING(oxu_to_hcd(oxu)->state) && | |
2403 | (oxu->async->qh_next.ptr != NULL || | |
2404 | oxu->periodic_sched != 0)) | |
2405 | timer_action(oxu, TIMER_IO_WATCHDOG); | |
2406 | } | |
2407 | ||
2408 | static void unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh) | |
2409 | { | |
2410 | /* if we need to use IAA and it's busy, defer */ | |
2411 | if (qh->qh_state == QH_STATE_LINKED | |
2412 | && oxu->reclaim | |
2413 | && HC_IS_RUNNING(oxu_to_hcd(oxu)->state)) { | |
2414 | struct ehci_qh *last; | |
2415 | ||
2416 | for (last = oxu->reclaim; | |
2417 | last->reclaim; | |
2418 | last = last->reclaim) | |
2419 | continue; | |
2420 | qh->qh_state = QH_STATE_UNLINK_WAIT; | |
2421 | last->reclaim = qh; | |
2422 | ||
2423 | /* bypass IAA if the hc can't care */ | |
2424 | } else if (!HC_IS_RUNNING(oxu_to_hcd(oxu)->state) && oxu->reclaim) | |
2425 | end_unlink_async(oxu); | |
2426 | ||
2427 | /* something else might have unlinked the qh by now */ | |
2428 | if (qh->qh_state == QH_STATE_LINKED) | |
2429 | start_unlink_async(oxu, qh); | |
2430 | } | |
2431 | ||
2432 | /* | |
2433 | * USB host controller methods | |
2434 | */ | |
2435 | ||
2436 | static irqreturn_t oxu210_hcd_irq(struct usb_hcd *hcd) | |
2437 | { | |
2438 | struct oxu_hcd *oxu = hcd_to_oxu(hcd); | |
2439 | u32 status, pcd_status = 0; | |
2440 | int bh; | |
2441 | ||
2442 | spin_lock(&oxu->lock); | |
2443 | ||
2444 | status = readl(&oxu->regs->status); | |
2445 | ||
2446 | /* e.g. cardbus physical eject */ | |
2447 | if (status == ~(u32) 0) { | |
2448 | oxu_dbg(oxu, "device removed\n"); | |
2449 | goto dead; | |
2450 | } | |
2451 | ||
2452 | status &= INTR_MASK; | |
2453 | if (!status) { /* irq sharing? */ | |
2454 | spin_unlock(&oxu->lock); | |
2455 | return IRQ_NONE; | |
2456 | } | |
2457 | ||
2458 | /* clear (just) interrupts */ | |
2459 | writel(status, &oxu->regs->status); | |
2460 | readl(&oxu->regs->command); /* unblock posted write */ | |
2461 | bh = 0; | |
2462 | ||
2463 | #ifdef OXU_VERBOSE_DEBUG | |
2464 | /* unrequested/ignored: Frame List Rollover */ | |
2465 | dbg_status(oxu, "irq", status); | |
2466 | #endif | |
2467 | ||
2468 | /* INT, ERR, and IAA interrupt rates can be throttled */ | |
2469 | ||
2470 | /* normal [4.15.1.2] or error [4.15.1.1] completion */ | |
2471 | if (likely((status & (STS_INT|STS_ERR)) != 0)) | |
2472 | bh = 1; | |
2473 | ||
2474 | /* complete the unlinking of some qh [4.15.2.3] */ | |
2475 | if (status & STS_IAA) { | |
2476 | oxu->reclaim_ready = 1; | |
2477 | bh = 1; | |
2478 | } | |
2479 | ||
2480 | /* remote wakeup [4.3.1] */ | |
2481 | if (status & STS_PCD) { | |
2482 | unsigned i = HCS_N_PORTS(oxu->hcs_params); | |
2483 | pcd_status = status; | |
2484 | ||
2485 | /* resume root hub? */ | |
2486 | if (!(readl(&oxu->regs->command) & CMD_RUN)) | |
2487 | usb_hcd_resume_root_hub(hcd); | |
2488 | ||
2489 | while (i--) { | |
2490 | int pstatus = readl(&oxu->regs->port_status[i]); | |
2491 | ||
2492 | if (pstatus & PORT_OWNER) | |
2493 | continue; | |
2494 | if (!(pstatus & PORT_RESUME) | |
2495 | || oxu->reset_done[i] != 0) | |
2496 | continue; | |
2497 | ||
2498 | /* start 20 msec resume signaling from this port, | |
2499 | * and make khubd collect PORT_STAT_C_SUSPEND to | |
2500 | * stop that signaling. | |
2501 | */ | |
2502 | oxu->reset_done[i] = jiffies + msecs_to_jiffies(20); | |
2503 | oxu_dbg(oxu, "port %d remote wakeup\n", i + 1); | |
2504 | mod_timer(&hcd->rh_timer, oxu->reset_done[i]); | |
2505 | } | |
2506 | } | |
2507 | ||
2508 | /* PCI errors [4.15.2.4] */ | |
2509 | if (unlikely((status & STS_FATAL) != 0)) { | |
2510 | /* bogus "fatal" IRQs appear on some chips... why? */ | |
2511 | status = readl(&oxu->regs->status); | |
2512 | dbg_cmd(oxu, "fatal", readl(&oxu->regs->command)); | |
2513 | dbg_status(oxu, "fatal", status); | |
2514 | if (status & STS_HALT) { | |
2515 | oxu_err(oxu, "fatal error\n"); | |
2516 | dead: | |
2517 | ehci_reset(oxu); | |
2518 | writel(0, &oxu->regs->configured_flag); | |
2519 | /* generic layer kills/unlinks all urbs, then | |
2520 | * uses oxu_stop to clean up the rest | |
2521 | */ | |
2522 | bh = 1; | |
2523 | } | |
2524 | } | |
2525 | ||
2526 | if (bh) | |
2527 | ehci_work(oxu); | |
2528 | spin_unlock(&oxu->lock); | |
2529 | if (pcd_status & STS_PCD) | |
2530 | usb_hcd_poll_rh_status(hcd); | |
2531 | return IRQ_HANDLED; | |
2532 | } | |
2533 | ||
2534 | static irqreturn_t oxu_irq(struct usb_hcd *hcd) | |
2535 | { | |
2536 | struct oxu_hcd *oxu = hcd_to_oxu(hcd); | |
2537 | int ret = IRQ_HANDLED; | |
2538 | ||
2539 | u32 status = oxu_readl(hcd->regs, OXU_CHIPIRQSTATUS); | |
2540 | u32 enable = oxu_readl(hcd->regs, OXU_CHIPIRQEN_SET); | |
2541 | ||
2542 | /* Disable all interrupt */ | |
2543 | oxu_writel(hcd->regs, OXU_CHIPIRQEN_CLR, enable); | |
2544 | ||
2545 | if ((oxu->is_otg && (status & OXU_USBOTGI)) || | |
2546 | (!oxu->is_otg && (status & OXU_USBSPHI))) | |
2547 | oxu210_hcd_irq(hcd); | |
2548 | else | |
2549 | ret = IRQ_NONE; | |
2550 | ||
2551 | /* Enable all interrupt back */ | |
2552 | oxu_writel(hcd->regs, OXU_CHIPIRQEN_SET, enable); | |
2553 | ||
2554 | return ret; | |
2555 | } | |
2556 | ||
2557 | static void oxu_watchdog(unsigned long param) | |
2558 | { | |
2559 | struct oxu_hcd *oxu = (struct oxu_hcd *) param; | |
2560 | unsigned long flags; | |
2561 | ||
2562 | spin_lock_irqsave(&oxu->lock, flags); | |
2563 | ||
2564 | /* lost IAA irqs wedge things badly; seen with a vt8235 */ | |
2565 | if (oxu->reclaim) { | |
2566 | u32 status = readl(&oxu->regs->status); | |
2567 | if (status & STS_IAA) { | |
2568 | oxu_vdbg(oxu, "lost IAA\n"); | |
2569 | writel(STS_IAA, &oxu->regs->status); | |
2570 | oxu->reclaim_ready = 1; | |
2571 | } | |
2572 | } | |
2573 | ||
2574 | /* stop async processing after it's idled a bit */ | |
2575 | if (test_bit(TIMER_ASYNC_OFF, &oxu->actions)) | |
2576 | start_unlink_async(oxu, oxu->async); | |
2577 | ||
2578 | /* oxu could run by timer, without IRQs ... */ | |
2579 | ehci_work(oxu); | |
2580 | ||
2581 | spin_unlock_irqrestore(&oxu->lock, flags); | |
2582 | } | |
2583 | ||
2584 | /* One-time init, only for memory state. | |
2585 | */ | |
2586 | static int oxu_hcd_init(struct usb_hcd *hcd) | |
2587 | { | |
2588 | struct oxu_hcd *oxu = hcd_to_oxu(hcd); | |
2589 | u32 temp; | |
2590 | int retval; | |
2591 | u32 hcc_params; | |
2592 | ||
2593 | spin_lock_init(&oxu->lock); | |
2594 | ||
2595 | init_timer(&oxu->watchdog); | |
2596 | oxu->watchdog.function = oxu_watchdog; | |
2597 | oxu->watchdog.data = (unsigned long) oxu; | |
2598 | ||
2599 | /* | |
2600 | * hw default: 1K periodic list heads, one per frame. | |
2601 | * periodic_size can shrink by USBCMD update if hcc_params allows. | |
2602 | */ | |
2603 | oxu->periodic_size = DEFAULT_I_TDPS; | |
2604 | retval = ehci_mem_init(oxu, GFP_KERNEL); | |
2605 | if (retval < 0) | |
2606 | return retval; | |
2607 | ||
2608 | /* controllers may cache some of the periodic schedule ... */ | |
2609 | hcc_params = readl(&oxu->caps->hcc_params); | |
2610 | if (HCC_ISOC_CACHE(hcc_params)) /* full frame cache */ | |
2611 | oxu->i_thresh = 8; | |
2612 | else /* N microframes cached */ | |
2613 | oxu->i_thresh = 2 + HCC_ISOC_THRES(hcc_params); | |
2614 | ||
2615 | oxu->reclaim = NULL; | |
2616 | oxu->reclaim_ready = 0; | |
2617 | oxu->next_uframe = -1; | |
2618 | ||
2619 | /* | |
2620 | * dedicate a qh for the async ring head, since we couldn't unlink | |
2621 | * a 'real' qh without stopping the async schedule [4.8]. use it | |
2622 | * as the 'reclamation list head' too. | |
2623 | * its dummy is used in hw_alt_next of many tds, to prevent the qh | |
2624 | * from automatically advancing to the next td after short reads. | |
2625 | */ | |
2626 | oxu->async->qh_next.qh = NULL; | |
2627 | oxu->async->hw_next = QH_NEXT(oxu->async->qh_dma); | |
2628 | oxu->async->hw_info1 = cpu_to_le32(QH_HEAD); | |
2629 | oxu->async->hw_token = cpu_to_le32(QTD_STS_HALT); | |
2630 | oxu->async->hw_qtd_next = EHCI_LIST_END; | |
2631 | oxu->async->qh_state = QH_STATE_LINKED; | |
2632 | oxu->async->hw_alt_next = QTD_NEXT(oxu->async->dummy->qtd_dma); | |
2633 | ||
2634 | /* clear interrupt enables, set irq latency */ | |
2635 | if (log2_irq_thresh < 0 || log2_irq_thresh > 6) | |
2636 | log2_irq_thresh = 0; | |
2637 | temp = 1 << (16 + log2_irq_thresh); | |
2638 | if (HCC_CANPARK(hcc_params)) { | |
2639 | /* HW default park == 3, on hardware that supports it (like | |
2640 | * NVidia and ALI silicon), maximizes throughput on the async | |
2641 | * schedule by avoiding QH fetches between transfers. | |
2642 | * | |
2643 | * With fast usb storage devices and NForce2, "park" seems to | |
2644 | * make problems: throughput reduction (!), data errors... | |
2645 | */ | |
2646 | if (park) { | |
2647 | park = min(park, (unsigned) 3); | |
2648 | temp |= CMD_PARK; | |
2649 | temp |= park << 8; | |
2650 | } | |
2651 | oxu_dbg(oxu, "park %d\n", park); | |
2652 | } | |
2653 | if (HCC_PGM_FRAMELISTLEN(hcc_params)) { | |
2654 | /* periodic schedule size can be smaller than default */ | |
2655 | temp &= ~(3 << 2); | |
2656 | temp |= (EHCI_TUNE_FLS << 2); | |
2657 | } | |
2658 | oxu->command = temp; | |
2659 | ||
2660 | return 0; | |
2661 | } | |
2662 | ||
2663 | /* Called during probe() after chip reset completes. | |
2664 | */ | |
2665 | static int oxu_reset(struct usb_hcd *hcd) | |
2666 | { | |
2667 | struct oxu_hcd *oxu = hcd_to_oxu(hcd); | |
2668 | int ret; | |
2669 | ||
2670 | spin_lock_init(&oxu->mem_lock); | |
2671 | INIT_LIST_HEAD(&oxu->urb_list); | |
2672 | oxu->urb_len = 0; | |
2673 | ||
2674 | /* FIMXE */ | |
a9f8ec4d | 2675 | hcd->self.controller->dma_mask = NULL; |
b92a78e5 RG |
2676 | |
2677 | if (oxu->is_otg) { | |
2678 | oxu->caps = hcd->regs + OXU_OTG_CAP_OFFSET; | |
2679 | oxu->regs = hcd->regs + OXU_OTG_CAP_OFFSET + \ | |
2680 | HC_LENGTH(readl(&oxu->caps->hc_capbase)); | |
2681 | ||
2682 | oxu->mem = hcd->regs + OXU_SPH_MEM; | |
2683 | } else { | |
2684 | oxu->caps = hcd->regs + OXU_SPH_CAP_OFFSET; | |
2685 | oxu->regs = hcd->regs + OXU_SPH_CAP_OFFSET + \ | |
2686 | HC_LENGTH(readl(&oxu->caps->hc_capbase)); | |
2687 | ||
2688 | oxu->mem = hcd->regs + OXU_OTG_MEM; | |
2689 | } | |
2690 | ||
2691 | oxu->hcs_params = readl(&oxu->caps->hcs_params); | |
2692 | oxu->sbrn = 0x20; | |
2693 | ||
2694 | ret = oxu_hcd_init(hcd); | |
2695 | if (ret) | |
2696 | return ret; | |
2697 | ||
2698 | return 0; | |
2699 | } | |
2700 | ||
2701 | static int oxu_run(struct usb_hcd *hcd) | |
2702 | { | |
2703 | struct oxu_hcd *oxu = hcd_to_oxu(hcd); | |
2704 | int retval; | |
2705 | u32 temp, hcc_params; | |
2706 | ||
2707 | hcd->uses_new_polling = 1; | |
b92a78e5 RG |
2708 | |
2709 | /* EHCI spec section 4.1 */ | |
2710 | retval = ehci_reset(oxu); | |
2711 | if (retval != 0) { | |
2712 | ehci_mem_cleanup(oxu); | |
2713 | return retval; | |
2714 | } | |
2715 | writel(oxu->periodic_dma, &oxu->regs->frame_list); | |
2716 | writel((u32) oxu->async->qh_dma, &oxu->regs->async_next); | |
2717 | ||
2718 | /* hcc_params controls whether oxu->regs->segment must (!!!) | |
2719 | * be used; it constrains QH/ITD/SITD and QTD locations. | |
2720 | * pci_pool consistent memory always uses segment zero. | |
2721 | * streaming mappings for I/O buffers, like pci_map_single(), | |
2722 | * can return segments above 4GB, if the device allows. | |
2723 | * | |
2724 | * NOTE: the dma mask is visible through dma_supported(), so | |
2725 | * drivers can pass this info along ... like NETIF_F_HIGHDMA, | |
2726 | * Scsi_Host.highmem_io, and so forth. It's readonly to all | |
2727 | * host side drivers though. | |
2728 | */ | |
2729 | hcc_params = readl(&oxu->caps->hcc_params); | |
2730 | if (HCC_64BIT_ADDR(hcc_params)) | |
2731 | writel(0, &oxu->regs->segment); | |
2732 | ||
2733 | oxu->command &= ~(CMD_LRESET | CMD_IAAD | CMD_PSE | | |
2734 | CMD_ASE | CMD_RESET); | |
2735 | oxu->command |= CMD_RUN; | |
2736 | writel(oxu->command, &oxu->regs->command); | |
2737 | dbg_cmd(oxu, "init", oxu->command); | |
2738 | ||
2739 | /* | |
2740 | * Start, enabling full USB 2.0 functionality ... usb 1.1 devices | |
2741 | * are explicitly handed to companion controller(s), so no TT is | |
2742 | * involved with the root hub. (Except where one is integrated, | |
2743 | * and there's no companion controller unless maybe for USB OTG.) | |
2744 | */ | |
2745 | hcd->state = HC_STATE_RUNNING; | |
2746 | writel(FLAG_CF, &oxu->regs->configured_flag); | |
2747 | readl(&oxu->regs->command); /* unblock posted writes */ | |
2748 | ||
2749 | temp = HC_VERSION(readl(&oxu->caps->hc_capbase)); | |
2750 | oxu_info(oxu, "USB %x.%x started, quasi-EHCI %x.%02x, driver %s%s\n", | |
2751 | ((oxu->sbrn & 0xf0)>>4), (oxu->sbrn & 0x0f), | |
2752 | temp >> 8, temp & 0xff, DRIVER_VERSION, | |
2753 | ignore_oc ? ", overcurrent ignored" : ""); | |
2754 | ||
2755 | writel(INTR_MASK, &oxu->regs->intr_enable); /* Turn On Interrupts */ | |
2756 | ||
2757 | return 0; | |
2758 | } | |
2759 | ||
2760 | static void oxu_stop(struct usb_hcd *hcd) | |
2761 | { | |
2762 | struct oxu_hcd *oxu = hcd_to_oxu(hcd); | |
2763 | ||
2764 | /* Turn off port power on all root hub ports. */ | |
2765 | ehci_port_power(oxu, 0); | |
2766 | ||
2767 | /* no more interrupts ... */ | |
2768 | del_timer_sync(&oxu->watchdog); | |
2769 | ||
2770 | spin_lock_irq(&oxu->lock); | |
2771 | if (HC_IS_RUNNING(hcd->state)) | |
2772 | ehci_quiesce(oxu); | |
2773 | ||
2774 | ehci_reset(oxu); | |
2775 | writel(0, &oxu->regs->intr_enable); | |
2776 | spin_unlock_irq(&oxu->lock); | |
2777 | ||
2778 | /* let companion controllers work when we aren't */ | |
2779 | writel(0, &oxu->regs->configured_flag); | |
2780 | ||
2781 | /* root hub is shut down separately (first, when possible) */ | |
2782 | spin_lock_irq(&oxu->lock); | |
2783 | if (oxu->async) | |
2784 | ehci_work(oxu); | |
2785 | spin_unlock_irq(&oxu->lock); | |
2786 | ehci_mem_cleanup(oxu); | |
2787 | ||
2788 | dbg_status(oxu, "oxu_stop completed", readl(&oxu->regs->status)); | |
2789 | } | |
2790 | ||
2791 | /* Kick in for silicon on any bus (not just pci, etc). | |
2792 | * This forcibly disables dma and IRQs, helping kexec and other cases | |
2793 | * where the next system software may expect clean state. | |
2794 | */ | |
2795 | static void oxu_shutdown(struct usb_hcd *hcd) | |
2796 | { | |
2797 | struct oxu_hcd *oxu = hcd_to_oxu(hcd); | |
2798 | ||
2799 | (void) ehci_halt(oxu); | |
2800 | ehci_turn_off_all_ports(oxu); | |
2801 | ||
2802 | /* make BIOS/etc use companion controller during reboot */ | |
2803 | writel(0, &oxu->regs->configured_flag); | |
2804 | ||
2805 | /* unblock posted writes */ | |
2806 | readl(&oxu->regs->configured_flag); | |
2807 | } | |
2808 | ||
2809 | /* Non-error returns are a promise to giveback() the urb later | |
2810 | * we drop ownership so next owner (or urb unlink) can get it | |
2811 | * | |
2812 | * urb + dev is in hcd.self.controller.urb_list | |
2813 | * we're queueing TDs onto software and hardware lists | |
2814 | * | |
2815 | * hcd-specific init for hcpriv hasn't been done yet | |
2816 | * | |
2817 | * NOTE: control, bulk, and interrupt share the same code to append TDs | |
2818 | * to a (possibly active) QH, and the same QH scanning code. | |
2819 | */ | |
2820 | static int __oxu_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, | |
2821 | gfp_t mem_flags) | |
2822 | { | |
2823 | struct oxu_hcd *oxu = hcd_to_oxu(hcd); | |
2824 | struct list_head qtd_list; | |
2825 | ||
2826 | INIT_LIST_HEAD(&qtd_list); | |
2827 | ||
2828 | switch (usb_pipetype(urb->pipe)) { | |
2829 | case PIPE_CONTROL: | |
2830 | case PIPE_BULK: | |
2831 | default: | |
2832 | if (!qh_urb_transaction(oxu, urb, &qtd_list, mem_flags)) | |
2833 | return -ENOMEM; | |
2834 | return submit_async(oxu, urb, &qtd_list, mem_flags); | |
2835 | ||
2836 | case PIPE_INTERRUPT: | |
2837 | if (!qh_urb_transaction(oxu, urb, &qtd_list, mem_flags)) | |
2838 | return -ENOMEM; | |
2839 | return intr_submit(oxu, urb, &qtd_list, mem_flags); | |
2840 | ||
2841 | case PIPE_ISOCHRONOUS: | |
2842 | if (urb->dev->speed == USB_SPEED_HIGH) | |
2843 | return itd_submit(oxu, urb, mem_flags); | |
2844 | else | |
2845 | return sitd_submit(oxu, urb, mem_flags); | |
2846 | } | |
2847 | } | |
2848 | ||
2849 | /* This function is responsible for breaking URBs with big data size | |
2850 | * into smaller size and processing small urbs in sequence. | |
2851 | */ | |
2852 | static int oxu_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, | |
2853 | gfp_t mem_flags) | |
2854 | { | |
2855 | struct oxu_hcd *oxu = hcd_to_oxu(hcd); | |
2856 | int num, rem; | |
2857 | int transfer_buffer_length; | |
2858 | void *transfer_buffer; | |
2859 | struct urb *murb; | |
2860 | int i, ret; | |
2861 | ||
2862 | /* If not bulk pipe just enqueue the URB */ | |
2863 | if (!usb_pipebulk(urb->pipe)) | |
2864 | return __oxu_urb_enqueue(hcd, urb, mem_flags); | |
2865 | ||
2866 | /* Otherwise we should verify the USB transfer buffer size! */ | |
2867 | transfer_buffer = urb->transfer_buffer; | |
2868 | transfer_buffer_length = urb->transfer_buffer_length; | |
2869 | ||
2870 | num = urb->transfer_buffer_length / 4096; | |
2871 | rem = urb->transfer_buffer_length % 4096; | |
2872 | if (rem != 0) | |
2873 | num++; | |
2874 | ||
2875 | /* If URB is smaller than 4096 bytes just enqueue it! */ | |
2876 | if (num == 1) | |
2877 | return __oxu_urb_enqueue(hcd, urb, mem_flags); | |
2878 | ||
2879 | /* Ok, we have more job to do! :) */ | |
2880 | ||
2881 | for (i = 0; i < num - 1; i++) { | |
2882 | /* Get free micro URB poll till a free urb is recieved */ | |
2883 | ||
2884 | do { | |
2885 | murb = (struct urb *) oxu_murb_alloc(oxu); | |
2886 | if (!murb) | |
2887 | schedule(); | |
2888 | } while (!murb); | |
2889 | ||
2890 | /* Coping the urb */ | |
2891 | memcpy(murb, urb, sizeof(struct urb)); | |
2892 | ||
2893 | murb->transfer_buffer_length = 4096; | |
2894 | murb->transfer_buffer = transfer_buffer + i * 4096; | |
2895 | ||
2896 | /* Null pointer for the encodes that this is a micro urb */ | |
2897 | murb->complete = NULL; | |
2898 | ||
2899 | ((struct oxu_murb *) murb)->main = urb; | |
2900 | ((struct oxu_murb *) murb)->last = 0; | |
2901 | ||
2902 | /* This loop is to guarantee urb to be processed when there's | |
2903 | * not enough resources at a particular time by retrying. | |
2904 | */ | |
2905 | do { | |
2906 | ret = __oxu_urb_enqueue(hcd, murb, mem_flags); | |
2907 | if (ret) | |
2908 | schedule(); | |
2909 | } while (ret); | |
2910 | } | |
2911 | ||
2912 | /* Last urb requires special handling */ | |
2913 | ||
2914 | /* Get free micro URB poll till a free urb is recieved */ | |
2915 | do { | |
2916 | murb = (struct urb *) oxu_murb_alloc(oxu); | |
2917 | if (!murb) | |
2918 | schedule(); | |
2919 | } while (!murb); | |
2920 | ||
2921 | /* Coping the urb */ | |
2922 | memcpy(murb, urb, sizeof(struct urb)); | |
2923 | ||
2924 | murb->transfer_buffer_length = rem > 0 ? rem : 4096; | |
2925 | murb->transfer_buffer = transfer_buffer + (num - 1) * 4096; | |
2926 | ||
2927 | /* Null pointer for the encodes that this is a micro urb */ | |
2928 | murb->complete = NULL; | |
2929 | ||
2930 | ((struct oxu_murb *) murb)->main = urb; | |
2931 | ((struct oxu_murb *) murb)->last = 1; | |
2932 | ||
2933 | do { | |
2934 | ret = __oxu_urb_enqueue(hcd, murb, mem_flags); | |
2935 | if (ret) | |
2936 | schedule(); | |
2937 | } while (ret); | |
2938 | ||
2939 | return ret; | |
2940 | } | |
2941 | ||
2942 | /* Remove from hardware lists. | |
2943 | * Completions normally happen asynchronously | |
2944 | */ | |
2945 | static int oxu_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) | |
2946 | { | |
2947 | struct oxu_hcd *oxu = hcd_to_oxu(hcd); | |
2948 | struct ehci_qh *qh; | |
2949 | unsigned long flags; | |
2950 | ||
2951 | spin_lock_irqsave(&oxu->lock, flags); | |
2952 | switch (usb_pipetype(urb->pipe)) { | |
2953 | case PIPE_CONTROL: | |
2954 | case PIPE_BULK: | |
2955 | default: | |
2956 | qh = (struct ehci_qh *) urb->hcpriv; | |
2957 | if (!qh) | |
2958 | break; | |
2959 | unlink_async(oxu, qh); | |
2960 | break; | |
2961 | ||
2962 | case PIPE_INTERRUPT: | |
2963 | qh = (struct ehci_qh *) urb->hcpriv; | |
2964 | if (!qh) | |
2965 | break; | |
2966 | switch (qh->qh_state) { | |
2967 | case QH_STATE_LINKED: | |
2968 | intr_deschedule(oxu, qh); | |
2969 | /* FALL THROUGH */ | |
2970 | case QH_STATE_IDLE: | |
2971 | qh_completions(oxu, qh); | |
2972 | break; | |
2973 | default: | |
2974 | oxu_dbg(oxu, "bogus qh %p state %d\n", | |
2975 | qh, qh->qh_state); | |
2976 | goto done; | |
2977 | } | |
2978 | ||
2979 | /* reschedule QH iff another request is queued */ | |
2980 | if (!list_empty(&qh->qtd_list) | |
2981 | && HC_IS_RUNNING(hcd->state)) { | |
2982 | int status; | |
2983 | ||
2984 | status = qh_schedule(oxu, qh); | |
2985 | spin_unlock_irqrestore(&oxu->lock, flags); | |
2986 | ||
2987 | if (status != 0) { | |
2988 | /* shouldn't happen often, but ... | |
2989 | * FIXME kill those tds' urbs | |
2990 | */ | |
2991 | err("can't reschedule qh %p, err %d", | |
2992 | qh, status); | |
2993 | } | |
2994 | return status; | |
2995 | } | |
2996 | break; | |
2997 | } | |
2998 | done: | |
2999 | spin_unlock_irqrestore(&oxu->lock, flags); | |
3000 | return 0; | |
3001 | } | |
3002 | ||
3003 | /* Bulk qh holds the data toggle */ | |
3004 | static void oxu_endpoint_disable(struct usb_hcd *hcd, | |
3005 | struct usb_host_endpoint *ep) | |
3006 | { | |
3007 | struct oxu_hcd *oxu = hcd_to_oxu(hcd); | |
3008 | unsigned long flags; | |
3009 | struct ehci_qh *qh, *tmp; | |
3010 | ||
3011 | /* ASSERT: any requests/urbs are being unlinked */ | |
3012 | /* ASSERT: nobody can be submitting urbs for this any more */ | |
3013 | ||
3014 | rescan: | |
3015 | spin_lock_irqsave(&oxu->lock, flags); | |
3016 | qh = ep->hcpriv; | |
3017 | if (!qh) | |
3018 | goto done; | |
3019 | ||
3020 | /* endpoints can be iso streams. for now, we don't | |
3021 | * accelerate iso completions ... so spin a while. | |
3022 | */ | |
3023 | if (qh->hw_info1 == 0) { | |
3024 | oxu_vdbg(oxu, "iso delay\n"); | |
3025 | goto idle_timeout; | |
3026 | } | |
3027 | ||
3028 | if (!HC_IS_RUNNING(hcd->state)) | |
3029 | qh->qh_state = QH_STATE_IDLE; | |
3030 | switch (qh->qh_state) { | |
3031 | case QH_STATE_LINKED: | |
3032 | for (tmp = oxu->async->qh_next.qh; | |
3033 | tmp && tmp != qh; | |
3034 | tmp = tmp->qh_next.qh) | |
3035 | continue; | |
3036 | /* periodic qh self-unlinks on empty */ | |
3037 | if (!tmp) | |
3038 | goto nogood; | |
3039 | unlink_async(oxu, qh); | |
3040 | /* FALL THROUGH */ | |
3041 | case QH_STATE_UNLINK: /* wait for hw to finish? */ | |
3042 | idle_timeout: | |
3043 | spin_unlock_irqrestore(&oxu->lock, flags); | |
3044 | schedule_timeout_uninterruptible(1); | |
3045 | goto rescan; | |
3046 | case QH_STATE_IDLE: /* fully unlinked */ | |
3047 | if (list_empty(&qh->qtd_list)) { | |
3048 | qh_put(qh); | |
3049 | break; | |
3050 | } | |
3051 | /* else FALL THROUGH */ | |
3052 | default: | |
3053 | nogood: | |
3054 | /* caller was supposed to have unlinked any requests; | |
3055 | * that's not our job. just leak this memory. | |
3056 | */ | |
3057 | oxu_err(oxu, "qh %p (#%02x) state %d%s\n", | |
3058 | qh, ep->desc.bEndpointAddress, qh->qh_state, | |
3059 | list_empty(&qh->qtd_list) ? "" : "(has tds)"); | |
3060 | break; | |
3061 | } | |
3062 | ep->hcpriv = NULL; | |
3063 | done: | |
3064 | spin_unlock_irqrestore(&oxu->lock, flags); | |
b92a78e5 RG |
3065 | } |
3066 | ||
3067 | static int oxu_get_frame(struct usb_hcd *hcd) | |
3068 | { | |
3069 | struct oxu_hcd *oxu = hcd_to_oxu(hcd); | |
3070 | ||
3071 | return (readl(&oxu->regs->frame_index) >> 3) % | |
3072 | oxu->periodic_size; | |
3073 | } | |
3074 | ||
3075 | /* Build "status change" packet (one or two bytes) from HC registers */ | |
3076 | static int oxu_hub_status_data(struct usb_hcd *hcd, char *buf) | |
3077 | { | |
3078 | struct oxu_hcd *oxu = hcd_to_oxu(hcd); | |
3079 | u32 temp, mask, status = 0; | |
3080 | int ports, i, retval = 1; | |
3081 | unsigned long flags; | |
3082 | ||
3083 | /* if !USB_SUSPEND, root hub timers won't get shut down ... */ | |
3084 | if (!HC_IS_RUNNING(hcd->state)) | |
3085 | return 0; | |
3086 | ||
3087 | /* init status to no-changes */ | |
3088 | buf[0] = 0; | |
3089 | ports = HCS_N_PORTS(oxu->hcs_params); | |
3090 | if (ports > 7) { | |
3091 | buf[1] = 0; | |
3092 | retval++; | |
3093 | } | |
3094 | ||
3095 | /* Some boards (mostly VIA?) report bogus overcurrent indications, | |
3096 | * causing massive log spam unless we completely ignore them. It | |
b595076a | 3097 | * may be relevant that VIA VT8235 controllers, where PORT_POWER is |
b92a78e5 RG |
3098 | * always set, seem to clear PORT_OCC and PORT_CSC when writing to |
3099 | * PORT_POWER; that's surprising, but maybe within-spec. | |
3100 | */ | |
3101 | if (!ignore_oc) | |
3102 | mask = PORT_CSC | PORT_PEC | PORT_OCC; | |
3103 | else | |
3104 | mask = PORT_CSC | PORT_PEC; | |
3105 | ||
3106 | /* no hub change reports (bit 0) for now (power, ...) */ | |
3107 | ||
3108 | /* port N changes (bit N)? */ | |
3109 | spin_lock_irqsave(&oxu->lock, flags); | |
3110 | for (i = 0; i < ports; i++) { | |
3111 | temp = readl(&oxu->regs->port_status[i]); | |
3112 | ||
3113 | /* | |
3114 | * Return status information even for ports with OWNER set. | |
3115 | * Otherwise khubd wouldn't see the disconnect event when a | |
3116 | * high-speed device is switched over to the companion | |
3117 | * controller by the user. | |
3118 | */ | |
3119 | ||
3120 | if (!(temp & PORT_CONNECT)) | |
3121 | oxu->reset_done[i] = 0; | |
3122 | if ((temp & mask) != 0 || ((temp & PORT_RESUME) != 0 && | |
3123 | time_after_eq(jiffies, oxu->reset_done[i]))) { | |
3124 | if (i < 7) | |
3125 | buf[0] |= 1 << (i + 1); | |
3126 | else | |
3127 | buf[1] |= 1 << (i - 7); | |
3128 | status = STS_PCD; | |
3129 | } | |
3130 | } | |
3131 | /* FIXME autosuspend idle root hubs */ | |
3132 | spin_unlock_irqrestore(&oxu->lock, flags); | |
3133 | return status ? retval : 0; | |
3134 | } | |
3135 | ||
3136 | /* Returns the speed of a device attached to a port on the root hub. */ | |
3137 | static inline unsigned int oxu_port_speed(struct oxu_hcd *oxu, | |
3138 | unsigned int portsc) | |
3139 | { | |
3140 | switch ((portsc >> 26) & 3) { | |
3141 | case 0: | |
3142 | return 0; | |
3143 | case 1: | |
288ead45 | 3144 | return USB_PORT_STAT_LOW_SPEED; |
b92a78e5 RG |
3145 | case 2: |
3146 | default: | |
288ead45 | 3147 | return USB_PORT_STAT_HIGH_SPEED; |
b92a78e5 RG |
3148 | } |
3149 | } | |
3150 | ||
3151 | #define PORT_WAKE_BITS (PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E) | |
3152 | static int oxu_hub_control(struct usb_hcd *hcd, u16 typeReq, | |
3153 | u16 wValue, u16 wIndex, char *buf, u16 wLength) | |
3154 | { | |
3155 | struct oxu_hcd *oxu = hcd_to_oxu(hcd); | |
3156 | int ports = HCS_N_PORTS(oxu->hcs_params); | |
3157 | u32 __iomem *status_reg = &oxu->regs->port_status[wIndex - 1]; | |
3158 | u32 temp, status; | |
3159 | unsigned long flags; | |
3160 | int retval = 0; | |
3161 | unsigned selector; | |
3162 | ||
3163 | /* | |
3164 | * FIXME: support SetPortFeatures USB_PORT_FEAT_INDICATOR. | |
3165 | * HCS_INDICATOR may say we can change LEDs to off/amber/green. | |
3166 | * (track current state ourselves) ... blink for diagnostics, | |
3167 | * power, "this is the one", etc. EHCI spec supports this. | |
3168 | */ | |
3169 | ||
3170 | spin_lock_irqsave(&oxu->lock, flags); | |
3171 | switch (typeReq) { | |
3172 | case ClearHubFeature: | |
3173 | switch (wValue) { | |
3174 | case C_HUB_LOCAL_POWER: | |
3175 | case C_HUB_OVER_CURRENT: | |
3176 | /* no hub-wide feature/status flags */ | |
3177 | break; | |
3178 | default: | |
3179 | goto error; | |
3180 | } | |
3181 | break; | |
3182 | case ClearPortFeature: | |
3183 | if (!wIndex || wIndex > ports) | |
3184 | goto error; | |
3185 | wIndex--; | |
3186 | temp = readl(status_reg); | |
3187 | ||
3188 | /* | |
3189 | * Even if OWNER is set, so the port is owned by the | |
3190 | * companion controller, khubd needs to be able to clear | |
3191 | * the port-change status bits (especially | |
749da5f8 | 3192 | * USB_PORT_STAT_C_CONNECTION). |
b92a78e5 RG |
3193 | */ |
3194 | ||
3195 | switch (wValue) { | |
3196 | case USB_PORT_FEAT_ENABLE: | |
3197 | writel(temp & ~PORT_PE, status_reg); | |
3198 | break; | |
3199 | case USB_PORT_FEAT_C_ENABLE: | |
3200 | writel((temp & ~PORT_RWC_BITS) | PORT_PEC, status_reg); | |
3201 | break; | |
3202 | case USB_PORT_FEAT_SUSPEND: | |
3203 | if (temp & PORT_RESET) | |
3204 | goto error; | |
3205 | if (temp & PORT_SUSPEND) { | |
3206 | if ((temp & PORT_PE) == 0) | |
3207 | goto error; | |
3208 | /* resume signaling for 20 msec */ | |
3209 | temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS); | |
3210 | writel(temp | PORT_RESUME, status_reg); | |
3211 | oxu->reset_done[wIndex] = jiffies | |
3212 | + msecs_to_jiffies(20); | |
3213 | } | |
3214 | break; | |
3215 | case USB_PORT_FEAT_C_SUSPEND: | |
3216 | /* we auto-clear this feature */ | |
3217 | break; | |
3218 | case USB_PORT_FEAT_POWER: | |
3219 | if (HCS_PPC(oxu->hcs_params)) | |
3220 | writel(temp & ~(PORT_RWC_BITS | PORT_POWER), | |
3221 | status_reg); | |
3222 | break; | |
3223 | case USB_PORT_FEAT_C_CONNECTION: | |
3224 | writel((temp & ~PORT_RWC_BITS) | PORT_CSC, status_reg); | |
3225 | break; | |
3226 | case USB_PORT_FEAT_C_OVER_CURRENT: | |
3227 | writel((temp & ~PORT_RWC_BITS) | PORT_OCC, status_reg); | |
3228 | break; | |
3229 | case USB_PORT_FEAT_C_RESET: | |
3230 | /* GetPortStatus clears reset */ | |
3231 | break; | |
3232 | default: | |
3233 | goto error; | |
3234 | } | |
3235 | readl(&oxu->regs->command); /* unblock posted write */ | |
3236 | break; | |
3237 | case GetHubDescriptor: | |
3238 | ehci_hub_descriptor(oxu, (struct usb_hub_descriptor *) | |
3239 | buf); | |
3240 | break; | |
3241 | case GetHubStatus: | |
3242 | /* no hub-wide feature/status flags */ | |
3243 | memset(buf, 0, 4); | |
3244 | break; | |
3245 | case GetPortStatus: | |
3246 | if (!wIndex || wIndex > ports) | |
3247 | goto error; | |
3248 | wIndex--; | |
3249 | status = 0; | |
3250 | temp = readl(status_reg); | |
3251 | ||
3252 | /* wPortChange bits */ | |
3253 | if (temp & PORT_CSC) | |
749da5f8 | 3254 | status |= USB_PORT_STAT_C_CONNECTION << 16; |
b92a78e5 | 3255 | if (temp & PORT_PEC) |
749da5f8 | 3256 | status |= USB_PORT_STAT_C_ENABLE << 16; |
b92a78e5 | 3257 | if ((temp & PORT_OCC) && !ignore_oc) |
749da5f8 | 3258 | status |= USB_PORT_STAT_C_OVERCURRENT << 16; |
b92a78e5 RG |
3259 | |
3260 | /* whoever resumes must GetPortStatus to complete it!! */ | |
3261 | if (temp & PORT_RESUME) { | |
3262 | ||
3263 | /* Remote Wakeup received? */ | |
3264 | if (!oxu->reset_done[wIndex]) { | |
3265 | /* resume signaling for 20 msec */ | |
3266 | oxu->reset_done[wIndex] = jiffies | |
3267 | + msecs_to_jiffies(20); | |
3268 | /* check the port again */ | |
3269 | mod_timer(&oxu_to_hcd(oxu)->rh_timer, | |
3270 | oxu->reset_done[wIndex]); | |
3271 | } | |
3272 | ||
3273 | /* resume completed? */ | |
3274 | else if (time_after_eq(jiffies, | |
3275 | oxu->reset_done[wIndex])) { | |
749da5f8 | 3276 | status |= USB_PORT_STAT_C_SUSPEND << 16; |
b92a78e5 RG |
3277 | oxu->reset_done[wIndex] = 0; |
3278 | ||
3279 | /* stop resume signaling */ | |
3280 | temp = readl(status_reg); | |
3281 | writel(temp & ~(PORT_RWC_BITS | PORT_RESUME), | |
3282 | status_reg); | |
3283 | retval = handshake(oxu, status_reg, | |
3284 | PORT_RESUME, 0, 2000 /* 2msec */); | |
3285 | if (retval != 0) { | |
3286 | oxu_err(oxu, | |
3287 | "port %d resume error %d\n", | |
3288 | wIndex + 1, retval); | |
3289 | goto error; | |
3290 | } | |
3291 | temp &= ~(PORT_SUSPEND|PORT_RESUME|(3<<10)); | |
3292 | } | |
3293 | } | |
3294 | ||
3295 | /* whoever resets must GetPortStatus to complete it!! */ | |
3296 | if ((temp & PORT_RESET) | |
3297 | && time_after_eq(jiffies, | |
3298 | oxu->reset_done[wIndex])) { | |
749da5f8 | 3299 | status |= USB_PORT_STAT_C_RESET << 16; |
b92a78e5 RG |
3300 | oxu->reset_done[wIndex] = 0; |
3301 | ||
3302 | /* force reset to complete */ | |
3303 | writel(temp & ~(PORT_RWC_BITS | PORT_RESET), | |
3304 | status_reg); | |
3305 | /* REVISIT: some hardware needs 550+ usec to clear | |
3306 | * this bit; seems too long to spin routinely... | |
3307 | */ | |
3308 | retval = handshake(oxu, status_reg, | |
3309 | PORT_RESET, 0, 750); | |
3310 | if (retval != 0) { | |
3311 | oxu_err(oxu, "port %d reset error %d\n", | |
3312 | wIndex + 1, retval); | |
3313 | goto error; | |
3314 | } | |
3315 | ||
3316 | /* see what we found out */ | |
3317 | temp = check_reset_complete(oxu, wIndex, status_reg, | |
3318 | readl(status_reg)); | |
3319 | } | |
3320 | ||
3321 | /* transfer dedicated ports to the companion hc */ | |
3322 | if ((temp & PORT_CONNECT) && | |
3323 | test_bit(wIndex, &oxu->companion_ports)) { | |
3324 | temp &= ~PORT_RWC_BITS; | |
3325 | temp |= PORT_OWNER; | |
3326 | writel(temp, status_reg); | |
3327 | oxu_dbg(oxu, "port %d --> companion\n", wIndex + 1); | |
3328 | temp = readl(status_reg); | |
3329 | } | |
3330 | ||
3331 | /* | |
3332 | * Even if OWNER is set, there's no harm letting khubd | |
3333 | * see the wPortStatus values (they should all be 0 except | |
3334 | * for PORT_POWER anyway). | |
3335 | */ | |
3336 | ||
3337 | if (temp & PORT_CONNECT) { | |
749da5f8 | 3338 | status |= USB_PORT_STAT_CONNECTION; |
b92a78e5 RG |
3339 | /* status may be from integrated TT */ |
3340 | status |= oxu_port_speed(oxu, temp); | |
3341 | } | |
3342 | if (temp & PORT_PE) | |
749da5f8 | 3343 | status |= USB_PORT_STAT_ENABLE; |
b92a78e5 | 3344 | if (temp & (PORT_SUSPEND|PORT_RESUME)) |
749da5f8 | 3345 | status |= USB_PORT_STAT_SUSPEND; |
b92a78e5 | 3346 | if (temp & PORT_OC) |
749da5f8 | 3347 | status |= USB_PORT_STAT_OVERCURRENT; |
b92a78e5 | 3348 | if (temp & PORT_RESET) |
749da5f8 | 3349 | status |= USB_PORT_STAT_RESET; |
b92a78e5 | 3350 | if (temp & PORT_POWER) |
749da5f8 | 3351 | status |= USB_PORT_STAT_POWER; |
b92a78e5 RG |
3352 | |
3353 | #ifndef OXU_VERBOSE_DEBUG | |
3354 | if (status & ~0xffff) /* only if wPortChange is interesting */ | |
3355 | #endif | |
3356 | dbg_port(oxu, "GetStatus", wIndex + 1, temp); | |
3357 | put_unaligned(cpu_to_le32(status), (__le32 *) buf); | |
3358 | break; | |
3359 | case SetHubFeature: | |
3360 | switch (wValue) { | |
3361 | case C_HUB_LOCAL_POWER: | |
3362 | case C_HUB_OVER_CURRENT: | |
3363 | /* no hub-wide feature/status flags */ | |
3364 | break; | |
3365 | default: | |
3366 | goto error; | |
3367 | } | |
3368 | break; | |
3369 | case SetPortFeature: | |
3370 | selector = wIndex >> 8; | |
3371 | wIndex &= 0xff; | |
3372 | if (!wIndex || wIndex > ports) | |
3373 | goto error; | |
3374 | wIndex--; | |
3375 | temp = readl(status_reg); | |
3376 | if (temp & PORT_OWNER) | |
3377 | break; | |
3378 | ||
3379 | temp &= ~PORT_RWC_BITS; | |
3380 | switch (wValue) { | |
3381 | case USB_PORT_FEAT_SUSPEND: | |
3382 | if ((temp & PORT_PE) == 0 | |
3383 | || (temp & PORT_RESET) != 0) | |
3384 | goto error; | |
3385 | if (device_may_wakeup(&hcd->self.root_hub->dev)) | |
3386 | temp |= PORT_WAKE_BITS; | |
3387 | writel(temp | PORT_SUSPEND, status_reg); | |
3388 | break; | |
3389 | case USB_PORT_FEAT_POWER: | |
3390 | if (HCS_PPC(oxu->hcs_params)) | |
3391 | writel(temp | PORT_POWER, status_reg); | |
3392 | break; | |
3393 | case USB_PORT_FEAT_RESET: | |
3394 | if (temp & PORT_RESUME) | |
3395 | goto error; | |
3396 | /* line status bits may report this as low speed, | |
3397 | * which can be fine if this root hub has a | |
3398 | * transaction translator built in. | |
3399 | */ | |
3400 | oxu_vdbg(oxu, "port %d reset\n", wIndex + 1); | |
3401 | temp |= PORT_RESET; | |
3402 | temp &= ~PORT_PE; | |
3403 | ||
3404 | /* | |
3405 | * caller must wait, then call GetPortStatus | |
3406 | * usb 2.0 spec says 50 ms resets on root | |
3407 | */ | |
3408 | oxu->reset_done[wIndex] = jiffies | |
3409 | + msecs_to_jiffies(50); | |
3410 | writel(temp, status_reg); | |
3411 | break; | |
3412 | ||
3413 | /* For downstream facing ports (these): one hub port is put | |
3414 | * into test mode according to USB2 11.24.2.13, then the hub | |
3415 | * must be reset (which for root hub now means rmmod+modprobe, | |
3416 | * or else system reboot). See EHCI 2.3.9 and 4.14 for info | |
3417 | * about the EHCI-specific stuff. | |
3418 | */ | |
3419 | case USB_PORT_FEAT_TEST: | |
3420 | if (!selector || selector > 5) | |
3421 | goto error; | |
3422 | ehci_quiesce(oxu); | |
3423 | ehci_halt(oxu); | |
3424 | temp |= selector << 16; | |
3425 | writel(temp, status_reg); | |
3426 | break; | |
3427 | ||
3428 | default: | |
3429 | goto error; | |
3430 | } | |
3431 | readl(&oxu->regs->command); /* unblock posted writes */ | |
3432 | break; | |
3433 | ||
3434 | default: | |
3435 | error: | |
3436 | /* "stall" on error */ | |
3437 | retval = -EPIPE; | |
3438 | } | |
3439 | spin_unlock_irqrestore(&oxu->lock, flags); | |
3440 | return retval; | |
3441 | } | |
3442 | ||
3443 | #ifdef CONFIG_PM | |
3444 | ||
3445 | static int oxu_bus_suspend(struct usb_hcd *hcd) | |
3446 | { | |
3447 | struct oxu_hcd *oxu = hcd_to_oxu(hcd); | |
3448 | int port; | |
3449 | int mask; | |
3450 | ||
3451 | oxu_dbg(oxu, "suspend root hub\n"); | |
3452 | ||
3453 | if (time_before(jiffies, oxu->next_statechange)) | |
3454 | msleep(5); | |
3455 | ||
3456 | port = HCS_N_PORTS(oxu->hcs_params); | |
3457 | spin_lock_irq(&oxu->lock); | |
3458 | ||
3459 | /* stop schedules, clean any completed work */ | |
3460 | if (HC_IS_RUNNING(hcd->state)) { | |
3461 | ehci_quiesce(oxu); | |
3462 | hcd->state = HC_STATE_QUIESCING; | |
3463 | } | |
3464 | oxu->command = readl(&oxu->regs->command); | |
3465 | if (oxu->reclaim) | |
3466 | oxu->reclaim_ready = 1; | |
3467 | ehci_work(oxu); | |
3468 | ||
3469 | /* Unlike other USB host controller types, EHCI doesn't have | |
3470 | * any notion of "global" or bus-wide suspend. The driver has | |
3471 | * to manually suspend all the active unsuspended ports, and | |
3472 | * then manually resume them in the bus_resume() routine. | |
3473 | */ | |
3474 | oxu->bus_suspended = 0; | |
3475 | while (port--) { | |
3476 | u32 __iomem *reg = &oxu->regs->port_status[port]; | |
3477 | u32 t1 = readl(reg) & ~PORT_RWC_BITS; | |
3478 | u32 t2 = t1; | |
3479 | ||
3480 | /* keep track of which ports we suspend */ | |
3481 | if ((t1 & PORT_PE) && !(t1 & PORT_OWNER) && | |
3482 | !(t1 & PORT_SUSPEND)) { | |
3483 | t2 |= PORT_SUSPEND; | |
3484 | set_bit(port, &oxu->bus_suspended); | |
3485 | } | |
3486 | ||
3487 | /* enable remote wakeup on all ports */ | |
3488 | if (device_may_wakeup(&hcd->self.root_hub->dev)) | |
3489 | t2 |= PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E; | |
3490 | else | |
3491 | t2 &= ~(PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E); | |
3492 | ||
3493 | if (t1 != t2) { | |
3494 | oxu_vdbg(oxu, "port %d, %08x -> %08x\n", | |
3495 | port + 1, t1, t2); | |
3496 | writel(t2, reg); | |
3497 | } | |
3498 | } | |
3499 | ||
3500 | /* turn off now-idle HC */ | |
3501 | del_timer_sync(&oxu->watchdog); | |
3502 | ehci_halt(oxu); | |
3503 | hcd->state = HC_STATE_SUSPENDED; | |
3504 | ||
3505 | /* allow remote wakeup */ | |
3506 | mask = INTR_MASK; | |
3507 | if (!device_may_wakeup(&hcd->self.root_hub->dev)) | |
3508 | mask &= ~STS_PCD; | |
3509 | writel(mask, &oxu->regs->intr_enable); | |
3510 | readl(&oxu->regs->intr_enable); | |
3511 | ||
3512 | oxu->next_statechange = jiffies + msecs_to_jiffies(10); | |
3513 | spin_unlock_irq(&oxu->lock); | |
3514 | return 0; | |
3515 | } | |
3516 | ||
3517 | /* Caller has locked the root hub, and should reset/reinit on error */ | |
3518 | static int oxu_bus_resume(struct usb_hcd *hcd) | |
3519 | { | |
3520 | struct oxu_hcd *oxu = hcd_to_oxu(hcd); | |
3521 | u32 temp; | |
3522 | int i; | |
3523 | ||
3524 | if (time_before(jiffies, oxu->next_statechange)) | |
3525 | msleep(5); | |
3526 | spin_lock_irq(&oxu->lock); | |
3527 | ||
3528 | /* Ideally and we've got a real resume here, and no port's power | |
3529 | * was lost. (For PCI, that means Vaux was maintained.) But we | |
3530 | * could instead be restoring a swsusp snapshot -- so that BIOS was | |
3531 | * the last user of the controller, not reset/pm hardware keeping | |
3532 | * state we gave to it. | |
3533 | */ | |
3534 | temp = readl(&oxu->regs->intr_enable); | |
3535 | oxu_dbg(oxu, "resume root hub%s\n", temp ? "" : " after power loss"); | |
3536 | ||
3537 | /* at least some APM implementations will try to deliver | |
3538 | * IRQs right away, so delay them until we're ready. | |
3539 | */ | |
3540 | writel(0, &oxu->regs->intr_enable); | |
3541 | ||
3542 | /* re-init operational registers */ | |
3543 | writel(0, &oxu->regs->segment); | |
3544 | writel(oxu->periodic_dma, &oxu->regs->frame_list); | |
3545 | writel((u32) oxu->async->qh_dma, &oxu->regs->async_next); | |
3546 | ||
3547 | /* restore CMD_RUN, framelist size, and irq threshold */ | |
3548 | writel(oxu->command, &oxu->regs->command); | |
3549 | ||
3550 | /* Some controller/firmware combinations need a delay during which | |
3551 | * they set up the port statuses. See Bugzilla #8190. */ | |
3552 | mdelay(8); | |
3553 | ||
3554 | /* manually resume the ports we suspended during bus_suspend() */ | |
3555 | i = HCS_N_PORTS(oxu->hcs_params); | |
3556 | while (i--) { | |
3557 | temp = readl(&oxu->regs->port_status[i]); | |
3558 | temp &= ~(PORT_RWC_BITS | |
3559 | | PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E); | |
3560 | if (test_bit(i, &oxu->bus_suspended) && (temp & PORT_SUSPEND)) { | |
3561 | oxu->reset_done[i] = jiffies + msecs_to_jiffies(20); | |
3562 | temp |= PORT_RESUME; | |
3563 | } | |
3564 | writel(temp, &oxu->regs->port_status[i]); | |
3565 | } | |
3566 | i = HCS_N_PORTS(oxu->hcs_params); | |
3567 | mdelay(20); | |
3568 | while (i--) { | |
3569 | temp = readl(&oxu->regs->port_status[i]); | |
3570 | if (test_bit(i, &oxu->bus_suspended) && (temp & PORT_SUSPEND)) { | |
3571 | temp &= ~(PORT_RWC_BITS | PORT_RESUME); | |
3572 | writel(temp, &oxu->regs->port_status[i]); | |
3573 | oxu_vdbg(oxu, "resumed port %d\n", i + 1); | |
3574 | } | |
3575 | } | |
3576 | (void) readl(&oxu->regs->command); | |
3577 | ||
3578 | /* maybe re-activate the schedule(s) */ | |
3579 | temp = 0; | |
3580 | if (oxu->async->qh_next.qh) | |
3581 | temp |= CMD_ASE; | |
3582 | if (oxu->periodic_sched) | |
3583 | temp |= CMD_PSE; | |
3584 | if (temp) { | |
3585 | oxu->command |= temp; | |
3586 | writel(oxu->command, &oxu->regs->command); | |
3587 | } | |
3588 | ||
3589 | oxu->next_statechange = jiffies + msecs_to_jiffies(5); | |
3590 | hcd->state = HC_STATE_RUNNING; | |
3591 | ||
3592 | /* Now we can safely re-enable irqs */ | |
3593 | writel(INTR_MASK, &oxu->regs->intr_enable); | |
3594 | ||
3595 | spin_unlock_irq(&oxu->lock); | |
3596 | return 0; | |
3597 | } | |
3598 | ||
3599 | #else | |
3600 | ||
3601 | static int oxu_bus_suspend(struct usb_hcd *hcd) | |
3602 | { | |
3603 | return 0; | |
3604 | } | |
3605 | ||
3606 | static int oxu_bus_resume(struct usb_hcd *hcd) | |
3607 | { | |
3608 | return 0; | |
3609 | } | |
3610 | ||
3611 | #endif /* CONFIG_PM */ | |
3612 | ||
3613 | static const struct hc_driver oxu_hc_driver = { | |
3614 | .description = "oxu210hp_hcd", | |
3615 | .product_desc = "oxu210hp HCD", | |
3616 | .hcd_priv_size = sizeof(struct oxu_hcd), | |
3617 | ||
3618 | /* | |
3619 | * Generic hardware linkage | |
3620 | */ | |
3621 | .irq = oxu_irq, | |
3622 | .flags = HCD_MEMORY | HCD_USB2, | |
3623 | ||
3624 | /* | |
3625 | * Basic lifecycle operations | |
3626 | */ | |
3627 | .reset = oxu_reset, | |
3628 | .start = oxu_run, | |
3629 | .stop = oxu_stop, | |
3630 | .shutdown = oxu_shutdown, | |
3631 | ||
3632 | /* | |
3633 | * Managing i/o requests and associated device resources | |
3634 | */ | |
3635 | .urb_enqueue = oxu_urb_enqueue, | |
3636 | .urb_dequeue = oxu_urb_dequeue, | |
3637 | .endpoint_disable = oxu_endpoint_disable, | |
3638 | ||
3639 | /* | |
3640 | * Scheduling support | |
3641 | */ | |
3642 | .get_frame_number = oxu_get_frame, | |
3643 | ||
3644 | /* | |
3645 | * Root hub support | |
3646 | */ | |
3647 | .hub_status_data = oxu_hub_status_data, | |
3648 | .hub_control = oxu_hub_control, | |
3649 | .bus_suspend = oxu_bus_suspend, | |
3650 | .bus_resume = oxu_bus_resume, | |
3651 | }; | |
3652 | ||
3653 | /* | |
3654 | * Module stuff | |
3655 | */ | |
3656 | ||
3657 | static void oxu_configuration(struct platform_device *pdev, void *base) | |
3658 | { | |
3659 | u32 tmp; | |
3660 | ||
3661 | /* Initialize top level registers. | |
3662 | * First write ever | |
3663 | */ | |
3664 | oxu_writel(base, OXU_HOSTIFCONFIG, 0x0000037D); | |
3665 | oxu_writel(base, OXU_SOFTRESET, OXU_SRESET); | |
3666 | oxu_writel(base, OXU_HOSTIFCONFIG, 0x0000037D); | |
3667 | ||
3668 | tmp = oxu_readl(base, OXU_PIOBURSTREADCTRL); | |
3669 | oxu_writel(base, OXU_PIOBURSTREADCTRL, tmp | 0x0040); | |
3670 | ||
3671 | oxu_writel(base, OXU_ASO, OXU_SPHPOEN | OXU_OVRCCURPUPDEN | | |
3672 | OXU_COMPARATOR | OXU_ASO_OP); | |
3673 | ||
3674 | tmp = oxu_readl(base, OXU_CLKCTRL_SET); | |
3675 | oxu_writel(base, OXU_CLKCTRL_SET, tmp | OXU_SYSCLKEN | OXU_USBOTGCLKEN); | |
3676 | ||
3677 | /* Clear all top interrupt enable */ | |
3678 | oxu_writel(base, OXU_CHIPIRQEN_CLR, 0xff); | |
3679 | ||
3680 | /* Clear all top interrupt status */ | |
3681 | oxu_writel(base, OXU_CHIPIRQSTATUS, 0xff); | |
3682 | ||
3683 | /* Enable all needed top interrupt except OTG SPH core */ | |
3684 | oxu_writel(base, OXU_CHIPIRQEN_SET, OXU_USBSPHLPWUI | OXU_USBOTGLPWUI); | |
3685 | } | |
3686 | ||
3687 | static int oxu_verify_id(struct platform_device *pdev, void *base) | |
3688 | { | |
3689 | u32 id; | |
82cef0b8 | 3690 | static const char * const bo[] = { |
b92a78e5 RG |
3691 | "reserved", |
3692 | "128-pin LQFP", | |
3693 | "84-pin TFBGA", | |
3694 | "reserved", | |
3695 | }; | |
3696 | ||
3697 | /* Read controller signature register to find a match */ | |
3698 | id = oxu_readl(base, OXU_DEVICEID); | |
3699 | dev_info(&pdev->dev, "device ID %x\n", id); | |
3700 | if ((id & OXU_REV_MASK) != (OXU_REV_2100 << OXU_REV_SHIFT)) | |
3701 | return -1; | |
3702 | ||
3703 | dev_info(&pdev->dev, "found device %x %s (%04x:%04x)\n", | |
3704 | id >> OXU_REV_SHIFT, | |
3705 | bo[(id & OXU_BO_MASK) >> OXU_BO_SHIFT], | |
3706 | (id & OXU_MAJ_REV_MASK) >> OXU_MAJ_REV_SHIFT, | |
3707 | (id & OXU_MIN_REV_MASK) >> OXU_MIN_REV_SHIFT); | |
3708 | ||
3709 | return 0; | |
3710 | } | |
3711 | ||
3712 | static const struct hc_driver oxu_hc_driver; | |
3713 | static struct usb_hcd *oxu_create(struct platform_device *pdev, | |
3714 | unsigned long memstart, unsigned long memlen, | |
3715 | void *base, int irq, int otg) | |
3716 | { | |
3717 | struct device *dev = &pdev->dev; | |
3718 | ||
3719 | struct usb_hcd *hcd; | |
3720 | struct oxu_hcd *oxu; | |
3721 | int ret; | |
3722 | ||
3723 | /* Set endian mode and host mode */ | |
3724 | oxu_writel(base + (otg ? OXU_OTG_CORE_OFFSET : OXU_SPH_CORE_OFFSET), | |
3725 | OXU_USBMODE, | |
3726 | OXU_CM_HOST_ONLY | OXU_ES_LITTLE | OXU_VBPS); | |
3727 | ||
3728 | hcd = usb_create_hcd(&oxu_hc_driver, dev, | |
3729 | otg ? "oxu210hp_otg" : "oxu210hp_sph"); | |
3730 | if (!hcd) | |
3731 | return ERR_PTR(-ENOMEM); | |
3732 | ||
3733 | hcd->rsrc_start = memstart; | |
3734 | hcd->rsrc_len = memlen; | |
3735 | hcd->regs = base; | |
3736 | hcd->irq = irq; | |
3737 | hcd->state = HC_STATE_HALT; | |
3738 | ||
3739 | oxu = hcd_to_oxu(hcd); | |
3740 | oxu->is_otg = otg; | |
3741 | ||
3742 | ret = usb_add_hcd(hcd, irq, IRQF_SHARED); | |
3743 | if (ret < 0) | |
3744 | return ERR_PTR(ret); | |
3745 | ||
3746 | return hcd; | |
3747 | } | |
3748 | ||
3749 | static int oxu_init(struct platform_device *pdev, | |
3750 | unsigned long memstart, unsigned long memlen, | |
3751 | void *base, int irq) | |
3752 | { | |
3753 | struct oxu_info *info = platform_get_drvdata(pdev); | |
3754 | struct usb_hcd *hcd; | |
3755 | int ret; | |
3756 | ||
3757 | /* First time configuration at start up */ | |
3758 | oxu_configuration(pdev, base); | |
3759 | ||
3760 | ret = oxu_verify_id(pdev, base); | |
3761 | if (ret) { | |
3762 | dev_err(&pdev->dev, "no devices found!\n"); | |
3763 | return -ENODEV; | |
3764 | } | |
3765 | ||
3766 | /* Create the OTG controller */ | |
3767 | hcd = oxu_create(pdev, memstart, memlen, base, irq, 1); | |
3768 | if (IS_ERR(hcd)) { | |
3769 | dev_err(&pdev->dev, "cannot create OTG controller!\n"); | |
3770 | ret = PTR_ERR(hcd); | |
3771 | goto error_create_otg; | |
3772 | } | |
3773 | info->hcd[0] = hcd; | |
3774 | ||
3775 | /* Create the SPH host controller */ | |
3776 | hcd = oxu_create(pdev, memstart, memlen, base, irq, 0); | |
3777 | if (IS_ERR(hcd)) { | |
3778 | dev_err(&pdev->dev, "cannot create SPH controller!\n"); | |
3779 | ret = PTR_ERR(hcd); | |
3780 | goto error_create_sph; | |
3781 | } | |
3782 | info->hcd[1] = hcd; | |
3783 | ||
3784 | oxu_writel(base, OXU_CHIPIRQEN_SET, | |
3785 | oxu_readl(base, OXU_CHIPIRQEN_SET) | 3); | |
3786 | ||
3787 | return 0; | |
3788 | ||
3789 | error_create_sph: | |
3790 | usb_remove_hcd(info->hcd[0]); | |
3791 | usb_put_hcd(info->hcd[0]); | |
3792 | ||
3793 | error_create_otg: | |
3794 | return ret; | |
3795 | } | |
3796 | ||
3797 | static int oxu_drv_probe(struct platform_device *pdev) | |
3798 | { | |
3799 | struct resource *res; | |
3800 | void *base; | |
3801 | unsigned long memstart, memlen; | |
3802 | int irq, ret; | |
3803 | struct oxu_info *info; | |
3804 | ||
3805 | if (usb_disabled()) | |
3806 | return -ENODEV; | |
3807 | ||
3808 | /* | |
3809 | * Get the platform resources | |
3810 | */ | |
3811 | res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | |
3812 | if (!res) { | |
3813 | dev_err(&pdev->dev, | |
74c71ebd | 3814 | "no IRQ! Check %s setup!\n", dev_name(&pdev->dev)); |
b92a78e5 RG |
3815 | return -ENODEV; |
3816 | } | |
3817 | irq = res->start; | |
3818 | dev_dbg(&pdev->dev, "IRQ resource %d\n", irq); | |
3819 | ||
3820 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
3821 | if (!res) { | |
3822 | dev_err(&pdev->dev, "no registers address! Check %s setup!\n", | |
74c71ebd | 3823 | dev_name(&pdev->dev)); |
b92a78e5 RG |
3824 | return -ENODEV; |
3825 | } | |
3826 | memstart = res->start; | |
3827 | memlen = res->end - res->start + 1; | |
3828 | dev_dbg(&pdev->dev, "MEM resource %lx-%lx\n", memstart, memlen); | |
3829 | if (!request_mem_region(memstart, memlen, | |
3830 | oxu_hc_driver.description)) { | |
3831 | dev_dbg(&pdev->dev, "memory area already in use\n"); | |
3832 | return -EBUSY; | |
3833 | } | |
3834 | ||
3835 | ret = set_irq_type(irq, IRQF_TRIGGER_FALLING); | |
3836 | if (ret) { | |
3837 | dev_err(&pdev->dev, "error setting irq type\n"); | |
3838 | ret = -EFAULT; | |
3839 | goto error_set_irq_type; | |
3840 | } | |
3841 | ||
3842 | base = ioremap(memstart, memlen); | |
3843 | if (!base) { | |
3844 | dev_dbg(&pdev->dev, "error mapping memory\n"); | |
3845 | ret = -EFAULT; | |
3846 | goto error_ioremap; | |
3847 | } | |
3848 | ||
3849 | /* Allocate a driver data struct to hold useful info for both | |
3850 | * SPH & OTG devices | |
3851 | */ | |
3852 | info = kzalloc(sizeof(struct oxu_info), GFP_KERNEL); | |
3853 | if (!info) { | |
3854 | dev_dbg(&pdev->dev, "error allocating memory\n"); | |
3855 | ret = -EFAULT; | |
3856 | goto error_alloc; | |
3857 | } | |
3858 | platform_set_drvdata(pdev, info); | |
3859 | ||
3860 | ret = oxu_init(pdev, memstart, memlen, base, irq); | |
3861 | if (ret < 0) { | |
3862 | dev_dbg(&pdev->dev, "cannot init USB devices\n"); | |
3863 | goto error_init; | |
3864 | } | |
3865 | ||
3866 | dev_info(&pdev->dev, "devices enabled and running\n"); | |
3867 | platform_set_drvdata(pdev, info); | |
3868 | ||
3869 | return 0; | |
3870 | ||
3871 | error_init: | |
3872 | kfree(info); | |
3873 | platform_set_drvdata(pdev, NULL); | |
3874 | ||
3875 | error_alloc: | |
3876 | iounmap(base); | |
3877 | ||
3878 | error_set_irq_type: | |
3879 | error_ioremap: | |
3880 | release_mem_region(memstart, memlen); | |
3881 | ||
74c71ebd | 3882 | dev_err(&pdev->dev, "init %s fail, %d\n", dev_name(&pdev->dev), ret); |
b92a78e5 RG |
3883 | return ret; |
3884 | } | |
3885 | ||
3886 | static void oxu_remove(struct platform_device *pdev, struct usb_hcd *hcd) | |
3887 | { | |
3888 | usb_remove_hcd(hcd); | |
3889 | usb_put_hcd(hcd); | |
3890 | } | |
3891 | ||
3892 | static int oxu_drv_remove(struct platform_device *pdev) | |
3893 | { | |
3894 | struct oxu_info *info = platform_get_drvdata(pdev); | |
3895 | unsigned long memstart = info->hcd[0]->rsrc_start, | |
3896 | memlen = info->hcd[0]->rsrc_len; | |
3897 | void *base = info->hcd[0]->regs; | |
3898 | ||
3899 | oxu_remove(pdev, info->hcd[0]); | |
3900 | oxu_remove(pdev, info->hcd[1]); | |
3901 | ||
3902 | iounmap(base); | |
3903 | release_mem_region(memstart, memlen); | |
3904 | ||
3905 | kfree(info); | |
3906 | platform_set_drvdata(pdev, NULL); | |
3907 | ||
3908 | return 0; | |
3909 | } | |
3910 | ||
3911 | static void oxu_drv_shutdown(struct platform_device *pdev) | |
3912 | { | |
3913 | oxu_drv_remove(pdev); | |
3914 | } | |
3915 | ||
3916 | #if 0 | |
3917 | /* FIXME: TODO */ | |
3918 | static int oxu_drv_suspend(struct device *dev) | |
3919 | { | |
3920 | struct platform_device *pdev = to_platform_device(dev); | |
3921 | struct usb_hcd *hcd = dev_get_drvdata(dev); | |
3922 | ||
3923 | return 0; | |
3924 | } | |
3925 | ||
3926 | static int oxu_drv_resume(struct device *dev) | |
3927 | { | |
3928 | struct platform_device *pdev = to_platform_device(dev); | |
3929 | struct usb_hcd *hcd = dev_get_drvdata(dev); | |
3930 | ||
3931 | return 0; | |
3932 | } | |
3933 | #else | |
3934 | #define oxu_drv_suspend NULL | |
3935 | #define oxu_drv_resume NULL | |
3936 | #endif | |
3937 | ||
3938 | static struct platform_driver oxu_driver = { | |
3939 | .probe = oxu_drv_probe, | |
3940 | .remove = oxu_drv_remove, | |
3941 | .shutdown = oxu_drv_shutdown, | |
3942 | .suspend = oxu_drv_suspend, | |
3943 | .resume = oxu_drv_resume, | |
3944 | .driver = { | |
3945 | .name = "oxu210hp-hcd", | |
3946 | .bus = &platform_bus_type | |
3947 | } | |
3948 | }; | |
3949 | ||
3950 | static int __init oxu_module_init(void) | |
3951 | { | |
3952 | int retval = 0; | |
3953 | ||
3954 | retval = platform_driver_register(&oxu_driver); | |
3955 | if (retval < 0) | |
3956 | return retval; | |
3957 | ||
3958 | return retval; | |
3959 | } | |
3960 | ||
3961 | static void __exit oxu_module_cleanup(void) | |
3962 | { | |
3963 | platform_driver_unregister(&oxu_driver); | |
3964 | } | |
3965 | ||
3966 | module_init(oxu_module_init); | |
3967 | module_exit(oxu_module_cleanup); | |
3968 | ||
3969 | MODULE_DESCRIPTION("Oxford OXU210HP HCD driver - ver. " DRIVER_VERSION); | |
3970 | MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>"); | |
3971 | MODULE_LICENSE("GPL"); |