/*
 * Copyright (c) 2008 Rodolfo Giometti <giometti@linux.it>
 * Copyright (c) 2008 Eurotech S.p.A. <info@eurtech.it>
 *
 * This code is *strongly* based on EHCI-HCD code by David Brownell since
 * the chip is quasi-EHCI compatible.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/usb.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>

#include "../core/hcd.h"

#include <asm/irq.h>
#include <asm/system.h>
#include <asm/unaligned.h>

#include <linux/irq.h>
#include <linux/platform_device.h>

#include "oxu210hp.h"

#define DRIVER_VERSION "0.0.50"

/*
 * Main defines
 */

#define oxu_dbg(oxu, fmt, args...) \
		dev_dbg(oxu_to_hcd(oxu)->self.controller, fmt, ## args)
#define oxu_err(oxu, fmt, args...) \
		dev_err(oxu_to_hcd(oxu)->self.controller, fmt, ## args)
#define oxu_info(oxu, fmt, args...) \
		dev_info(oxu_to_hcd(oxu)->self.controller, fmt, ## args)

static inline struct usb_hcd *oxu_to_hcd(struct oxu_hcd *oxu)
{
	return container_of((void *) oxu, struct usb_hcd, hcd_priv);
}

static inline struct oxu_hcd *hcd_to_oxu(struct usb_hcd *hcd)
{
	return (struct oxu_hcd *) (hcd->hcd_priv);
}

/*
 * Debug stuff
 */

#undef OXU_URB_TRACE
#undef OXU_VERBOSE_DEBUG

#ifdef OXU_VERBOSE_DEBUG
#define oxu_vdbg			oxu_dbg
#else
#define oxu_vdbg(oxu, fmt, args...)	/* Nop */
#endif

#ifdef DEBUG

static int __attribute__((__unused__))
dbg_status_buf(char *buf, unsigned len, const char *label, u32 status)
{
	return scnprintf(buf, len, "%s%sstatus %04x%s%s%s%s%s%s%s%s%s%s",
		label, label[0] ? " " : "", status,
		(status & STS_ASS) ? " Async" : "",
		(status & STS_PSS) ? " Periodic" : "",
		(status & STS_RECL) ? " Recl" : "",
		(status & STS_HALT) ? " Halt" : "",
		(status & STS_IAA) ? " IAA" : "",
		(status & STS_FATAL) ? " FATAL" : "",
		(status & STS_FLR) ? " FLR" : "",
		(status & STS_PCD) ? " PCD" : "",
		(status & STS_ERR) ? " ERR" : "",
		(status & STS_INT) ? " INT" : ""
		);
}

static int __attribute__((__unused__))
dbg_intr_buf(char *buf, unsigned len, const char *label, u32 enable)
{
	return scnprintf(buf, len, "%s%sintrenable %02x%s%s%s%s%s%s",
		label, label[0] ? " " : "", enable,
		(enable & STS_IAA) ? " IAA" : "",
		(enable & STS_FATAL) ? " FATAL" : "",
		(enable & STS_FLR) ? " FLR" : "",
		(enable & STS_PCD) ? " PCD" : "",
		(enable & STS_ERR) ? " ERR" : "",
		(enable & STS_INT) ? " INT" : ""
		);
}

static const char *const fls_strings[] =
	{ "1024", "512", "256", "??" };

static int dbg_command_buf(char *buf, unsigned len,
				const char *label, u32 command)
{
	return scnprintf(buf, len,
		"%s%scommand %06x %s=%d ithresh=%d%s%s%s%s period=%s%s %s",
		label, label[0] ? " " : "", command,
		(command & CMD_PARK) ? "park" : "(park)",
		CMD_PARK_CNT(command),
		(command >> 16) & 0x3f,
		(command & CMD_LRESET) ? " LReset" : "",
		(command & CMD_IAAD) ? " IAAD" : "",
		(command & CMD_ASE) ? " Async" : "",
		(command & CMD_PSE) ? " Periodic" : "",
		fls_strings[(command >> 2) & 0x3],
		(command & CMD_RESET) ? " Reset" : "",
		(command & CMD_RUN) ? "RUN" : "HALT"
		);
}

static int dbg_port_buf(char *buf, unsigned len, const char *label,
				int port, u32 status)
{
	char	*sig;

	/* signaling state */
	switch (status & (3 << 10)) {
	case 0 << 10:
		sig = "se0";
		break;
	case 1 << 10:
		sig = "k";	/* low speed */
		break;
	case 2 << 10:
		sig = "j";
		break;
	default:
		sig = "?";
		break;
	}

	return scnprintf(buf, len,
		"%s%sport %d status %06x%s%s sig=%s%s%s%s%s%s%s%s%s%s",
		label, label[0] ? " " : "", port, status,
		(status & PORT_POWER) ? " POWER" : "",
		(status & PORT_OWNER) ? " OWNER" : "",
		sig,
		(status & PORT_RESET) ? " RESET" : "",
		(status & PORT_SUSPEND) ? " SUSPEND" : "",
		(status & PORT_RESUME) ? " RESUME" : "",
		(status & PORT_OCC) ? " OCC" : "",
		(status & PORT_OC) ? " OC" : "",
		(status & PORT_PEC) ? " PEC" : "",
		(status & PORT_PE) ? " PE" : "",
		(status & PORT_CSC) ? " CSC" : "",
		(status & PORT_CONNECT) ? " CONNECT" : ""
		);
}

#else

static inline int __attribute__((__unused__))
dbg_status_buf(char *buf, unsigned len, const char *label, u32 status)
{ return 0; }

static inline int __attribute__((__unused__))
dbg_command_buf(char *buf, unsigned len, const char *label, u32 command)
{ return 0; }

static inline int __attribute__((__unused__))
dbg_intr_buf(char *buf, unsigned len, const char *label, u32 enable)
{ return 0; }

static inline int __attribute__((__unused__))
dbg_port_buf(char *buf, unsigned len, const char *label, int port, u32 status)
{ return 0; }

#endif /* DEBUG */

/* functions have the "wrong" filename when they're output... */
#define dbg_status(oxu, label, status) { \
	char _buf[80]; \
	dbg_status_buf(_buf, sizeof _buf, label, status); \
	oxu_dbg(oxu, "%s\n", _buf); \
}

#define dbg_cmd(oxu, label, command) { \
	char _buf[80]; \
	dbg_command_buf(_buf, sizeof _buf, label, command); \
	oxu_dbg(oxu, "%s\n", _buf); \
}

#define dbg_port(oxu, label, port, status) { \
	char _buf[80]; \
	dbg_port_buf(_buf, sizeof _buf, label, port, status); \
	oxu_dbg(oxu, "%s\n", _buf); \
}

/*
 * Module parameters
 */

/* Initial IRQ latency: faster than hw default */
static int log2_irq_thresh;		/* 0 to 6 */
module_param(log2_irq_thresh, int, S_IRUGO);
MODULE_PARM_DESC(log2_irq_thresh, "log2 IRQ latency, 1-64 microframes");

/* Initial park setting: slower than hw default */
static unsigned park;
module_param(park, uint, S_IRUGO);
MODULE_PARM_DESC(park, "park setting; 1-3 back-to-back async packets");

/* For flaky hardware, ignore overcurrent indicators */
static int ignore_oc;
module_param(ignore_oc, bool, S_IRUGO);
MODULE_PARM_DESC(ignore_oc, "ignore bogus hardware overcurrent indications");

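/* Illustrative usage only (assuming the driver is built as a module named
 * oxu210hp-hcd; the values below are examples, not recommendations):
 *
 *   modprobe oxu210hp-hcd log2_irq_thresh=3 park=2 ignore_oc=1
 *
 * would request an interrupt latency of 2^3 = 8 microframes, park mode
 * with 2 back-to-back async packets, and suppression of overcurrent
 * reports from the root hub ports.
 */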

static void ehci_work(struct oxu_hcd *oxu);
static int oxu_hub_control(struct usb_hcd *hcd,
				u16 typeReq, u16 wValue, u16 wIndex,
				char *buf, u16 wLength);

/*
 * Local functions
 */

/* Low level read/write registers functions */
static inline u32 oxu_readl(void *base, u32 reg)
{
	return readl(base + reg);
}

static inline void oxu_writel(void *base, u32 reg, u32 val)
{
	writel(val, base + reg);
}

static inline void timer_action_done(struct oxu_hcd *oxu,
					enum ehci_timer_action action)
{
	clear_bit(action, &oxu->actions);
}

static inline void timer_action(struct oxu_hcd *oxu,
				enum ehci_timer_action action)
{
	if (!test_and_set_bit(action, &oxu->actions)) {
		unsigned long t;

		switch (action) {
		case TIMER_IAA_WATCHDOG:
			t = EHCI_IAA_JIFFIES;
			break;
		case TIMER_IO_WATCHDOG:
			t = EHCI_IO_JIFFIES;
			break;
		case TIMER_ASYNC_OFF:
			t = EHCI_ASYNC_JIFFIES;
			break;
		case TIMER_ASYNC_SHRINK:
		default:
			t = EHCI_SHRINK_JIFFIES;
			break;
		}
		t += jiffies;
		/* all timings except IAA watchdog can be overridden.
		 * async queue SHRINK often precedes IAA.  while it's ready
		 * to go OFF neither can matter, and afterwards the IO
		 * watchdog stops unless there's still periodic traffic.
		 */
		if (action != TIMER_IAA_WATCHDOG
				&& t > oxu->watchdog.expires
				&& timer_pending(&oxu->watchdog))
			return;
		mod_timer(&oxu->watchdog, t);
	}
}

/*
 * handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done).  There are two failure modes: "usec" have passed (major
 * hardware flakeout), or the register reads as all-ones (hardware removed).
 *
 * That last failure should only happen in cases like physical cardbus eject
 * before driver shutdown.  But it also seems to be caused by bugs in cardbus
 * bridge shutdown: shutting down the bridge before the devices using it.
 */
static int handshake(struct oxu_hcd *oxu, void __iomem *ptr,
					u32 mask, u32 done, int usec)
{
	u32 result;

	do {
		result = readl(ptr);
		if (result == ~(u32)0)		/* card removed */
			return -ENODEV;
		result &= mask;
		if (result == done)
			return 0;
		udelay(1);
		usec--;
	} while (usec > 0);
	return -ETIMEDOUT;
}

/* Force HC to halt state from unknown (EHCI spec section 2.3) */
static int ehci_halt(struct oxu_hcd *oxu)
{
	u32 temp = readl(&oxu->regs->status);

	/* disable any irqs left enabled by previous code */
	writel(0, &oxu->regs->intr_enable);

	if ((temp & STS_HALT) != 0)
		return 0;

	temp = readl(&oxu->regs->command);
	temp &= ~CMD_RUN;
	writel(temp, &oxu->regs->command);
	return handshake(oxu, &oxu->regs->status,
			  STS_HALT, STS_HALT, 16 * 125);
}

/* Put TDI/ARC silicon into EHCI mode */
static void tdi_reset(struct oxu_hcd *oxu)
{
	u32 __iomem *reg_ptr;
	u32 tmp;

	reg_ptr = (u32 __iomem *)(((u8 __iomem *)oxu->regs) + 0x68);
	tmp = readl(reg_ptr);
	tmp |= 0x3;
	writel(tmp, reg_ptr);
}

/* Reset a non-running (STS_HALT == 1) controller */
static int ehci_reset(struct oxu_hcd *oxu)
{
	int retval;
	u32 command = readl(&oxu->regs->command);

	command |= CMD_RESET;
	dbg_cmd(oxu, "reset", command);
	writel(command, &oxu->regs->command);
	oxu_to_hcd(oxu)->state = HC_STATE_HALT;
	oxu->next_statechange = jiffies;
	retval = handshake(oxu, &oxu->regs->command,
			    CMD_RESET, 0, 250 * 1000);

	if (retval)
		return retval;

	tdi_reset(oxu);

	return retval;
}

/* Idle the controller (from running) */
static void ehci_quiesce(struct oxu_hcd *oxu)
{
	u32 temp;

#ifdef DEBUG
	if (!HC_IS_RUNNING(oxu_to_hcd(oxu)->state))
		BUG();
#endif

	/* wait for any schedule enables/disables to take effect */
	temp = readl(&oxu->regs->command) << 10;
	temp &= STS_ASS | STS_PSS;
	if (handshake(oxu, &oxu->regs->status, STS_ASS | STS_PSS,
				temp, 16 * 125) != 0) {
		oxu_to_hcd(oxu)->state = HC_STATE_HALT;
		return;
	}

	/* then disable anything that's still active */
	temp = readl(&oxu->regs->command);
	temp &= ~(CMD_ASE | CMD_IAAD | CMD_PSE);
	writel(temp, &oxu->regs->command);

	/* hardware can take 16 microframes to turn off ... */
	if (handshake(oxu, &oxu->regs->status, STS_ASS | STS_PSS,
				0, 16 * 125) != 0) {
		oxu_to_hcd(oxu)->state = HC_STATE_HALT;
		return;
	}
}

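/* After a port reset attempt: report whether the attached device enumerated
 * as high speed (port enabled) or is left to the root hub's TT (full/low
 * speed).  Returns the port status that was passed in.
 */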
static int check_reset_complete(struct oxu_hcd *oxu, int index,
				u32 __iomem *status_reg, int port_status)
{
	if (!(port_status & PORT_CONNECT)) {
		oxu->reset_done[index] = 0;
		return port_status;
	}

	/* if reset finished and it's still not enabled -- handoff */
	if (!(port_status & PORT_PE)) {
		oxu_dbg(oxu, "Failed to enable port %d on root hub TT\n",
				index+1);
		return port_status;
	} else
		oxu_dbg(oxu, "port %d high speed\n", index + 1);

	return port_status;
}

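/* Build the root hub's hub descriptor from the HCSPARAMS capabilities:
 * port count, per-port power switching and overcurrent reporting.
 */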
static void ehci_hub_descriptor(struct oxu_hcd *oxu,
				struct usb_hub_descriptor *desc)
{
	int ports = HCS_N_PORTS(oxu->hcs_params);
	u16 temp;

	desc->bDescriptorType = 0x29;
	desc->bPwrOn2PwrGood = 10;	/* oxu 1.0, 2.3.9 says 20ms max */
	desc->bHubContrCurrent = 0;

	desc->bNbrPorts = ports;
	temp = 1 + (ports / 8);
	desc->bDescLength = 7 + 2 * temp;

	/* two bitmaps:  ports removable, and usb 1.0 legacy PortPwrCtrlMask */
	memset(&desc->bitmap[0], 0, temp);
	memset(&desc->bitmap[temp], 0xff, temp);

	temp = 0x0008;			/* per-port overcurrent reporting */
	if (HCS_PPC(oxu->hcs_params))
		temp |= 0x0001;		/* per-port power control */
	else
		temp |= 0x0002;		/* no power switching */
	desc->wHubCharacteristics = (__force __u16)cpu_to_le16(temp);
}


/* Allocate an OXU210HP on-chip memory data buffer
 *
 * An on-chip memory data buffer is required for each OXU210HP USB transfer.
 * Each transfer descriptor has one or more on-chip memory data buffers.
 *
 * Data buffers are allocated from a fixed-size pool of data blocks.
 * To minimise fragmentation and give reasonable memory utilisation,
 * data buffers are allocated with sizes that are power-of-2 multiples of
 * the block size, starting at an address that is a multiple of the
 * allocated size.
 *
 * FIXME: callers of this function require a buffer to be allocated for
 * len=0.  This is a waste of on-chip memory and should be fixed.  Then this
 * function should be changed to not allocate a buffer for len=0.
 */
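/* Illustrative example (the real block size is BUFFER_SIZE from oxu210hp.h;
 * the value used here is only an assumption): with 1 KiB blocks, a
 * 5000-byte transfer needs n_blocks = 5, which is rounded up to
 * a_blocks = 8, so an 8-block region of the on-chip pool is reserved.
 */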
static int oxu_buf_alloc(struct oxu_hcd *oxu, struct ehci_qtd *qtd, int len)
{
	int n_blocks;	/* minimum blocks needed to hold len */
	int a_blocks;	/* blocks allocated */
	int i, j;

	/* Don't allocate bigger than supported */
	if (len > BUFFER_SIZE * BUFFER_NUM) {
		oxu_err(oxu, "buffer too big (%d)\n", len);
		return -ENOMEM;
	}

	spin_lock(&oxu->mem_lock);

	/* Number of blocks needed to hold len */
	n_blocks = (len + BUFFER_SIZE - 1) / BUFFER_SIZE;

	/* Round the number of blocks up to the power of 2 */
	for (a_blocks = 1; a_blocks < n_blocks; a_blocks <<= 1)
		;

	/* Find a suitable available data buffer */
	for (i = 0; i < BUFFER_NUM;
			i += max(a_blocks, (int)oxu->db_used[i])) {

		/* Check all the required blocks are available */
		for (j = 0; j < a_blocks; j++)
			if (oxu->db_used[i + j])
				break;

		if (j != a_blocks)
			continue;

		/* Allocate blocks found! */
		qtd->buffer = (void *) &oxu->mem->db_pool[i];
		qtd->buffer_dma = virt_to_phys(qtd->buffer);

		qtd->qtd_buffer_len = BUFFER_SIZE * a_blocks;
		oxu->db_used[i] = a_blocks;

		spin_unlock(&oxu->mem_lock);

		return 0;
	}

	/* Failed */

	spin_unlock(&oxu->mem_lock);

	return -ENOMEM;
}

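/* Return a qtd's on-chip data buffer to the pool and clear its bookkeeping */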
static void oxu_buf_free(struct oxu_hcd *oxu, struct ehci_qtd *qtd)
{
	int index;

	spin_lock(&oxu->mem_lock);

	index = (qtd->buffer - (void *) &oxu->mem->db_pool[0])
							/ BUFFER_SIZE;
	oxu->db_used[index] = 0;
	qtd->qtd_buffer_len = 0;
	qtd->buffer_dma = 0;
	qtd->buffer = NULL;

	spin_unlock(&oxu->mem_lock);

	return;
}

static inline void ehci_qtd_init(struct ehci_qtd *qtd, dma_addr_t dma)
{
	memset(qtd, 0, sizeof *qtd);
	qtd->qtd_dma = dma;
	qtd->hw_token = cpu_to_le32(QTD_STS_HALT);
	qtd->hw_next = EHCI_LIST_END;
	qtd->hw_alt_next = EHCI_LIST_END;
	INIT_LIST_HEAD(&qtd->qtd_list);
}

static inline void oxu_qtd_free(struct oxu_hcd *oxu, struct ehci_qtd *qtd)
{
	int index;

	if (qtd->buffer)
		oxu_buf_free(oxu, qtd);

	spin_lock(&oxu->mem_lock);

	index = qtd - &oxu->mem->qtd_pool[0];
	oxu->qtd_used[index] = 0;

	spin_unlock(&oxu->mem_lock);

	return;
}

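/* Take a free qtd from the static on-chip pool and initialise it;
 * returns NULL when all QTD_NUM entries are in use.
 */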
static struct ehci_qtd *ehci_qtd_alloc(struct oxu_hcd *oxu)
{
	int i;
	struct ehci_qtd *qtd = NULL;

	spin_lock(&oxu->mem_lock);

	for (i = 0; i < QTD_NUM; i++)
		if (!oxu->qtd_used[i])
			break;

	if (i < QTD_NUM) {
		qtd = (struct ehci_qtd *) &oxu->mem->qtd_pool[i];
		memset(qtd, 0, sizeof *qtd);

		qtd->hw_token = cpu_to_le32(QTD_STS_HALT);
		qtd->hw_next = EHCI_LIST_END;
		qtd->hw_alt_next = EHCI_LIST_END;
		INIT_LIST_HEAD(&qtd->qtd_list);

		qtd->qtd_dma = virt_to_phys(qtd);

		oxu->qtd_used[i] = 1;
	}

	spin_unlock(&oxu->mem_lock);

	return qtd;
}

static void oxu_qh_free(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
	int index;

	spin_lock(&oxu->mem_lock);

	index = qh - &oxu->mem->qh_pool[0];
	oxu->qh_used[index] = 0;

	spin_unlock(&oxu->mem_lock);

	return;
}

static void qh_destroy(struct kref *kref)
{
	struct ehci_qh *qh = container_of(kref, struct ehci_qh, kref);
	struct oxu_hcd *oxu = qh->oxu;

	/* clean qtds first, and know this is not linked */
	if (!list_empty(&qh->qtd_list) || qh->qh_next.ptr) {
		oxu_dbg(oxu, "unused qh not empty!\n");
		BUG();
	}
	if (qh->dummy)
		oxu_qtd_free(oxu, qh->dummy);
	oxu_qh_free(oxu, qh);
}

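/* Take a free queue head from the static pool and set up its refcount,
 * DMA address and dummy qtd; returns NULL if no qh (or no dummy qtd)
 * is available.
 */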
static struct ehci_qh *oxu_qh_alloc(struct oxu_hcd *oxu)
{
	int i;
	struct ehci_qh *qh = NULL;

	spin_lock(&oxu->mem_lock);

	for (i = 0; i < QHEAD_NUM; i++)
		if (!oxu->qh_used[i])
			break;

	if (i < QHEAD_NUM) {
		qh = (struct ehci_qh *) &oxu->mem->qh_pool[i];
		memset(qh, 0, sizeof *qh);

		kref_init(&qh->kref);
		qh->oxu = oxu;
		qh->qh_dma = virt_to_phys(qh);
		INIT_LIST_HEAD(&qh->qtd_list);

		/* dummy td enables safe urb queuing */
		qh->dummy = ehci_qtd_alloc(oxu);
		if (qh->dummy == NULL) {
			oxu_dbg(oxu, "no dummy td\n");
			oxu->qh_used[i] = 0;
			spin_unlock(&oxu->mem_lock);

			return NULL;
		}

		oxu->qh_used[i] = 1;
	}

	spin_unlock(&oxu->mem_lock);

	return qh;
}

/* to share a qh (cpu threads, or hc) */
static inline struct ehci_qh *qh_get(struct ehci_qh *qh)
{
	kref_get(&qh->kref);
	return qh;
}

static inline void qh_put(struct ehci_qh *qh)
{
	kref_put(&qh->kref, qh_destroy);
}

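/* "murbs" are driver-private wrapper URBs: transfers too large for a single
 * on-chip buffer are split across several of them, with ->main pointing
 * back at the original URB and ->last marking the final piece (see how the
 * completion path below uses murb->main and murb->last to give back the
 * original URB only once).
 */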
static void oxu_murb_free(struct oxu_hcd *oxu, struct oxu_murb *murb)
{
	int index;

	spin_lock(&oxu->mem_lock);

	index = murb - &oxu->murb_pool[0];
	oxu->murb_used[index] = 0;

	spin_unlock(&oxu->mem_lock);

	return;
}

static struct oxu_murb *oxu_murb_alloc(struct oxu_hcd *oxu)
{
	int i;
	struct oxu_murb *murb = NULL;

	spin_lock(&oxu->mem_lock);

	for (i = 0; i < MURB_NUM; i++)
		if (!oxu->murb_used[i])
			break;

	if (i < MURB_NUM) {
		murb = &(oxu->murb_pool)[i];

		oxu->murb_used[i] = 1;
	}

	spin_unlock(&oxu->mem_lock);

	return murb;
}

/* The queue heads and transfer descriptors are managed from pools tied
 * to each of the "per device" structures.
 * This is the initialisation and cleanup code.
 */
static void ehci_mem_cleanup(struct oxu_hcd *oxu)
{
	kfree(oxu->murb_pool);
	oxu->murb_pool = NULL;

	if (oxu->async)
		qh_put(oxu->async);
	oxu->async = NULL;

	del_timer(&oxu->urb_timer);

	oxu->periodic = NULL;

	/* shadow periodic table */
	kfree(oxu->pshadow);
	oxu->pshadow = NULL;
}

/* Remember to add cleanup code (above) if you add anything here.
 */
static int ehci_mem_init(struct oxu_hcd *oxu, gfp_t flags)
{
	int i;

	for (i = 0; i < oxu->periodic_size; i++)
		oxu->mem->frame_list[i] = EHCI_LIST_END;
	for (i = 0; i < QHEAD_NUM; i++)
		oxu->qh_used[i] = 0;
	for (i = 0; i < QTD_NUM; i++)
		oxu->qtd_used[i] = 0;

	oxu->murb_pool = kcalloc(MURB_NUM, sizeof(struct oxu_murb), flags);
	if (!oxu->murb_pool)
		goto fail;

	for (i = 0; i < MURB_NUM; i++)
		oxu->murb_used[i] = 0;

	oxu->async = oxu_qh_alloc(oxu);
	if (!oxu->async)
		goto fail;

	oxu->periodic = (__le32 *) &oxu->mem->frame_list;
	oxu->periodic_dma = virt_to_phys(oxu->periodic);

	for (i = 0; i < oxu->periodic_size; i++)
		oxu->periodic[i] = EHCI_LIST_END;

	/* software shadow of hardware table */
	oxu->pshadow = kcalloc(oxu->periodic_size, sizeof(void *), flags);
	if (oxu->pshadow != NULL)
		return 0;

fail:
	oxu_dbg(oxu, "couldn't init memory\n");
	ehci_mem_cleanup(oxu);
	return -ENOMEM;
}

/* Fill a qtd, returning how much of the buffer we were able to queue up.
 */
static int qtd_fill(struct ehci_qtd *qtd, dma_addr_t buf, size_t len,
				int token, int maxpacket)
{
	int i, count;
	u64 addr = buf;

	/* one buffer entry per 4K ... first might be short or unaligned */
	qtd->hw_buf[0] = cpu_to_le32((u32)addr);
	qtd->hw_buf_hi[0] = cpu_to_le32((u32)(addr >> 32));
	count = 0x1000 - (buf & 0x0fff);	/* rest of that page */
	if (likely(len < count))		/* ... iff needed */
		count = len;
	else {
		buf += 0x1000;
		buf &= ~0x0fff;

		/* per-qtd limit: from 16K to 20K (best alignment) */
		for (i = 1; count < len && i < 5; i++) {
			addr = buf;
			qtd->hw_buf[i] = cpu_to_le32((u32)addr);
			qtd->hw_buf_hi[i] = cpu_to_le32((u32)(addr >> 32));
			buf += 0x1000;
			if ((count + 0x1000) < len)
				count += 0x1000;
			else
				count = len;
		}

		/* short packets may only terminate transfers */
		if (count != len)
			count -= (count % maxpacket);
	}
	qtd->hw_token = cpu_to_le32((count << 16) | token);
	qtd->length = count;

	return count;
}

static inline void qh_update(struct oxu_hcd *oxu,
				struct ehci_qh *qh, struct ehci_qtd *qtd)
{
	/* writes to an active overlay are unsafe */
	BUG_ON(qh->qh_state != QH_STATE_IDLE);

	qh->hw_qtd_next = QTD_NEXT(qtd->qtd_dma);
	qh->hw_alt_next = EHCI_LIST_END;

	/* Except for control endpoints, we make hardware maintain data
	 * toggle (like OHCI) ... here (re)initialize the toggle in the QH,
	 * and set the pseudo-toggle in udev. Only usb_clear_halt() will
	 * ever clear it.
	 */
	if (!(qh->hw_info1 & cpu_to_le32(1 << 14))) {
		unsigned is_out, epnum;

		is_out = !(qtd->hw_token & cpu_to_le32(1 << 8));
		epnum = (le32_to_cpup(&qh->hw_info1) >> 8) & 0x0f;
		if (unlikely(!usb_gettoggle(qh->dev, epnum, is_out))) {
			qh->hw_token &= ~__constant_cpu_to_le32(QTD_TOGGLE);
			usb_settoggle(qh->dev, epnum, is_out, 1);
		}
	}

	/* HC must see latest qtd and qh data before we clear ACTIVE+HALT */
	wmb();
	qh->hw_token &= __constant_cpu_to_le32(QTD_TOGGLE | QTD_STS_PING);
}

/* If it weren't for a common silicon quirk (writing the dummy into the qh
 * overlay, so qh->hw_token wrongly becomes inactive/halted), only fault
 * recovery (including urb dequeue) would need software changes to a QH...
 */
static void qh_refresh(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
	struct ehci_qtd *qtd;

	if (list_empty(&qh->qtd_list))
		qtd = qh->dummy;
	else {
		qtd = list_entry(qh->qtd_list.next,
				struct ehci_qtd, qtd_list);
		/* first qtd may already be partially processed */
		if (cpu_to_le32(qtd->qtd_dma) == qh->hw_current)
			qtd = NULL;
	}

	if (qtd)
		qh_update(oxu, qh, qtd);
}

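/* Map the completion bits of one qtd's token onto urb->status, and account
 * the bytes actually transferred for IN/OUT data stages.
 */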
static void qtd_copy_status(struct oxu_hcd *oxu, struct urb *urb,
				size_t length, u32 token)
{
	/* count IN/OUT bytes, not SETUP (even short packets) */
	if (likely(QTD_PID(token) != 2))
		urb->actual_length += length - QTD_LENGTH(token);

	/* don't modify error codes */
	if (unlikely(urb->status != -EINPROGRESS))
		return;

	/* force cleanup after short read; not always an error */
	if (unlikely(IS_SHORT_READ(token)))
		urb->status = -EREMOTEIO;

	/* serious "can't proceed" faults reported by the hardware */
	if (token & QTD_STS_HALT) {
		if (token & QTD_STS_BABBLE) {
			/* FIXME "must" disable babbling device's port too */
			urb->status = -EOVERFLOW;
		} else if (token & QTD_STS_MMF) {
			/* fs/ls interrupt xfer missed the complete-split */
			urb->status = -EPROTO;
		} else if (token & QTD_STS_DBE) {
			urb->status = (QTD_PID(token) == 1) /* IN ? */
				? -ENOSR	/* hc couldn't read data */
				: -ECOMM;	/* hc couldn't write data */
		} else if (token & QTD_STS_XACT) {
			/* timeout, bad crc, wrong PID, etc; retried */
			if (QTD_CERR(token))
				urb->status = -EPIPE;
			else {
				oxu_dbg(oxu, "devpath %s ep%d%s 3strikes\n",
					urb->dev->devpath,
					usb_pipeendpoint(urb->pipe),
					usb_pipein(urb->pipe) ? "in" : "out");
				urb->status = -EPROTO;
			}
		/* CERR nonzero + no errors + halt --> stall */
		} else if (QTD_CERR(token))
			urb->status = -EPIPE;
		else	/* unknown */
			urb->status = -EPROTO;

		oxu_vdbg(oxu, "dev%d ep%d%s qtd token %08x --> status %d\n",
			usb_pipedevice(urb->pipe),
			usb_pipeendpoint(urb->pipe),
			usb_pipein(urb->pipe) ? "in" : "out",
			token, urb->status);
	}
}

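/* Finish an urb: update the periodic bookkeeping and give the urb back to
 * the USB core.  Called with oxu->lock held; the lock is dropped around
 * the giveback because complete() may re-enter this HCD.
 */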
static void ehci_urb_done(struct oxu_hcd *oxu, struct urb *urb)
__releases(oxu->lock)
__acquires(oxu->lock)
{
	if (likely(urb->hcpriv != NULL)) {
		struct ehci_qh *qh = (struct ehci_qh *) urb->hcpriv;

		/* S-mask in a QH means it's an interrupt urb */
		if ((qh->hw_info2 & __constant_cpu_to_le32(QH_SMASK)) != 0) {

			/* ... update hc-wide periodic stats (for usbfs) */
			oxu_to_hcd(oxu)->self.bandwidth_int_reqs--;
		}
		qh_put(qh);
	}

	urb->hcpriv = NULL;
	switch (urb->status) {
	case -EINPROGRESS:		/* success */
		urb->status = 0;
	default:			/* fault */
		break;
	case -EREMOTEIO:		/* fault or normal */
		if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
			urb->status = 0;
		break;
	case -ECONNRESET:		/* canceled */
	case -ENOENT:
		break;
	}

#ifdef OXU_URB_TRACE
	oxu_dbg(oxu, "%s %s urb %p ep%d%s status %d len %d/%d\n",
		__func__, urb->dev->devpath, urb,
		usb_pipeendpoint(urb->pipe),
		usb_pipein(urb->pipe) ? "in" : "out",
		urb->status,
		urb->actual_length, urb->transfer_buffer_length);
#endif

	/* complete() can reenter this HCD */
	spin_unlock(&oxu->lock);
	usb_hcd_giveback_urb(oxu_to_hcd(oxu), urb, urb->status);
	spin_lock(&oxu->lock);
}

static void start_unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh);
static void unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh);

static void intr_deschedule(struct oxu_hcd *oxu, struct ehci_qh *qh);
static int qh_schedule(struct oxu_hcd *oxu, struct ehci_qh *qh);

#define HALT_BIT __constant_cpu_to_le32(QTD_STS_HALT)

/* Process and free completed qtds for a qh, returning URBs to drivers.
 * Chases up to qh->hw_current.  Returns number of completions called,
 * indicating how much "real" work we did.
 */
static unsigned qh_completions(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
	struct ehci_qtd *last = NULL, *end = qh->dummy;
	struct list_head *entry, *tmp;
	int stopped;
	unsigned count = 0;
	int do_status = 0;
	u8 state;
	struct oxu_murb *murb = NULL;

	if (unlikely(list_empty(&qh->qtd_list)))
		return count;

	/* completions (or tasks on other cpus) must never clobber HALT
	 * till we've gone through and cleaned everything up, even when
	 * they add urbs to this qh's queue or mark them for unlinking.
	 *
	 * NOTE:  unlinking expects to be done in queue order.
	 */
	state = qh->qh_state;
	qh->qh_state = QH_STATE_COMPLETING;
	stopped = (state == QH_STATE_IDLE);

	/* remove de-activated QTDs from front of queue.
	 * after faults (including short reads), cleanup this urb
	 * then let the queue advance.
	 * if queue is stopped, handles unlinks.
	 */
	list_for_each_safe(entry, tmp, &qh->qtd_list) {
		struct ehci_qtd	*qtd;
		struct urb *urb;
		u32 token = 0;

		qtd = list_entry(entry, struct ehci_qtd, qtd_list);
		urb = qtd->urb;

		/* Clean up any state from previous QTD ...*/
		if (last) {
			if (likely(last->urb != urb)) {
				if (last->urb->complete == NULL) {
					murb = (struct oxu_murb *) last->urb;
					last->urb = murb->main;
					if (murb->last) {
						ehci_urb_done(oxu, last->urb);
						count++;
					}
					oxu_murb_free(oxu, murb);
				} else {
					ehci_urb_done(oxu, last->urb);
					count++;
				}
			}
			oxu_qtd_free(oxu, last);
			last = NULL;
		}

		/* ignore urbs submitted during completions we reported */
		if (qtd == end)
			break;

		/* hardware copies qtd out of qh overlay */
		rmb();
		token = le32_to_cpu(qtd->hw_token);

		/* always clean up qtds the hc de-activated */
		if ((token & QTD_STS_ACTIVE) == 0) {

			if ((token & QTD_STS_HALT) != 0) {
				stopped = 1;

			/* magic dummy for some short reads; qh won't advance.
			 * that silicon quirk can kick in with this dummy too.
			 */
			} else if (IS_SHORT_READ(token) &&
					!(qtd->hw_alt_next & EHCI_LIST_END)) {
				stopped = 1;
				goto halt;
			}

		/* stop scanning when we reach qtds the hc is using */
		} else if (likely(!stopped &&
				HC_IS_RUNNING(oxu_to_hcd(oxu)->state))) {
			break;

		} else {
			stopped = 1;

			if (unlikely(!HC_IS_RUNNING(oxu_to_hcd(oxu)->state)))
				urb->status = -ESHUTDOWN;

			/* ignore active urbs unless some previous qtd
			 * for the urb faulted (including short read) or
			 * its urb was canceled.  we may patch qh or qtds.
			 */
			if (likely(urb->status == -EINPROGRESS))
				continue;

			/* issue status after short control reads */
			if (unlikely(do_status != 0)
					&& QTD_PID(token) == 0 /* OUT */) {
				do_status = 0;
				continue;
			}

			/* token in overlay may be most current */
			if (state == QH_STATE_IDLE
					&& cpu_to_le32(qtd->qtd_dma)
						== qh->hw_current)
				token = le32_to_cpu(qh->hw_token);

			/* force halt for unlinked or blocked qh, so we'll
			 * patch the qh later and so that completions can't
			 * activate it while we "know" it's stopped.
			 */
			if ((HALT_BIT & qh->hw_token) == 0) {
halt:
				qh->hw_token |= HALT_BIT;
				wmb();
			}
		}

		/* Remove it from the queue */
		qtd_copy_status(oxu, urb->complete ?
					urb : ((struct oxu_murb *) urb)->main,
				qtd->length, token);
		if ((usb_pipein(qtd->urb->pipe)) &&
				(NULL != qtd->transfer_buffer))
			memcpy(qtd->transfer_buffer, qtd->buffer, qtd->length);
		do_status = (urb->status == -EREMOTEIO)
				&& usb_pipecontrol(urb->pipe);

		if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
			last = list_entry(qtd->qtd_list.prev,
					struct ehci_qtd, qtd_list);
			last->hw_next = qtd->hw_next;
		}
		list_del(&qtd->qtd_list);
		last = qtd;
	}

	/* last urb's completion might still need calling */
	if (likely(last != NULL)) {
		if (last->urb->complete == NULL) {
			murb = (struct oxu_murb *) last->urb;
			last->urb = murb->main;
			if (murb->last) {
				ehci_urb_done(oxu, last->urb);
				count++;
			}
			oxu_murb_free(oxu, murb);
		} else {
			ehci_urb_done(oxu, last->urb);
			count++;
		}
		oxu_qtd_free(oxu, last);
	}

	/* restore original state; caller must unlink or relink */
	qh->qh_state = state;

	/* be sure the hardware's done with the qh before refreshing
	 * it after fault cleanup, or recovering from silicon wrongly
	 * overlaying the dummy qtd (which reduces DMA chatter).
	 */
	if (stopped != 0 || qh->hw_qtd_next == EHCI_LIST_END) {
		switch (state) {
		case QH_STATE_IDLE:
			qh_refresh(oxu, qh);
			break;
		case QH_STATE_LINKED:
			/* should be rare for periodic transfers,
			 * except maybe high bandwidth ...
			 */
			if ((__constant_cpu_to_le32(QH_SMASK)
					& qh->hw_info2) != 0) {
				intr_deschedule(oxu, qh);
				(void) qh_schedule(oxu, qh);
			} else
				unlink_async(oxu, qh);
			break;
		/* otherwise, unlink already started */
		}
	}

	return count;
}

/* High bandwidth multiplier, as encoded in highspeed endpoint descriptors */
#define hb_mult(wMaxPacketSize)		(1 + (((wMaxPacketSize) >> 11) & 0x03))
/* ... and packet size, for any kind of endpoint descriptor */
#define max_packet(wMaxPacketSize)	((wMaxPacketSize) & 0x07ff)
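/* For example, a high-speed, high-bandwidth interrupt endpoint advertising
 * wMaxPacketSize 0x1400 decodes as hb_mult() == 3 transactions per
 * microframe of max_packet() == 1024 bytes each.
 */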

/* Reverse of qh_urb_transaction: free a list of TDs.
 * used for cleanup after errors, before HC sees an URB's TDs.
 */
static void qtd_list_free(struct oxu_hcd *oxu,
				struct urb *urb, struct list_head *qtd_list)
{
	struct list_head *entry, *temp;

	list_for_each_safe(entry, temp, qtd_list) {
		struct ehci_qtd	*qtd;

		qtd = list_entry(entry, struct ehci_qtd, qtd_list);
		list_del(&qtd->qtd_list);
		oxu_qtd_free(oxu, qtd);
	}
}

/* Create a list of filled qtds for this URB; won't link into qh.
 */
static struct list_head *qh_urb_transaction(struct oxu_hcd *oxu,
						struct urb *urb,
						struct list_head *head,
						gfp_t flags)
{
	struct ehci_qtd	*qtd, *qtd_prev;
	dma_addr_t buf;
	int len, maxpacket;
	int is_input;
	u32 token;
	void *transfer_buf = NULL;
	int ret;

	/*
	 * URBs map to sequences of QTDs: one logical transaction
	 */
	qtd = ehci_qtd_alloc(oxu);
	if (unlikely(!qtd))
		return NULL;
	list_add_tail(&qtd->qtd_list, head);
	qtd->urb = urb;

	token = QTD_STS_ACTIVE;
	token |= (EHCI_TUNE_CERR << 10);
	/* for split transactions, SplitXState initialized to zero */

	len = urb->transfer_buffer_length;
	is_input = usb_pipein(urb->pipe);
	if (!urb->transfer_buffer && urb->transfer_buffer_length && is_input)
		urb->transfer_buffer = phys_to_virt(urb->transfer_dma);

	if (usb_pipecontrol(urb->pipe)) {
		/* SETUP pid */
		ret = oxu_buf_alloc(oxu, qtd, sizeof(struct usb_ctrlrequest));
		if (ret)
			goto cleanup;

		qtd_fill(qtd, qtd->buffer_dma, sizeof(struct usb_ctrlrequest),
				token | (2 /* "setup" */ << 8), 8);
		memcpy(qtd->buffer, qtd->urb->setup_packet,
				sizeof(struct usb_ctrlrequest));

		/* ... and always at least one more pid */
		token ^= QTD_TOGGLE;
		qtd_prev = qtd;
		qtd = ehci_qtd_alloc(oxu);
		if (unlikely(!qtd))
			goto cleanup;
		qtd->urb = urb;
		qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma);
		list_add_tail(&qtd->qtd_list, head);

		/* for zero length DATA stages, STATUS is always IN */
		if (len == 0)
			token |= (1 /* "in" */ << 8);
	}

	/*
	 * Data transfer stage: buffer setup
	 */

	ret = oxu_buf_alloc(oxu, qtd, len);
	if (ret)
		goto cleanup;

	buf = qtd->buffer_dma;
	transfer_buf = urb->transfer_buffer;

	if (!is_input)
		memcpy(qtd->buffer, qtd->urb->transfer_buffer, len);

	if (is_input)
		token |= (1 /* "in" */ << 8);
	/* else it's already initted to "out" pid (0 << 8) */

	maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input));

	/*
	 * buffer gets wrapped in one or more qtds;
	 * last one may be "short" (including zero len)
	 * and may serve as a control status ack
	 */
	for (;;) {
		int this_qtd_len;

		this_qtd_len = qtd_fill(qtd, buf, len, token, maxpacket);
		qtd->transfer_buffer = transfer_buf;
		len -= this_qtd_len;
		buf += this_qtd_len;
		transfer_buf += this_qtd_len;
		if (is_input)
			qtd->hw_alt_next = oxu->async->hw_alt_next;

		/* qh makes control packets use qtd toggle; maybe switch it */
		if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
			token ^= QTD_TOGGLE;

		if (likely(len <= 0))
			break;

		qtd_prev = qtd;
		qtd = ehci_qtd_alloc(oxu);
		if (unlikely(!qtd))
			goto cleanup;
		if (likely(len > 0)) {
			ret = oxu_buf_alloc(oxu, qtd, len);
			if (ret)
				goto cleanup;
		}
		qtd->urb = urb;
		qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma);
		list_add_tail(&qtd->qtd_list, head);
	}

	/* unless the bulk/interrupt caller wants a chance to clean
	 * up after short reads, hc should advance qh past this urb
	 */
	if (likely((urb->transfer_flags & URB_SHORT_NOT_OK) == 0
				|| usb_pipecontrol(urb->pipe)))
		qtd->hw_alt_next = EHCI_LIST_END;

	/*
	 * control requests may need a terminating data "status" ack;
	 * bulk ones may need a terminating short packet (zero length).
	 */
	if (likely(urb->transfer_buffer_length != 0)) {
		int one_more = 0;

		if (usb_pipecontrol(urb->pipe)) {
			one_more = 1;
			token ^= 0x0100;	/* "in" <--> "out"  */
			token |= QTD_TOGGLE;	/* force DATA1 */
		} else if (usb_pipebulk(urb->pipe)
				&& (urb->transfer_flags & URB_ZERO_PACKET)
				&& !(urb->transfer_buffer_length % maxpacket)) {
			one_more = 1;
		}
		if (one_more) {
			qtd_prev = qtd;
			qtd = ehci_qtd_alloc(oxu);
			if (unlikely(!qtd))
				goto cleanup;
			qtd->urb = urb;
			qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma);
			list_add_tail(&qtd->qtd_list, head);

			/* never any data in such packets */
			qtd_fill(qtd, 0, 0, token, 0);
		}
	}

	/* by default, enable interrupt on urb completion */
	qtd->hw_token |= __constant_cpu_to_le32(QTD_IOC);
	return head;

cleanup:
	qtd_list_free(oxu, urb, head);
	return NULL;
}

/* Each QH holds a qtd list; a QH is used for everything except iso.
 *
 * For interrupt urbs, the scheduler must set the microframe scheduling
 * mask(s) each time the QH gets scheduled.  For highspeed, that's
 * just one microframe in the s-mask.  For split interrupt transactions
 * there are additional complications: c-mask, maybe FSTNs.
 */
static struct ehci_qh *qh_make(struct oxu_hcd *oxu,
				struct urb *urb, gfp_t flags)
{
	struct ehci_qh *qh = oxu_qh_alloc(oxu);
	u32 info1 = 0, info2 = 0;
	int is_input, type;
	int maxp = 0;

	if (!qh)
		return qh;

	/*
	 * init endpoint/device data for this QH
	 */
	info1 |= usb_pipeendpoint(urb->pipe) << 8;
	info1 |= usb_pipedevice(urb->pipe) << 0;

	is_input = usb_pipein(urb->pipe);
	type = usb_pipetype(urb->pipe);
	maxp = usb_maxpacket(urb->dev, urb->pipe, !is_input);

	/* Compute interrupt scheduling parameters just once, and save.
	 * - allowing for high bandwidth, how many nsec/uframe are used?
	 * - split transactions need a second CSPLIT uframe; same question
	 * - splits also need a schedule gap (for full/low speed I/O)
	 * - qh has a polling interval
	 *
	 * For control/bulk requests, the HC or TT handles these.
	 */
	if (type == PIPE_INTERRUPT) {
		qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
							is_input, 0,
				hb_mult(maxp) * max_packet(maxp)));
		qh->start = NO_FRAME;

		if (urb->dev->speed == USB_SPEED_HIGH) {
			qh->c_usecs = 0;
			qh->gap_uf = 0;

			qh->period = urb->interval >> 3;
			if (qh->period == 0 && urb->interval != 1) {
				/* NOTE interval 2 or 4 uframes could work.
				 * But interval 1 scheduling is simpler, and
				 * includes high bandwidth.
				 */
				dbg("intr period %d uframes, NYET!",
						urb->interval);
				goto done;
			}
		} else {
			struct usb_tt *tt = urb->dev->tt;
			int think_time;

			/* gap is f(FS/LS transfer times) */
			qh->gap_uf = 1 + usb_calc_bus_time(urb->dev->speed,
					is_input, 0, maxp) / (125 * 1000);

			/* FIXME this just approximates SPLIT/CSPLIT times */
			if (is_input) {		/* SPLIT, gap, CSPLIT+DATA */
				qh->c_usecs = qh->usecs + HS_USECS(0);
				qh->usecs = HS_USECS(1);
			} else {		/* SPLIT+DATA, gap, CSPLIT */
				qh->usecs += HS_USECS(1);
				qh->c_usecs = HS_USECS(0);
			}

			think_time = tt ? tt->think_time : 0;
			qh->tt_usecs = NS_TO_US(think_time +
					usb_calc_bus_time(urb->dev->speed,
						is_input, 0, max_packet(maxp)));
			qh->period = urb->interval;
		}
	}

	/* support for tt scheduling, and access to toggles */
	qh->dev = urb->dev;

	/* using TT? */
	switch (urb->dev->speed) {
	case USB_SPEED_LOW:
		info1 |= (1 << 12);	/* EPS "low" */
		/* FALL THROUGH */

	case USB_SPEED_FULL:
		/* EPS 0 means "full" */
		if (type != PIPE_INTERRUPT)
			info1 |= (EHCI_TUNE_RL_TT << 28);
		if (type == PIPE_CONTROL) {
			info1 |= (1 << 27);	/* for TT */
			info1 |= 1 << 14;	/* toggle from qtd */
		}
		info1 |= maxp << 16;

		info2 |= (EHCI_TUNE_MULT_TT << 30);
		info2 |= urb->dev->ttport << 23;

		/* NOTE:  if (PIPE_INTERRUPT) { scheduler sets c-mask } */

		break;

	case USB_SPEED_HIGH:		/* no TT involved */
		info1 |= (2 << 12);	/* EPS "high" */
		if (type == PIPE_CONTROL) {
			info1 |= (EHCI_TUNE_RL_HS << 28);
			info1 |= 64 << 16;	/* usb2 fixed maxpacket */
			info1 |= 1 << 14;	/* toggle from qtd */
			info2 |= (EHCI_TUNE_MULT_HS << 30);
		} else if (type == PIPE_BULK) {
			info1 |= (EHCI_TUNE_RL_HS << 28);
			info1 |= 512 << 16;	/* usb2 fixed maxpacket */
			info2 |= (EHCI_TUNE_MULT_HS << 30);
		} else {		/* PIPE_INTERRUPT */
			info1 |= max_packet(maxp) << 16;
			info2 |= hb_mult(maxp) << 30;
		}
		break;
	default:
		dbg("bogus dev %p speed %d", urb->dev, urb->dev->speed);
done:
		qh_put(qh);
		return NULL;
	}

	/* NOTE:  if (PIPE_INTERRUPT) { scheduler sets s-mask } */

	/* init as live, toggle clear, advance to dummy */
	qh->qh_state = QH_STATE_IDLE;
	qh->hw_info1 = cpu_to_le32(info1);
	qh->hw_info2 = cpu_to_le32(info2);
	usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), !is_input, 1);
	qh_refresh(oxu, qh);
	return qh;
}

/* Move qh (and its qtds) onto async queue; maybe enable queue.
 */
static void qh_link_async(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
	__le32 dma = QH_NEXT(qh->qh_dma);
	struct ehci_qh *head;

	/* (re)start the async schedule? */
	head = oxu->async;
	timer_action_done(oxu, TIMER_ASYNC_OFF);
	if (!head->qh_next.qh) {
		u32 cmd = readl(&oxu->regs->command);

		if (!(cmd & CMD_ASE)) {
			/* in case a clear of CMD_ASE didn't take yet */
			(void)handshake(oxu, &oxu->regs->status,
					STS_ASS, 0, 150);
			cmd |= CMD_ASE | CMD_RUN;
			writel(cmd, &oxu->regs->command);
			oxu_to_hcd(oxu)->state = HC_STATE_RUNNING;
			/* posted write need not be known to HC yet ... */
		}
	}

	/* clear halt and/or toggle; and maybe recover from silicon quirk */
	if (qh->qh_state == QH_STATE_IDLE)
		qh_refresh(oxu, qh);

	/* splice right after start */
	qh->qh_next = head->qh_next;
	qh->hw_next = head->hw_next;
	wmb();

	head->qh_next.qh = qh;
	head->hw_next = dma;

	qh->qh_state = QH_STATE_LINKED;
	/* qtd completions reported later by interrupt */
}

#define	QH_ADDR_MASK	__constant_cpu_to_le32(0x7f)

/*
 * For control/bulk/interrupt, return QH with these TDs appended.
 * Allocates and initializes the QH if necessary.
 * Returns null if it can't allocate a QH it needs to.
 * If the QH has TDs (urbs) already, that's great.
 */
static struct ehci_qh *qh_append_tds(struct oxu_hcd *oxu,
				struct urb *urb, struct list_head *qtd_list,
				int epnum, void **ptr)
{
	struct ehci_qh *qh = NULL;

	qh = (struct ehci_qh *) *ptr;
	if (unlikely(qh == NULL)) {
		/* can't sleep here, we have oxu->lock... */
		qh = qh_make(oxu, urb, GFP_ATOMIC);
		*ptr = qh;
	}
	if (likely(qh != NULL)) {
		struct ehci_qtd	*qtd;

		if (unlikely(list_empty(qtd_list)))
			qtd = NULL;
		else
			qtd = list_entry(qtd_list->next, struct ehci_qtd,
					qtd_list);

		/* control qh may need patching ... */
		if (unlikely(epnum == 0)) {

			/* usb_reset_device() briefly reverts to address 0 */
			if (usb_pipedevice(urb->pipe) == 0)
				qh->hw_info1 &= ~QH_ADDR_MASK;
		}

		/* just one way to queue requests: swap with the dummy qtd.
		 * only hc or qh_refresh() ever modify the overlay.
		 */
		if (likely(qtd != NULL)) {
			struct ehci_qtd	*dummy;
			dma_addr_t dma;
			__le32 token;

			/* to avoid racing the HC, use the dummy td instead of
			 * the first td of our list (becomes new dummy).  both
			 * tds stay deactivated until we're done, when the
			 * HC is allowed to fetch the old dummy (4.10.2).
			 */
			token = qtd->hw_token;
			qtd->hw_token = HALT_BIT;
			wmb();
			dummy = qh->dummy;

			dma = dummy->qtd_dma;
			*dummy = *qtd;
			dummy->qtd_dma = dma;

			list_del(&qtd->qtd_list);
			list_add(&dummy->qtd_list, qtd_list);
			list_splice(qtd_list, qh->qtd_list.prev);

			ehci_qtd_init(qtd, qtd->qtd_dma);
			qh->dummy = qtd;

			/* hc must see the new dummy at list end */
			dma = qtd->qtd_dma;
			qtd = list_entry(qh->qtd_list.prev,
					struct ehci_qtd, qtd_list);
			qtd->hw_next = QTD_NEXT(dma);

			/* let the hc process these next qtds */
			dummy->hw_token = (token & ~(0x80));
			wmb();
			dummy->hw_token = token;

			urb->hcpriv = qh_get(qh);
		}
	}
	return qh;
}

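/* Queue a control or bulk URB on the async schedule: append the already
 * built qtd list to the endpoint's QH (allocating the QH on first use)
 * and link the QH into the async ring if it is currently idle.
 */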
1625 | static int submit_async(struct oxu_hcd *oxu, struct urb *urb, | |
1626 | struct list_head *qtd_list, gfp_t mem_flags) | |
1627 | { | |
1628 | struct ehci_qtd *qtd; | |
1629 | int epnum; | |
1630 | unsigned long flags; | |
1631 | struct ehci_qh *qh = NULL; | |
1632 | int rc = 0; | |
1633 | ||
1634 | qtd = list_entry(qtd_list->next, struct ehci_qtd, qtd_list); | |
1635 | epnum = urb->ep->desc.bEndpointAddress; | |
1636 | ||
1637 | #ifdef OXU_URB_TRACE | |
1638 | oxu_dbg(oxu, "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n", | |
1639 | __func__, urb->dev->devpath, urb, | |
1640 | epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out", | |
1641 | urb->transfer_buffer_length, | |
1642 | qtd, urb->ep->hcpriv); | |
1643 | #endif | |
1644 | ||
1645 | spin_lock_irqsave(&oxu->lock, flags); | |
1646 | if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE, | |
1647 | &oxu_to_hcd(oxu)->flags))) { | |
1648 | rc = -ESHUTDOWN; | |
1649 | goto done; | |
1650 | } | |
1651 | ||
1652 | qh = qh_append_tds(oxu, urb, qtd_list, epnum, &urb->ep->hcpriv); | |
1653 | if (unlikely(qh == NULL)) { | |
1654 | rc = -ENOMEM; | |
1655 | goto done; | |
1656 | } | |
1657 | ||
1658 | /* Control/bulk operations through TTs don't need scheduling, | |
1659 | * the HC and TT handle it when the TT has a buffer ready. | |
1660 | */ | |
1661 | if (likely(qh->qh_state == QH_STATE_IDLE)) | |
1662 | qh_link_async(oxu, qh_get(qh)); | |
1663 | done: | |
1664 | spin_unlock_irqrestore(&oxu->lock, flags); | |
1665 | if (unlikely(qh == NULL)) | |
1666 | qtd_list_free(oxu, urb, qtd_list); | |
1667 | return rc; | |
1668 | } | |
1669 | ||
1670 | /* The async qh for the qtds being reclaimed is now unlinked from the HC */ | |
1671 | ||
1672 | static void end_unlink_async(struct oxu_hcd *oxu) | |
1673 | { | |
1674 | struct ehci_qh *qh = oxu->reclaim; | |
1675 | struct ehci_qh *next; | |
1676 | ||
1677 | timer_action_done(oxu, TIMER_IAA_WATCHDOG); | |
1678 | ||
1679 | qh->qh_state = QH_STATE_IDLE; | |
1680 | qh->qh_next.qh = NULL; | |
1681 | qh_put(qh); /* refcount from reclaim */ | |
1682 | ||
1683 | /* other unlink(s) may be pending (in QH_STATE_UNLINK_WAIT) */ | |
1684 | next = qh->reclaim; | |
1685 | oxu->reclaim = next; | |
1686 | oxu->reclaim_ready = 0; | |
1687 | qh->reclaim = NULL; | |
1688 | ||
1689 | qh_completions(oxu, qh); | |
1690 | ||
1691 | if (!list_empty(&qh->qtd_list) | |
1692 | && HC_IS_RUNNING(oxu_to_hcd(oxu)->state)) | |
1693 | qh_link_async(oxu, qh); | |
1694 | else { | |
1695 | qh_put(qh); /* refcount from async list */ | |
1696 | ||
1697 | /* it's not free to turn the async schedule on/off; leave it | |
1698 | * active but idle for a while once it empties. | |
1699 | */ | |
1700 | if (HC_IS_RUNNING(oxu_to_hcd(oxu)->state) | |
1701 | && oxu->async->qh_next.qh == NULL) | |
1702 | timer_action(oxu, TIMER_ASYNC_OFF); | |
1703 | } | |
1704 | ||
1705 | if (next) { | |
1706 | oxu->reclaim = NULL; | |
1707 | start_unlink_async(oxu, next); | |
1708 | } | |
1709 | } | |
1710 | ||
1711 | /* makes sure the async qh will become idle */ | |
1712 | /* caller must own oxu->lock */ | |
1713 | ||
1714 | static void start_unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh) | |
1715 | { | |
1716 | int cmd = readl(&oxu->regs->command); | |
1717 | struct ehci_qh *prev; | |
1718 | ||
1719 | #ifdef DEBUG | |
1720 | assert_spin_locked(&oxu->lock); | |
1721 | if (oxu->reclaim || (qh->qh_state != QH_STATE_LINKED | |
1722 | && qh->qh_state != QH_STATE_UNLINK_WAIT)) | |
1723 | BUG(); | |
1724 | #endif | |
1725 | ||
1726 | /* stop async schedule right now? */ | |
1727 | if (unlikely(qh == oxu->async)) { | |
1728 | /* can't get here without STS_ASS set */ | |
1729 | if (oxu_to_hcd(oxu)->state != HC_STATE_HALT | |
1730 | && !oxu->reclaim) { | |
1731 | /* ... and CMD_IAAD clear */ | |
1732 | writel(cmd & ~CMD_ASE, &oxu->regs->command); | |
1733 | wmb(); | |
1734 | /* handshake later, if we need to */ | |
1735 | timer_action_done(oxu, TIMER_ASYNC_OFF); | |
1736 | } | |
1737 | return; | |
1738 | } | |
1739 | ||
1740 | qh->qh_state = QH_STATE_UNLINK; | |
1741 | oxu->reclaim = qh = qh_get(qh); | |
1742 | ||
1743 | prev = oxu->async; | |
1744 | while (prev->qh_next.qh != qh) | |
1745 | prev = prev->qh_next.qh; | |
1746 | ||
1747 | prev->hw_next = qh->hw_next; | |
1748 | prev->qh_next = qh->qh_next; | |
1749 | wmb(); | |
1750 | ||
1751 | if (unlikely(oxu_to_hcd(oxu)->state == HC_STATE_HALT)) { | |
1752 | /* if (unlikely(qh->reclaim != 0)) | |
1753 | * this will recurse, probably not much | |
1754 | */ | |
1755 | end_unlink_async(oxu); | |
1756 | return; | |
1757 | } | |
1758 | ||
1759 | oxu->reclaim_ready = 0; | |
1760 | cmd |= CMD_IAAD; | |
1761 | writel(cmd, &oxu->regs->command); | |
1762 | (void) readl(&oxu->regs->command); | |
1763 | timer_action(oxu, TIMER_IAA_WATCHDOG); | |
1764 | } | |
1765 | ||
1766 | static void scan_async(struct oxu_hcd *oxu) | |
1767 | { | |
1768 | struct ehci_qh *qh; | |
1769 | enum ehci_timer_action action = TIMER_IO_WATCHDOG; | |
1770 | ||
1771 | if (!++(oxu->stamp)) | |
1772 | oxu->stamp++; | |
1773 | timer_action_done(oxu, TIMER_ASYNC_SHRINK); | |
1774 | rescan: | |
1775 | qh = oxu->async->qh_next.qh; | |
1776 | if (likely(qh != NULL)) { | |
1777 | do { | |
1778 | /* clean any finished work for this qh */ | |
1779 | if (!list_empty(&qh->qtd_list) | |
1780 | && qh->stamp != oxu->stamp) { | |
1781 | int temp; | |
1782 | ||
1783 | /* unlinks could happen here; completion | |
1784 | * reporting drops the lock. rescan using | |
1785 | * the latest schedule, but don't rescan | |
1786 | * qhs we already finished (no looping). | |
1787 | */ | |
1788 | qh = qh_get(qh); | |
1789 | qh->stamp = oxu->stamp; | |
1790 | temp = qh_completions(oxu, qh); | |
1791 | qh_put(qh); | |
1792 | if (temp != 0) | |
1793 | goto rescan; | |
1794 | } | |
1795 | ||
1796 | /* unlink idle entries, reducing HC PCI usage as well | |
1797 | * as HCD schedule-scanning costs. delay for any qh | |
1798 | * we just scanned, there's a not-unusual case that it | |
1799 | * doesn't stay idle for long. | |
1800 | * (plus, avoids some kind of re-activation race.) | |
1801 | */ | |
1802 | if (list_empty(&qh->qtd_list)) { | |
1803 | if (qh->stamp == oxu->stamp) | |
1804 | action = TIMER_ASYNC_SHRINK; | |
1805 | else if (!oxu->reclaim | |
1806 | && qh->qh_state == QH_STATE_LINKED) | |
1807 | start_unlink_async(oxu, qh); | |
1808 | } | |
1809 | ||
1810 | qh = qh->qh_next.qh; | |
1811 | } while (qh); | |
1812 | } | |
1813 | if (action == TIMER_ASYNC_SHRINK) | |
1814 | timer_action(oxu, TIMER_ASYNC_SHRINK); | |
1815 | } | |
1816 | ||
1817 | /* | |
1818 | * periodic_next_shadow - return "next" pointer on shadow list | |
1819 | * @periodic: host pointer to qh/itd/sitd | |
1820 | * @tag: hardware tag for type of this record | |
1821 | */ | |
1822 | static union ehci_shadow *periodic_next_shadow(union ehci_shadow *periodic, | |
1823 | __le32 tag) | |
1824 | { | |
1825 | switch (tag) { | |
1826 | default: | |
1827 | case Q_TYPE_QH: | |
1828 | return &periodic->qh->qh_next; | |
1829 | } | |
1830 | } | |
1831 | ||
1832 | /* caller must hold oxu->lock */ | |
1833 | static void periodic_unlink(struct oxu_hcd *oxu, unsigned frame, void *ptr) | |
1834 | { | |
1835 | union ehci_shadow *prev_p = &oxu->pshadow[frame]; | |
1836 | __le32 *hw_p = &oxu->periodic[frame]; | |
1837 | union ehci_shadow here = *prev_p; | |
1838 | ||
1839 | /* find predecessor of "ptr"; hw and shadow lists are in sync */ | |
1840 | while (here.ptr && here.ptr != ptr) { | |
1841 | prev_p = periodic_next_shadow(prev_p, Q_NEXT_TYPE(*hw_p)); | |
1842 | hw_p = here.hw_next; | |
1843 | here = *prev_p; | |
1844 | } | |
1845 | /* an interrupt entry (at list end) could have been shared */ | |
1846 | if (!here.ptr) | |
1847 | return; | |
1848 | ||
1849 | /* update shadow and hardware lists ... the old "next" pointers | |
1850 | * from ptr may still be in use, the caller updates them. | |
1851 | */ | |
1852 | *prev_p = *periodic_next_shadow(&here, Q_NEXT_TYPE(*hw_p)); | |
1853 | *hw_p = *here.hw_next; | |
1854 | } | |
1855 | ||
1856 | /* how many of the uframe's 125 usecs are allocated? */ | |
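 /* Each qh on this frame's list contributes qh->usecs when its S-mask
 * has the uframe's bit set, plus qh->c_usecs when the matching C-mask
 * bit (uframe + 8) is set; callers compare the total against the
 * 100 usec (80%) periodic budget.
 */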
1857 | static unsigned short periodic_usecs(struct oxu_hcd *oxu, | |
1858 | unsigned frame, unsigned uframe) | |
1859 | { | |
1860 | __le32 *hw_p = &oxu->periodic[frame]; | |
1861 | union ehci_shadow *q = &oxu->pshadow[frame]; | |
1862 | unsigned usecs = 0; | |
1863 | ||
1864 | while (q->ptr) { | |
1865 | switch (Q_NEXT_TYPE(*hw_p)) { | |
1866 | case Q_TYPE_QH: | |
1867 | default: | |
1868 | /* is it in the S-mask? */ | |
1869 | if (q->qh->hw_info2 & cpu_to_le32(1 << uframe)) | |
1870 | usecs += q->qh->usecs; | |
1871 | /* ... or C-mask? */ | |
1872 | if (q->qh->hw_info2 & cpu_to_le32(1 << (8 + uframe))) | |
1873 | usecs += q->qh->c_usecs; | |
1874 | hw_p = &q->qh->hw_next; | |
1875 | q = &q->qh->qh_next; | |
1876 | break; | |
1877 | } | |
1878 | } | |
1879 | #ifdef DEBUG | |
1880 | if (usecs > 100) | |
1881 | oxu_err(oxu, "uframe %d sched overrun: %d usecs\n", | |
1882 | frame * 8 + uframe, usecs); | |
1883 | #endif | |
1884 | return usecs; | |
1885 | } | |
1886 | ||
1887 | static int enable_periodic(struct oxu_hcd *oxu) | |
1888 | { | |
1889 | u32 cmd; | |
1890 | int status; | |
1891 | ||
1892 | /* did clearing PSE take effect yet? | |
1893 | * takes effect only at frame boundaries... | |
1894 | */ | |
1895 | status = handshake(oxu, &oxu->regs->status, STS_PSS, 0, 9 * 125); | |
1896 | if (status != 0) { | |
1897 | oxu_to_hcd(oxu)->state = HC_STATE_HALT; | |
1898 | return status; | |
1899 | } | |
1900 | ||
1901 | cmd = readl(&oxu->regs->command) | CMD_PSE; | |
1902 | writel(cmd, &oxu->regs->command); | |
1903 | /* posted write ... PSS happens later */ | |
1904 | oxu_to_hcd(oxu)->state = HC_STATE_RUNNING; | |
1905 | ||
1906 | /* make sure ehci_work scans these */ | |
1907 | oxu->next_uframe = readl(&oxu->regs->frame_index) | |
1908 | % (oxu->periodic_size << 3); | |
1909 | return 0; | |
1910 | } | |
1911 | ||
1912 | static int disable_periodic(struct oxu_hcd *oxu) | |
1913 | { | |
1914 | u32 cmd; | |
1915 | int status; | |
1916 | ||
1917 | /* did setting PSE not take effect yet? | |
1918 | * takes effect only at frame boundaries... | |
1919 | */ | |
1920 | status = handshake(oxu, &oxu->regs->status, STS_PSS, STS_PSS, 9 * 125); | |
1921 | if (status != 0) { | |
1922 | oxu_to_hcd(oxu)->state = HC_STATE_HALT; | |
1923 | return status; | |
1924 | } | |
1925 | ||
1926 | cmd = readl(&oxu->regs->command) & ~CMD_PSE; | |
1927 | writel(cmd, &oxu->regs->command); | |
1928 | /* posted write ... */ | |
1929 | ||
1930 | oxu->next_uframe = -1; | |
1931 | return 0; | |
1932 | } | |
1933 | ||
1934 | /* periodic schedule slots have iso tds (normal or split) first, then a | |
1935 | * sparse tree for active interrupt transfers. | |
1936 | * | |
1937 | * this just links in a qh; caller guarantees uframe masks are set right. | |
1938 | * no FSTN support (yet; oxu 0.96+) | |
1939 | */ | |
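 /* For example, a qh with period 4 is linked into every 4th frame slot
 * starting at qh->start; within each slot's list it is inserted after
 * any slower (larger-period) qhs, so interior tree nodes can be shared.
 */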
1940 | static int qh_link_periodic(struct oxu_hcd *oxu, struct ehci_qh *qh) | |
1941 | { | |
1942 | unsigned i; | |
1943 | unsigned period = qh->period; | |
1944 | ||
1945 | dev_dbg(&qh->dev->dev, | |
1946 | "link qh%d-%04x/%p start %d [%d/%d us]\n", | |
1947 | period, le32_to_cpup(&qh->hw_info2) & (QH_CMASK | QH_SMASK), | |
1948 | qh, qh->start, qh->usecs, qh->c_usecs); | |
1949 | ||
1950 | /* high bandwidth, or otherwise every microframe */ | |
1951 | if (period == 0) | |
1952 | period = 1; | |
1953 | ||
1954 | for (i = qh->start; i < oxu->periodic_size; i += period) { | |
1955 | union ehci_shadow *prev = &oxu->pshadow[i]; | |
1956 | __le32 *hw_p = &oxu->periodic[i]; | |
1957 | union ehci_shadow here = *prev; | |
1958 | __le32 type = 0; | |
1959 | ||
1960 | /* skip the iso nodes at list head */ | |
1961 | while (here.ptr) { | |
1962 | type = Q_NEXT_TYPE(*hw_p); | |
1963 | if (type == Q_TYPE_QH) | |
1964 | break; | |
1965 | prev = periodic_next_shadow(prev, type); | |
1966 | hw_p = &here.qh->hw_next; | |
1967 | here = *prev; | |
1968 | } | |
1969 | ||
1970 | /* sorting each branch by period (slow-->fast) | |
1971 | * enables sharing interior tree nodes | |
1972 | */ | |
1973 | while (here.ptr && qh != here.qh) { | |
1974 | if (qh->period > here.qh->period) | |
1975 | break; | |
1976 | prev = &here.qh->qh_next; | |
1977 | hw_p = &here.qh->hw_next; | |
1978 | here = *prev; | |
1979 | } | |
1980 | /* link in this qh, unless some earlier pass did that */ | |
1981 | if (qh != here.qh) { | |
1982 | qh->qh_next = here; | |
1983 | if (here.qh) | |
1984 | qh->hw_next = *hw_p; | |
1985 | wmb(); | |
1986 | prev->qh = qh; | |
1987 | *hw_p = QH_NEXT(qh->qh_dma); | |
1988 | } | |
1989 | } | |
1990 | qh->qh_state = QH_STATE_LINKED; | |
1991 | qh_get(qh); | |
1992 | ||
1993 | /* update per-qh bandwidth for usbfs */ | |
1994 | oxu_to_hcd(oxu)->self.bandwidth_allocated += qh->period | |
1995 | ? ((qh->usecs + qh->c_usecs) / qh->period) | |
1996 | : (qh->usecs * 8); | |
1997 | ||
1998 | /* maybe enable periodic schedule processing */ | |
1999 | if (!oxu->periodic_sched++) | |
2000 | return enable_periodic(oxu); | |
2001 | ||
2002 | return 0; | |
2003 | } | |
2004 | ||
2005 | static void qh_unlink_periodic(struct oxu_hcd *oxu, struct ehci_qh *qh) | |
2006 | { | |
2007 | unsigned i; | |
2008 | unsigned period; | |
2009 | ||
2010 | /* FIXME: | |
2011 | * IF this isn't high speed | |
2012 | * and this qh is active in the current uframe | |
2013 | * (and overlay token SplitXstate is false?) | |
2014 | * THEN | |
2015 | * qh->hw_info1 |= __constant_cpu_to_le32(1 << 7 "ignore"); | |
2016 | */ | |
2017 | ||
2018 | /* high bandwidth, or otherwise part of every microframe */ | |
2019 | period = qh->period; | |
2020 | if (period == 0) | |
2021 | period = 1; | |
2022 | ||
2023 | for (i = qh->start; i < oxu->periodic_size; i += period) | |
2024 | periodic_unlink(oxu, i, qh); | |
2025 | ||
2026 | /* update per-qh bandwidth for usbfs */ | |
2027 | oxu_to_hcd(oxu)->self.bandwidth_allocated -= qh->period | |
2028 | ? ((qh->usecs + qh->c_usecs) / qh->period) | |
2029 | : (qh->usecs * 8); | |
2030 | ||
2031 | dev_dbg(&qh->dev->dev, | |
2032 | "unlink qh%d-%04x/%p start %d [%d/%d us]\n", | |
2033 | qh->period, | |
2034 | le32_to_cpup(&qh->hw_info2) & (QH_CMASK | QH_SMASK), | |
2035 | qh, qh->start, qh->usecs, qh->c_usecs); | |
2036 | ||
2037 | /* qh->qh_next still "live" to HC */ | |
2038 | qh->qh_state = QH_STATE_UNLINK; | |
2039 | qh->qh_next.ptr = NULL; | |
2040 | qh_put(qh); | |
2041 | ||
2042 | /* maybe turn off periodic schedule */ | |
2043 | oxu->periodic_sched--; | |
2044 | if (!oxu->periodic_sched) | |
2045 | (void) disable_periodic(oxu); | |
2046 | } | |
2047 | ||
2048 | static void intr_deschedule(struct oxu_hcd *oxu, struct ehci_qh *qh) | |
2049 | { | |
2050 | unsigned wait; | |
2051 | ||
2052 | qh_unlink_periodic(oxu, qh); | |
2053 | ||
2054 | /* simple/paranoid: always delay, expecting the HC needs to read | |
2055 | * qh->hw_next or finish a writeback after SPLIT/CSPLIT ... and | |
2056 | * expect khubd to clean up after any CSPLITs we won't issue. | |
2057 | * active high speed queues may need bigger delays... | |
2058 | */ | |
2059 | if (list_empty(&qh->qtd_list) | |
2060 | || (__constant_cpu_to_le32(QH_CMASK) & qh->hw_info2) != 0) | |
2061 | wait = 2; | |
2062 | else | |
2063 | wait = 55; /* worst case: 3 * 1024 */ | |
2064 | ||
2065 | udelay(wait); | |
2066 | qh->qh_state = QH_STATE_IDLE; | |
2067 | qh->hw_next = EHCI_LIST_END; | |
2068 | wmb(); | |
2069 | } | |
2070 | ||
2071 | static int check_period(struct oxu_hcd *oxu, | |
2072 | unsigned frame, unsigned uframe, | |
2073 | unsigned period, unsigned usecs) | |
2074 | { | |
2075 | int claimed; | |
2076 | ||
2077 | /* complete split running into next frame? | |
2078 | * given FSTN support, we could sometimes check... | |
2079 | */ | |
2080 | if (uframe >= 8) | |
2081 | return 0; | |
2082 | ||
2083 | /* | |
2084 | * 80% periodic == 100 usec/uframe available | |
2085 | * convert "usecs we need" to "max already claimed" | |
2086 | */ | |
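 /* e.g. a transfer needing 30 usecs per uframe may only use uframes
 * where no more than 70 usecs are already claimed.
 */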
2087 | usecs = 100 - usecs; | |
2088 | ||
2089 | /* we "know" 2 and 4 uframe intervals were rejected; so | |
2090 | * for period 0, check _every_ microframe in the schedule. | |
2091 | */ | |
2092 | if (unlikely(period == 0)) { | |
2093 | do { | |
2094 | for (uframe = 0; uframe < 7; uframe++) { | |
2095 | claimed = periodic_usecs(oxu, frame, uframe); | |
2096 | if (claimed > usecs) | |
2097 | return 0; | |
2098 | } | |
2099 | } while ((frame += 1) < oxu->periodic_size); | |
2100 | ||
2101 | /* just check the specified uframe, at that period */ | |
2102 | } else { | |
2103 | do { | |
2104 | claimed = periodic_usecs(oxu, frame, uframe); | |
2105 | if (claimed > usecs) | |
2106 | return 0; | |
2107 | } while ((frame += period) < oxu->periodic_size); | |
2108 | } | |
2109 | ||
2110 | return 1; | |
2111 | } | |
2112 | ||
2113 | static int check_intr_schedule(struct oxu_hcd *oxu, | |
2114 | unsigned frame, unsigned uframe, | |
2115 | const struct ehci_qh *qh, __le32 *c_maskp) | |
2116 | { | |
2117 | int retval = -ENOSPC; | |
2118 | ||
2119 | if (qh->c_usecs && uframe >= 6) /* FSTN territory? */ | |
2120 | goto done; | |
2121 | ||
2122 | if (!check_period(oxu, frame, uframe, qh->period, qh->usecs)) | |
2123 | goto done; | |
2124 | if (!qh->c_usecs) { | |
2125 | retval = 0; | |
2126 | *c_maskp = 0; | |
2127 | goto done; | |
2128 | } | |
2129 | ||
2130 | done: | |
2131 | return retval; | |
2132 | } | |
2133 | ||
2134 | /* "first fit" scheduling policy used the first time through, | |
2135 | * or when the previous schedule slot can't be re-used. | |
2136 | */ | |
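 /* qh_schedule() first tries to keep the qh's previous slot (when
 * qh->start is still valid); otherwise it scans frames downward from
 * qh->period - 1, trying each of the 8 uframes, until
 * check_intr_schedule() reports enough bandwidth.
 */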
2137 | static int qh_schedule(struct oxu_hcd *oxu, struct ehci_qh *qh) | |
2138 | { | |
2139 | int status; | |
2140 | unsigned uframe; | |
2141 | __le32 c_mask; | |
2142 | unsigned frame; /* 0..(qh->period - 1), or NO_FRAME */ | |
2143 | ||
2144 | qh_refresh(oxu, qh); | |
2145 | qh->hw_next = EHCI_LIST_END; | |
2146 | frame = qh->start; | |
2147 | ||
2148 | /* reuse the previous schedule slots, if we can */ | |
2149 | if (frame < qh->period) { | |
2150 | uframe = ffs(le32_to_cpup(&qh->hw_info2) & QH_SMASK); | |
2151 | status = check_intr_schedule(oxu, frame, --uframe, | |
2152 | qh, &c_mask); | |
2153 | } else { | |
2154 | uframe = 0; | |
2155 | c_mask = 0; | |
2156 | status = -ENOSPC; | |
2157 | } | |
2158 | ||
2159 | /* else scan the schedule to find a group of slots such that all | |
2160 | * uframes have enough periodic bandwidth available. | |
2161 | */ | |
2162 | if (status) { | |
2163 | /* "normal" case, uframing flexible except with splits */ | |
2164 | if (qh->period) { | |
2165 | frame = qh->period - 1; | |
2166 | do { | |
2167 | for (uframe = 0; uframe < 8; uframe++) { | |
2168 | status = check_intr_schedule(oxu, | |
2169 | frame, uframe, qh, | |
2170 | &c_mask); | |
2171 | if (status == 0) | |
2172 | break; | |
2173 | } | |
2174 | } while (status && frame--); | |
2175 | ||
2176 | /* qh->period == 0 means every uframe */ | |
2177 | } else { | |
2178 | frame = 0; | |
2179 | status = check_intr_schedule(oxu, 0, 0, qh, &c_mask); | |
2180 | } | |
2181 | if (status) | |
2182 | goto done; | |
2183 | qh->start = frame; | |
2184 | ||
2185 | /* reset S-frame and (maybe) C-frame masks */ | |
2186 | qh->hw_info2 &= __constant_cpu_to_le32(~(QH_CMASK | QH_SMASK)); | |
2187 | qh->hw_info2 |= qh->period | |
2188 | ? cpu_to_le32(1 << uframe) | |
2189 | : __constant_cpu_to_le32(QH_SMASK); | |
2190 | qh->hw_info2 |= c_mask; | |
2191 | } else | |
2192 | oxu_dbg(oxu, "reused qh %p schedule\n", qh); | |
2193 | ||
2194 | /* stuff into the periodic schedule */ | |
2195 | status = qh_link_periodic(oxu, qh); | |
2196 | done: | |
2197 | return status; | |
2198 | } | |
2199 | ||
2200 | static int intr_submit(struct oxu_hcd *oxu, struct urb *urb, | |
2201 | struct list_head *qtd_list, gfp_t mem_flags) | |
2202 | { | |
2203 | unsigned epnum; | |
2204 | unsigned long flags; | |
2205 | struct ehci_qh *qh; | |
2206 | int status = 0; | |
2207 | struct list_head empty; | |
2208 | ||
2209 | /* get endpoint and transfer/schedule data */ | |
2210 | epnum = urb->ep->desc.bEndpointAddress; | |
2211 | ||
2212 | spin_lock_irqsave(&oxu->lock, flags); | |
2213 | ||
2214 | if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE, | |
2215 | &oxu_to_hcd(oxu)->flags))) { | |
2216 | status = -ESHUTDOWN; | |
2217 | goto done; | |
2218 | } | |
2219 | ||
2220 | /* get qh and force any scheduling errors */ | |
2221 | INIT_LIST_HEAD(&empty); | |
2222 | qh = qh_append_tds(oxu, urb, &empty, epnum, &urb->ep->hcpriv); | |
2223 | if (qh == NULL) { | |
2224 | status = -ENOMEM; | |
2225 | goto done; | |
2226 | } | |
2227 | if (qh->qh_state == QH_STATE_IDLE) { | |
2228 | status = qh_schedule(oxu, qh); | |
2229 | if (status != 0) | |
2230 | goto done; | |
2231 | } | |
2232 | ||
2233 | /* then queue the urb's tds to the qh */ | |
2234 | qh = qh_append_tds(oxu, urb, qtd_list, epnum, &urb->ep->hcpriv); | |
2235 | BUG_ON(qh == NULL); | |
2236 | ||
2237 | /* ... update usbfs periodic stats */ | |
2238 | oxu_to_hcd(oxu)->self.bandwidth_int_reqs++; | |
2239 | ||
2240 | done: | |
2241 | spin_unlock_irqrestore(&oxu->lock, flags); | |
2242 | if (status) | |
2243 | qtd_list_free(oxu, urb, qtd_list); | |
2244 | ||
2245 | return status; | |
2246 | } | |
2247 | ||
2248 | static inline int itd_submit(struct oxu_hcd *oxu, struct urb *urb, | |
2249 | gfp_t mem_flags) | |
2250 | { | |
2251 | oxu_dbg(oxu, "iso support is missing!\n"); | |
2252 | return -ENOSYS; | |
2253 | } | |
2254 | ||
2255 | static inline int sitd_submit(struct oxu_hcd *oxu, struct urb *urb, | |
2256 | gfp_t mem_flags) | |
2257 | { | |
2258 | oxu_dbg(oxu, "split iso support is missing!\n"); | |
2259 | return -ENOSYS; | |
2260 | } | |
2261 | ||
2262 | static void scan_periodic(struct oxu_hcd *oxu) | |
2263 | { | |
2264 | unsigned frame, clock, now_uframe, mod; | |
2265 | unsigned modified; | |
2266 | ||
2267 | mod = oxu->periodic_size << 3; | |
2268 | ||
2269 | /* | |
2270 | * When running, scan from last scan point up to "now" | |
2271 | * else clean up by scanning everything that's left. | |
2272 | * Touches as few pages as possible: cache-friendly. | |
2273 | */ | |
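 /* now_uframe and clock are microframe indices modulo
 * (periodic_size << 3); each pass below handles one frame's queue,
 * but never scans past the uframe the HC is currently in.
 */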
2274 | now_uframe = oxu->next_uframe; | |
2275 | if (HC_IS_RUNNING(oxu_to_hcd(oxu)->state)) | |
2276 | clock = readl(&oxu->regs->frame_index); | |
2277 | else | |
2278 | clock = now_uframe + mod - 1; | |
2279 | clock %= mod; | |
2280 | ||
2281 | for (;;) { | |
2282 | union ehci_shadow q, *q_p; | |
2283 | __le32 type, *hw_p; | |
2284 | unsigned uframes; | |
2285 | ||
2286 | /* don't scan past the live uframe */ | |
2287 | frame = now_uframe >> 3; | |
2288 | if (frame == (clock >> 3)) | |
2289 | uframes = now_uframe & 0x07; | |
2290 | else { | |
2291 | /* safe to scan the whole frame at once */ | |
2292 | now_uframe |= 0x07; | |
2293 | uframes = 8; | |
2294 | } | |
2295 | ||
2296 | restart: | |
2297 | /* scan each element in frame's queue for completions */ | |
2298 | q_p = &oxu->pshadow[frame]; | |
2299 | hw_p = &oxu->periodic[frame]; | |
2300 | q.ptr = q_p->ptr; | |
2301 | type = Q_NEXT_TYPE(*hw_p); | |
2302 | modified = 0; | |
2303 | ||
2304 | while (q.ptr != NULL) { | |
2305 | union ehci_shadow temp; | |
2306 | int live; | |
2307 | ||
2308 | live = HC_IS_RUNNING(oxu_to_hcd(oxu)->state); | |
2309 | switch (type) { | |
2310 | case Q_TYPE_QH: | |
2311 | /* handle any completions */ | |
2312 | temp.qh = qh_get(q.qh); | |
2313 | type = Q_NEXT_TYPE(q.qh->hw_next); | |
2314 | q = q.qh->qh_next; | |
2315 | modified = qh_completions(oxu, temp.qh); | |
2316 | if (unlikely(list_empty(&temp.qh->qtd_list))) | |
2317 | intr_deschedule(oxu, temp.qh); | |
2318 | qh_put(temp.qh); | |
2319 | break; | |
2320 | default: | |
2321 | dbg("corrupt type %d frame %d shadow %p", | |
2322 | type, frame, q.ptr); | |
2323 | q.ptr = NULL; | |
2324 | } | |
2325 | ||
2326 | /* assume completion callbacks modify the queue */ | |
2327 | if (unlikely(modified)) | |
2328 | goto restart; | |
2329 | } | |
2330 | ||
2331 | /* Stop when we catch up to the HC */ | |
2332 | ||
2333 | /* FIXME: this assumes we won't get lapped when | |
2334 | * latencies climb; that should be rare, but... | |
2335 | * detect it, and just go all the way around. | |
2336 | * FLR might help detect this case, so long as latencies | |
2337 | * don't exceed periodic_size msec (default 1.024 sec). | |
2338 | */ | |
2339 | ||
2340 | /* FIXME: likewise assumes HC doesn't halt mid-scan */ | |
2341 | ||
2342 | if (now_uframe == clock) { | |
2343 | unsigned now; | |
2344 | ||
2345 | if (!HC_IS_RUNNING(oxu_to_hcd(oxu)->state)) | |
2346 | break; | |
2347 | oxu->next_uframe = now_uframe; | |
2348 | now = readl(&oxu->regs->frame_index) % mod; | |
2349 | if (now_uframe == now) | |
2350 | break; | |
2351 | ||
2352 | /* rescan the rest of this frame, then ... */ | |
2353 | clock = now; | |
2354 | } else { | |
2355 | now_uframe++; | |
2356 | now_uframe %= mod; | |
2357 | } | |
2358 | } | |
2359 | } | |
2360 | ||
2361 | /* On some systems, leaving remote wakeup enabled prevents system shutdown. | |
2362 | * The firmware seems to think that powering off is a wakeup event! | |
2363 | * This routine turns off remote wakeup and everything else, on all ports. | |
2364 | */ | |
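 /* Writing just PORT_RWC_BITS clears the write-to-clear change bits and
 * zeroes every other control bit, including the wake-enable bits.
 */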
2365 | static void ehci_turn_off_all_ports(struct oxu_hcd *oxu) | |
2366 | { | |
2367 | int port = HCS_N_PORTS(oxu->hcs_params); | |
2368 | ||
2369 | while (port--) | |
2370 | writel(PORT_RWC_BITS, &oxu->regs->port_status[port]); | |
2371 | } | |
2372 | ||
2373 | static void ehci_port_power(struct oxu_hcd *oxu, int is_on) | |
2374 | { | |
2375 | unsigned port; | |
2376 | ||
2377 | if (!HCS_PPC(oxu->hcs_params)) | |
2378 | return; | |
2379 | ||
2380 | oxu_dbg(oxu, "...power%s ports...\n", is_on ? "up" : "down"); | |
2381 | for (port = HCS_N_PORTS(oxu->hcs_params); port > 0; ) | |
2382 | (void) oxu_hub_control(oxu_to_hcd(oxu), | |
2383 | is_on ? SetPortFeature : ClearPortFeature, | |
2384 | USB_PORT_FEAT_POWER, | |
2385 | port--, NULL, 0); | |
2386 | msleep(20); | |
2387 | } | |
2388 | ||
2389 | /* Called from some interrupts, timers, and so on. | |
2390 | * It calls driver completion functions, after dropping oxu->lock. | |
2391 | */ | |
2392 | static void ehci_work(struct oxu_hcd *oxu) | |
2393 | { | |
2394 | timer_action_done(oxu, TIMER_IO_WATCHDOG); | |
2395 | if (oxu->reclaim_ready) | |
2396 | end_unlink_async(oxu); | |
2397 | ||
2398 | /* another CPU may drop oxu->lock during a schedule scan while | |
2399 | * it reports urb completions. this flag guards against bogus | |
2400 | * attempts at re-entrant schedule scanning. | |
2401 | */ | |
2402 | if (oxu->scanning) | |
2403 | return; | |
2404 | oxu->scanning = 1; | |
2405 | scan_async(oxu); | |
2406 | if (oxu->next_uframe != -1) | |
2407 | scan_periodic(oxu); | |
2408 | oxu->scanning = 0; | |
2409 | ||
2410 | /* the IO watchdog guards against hardware or driver bugs that | |
2411 | * misplace IRQs, and should let us run completely without IRQs. | |
2412 | * such lossage has been observed on both VT6202 and VT8235. | |
2413 | */ | |
2414 | if (HC_IS_RUNNING(oxu_to_hcd(oxu)->state) && | |
2415 | (oxu->async->qh_next.ptr != NULL || | |
2416 | oxu->periodic_sched != 0)) | |
2417 | timer_action(oxu, TIMER_IO_WATCHDOG); | |
2418 | } | |
2419 | ||
2420 | static void unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh) | |
2421 | { | |
2422 | /* if we need to use IAA and it's busy, defer */ | |
2423 | if (qh->qh_state == QH_STATE_LINKED | |
2424 | && oxu->reclaim | |
2425 | && HC_IS_RUNNING(oxu_to_hcd(oxu)->state)) { | |
2426 | struct ehci_qh *last; | |
2427 | ||
2428 | for (last = oxu->reclaim; | |
2429 | last->reclaim; | |
2430 | last = last->reclaim) | |
2431 | continue; | |
2432 | qh->qh_state = QH_STATE_UNLINK_WAIT; | |
2433 | last->reclaim = qh; | |
2434 | ||
2435 | /* bypass IAA if the hc can't care */ | |
2436 | } else if (!HC_IS_RUNNING(oxu_to_hcd(oxu)->state) && oxu->reclaim) | |
2437 | end_unlink_async(oxu); | |
2438 | ||
2439 | /* something else might have unlinked the qh by now */ | |
2440 | if (qh->qh_state == QH_STATE_LINKED) | |
2441 | start_unlink_async(oxu, qh); | |
2442 | } | |
2443 | ||
2444 | /* | |
2445 | * USB host controller methods | |
2446 | */ | |
2447 | ||
2448 | static irqreturn_t oxu210_hcd_irq(struct usb_hcd *hcd) | |
2449 | { | |
2450 | struct oxu_hcd *oxu = hcd_to_oxu(hcd); | |
2451 | u32 status, pcd_status = 0; | |
2452 | int bh; | |
2453 | ||
2454 | spin_lock(&oxu->lock); | |
2455 | ||
2456 | status = readl(&oxu->regs->status); | |
2457 | ||
2458 | /* e.g. cardbus physical eject */ | |
2459 | if (status == ~(u32) 0) { | |
2460 | oxu_dbg(oxu, "device removed\n"); | |
2461 | goto dead; | |
2462 | } | |
2463 | ||
2464 | status &= INTR_MASK; | |
2465 | if (!status) { /* irq sharing? */ | |
2466 | spin_unlock(&oxu->lock); | |
2467 | return IRQ_NONE; | |
2468 | } | |
2469 | ||
2470 | /* clear (just) interrupts */ | |
2471 | writel(status, &oxu->regs->status); | |
2472 | readl(&oxu->regs->command); /* unblock posted write */ | |
2473 | bh = 0; | |
2474 | ||
2475 | #ifdef OXU_VERBOSE_DEBUG | |
2476 | /* unrequested/ignored: Frame List Rollover */ | |
2477 | dbg_status(oxu, "irq", status); | |
2478 | #endif | |
2479 | ||
2480 | /* INT, ERR, and IAA interrupt rates can be throttled */ | |
2481 | ||
2482 | /* normal [4.15.1.2] or error [4.15.1.1] completion */ | |
2483 | if (likely((status & (STS_INT|STS_ERR)) != 0)) | |
2484 | bh = 1; | |
2485 | ||
2486 | /* complete the unlinking of some qh [4.15.2.3] */ | |
2487 | if (status & STS_IAA) { | |
2488 | oxu->reclaim_ready = 1; | |
2489 | bh = 1; | |
2490 | } | |
2491 | ||
2492 | /* remote wakeup [4.3.1] */ | |
2493 | if (status & STS_PCD) { | |
2494 | unsigned i = HCS_N_PORTS(oxu->hcs_params); | |
2495 | pcd_status = status; | |
2496 | ||
2497 | /* resume root hub? */ | |
2498 | if (!(readl(&oxu->regs->command) & CMD_RUN)) | |
2499 | usb_hcd_resume_root_hub(hcd); | |
2500 | ||
2501 | while (i--) { | |
2502 | int pstatus = readl(&oxu->regs->port_status[i]); | |
2503 | ||
2504 | if (pstatus & PORT_OWNER) | |
2505 | continue; | |
2506 | if (!(pstatus & PORT_RESUME) | |
2507 | || oxu->reset_done[i] != 0) | |
2508 | continue; | |
2509 | ||
2510 | /* start 20 msec resume signaling from this port, | |
2511 | * and make khubd collect PORT_STAT_C_SUSPEND to | |
2512 | * stop that signaling. | |
2513 | */ | |
2514 | oxu->reset_done[i] = jiffies + msecs_to_jiffies(20); | |
2515 | oxu_dbg(oxu, "port %d remote wakeup\n", i + 1); | |
2516 | mod_timer(&hcd->rh_timer, oxu->reset_done[i]); | |
2517 | } | |
2518 | } | |
2519 | ||
2520 | /* PCI errors [4.15.2.4] */ | |
2521 | if (unlikely((status & STS_FATAL) != 0)) { | |
2522 | /* bogus "fatal" IRQs appear on some chips... why? */ | |
2523 | status = readl(&oxu->regs->status); | |
2524 | dbg_cmd(oxu, "fatal", readl(&oxu->regs->command)); | |
2525 | dbg_status(oxu, "fatal", status); | |
2526 | if (status & STS_HALT) { | |
2527 | oxu_err(oxu, "fatal error\n"); | |
2528 | dead: | |
2529 | ehci_reset(oxu); | |
2530 | writel(0, &oxu->regs->configured_flag); | |
2531 | /* generic layer kills/unlinks all urbs, then | |
2532 | * uses oxu_stop to clean up the rest | |
2533 | */ | |
2534 | bh = 1; | |
2535 | } | |
2536 | } | |
2537 | ||
2538 | if (bh) | |
2539 | ehci_work(oxu); | |
2540 | spin_unlock(&oxu->lock); | |
2541 | if (pcd_status & STS_PCD) | |
2542 | usb_hcd_poll_rh_status(hcd); | |
2543 | return IRQ_HANDLED; | |
2544 | } | |
2545 | ||
2546 | static irqreturn_t oxu_irq(struct usb_hcd *hcd) | |
2547 | { | |
2548 | struct oxu_hcd *oxu = hcd_to_oxu(hcd); | |
2549 | int ret = IRQ_HANDLED; | |
2550 | ||
2551 | u32 status = oxu_readl(hcd->regs, OXU_CHIPIRQSTATUS); | |
2552 | u32 enable = oxu_readl(hcd->regs, OXU_CHIPIRQEN_SET); | |
2553 | ||
2554 | /* Disable all interrupts */ | |
2555 | oxu_writel(hcd->regs, OXU_CHIPIRQEN_CLR, enable); | |
2556 | ||
2557 | if ((oxu->is_otg && (status & OXU_USBOTGI)) || | |
2558 | (!oxu->is_otg && (status & OXU_USBSPHI))) | |
2559 | oxu210_hcd_irq(hcd); | |
2560 | else | |
2561 | ret = IRQ_NONE; | |
2562 | ||
2563 | /* Re-enable all interrupts */ | |
2564 | oxu_writel(hcd->regs, OXU_CHIPIRQEN_SET, enable); | |
2565 | ||
2566 | return ret; | |
2567 | } | |
2568 | ||
2569 | static void oxu_watchdog(unsigned long param) | |
2570 | { | |
2571 | struct oxu_hcd *oxu = (struct oxu_hcd *) param; | |
2572 | unsigned long flags; | |
2573 | ||
2574 | spin_lock_irqsave(&oxu->lock, flags); | |
2575 | ||
2576 | /* lost IAA irqs wedge things badly; seen with a vt8235 */ | |
2577 | if (oxu->reclaim) { | |
2578 | u32 status = readl(&oxu->regs->status); | |
2579 | if (status & STS_IAA) { | |
2580 | oxu_vdbg(oxu, "lost IAA\n"); | |
2581 | writel(STS_IAA, &oxu->regs->status); | |
2582 | oxu->reclaim_ready = 1; | |
2583 | } | |
2584 | } | |
2585 | ||
2586 | /* stop async processing after it's idled a bit */ | |
2587 | if (test_bit(TIMER_ASYNC_OFF, &oxu->actions)) | |
2588 | start_unlink_async(oxu, oxu->async); | |
2589 | ||
2590 | /* oxu could run by timer, without IRQs ... */ | |
2591 | ehci_work(oxu); | |
2592 | ||
2593 | spin_unlock_irqrestore(&oxu->lock, flags); | |
2594 | } | |
2595 | ||
2596 | /* One-time init, only for memory state. | |
2597 | */ | |
2598 | static int oxu_hcd_init(struct usb_hcd *hcd) | |
2599 | { | |
2600 | struct oxu_hcd *oxu = hcd_to_oxu(hcd); | |
2601 | u32 temp; | |
2602 | int retval; | |
2603 | u32 hcc_params; | |
2604 | ||
2605 | spin_lock_init(&oxu->lock); | |
2606 | ||
2607 | init_timer(&oxu->watchdog); | |
2608 | oxu->watchdog.function = oxu_watchdog; | |
2609 | oxu->watchdog.data = (unsigned long) oxu; | |
2610 | ||
2611 | /* | |
2612 | * hw default: 1K periodic list heads, one per frame. | |
2613 | * periodic_size can shrink by USBCMD update if hcc_params allows. | |
2614 | */ | |
2615 | oxu->periodic_size = DEFAULT_I_TDPS; | |
2616 | retval = ehci_mem_init(oxu, GFP_KERNEL); | |
2617 | if (retval < 0) | |
2618 | return retval; | |
2619 | ||
2620 | /* controllers may cache some of the periodic schedule ... */ | |
2621 | hcc_params = readl(&oxu->caps->hcc_params); | |
2622 | if (HCC_ISOC_CACHE(hcc_params)) /* full frame cache */ | |
2623 | oxu->i_thresh = 8; | |
2624 | else /* N microframes cached */ | |
2625 | oxu->i_thresh = 2 + HCC_ISOC_THRES(hcc_params); | |
2626 | ||
2627 | oxu->reclaim = NULL; | |
2628 | oxu->reclaim_ready = 0; | |
2629 | oxu->next_uframe = -1; | |
2630 | ||
2631 | /* | |
2632 | * dedicate a qh for the async ring head, since we couldn't unlink | |
2633 | * a 'real' qh without stopping the async schedule [4.8]. use it | |
2634 | * as the 'reclamation list head' too. | |
2635 | * its dummy is used in hw_alt_next of many tds, to prevent the qh | |
2636 | * from automatically advancing to the next td after short reads. | |
2637 | */ | |
2638 | oxu->async->qh_next.qh = NULL; | |
2639 | oxu->async->hw_next = QH_NEXT(oxu->async->qh_dma); | |
2640 | oxu->async->hw_info1 = cpu_to_le32(QH_HEAD); | |
2641 | oxu->async->hw_token = cpu_to_le32(QTD_STS_HALT); | |
2642 | oxu->async->hw_qtd_next = EHCI_LIST_END; | |
2643 | oxu->async->qh_state = QH_STATE_LINKED; | |
2644 | oxu->async->hw_alt_next = QTD_NEXT(oxu->async->dummy->qtd_dma); | |
2645 | ||
2646 | /* clear interrupt enables, set irq latency */ | |
2647 | if (log2_irq_thresh < 0 || log2_irq_thresh > 6) | |
2648 | log2_irq_thresh = 0; | |
2649 | temp = 1 << (16 + log2_irq_thresh); | |
2650 | if (HCC_CANPARK(hcc_params)) { | |
2651 | /* HW default park == 3, on hardware that supports it (like | |
2652 | * NVidia and ALI silicon), maximizes throughput on the async | |
2653 | * schedule by avoiding QH fetches between transfers. | |
2654 | * | |
2655 | * With fast usb storage devices and NForce2, "park" seems to | |
2656 | * make problems: throughput reduction (!), data errors... | |
2657 | */ | |
2658 | if (park) { | |
2659 | park = min(park, (unsigned) 3); | |
2660 | temp |= CMD_PARK; | |
2661 | temp |= park << 8; | |
2662 | } | |
2663 | oxu_dbg(oxu, "park %d\n", park); | |
2664 | } | |
2665 | if (HCC_PGM_FRAMELISTLEN(hcc_params)) { | |
2666 | /* periodic schedule size can be smaller than default */ | |
2667 | temp &= ~(3 << 2); | |
2668 | temp |= (EHCI_TUNE_FLS << 2); | |
2669 | } | |
2670 | oxu->command = temp; | |
2671 | ||
2672 | return 0; | |
2673 | } | |
2674 | ||
2675 | /* Called during probe() after chip reset completes. | |
2676 | */ | |
2677 | static int oxu_reset(struct usb_hcd *hcd) | |
2678 | { | |
2679 | struct oxu_hcd *oxu = hcd_to_oxu(hcd); | |
2680 | int ret; | |
2681 | ||
2682 | spin_lock_init(&oxu->mem_lock); | |
2683 | INIT_LIST_HEAD(&oxu->urb_list); | |
2684 | oxu->urb_len = 0; | |
2685 | ||
2686 | /* FIXME */ | |
2687 | hcd->self.controller->dma_mask = 0UL; | |
2688 | ||
2689 | if (oxu->is_otg) { | |
2690 | oxu->caps = hcd->regs + OXU_OTG_CAP_OFFSET; | |
2691 | oxu->regs = hcd->regs + OXU_OTG_CAP_OFFSET + \ | |
2692 | HC_LENGTH(readl(&oxu->caps->hc_capbase)); | |
2693 | ||
2694 | oxu->mem = hcd->regs + OXU_SPH_MEM; | |
2695 | } else { | |
2696 | oxu->caps = hcd->regs + OXU_SPH_CAP_OFFSET; | |
2697 | oxu->regs = hcd->regs + OXU_SPH_CAP_OFFSET + \ | |
2698 | HC_LENGTH(readl(&oxu->caps->hc_capbase)); | |
2699 | ||
2700 | oxu->mem = hcd->regs + OXU_OTG_MEM; | |
2701 | } | |
2702 | ||
2703 | oxu->hcs_params = readl(&oxu->caps->hcs_params); | |
2704 | oxu->sbrn = 0x20; | |
2705 | ||
2706 | ret = oxu_hcd_init(hcd); | |
2707 | if (ret) | |
2708 | return ret; | |
2709 | ||
2710 | return 0; | |
2711 | } | |
2712 | ||
2713 | static int oxu_run(struct usb_hcd *hcd) | |
2714 | { | |
2715 | struct oxu_hcd *oxu = hcd_to_oxu(hcd); | |
2716 | int retval; | |
2717 | u32 temp, hcc_params; | |
2718 | ||
2719 | hcd->uses_new_polling = 1; | |
2720 | hcd->poll_rh = 0; | |
2721 | ||
2722 | /* EHCI spec section 4.1 */ | |
2723 | retval = ehci_reset(oxu); | |
2724 | if (retval != 0) { | |
2725 | ehci_mem_cleanup(oxu); | |
2726 | return retval; | |
2727 | } | |
2728 | writel(oxu->periodic_dma, &oxu->regs->frame_list); | |
2729 | writel((u32) oxu->async->qh_dma, &oxu->regs->async_next); | |
2730 | ||
2731 | /* hcc_params controls whether oxu->regs->segment must (!!!) | |
2732 | * be used; it constrains QH/ITD/SITD and QTD locations. | |
2733 | * pci_pool consistent memory always uses segment zero. | |
2734 | * streaming mappings for I/O buffers, like pci_map_single(), | |
2735 | * can return segments above 4GB, if the device allows. | |
2736 | * | |
2737 | * NOTE: the dma mask is visible through dma_supported(), so | |
2738 | * drivers can pass this info along ... like NETIF_F_HIGHDMA, | |
2739 | * Scsi_Host.highmem_io, and so forth. It's readonly to all | |
2740 | * host side drivers though. | |
2741 | */ | |
2742 | hcc_params = readl(&oxu->caps->hcc_params); | |
2743 | if (HCC_64BIT_ADDR(hcc_params)) | |
2744 | writel(0, &oxu->regs->segment); | |
2745 | ||
2746 | oxu->command &= ~(CMD_LRESET | CMD_IAAD | CMD_PSE | | |
2747 | CMD_ASE | CMD_RESET); | |
2748 | oxu->command |= CMD_RUN; | |
2749 | writel(oxu->command, &oxu->regs->command); | |
2750 | dbg_cmd(oxu, "init", oxu->command); | |
2751 | ||
2752 | /* | |
2753 | * Start, enabling full USB 2.0 functionality ... usb 1.1 devices | |
2754 | * are explicitly handed to companion controller(s), so no TT is | |
2755 | * involved with the root hub. (Except where one is integrated, | |
2756 | * and there's no companion controller unless maybe for USB OTG.) | |
2757 | */ | |
2758 | hcd->state = HC_STATE_RUNNING; | |
2759 | writel(FLAG_CF, &oxu->regs->configured_flag); | |
2760 | readl(&oxu->regs->command); /* unblock posted writes */ | |
2761 | ||
2762 | temp = HC_VERSION(readl(&oxu->caps->hc_capbase)); | |
2763 | oxu_info(oxu, "USB %x.%x started, quasi-EHCI %x.%02x, driver %s%s\n", | |
2764 | ((oxu->sbrn & 0xf0)>>4), (oxu->sbrn & 0x0f), | |
2765 | temp >> 8, temp & 0xff, DRIVER_VERSION, | |
2766 | ignore_oc ? ", overcurrent ignored" : ""); | |
2767 | ||
2768 | writel(INTR_MASK, &oxu->regs->intr_enable); /* Turn On Interrupts */ | |
2769 | ||
2770 | return 0; | |
2771 | } | |
2772 | ||
2773 | static void oxu_stop(struct usb_hcd *hcd) | |
2774 | { | |
2775 | struct oxu_hcd *oxu = hcd_to_oxu(hcd); | |
2776 | ||
2777 | /* Turn off port power on all root hub ports. */ | |
2778 | ehci_port_power(oxu, 0); | |
2779 | ||
2780 | /* no more interrupts ... */ | |
2781 | del_timer_sync(&oxu->watchdog); | |
2782 | ||
2783 | spin_lock_irq(&oxu->lock); | |
2784 | if (HC_IS_RUNNING(hcd->state)) | |
2785 | ehci_quiesce(oxu); | |
2786 | ||
2787 | ehci_reset(oxu); | |
2788 | writel(0, &oxu->regs->intr_enable); | |
2789 | spin_unlock_irq(&oxu->lock); | |
2790 | ||
2791 | /* let companion controllers work when we aren't */ | |
2792 | writel(0, &oxu->regs->configured_flag); | |
2793 | ||
2794 | /* root hub is shut down separately (first, when possible) */ | |
2795 | spin_lock_irq(&oxu->lock); | |
2796 | if (oxu->async) | |
2797 | ehci_work(oxu); | |
2798 | spin_unlock_irq(&oxu->lock); | |
2799 | ehci_mem_cleanup(oxu); | |
2800 | ||
2801 | dbg_status(oxu, "oxu_stop completed", readl(&oxu->regs->status)); | |
2802 | } | |
2803 | ||
2804 | /* Kick in for silicon on any bus (not just pci, etc). | |
2805 | * This forcibly disables dma and IRQs, helping kexec and other cases | |
2806 | * where the next system software may expect clean state. | |
2807 | */ | |
2808 | static void oxu_shutdown(struct usb_hcd *hcd) | |
2809 | { | |
2810 | struct oxu_hcd *oxu = hcd_to_oxu(hcd); | |
2811 | ||
2812 | (void) ehci_halt(oxu); | |
2813 | ehci_turn_off_all_ports(oxu); | |
2814 | ||
2815 | /* make BIOS/etc use companion controller during reboot */ | |
2816 | writel(0, &oxu->regs->configured_flag); | |
2817 | ||
2818 | /* unblock posted writes */ | |
2819 | readl(&oxu->regs->configured_flag); | |
2820 | } | |
2821 | ||
2822 | /* Non-error returns are a promise to giveback() the urb later; | |
2823 | * we drop ownership so the next owner (or urb unlink) can get it. | |
2824 | * | |
2825 | * urb + dev is in hcd.self.controller.urb_list | |
2826 | * we're queueing TDs onto software and hardware lists | |
2827 | * | |
2828 | * hcd-specific init for hcpriv hasn't been done yet | |
2829 | * | |
2830 | * NOTE: control, bulk, and interrupt share the same code to append TDs | |
2831 | * to a (possibly active) QH, and the same QH scanning code. | |
2832 | */ | |
2833 | static int __oxu_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, | |
2834 | gfp_t mem_flags) | |
2835 | { | |
2836 | struct oxu_hcd *oxu = hcd_to_oxu(hcd); | |
2837 | struct list_head qtd_list; | |
2838 | ||
2839 | INIT_LIST_HEAD(&qtd_list); | |
2840 | ||
2841 | switch (usb_pipetype(urb->pipe)) { | |
2842 | case PIPE_CONTROL: | |
2843 | case PIPE_BULK: | |
2844 | default: | |
2845 | if (!qh_urb_transaction(oxu, urb, &qtd_list, mem_flags)) | |
2846 | return -ENOMEM; | |
2847 | return submit_async(oxu, urb, &qtd_list, mem_flags); | |
2848 | ||
2849 | case PIPE_INTERRUPT: | |
2850 | if (!qh_urb_transaction(oxu, urb, &qtd_list, mem_flags)) | |
2851 | return -ENOMEM; | |
2852 | return intr_submit(oxu, urb, &qtd_list, mem_flags); | |
2853 | ||
2854 | case PIPE_ISOCHRONOUS: | |
2855 | if (urb->dev->speed == USB_SPEED_HIGH) | |
2856 | return itd_submit(oxu, urb, mem_flags); | |
2857 | else | |
2858 | return sitd_submit(oxu, urb, mem_flags); | |
2859 | } | |
2860 | } | |
2861 | ||
2862 | /* This function is responsible for breaking URBs with a large data size | |
2863 | * into smaller chunks and processing the small urbs in sequence. | |
2864 | */ | |
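 /* For example, a 10000 byte bulk transfer becomes three micro urbs:
 * two carrying 4096 bytes each plus a final one carrying the 1808 byte
 * remainder; only the last micro urb has its "last" flag set.
 */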
2865 | static int oxu_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, | |
2866 | gfp_t mem_flags) | |
2867 | { | |
2868 | struct oxu_hcd *oxu = hcd_to_oxu(hcd); | |
2869 | int num, rem; | |
2870 | int transfer_buffer_length; | |
2871 | void *transfer_buffer; | |
2872 | struct urb *murb; | |
2873 | int i, ret; | |
2874 | ||
2875 | /* If not bulk pipe just enqueue the URB */ | |
2876 | if (!usb_pipebulk(urb->pipe)) | |
2877 | return __oxu_urb_enqueue(hcd, urb, mem_flags); | |
2878 | ||
2879 | /* Otherwise we should verify the USB transfer buffer size! */ | |
2880 | transfer_buffer = urb->transfer_buffer; | |
2881 | transfer_buffer_length = urb->transfer_buffer_length; | |
2882 | ||
2883 | num = urb->transfer_buffer_length / 4096; | |
2884 | rem = urb->transfer_buffer_length % 4096; | |
2885 | if (rem != 0) | |
2886 | num++; | |
2887 | ||
2888 | /* If the URB is 4096 bytes or smaller, just enqueue it! */ | |
2889 | if (num == 1) | |
2890 | return __oxu_urb_enqueue(hcd, urb, mem_flags); | |
2891 | ||
2892 | /* Ok, we have more work to do! :) */ | |
2893 | ||
2894 | for (i = 0; i < num - 1; i++) { | |
2895 | /* Get a free micro URB; poll till a free urb is received */ | |
2896 | ||
2897 | do { | |
2898 | murb = (struct urb *) oxu_murb_alloc(oxu); | |
2899 | if (!murb) | |
2900 | schedule(); | |
2901 | } while (!murb); | |
2902 | ||
2903 | /* Copy the urb */ | |
2904 | memcpy(murb, urb, sizeof(struct urb)); | |
2905 | ||
2906 | murb->transfer_buffer_length = 4096; | |
2907 | murb->transfer_buffer = transfer_buffer + i * 4096; | |
2908 | ||
2909 | /* A NULL complete pointer encodes that this is a micro urb */ | |
2910 | murb->complete = NULL; | |
2911 | ||
2912 | ((struct oxu_murb *) murb)->main = urb; | |
2913 | ((struct oxu_murb *) murb)->last = 0; | |
2914 | ||
2915 | /* Retry until the urb is queued: resources may be temporarily | |
2916 | * exhausted, so keep trying until __oxu_urb_enqueue() succeeds. | |
2917 | */ | |
2918 | do { | |
2919 | ret = __oxu_urb_enqueue(hcd, murb, mem_flags); | |
2920 | if (ret) | |
2921 | schedule(); | |
2922 | } while (ret); | |
2923 | } | |
2924 | ||
2925 | /* Last urb requires special handling */ | |
2926 | ||
2927 | /* Get a free micro URB; poll till a free urb is received */ | |
2928 | do { | |
2929 | murb = (struct urb *) oxu_murb_alloc(oxu); | |
2930 | if (!murb) | |
2931 | schedule(); | |
2932 | } while (!murb); | |
2933 | ||
2934 | /* Copy the urb */ | |
2935 | memcpy(murb, urb, sizeof(struct urb)); | |
2936 | ||
2937 | murb->transfer_buffer_length = rem > 0 ? rem : 4096; | |
2938 | murb->transfer_buffer = transfer_buffer + (num - 1) * 4096; | |
2939 | ||
2940 | /* A NULL complete pointer encodes that this is a micro urb */ | |
2941 | murb->complete = NULL; | |
2942 | ||
2943 | ((struct oxu_murb *) murb)->main = urb; | |
2944 | ((struct oxu_murb *) murb)->last = 1; | |
2945 | ||
2946 | do { | |
2947 | ret = __oxu_urb_enqueue(hcd, murb, mem_flags); | |
2948 | if (ret) | |
2949 | schedule(); | |
2950 | } while (ret); | |
2951 | ||
2952 | return ret; | |
2953 | } | |
2954 | ||
2955 | /* Remove from hardware lists. | |
2956 | * Completions normally happen asynchronously | |
2957 | */ | |
2958 | static int oxu_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) | |
2959 | { | |
2960 | struct oxu_hcd *oxu = hcd_to_oxu(hcd); | |
2961 | struct ehci_qh *qh; | |
2962 | unsigned long flags; | |
2963 | ||
2964 | spin_lock_irqsave(&oxu->lock, flags); | |
2965 | switch (usb_pipetype(urb->pipe)) { | |
2966 | case PIPE_CONTROL: | |
2967 | case PIPE_BULK: | |
2968 | default: | |
2969 | qh = (struct ehci_qh *) urb->hcpriv; | |
2970 | if (!qh) | |
2971 | break; | |
2972 | unlink_async(oxu, qh); | |
2973 | break; | |
2974 | ||
2975 | case PIPE_INTERRUPT: | |
2976 | qh = (struct ehci_qh *) urb->hcpriv; | |
2977 | if (!qh) | |
2978 | break; | |
2979 | switch (qh->qh_state) { | |
2980 | case QH_STATE_LINKED: | |
2981 | intr_deschedule(oxu, qh); | |
2982 | /* FALL THROUGH */ | |
2983 | case QH_STATE_IDLE: | |
2984 | qh_completions(oxu, qh); | |
2985 | break; | |
2986 | default: | |
2987 | oxu_dbg(oxu, "bogus qh %p state %d\n", | |
2988 | qh, qh->qh_state); | |
2989 | goto done; | |
2990 | } | |
2991 | ||
2992 | /* reschedule QH iff another request is queued */ | |
2993 | if (!list_empty(&qh->qtd_list) | |
2994 | && HC_IS_RUNNING(hcd->state)) { | |
2995 | int status; | |
2996 | ||
2997 | status = qh_schedule(oxu, qh); | |
2998 | spin_unlock_irqrestore(&oxu->lock, flags); | |
2999 | ||
3000 | if (status != 0) { | |
3001 | /* shouldn't happen often, but ... | |
3002 | * FIXME kill those tds' urbs | |
3003 | */ | |
3004 | err("can't reschedule qh %p, err %d", | |
3005 | qh, status); | |
3006 | } | |
3007 | return status; | |
3008 | } | |
3009 | break; | |
3010 | } | |
3011 | done: | |
3012 | spin_unlock_irqrestore(&oxu->lock, flags); | |
3013 | return 0; | |
3014 | } | |
3015 | ||
3016 | /* Bulk qh holds the data toggle */ | |
3017 | static void oxu_endpoint_disable(struct usb_hcd *hcd, | |
3018 | struct usb_host_endpoint *ep) | |
3019 | { | |
3020 | struct oxu_hcd *oxu = hcd_to_oxu(hcd); | |
3021 | unsigned long flags; | |
3022 | struct ehci_qh *qh, *tmp; | |
3023 | ||
3024 | /* ASSERT: any requests/urbs are being unlinked */ | |
3025 | /* ASSERT: nobody can be submitting urbs for this any more */ | |
3026 | ||
3027 | rescan: | |
3028 | spin_lock_irqsave(&oxu->lock, flags); | |
3029 | qh = ep->hcpriv; | |
3030 | if (!qh) | |
3031 | goto done; | |
3032 | ||
3033 | /* endpoints can be iso streams. for now, we don't | |
3034 | * accelerate iso completions ... so spin a while. | |
3035 | */ | |
3036 | if (qh->hw_info1 == 0) { | |
3037 | oxu_vdbg(oxu, "iso delay\n"); | |
3038 | goto idle_timeout; | |
3039 | } | |
3040 | ||
3041 | if (!HC_IS_RUNNING(hcd->state)) | |
3042 | qh->qh_state = QH_STATE_IDLE; | |
3043 | switch (qh->qh_state) { | |
3044 | case QH_STATE_LINKED: | |
3045 | for (tmp = oxu->async->qh_next.qh; | |
3046 | tmp && tmp != qh; | |
3047 | tmp = tmp->qh_next.qh) | |
3048 | continue; | |
3049 | /* periodic qh self-unlinks on empty */ | |
3050 | if (!tmp) | |
3051 | goto nogood; | |
3052 | unlink_async(oxu, qh); | |
3053 | /* FALL THROUGH */ | |
3054 | case QH_STATE_UNLINK: /* wait for hw to finish? */ | |
3055 | idle_timeout: | |
3056 | spin_unlock_irqrestore(&oxu->lock, flags); | |
3057 | schedule_timeout_uninterruptible(1); | |
3058 | goto rescan; | |
3059 | case QH_STATE_IDLE: /* fully unlinked */ | |
3060 | if (list_empty(&qh->qtd_list)) { | |
3061 | qh_put(qh); | |
3062 | break; | |
3063 | } | |
3064 | /* else FALL THROUGH */ | |
3065 | default: | |
3066 | nogood: | |
3067 | /* caller was supposed to have unlinked any requests; | |
3068 | * that's not our job. just leak this memory. | |
3069 | */ | |
3070 | oxu_err(oxu, "qh %p (#%02x) state %d%s\n", | |
3071 | qh, ep->desc.bEndpointAddress, qh->qh_state, | |
3072 | list_empty(&qh->qtd_list) ? "" : "(has tds)"); | |
3073 | break; | |
3074 | } | |
3075 | ep->hcpriv = NULL; | |
3076 | done: | |
3077 | spin_unlock_irqrestore(&oxu->lock, flags); | |
3078 | return; | |
3079 | } | |
3080 | ||
3081 | static int oxu_get_frame(struct usb_hcd *hcd) | |
3082 | { | |
3083 | struct oxu_hcd *oxu = hcd_to_oxu(hcd); | |
3084 | ||
3085 | return (readl(&oxu->regs->frame_index) >> 3) % | |
3086 | oxu->periodic_size; | |
3087 | } | |
3088 | ||
3089 | /* Build "status change" packet (one or two bytes) from HC registers */ | |
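 /* Bit 0 is reserved for hub-wide changes; port N reports in bit N, so
 * seven ports fit in the first byte and a second byte is used only
 * when the controller has more than seven ports.
 */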
3090 | static int oxu_hub_status_data(struct usb_hcd *hcd, char *buf) | |
3091 | { | |
3092 | struct oxu_hcd *oxu = hcd_to_oxu(hcd); | |
3093 | u32 temp, mask, status = 0; | |
3094 | int ports, i, retval = 1; | |
3095 | unsigned long flags; | |
3096 | ||
3097 | /* if !USB_SUSPEND, root hub timers won't get shut down ... */ | |
3098 | if (!HC_IS_RUNNING(hcd->state)) | |
3099 | return 0; | |
3100 | ||
3101 | /* init status to no-changes */ | |
3102 | buf[0] = 0; | |
3103 | ports = HCS_N_PORTS(oxu->hcs_params); | |
3104 | if (ports > 7) { | |
3105 | buf[1] = 0; | |
3106 | retval++; | |
3107 | } | |
3108 | ||
3109 | /* Some boards (mostly VIA?) report bogus overcurrent indications, | |
3110 | * causing massive log spam unless we completely ignore them. It | |
3111 | * may be relevant that VIA VT8235 controllers, where PORT_POWER is | |
3112 | * always set, seem to clear PORT_OCC and PORT_CSC when writing to | |
3113 | * PORT_POWER; that's surprising, but maybe within-spec. | |
3114 | */ | |
3115 | if (!ignore_oc) | |
3116 | mask = PORT_CSC | PORT_PEC | PORT_OCC; | |
3117 | else | |
3118 | mask = PORT_CSC | PORT_PEC; | |
3119 | ||
3120 | /* no hub change reports (bit 0) for now (power, ...) */ | |
3121 | ||
3122 | /* port N changes (bit N)? */ | |
3123 | spin_lock_irqsave(&oxu->lock, flags); | |
3124 | for (i = 0; i < ports; i++) { | |
3125 | temp = readl(&oxu->regs->port_status[i]); | |
3126 | ||
3127 | /* | |
3128 | * Return status information even for ports with OWNER set. | |
3129 | * Otherwise khubd wouldn't see the disconnect event when a | |
3130 | * high-speed device is switched over to the companion | |
3131 | * controller by the user. | |
3132 | */ | |
3133 | ||
3134 | if (!(temp & PORT_CONNECT)) | |
3135 | oxu->reset_done[i] = 0; | |
3136 | if ((temp & mask) != 0 || ((temp & PORT_RESUME) != 0 && | |
3137 | time_after_eq(jiffies, oxu->reset_done[i]))) { | |
3138 | if (i < 7) | |
3139 | buf[0] |= 1 << (i + 1); | |
3140 | else | |
3141 | buf[1] |= 1 << (i - 7); | |
3142 | status = STS_PCD; | |
3143 | } | |
3144 | } | |
3145 | /* FIXME autosuspend idle root hubs */ | |
3146 | spin_unlock_irqrestore(&oxu->lock, flags); | |
3147 | return status ? retval : 0; | |
3148 | } | |
3149 | ||
3150 | /* Returns the speed of a device attached to a port on the root hub. */ | |
3151 | static inline unsigned int oxu_port_speed(struct oxu_hcd *oxu, | |
3152 | unsigned int portsc) | |
3153 | { | |
3154 | switch ((portsc >> 26) & 3) { | |
3155 | case 0: | |
3156 | return 0; | |
3157 | case 1: | |
3158 | return 1 << USB_PORT_FEAT_LOWSPEED; | |
3159 | case 2: | |
3160 | default: | |
3161 | return 1 << USB_PORT_FEAT_HIGHSPEED; | |
3162 | } | |
3163 | } | |
3164 | ||
3165 | #define PORT_WAKE_BITS (PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E) | |
3166 | static int oxu_hub_control(struct usb_hcd *hcd, u16 typeReq, | |
3167 | u16 wValue, u16 wIndex, char *buf, u16 wLength) | |
3168 | { | |
3169 | struct oxu_hcd *oxu = hcd_to_oxu(hcd); | |
3170 | int ports = HCS_N_PORTS(oxu->hcs_params); | |
3171 | u32 __iomem *status_reg = &oxu->regs->port_status[wIndex - 1]; | |
3172 | u32 temp, status; | |
3173 | unsigned long flags; | |
3174 | int retval = 0; | |
3175 | unsigned selector; | |
3176 | ||
3177 | /* | |
3178 | * FIXME: support SetPortFeatures USB_PORT_FEAT_INDICATOR. | |
3179 | * HCS_INDICATOR may say we can change LEDs to off/amber/green. | |
3180 | * (track current state ourselves) ... blink for diagnostics, | |
3181 | * power, "this is the one", etc. EHCI spec supports this. | |
3182 | */ | |
3183 | ||
3184 | spin_lock_irqsave(&oxu->lock, flags); | |
3185 | switch (typeReq) { | |
3186 | case ClearHubFeature: | |
3187 | switch (wValue) { | |
3188 | case C_HUB_LOCAL_POWER: | |
3189 | case C_HUB_OVER_CURRENT: | |
3190 | /* no hub-wide feature/status flags */ | |
3191 | break; | |
3192 | default: | |
3193 | goto error; | |
3194 | } | |
3195 | break; | |
3196 | case ClearPortFeature: | |
3197 | if (!wIndex || wIndex > ports) | |
3198 | goto error; | |
3199 | wIndex--; | |
3200 | temp = readl(status_reg); | |
3201 | ||
3202 | /* | |
3203 | * Even if OWNER is set, so the port is owned by the | |
3204 | * companion controller, khubd needs to be able to clear | |
3205 | * the port-change status bits (especially | |
3206 | * USB_PORT_FEAT_C_CONNECTION). | |
3207 | */ | |
3208 | ||
3209 | switch (wValue) { | |
3210 | case USB_PORT_FEAT_ENABLE: | |
3211 | writel(temp & ~PORT_PE, status_reg); | |
3212 | break; | |
3213 | case USB_PORT_FEAT_C_ENABLE: | |
3214 | writel((temp & ~PORT_RWC_BITS) | PORT_PEC, status_reg); | |
3215 | break; | |
3216 | case USB_PORT_FEAT_SUSPEND: | |
3217 | if (temp & PORT_RESET) | |
3218 | goto error; | |
3219 | if (temp & PORT_SUSPEND) { | |
3220 | if ((temp & PORT_PE) == 0) | |
3221 | goto error; | |
3222 | /* resume signaling for 20 msec */ | |
3223 | temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS); | |
3224 | writel(temp | PORT_RESUME, status_reg); | |
3225 | oxu->reset_done[wIndex] = jiffies | |
3226 | + msecs_to_jiffies(20); | |
3227 | } | |
3228 | break; | |
3229 | case USB_PORT_FEAT_C_SUSPEND: | |
3230 | /* we auto-clear this feature */ | |
3231 | break; | |
3232 | case USB_PORT_FEAT_POWER: | |
3233 | if (HCS_PPC(oxu->hcs_params)) | |
3234 | writel(temp & ~(PORT_RWC_BITS | PORT_POWER), | |
3235 | status_reg); | |
3236 | break; | |
3237 | case USB_PORT_FEAT_C_CONNECTION: | |
3238 | writel((temp & ~PORT_RWC_BITS) | PORT_CSC, status_reg); | |
3239 | break; | |
3240 | case USB_PORT_FEAT_C_OVER_CURRENT: | |
3241 | writel((temp & ~PORT_RWC_BITS) | PORT_OCC, status_reg); | |
3242 | break; | |
3243 | case USB_PORT_FEAT_C_RESET: | |
3244 | /* GetPortStatus clears reset */ | |
3245 | break; | |
3246 | default: | |
3247 | goto error; | |
3248 | } | |
3249 | readl(&oxu->regs->command); /* unblock posted write */ | |
3250 | break; | |
3251 | case GetHubDescriptor: | |
3252 | ehci_hub_descriptor(oxu, (struct usb_hub_descriptor *) | |
3253 | buf); | |
3254 | break; | |
3255 | case GetHubStatus: | |
3256 | /* no hub-wide feature/status flags */ | |
3257 | memset(buf, 0, 4); | |
3258 | break; | |
3259 | case GetPortStatus: | |
3260 | if (!wIndex || wIndex > ports) | |
3261 | goto error; | |
3262 | wIndex--; | |
3263 | status = 0; | |
3264 | temp = readl(status_reg); | |
3265 | ||
3266 | /* wPortChange bits */ | |
3267 | if (temp & PORT_CSC) | |
3268 | status |= 1 << USB_PORT_FEAT_C_CONNECTION; | |
3269 | if (temp & PORT_PEC) | |
3270 | status |= 1 << USB_PORT_FEAT_C_ENABLE; | |
3271 | if ((temp & PORT_OCC) && !ignore_oc) | |
3272 | status |= 1 << USB_PORT_FEAT_C_OVER_CURRENT; | |
3273 | ||
3274 | /* whoever resumes must GetPortStatus to complete it!! */ | |
3275 | if (temp & PORT_RESUME) { | |
3276 | ||
3277 | /* Remote Wakeup received? */ | |
3278 | if (!oxu->reset_done[wIndex]) { | |
3279 | /* resume signaling for 20 msec */ | |
3280 | oxu->reset_done[wIndex] = jiffies | |
3281 | + msecs_to_jiffies(20); | |
3282 | /* check the port again */ | |
3283 | mod_timer(&oxu_to_hcd(oxu)->rh_timer, | |
3284 | oxu->reset_done[wIndex]); | |
3285 | } | |
3286 | ||
3287 | /* resume completed? */ | |
3288 | else if (time_after_eq(jiffies, | |
3289 | oxu->reset_done[wIndex])) { | |
3290 | status |= 1 << USB_PORT_FEAT_C_SUSPEND; | |
3291 | oxu->reset_done[wIndex] = 0; | |
3292 | ||
3293 | /* stop resume signaling */ | |
3294 | temp = readl(status_reg); | |
3295 | writel(temp & ~(PORT_RWC_BITS | PORT_RESUME), | |
3296 | status_reg); | |
3297 | retval = handshake(oxu, status_reg, | |
3298 | PORT_RESUME, 0, 2000 /* 2msec */); | |
3299 | if (retval != 0) { | |
3300 | oxu_err(oxu, | |
3301 | "port %d resume error %d\n", | |
3302 | wIndex + 1, retval); | |
3303 | goto error; | |
3304 | } | |
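| /* scrub suspend/resume and the line-status field from our cached copy so the wPortStatus bits computed below reflect the completed resume */ | |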
3305 | temp &= ~(PORT_SUSPEND|PORT_RESUME|(3<<10)); | |
3306 | } | |
3307 | } | |
3308 | ||
3309 | /* whoever resets must GetPortStatus to complete it!! */ | |
3310 | if ((temp & PORT_RESET) | |
3311 | && time_after_eq(jiffies, | |
3312 | oxu->reset_done[wIndex])) { | |
3313 | status |= 1 << USB_PORT_FEAT_C_RESET; | |
3314 | oxu->reset_done[wIndex] = 0; | |
3315 | ||
3316 | /* force reset to complete */ | |
3317 | writel(temp & ~(PORT_RWC_BITS | PORT_RESET), | |
3318 | status_reg); | |
3319 | /* REVISIT: some hardware needs 550+ usec to clear | |
3320 | * this bit; seems too long to spin routinely... | |
3321 | */ | |
3322 | retval = handshake(oxu, status_reg, | |
3323 | PORT_RESET, 0, 750); | |
3324 | if (retval != 0) { | |
3325 | oxu_err(oxu, "port %d reset error %d\n", | |
3326 | wIndex + 1, retval); | |
3327 | goto error; | |
3328 | } | |
3329 | ||
3330 | /* see what we found out */ | |
3331 | temp = check_reset_complete(oxu, wIndex, status_reg, | |
3332 | readl(status_reg)); | |
3333 | } | |
3334 | ||
3335 | /* transfer dedicated ports to the companion hc */ | |
3336 | if ((temp & PORT_CONNECT) && | |
3337 | test_bit(wIndex, &oxu->companion_ports)) { | |
3338 | temp &= ~PORT_RWC_BITS; | |
3339 | temp |= PORT_OWNER; | |
3340 | writel(temp, status_reg); | |
3341 | oxu_dbg(oxu, "port %d --> companion\n", wIndex + 1); | |
3342 | temp = readl(status_reg); | |
3343 | } | |
3344 | ||
3345 | /* | |
3346 | * Even if OWNER is set, there's no harm letting khubd | |
3347 | * see the wPortStatus values (they should all be 0 except | |
3348 | * for PORT_POWER anyway). | |
3349 | */ | |
3350 | ||
3351 | if (temp & PORT_CONNECT) { | |
3352 | status |= 1 << USB_PORT_FEAT_CONNECTION; | |
3353 | /* status may be from integrated TT */ | |
3354 | status |= oxu_port_speed(oxu, temp); | |
3355 | } | |
3356 | if (temp & PORT_PE) | |
3357 | status |= 1 << USB_PORT_FEAT_ENABLE; | |
3358 | if (temp & (PORT_SUSPEND|PORT_RESUME)) | |
3359 | status |= 1 << USB_PORT_FEAT_SUSPEND; | |
3360 | if (temp & PORT_OC) | |
3361 | status |= 1 << USB_PORT_FEAT_OVER_CURRENT; | |
3362 | if (temp & PORT_RESET) | |
3363 | status |= 1 << USB_PORT_FEAT_RESET; | |
3364 | if (temp & PORT_POWER) | |
3365 | status |= 1 << USB_PORT_FEAT_POWER; | |
3366 | ||
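| /* with OXU_VERBOSE_DEBUG the port state is dumped on every GetPortStatus; otherwise only when a wPortChange bit is set */ | |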
3367 | #ifndef OXU_VERBOSE_DEBUG | |
3368 | if (status & ~0xffff) /* only if wPortChange is interesting */ | |
3369 | #endif | |
3370 | dbg_port(oxu, "GetStatus", wIndex + 1, temp); | |
3371 | put_unaligned(cpu_to_le32(status), (__le32 *) buf); | |
3372 | break; | |
3373 | case SetHubFeature: | |
3374 | switch (wValue) { | |
3375 | case C_HUB_LOCAL_POWER: | |
3376 | case C_HUB_OVER_CURRENT: | |
3377 | /* no hub-wide feature/status flags */ | |
3378 | break; | |
3379 | default: | |
3380 | goto error; | |
3381 | } | |
3382 | break; | |
3383 | case SetPortFeature: | |
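| /* the test-mode selector (used by USB_PORT_FEAT_TEST below) rides in the high byte of wIndex; the port number is in the low byte (USB 2.0 11.24.2.13) */ | |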
3384 | selector = wIndex >> 8; | |
3385 | wIndex &= 0xff; | |
3386 | if (!wIndex || wIndex > ports) | |
3387 | goto error; | |
3388 | wIndex--; | |
3389 | temp = readl(status_reg); | |
3390 | if (temp & PORT_OWNER) | |
3391 | break; | |
3392 | ||
3393 | temp &= ~PORT_RWC_BITS; | |
3394 | switch (wValue) { | |
3395 | case USB_PORT_FEAT_SUSPEND: | |
3396 | if ((temp & PORT_PE) == 0 | |
3397 | || (temp & PORT_RESET) != 0) | |
3398 | goto error; | |
3399 | if (device_may_wakeup(&hcd->self.root_hub->dev)) | |
3400 | temp |= PORT_WAKE_BITS; | |
3401 | writel(temp | PORT_SUSPEND, status_reg); | |
3402 | break; | |
3403 | case USB_PORT_FEAT_POWER: | |
3404 | if (HCS_PPC(oxu->hcs_params)) | |
3405 | writel(temp | PORT_POWER, status_reg); | |
3406 | break; | |
3407 | case USB_PORT_FEAT_RESET: | |
3408 | if (temp & PORT_RESUME) | |
3409 | goto error; | |
3410 | /* line status bits may report this as low speed, | |
3411 | * which can be fine if this root hub has a | |
3412 | * transaction translator built in. | |
3413 | */ | |
3414 | oxu_vdbg(oxu, "port %d reset\n", wIndex + 1); | |
3415 | temp |= PORT_RESET; | |
3416 | temp &= ~PORT_PE; | |
3417 | ||
3418 | /* | |
3419 | * caller must wait, then call GetPortStatus | |
3420 | * usb 2.0 spec says 50 ms resets on root | |
3421 | */ | |
3422 | oxu->reset_done[wIndex] = jiffies | |
3423 | + msecs_to_jiffies(50); | |
3424 | writel(temp, status_reg); | |
3425 | break; | |
3426 | ||
3427 | /* For downstream facing ports (these): one hub port is put | |
3428 | * into test mode according to USB2 11.24.2.13, then the hub | |
3429 | * must be reset (which for root hub now means rmmod+modprobe, | |
3430 | * or else system reboot). See EHCI 2.3.9 and 4.14 for info | |
3431 | * about the EHCI-specific stuff. | |
3432 | */ | |
3433 | case USB_PORT_FEAT_TEST: | |
3434 | if (!selector || selector > 5) | |
3435 | goto error; | |
3436 | ehci_quiesce(oxu); | |
3437 | ehci_halt(oxu); | |
3438 | temp |= selector << 16; | |
3439 | writel(temp, status_reg); | |
3440 | break; | |
3441 | ||
3442 | default: | |
3443 | goto error; | |
3444 | } | |
3445 | readl(&oxu->regs->command); /* unblock posted writes */ | |
3446 | break; | |
3447 | ||
3448 | default: | |
3449 | error: | |
3450 | /* "stall" on error */ | |
3451 | retval = -EPIPE; | |
3452 | } | |
3453 | spin_unlock_irqrestore(&oxu->lock, flags); | |
3454 | return retval; | |
3455 | } | |
3456 | ||
3457 | #ifdef CONFIG_PM | |
3458 | ||
3459 | static int oxu_bus_suspend(struct usb_hcd *hcd) | |
3460 | { | |
3461 | struct oxu_hcd *oxu = hcd_to_oxu(hcd); | |
3462 | int port; | |
3463 | int mask; | |
3464 | ||
3465 | oxu_dbg(oxu, "suspend root hub\n"); | |
3466 | ||
3467 | if (time_before(jiffies, oxu->next_statechange)) | |
3468 | msleep(5); | |
3469 | ||
3470 | port = HCS_N_PORTS(oxu->hcs_params); | |
3471 | spin_lock_irq(&oxu->lock); | |
3472 | ||
3473 | /* stop schedules, clean any completed work */ | |
3474 | if (HC_IS_RUNNING(hcd->state)) { | |
3475 | ehci_quiesce(oxu); | |
3476 | hcd->state = HC_STATE_QUIESCING; | |
3477 | } | |
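| /* snapshot CMD so bus_resume() can restore CMD_RUN, the frame list size and the IRQ threshold */ | |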
3478 | oxu->command = readl(&oxu->regs->command); | |
3479 | if (oxu->reclaim) | |
3480 | oxu->reclaim_ready = 1; | |
3481 | ehci_work(oxu); | |
3482 | ||
3483 | /* Unlike other USB host controller types, EHCI doesn't have | |
3484 | * any notion of "global" or bus-wide suspend. The driver has | |
3485 | * to manually suspend all the active unsuspended ports, and | |
3486 | * then manually resume them in the bus_resume() routine. | |
3487 | */ | |
3488 | oxu->bus_suspended = 0; | |
3489 | while (port--) { | |
3490 | u32 __iomem *reg = &oxu->regs->port_status[port]; | |
3491 | u32 t1 = readl(reg) & ~PORT_RWC_BITS; | |
3492 | u32 t2 = t1; | |
3493 | ||
3494 | /* keep track of which ports we suspend */ | |
3495 | if ((t1 & PORT_PE) && !(t1 & PORT_OWNER) && | |
3496 | !(t1 & PORT_SUSPEND)) { | |
3497 | t2 |= PORT_SUSPEND; | |
3498 | set_bit(port, &oxu->bus_suspended); | |
3499 | } | |
3500 | ||
3501 | /* enable remote wakeup on all ports */ | |
3502 | if (device_may_wakeup(&hcd->self.root_hub->dev)) | |
3503 | t2 |= PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E; | |
3504 | else | |
3505 | t2 &= ~(PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E); | |
3506 | ||
3507 | if (t1 != t2) { | |
3508 | oxu_vdbg(oxu, "port %d, %08x -> %08x\n", | |
3509 | port + 1, t1, t2); | |
3510 | writel(t2, reg); | |
3511 | } | |
3512 | } | |
3513 | ||
3514 | /* turn off now-idle HC */ | |
3515 | del_timer_sync(&oxu->watchdog); | |
3516 | ehci_halt(oxu); | |
3517 | hcd->state = HC_STATE_SUSPENDED; | |
3518 | ||
3519 | /* allow remote wakeup */ | |
3520 | mask = INTR_MASK; | |
3521 | if (!device_may_wakeup(&hcd->self.root_hub->dev)) | |
3522 | mask &= ~STS_PCD; | |
3523 | writel(mask, &oxu->regs->intr_enable); | |
3524 | readl(&oxu->regs->intr_enable); | |
3525 | ||
3526 | oxu->next_statechange = jiffies + msecs_to_jiffies(10); | |
3527 | spin_unlock_irq(&oxu->lock); | |
3528 | return 0; | |
3529 | } | |
3530 | ||
3531 | /* Caller has locked the root hub, and should reset/reinit on error */ | |
3532 | static int oxu_bus_resume(struct usb_hcd *hcd) | |
3533 | { | |
3534 | struct oxu_hcd *oxu = hcd_to_oxu(hcd); | |
3535 | u32 temp; | |
3536 | int i; | |
3537 | ||
3538 | if (time_before(jiffies, oxu->next_statechange)) | |
3539 | msleep(5); | |
3540 | spin_lock_irq(&oxu->lock); | |
3541 | ||
3542 | /* Ideally we've got a real resume here, and no port's power | |
3543 | * was lost. (For PCI, that means Vaux was maintained.) But we | |
3544 | * could instead be restoring a swsusp snapshot -- in which case the | |
3545 | * BIOS, not suspend/resume hardware preserving the state we set up, | |
3546 | * was the last user of the controller. | |
3547 | */ | |
3548 | temp = readl(&oxu->regs->intr_enable); | |
3549 | oxu_dbg(oxu, "resume root hub%s\n", temp ? "" : " after power loss"); | |
3550 | ||
3551 | /* at least some APM implementations will try to deliver | |
3552 | * IRQs right away, so delay them until we're ready. | |
3553 | */ | |
3554 | writel(0, &oxu->regs->intr_enable); | |
3555 | ||
3556 | /* re-init operational registers */ | |
3557 | writel(0, &oxu->regs->segment); | |
3558 | writel(oxu->periodic_dma, &oxu->regs->frame_list); | |
3559 | writel((u32) oxu->async->qh_dma, &oxu->regs->async_next); | |
3560 | ||
3561 | /* restore CMD_RUN, framelist size, and irq threshold */ | |
3562 | writel(oxu->command, &oxu->regs->command); | |
3563 | ||
3564 | /* Some controller/firmware combinations need a delay during which | |
3565 | * they set up the port statuses. See Bugzilla #8190. */ | |
3566 | mdelay(8); | |
3567 | ||
3568 | /* manually resume the ports we suspended during bus_suspend() */ | |
3569 | i = HCS_N_PORTS(oxu->hcs_params); | |
3570 | while (i--) { | |
3571 | temp = readl(&oxu->regs->port_status[i]); | |
3572 | temp &= ~(PORT_RWC_BITS | |
3573 | | PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E); | |
3574 | if (test_bit(i, &oxu->bus_suspended) && (temp & PORT_SUSPEND)) { | |
3575 | oxu->reset_done[i] = jiffies + msecs_to_jiffies(20); | |
3576 | temp |= PORT_RESUME; | |
3577 | } | |
3578 | writel(temp, &oxu->regs->port_status[i]); | |
3579 | } | |
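| /* let resume signalling run for the required ~20 ms before PORT_RESUME is cleared below */ | |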
3580 | i = HCS_N_PORTS(oxu->hcs_params); | |
3581 | mdelay(20); | |
3582 | while (i--) { | |
3583 | temp = readl(&oxu->regs->port_status[i]); | |
3584 | if (test_bit(i, &oxu->bus_suspended) && (temp & PORT_SUSPEND)) { | |
3585 | temp &= ~(PORT_RWC_BITS | PORT_RESUME); | |
3586 | writel(temp, &oxu->regs->port_status[i]); | |
3587 | oxu_vdbg(oxu, "resumed port %d\n", i + 1); | |
3588 | } | |
3589 | } | |
3590 | (void) readl(&oxu->regs->command); | |
3591 | ||
3592 | /* maybe re-activate the schedule(s) */ | |
3593 | temp = 0; | |
3594 | if (oxu->async->qh_next.qh) | |
3595 | temp |= CMD_ASE; | |
3596 | if (oxu->periodic_sched) | |
3597 | temp |= CMD_PSE; | |
3598 | if (temp) { | |
3599 | oxu->command |= temp; | |
3600 | writel(oxu->command, &oxu->regs->command); | |
3601 | } | |
3602 | ||
3603 | oxu->next_statechange = jiffies + msecs_to_jiffies(5); | |
3604 | hcd->state = HC_STATE_RUNNING; | |
3605 | ||
3606 | /* Now we can safely re-enable irqs */ | |
3607 | writel(INTR_MASK, &oxu->regs->intr_enable); | |
3608 | ||
3609 | spin_unlock_irq(&oxu->lock); | |
3610 | return 0; | |
3611 | } | |
3612 | ||
3613 | #else | |
3614 | ||
3615 | static int oxu_bus_suspend(struct usb_hcd *hcd) | |
3616 | { | |
3617 | return 0; | |
3618 | } | |
3619 | ||
3620 | static int oxu_bus_resume(struct usb_hcd *hcd) | |
3621 | { | |
3622 | return 0; | |
3623 | } | |
3624 | ||
3625 | #endif /* CONFIG_PM */ | |
3626 | ||
3627 | static const struct hc_driver oxu_hc_driver = { | |
3628 | .description = "oxu210hp_hcd", | |
3629 | .product_desc = "oxu210hp HCD", | |
3630 | .hcd_priv_size = sizeof(struct oxu_hcd), | |
3631 | ||
3632 | /* | |
3633 | * Generic hardware linkage | |
3634 | */ | |
3635 | .irq = oxu_irq, | |
3636 | .flags = HCD_MEMORY | HCD_USB2, | |
3637 | ||
3638 | /* | |
3639 | * Basic lifecycle operations | |
3640 | */ | |
3641 | .reset = oxu_reset, | |
3642 | .start = oxu_run, | |
3643 | .stop = oxu_stop, | |
3644 | .shutdown = oxu_shutdown, | |
3645 | ||
3646 | /* | |
3647 | * Managing i/o requests and associated device resources | |
3648 | */ | |
3649 | .urb_enqueue = oxu_urb_enqueue, | |
3650 | .urb_dequeue = oxu_urb_dequeue, | |
3651 | .endpoint_disable = oxu_endpoint_disable, | |
3652 | ||
3653 | /* | |
3654 | * Scheduling support | |
3655 | */ | |
3656 | .get_frame_number = oxu_get_frame, | |
3657 | ||
3658 | /* | |
3659 | * Root hub support | |
3660 | */ | |
3661 | .hub_status_data = oxu_hub_status_data, | |
3662 | .hub_control = oxu_hub_control, | |
3663 | .bus_suspend = oxu_bus_suspend, | |
3664 | .bus_resume = oxu_bus_resume, | |
3665 | }; | |
3666 | ||
3667 | /* | |
3668 | * Module stuff | |
3669 | */ | |
3670 | ||
3671 | static void oxu_configuration(struct platform_device *pdev, void *base) | |
3672 | { | |
3673 | u32 tmp; | |
3674 | ||
3675 | /* Initialize top level registers. | |
3676 | * First write ever | |
3677 | */ | |
3678 | oxu_writel(base, OXU_HOSTIFCONFIG, 0x0000037D); | |
3679 | oxu_writel(base, OXU_SOFTRESET, OXU_SRESET); | |
3680 | oxu_writel(base, OXU_HOSTIFCONFIG, 0x0000037D); | |
3681 | ||
3682 | tmp = oxu_readl(base, OXU_PIOBURSTREADCTRL); | |
3683 | oxu_writel(base, OXU_PIOBURSTREADCTRL, tmp | 0x0040); | |
3684 | ||
3685 | oxu_writel(base, OXU_ASO, OXU_SPHPOEN | OXU_OVRCCURPUPDEN | | |
3686 | OXU_COMPARATOR | OXU_ASO_OP); | |
3687 | ||
3688 | tmp = oxu_readl(base, OXU_CLKCTRL_SET); | |
3689 | oxu_writel(base, OXU_CLKCTRL_SET, tmp | OXU_SYSCLKEN | OXU_USBOTGCLKEN); | |
3690 | ||
3691 | /* Clear all top-level interrupt enables */ | |
3692 | oxu_writel(base, OXU_CHIPIRQEN_CLR, 0xff); | |
3693 | ||
3694 | /* Clear all top interrupt status */ | |
3695 | oxu_writel(base, OXU_CHIPIRQSTATUS, 0xff); | |
3696 | ||
3697 | /* Enable all needed top-level interrupts except the OTG SPH core */ | |
3698 | oxu_writel(base, OXU_CHIPIRQEN_SET, OXU_USBSPHLPWUI | OXU_USBOTGLPWUI); | |
3699 | } | |
3700 | ||
3701 | static int oxu_verify_id(struct platform_device *pdev, void *base) | |
3702 | { | |
3703 | u32 id; | |
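| /* package ("bond-out") options encoded in the device ID register */ | |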
3704 | static const char * const bo[] = { | |
3705 | "reserved", | |
3706 | "128-pin LQFP", | |
3707 | "84-pin TFBGA", | |
3708 | "reserved", | |
3709 | }; | |
3710 | ||
3711 | /* Read controller signature register to find a match */ | |
3712 | id = oxu_readl(base, OXU_DEVICEID); | |
3713 | dev_info(&pdev->dev, "device ID %x\n", id); | |
3714 | if ((id & OXU_REV_MASK) != (OXU_REV_2100 << OXU_REV_SHIFT)) | |
3715 | return -1; | |
3716 | ||
3717 | dev_info(&pdev->dev, "found device %x %s (%04x:%04x)\n", | |
3718 | id >> OXU_REV_SHIFT, | |
3719 | bo[(id & OXU_BO_MASK) >> OXU_BO_SHIFT], | |
3720 | (id & OXU_MAJ_REV_MASK) >> OXU_MAJ_REV_SHIFT, | |
3721 | (id & OXU_MIN_REV_MASK) >> OXU_MIN_REV_SHIFT); | |
3722 | ||
3723 | return 0; | |
3724 | } | |
3725 | ||
3726 | static const struct hc_driver oxu_hc_driver; | |
3727 | static struct usb_hcd *oxu_create(struct platform_device *pdev, | |
3728 | unsigned long memstart, unsigned long memlen, | |
3729 | void *base, int irq, int otg) | |
3730 | { | |
3731 | struct device *dev = &pdev->dev; | |
3732 | ||
3733 | struct usb_hcd *hcd; | |
3734 | struct oxu_hcd *oxu; | |
3735 | int ret; | |
3736 | ||
3737 | /* Set endian mode and host mode */ | |
3738 | oxu_writel(base + (otg ? OXU_OTG_CORE_OFFSET : OXU_SPH_CORE_OFFSET), | |
3739 | OXU_USBMODE, | |
3740 | OXU_CM_HOST_ONLY | OXU_ES_LITTLE | OXU_VBPS); | |
3741 | ||
3742 | hcd = usb_create_hcd(&oxu_hc_driver, dev, | |
3743 | otg ? "oxu210hp_otg" : "oxu210hp_sph"); | |
3744 | if (!hcd) | |
3745 | return ERR_PTR(-ENOMEM); | |
3746 | ||
3747 | hcd->rsrc_start = memstart; | |
3748 | hcd->rsrc_len = memlen; | |
3749 | hcd->regs = base; | |
3750 | hcd->irq = irq; | |
3751 | hcd->state = HC_STATE_HALT; | |
3752 | ||
3753 | oxu = hcd_to_oxu(hcd); | |
3754 | oxu->is_otg = otg; | |
3755 | ||
3756 | ret = usb_add_hcd(hcd, irq, IRQF_SHARED); | |
3757 | if (ret < 0) { | |
| usb_put_hcd(hcd); /* don't leak the hcd when usb_add_hcd() fails */ | |
3758 | return ERR_PTR(ret); | |
| } | |
3759 | ||
3760 | return hcd; | |
3761 | } | |
3762 | ||
3763 | static int oxu_init(struct platform_device *pdev, | |
3764 | unsigned long memstart, unsigned long memlen, | |
3765 | void *base, int irq) | |
3766 | { | |
3767 | struct oxu_info *info = platform_get_drvdata(pdev); | |
3768 | struct usb_hcd *hcd; | |
3769 | int ret; | |
3770 | ||
3771 | /* First time configuration at start up */ | |
3772 | oxu_configuration(pdev, base); | |
3773 | ||
3774 | ret = oxu_verify_id(pdev, base); | |
3775 | if (ret) { | |
3776 | dev_err(&pdev->dev, "no devices found!\n"); | |
3777 | return -ENODEV; | |
3778 | } | |
3779 | ||
3780 | /* Create the OTG controller */ | |
3781 | hcd = oxu_create(pdev, memstart, memlen, base, irq, 1); | |
3782 | if (IS_ERR(hcd)) { | |
3783 | dev_err(&pdev->dev, "cannot create OTG controller!\n"); | |
3784 | ret = PTR_ERR(hcd); | |
3785 | goto error_create_otg; | |
3786 | } | |
3787 | info->hcd[0] = hcd; | |
3788 | ||
3789 | /* Create the SPH host controller */ | |
3790 | hcd = oxu_create(pdev, memstart, memlen, base, irq, 0); | |
3791 | if (IS_ERR(hcd)) { | |
3792 | dev_err(&pdev->dev, "cannot create SPH controller!\n"); | |
3793 | ret = PTR_ERR(hcd); | |
3794 | goto error_create_sph; | |
3795 | } | |
3796 | info->hcd[1] = hcd; | |
3797 | ||
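| /* Enable the two low-order top-level IRQ sources (presumably the OTG and SPH host cores) now that both HCDs are registered */ | |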
3798 | oxu_writel(base, OXU_CHIPIRQEN_SET, | |
3799 | oxu_readl(base, OXU_CHIPIRQEN_SET) | 3); | |
3800 | ||
3801 | return 0; | |
3802 | ||
3803 | error_create_sph: | |
3804 | usb_remove_hcd(info->hcd[0]); | |
3805 | usb_put_hcd(info->hcd[0]); | |
3806 | ||
3807 | error_create_otg: | |
3808 | return ret; | |
3809 | } | |
3810 | ||
3811 | static int oxu_drv_probe(struct platform_device *pdev) | |
3812 | { | |
3813 | struct resource *res; | |
3814 | void *base; | |
3815 | unsigned long memstart, memlen; | |
3816 | int irq, ret; | |
3817 | struct oxu_info *info; | |
3818 | ||
3819 | if (usb_disabled()) | |
3820 | return -ENODEV; | |
3821 | ||
3822 | /* | |
3823 | * Get the platform resources | |
3824 | */ | |
3825 | res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | |
3826 | if (!res) { | |
3827 | dev_err(&pdev->dev, | |
3828 | "no IRQ! Check %s setup!\n", dev_name(&pdev->dev)); | |
3829 | return -ENODEV; |
3830 | } | |
3831 | irq = res->start; | |
3832 | dev_dbg(&pdev->dev, "IRQ resource %d\n", irq); | |
3833 | ||
3834 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
3835 | if (!res) { | |
3836 | dev_err(&pdev->dev, "no register address! Check %s setup!\n", | |
3837 | dev_name(&pdev->dev)); | |
3838 | return -ENODEV; |
3839 | } | |
3840 | memstart = res->start; | |
3841 | memlen = res->end - res->start + 1; | |
3842 | dev_dbg(&pdev->dev, "MEM resource start %lx, len %lx\n", memstart, memlen); | |
3843 | if (!request_mem_region(memstart, memlen, | |
3844 | oxu_hc_driver.description)) { | |
3845 | dev_dbg(&pdev->dev, "memory area already in use\n"); | |
3846 | return -EBUSY; | |
3847 | } | |
3848 | ||
3849 | ret = set_irq_type(irq, IRQF_TRIGGER_FALLING); | |
3850 | if (ret) { | |
3851 | dev_err(&pdev->dev, "error setting irq type\n"); | |
3852 | ret = -EFAULT; | |
3853 | goto error_set_irq_type; | |
3854 | } | |
3855 | ||
3856 | base = ioremap(memstart, memlen); | |
3857 | if (!base) { | |
3858 | dev_dbg(&pdev->dev, "error mapping memory\n"); | |
3859 | ret = -ENOMEM; | |
3860 | goto error_ioremap; | |
3861 | } | |
3862 | ||
3863 | /* Allocate a driver data struct to hold useful info for both | |
3864 | * SPH & OTG devices | |
3865 | */ | |
3866 | info = kzalloc(sizeof(struct oxu_info), GFP_KERNEL); | |
3867 | if (!info) { | |
3868 | dev_dbg(&pdev->dev, "error allocating memory\n"); | |
3869 | ret = -ENOMEM; | |
3870 | goto error_alloc; | |
3871 | } | |
3872 | platform_set_drvdata(pdev, info); | |
3873 | ||
3874 | ret = oxu_init(pdev, memstart, memlen, base, irq); | |
3875 | if (ret < 0) { | |
3876 | dev_dbg(&pdev->dev, "cannot init USB devices\n"); | |
3877 | goto error_init; | |
3878 | } | |
3879 | ||
3880 | dev_info(&pdev->dev, "devices enabled and running\n"); | |
3882 | ||
3883 | return 0; | |
3884 | ||
3885 | error_init: | |
3886 | kfree(info); | |
3887 | platform_set_drvdata(pdev, NULL); | |
3888 | ||
3889 | error_alloc: | |
3890 | iounmap(base); | |
3891 | ||
3892 | error_set_irq_type: | |
3893 | error_ioremap: | |
3894 | release_mem_region(memstart, memlen); | |
3895 | ||
3896 | dev_err(&pdev->dev, "init %s failed, error %d\n", dev_name(&pdev->dev), ret); | |
3897 | return ret; |
3898 | } | |
3899 | ||
3900 | static void oxu_remove(struct platform_device *pdev, struct usb_hcd *hcd) | |
3901 | { | |
3902 | usb_remove_hcd(hcd); | |
3903 | usb_put_hcd(hcd); | |
3904 | } | |
3905 | ||
3906 | static int oxu_drv_remove(struct platform_device *pdev) | |
3907 | { | |
3908 | struct oxu_info *info = platform_get_drvdata(pdev); | |
3909 | unsigned long memstart = info->hcd[0]->rsrc_start, | |
3910 | memlen = info->hcd[0]->rsrc_len; | |
3911 | void *base = info->hcd[0]->regs; | |
3912 | ||
3913 | oxu_remove(pdev, info->hcd[0]); | |
3914 | oxu_remove(pdev, info->hcd[1]); | |
3915 | ||
3916 | iounmap(base); | |
3917 | release_mem_region(memstart, memlen); | |
3918 | ||
3919 | kfree(info); | |
3920 | platform_set_drvdata(pdev, NULL); | |
3921 | ||
3922 | return 0; | |
3923 | } | |
3924 | ||
3925 | static void oxu_drv_shutdown(struct platform_device *pdev) | |
3926 | { | |
3927 | oxu_drv_remove(pdev); | |
3928 | } | |
3929 | ||
3930 | #if 0 | |
3931 | /* FIXME: TODO */ | |
3932 | static int oxu_drv_suspend(struct device *dev) | |
3933 | { | |
3934 | struct platform_device *pdev = to_platform_device(dev); | |
3935 | struct usb_hcd *hcd = dev_get_drvdata(dev); | |
3936 | ||
3937 | return 0; | |
3938 | } | |
3939 | ||
3940 | static int oxu_drv_resume(struct device *dev) | |
3941 | { | |
3942 | struct platform_device *pdev = to_platform_device(dev); | |
3943 | struct usb_hcd *hcd = dev_get_drvdata(dev); | |
3944 | ||
3945 | return 0; | |
3946 | } | |
3947 | #else | |
3948 | #define oxu_drv_suspend NULL | |
3949 | #define oxu_drv_resume NULL | |
3950 | #endif | |
3951 | ||
3952 | static struct platform_driver oxu_driver = { | |
3953 | .probe = oxu_drv_probe, | |
3954 | .remove = oxu_drv_remove, | |
3955 | .shutdown = oxu_drv_shutdown, | |
3956 | .suspend = oxu_drv_suspend, | |
3957 | .resume = oxu_drv_resume, | |
3958 | .driver = { | |
3959 | .name = "oxu210hp-hcd", | |
3960 | .bus = &platform_bus_type | |
3961 | } | |
3962 | }; | |
3963 | ||
3964 | static int __init oxu_module_init(void) | |
3965 | { | |
3966 | return platform_driver_register(&oxu_driver); | |
3973 | } | |
3974 | ||
3975 | static void __exit oxu_module_cleanup(void) | |
3976 | { | |
3977 | platform_driver_unregister(&oxu_driver); | |
3978 | } | |
3979 | ||
3980 | module_init(oxu_module_init); | |
3981 | module_exit(oxu_module_cleanup); | |
3982 | ||
3983 | MODULE_DESCRIPTION("Oxford OXU210HP HCD driver - ver. " DRIVER_VERSION); | |
3984 | MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>"); | |
3985 | MODULE_LICENSE("GPL"); |