/*
 * OHCI HCD (Host Controller Driver) for USB.
 *
 * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
 * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
 *
 * This file is licenced under the GPL.
 */

#include <linux/irq.h>
#include <linux/slab.h>

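/* Free the HCD-private state for one URB: release each of its TDs,
 * drop it from the pending list, then free the urb_priv itself.
 */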
static void urb_free_priv (struct ohci_hcd *hc, urb_priv_t *urb_priv)
{
	int		last = urb_priv->length - 1;

	if (last >= 0) {
		int		i;
		struct td	*td;

		for (i = 0; i <= last; i++) {
			td = urb_priv->td [i];
			if (td)
				td_free (hc, td);
		}
	}

	list_del (&urb_priv->pending);
	kfree (urb_priv);
}

/*-------------------------------------------------------------------------*/

/*
 * URB goes back to driver, and isn't reissued.
 * It's completely gone from HC data structures.
 * PRECONDITION:  ohci lock held, irqs blocked.
 */
static void
finish_urb(struct ohci_hcd *ohci, struct urb *urb, int status)
__releases(ohci->lock)
__acquires(ohci->lock)
{
	struct device *dev = ohci_to_hcd(ohci)->self.controller;
	struct usb_host_endpoint *ep = urb->ep;
	struct urb_priv *urb_priv;

	// ASSERT (urb->hcpriv != 0);

 restart:
	urb_free_priv (ohci, urb->hcpriv);
	urb->hcpriv = NULL;
	if (likely(status == -EINPROGRESS))
		status = 0;

	switch (usb_pipetype (urb->pipe)) {
	case PIPE_ISOCHRONOUS:
		ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs--;
		if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0) {
			if (quirk_amdiso(ohci))
				usb_amd_quirk_pll_enable();
			if (quirk_amdprefetch(ohci))
				sb800_prefetch(dev, 0);
		}
		break;
	case PIPE_INTERRUPT:
		ohci_to_hcd(ohci)->self.bandwidth_int_reqs--;
		break;
	}

	/* urb->complete() can reenter this HCD */
	usb_hcd_unlink_urb_from_ep(ohci_to_hcd(ohci), urb);
	spin_unlock (&ohci->lock);
	usb_hcd_giveback_urb(ohci_to_hcd(ohci), urb, status);
	spin_lock (&ohci->lock);

	/* stop periodic dma if it's not needed */
	if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0
			&& ohci_to_hcd(ohci)->self.bandwidth_int_reqs == 0) {
		ohci->hc_control &= ~(OHCI_CTRL_PLE|OHCI_CTRL_IE);
		ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
	}

	/*
	 * An isochronous URB that is submitted too late won't have any TDs
	 * (marked by the fact that the td_cnt value is larger than the
	 * actual number of TDs).  If the next URB on this endpoint is like
	 * that, give it back now.
	 */
	if (!list_empty(&ep->urb_list)) {
		urb = list_first_entry(&ep->urb_list, struct urb, urb_list);
		urb_priv = urb->hcpriv;
		if (urb_priv->td_cnt > urb_priv->length) {
			status = 0;
			goto restart;
		}
	}
}


/*-------------------------------------------------------------------------*
 * ED handling functions
 *-------------------------------------------------------------------------*/

/* search for the right schedule branch to use for a periodic ed.
 * does some load balancing; returns the branch, or negative errno.
 */
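/* Illustration (using the values below): for an ED with interval 8 and
 * NUM_INTS = 32, candidate branches are 0..7.  Branch i is usable only if
 * every slot i, i+8, i+16, i+24 would stay within 900 (90% of a 1 msec
 * frame) after adding this ED's load, and the least-loaded usable branch
 * wins.
 */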
static int balance (struct ohci_hcd *ohci, int interval, int load)
{
	int	i, branch = -ENOSPC;

	/* iso periods can be huge; iso tds specify frame numbers */
	if (interval > NUM_INTS)
		interval = NUM_INTS;

	/* search for the least loaded schedule branch of that period
	 * that has enough bandwidth left unreserved.
	 */
	for (i = 0; i < interval ; i++) {
		if (branch < 0 || ohci->load [branch] > ohci->load [i]) {
			int	j;

			/* usb 1.1 says 90% of one frame */
			for (j = i; j < NUM_INTS; j += interval) {
				if ((ohci->load [j] + load) > 900)
					break;
			}
			if (j < NUM_INTS)
				continue;
			branch = i;
		}
	}
	return branch;
}

/*-------------------------------------------------------------------------*/

/* both iso and interrupt requests have periods; this routine puts them
 * into the schedule tree in the appropriate place.  most iso devices use
 * 1msec periods, but that's not required.
 */
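/* An ED scheduled on branch B with interval N gets linked into int_table
 * slots B, B+N, B+2N, ... so the HC encounters it once every N frames;
 * keeping each branch sorted slow-before-fast lets the shorter-interval
 * EDs at the tail of a chain be shared by several branches.
 */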
static void periodic_link (struct ohci_hcd *ohci, struct ed *ed)
{
	unsigned	i;

	ohci_dbg(ohci, "link %sed %p branch %d [%dus.], interval %d\n",
		(ed->hwINFO & cpu_to_hc32 (ohci, ED_ISO)) ? "iso " : "",
		ed, ed->branch, ed->load, ed->interval);

	for (i = ed->branch; i < NUM_INTS; i += ed->interval) {
		struct ed	**prev = &ohci->periodic [i];
		__hc32		*prev_p = &ohci->hcca->int_table [i];
		struct ed	*here = *prev;

		/* sorting each branch by period (slow before fast)
		 * lets us share the faster parts of the tree.
		 * (plus maybe: put interrupt eds before iso)
		 */
		while (here && ed != here) {
			if (ed->interval > here->interval)
				break;
			prev = &here->ed_next;
			prev_p = &here->hwNextED;
			here = *prev;
		}
		if (ed != here) {
			ed->ed_next = here;
			if (here)
				ed->hwNextED = *prev_p;
			wmb ();
			*prev = ed;
			*prev_p = cpu_to_hc32(ohci, ed->dma);
			wmb();
		}
		ohci->load [i] += ed->load;
	}
	ohci_to_hcd(ohci)->self.bandwidth_allocated += ed->load / ed->interval;
}

/* link an ed into one of the HC chains */

static int ed_schedule (struct ohci_hcd *ohci, struct ed *ed)
{
	int	branch;

	ed->ed_prev = NULL;
	ed->ed_next = NULL;
	ed->hwNextED = 0;
	wmb ();

	/* we care about rm_list when setting CLE/BLE in case the HC was at
	 * work on some TD when CLE/BLE was turned off, and isn't quiesced
	 * yet.  finish_unlinks() restarts as needed, some upcoming INTR_SF.
	 *
	 * control and bulk EDs are doubly linked (ed_next, ed_prev), but
	 * periodic ones are singly linked (ed_next).  that's because the
	 * periodic schedule encodes a tree like figure 3-5 in the ohci
	 * spec:  each qh can have several "previous" nodes, and the tree
	 * doesn't have unused/idle descriptors.
	 */
	switch (ed->type) {
	case PIPE_CONTROL:
		if (ohci->ed_controltail == NULL) {
			WARN_ON (ohci->hc_control & OHCI_CTRL_CLE);
			ohci_writel (ohci, ed->dma,
					&ohci->regs->ed_controlhead);
		} else {
			ohci->ed_controltail->ed_next = ed;
			ohci->ed_controltail->hwNextED = cpu_to_hc32 (ohci,
								ed->dma);
		}
		ed->ed_prev = ohci->ed_controltail;
		if (!ohci->ed_controltail && !ohci->ed_rm_list) {
			wmb();
			ohci->hc_control |= OHCI_CTRL_CLE;
			ohci_writel (ohci, 0, &ohci->regs->ed_controlcurrent);
			ohci_writel (ohci, ohci->hc_control,
					&ohci->regs->control);
		}
		ohci->ed_controltail = ed;
		break;

	case PIPE_BULK:
		if (ohci->ed_bulktail == NULL) {
			WARN_ON (ohci->hc_control & OHCI_CTRL_BLE);
			ohci_writel (ohci, ed->dma, &ohci->regs->ed_bulkhead);
		} else {
			ohci->ed_bulktail->ed_next = ed;
			ohci->ed_bulktail->hwNextED = cpu_to_hc32 (ohci,
								ed->dma);
		}
		ed->ed_prev = ohci->ed_bulktail;
		if (!ohci->ed_bulktail && !ohci->ed_rm_list) {
			wmb();
			ohci->hc_control |= OHCI_CTRL_BLE;
			ohci_writel (ohci, 0, &ohci->regs->ed_bulkcurrent);
			ohci_writel (ohci, ohci->hc_control,
					&ohci->regs->control);
		}
		ohci->ed_bulktail = ed;
		break;

	// case PIPE_INTERRUPT:
	// case PIPE_ISOCHRONOUS:
	default:
		branch = balance (ohci, ed->interval, ed->load);
		if (branch < 0) {
			ohci_dbg (ohci,
				"ERR %d, interval %d msecs, load %d\n",
				branch, ed->interval, ed->load);
			// FIXME if there are TDs queued, fail them!
			return branch;
		}
		ed->branch = branch;
		periodic_link (ohci, ed);
	}

	/* the HC may not see the schedule updates yet, but if it does
	 * then they'll be properly ordered.
	 */

	ed->state = ED_OPER;
	return 0;
}

/*-------------------------------------------------------------------------*/

/* scan the periodic table to find and unlink this ED */
static void periodic_unlink (struct ohci_hcd *ohci, struct ed *ed)
{
	int	i;

	for (i = ed->branch; i < NUM_INTS; i += ed->interval) {
		struct ed	*temp;
		struct ed	**prev = &ohci->periodic [i];
		__hc32		*prev_p = &ohci->hcca->int_table [i];

		while (*prev && (temp = *prev) != ed) {
			prev_p = &temp->hwNextED;
			prev = &temp->ed_next;
		}
		if (*prev) {
			*prev_p = ed->hwNextED;
			*prev = ed->ed_next;
		}
		ohci->load [i] -= ed->load;
	}
	ohci_to_hcd(ohci)->self.bandwidth_allocated -= ed->load / ed->interval;

	ohci_dbg(ohci, "unlink %sed %p branch %d [%dus.], interval %d\n",
		(ed->hwINFO & cpu_to_hc32 (ohci, ED_ISO)) ? "iso " : "",
		ed, ed->branch, ed->load, ed->interval);
}

/* unlink an ed from one of the HC chains.
 * just the link to the ed is unlinked.
 * the link from the ed still points to another operational ed or 0
 * so the HC can eventually finish the processing of the unlinked ed
 * (assuming it already started that, which needn't be true).
 *
 * ED_UNLINK is a transient state: the HC may still see this ED, but soon
 * it won't.  ED_SKIP means the HC will finish its current transaction,
 * but won't start anything new.  The TD queue may still grow; device
 * drivers don't know about this HCD-internal state.
 *
 * When the HC can't see the ED, something changes ED_UNLINK to one of:
 *
 *  - ED_OPER: when there's any request queued, the ED gets rescheduled
 *    immediately.  HC should be working on them.
 *
 *  - ED_IDLE: when there's no TD queue or the HC isn't running.
 *
 * When finish_unlinks() runs later, after SOF interrupt, it will often
 * complete one or more URB unlinks before making that state change.
 */
static void ed_deschedule (struct ohci_hcd *ohci, struct ed *ed)
{
	ed->hwINFO |= cpu_to_hc32 (ohci, ED_SKIP);
	wmb ();
	ed->state = ED_UNLINK;

	/* To deschedule something from the control or bulk list, just
	 * clear CLE/BLE and wait.  There's no safe way to scrub out list
	 * head/current registers until later, and "later" isn't very
	 * tightly specified.  Figure 6-5 and Section 6.4.2.2 show how
	 * the HC is reading the ED queues (while we modify them).
	 *
	 * For now, ed_schedule() is "later".  It might be good paranoia
	 * to scrub those registers in finish_unlinks(), in case of bugs
	 * that make the HC try to use them.
	 */
	switch (ed->type) {
	case PIPE_CONTROL:
		/* remove ED from the HC's list: */
		if (ed->ed_prev == NULL) {
			if (!ed->hwNextED) {
				ohci->hc_control &= ~OHCI_CTRL_CLE;
				ohci_writel (ohci, ohci->hc_control,
						&ohci->regs->control);
				// a ohci_readl() later syncs CLE with the HC
			} else
				ohci_writel (ohci,
					hc32_to_cpup (ohci, &ed->hwNextED),
					&ohci->regs->ed_controlhead);
		} else {
			ed->ed_prev->ed_next = ed->ed_next;
			ed->ed_prev->hwNextED = ed->hwNextED;
		}
		/* remove ED from the HCD's list: */
		if (ohci->ed_controltail == ed) {
			ohci->ed_controltail = ed->ed_prev;
			if (ohci->ed_controltail)
				ohci->ed_controltail->ed_next = NULL;
		} else if (ed->ed_next) {
			ed->ed_next->ed_prev = ed->ed_prev;
		}
		break;

	case PIPE_BULK:
		/* remove ED from the HC's list: */
		if (ed->ed_prev == NULL) {
			if (!ed->hwNextED) {
				ohci->hc_control &= ~OHCI_CTRL_BLE;
				ohci_writel (ohci, ohci->hc_control,
						&ohci->regs->control);
				// a ohci_readl() later syncs BLE with the HC
			} else
				ohci_writel (ohci,
					hc32_to_cpup (ohci, &ed->hwNextED),
					&ohci->regs->ed_bulkhead);
		} else {
			ed->ed_prev->ed_next = ed->ed_next;
			ed->ed_prev->hwNextED = ed->hwNextED;
		}
		/* remove ED from the HCD's list: */
		if (ohci->ed_bulktail == ed) {
			ohci->ed_bulktail = ed->ed_prev;
			if (ohci->ed_bulktail)
				ohci->ed_bulktail->ed_next = NULL;
		} else if (ed->ed_next) {
			ed->ed_next->ed_prev = ed->ed_prev;
		}
		break;

	// case PIPE_INTERRUPT:
	// case PIPE_ISOCHRONOUS:
	default:
		periodic_unlink (ohci, ed);
		break;
	}
}


/*-------------------------------------------------------------------------*/

/* get and maybe (re)init an endpoint. init _should_ be done only as part
 * of enumeration, usb_set_configuration() or usb_set_interface().
 */
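/* The ED for an endpoint lives in ep->hcpriv; on first use ed_get()
 * allocates it together with its dummy TD, fills in the hwINFO word from
 * the endpoint descriptor, and for periodic endpoints also records an
 * interval and a bus-time load for the scheduler.
 */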
static struct ed *ed_get (
	struct ohci_hcd		*ohci,
	struct usb_host_endpoint *ep,
	struct usb_device	*udev,
	unsigned int		pipe,
	int			interval
) {
	struct ed		*ed;
	unsigned long		flags;

	spin_lock_irqsave (&ohci->lock, flags);

	ed = ep->hcpriv;
	if (!ed) {
		struct td	*td;
		int		is_out;
		u32		info;

		ed = ed_alloc (ohci, GFP_ATOMIC);
		if (!ed) {
			/* out of memory */
			goto done;
		}

		/* dummy td; end of td list for ed */
		td = td_alloc (ohci, GFP_ATOMIC);
		if (!td) {
			/* out of memory */
			ed_free (ohci, ed);
			ed = NULL;
			goto done;
		}
		ed->dummy = td;
		ed->hwTailP = cpu_to_hc32 (ohci, td->td_dma);
		ed->hwHeadP = ed->hwTailP;	/* ED_C, ED_H zeroed */
		ed->state = ED_IDLE;

		is_out = !(ep->desc.bEndpointAddress & USB_DIR_IN);

		/* FIXME usbcore changes dev->devnum before SET_ADDRESS
		 * succeeds ... otherwise we wouldn't need "pipe".
		 */
		info = usb_pipedevice (pipe);
		ed->type = usb_pipetype(pipe);

		info |= (ep->desc.bEndpointAddress & ~USB_DIR_IN) << 7;
		info |= usb_endpoint_maxp(&ep->desc) << 16;
		if (udev->speed == USB_SPEED_LOW)
			info |= ED_LOWSPEED;
		/* only control transfers store pids in tds */
		if (ed->type != PIPE_CONTROL) {
			info |= is_out ? ED_OUT : ED_IN;
			if (ed->type != PIPE_BULK) {
				/* periodic transfers... */
				if (ed->type == PIPE_ISOCHRONOUS)
					info |= ED_ISO;
				else if (interval > 32)	/* iso can be bigger */
					interval = 32;
				ed->interval = interval;
				ed->load = usb_calc_bus_time (
					udev->speed, !is_out,
					ed->type == PIPE_ISOCHRONOUS,
					usb_endpoint_maxp(&ep->desc))
						/ 1000;
			}
		}
		ed->hwINFO = cpu_to_hc32(ohci, info);

		ep->hcpriv = ed;
	}

done:
	spin_unlock_irqrestore (&ohci->lock, flags);
	return ed;
}


/*-------------------------------------------------------------------------*/

/* request unlinking of an endpoint from an operational HC.
 * put the ep on the rm_list
 * real work is done at the next start frame (SF) hardware interrupt
 * caller guarantees HCD is running, so hardware access is safe,
 * and that ed->state is ED_OPER
 */
static void start_ed_unlink (struct ohci_hcd *ohci, struct ed *ed)
{
	ed->hwINFO |= cpu_to_hc32 (ohci, ED_DEQUEUE);
	ed_deschedule (ohci, ed);

	/* rm_list is just singly linked, for simplicity */
	ed->ed_next = ohci->ed_rm_list;
	ed->ed_prev = NULL;
	ohci->ed_rm_list = ed;

	/* enable SOF interrupt */
	ohci_writel (ohci, OHCI_INTR_SF, &ohci->regs->intrstatus);
	ohci_writel (ohci, OHCI_INTR_SF, &ohci->regs->intrenable);
	// flush those writes, and get latest HCCA contents
	(void) ohci_readl (ohci, &ohci->regs->control);

	/* SF interrupt might get delayed; record the frame counter value that
	 * indicates when the HC isn't looking at it, so concurrent unlinks
	 * behave.  frame_no wraps every 2^16 msec, and changes right before
	 * SF is triggered.
	 */
	ed->tick = ohci_frame_no(ohci) + 1;

}

/*-------------------------------------------------------------------------*
 * TD handling functions
 *-------------------------------------------------------------------------*/

/* enqueue next TD for this URB (OHCI spec 5.2.8.2) */

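/* Per the dummy-TD convention, the ED's TailP always points at an empty
 * TD owned by the HCD: td_fill() writes the new transfer into that dummy
 * and turns the caller's spare TD into the next dummy, so the HC never
 * fetches a partially built TD.
 */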
static void
td_fill (struct ohci_hcd *ohci, u32 info,
	dma_addr_t data, int len,
	struct urb *urb, int index)
{
	struct td		*td, *td_pt;
	struct urb_priv		*urb_priv = urb->hcpriv;
	int			is_iso = info & TD_ISO;
	int			hash;

	// ASSERT (index < urb_priv->length);

	/* aim for only one interrupt per urb.  mostly applies to control
	 * and iso; other urbs rarely need more than one TD per urb.
	 * this way, only final tds (or ones with an error) cause IRQs.
	 * at least immediately; use DI=6 in case any control request is
	 * tempted to die part way through.  (and to force the hc to flush
	 * its donelist soonish, even on unlink paths.)
	 *
	 * NOTE: could delay interrupts even for the last TD, and get fewer
	 * interrupts ... increasing per-urb latency by sharing interrupts.
	 * Drivers that queue bulk urbs may request that behavior.
	 */
	if (index != (urb_priv->length - 1)
			|| (urb->transfer_flags & URB_NO_INTERRUPT))
		info |= TD_DI_SET (6);

	/* use this td as the next dummy */
	td_pt = urb_priv->td [index];

	/* fill the old dummy TD */
	td = urb_priv->td [index] = urb_priv->ed->dummy;
	urb_priv->ed->dummy = td_pt;

	td->ed = urb_priv->ed;
	td->next_dl_td = NULL;
	td->index = index;
	td->urb = urb;
	td->data_dma = data;
	if (!len)
		data = 0;

	td->hwINFO = cpu_to_hc32 (ohci, info);
	if (is_iso) {
		td->hwCBP = cpu_to_hc32 (ohci, data & 0xFFFFF000);
		*ohci_hwPSWp(ohci, td, 0) = cpu_to_hc16 (ohci,
						(data & 0x0FFF) | 0xE000);
	} else {
		td->hwCBP = cpu_to_hc32 (ohci, data);
	}
	if (data)
		td->hwBE = cpu_to_hc32 (ohci, data + len - 1);
	else
		td->hwBE = 0;
	td->hwNextTD = cpu_to_hc32 (ohci, td_pt->td_dma);

	/* append to queue */
	list_add_tail (&td->td_list, &td->ed->td_list);

	/* hash it for later reverse mapping */
	hash = TD_HASH_FUNC (td->td_dma);
	td->td_hash = ohci->td_hash [hash];
	ohci->td_hash [hash] = td;

	/* HC might read the TD (or cachelines) right away ... */
	wmb ();
	td->ed->hwTailP = td->hwNextTD;
}

/*-------------------------------------------------------------------------*/

/* Prepare all TDs of a transfer, and queue them onto the ED.
 * Caller guarantees HC is active.
 * Usually the ED is already on the schedule, so TDs might be
 * processed as soon as they're queued.
 */
static void td_submit_urb (
	struct ohci_hcd	*ohci,
	struct urb	*urb
) {
	struct urb_priv	*urb_priv = urb->hcpriv;
	struct device *dev = ohci_to_hcd(ohci)->self.controller;
	dma_addr_t	data;
	int		data_len = urb->transfer_buffer_length;
	int		cnt = 0;
	u32		info = 0;
	int		is_out = usb_pipeout (urb->pipe);
	int		periodic = 0;
	int		i, this_sg_len, n;
	struct scatterlist	*sg;

	/* OHCI handles the bulk/interrupt data toggles itself.  We just
	 * use the device toggle bits for resetting, and rely on the fact
	 * that resetting toggle is meaningless if the endpoint is active.
	 */
	if (!usb_gettoggle (urb->dev, usb_pipeendpoint (urb->pipe), is_out)) {
		usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe),
			is_out, 1);
		urb_priv->ed->hwHeadP &= ~cpu_to_hc32 (ohci, ED_C);
	}

	list_add (&urb_priv->pending, &ohci->pending);

	i = urb->num_mapped_sgs;
	if (data_len > 0 && i > 0) {
		sg = urb->sg;
		data = sg_dma_address(sg);

		/*
		 * urb->transfer_buffer_length may be smaller than the
		 * size of the scatterlist (or vice versa)
		 */
		this_sg_len = min_t(int, sg_dma_len(sg), data_len);
	} else {
		sg = NULL;
		if (data_len)
			data = urb->transfer_dma;
		else
			data = 0;
		this_sg_len = data_len;
	}

	/* NOTE:  TD_CC is set so we can tell which TDs the HC processed by
	 * using TD_CC_GET, as well as by seeing them on the done list.
	 * (CC = NotAccessed ... 0x0F, or 0x0E in PSWs for ISO.)
	 */
	switch (urb_priv->ed->type) {

	/* Bulk and interrupt are identical except for where in the schedule
	 * their EDs live.
	 */
	case PIPE_INTERRUPT:
		/* ... and periodic urbs have extra accounting */
		periodic = ohci_to_hcd(ohci)->self.bandwidth_int_reqs++ == 0
			&& ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0;
		/* FALLTHROUGH */
	case PIPE_BULK:
		info = is_out
			? TD_T_TOGGLE | TD_CC | TD_DP_OUT
			: TD_T_TOGGLE | TD_CC | TD_DP_IN;
		/* TDs _could_ transfer up to 8K each */
		for (;;) {
			n = min(this_sg_len, 4096);

			/* maybe avoid ED halt on final TD short read */
			if (n >= data_len || (i == 1 && n >= this_sg_len)) {
				if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
					info |= TD_R;
			}
			td_fill(ohci, info, data, n, urb, cnt);
			this_sg_len -= n;
			data_len -= n;
			data += n;
			cnt++;

			if (this_sg_len <= 0) {
				if (--i <= 0 || data_len <= 0)
					break;
				sg = sg_next(sg);
				data = sg_dma_address(sg);
				this_sg_len = min_t(int, sg_dma_len(sg),
						data_len);
			}
		}
		if ((urb->transfer_flags & URB_ZERO_PACKET)
				&& cnt < urb_priv->length) {
			td_fill (ohci, info, 0, 0, urb, cnt);
			cnt++;
		}
		/* maybe kickstart bulk list */
		if (urb_priv->ed->type == PIPE_BULK) {
			wmb ();
			ohci_writel (ohci, OHCI_BLF, &ohci->regs->cmdstatus);
		}
		break;

	/* control manages DATA0/DATA1 toggle per-request; SETUP resets it,
	 * any DATA phase works normally, and the STATUS ack is special.
	 */
	case PIPE_CONTROL:
		info = TD_CC | TD_DP_SETUP | TD_T_DATA0;
		td_fill (ohci, info, urb->setup_dma, 8, urb, cnt++);
		if (data_len > 0) {
			info = TD_CC | TD_R | TD_T_DATA1;
			info |= is_out ? TD_DP_OUT : TD_DP_IN;
			/* NOTE:  mishandles transfers >8K, some >4K */
			td_fill (ohci, info, data, data_len, urb, cnt++);
		}
		info = (is_out || data_len == 0)
			? TD_CC | TD_DP_IN | TD_T_DATA1
			: TD_CC | TD_DP_OUT | TD_T_DATA1;
		td_fill (ohci, info, data, 0, urb, cnt++);
		/* maybe kickstart control list */
		wmb ();
		ohci_writel (ohci, OHCI_CLF, &ohci->regs->cmdstatus);
		break;

	/* ISO has no retransmit, so no toggle; and it uses special TDs.
	 * Each TD could handle multiple consecutive frames (interval 1);
	 * we could often reduce the number of TDs here.
	 */
	case PIPE_ISOCHRONOUS:
		for (cnt = urb_priv->td_cnt; cnt < urb->number_of_packets;
				cnt++) {
			int	frame = urb->start_frame;

			// FIXME scheduling should handle frame counter
			// roll-around ... exotic case (and OHCI has
			// a 2^16 iso range, vs other HCs max of 2^10)
			frame += cnt * urb->interval;
			frame &= 0xffff;
			td_fill (ohci, TD_CC | TD_ISO | frame,
				data + urb->iso_frame_desc [cnt].offset,
				urb->iso_frame_desc [cnt].length, urb, cnt);
		}
		if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0) {
			if (quirk_amdiso(ohci))
				usb_amd_quirk_pll_disable();
			if (quirk_amdprefetch(ohci))
				sb800_prefetch(dev, 1);
		}
		periodic = ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs++ == 0
			&& ohci_to_hcd(ohci)->self.bandwidth_int_reqs == 0;
		break;
	}

	/* start periodic dma if needed */
	if (periodic) {
		wmb ();
		ohci->hc_control |= OHCI_CTRL_PLE|OHCI_CTRL_IE;
		ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
	}

	// ASSERT (urb_priv->length == cnt);
}

/*-------------------------------------------------------------------------*
 * Done List handling functions
 *-------------------------------------------------------------------------*/

/* calculate transfer length/status and update the urb */
static int td_done(struct ohci_hcd *ohci, struct urb *urb, struct td *td)
{
	u32	tdINFO = hc32_to_cpup (ohci, &td->hwINFO);
	int	cc = 0;
	int	status = -EINPROGRESS;

	list_del (&td->td_list);

	/* ISO ... drivers see per-TD length/status */
	if (tdINFO & TD_ISO) {
		u16	tdPSW = ohci_hwPSW(ohci, td, 0);
		int	dlen = 0;

		/* NOTE:  assumes FC in tdINFO == 0, and that
		 * only the first of 0..MAXPSW psws is used.
		 */

		cc = (tdPSW >> 12) & 0xF;
		if (tdINFO & TD_CC)	/* hc didn't touch? */
			return status;

		if (usb_pipeout (urb->pipe))
			dlen = urb->iso_frame_desc [td->index].length;
		else {
			/* short reads are always OK for ISO */
			if (cc == TD_DATAUNDERRUN)
				cc = TD_CC_NOERROR;
			dlen = tdPSW & 0x3ff;
		}
		urb->actual_length += dlen;
		urb->iso_frame_desc [td->index].actual_length = dlen;
		urb->iso_frame_desc [td->index].status = cc_to_error [cc];

		if (cc != TD_CC_NOERROR)
			ohci_dbg(ohci,
				"urb %p iso td %p (%d) len %d cc %d\n",
				urb, td, 1 + td->index, dlen, cc);

	/* BULK, INT, CONTROL ... drivers see aggregate length/status,
	 * except that "setup" bytes aren't counted and "short" transfers
	 * might not be reported as errors.
	 */
	} else {
		int	type = usb_pipetype (urb->pipe);
		u32	tdBE = hc32_to_cpup (ohci, &td->hwBE);

		cc = TD_CC_GET (tdINFO);

		/* update packet status if needed (short is normally ok) */
		if (cc == TD_DATAUNDERRUN
				&& !(urb->transfer_flags & URB_SHORT_NOT_OK))
			cc = TD_CC_NOERROR;
		if (cc != TD_CC_NOERROR && cc < 0x0E)
			status = cc_to_error[cc];

		/* count all non-empty packets except control SETUP packet */
		if ((type != PIPE_CONTROL || td->index != 0) && tdBE != 0) {
			if (td->hwCBP == 0)
				urb->actual_length += tdBE - td->data_dma + 1;
			else
				urb->actual_length +=
					  hc32_to_cpup (ohci, &td->hwCBP)
					- td->data_dma;
		}

		if (cc != TD_CC_NOERROR && cc < 0x0E)
			ohci_dbg(ohci,
				"urb %p td %p (%d) cc %d, len=%d/%d\n",
				urb, td, 1 + td->index, cc,
				urb->actual_length,
				urb->transfer_buffer_length);
	}
	return status;
}

/*-------------------------------------------------------------------------*/

static void ed_halted(struct ohci_hcd *ohci, struct td *td, int cc)
{
	struct urb		*urb = td->urb;
	urb_priv_t		*urb_priv = urb->hcpriv;
	struct ed		*ed = td->ed;
	struct list_head	*tmp = td->td_list.next;
	__hc32			toggle = ed->hwHeadP & cpu_to_hc32 (ohci, ED_C);

	/* clear ed halt; this is the td that caused it, but keep it inactive
	 * until its urb->complete() has a chance to clean up.
	 */
	ed->hwINFO |= cpu_to_hc32 (ohci, ED_SKIP);
	wmb ();
	ed->hwHeadP &= ~cpu_to_hc32 (ohci, ED_H);

	/* Get rid of all later tds from this urb.  We don't have
	 * to be careful: no errors and nothing was transferred.
	 * Also patch the ed so it looks as if those tds completed normally.
	 */
	while (tmp != &ed->td_list) {
		struct td	*next;

		next = list_entry (tmp, struct td, td_list);
		tmp = next->td_list.next;

		if (next->urb != urb)
			break;

		/* NOTE: if multi-td control DATA segments get supported,
		 * this urb had one of them, this td wasn't the last td
		 * in that segment (TD_R clear), this ed halted because
		 * of a short read, _and_ URB_SHORT_NOT_OK is clear ...
		 * then we need to leave the control STATUS packet queued
		 * and clear ED_SKIP.
		 */

		list_del(&next->td_list);
		urb_priv->td_cnt++;
		ed->hwHeadP = next->hwNextTD | toggle;
	}

	/* help for troubleshooting:  report anything that
	 * looks odd ... that doesn't include protocol stalls
	 * (or maybe some other things)
	 */
	switch (cc) {
	case TD_DATAUNDERRUN:
		if ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0)
			break;
		/* fallthrough */
	case TD_CC_STALL:
		if (usb_pipecontrol (urb->pipe))
			break;
		/* fallthrough */
	default:
		ohci_dbg (ohci,
			"urb %p path %s ep%d%s %08x cc %d --> status %d\n",
			urb, urb->dev->devpath,
			usb_pipeendpoint (urb->pipe),
			usb_pipein (urb->pipe) ? "in" : "out",
			hc32_to_cpu (ohci, td->hwINFO),
			cc, cc_to_error [cc]);
	}
}

/* Add a TD to the done list */
static void add_to_done_list(struct ohci_hcd *ohci, struct td *td)
{
	struct td	*td2, *td_prev;
	struct ed	*ed;

	if (td->next_dl_td)
		return;		/* Already on the list */

	/* Add all the TDs going back until we reach one that's on the list */
	ed = td->ed;
	td2 = td_prev = td;
	list_for_each_entry_continue_reverse(td2, &ed->td_list, td_list) {
		if (td2->next_dl_td)
			break;
		td2->next_dl_td = td_prev;
		td_prev = td2;
	}

	if (ohci->dl_end)
		ohci->dl_end->next_dl_td = td_prev;
	else
		ohci->dl_start = td_prev;

	/*
	 * Make td->next_dl_td point to td itself, to mark the fact
	 * that td is on the done list.
	 */
	ohci->dl_end = td->next_dl_td = td;

	/* Did we just add the latest pending TD? */
	td2 = ed->pending_td;
	if (td2 && td2->next_dl_td)
		ed->pending_td = NULL;
}

/* Get the entries on the hardware done queue and put them on our list */
static void update_done_list(struct ohci_hcd *ohci)
{
	u32		td_dma;
	struct td	*td = NULL;

	td_dma = hc32_to_cpup (ohci, &ohci->hcca->done_head);
	ohci->hcca->done_head = 0;
	wmb();

	/* get TD from hc's singly linked list, and
	 * add to ours.  ed->td_list changes later.
	 */
	while (td_dma) {
		int		cc;

		td = dma_to_td (ohci, td_dma);
		if (!td) {
			ohci_err (ohci, "bad entry %8x\n", td_dma);
			break;
		}

		td->hwINFO |= cpu_to_hc32 (ohci, TD_DONE);
		cc = TD_CC_GET (hc32_to_cpup (ohci, &td->hwINFO));

		/* Non-iso endpoints can halt on error; un-halt,
		 * and dequeue any other TDs from this urb.
		 * No other TD could have caused the halt.
		 */
		if (cc != TD_CC_NOERROR
				&& (td->ed->hwHeadP & cpu_to_hc32 (ohci, ED_H)))
			ed_halted(ohci, td, cc);

		td_dma = hc32_to_cpup (ohci, &td->hwNextTD);
		add_to_done_list(ohci, td);
	}
}

/*-------------------------------------------------------------------------*/

/* there are some urbs/eds to unlink; called in_irq(), with HCD locked */
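/* finish_unlinks() walks ed_rm_list: once the HC can no longer reach an
 * ED (its unlink frame has passed), any unlinked URBs on it are given
 * back, and the ED is either rescheduled (TDs remain, HC running) or
 * taken off the list and idled.
 */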
static void finish_unlinks(struct ohci_hcd *ohci)
{
	unsigned	tick = ohci_frame_no(ohci);
	struct ed	*ed, **last;

rescan_all:
	for (last = &ohci->ed_rm_list, ed = *last; ed != NULL; ed = *last) {
		struct list_head	*entry, *tmp;
		int			completed, modified;
		__hc32			*prev;

		/* only take off EDs that the HC isn't using, accounting for
		 * frame counter wraps and EDs with partially retired TDs
		 */
		if (likely(ohci->rh_state == OHCI_RH_RUNNING) &&
				tick_before(tick, ed->tick)) {
skip_ed:
			last = &ed->ed_next;
			continue;
		}
		if (!list_empty(&ed->td_list)) {
			struct td	*td;
			u32		head;

			td = list_first_entry(&ed->td_list, struct td, td_list);

			/* INTR_WDH may need to clean up first */
			head = hc32_to_cpu(ohci, ed->hwHeadP) & TD_MASK;
			if (td->td_dma != head &&
					ohci->rh_state == OHCI_RH_RUNNING)
				goto skip_ed;

			/* Don't mess up anything already on the done list */
			if (td->next_dl_td)
				goto skip_ed;
		}

		/* ED's now officially unlinked, hc doesn't see */
		ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H);
		ed->hwNextED = 0;
		wmb();
		ed->hwINFO &= ~cpu_to_hc32(ohci, ED_SKIP | ED_DEQUEUE);

		/* reentrancy:  if we drop the schedule lock, someone might
		 * have modified this list.  normally it's just prepending
		 * entries (which we'd ignore), but paranoia won't hurt.
		 */
		modified = 0;

		/* unlink urbs as requested, but rescan the list after
		 * we call a completion since it might have unlinked
		 * another (earlier) urb
		 *
		 * When we get here, the HC doesn't see this ed.  But it
		 * must not be rescheduled until all completed URBs have
		 * been given back to the driver.
		 */
rescan_this:
		completed = 0;
		prev = &ed->hwHeadP;
		list_for_each_safe (entry, tmp, &ed->td_list) {
			struct td	*td;
			struct urb	*urb;
			urb_priv_t	*urb_priv;
			__hc32		savebits;
			u32		tdINFO;

			td = list_entry (entry, struct td, td_list);
			urb = td->urb;
			urb_priv = td->urb->hcpriv;

			if (!urb->unlinked) {
				prev = &td->hwNextTD;
				continue;
			}

			/* patch pointer hc uses */
			savebits = *prev & ~cpu_to_hc32 (ohci, TD_MASK);
			*prev = td->hwNextTD | savebits;

			/* If this was unlinked, the TD may not have been
			 * retired ... so manually save the data toggle.
			 * The controller ignores the value we save for
			 * control and ISO endpoints.
			 */
			tdINFO = hc32_to_cpup(ohci, &td->hwINFO);
			if ((tdINFO & TD_T) == TD_T_DATA0)
				ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_C);
			else if ((tdINFO & TD_T) == TD_T_DATA1)
				ed->hwHeadP |= cpu_to_hc32(ohci, ED_C);

			/* HC may have partly processed this TD */
			td_done (ohci, urb, td);
			urb_priv->td_cnt++;

			/* if URB is done, clean up */
			if (urb_priv->td_cnt >= urb_priv->length) {
				modified = completed = 1;
				finish_urb(ohci, urb, 0);
			}
		}
		if (completed && !list_empty (&ed->td_list))
			goto rescan_this;

		/*
		 * If no TDs are queued, take ED off the ed_rm_list.
		 * Otherwise, if the HC is running, reschedule.
		 * If not, leave it on the list for further dequeues.
		 */
		if (list_empty(&ed->td_list)) {
			*last = ed->ed_next;
			ed->ed_next = NULL;
			ed->state = ED_IDLE;
			list_del(&ed->in_use_list);
		} else if (ohci->rh_state == OHCI_RH_RUNNING) {
			*last = ed->ed_next;
			ed->ed_next = NULL;
			ed_schedule(ohci, ed);
		} else {
			last = &ed->ed_next;
		}

		if (modified)
			goto rescan_all;
	}

	/* maybe reenable control and bulk lists */
	if (ohci->rh_state == OHCI_RH_RUNNING && !ohci->ed_rm_list) {
		u32	command = 0, control = 0;

		if (ohci->ed_controltail) {
			command |= OHCI_CLF;
			if (quirk_zfmicro(ohci))
				mdelay(1);
			if (!(ohci->hc_control & OHCI_CTRL_CLE)) {
				control |= OHCI_CTRL_CLE;
				ohci_writel (ohci, 0,
					&ohci->regs->ed_controlcurrent);
			}
		}
		if (ohci->ed_bulktail) {
			command |= OHCI_BLF;
			if (quirk_zfmicro(ohci))
				mdelay(1);
			if (!(ohci->hc_control & OHCI_CTRL_BLE)) {
				control |= OHCI_CTRL_BLE;
				ohci_writel (ohci, 0,
					&ohci->regs->ed_bulkcurrent);
			}
		}

		/* CLE/BLE to enable, CLF/BLF to (maybe) kickstart */
		if (control) {
			ohci->hc_control |= control;
			if (quirk_zfmicro(ohci))
				mdelay(1);
			ohci_writel (ohci, ohci->hc_control,
					&ohci->regs->control);
		}
		if (command) {
			if (quirk_zfmicro(ohci))
				mdelay(1);
			ohci_writel (ohci, command, &ohci->regs->cmdstatus);
		}
	}
}



/*-------------------------------------------------------------------------*/

/* Take back a TD from the host controller */
static void takeback_td(struct ohci_hcd *ohci, struct td *td)
{
	struct urb	*urb = td->urb;
	urb_priv_t	*urb_priv = urb->hcpriv;
	struct ed	*ed = td->ed;
	int		status;

	/* update URB's length and status from TD */
	status = td_done(ohci, urb, td);
	urb_priv->td_cnt++;

	/* If all this urb's TDs are done, call complete() */
	if (urb_priv->td_cnt >= urb_priv->length)
		finish_urb(ohci, urb, status);

	/* clean schedule:  unlink EDs that are no longer busy */
	if (list_empty(&ed->td_list)) {
		if (ed->state == ED_OPER)
			start_ed_unlink(ohci, ed);

	/* ... reenabling halted EDs only after fault cleanup */
	} else if ((ed->hwINFO & cpu_to_hc32(ohci, ED_SKIP | ED_DEQUEUE))
			== cpu_to_hc32(ohci, ED_SKIP)) {
		td = list_entry(ed->td_list.next, struct td, td_list);
		if (!(td->hwINFO & cpu_to_hc32(ohci, TD_DONE))) {
			ed->hwINFO &= ~cpu_to_hc32(ohci, ED_SKIP);
			/* ... hc may need waking-up */
			switch (ed->type) {
			case PIPE_CONTROL:
				ohci_writel(ohci, OHCI_CLF,
						&ohci->regs->cmdstatus);
				break;
			case PIPE_BULK:
				ohci_writel(ohci, OHCI_BLF,
						&ohci->regs->cmdstatus);
				break;
			}
		}
	}
}

/*
 * Process normal completions (error or success) and clean the schedules.
 *
 * This is the main path for handing urbs back to drivers.  The only other
 * normal path is finish_unlinks(), which unlinks URBs using ed_rm_list,
 * instead of scanning the (re-reversed) donelist as this does.
 */
static void process_done_list(struct ohci_hcd *ohci)
{
	struct td	*td;

	while (ohci->dl_start) {
		td = ohci->dl_start;
		if (td == ohci->dl_end)
			ohci->dl_start = ohci->dl_end = NULL;
		else
			ohci->dl_start = td->next_dl_td;

		takeback_td(ohci, td);
	}
}

/*
 * TD takeback and URB giveback must be single-threaded.
 * This routine takes care of it all.
 */
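/* ohci->working serializes callers: if a giveback re-enters ohci_work()
 * while it is already running, only restart_work is set, and the outer
 * invocation loops until no more work is pending.
 */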
static void ohci_work(struct ohci_hcd *ohci)
{
	if (ohci->working) {
		ohci->restart_work = 1;
		return;
	}
	ohci->working = 1;

restart:
	process_done_list(ohci);
	if (ohci->ed_rm_list)
		finish_unlinks(ohci);

	if (ohci->restart_work) {
		ohci->restart_work = 0;
		goto restart;
	}
	ohci->working = 0;
}