/*
 * OHCI HCD (Host Controller Driver) for USB.
 *
 * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
 * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
 *
 * This file is licensed under the GPL.
 */

#include <linux/irq.h>

static void urb_free_priv (struct ohci_hcd *hc, urb_priv_t *urb_priv)
{
	int last = urb_priv->length - 1;

	if (last >= 0) {
		int i;
		struct td *td;

		for (i = 0; i <= last; i++) {
			td = urb_priv->td [i];
			if (td)
				td_free (hc, td);
		}
	}

	list_del (&urb_priv->pending);
	kfree (urb_priv);
}

/*-------------------------------------------------------------------------*/

/*
 * URB goes back to driver, and isn't reissued.
 * It's completely gone from HC data structures.
 * PRECONDITION: ohci lock held, irqs blocked.
 */
static void
finish_urb(struct ohci_hcd *ohci, struct urb *urb, int status)
__releases(ohci->lock)
__acquires(ohci->lock)
{
	// ASSERT (urb->hcpriv != 0);

	urb_free_priv (ohci, urb->hcpriv);
	if (likely(status == -EINPROGRESS))
		status = 0;

	switch (usb_pipetype (urb->pipe)) {
	case PIPE_ISOCHRONOUS:
		ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs--;
		break;
	case PIPE_INTERRUPT:
		ohci_to_hcd(ohci)->self.bandwidth_int_reqs--;
		break;
	}

#ifdef OHCI_VERBOSE_DEBUG
	urb_print(urb, "RET", usb_pipeout (urb->pipe), status);
#endif

	/* urb->complete() can reenter this HCD */
	usb_hcd_unlink_urb_from_ep(ohci_to_hcd(ohci), urb);
	spin_unlock (&ohci->lock);
	urb->status = status;
	usb_hcd_giveback_urb (ohci_to_hcd(ohci), urb);
	spin_lock (&ohci->lock);

	/* stop periodic dma if it's not needed */
	if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0
			&& ohci_to_hcd(ohci)->self.bandwidth_int_reqs == 0) {
		ohci->hc_control &= ~(OHCI_CTRL_PLE|OHCI_CTRL_IE);
		ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
	}
}


/*-------------------------------------------------------------------------*
 * ED handling functions
 *-------------------------------------------------------------------------*/

/* search for the right schedule branch to use for a periodic ed.
 * does some load balancing; returns the branch, or negative errno.
 */
static int balance (struct ohci_hcd *ohci, int interval, int load)
{
	int i, branch = -ENOSPC;

	/* iso periods can be huge; iso tds specify frame numbers */
	if (interval > NUM_INTS)
		interval = NUM_INTS;

	/* search for the least loaded schedule branch of that period
	 * that has enough bandwidth left unreserved.
	 */
	for (i = 0; i < interval ; i++) {
		if (branch < 0 || ohci->load [branch] > ohci->load [i]) {
			int j;

			/* usb 1.1 says 90% of one frame */
			for (j = i; j < NUM_INTS; j += interval) {
				if ((ohci->load [j] + load) > 900)
					break;
			}
			if (j < NUM_INTS)
				continue;
			branch = i;
		}
	}
	return branch;
}
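
/* For illustration: with NUM_INTS == 32 (the periodic table size from
 * ohci.h) and an ed whose interval is 8, balance() considers branches
 * 0..7; choosing branch b reserves the ed's load in slots b, b+8, b+16,
 * and b+24.  A 300 us interrupt ed therefore fits a branch only while
 * each of those four slots can absorb the extra 300 us without exceeding
 * 900 us, the 90% of a 1 msec frame that USB 1.1 allows for periodic
 * transfers.
 */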

/*-------------------------------------------------------------------------*/

/* both iso and interrupt requests have periods; this routine puts them
 * into the schedule tree in the appropriate place.  most iso devices use
 * 1msec periods, but that's not required.
 */
static void periodic_link (struct ohci_hcd *ohci, struct ed *ed)
{
	unsigned i;

	ohci_vdbg (ohci, "link %sed %p branch %d [%dus.], interval %d\n",
		(ed->hwINFO & cpu_to_hc32 (ohci, ED_ISO)) ? "iso " : "",
		ed, ed->branch, ed->load, ed->interval);

	for (i = ed->branch; i < NUM_INTS; i += ed->interval) {
		struct ed **prev = &ohci->periodic [i];
		__hc32 *prev_p = &ohci->hcca->int_table [i];
		struct ed *here = *prev;

		/* sorting each branch by period (slow before fast)
		 * lets us share the faster parts of the tree.
		 * (plus maybe: put interrupt eds before iso)
		 */
		while (here && ed != here) {
			if (ed->interval > here->interval)
				break;
			prev = &here->ed_next;
			prev_p = &here->hwNextED;
			here = *prev;
		}
		if (ed != here) {
			ed->ed_next = here;
			if (here)
				ed->hwNextED = *prev_p;
			wmb ();
			*prev = ed;
			*prev_p = cpu_to_hc32(ohci, ed->dma);
			wmb();
		}
		ohci->load [i] += ed->load;
	}
	ohci_to_hcd(ohci)->self.bandwidth_allocated += ed->load / ed->interval;
}
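
/* For illustration: because each branch is kept sorted slow-before-fast,
 * an interval-1 ed ends up at the tail of all 32 branches while existing
 * only once in memory, and an interval-32 ed heads exactly one branch.
 * That is the sharing of the "faster parts of the tree" noted above:
 * many int_table entries converge onto the same short-interval eds,
 * matching the tree of figure 3-5 in the OHCI spec.
 */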

/* link an ed into one of the HC chains */

static int ed_schedule (struct ohci_hcd *ohci, struct ed *ed)
{
	int branch;

	if (ohci_to_hcd(ohci)->state == HC_STATE_QUIESCING)
		return -EAGAIN;

	ed->state = ED_OPER;
	ed->ed_prev = NULL;
	ed->ed_next = NULL;
	ed->hwNextED = 0;
	if (quirk_zfmicro(ohci)
			&& (ed->type == PIPE_INTERRUPT)
			&& !(ohci->eds_scheduled++))
		mod_timer(&ohci->unlink_watchdog, round_jiffies_relative(HZ));
	wmb ();

	/* we care about rm_list when setting CLE/BLE in case the HC was at
	 * work on some TD when CLE/BLE was turned off, and isn't quiesced
	 * yet.  finish_unlinks() restarts as needed, at some upcoming INTR_SF.
	 *
	 * control and bulk EDs are doubly linked (ed_next, ed_prev), but
	 * periodic ones are singly linked (ed_next).  that's because the
	 * periodic schedule encodes a tree like figure 3-5 in the ohci
	 * spec:  each ed can have several "previous" nodes, and the tree
	 * doesn't have unused/idle descriptors.
	 */
	switch (ed->type) {
	case PIPE_CONTROL:
		if (ohci->ed_controltail == NULL) {
			WARN_ON (ohci->hc_control & OHCI_CTRL_CLE);
			ohci_writel (ohci, ed->dma,
					&ohci->regs->ed_controlhead);
		} else {
			ohci->ed_controltail->ed_next = ed;
			ohci->ed_controltail->hwNextED = cpu_to_hc32 (ohci,
								ed->dma);
		}
		ed->ed_prev = ohci->ed_controltail;
		if (!ohci->ed_controltail && !ohci->ed_rm_list) {
			wmb();
			ohci->hc_control |= OHCI_CTRL_CLE;
			ohci_writel (ohci, 0, &ohci->regs->ed_controlcurrent);
			ohci_writel (ohci, ohci->hc_control,
					&ohci->regs->control);
		}
		ohci->ed_controltail = ed;
		break;

	case PIPE_BULK:
		if (ohci->ed_bulktail == NULL) {
			WARN_ON (ohci->hc_control & OHCI_CTRL_BLE);
			ohci_writel (ohci, ed->dma, &ohci->regs->ed_bulkhead);
		} else {
			ohci->ed_bulktail->ed_next = ed;
			ohci->ed_bulktail->hwNextED = cpu_to_hc32 (ohci,
								ed->dma);
		}
		ed->ed_prev = ohci->ed_bulktail;
		if (!ohci->ed_bulktail && !ohci->ed_rm_list) {
			wmb();
			ohci->hc_control |= OHCI_CTRL_BLE;
			ohci_writel (ohci, 0, &ohci->regs->ed_bulkcurrent);
			ohci_writel (ohci, ohci->hc_control,
					&ohci->regs->control);
		}
		ohci->ed_bulktail = ed;
		break;

	// case PIPE_INTERRUPT:
	// case PIPE_ISOCHRONOUS:
	default:
		branch = balance (ohci, ed->interval, ed->load);
		if (branch < 0) {
			ohci_dbg (ohci,
				"ERR %d, interval %d msecs, load %d\n",
				branch, ed->interval, ed->load);
			// FIXME if there are TDs queued, fail them!
			return branch;
		}
		ed->branch = branch;
		periodic_link (ohci, ed);
	}

	/* the HC may not see the schedule updates yet, but if it does
	 * then they'll be properly ordered.
	 */
	return 0;
}

/*-------------------------------------------------------------------------*/

/* scan the periodic table to find and unlink this ED */
static void periodic_unlink (struct ohci_hcd *ohci, struct ed *ed)
{
	int i;

	for (i = ed->branch; i < NUM_INTS; i += ed->interval) {
		struct ed *temp;
		struct ed **prev = &ohci->periodic [i];
		__hc32 *prev_p = &ohci->hcca->int_table [i];

		while (*prev && (temp = *prev) != ed) {
			prev_p = &temp->hwNextED;
			prev = &temp->ed_next;
		}
		if (*prev) {
			*prev_p = ed->hwNextED;
			*prev = ed->ed_next;
		}
		ohci->load [i] -= ed->load;
	}
	ohci_to_hcd(ohci)->self.bandwidth_allocated -= ed->load / ed->interval;

	ohci_vdbg (ohci, "unlink %sed %p branch %d [%dus.], interval %d\n",
		(ed->hwINFO & cpu_to_hc32 (ohci, ED_ISO)) ? "iso " : "",
		ed, ed->branch, ed->load, ed->interval);
}

/* unlink an ed from one of the HC chains.
 * just the link to the ed is unlinked.
 * the link from the ed still points to another operational ed or 0
 * so the HC can eventually finish the processing of the unlinked ed
 * (assuming it already started that, which needn't be true).
 *
 * ED_UNLINK is a transient state: the HC may still see this ED, but soon
 * it won't.  ED_SKIP means the HC will finish its current transaction,
 * but won't start anything new.  The TD queue may still grow; device
 * drivers don't know about this HCD-internal state.
 *
 * When the HC can't see the ED, something changes ED_UNLINK to one of:
 *
 *  - ED_OPER: when there's any request queued, the ED gets rescheduled
 *    immediately.  HC should be working on them.
 *
 *  - ED_IDLE: when there's no TD queue.  there's no reason for the HC
 *    to care about this ED; safe to disable the endpoint.
 *
 * When finish_unlinks() runs later, after SOF interrupt, it will often
 * complete one or more URB unlinks before making that state change.
 */
static void ed_deschedule (struct ohci_hcd *ohci, struct ed *ed)
{
	ed->hwINFO |= cpu_to_hc32 (ohci, ED_SKIP);
	wmb ();
	ed->state = ED_UNLINK;

	/* To deschedule something from the control or bulk list, just
	 * clear CLE/BLE and wait.  There's no safe way to scrub out list
	 * head/current registers until later, and "later" isn't very
	 * tightly specified.  Figure 6-5 and Section 6.4.2.2 show how
	 * the HC is reading the ED queues (while we modify them).
	 *
	 * For now, ed_schedule() is "later".  It might be good paranoia
	 * to scrub those registers in finish_unlinks(), in case of bugs
	 * that make the HC try to use them.
	 */
	switch (ed->type) {
	case PIPE_CONTROL:
		/* remove ED from the HC's list: */
		if (ed->ed_prev == NULL) {
			if (!ed->hwNextED) {
				ohci->hc_control &= ~OHCI_CTRL_CLE;
				ohci_writel (ohci, ohci->hc_control,
						&ohci->regs->control);
				// an ohci_readl() later syncs CLE with the HC
			} else
				ohci_writel (ohci,
					hc32_to_cpup (ohci, &ed->hwNextED),
					&ohci->regs->ed_controlhead);
		} else {
			ed->ed_prev->ed_next = ed->ed_next;
			ed->ed_prev->hwNextED = ed->hwNextED;
		}
		/* remove ED from the HCD's list: */
		if (ohci->ed_controltail == ed) {
			ohci->ed_controltail = ed->ed_prev;
			if (ohci->ed_controltail)
				ohci->ed_controltail->ed_next = NULL;
		} else if (ed->ed_next) {
			ed->ed_next->ed_prev = ed->ed_prev;
		}
		break;

	case PIPE_BULK:
		/* remove ED from the HC's list: */
		if (ed->ed_prev == NULL) {
			if (!ed->hwNextED) {
				ohci->hc_control &= ~OHCI_CTRL_BLE;
				ohci_writel (ohci, ohci->hc_control,
						&ohci->regs->control);
				// an ohci_readl() later syncs BLE with the HC
			} else
				ohci_writel (ohci,
					hc32_to_cpup (ohci, &ed->hwNextED),
					&ohci->regs->ed_bulkhead);
		} else {
			ed->ed_prev->ed_next = ed->ed_next;
			ed->ed_prev->hwNextED = ed->hwNextED;
		}
		/* remove ED from the HCD's list: */
		if (ohci->ed_bulktail == ed) {
			ohci->ed_bulktail = ed->ed_prev;
			if (ohci->ed_bulktail)
				ohci->ed_bulktail->ed_next = NULL;
		} else if (ed->ed_next) {
			ed->ed_next->ed_prev = ed->ed_prev;
		}
		break;

	// case PIPE_INTERRUPT:
	// case PIPE_ISOCHRONOUS:
	default:
		periodic_unlink (ohci, ed);
		break;
	}
}
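
/* Summary of the ed->state lifecycle implemented in this file:
 *
 *	ED_IDLE   --ed_schedule()-->     ED_OPER
 *	ED_OPER   --ed_deschedule()-->   ED_UNLINK
 *	ED_UNLINK --finish_unlinks()-->  ED_IDLE  (TD queue empty)
 *	ED_UNLINK --finish_unlinks()-->  ED_OPER  (TDs still queued;
 *						   rescheduled via ed_schedule)
 */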

/*-------------------------------------------------------------------------*/

/* get and maybe (re)init an endpoint. init _should_ be done only as part
 * of enumeration, usb_set_configuration() or usb_set_interface().
 */
static struct ed *ed_get (
	struct ohci_hcd *ohci,
	struct usb_host_endpoint *ep,
	struct usb_device *udev,
	unsigned int pipe,
	int interval
) {
	struct ed *ed;
	unsigned long flags;

	spin_lock_irqsave (&ohci->lock, flags);

	if (!(ed = ep->hcpriv)) {
		struct td *td;
		int is_out;
		u32 info;

		ed = ed_alloc (ohci, GFP_ATOMIC);
		if (!ed) {
			/* out of memory */
			goto done;
		}

		/* dummy td; end of td list for ed */
		td = td_alloc (ohci, GFP_ATOMIC);
		if (!td) {
			/* out of memory */
			ed_free (ohci, ed);
			ed = NULL;
			goto done;
		}
		ed->dummy = td;
		ed->hwTailP = cpu_to_hc32 (ohci, td->td_dma);
		ed->hwHeadP = ed->hwTailP;	/* ED_C, ED_H zeroed */
		ed->state = ED_IDLE;

		is_out = !(ep->desc.bEndpointAddress & USB_DIR_IN);

		/* FIXME usbcore changes dev->devnum before SET_ADDRESS
		 * succeeds ... otherwise we wouldn't need "pipe".
		 */
		info = usb_pipedevice (pipe);
		ed->type = usb_pipetype(pipe);

		info |= (ep->desc.bEndpointAddress & ~USB_DIR_IN) << 7;
		info |= le16_to_cpu(ep->desc.wMaxPacketSize) << 16;
		if (udev->speed == USB_SPEED_LOW)
			info |= ED_LOWSPEED;
		/* only control transfers store pids in tds */
		if (ed->type != PIPE_CONTROL) {
			info |= is_out ? ED_OUT : ED_IN;
			if (ed->type != PIPE_BULK) {
				/* periodic transfers... */
				if (ed->type == PIPE_ISOCHRONOUS)
					info |= ED_ISO;
				else if (interval > 32)	/* iso can be bigger */
					interval = 32;
				ed->interval = interval;
				ed->load = usb_calc_bus_time (
					udev->speed, !is_out,
					ed->type == PIPE_ISOCHRONOUS,
					le16_to_cpu(ep->desc.wMaxPacketSize))
						/ 1000;
			}
		}
		ed->hwINFO = cpu_to_hc32(ohci, info);

		ep->hcpriv = ed;
	}

done:
	spin_unlock_irqrestore (&ohci->lock, flags);
	return ed;
}

/*-------------------------------------------------------------------------*/

/* request unlinking of an endpoint from an operational HC.
 * put the ep on the rm_list
 * real work is done at the next start frame (SF) hardware interrupt
 * caller guarantees HCD is running, so hardware access is safe,
 * and that ed->state is ED_OPER
 */
static void start_ed_unlink (struct ohci_hcd *ohci, struct ed *ed)
{
	ed->hwINFO |= cpu_to_hc32 (ohci, ED_DEQUEUE);
	ed_deschedule (ohci, ed);

	/* rm_list is just singly linked, for simplicity */
	ed->ed_next = ohci->ed_rm_list;
	ed->ed_prev = NULL;
	ohci->ed_rm_list = ed;

	/* enable SOF interrupt */
	ohci_writel (ohci, OHCI_INTR_SF, &ohci->regs->intrstatus);
	ohci_writel (ohci, OHCI_INTR_SF, &ohci->regs->intrenable);
	// flush those writes, and get latest HCCA contents
	(void) ohci_readl (ohci, &ohci->regs->control);

	/* SF interrupt might get delayed; record the frame counter value that
	 * indicates when the HC isn't looking at it, so concurrent unlinks
	 * behave.  frame_no wraps every 2^16 msec, and changes right before
	 * SF is triggered.
	 */
	ed->tick = ohci_frame_no(ohci) + 1;
}
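
/* For illustration: finish_unlinks() later compares its current frame
 * number against ed->tick using tick_before() (a signed 16-bit
 * subtraction, defined in ohci.h), so the comparison stays correct
 * across the 2^16 msec wrap of frame_no: tick 0xfff0 still counts as
 * "before" tick 0x0010 even though it is numerically larger.
 */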

/*-------------------------------------------------------------------------*
 * TD handling functions
 *-------------------------------------------------------------------------*/

/* enqueue next TD for this URB (OHCI spec 5.2.8.2) */

static void
td_fill (struct ohci_hcd *ohci, u32 info,
	dma_addr_t data, int len,
	struct urb *urb, int index)
{
	struct td *td, *td_pt;
	struct urb_priv *urb_priv = urb->hcpriv;
	int is_iso = info & TD_ISO;
	int hash;

	// ASSERT (index < urb_priv->length);

	/* aim for only one interrupt per urb.  mostly applies to control
	 * and iso; other urbs rarely need more than one TD per urb.
	 * this way, only final tds (or ones with an error) cause IRQs.
	 * at least immediately; use DI=6 in case any control request is
	 * tempted to die part way through.  (and to force the hc to flush
	 * its donelist soonish, even on unlink paths.)
	 *
	 * NOTE: could delay interrupts even for the last TD, and get fewer
	 * interrupts ... increasing per-urb latency by sharing interrupts.
	 * Drivers that queue bulk urbs may request that behavior.
	 */
	if (index != (urb_priv->length - 1)
			|| (urb->transfer_flags & URB_NO_INTERRUPT))
		info |= TD_DI_SET (6);

	/* use this td as the next dummy */
	td_pt = urb_priv->td [index];

	/* fill the old dummy TD */
	td = urb_priv->td [index] = urb_priv->ed->dummy;
	urb_priv->ed->dummy = td_pt;

	td->ed = urb_priv->ed;
	td->next_dl_td = NULL;
	td->index = index;
	td->urb = urb;
	td->data_dma = data;
	if (!len)
		data = 0;

	td->hwINFO = cpu_to_hc32 (ohci, info);
	if (is_iso) {
		td->hwCBP = cpu_to_hc32 (ohci, data & 0xFFFFF000);
		*ohci_hwPSWp(ohci, td, 0) = cpu_to_hc16 (ohci,
						(data & 0x0FFF) | 0xE000);
		td->ed->last_iso = info & 0xffff;
	} else {
		td->hwCBP = cpu_to_hc32 (ohci, data);
	}
	if (data)
		td->hwBE = cpu_to_hc32 (ohci, data + len - 1);
	else
		td->hwBE = 0;
	td->hwNextTD = cpu_to_hc32 (ohci, td_pt->td_dma);

	/* append to queue */
	list_add_tail (&td->td_list, &td->ed->td_list);

	/* hash it for later reverse mapping */
	hash = TD_HASH_FUNC (td->td_dma);
	td->td_hash = ohci->td_hash [hash];
	ohci->td_hash [hash] = td;

	/* HC might read the TD (or cachelines) right away ... */
	wmb ();
	td->ed->hwTailP = td->hwNextTD;
}
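
/* For illustration: the "dummy td" dance above keeps ed->hwTailP valid
 * at all times.  The URB's data is written into the ed's current dummy,
 * the freshly allocated td from urb_priv becomes the new dummy, and only
 * the final hwTailP update (after the wmb) exposes the new work to the
 * HC, which processes TDs strictly between hwHeadP and hwTailP.
 */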

/*-------------------------------------------------------------------------*/

/* Prepare all TDs of a transfer, and queue them onto the ED.
 * Caller guarantees HC is active.
 * Usually the ED is already on the schedule, so TDs might be
 * processed as soon as they're queued.
 */
static void td_submit_urb (
	struct ohci_hcd *ohci,
	struct urb *urb
) {
	struct urb_priv *urb_priv = urb->hcpriv;
	dma_addr_t data;
	int data_len = urb->transfer_buffer_length;
	int cnt = 0;
	u32 info = 0;
	int is_out = usb_pipeout (urb->pipe);
	int periodic = 0;

	/* OHCI handles the bulk/interrupt data toggles itself.  We just
	 * use the device toggle bits for resetting, and rely on the fact
	 * that resetting toggle is meaningless if the endpoint is active.
	 */
	if (!usb_gettoggle (urb->dev, usb_pipeendpoint (urb->pipe), is_out)) {
		usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe),
			is_out, 1);
		urb_priv->ed->hwHeadP &= ~cpu_to_hc32 (ohci, ED_C);
	}

	urb_priv->td_cnt = 0;
	list_add (&urb_priv->pending, &ohci->pending);

	if (data_len)
		data = urb->transfer_dma;
	else
		data = 0;

	/* NOTE:  TD_CC is set so we can tell which TDs the HC processed by
	 * using TD_CC_GET, as well as by seeing them on the done list.
	 * (CC = NotAccessed ... 0x0F, or 0x0E in PSWs for ISO.)
	 */
	switch (urb_priv->ed->type) {

	/* Bulk and interrupt are identical except for where in the schedule
	 * their EDs live.
	 */
	case PIPE_INTERRUPT:
		/* ... and periodic urbs have extra accounting */
		periodic = ohci_to_hcd(ohci)->self.bandwidth_int_reqs++ == 0
			&& ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0;
		/* FALLTHROUGH */
	case PIPE_BULK:
		info = is_out
			? TD_T_TOGGLE | TD_CC | TD_DP_OUT
			: TD_T_TOGGLE | TD_CC | TD_DP_IN;
		/* TDs _could_ transfer up to 8K each */
		while (data_len > 4096) {
			td_fill (ohci, info, data, 4096, urb, cnt);
			data += 4096;
			data_len -= 4096;
			cnt++;
		}
		/* maybe avoid ED halt on final TD short read */
		if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
			info |= TD_R;
		td_fill (ohci, info, data, data_len, urb, cnt);
		cnt++;
		if ((urb->transfer_flags & URB_ZERO_PACKET)
				&& cnt < urb_priv->length) {
			td_fill (ohci, info, 0, 0, urb, cnt);
			cnt++;
		}
		/* maybe kickstart bulk list */
		if (urb_priv->ed->type == PIPE_BULK) {
			wmb ();
			ohci_writel (ohci, OHCI_BLF, &ohci->regs->cmdstatus);
		}
		break;

	/* control manages DATA0/DATA1 toggle per-request; SETUP resets it,
	 * any DATA phase works normally, and the STATUS ack is special.
	 */
	case PIPE_CONTROL:
		info = TD_CC | TD_DP_SETUP | TD_T_DATA0;
		td_fill (ohci, info, urb->setup_dma, 8, urb, cnt++);
		if (data_len > 0) {
			info = TD_CC | TD_R | TD_T_DATA1;
			info |= is_out ? TD_DP_OUT : TD_DP_IN;
			/* NOTE:  mishandles transfers >8K, some >4K */
			td_fill (ohci, info, data, data_len, urb, cnt++);
		}
		info = (is_out || data_len == 0)
			? TD_CC | TD_DP_IN | TD_T_DATA1
			: TD_CC | TD_DP_OUT | TD_T_DATA1;
		td_fill (ohci, info, data, 0, urb, cnt++);
		/* maybe kickstart control list */
		wmb ();
		ohci_writel (ohci, OHCI_CLF, &ohci->regs->cmdstatus);
		break;

	/* ISO has no retransmit, so no toggle; and it uses special TDs.
	 * Each TD could handle multiple consecutive frames (interval 1);
	 * we could often reduce the number of TDs here.
	 */
	case PIPE_ISOCHRONOUS:
		for (cnt = 0; cnt < urb->number_of_packets; cnt++) {
			int frame = urb->start_frame;

			// FIXME scheduling should handle frame counter
			// roll-around ... exotic case (and OHCI has
			// a 2^16 iso range, vs other HCs max of 2^10)
			frame += cnt * urb->interval;
			frame &= 0xffff;
			td_fill (ohci, TD_CC | TD_ISO | frame,
				data + urb->iso_frame_desc [cnt].offset,
				urb->iso_frame_desc [cnt].length, urb, cnt);
		}
		periodic = ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs++ == 0
			&& ohci_to_hcd(ohci)->self.bandwidth_int_reqs == 0;
		break;
	}

	/* start periodic dma if needed */
	if (periodic) {
		wmb ();
		ohci->hc_control |= OHCI_CTRL_PLE|OHCI_CTRL_IE;
		ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
	}

	// ASSERT (urb_priv->length == cnt);
}
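
/* For illustration: a 9000 byte bulk OUT urb is split above into TDs of
 * 4096 + 4096 + 808 bytes (three td_fill() calls), and a fourth zero
 * length TD is appended only when URB_ZERO_PACKET is set and urb_priv
 * was sized for it.  A control urb always gets its SETUP and STATUS TDs
 * plus at most one DATA TD; iso urbs get one TD per packet.
 */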

/*-------------------------------------------------------------------------*
 * Done List handling functions
 *-------------------------------------------------------------------------*/

/* calculate transfer length/status and update the urb */
static int td_done(struct ohci_hcd *ohci, struct urb *urb, struct td *td)
{
	u32 tdINFO = hc32_to_cpup (ohci, &td->hwINFO);
	int cc = 0;
	int status = -EINPROGRESS;

	list_del (&td->td_list);

	/* ISO ... drivers see per-TD length/status */
	if (tdINFO & TD_ISO) {
		u16 tdPSW = ohci_hwPSW(ohci, td, 0);
		int dlen = 0;

		/* NOTE:  assumes FC in tdINFO == 0, and that
		 * only the first of 0..MAXPSW psws is used.
		 */

		cc = (tdPSW >> 12) & 0xF;
		if (tdINFO & TD_CC)	/* hc didn't touch? */
			return status;

		if (usb_pipeout (urb->pipe))
			dlen = urb->iso_frame_desc [td->index].length;
		else {
			/* short reads are always OK for ISO */
			if (cc == TD_DATAUNDERRUN)
				cc = TD_CC_NOERROR;
			dlen = tdPSW & 0x3ff;
		}
		urb->actual_length += dlen;
		urb->iso_frame_desc [td->index].actual_length = dlen;
		urb->iso_frame_desc [td->index].status = cc_to_error [cc];

		if (cc != TD_CC_NOERROR)
			ohci_vdbg (ohci,
				"urb %p iso td %p (%d) len %d cc %d\n",
				urb, td, 1 + td->index, dlen, cc);

	/* BULK, INT, CONTROL ... drivers see aggregate length/status,
	 * except that "setup" bytes aren't counted and "short" transfers
	 * might not be reported as errors.
	 */
	} else {
		int type = usb_pipetype (urb->pipe);
		u32 tdBE = hc32_to_cpup (ohci, &td->hwBE);

		cc = TD_CC_GET (tdINFO);

		/* update packet status if needed (short is normally ok) */
		if (cc == TD_DATAUNDERRUN
				&& !(urb->transfer_flags & URB_SHORT_NOT_OK))
			cc = TD_CC_NOERROR;
		if (cc != TD_CC_NOERROR && cc < 0x0E)
			status = cc_to_error[cc];

		/* count all non-empty packets except control SETUP packet */
		if ((type != PIPE_CONTROL || td->index != 0) && tdBE != 0) {
			if (td->hwCBP == 0)
				urb->actual_length += tdBE - td->data_dma + 1;
			else
				urb->actual_length +=
					  hc32_to_cpup (ohci, &td->hwCBP)
					- td->data_dma;
		}

		if (cc != TD_CC_NOERROR && cc < 0x0E)
			ohci_vdbg (ohci,
				"urb %p td %p (%d) cc %d, len=%d/%d\n",
				urb, td, 1 + td->index, cc,
				urb->actual_length,
				urb->transfer_buffer_length);
	}
	return status;
}
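
/* For illustration: the HC leaves hwCBP at zero when a TD's whole buffer
 * was transferred, so the length is (BE - start + 1); on a short transfer
 * hwCBP points at the first untransferred byte, so the length is
 * (CBP - start).  E.g. a 512 byte TD that moved 100 bytes contributes
 * actual_length += CBP - data_dma = 100.
 */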

/*-------------------------------------------------------------------------*/

static void ed_halted(struct ohci_hcd *ohci, struct td *td, int cc)
{
	struct urb *urb = td->urb;
	urb_priv_t *urb_priv = urb->hcpriv;
	struct ed *ed = td->ed;
	struct list_head *tmp = td->td_list.next;
	__hc32 toggle = ed->hwHeadP & cpu_to_hc32 (ohci, ED_C);

	/* clear ed halt; this is the td that caused it, but keep it inactive
	 * until its urb->complete() has a chance to clean up.
	 */
	ed->hwINFO |= cpu_to_hc32 (ohci, ED_SKIP);
	wmb ();
	ed->hwHeadP &= ~cpu_to_hc32 (ohci, ED_H);

	/* Get rid of all later tds from this urb.  We don't have
	 * to be careful: no errors and nothing was transferred.
	 * Also patch the ed so it looks as if those tds completed normally.
	 */
	while (tmp != &ed->td_list) {
		struct td *next;

		next = list_entry (tmp, struct td, td_list);
		tmp = next->td_list.next;

		if (next->urb != urb)
			break;

		/* NOTE: if multi-td control DATA segments get supported,
		 * this urb had one of them, this td wasn't the last td
		 * in that segment (TD_R clear), this ed halted because
		 * of a short read, _and_ URB_SHORT_NOT_OK is clear ...
		 * then we need to leave the control STATUS packet queued
		 * and clear ED_SKIP.
		 */

		list_del(&next->td_list);
		urb_priv->td_cnt++;
		ed->hwHeadP = next->hwNextTD | toggle;
	}

	/* help for troubleshooting:  report anything that
	 * looks odd ... that doesn't include protocol stalls
	 * (or maybe some other things)
	 */
	switch (cc) {
	case TD_DATAUNDERRUN:
		if ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0)
			break;
		/* fallthrough */
	case TD_CC_STALL:
		if (usb_pipecontrol (urb->pipe))
			break;
		/* fallthrough */
	default:
		ohci_dbg (ohci,
			"urb %p path %s ep%d%s %08x cc %d --> status %d\n",
			urb, urb->dev->devpath,
			usb_pipeendpoint (urb->pipe),
			usb_pipein (urb->pipe) ? "in" : "out",
			hc32_to_cpu (ohci, td->hwINFO),
			cc, cc_to_error [cc]);
	}
}

/* replies to the request have to be on a FIFO basis so
 * we unreverse the hc-reversed done-list
 */
static struct td *dl_reverse_done_list (struct ohci_hcd *ohci)
{
	u32 td_dma;
	struct td *td_rev = NULL;
	struct td *td = NULL;

	td_dma = hc32_to_cpup (ohci, &ohci->hcca->done_head);
	ohci->hcca->done_head = 0;
	wmb();

	/* get TD from hc's singly linked list, and
	 * prepend to ours.  ed->td_list changes later.
	 */
	while (td_dma) {
		int cc;

		td = dma_to_td (ohci, td_dma);
		if (!td) {
			ohci_err (ohci, "bad entry %8x\n", td_dma);
			break;
		}

		td->hwINFO |= cpu_to_hc32 (ohci, TD_DONE);
		cc = TD_CC_GET (hc32_to_cpup (ohci, &td->hwINFO));

		/* Non-iso endpoints can halt on error; un-halt,
		 * and dequeue any other TDs from this urb.
		 * No other TD could have caused the halt.
		 */
		if (cc != TD_CC_NOERROR
				&& (td->ed->hwHeadP & cpu_to_hc32 (ohci, ED_H)))
			ed_halted(ohci, td, cc);

		td->next_dl_td = td_rev;
		td_rev = td;
		td_dma = hc32_to_cpup (ohci, &td->hwNextTD);
	}
	return td_rev;
}
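
/* For illustration: the HC prepends each retired TD to hcca->done_head,
 * so if TDs A, B, C retire in that order the hardware list reads
 * C -> B -> A.  The loop above prepends again while walking it, handing
 * dl_done_list() the FIFO order A -> B -> C that drivers expect.
 */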

/*-------------------------------------------------------------------------*/

/* there are some urbs/eds to unlink; called in_irq(), with HCD locked */
static void
finish_unlinks (struct ohci_hcd *ohci, u16 tick)
{
	struct ed *ed, **last;

rescan_all:
	for (last = &ohci->ed_rm_list, ed = *last; ed != NULL; ed = *last) {
		struct list_head *entry, *tmp;
		int completed, modified;
		__hc32 *prev;

		/* only take off EDs that the HC isn't using, accounting for
		 * frame counter wraps and EDs with partially retired TDs
		 */
		if (likely (HC_IS_RUNNING(ohci_to_hcd(ohci)->state))) {
			if (tick_before (tick, ed->tick)) {
skip_ed:
				last = &ed->ed_next;
				continue;
			}

			if (!list_empty (&ed->td_list)) {
				struct td *td;
				u32 head;

				td = list_entry (ed->td_list.next, struct td,
							td_list);
				head = hc32_to_cpu (ohci, ed->hwHeadP) &
								TD_MASK;

				/* INTR_WDH may need to clean up first */
				if (td->td_dma != head) {
					if (ed == ohci->ed_to_check)
						ohci->ed_to_check = NULL;
					else
						goto skip_ed;
				}
			}
		}

		/* reentrancy:  if we drop the schedule lock, someone might
		 * have modified this list.  normally it's just prepending
		 * entries (which we'd ignore), but paranoia won't hurt.
		 */
		*last = ed->ed_next;
		ed->ed_next = NULL;
		modified = 0;

		/* unlink urbs as requested, but rescan the list after
		 * we call a completion since it might have unlinked
		 * another (earlier) urb
		 *
		 * When we get here, the HC doesn't see this ed.  But it
		 * must not be rescheduled until all completed URBs have
		 * been given back to the driver.
		 */
rescan_this:
		completed = 0;
		prev = &ed->hwHeadP;
		list_for_each_safe (entry, tmp, &ed->td_list) {
			struct td *td;
			struct urb *urb;
			urb_priv_t *urb_priv;
			__hc32 savebits;

			td = list_entry (entry, struct td, td_list);
			urb = td->urb;
			urb_priv = td->urb->hcpriv;

			if (!urb->unlinked) {
				prev = &td->hwNextTD;
				continue;
			}

			/* patch pointer hc uses */
			savebits = *prev & ~cpu_to_hc32 (ohci, TD_MASK);
			*prev = td->hwNextTD | savebits;

			/* HC may have partly processed this TD */
			td_done (ohci, urb, td);
			urb_priv->td_cnt++;

			/* if URB is done, clean up */
			if (urb_priv->td_cnt == urb_priv->length) {
				modified = completed = 1;
				finish_urb(ohci, urb, 0);
			}
		}
		if (completed && !list_empty (&ed->td_list))
			goto rescan_this;

		/* ED's now officially unlinked, hc doesn't see */
		ed->state = ED_IDLE;
		if (quirk_zfmicro(ohci) && ed->type == PIPE_INTERRUPT)
			ohci->eds_scheduled--;
		ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H);
		ed->hwNextED = 0;
		wmb ();
		ed->hwINFO &= ~cpu_to_hc32 (ohci, ED_SKIP | ED_DEQUEUE);

		/* but if there's work queued, reschedule */
		if (!list_empty (&ed->td_list)) {
			if (HC_IS_RUNNING(ohci_to_hcd(ohci)->state))
				ed_schedule (ohci, ed);
		}

		if (modified)
			goto rescan_all;
	}

	/* maybe reenable control and bulk lists */
	if (HC_IS_RUNNING(ohci_to_hcd(ohci)->state)
			&& ohci_to_hcd(ohci)->state != HC_STATE_QUIESCING
			&& !ohci->ed_rm_list) {
		u32 command = 0, control = 0;

		if (ohci->ed_controltail) {
			command |= OHCI_CLF;
			if (quirk_zfmicro(ohci))
				mdelay(1);
			if (!(ohci->hc_control & OHCI_CTRL_CLE)) {
				control |= OHCI_CTRL_CLE;
				ohci_writel (ohci, 0,
					&ohci->regs->ed_controlcurrent);
			}
		}
		if (ohci->ed_bulktail) {
			command |= OHCI_BLF;
			if (quirk_zfmicro(ohci))
				mdelay(1);
			if (!(ohci->hc_control & OHCI_CTRL_BLE)) {
				control |= OHCI_CTRL_BLE;
				ohci_writel (ohci, 0,
					&ohci->regs->ed_bulkcurrent);
			}
		}

		/* CLE/BLE to enable, CLF/BLF to (maybe) kickstart */
		if (control) {
			ohci->hc_control |= control;
			if (quirk_zfmicro(ohci))
				mdelay(1);
			ohci_writel (ohci, ohci->hc_control,
					&ohci->regs->control);
		}
		if (command) {
			if (quirk_zfmicro(ohci))
				mdelay(1);
			ohci_writel (ohci, command, &ohci->regs->cmdstatus);
		}
	}
}



/*-------------------------------------------------------------------------*/

/*
 * Used to take back a TD from the host controller.  This would normally be
 * called from within dl_done_list, however it may be called directly if the
 * HC no longer sees the TD and it has not appeared on the donelist (after
 * two frames).  This bug has been observed on ZF Micro systems.
 */
static void takeback_td(struct ohci_hcd *ohci, struct td *td)
{
	struct urb *urb = td->urb;
	urb_priv_t *urb_priv = urb->hcpriv;
	struct ed *ed = td->ed;
	int status;

	/* update URB's length and status from TD */
	status = td_done(ohci, urb, td);
	urb_priv->td_cnt++;

	/* If all this urb's TDs are done, call complete() */
	if (urb_priv->td_cnt == urb_priv->length)
		finish_urb(ohci, urb, status);

	/* clean schedule:  unlink EDs that are no longer busy */
	if (list_empty(&ed->td_list)) {
		if (ed->state == ED_OPER)
			start_ed_unlink(ohci, ed);

	/* ... reenabling halted EDs only after fault cleanup */
	} else if ((ed->hwINFO & cpu_to_hc32(ohci, ED_SKIP | ED_DEQUEUE))
			== cpu_to_hc32(ohci, ED_SKIP)) {
		td = list_entry(ed->td_list.next, struct td, td_list);
		if (!(td->hwINFO & cpu_to_hc32(ohci, TD_DONE))) {
			ed->hwINFO &= ~cpu_to_hc32(ohci, ED_SKIP);
			/* ... hc may need waking-up */
			switch (ed->type) {
			case PIPE_CONTROL:
				ohci_writel(ohci, OHCI_CLF,
						&ohci->regs->cmdstatus);
				break;
			case PIPE_BULK:
				ohci_writel(ohci, OHCI_BLF,
						&ohci->regs->cmdstatus);
				break;
			}
		}
	}
}

/*
 * Process normal completions (error or success) and clean the schedules.
 *
 * This is the main path for handing urbs back to drivers.  The only other
 * normal path is finish_unlinks(), which unlinks URBs using ed_rm_list,
 * instead of scanning the (re-reversed) donelist as this does.  There's
 * an abnormal path too, handling a quirk in some Compaq silicon:  URBs
 * with TDs that appear to be orphaned are directly reclaimed.
 */
static void
dl_done_list (struct ohci_hcd *ohci)
{
	struct td *td = dl_reverse_done_list (ohci);

	while (td) {
		struct td *td_next = td->next_dl_td;
		takeback_td(ohci, td);
		td = td_next;
	}
}