Commit | Line | Data |
---|---|---|
5fd54ace | 1 | // SPDX-License-Identifier: GPL-2.0+ |
1da177e4 | 2 | /* |
d49d4317 | 3 | * Copyright (C) 2001-2004 by David Brownell |
1da177e4 LT |
4 | */ |
5 | ||
6 | /* this file is part of ehci-hcd.c */ | |
7 | ||
8 | /*-------------------------------------------------------------------------*/ | |
9 | ||
10 | /* | |
11 | * EHCI hardware queue manipulation ... the core. QH/QTD manipulation. | |
12 | * | |
13 | * Control, bulk, and interrupt traffic all use "qh" lists. They list "qtd" | |
14 | * entries describing USB transactions, max 16-20kB/entry (with 4kB-aligned | |
15 | * buffers needed for the larger number). We use one QH per endpoint, queue | |
16 | * multiple urbs (all three types) per endpoint. URBs may need several qtds. | |
17 | * | |
18 | * ISO traffic uses "ISO TD" (itd, and sitd) records, and (along with | |
19 | * interrupts) needs careful scheduling. Performance improvements can be | |
20 | * an ongoing challenge. That's in "ehci-sched.c". | |
53bd6a60 | 21 | * |
1da177e4 LT |
22 | * USB 1.1 devices are handled (a) by "companion" OHCI or UHCI root hubs, |
23 | * or otherwise through transaction translators (TTs) in USB 2.0 hubs using | |
24 | * (b) special fields in qh entries or (c) split iso entries. TTs will | |
25 | * buffer low/full speed data so the host collects it at high speed. | |
26 | */ | |
27 | ||
28 | /*-------------------------------------------------------------------------*/ | |
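Before the code proper, here is an orientation sketch of the structures this file manipulates. It is illustrative only; the real definitions live in ehci.h as struct ehci_qh and struct ehci_qtd, and the field names below are simplified stand-ins.

```c
/*
 *   ehci->async head QH -> QH (endpoint A) -> QH (endpoint B) -> ... (ring)
 *                             |
 *                             +-> qTD -> qTD -> qTD -> dummy qTD (inactive)
 *                                (urb 1)  (urb 1) (urb 2)
 *
 * One QH per endpoint; each queued URB contributes one or more qTDs, and
 * the chain always ends in an inactive dummy qTD for the controller to
 * park on while new work is appended.
 */
struct sketch_qtd {
	struct sketch_qtd *next;	/* hardware follows DMA pointers (hw_next) */
	unsigned token;			/* active/halt status, PID, byte count */
};

struct sketch_qh {
	struct sketch_qh *next;		/* horizontal link in the async ring */
	struct sketch_qtd *qtd_list;	/* pending transactions for this endpoint */
	struct sketch_qtd *dummy;	/* always-inactive tail qTD */
};
```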
29 | ||
30 | /* fill a qtd, returning how much of the buffer we were able to queue up */ | |
31 | ||
32 | static int | |
6dbd682b SR |
33 | qtd_fill(struct ehci_hcd *ehci, struct ehci_qtd *qtd, dma_addr_t buf, |
34 | size_t len, int token, int maxpacket) | |
1da177e4 LT |
35 | { |
36 | int i, count; | |
37 | u64 addr = buf; | |
38 | ||
39 | /* one buffer entry per 4K ... first might be short or unaligned */ | |
6dbd682b SR |
40 | qtd->hw_buf[0] = cpu_to_hc32(ehci, (u32)addr); |
41 | qtd->hw_buf_hi[0] = cpu_to_hc32(ehci, (u32)(addr >> 32)); | |
1da177e4 LT |
42 | count = 0x1000 - (buf & 0x0fff); /* rest of that page */ |
43 | if (likely (len < count)) /* ... iff needed */ | |
44 | count = len; | |
45 | else { | |
46 | buf += 0x1000; | |
47 | buf &= ~0x0fff; | |
48 | ||
49 | /* per-qtd limit: from 16K to 20K (best alignment) */ | |
50 | for (i = 1; count < len && i < 5; i++) { | |
51 | addr = buf; | |
6dbd682b SR |
52 | qtd->hw_buf[i] = cpu_to_hc32(ehci, (u32)addr); |
53 | qtd->hw_buf_hi[i] = cpu_to_hc32(ehci, | |
54 | (u32)(addr >> 32)); | |
1da177e4 LT |
55 | buf += 0x1000; |
56 | if ((count + 0x1000) < len) | |
57 | count += 0x1000; | |
58 | else | |
59 | count = len; | |
60 | } | |
61 | ||
62 | /* short packets may only terminate transfers */ | |
63 | if (count != len) | |
64 | count -= (count % maxpacket); | |
65 | } | |
6dbd682b | 66 | qtd->hw_token = cpu_to_hc32(ehci, (count << 16) | token); |
1da177e4 LT |
67 | qtd->length = count; |
68 | ||
69 | return count; | |
70 | } | |
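The capacity rule qtd_fill() implements is easy to miss in the loop above: five 4 KiB buffer pointers per qTD, only the first of which may start mid-page, and a partially filled qTD must end on a maxpacket boundary so the following qTD lines up. The standalone sketch below (userspace-style, not the driver's code) reproduces just that arithmetic.

```c
#include <stdint.h>
#include <stdio.h>

static size_t qtd_capacity(uint64_t buf, size_t len, size_t maxpacket)
{
	size_t count = 0x1000 - (buf & 0x0fff);	/* rest of the first page */
	int i;

	if (len < count)
		return len;

	for (i = 1; count < len && i < 5; i++) {
		if (count + 0x1000 < len)
			count += 0x1000;	/* another whole page */
		else
			return len;		/* the rest fits in this qTD */
	}

	/* didn't all fit: stop on a packet boundary so the next qTD lines up */
	return count - (count % maxpacket);
}

int main(void)
{
	/* 32 KiB transfer, buffer starting 0x300 bytes into a page, 512-byte
	 * maxpacket: 3328 + 4 * 4096 = 19712 bytes, trimmed to 19456. */
	printf("%zu\n", qtd_capacity(0x12340300ULL, 32768, 512));
	return 0;
}
```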
71 | ||
72 | /*-------------------------------------------------------------------------*/ | |
73 | ||
74 | static inline void | |
75 | qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd) | |
76 | { | |
3807e26d AD |
77 | struct ehci_qh_hw *hw = qh->hw; |
78 | ||
1da177e4 | 79 | /* writes to an active overlay are unsafe */ |
c1fdb68e | 80 | WARN_ON(qh->qh_state != QH_STATE_IDLE); |
1da177e4 | 81 | |
3807e26d AD |
82 | hw->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma); |
83 | hw->hw_alt_next = EHCI_LIST_END(ehci); | |
1da177e4 | 84 | |
a455212d AS |
85 | /* Except for control endpoints, we make hardware maintain data |
86 | * toggle (like OHCI) ... here (re)initialize the toggle in the QH, | |
87 | * and set the pseudo-toggle in udev. Only usb_clear_halt() will | |
88 | * ever clear it. | |
89 | */ | |
4c53de72 | 90 | if (!(hw->hw_info1 & cpu_to_hc32(ehci, QH_TOGGLE_CTL))) { |
a455212d AS |
91 | unsigned is_out, epnum; |
92 | ||
e04f5f7e | 93 | is_out = qh->is_out; |
3807e26d | 94 | epnum = (hc32_to_cpup(ehci, &hw->hw_info1) >> 8) & 0x0f; |
ffa0248e | 95 | if (unlikely(!usb_gettoggle(qh->ps.udev, epnum, is_out))) { |
3807e26d | 96 | hw->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE); |
ffa0248e | 97 | usb_settoggle(qh->ps.udev, epnum, is_out, 1); |
a455212d AS |
98 | } |
99 | } | |
100 | ||
3807e26d | 101 | hw->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING); |
1da177e4 LT |
102 | } |
103 | ||
104 | /* if it weren't for a common silicon quirk (writing the dummy into the qh | |
105 | * overlay, so qh->hw_token wrongly becomes inactive/halted), only fault | |
106 | * recovery (including urb dequeue) would need software changes to a QH... | |
107 | */ | |
108 | static void | |
109 | qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh) | |
110 | { | |
111 | struct ehci_qtd *qtd; | |
112 | ||
c1fdb68e | 113 | qtd = list_entry(qh->qtd_list.next, struct ehci_qtd, qtd_list); |
1da177e4 | 114 | |
c1fdb68e AS |
115 | /* |
116 | * first qtd may already be partially processed. | |
117 | * If we come here during unlink, the QH overlay region | |
118 | * might have reference to the just unlinked qtd. The | |
119 | * qtd is updated in qh_completions(). Update the QH | |
120 | * overlay here. | |
121 | */ | |
fc0855f2 | 122 | if (qh->hw->hw_token & ACTIVE_BIT(ehci)) { |
c1fdb68e | 123 | qh->hw->hw_qtd_next = qtd->hw_next; |
fc0855f2 AS |
124 | if (qh->should_be_inactive) |
125 | ehci_warn(ehci, "qh %p should be inactive!\n", qh); | |
126 | } else { | |
c1fdb68e | 127 | qh_update(ehci, qh, qtd); |
fc0855f2 AS |
128 | } |
129 | qh->should_be_inactive = 0; | |
1da177e4 LT |
130 | } |
131 | ||
132 | /*-------------------------------------------------------------------------*/ | |
133 | ||
914b7012 AS |
134 | static void qh_link_async(struct ehci_hcd *ehci, struct ehci_qh *qh); |
135 | ||
136 | static void ehci_clear_tt_buffer_complete(struct usb_hcd *hcd, | |
137 | struct usb_host_endpoint *ep) | |
138 | { | |
139 | struct ehci_hcd *ehci = hcd_to_ehci(hcd); | |
140 | struct ehci_qh *qh = ep->hcpriv; | |
141 | unsigned long flags; | |
142 | ||
143 | spin_lock_irqsave(&ehci->lock, flags); | |
144 | qh->clearing_tt = 0; | |
145 | if (qh->qh_state == QH_STATE_IDLE && !list_empty(&qh->qtd_list) | |
e8799906 | 146 | && ehci->rh_state == EHCI_RH_RUNNING) |
914b7012 AS |
147 | qh_link_async(ehci, qh); |
148 | spin_unlock_irqrestore(&ehci->lock, flags); | |
149 | } | |
150 | ||
151 | static void ehci_clear_tt_buffer(struct ehci_hcd *ehci, struct ehci_qh *qh, | |
152 | struct urb *urb, u32 token) | |
153 | { | |
154 | ||
155 | /* If an async split transaction gets an error or is unlinked, | |
156 | * the TT buffer may be left in an indeterminate state. We | |
157 | * have to clear the TT buffer. | |
158 | * | |
159 | * Note: this routine is never called for Isochronous transfers. | |
160 | */ | |
161 | if (urb->dev->tt && !usb_pipeint(urb->pipe) && !qh->clearing_tt) { | |
1c20163d | 162 | #ifdef CONFIG_DYNAMIC_DEBUG |
914b7012 AS |
163 | struct usb_device *tt = urb->dev->tt->hub; |
164 | dev_dbg(&tt->dev, | |
165 | "clear tt buffer port %d, a%d ep%d t%08x\n", | |
166 | urb->dev->ttport, urb->dev->devnum, | |
167 | usb_pipeendpoint(urb->pipe), token); | |
1c20163d | 168 | #endif /* CONFIG_DYNAMIC_DEBUG */ |
914b7012 AS |
169 | if (!ehci_is_TDI(ehci) |
170 | || urb->dev->tt->hub != | |
171 | ehci_to_hcd(ehci)->self.root_hub) { | |
172 | if (usb_hub_clear_tt_buffer(urb) == 0) | |
173 | qh->clearing_tt = 1; | |
174 | } else { | |
175 | ||
176 | /* REVISIT ARC-derived cores don't clear the root | |
177 | * hub TT buffer in this way... | |
178 | */ | |
179 | } | |
180 | } | |
181 | } | |
182 | ||
14c04c0f | 183 | static int qtd_copy_status ( |
1da177e4 LT |
184 | struct ehci_hcd *ehci, |
185 | struct urb *urb, | |
186 | size_t length, | |
187 | u32 token | |
188 | ) | |
189 | { | |
14c04c0f AS |
190 | int status = -EINPROGRESS; |
191 | ||
1da177e4 LT |
192 | /* count IN/OUT bytes, not SETUP (even short packets) */ |
193 | if (likely (QTD_PID (token) != 2)) | |
194 | urb->actual_length += length - QTD_LENGTH (token); | |
195 | ||
196 | /* don't modify error codes */ | |
eb231054 | 197 | if (unlikely(urb->unlinked)) |
14c04c0f | 198 | return status; |
1da177e4 LT |
199 | |
200 | /* force cleanup after short read; not always an error */ | |
201 | if (unlikely (IS_SHORT_READ (token))) | |
14c04c0f | 202 | status = -EREMOTEIO; |
1da177e4 LT |
203 | |
204 | /* serious "can't proceed" faults reported by the hardware */ | |
205 | if (token & QTD_STS_HALT) { | |
206 | if (token & QTD_STS_BABBLE) { | |
207 | /* FIXME "must" disable babbling device's port too */ | |
14c04c0f | 208 | status = -EOVERFLOW; |
ba516de3 AS |
209 | /* CERR nonzero + halt --> stall */ |
210 | } else if (QTD_CERR(token)) { | |
211 | status = -EPIPE; | |
212 | ||
213 | /* In theory, more than one of the following bits can be set | |
214 | * since they are sticky and the transaction is retried. | |
215 | * Which to test first is rather arbitrary. | |
216 | */ | |
1da177e4 LT |
217 | } else if (token & QTD_STS_MMF) { |
218 | /* fs/ls interrupt xfer missed the complete-split */ | |
14c04c0f | 219 | status = -EPROTO; |
1da177e4 | 220 | } else if (token & QTD_STS_DBE) { |
14c04c0f | 221 | status = (QTD_PID (token) == 1) /* IN ? */ |
1da177e4 LT |
222 | ? -ENOSR /* hc couldn't read data */ |
223 | : -ECOMM; /* hc couldn't write data */ | |
224 | } else if (token & QTD_STS_XACT) { | |
ba516de3 AS |
225 | /* timeout, bad CRC, wrong PID, etc */ |
226 | ehci_dbg(ehci, "devpath %s ep%d%s 3strikes\n", | |
227 | urb->dev->devpath, | |
228 | usb_pipeendpoint(urb->pipe), | |
229 | usb_pipein(urb->pipe) ? "in" : "out"); | |
14c04c0f | 230 | status = -EPROTO; |
ba516de3 AS |
231 | } else { /* unknown */ |
232 | status = -EPROTO; | |
233 | } | |
1da177e4 | 234 | } |
14c04c0f AS |
235 | |
236 | return status; | |
1da177e4 LT |
237 | } |
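For reference, the token-to-errno mapping above condenses to the following self-contained sketch. The CERR and PID fields (bits 11:10 and 9:8) are visible in the code in this file; the status-bit positions are restated from the EHCI qTD token layout only so the example compiles on its own. This is an illustration, not the driver's code.

```c
#include <errno.h>

#define STS_HALT	(1 << 6)	/* queue halted on a serious fault */
#define STS_DBE		(1 << 5)	/* data buffer over/underrun */
#define STS_BABBLE	(1 << 4)	/* device sent more data than expected */
#define STS_XACT	(1 << 3)	/* CRC error, timeout, bad PID, ... */
#define STS_MMF		(1 << 2)	/* missed complete-split microframe */
#define CERR(tok)	(((tok) >> 10) & 0x3)
#define PID_IS_IN(tok)	((((tok) >> 8) & 0x3) == 1)

static int token_to_errno(unsigned int token)
{
	if (!(token & STS_HALT))
		return -EINPROGRESS;	/* not halted: still running, or a short read */
	if (token & STS_BABBLE)
		return -EOVERFLOW;
	if (CERR(token))
		return -EPIPE;		/* halted with retries remaining: stall */
	if (token & STS_MMF)
		return -EPROTO;
	if (token & STS_DBE)
		return PID_IS_IN(token) ? -ENOSR : -ECOMM;
	return -EPROTO;			/* transaction error or unknown cause */
}
```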
238 | ||
239 | static void | |
14c04c0f | 240 | ehci_urb_done(struct ehci_hcd *ehci, struct urb *urb, int status) |
1da177e4 | 241 | { |
2656a9ab AS |
242 | if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) { |
243 | /* ... update hc-wide periodic stats */ | |
244 | ehci_to_hcd(ehci)->self.bandwidth_int_reqs--; | |
1da177e4 LT |
245 | } |
246 | ||
eb231054 AS |
247 | if (unlikely(urb->unlinked)) { |
248 | COUNT(ehci->stats.unlink); | |
249 | } else { | |
4f667627 DB |
250 | /* report non-error and short read status as zero */ |
251 | if (status == -EINPROGRESS || status == -EREMOTEIO) | |
14c04c0f | 252 | status = 0; |
eb231054 | 253 | COUNT(ehci->stats.complete); |
1da177e4 | 254 | } |
1da177e4 LT |
255 | |
256 | #ifdef EHCI_URB_TRACE | |
257 | ehci_dbg (ehci, | |
258 | "%s %s urb %p ep%d%s status %d len %d/%d\n", | |
441b62c1 | 259 | __func__, urb->dev->devpath, urb, |
1da177e4 LT |
260 | usb_pipeendpoint (urb->pipe), |
261 | usb_pipein (urb->pipe) ? "in" : "out", | |
14c04c0f | 262 | status, |
1da177e4 LT |
263 | urb->actual_length, urb->transfer_buffer_length); |
264 | #endif | |
265 | ||
e9df41c5 | 266 | usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb); |
4a00027d | 267 | usb_hcd_giveback_urb(ehci_to_hcd(ehci), urb, status); |
1da177e4 LT |
268 | } |
269 | ||
1da177e4 LT |
270 | static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh); |
271 | ||
272 | /* | |
273 | * Process and free completed qtds for a qh, returning URBs to drivers. | |
79bcf7b0 AS |
274 | * Chases up to qh->hw_current. Returns nonzero if the caller should |
275 | * unlink qh. | |
1da177e4 | 276 | */ |
1da177e4 | 277 | static unsigned |
7d12e780 | 278 | qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh) |
1da177e4 | 279 | { |
3a44494e | 280 | struct ehci_qtd *last, *end = qh->dummy; |
1da177e4 | 281 | struct list_head *entry, *tmp; |
3a44494e | 282 | int last_status; |
1da177e4 | 283 | int stopped; |
1da177e4 | 284 | u8 state; |
3807e26d | 285 | struct ehci_qh_hw *hw = qh->hw; |
1da177e4 | 286 | |
1da177e4 LT |
287 | /* completions (or tasks on other cpus) must never clobber HALT |
288 | * till we've gone through and cleaned everything up, even when | |
289 | * they add urbs to this qh's queue or mark them for unlinking. | |
290 | * | |
291 | * NOTE: unlinking expects to be done in queue order. | |
3a44494e AS |
292 | * |
293 | * It's a bug for qh->qh_state to be anything other than | |
294 | * QH_STATE_IDLE, unless our caller is scan_async() or | |
569b394f | 295 | * scan_intr(). |
1da177e4 LT |
296 | */ |
297 | state = qh->qh_state; | |
298 | qh->qh_state = QH_STATE_COMPLETING; | |
299 | stopped = (state == QH_STATE_IDLE); | |
300 | ||
3a44494e AS |
301 | rescan: |
302 | last = NULL; | |
303 | last_status = -EINPROGRESS; | |
7bc782d7 | 304 | qh->dequeue_during_giveback = 0; |
3a44494e | 305 | |
1da177e4 LT |
306 | /* remove de-activated QTDs from front of queue. |
307 | * after faults (including short reads), cleanup this urb | |
308 | * then let the queue advance. | |
309 | * if queue is stopped, handles unlinks. | |
310 | */ | |
311 | list_for_each_safe (entry, tmp, &qh->qtd_list) { | |
312 | struct ehci_qtd *qtd; | |
313 | struct urb *urb; | |
314 | u32 token = 0; | |
315 | ||
316 | qtd = list_entry (entry, struct ehci_qtd, qtd_list); | |
317 | urb = qtd->urb; | |
318 | ||
319 | /* clean up any state from previous QTD ...*/ | |
320 | if (last) { | |
321 | if (likely (last->urb != urb)) { | |
14c04c0f | 322 | ehci_urb_done(ehci, last->urb, last_status); |
b5f7a0ec | 323 | last_status = -EINPROGRESS; |
1da177e4 LT |
324 | } |
325 | ehci_qtd_free (ehci, last); | |
326 | last = NULL; | |
327 | } | |
328 | ||
329 | /* ignore urbs submitted during completions we reported */ | |
330 | if (qtd == end) | |
331 | break; | |
332 | ||
333 | /* hardware copies qtd out of qh overlay */ | |
334 | rmb (); | |
6dbd682b | 335 | token = hc32_to_cpu(ehci, qtd->hw_token); |
1da177e4 LT |
336 | |
337 | /* always clean up qtds the hc de-activated */ | |
a2c2706e | 338 | retry_xacterr: |
1da177e4 LT |
339 | if ((token & QTD_STS_ACTIVE) == 0) { |
340 | ||
332960bd VP |
341 | /* Report Data Buffer Error: non-fatal but useful */ |
342 | if (token & QTD_STS_DBE) | |
343 | ehci_dbg(ehci, | |
344 | "detected DataBufferErr for urb %p ep%d%s len %d, qtd %p [qh %p]\n", | |
345 | urb, | |
346 | usb_endpoint_num(&urb->ep->desc), | |
347 | usb_endpoint_dir_in(&urb->ep->desc) ? "in" : "out", | |
348 | urb->transfer_buffer_length, | |
349 | qtd, | |
350 | qh); | |
351 | ||
a082b5c7 DB |
352 | /* on STALL, error, and short reads this urb must |
353 | * complete and all its qtds must be recycled. | |
354 | */ | |
1da177e4 | 355 | if ((token & QTD_STS_HALT) != 0) { |
a2c2706e AS |
356 | |
357 | /* retry transaction errors until we | |
358 | * reach the software xacterr limit | |
359 | */ | |
360 | if ((token & QTD_STS_XACT) && | |
361 | QTD_CERR(token) == 0 && | |
ef4638f9 | 362 | ++qh->xacterrs < QH_XACTERR_MAX && |
a2c2706e AS |
363 | !urb->unlinked) { |
364 | ehci_dbg(ehci, | |
d0626808 | 365 | "detected XactErr len %zu/%zu retry %d\n", |
ef4638f9 | 366 | qtd->length - QTD_LENGTH(token), qtd->length, qh->xacterrs); |
a2c2706e AS |
367 | |
368 | /* reset the token in the qtd and the | |
369 | * qh overlay (which still contains | |
370 | * the qtd) so that we pick up from | |
371 | * where we left off | |
372 | */ | |
373 | token &= ~QTD_STS_HALT; | |
374 | token |= QTD_STS_ACTIVE | | |
375 | (EHCI_TUNE_CERR << 10); | |
376 | qtd->hw_token = cpu_to_hc32(ehci, | |
377 | token); | |
378 | wmb(); | |
3807e26d AD |
379 | hw->hw_token = cpu_to_hc32(ehci, |
380 | token); | |
a2c2706e AS |
381 | goto retry_xacterr; |
382 | } | |
1da177e4 | 383 | stopped = 1; |
fcc5184e | 384 | qh->unlink_reason |= QH_UNLINK_HALTED; |
1da177e4 LT |
385 | |
386 | /* magic dummy for some short reads; qh won't advance. | |
387 | * that silicon quirk can kick in with this dummy too. | |
a082b5c7 DB |
388 | * |
389 | * other short reads won't stop the queue, including | |
390 | * control transfers (status stage handles that) or | |
391 | * most other single-qtd reads ... the queue stops if | |
392 | * URB_SHORT_NOT_OK was set so the driver submitting | |
393 | * the urbs could clean it up. | |
1da177e4 LT |
394 | */ |
395 | } else if (IS_SHORT_READ (token) | |
6dbd682b SR |
396 | && !(qtd->hw_alt_next |
397 | & EHCI_LIST_END(ehci))) { | |
1da177e4 | 398 | stopped = 1; |
fcc5184e | 399 | qh->unlink_reason |= QH_UNLINK_SHORT_READ; |
1da177e4 LT |
400 | } |
401 | ||
402 | /* stop scanning when we reach qtds the hc is using */ | |
403 | } else if (likely (!stopped | |
c0c53dbc | 404 | && ehci->rh_state >= EHCI_RH_RUNNING)) { |
1da177e4 LT |
405 | break; |
406 | ||
a082b5c7 | 407 | /* scan the whole queue for unlinks whenever it stops */ |
1da177e4 LT |
408 | } else { |
409 | stopped = 1; | |
410 | ||
a082b5c7 | 411 | /* cancel everything if we halt, suspend, etc */ |
fcc5184e | 412 | if (ehci->rh_state < EHCI_RH_RUNNING) { |
14c04c0f | 413 | last_status = -ESHUTDOWN; |
fcc5184e AS |
414 | qh->unlink_reason |= QH_UNLINK_SHUTDOWN; |
415 | } | |
1da177e4 | 416 | |
a082b5c7 DB |
417 | /* this qtd is active; skip it unless a previous qtd |
418 | * for its urb faulted, or its urb was canceled. | |
1da177e4 | 419 | */ |
a082b5c7 | 420 | else if (last_status == -EINPROGRESS && !urb->unlinked) |
1da177e4 | 421 | continue; |
53bd6a60 | 422 | |
feca7746 AS |
423 | /* |
424 | * If this was the active qtd when the qh was unlinked | |
425 | * and the overlay's token is active, then the overlay | |
426 | * hasn't been written back to the qtd yet so use its | |
427 | * token instead of the qtd's. After the qtd is | |
428 | * processed and removed, the overlay won't be valid | |
429 | * any more. | |
430 | */ | |
431 | if (state == QH_STATE_IDLE && | |
432 | qh->qtd_list.next == &qtd->qtd_list && | |
433 | (hw->hw_token & ACTIVE_BIT(ehci))) { | |
3807e26d | 434 | token = hc32_to_cpu(ehci, hw->hw_token); |
feca7746 | 435 | hw->hw_token &= ~ACTIVE_BIT(ehci); |
fc0855f2 | 436 | qh->should_be_inactive = 1; |
1da177e4 | 437 | |
914b7012 AS |
438 | /* An unlink may leave an incomplete |
439 | * async transaction in the TT buffer. | |
440 | * We have to clear it. | |
441 | */ | |
442 | ehci_clear_tt_buffer(ehci, qh, urb, token); | |
443 | } | |
1da177e4 | 444 | } |
53bd6a60 | 445 | |
4f667627 DB |
446 | /* unless we already know the urb's status, collect qtd status |
447 | * and update count of bytes transferred. in common short read | |
448 | * cases with only one data qtd (including control transfers), | |
449 | * queue processing won't halt. but with two or more qtds (for | |
450 | * example, with a 32 KB transfer), when the first qtd gets a | |
451 | * short read the second must be removed by hand. | |
452 | */ | |
453 | if (last_status == -EINPROGRESS) { | |
454 | last_status = qtd_copy_status(ehci, urb, | |
455 | qtd->length, token); | |
456 | if (last_status == -EREMOTEIO | |
457 | && (qtd->hw_alt_next | |
458 | & EHCI_LIST_END(ehci))) | |
459 | last_status = -EINPROGRESS; | |
914b7012 AS |
460 | |
461 | /* As part of low/full-speed endpoint-halt processing | |
462 | * we must clear the TT buffer (11.17.5). | |
463 | */ | |
464 | if (unlikely(last_status != -EINPROGRESS && | |
c2f6595f AS |
465 | last_status != -EREMOTEIO)) { |
466 | /* The TT's in some hubs malfunction when they | |
467 | * receive this request following a STALL (they | |
468 | * stop sending isochronous packets). Since a | |
469 | * STALL can't leave the TT buffer in a busy | |
470 | * state (if you believe Figures 11-48 - 11-51 | |
471 | * in the USB 2.0 spec), we won't clear the TT | |
472 | * buffer in this case. Strictly speaking this | |
473 | * is a violation of the spec. | |
474 | */ | |
475 | if (last_status != -EPIPE) | |
476 | ehci_clear_tt_buffer(ehci, qh, urb, | |
477 | token); | |
478 | } | |
b0d9efba | 479 | } |
1da177e4 | 480 | |
a082b5c7 DB |
481 | /* if we're removing something not at the queue head, |
482 | * patch the hardware queue pointer. | |
483 | */ | |
1da177e4 LT |
484 | if (stopped && qtd->qtd_list.prev != &qh->qtd_list) { |
485 | last = list_entry (qtd->qtd_list.prev, | |
486 | struct ehci_qtd, qtd_list); | |
487 | last->hw_next = qtd->hw_next; | |
488 | } | |
a082b5c7 DB |
489 | |
490 | /* remove qtd; it's recycled after possible urb completion */ | |
1da177e4 LT |
491 | list_del (&qtd->qtd_list); |
492 | last = qtd; | |
a2c2706e AS |
493 | |
494 | /* reinit the xacterr counter for the next qtd */ | |
ef4638f9 | 495 | qh->xacterrs = 0; |
1da177e4 LT |
496 | } |
497 | ||
498 | /* last urb's completion might still need calling */ | |
499 | if (likely (last != NULL)) { | |
14c04c0f | 500 | ehci_urb_done(ehci, last->urb, last_status); |
1da177e4 LT |
501 | ehci_qtd_free (ehci, last); |
502 | } | |
503 | ||
3a44494e | 504 | /* Do we need to rescan for URBs dequeued during a giveback? */ |
7bc782d7 | 505 | if (unlikely(qh->dequeue_during_giveback)) { |
3a44494e AS |
506 | /* If the QH is already unlinked, do the rescan now. */ |
507 | if (state == QH_STATE_IDLE) | |
508 | goto rescan; | |
509 | ||
7bc782d7 | 510 | /* Otherwise the caller must unlink the QH. */ |
3a44494e AS |
511 | } |
512 | ||
1da177e4 LT |
513 | /* restore original state; caller must unlink or relink */ |
514 | qh->qh_state = state; | |
515 | ||
516 | /* be sure the hardware's done with the qh before refreshing | |
517 | * it after fault cleanup, or recovering from silicon wrongly | |
518 | * overlaying the dummy qtd (which reduces DMA chatter). | |
7bc782d7 AS |
519 | * |
520 | * We won't refresh a QH that's linked (after the HC | |
521 | * stopped the queue). That avoids a race: | |
522 | * - HC reads first part of QH; | |
523 | * - CPU updates that first part and the token; | |
524 | * - HC reads rest of that QH, including token | |
525 | * Result: HC gets an inconsistent image, and then | |
526 | * DMAs to/from the wrong memory (corrupting it). | |
527 | * | |
528 | * That should be rare for interrupt transfers, | |
529 | * except maybe high bandwidth ... | |
1da177e4 | 530 | */ |
7bc782d7 | 531 | if (stopped != 0 || hw->hw_qtd_next == EHCI_LIST_END(ehci)) |
fcc5184e | 532 | qh->unlink_reason |= QH_UNLINK_DUMMY_OVERLAY; |
1da177e4 | 533 | |
7bc782d7 | 534 | /* Let the caller know if the QH needs to be unlinked. */ |
fcc5184e | 535 | return qh->unlink_reason; |
1da177e4 LT |
536 | } |
537 | ||
538 | /*-------------------------------------------------------------------------*/ | |
539 | ||
1da177e4 LT |
540 | /* |
541 | * reverse of qh_urb_transaction: free a list of TDs. | |
542 | * used for cleanup after errors, before HC sees an URB's TDs. | |
543 | */ | |
544 | static void qtd_list_free ( | |
545 | struct ehci_hcd *ehci, | |
546 | struct urb *urb, | |
547 | struct list_head *qtd_list | |
548 | ) { | |
549 | struct list_head *entry, *temp; | |
550 | ||
551 | list_for_each_safe (entry, temp, qtd_list) { | |
552 | struct ehci_qtd *qtd; | |
553 | ||
554 | qtd = list_entry (entry, struct ehci_qtd, qtd_list); | |
555 | list_del (&qtd->qtd_list); | |
556 | ehci_qtd_free (ehci, qtd); | |
557 | } | |
558 | } | |
559 | ||
560 | /* | |
561 | * create a list of filled qtds for this URB; won't link into qh. | |
562 | */ | |
563 | static struct list_head * | |
564 | qh_urb_transaction ( | |
565 | struct ehci_hcd *ehci, | |
566 | struct urb *urb, | |
567 | struct list_head *head, | |
55016f10 | 568 | gfp_t flags |
1da177e4 LT |
569 | ) { |
570 | struct ehci_qtd *qtd, *qtd_prev; | |
571 | dma_addr_t buf; | |
40f8db8f | 572 | int len, this_sg_len, maxpacket; |
1da177e4 LT |
573 | int is_input; |
574 | u32 token; | |
40f8db8f AS |
575 | int i; |
576 | struct scatterlist *sg; | |
1da177e4 LT |
577 | |
578 | /* | |
579 | * URBs map to sequences of QTDs: one logical transaction | |
580 | */ | |
581 | qtd = ehci_qtd_alloc (ehci, flags); | |
582 | if (unlikely (!qtd)) | |
583 | return NULL; | |
584 | list_add_tail (&qtd->qtd_list, head); | |
585 | qtd->urb = urb; | |
586 | ||
587 | token = QTD_STS_ACTIVE; | |
588 | token |= (EHCI_TUNE_CERR << 10); | |
589 | /* for split transactions, SplitXState initialized to zero */ | |
590 | ||
591 | len = urb->transfer_buffer_length; | |
592 | is_input = usb_pipein (urb->pipe); | |
593 | if (usb_pipecontrol (urb->pipe)) { | |
594 | /* SETUP pid */ | |
6dbd682b SR |
595 | qtd_fill(ehci, qtd, urb->setup_dma, |
596 | sizeof (struct usb_ctrlrequest), | |
597 | token | (2 /* "setup" */ << 8), 8); | |
1da177e4 LT |
598 | |
599 | /* ... and always at least one more pid */ | |
600 | token ^= QTD_TOGGLE; | |
601 | qtd_prev = qtd; | |
602 | qtd = ehci_qtd_alloc (ehci, flags); | |
603 | if (unlikely (!qtd)) | |
604 | goto cleanup; | |
605 | qtd->urb = urb; | |
6dbd682b | 606 | qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma); |
1da177e4 | 607 | list_add_tail (&qtd->qtd_list, head); |
6912354a AS |
608 | |
609 | /* for zero length DATA stages, STATUS is always IN */ | |
610 | if (len == 0) | |
611 | token |= (1 /* "in" */ << 8); | |
53bd6a60 | 612 | } |
1da177e4 LT |
613 | |
614 | /* | |
615 | * data transfer stage: buffer setup | |
616 | */ | |
bc677d5b | 617 | i = urb->num_mapped_sgs; |
40f8db8f | 618 | if (len > 0 && i > 0) { |
910f8d0c | 619 | sg = urb->sg; |
40f8db8f AS |
620 | buf = sg_dma_address(sg); |
621 | ||
622 | /* urb->transfer_buffer_length may be smaller than the | |
623 | * size of the scatterlist (or vice versa) | |
624 | */ | |
625 | this_sg_len = min_t(int, sg_dma_len(sg), len); | |
626 | } else { | |
627 | sg = NULL; | |
628 | buf = urb->transfer_dma; | |
629 | this_sg_len = len; | |
630 | } | |
1da177e4 | 631 | |
6912354a | 632 | if (is_input) |
1da177e4 LT |
633 | token |= (1 /* "in" */ << 8); |
634 | /* else it's already initted to "out" pid (0 << 8) */ | |
635 | ||
8437ab99 | 636 | maxpacket = usb_maxpacket(urb->dev, urb->pipe, !is_input); |
1da177e4 LT |
637 | |
638 | /* | |
639 | * buffer gets wrapped in one or more qtds; | |
640 | * last one may be "short" (including zero len) | |
641 | * and may serve as a control status ack | |
642 | */ | |
643 | for (;;) { | |
644 | int this_qtd_len; | |
645 | ||
40f8db8f AS |
646 | this_qtd_len = qtd_fill(ehci, qtd, buf, this_sg_len, token, |
647 | maxpacket); | |
648 | this_sg_len -= this_qtd_len; | |
1da177e4 LT |
649 | len -= this_qtd_len; |
650 | buf += this_qtd_len; | |
a082b5c7 DB |
651 | |
652 | /* | |
653 | * short reads advance to a "magic" dummy instead of the next | |
654 | * qtd ... that forces the queue to stop, for manual cleanup. | |
655 | * (this will usually be overridden later.) | |
656 | */ | |
1da177e4 | 657 | if (is_input) |
3807e26d | 658 | qtd->hw_alt_next = ehci->async->hw->hw_alt_next; |
1da177e4 LT |
659 | |
660 | /* qh makes control packets use qtd toggle; maybe switch it */ | |
661 | if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0) | |
662 | token ^= QTD_TOGGLE; | |
663 | ||
40f8db8f AS |
664 | if (likely(this_sg_len <= 0)) { |
665 | if (--i <= 0 || len <= 0) | |
666 | break; | |
667 | sg = sg_next(sg); | |
668 | buf = sg_dma_address(sg); | |
669 | this_sg_len = min_t(int, sg_dma_len(sg), len); | |
670 | } | |
1da177e4 LT |
671 | |
672 | qtd_prev = qtd; | |
673 | qtd = ehci_qtd_alloc (ehci, flags); | |
674 | if (unlikely (!qtd)) | |
675 | goto cleanup; | |
676 | qtd->urb = urb; | |
6dbd682b | 677 | qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma); |
1da177e4 LT |
678 | list_add_tail (&qtd->qtd_list, head); |
679 | } | |
680 | ||
a082b5c7 DB |
681 | /* |
682 | * unless the caller requires manual cleanup after short reads, | |
683 | * have the alt_next mechanism keep the queue running after the | |
684 | * last data qtd (the only one, for control and most other cases). | |
1da177e4 LT |
685 | */ |
686 | if (likely ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0 | |
687 | || usb_pipecontrol (urb->pipe))) | |
6dbd682b | 688 | qtd->hw_alt_next = EHCI_LIST_END(ehci); |
1da177e4 LT |
689 | |
690 | /* | |
691 | * control requests may need a terminating data "status" ack; | |
9a971dda ML |
692 | * other OUT ones may need a terminating short packet |
693 | * (zero length). | |
1da177e4 | 694 | */ |
6912354a | 695 | if (likely (urb->transfer_buffer_length != 0)) { |
1da177e4 LT |
696 | int one_more = 0; |
697 | ||
698 | if (usb_pipecontrol (urb->pipe)) { | |
699 | one_more = 1; | |
700 | token ^= 0x0100; /* "in" <--> "out" */ | |
701 | token |= QTD_TOGGLE; /* force DATA1 */ | |
9a971dda | 702 | } else if (usb_pipeout(urb->pipe) |
1da177e4 LT |
703 | && (urb->transfer_flags & URB_ZERO_PACKET) |
704 | && !(urb->transfer_buffer_length % maxpacket)) { | |
705 | one_more = 1; | |
706 | } | |
707 | if (one_more) { | |
708 | qtd_prev = qtd; | |
709 | qtd = ehci_qtd_alloc (ehci, flags); | |
710 | if (unlikely (!qtd)) | |
711 | goto cleanup; | |
712 | qtd->urb = urb; | |
6dbd682b | 713 | qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma); |
1da177e4 LT |
714 | list_add_tail (&qtd->qtd_list, head); |
715 | ||
716 | /* never any data in such packets */ | |
6dbd682b | 717 | qtd_fill(ehci, qtd, 0, 0, token, 0); |
1da177e4 LT |
718 | } |
719 | } | |
720 | ||
721 | /* by default, enable interrupt on urb completion */ | |
722 | if (likely (!(urb->transfer_flags & URB_NO_INTERRUPT))) | |
6dbd682b | 723 | qtd->hw_token |= cpu_to_hc32(ehci, QTD_IOC); |
1da177e4 LT |
724 | return head; |
725 | ||
726 | cleanup: | |
727 | qtd_list_free (ehci, urb, head); | |
728 | return NULL; | |
729 | } | |
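A rough sense of how many qTDs qh_urb_transaction() allocates can be read off the loop above. The sketch below estimates the count for a bulk URB with a contiguous, page-aligned buffer (so each qTD fills all five pointers); scatter-gather URBs may need more, since each qTD break must also land on a maxpacket boundary. Illustration only, not driver code.

```c
#include <stddef.h>

static unsigned bulk_qtd_estimate(size_t len, unsigned maxpacket, int zero_packet)
{
	const size_t per_qtd = 5 * 4096;	/* aligned buffers fill all 5 pointers */
	unsigned n;

	if (len == 0)
		return 1;			/* a zero-length URB still gets one qTD */

	n = (len + per_qtd - 1) / per_qtd;
	if (zero_packet && (len % maxpacket) == 0)
		n++;				/* terminating zero-length OUT packet */
	return n;
}

/* Example: a 64 KiB page-aligned bulk OUT with maxpacket 512 and
 * URB_ZERO_PACKET set -> 4 data qTDs plus 1 zero-length qTD = 5. */
```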
730 | ||
731 | /*-------------------------------------------------------------------------*/ | |
732 | ||
733 | // Would be best to create all qh's from config descriptors, | |
734 | // when each interface/altsetting is established. Unlink | |
735 | // any previous qh and cancel its urbs first; endpoints are | |
736 | // implicitly reset then (data toggle too). | |
737 | // That'd mean updating how usbcore talks to HCDs. (2.7?) | |
738 | ||
739 | ||
740 | /* | |
741 | * Each QH holds a qtd list; a QH is used for everything except iso. | |
742 | * | |
743 | * For interrupt urbs, the scheduler must set the microframe scheduling | |
744 | * mask(s) each time the QH gets scheduled. For highspeed, that's | |
745 | * just one microframe in the s-mask. For split interrupt transactions | |
746 | * there are additional complications: c-mask, maybe FSTNs. | |
747 | */ | |
748 | static struct ehci_qh * | |
749 | qh_make ( | |
750 | struct ehci_hcd *ehci, | |
751 | struct urb *urb, | |
55016f10 | 752 | gfp_t flags |
1da177e4 LT |
753 | ) { |
754 | struct ehci_qh *qh = ehci_qh_alloc (ehci, flags); | |
e3b89080 | 755 | struct usb_host_endpoint *ep; |
1da177e4 LT |
756 | u32 info1 = 0, info2 = 0; |
757 | int is_input, type; | |
758 | int maxp = 0; | |
e3b89080 | 759 | int mult; |
340ba5f9 | 760 | struct usb_tt *tt = urb->dev->tt; |
3807e26d | 761 | struct ehci_qh_hw *hw; |
1da177e4 LT |
762 | |
763 | if (!qh) | |
764 | return qh; | |
765 | ||
766 | /* | |
767 | * init endpoint/device data for this QH | |
768 | */ | |
769 | info1 |= usb_pipeendpoint (urb->pipe) << 8; | |
770 | info1 |= usb_pipedevice (urb->pipe) << 0; | |
771 | ||
772 | is_input = usb_pipein (urb->pipe); | |
773 | type = usb_pipetype (urb->pipe); | |
e3b89080 | 774 | ep = usb_pipe_endpoint (urb->dev, urb->pipe); |
8437ab99 | 775 | maxp = usb_endpoint_maxp (&ep->desc); |
e3b89080 | 776 | mult = usb_endpoint_maxp_mult (&ep->desc); |
1da177e4 | 777 | |
caa9ef67 DB |
778 | /* 1024 byte maxpacket is a hardware ceiling. High bandwidth |
779 | * acts like up to 3KB, but is built from smaller packets. | |
780 | */ | |
8437ab99 FB |
781 | if (maxp > 1024) { |
782 | ehci_dbg(ehci, "bogus qh maxpacket %d\n", maxp); | |
caa9ef67 DB |
783 | goto done; |
784 | } | |
785 | ||
1da177e4 LT |
786 | /* Compute interrupt scheduling parameters just once, and save. |
787 | * - allowing for high bandwidth, how many nsec/uframe are used? | |
788 | * - split transactions need a second CSPLIT uframe; same question | |
789 | * - splits also need a schedule gap (for full/low speed I/O) | |
790 | * - qh has a polling interval | |
791 | * | |
792 | * For control/bulk requests, the HC or TT handles these. | |
793 | */ | |
794 | if (type == PIPE_INTERRUPT) { | |
d0ce5c6b AS |
795 | unsigned tmp; |
796 | ||
ffa0248e | 797 | qh->ps.usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH, |
8437ab99 | 798 | is_input, 0, mult * maxp)); |
ffa0248e | 799 | qh->ps.phase = NO_FRAME; |
1da177e4 LT |
800 | |
801 | if (urb->dev->speed == USB_SPEED_HIGH) { | |
ffa0248e | 802 | qh->ps.c_usecs = 0; |
1da177e4 LT |
803 | qh->gap_uf = 0; |
804 | ||
ffa0248e | 805 | if (urb->interval > 1 && urb->interval < 8) { |
1da177e4 LT |
806 | /* NOTE interval 2 or 4 uframes could work. |
807 | * But interval 1 scheduling is simpler, and | |
808 | * includes high bandwidth. | |
809 | */ | |
1b9a38bf | 810 | urb->interval = 1; |
ffa0248e AS |
811 | } else if (urb->interval > ehci->periodic_size << 3) { |
812 | urb->interval = ehci->periodic_size << 3; | |
1da177e4 | 813 | } |
ffa0248e | 814 | qh->ps.period = urb->interval >> 3; |
d0ce5c6b AS |
815 | |
816 | /* period for bandwidth allocation */ | |
817 | tmp = min_t(unsigned, EHCI_BANDWIDTH_SIZE, | |
818 | 1 << (urb->ep->desc.bInterval - 1)); | |
819 | ||
820 | /* Allow urb->interval to override */ | |
821 | qh->ps.bw_uperiod = min_t(unsigned, tmp, urb->interval); | |
822 | qh->ps.bw_period = qh->ps.bw_uperiod >> 3; | |
1da177e4 | 823 | } else { |
d0384200 DB |
824 | int think_time; |
825 | ||
1da177e4 LT |
826 | /* gap is f(FS/LS transfer times) */ |
827 | qh->gap_uf = 1 + usb_calc_bus_time (urb->dev->speed, | |
828 | is_input, 0, maxp) / (125 * 1000); | |
829 | ||
830 | /* FIXME this just approximates SPLIT/CSPLIT times */ | |
831 | if (is_input) { // SPLIT, gap, CSPLIT+DATA | |
ffa0248e AS |
832 | qh->ps.c_usecs = qh->ps.usecs + HS_USECS(0); |
833 | qh->ps.usecs = HS_USECS(1); | |
1da177e4 | 834 | } else { // SPLIT+DATA, gap, CSPLIT |
ffa0248e AS |
835 | qh->ps.usecs += HS_USECS(1); |
836 | qh->ps.c_usecs = HS_USECS(0); | |
1da177e4 LT |
837 | } |
838 | ||
d0384200 | 839 | think_time = tt ? tt->think_time : 0; |
ffa0248e | 840 | qh->ps.tt_usecs = NS_TO_US(think_time + |
d0384200 | 841 | usb_calc_bus_time (urb->dev->speed, |
8437ab99 | 842 | is_input, 0, maxp)); |
ffa0248e AS |
843 | if (urb->interval > ehci->periodic_size) |
844 | urb->interval = ehci->periodic_size; | |
845 | qh->ps.period = urb->interval; | |
d0ce5c6b AS |
846 | |
847 | /* period for bandwidth allocation */ | |
848 | tmp = min_t(unsigned, EHCI_BANDWIDTH_FRAMES, | |
849 | urb->ep->desc.bInterval); | |
850 | tmp = rounddown_pow_of_two(tmp); | |
851 | ||
852 | /* Allow urb->interval to override */ | |
853 | qh->ps.bw_period = min_t(unsigned, tmp, urb->interval); | |
854 | qh->ps.bw_uperiod = qh->ps.bw_period << 3; | |
1da177e4 LT |
855 | } |
856 | } | |
857 | ||
858 | /* support for tt scheduling, and access to toggles */ | |
ffa0248e AS |
859 | qh->ps.udev = urb->dev; |
860 | qh->ps.ep = urb->ep; | |
1da177e4 LT |
861 | |
862 | /* using TT? */ | |
863 | switch (urb->dev->speed) { | |
864 | case USB_SPEED_LOW: | |
4c53de72 | 865 | info1 |= QH_LOW_SPEED; |
1da177e4 LT |
866 | /* FALL THROUGH */ |
867 | ||
868 | case USB_SPEED_FULL: | |
869 | /* EPS 0 means "full" */ | |
870 | if (type != PIPE_INTERRUPT) | |
871 | info1 |= (EHCI_TUNE_RL_TT << 28); | |
872 | if (type == PIPE_CONTROL) { | |
4c53de72 AS |
873 | info1 |= QH_CONTROL_EP; /* for TT */ |
874 | info1 |= QH_TOGGLE_CTL; /* toggle from qtd */ | |
1da177e4 LT |
875 | } |
876 | info1 |= maxp << 16; | |
877 | ||
878 | info2 |= (EHCI_TUNE_MULT_TT << 30); | |
8cd42e97 KG |
879 | |
880 | /* Some Freescale processors have an erratum in which the | |
881 | * port number in the queue head was 0..N-1 instead of 1..N. | |
882 | */ | |
883 | if (ehci_has_fsl_portno_bug(ehci)) | |
884 | info2 |= (urb->dev->ttport-1) << 23; | |
885 | else | |
886 | info2 |= urb->dev->ttport << 23; | |
1da177e4 LT |
887 | |
888 | /* set the address of the TT; for TDI's integrated | |
889 | * root hub tt, leave it zeroed. | |
890 | */ | |
340ba5f9 DB |
891 | if (tt && tt->hub != ehci_to_hcd(ehci)->self.root_hub) |
892 | info2 |= tt->hub->devnum << 16; | |
1da177e4 LT |
893 | |
894 | /* NOTE: if (PIPE_INTERRUPT) { scheduler sets c-mask } */ | |
895 | ||
896 | break; | |
897 | ||
898 | case USB_SPEED_HIGH: /* no TT involved */ | |
4c53de72 | 899 | info1 |= QH_HIGH_SPEED; |
1da177e4 LT |
900 | if (type == PIPE_CONTROL) { |
901 | info1 |= (EHCI_TUNE_RL_HS << 28); | |
902 | info1 |= 64 << 16; /* usb2 fixed maxpacket */ | |
4c53de72 | 903 | info1 |= QH_TOGGLE_CTL; /* toggle from qtd */ |
1da177e4 LT |
904 | info2 |= (EHCI_TUNE_MULT_HS << 30); |
905 | } else if (type == PIPE_BULK) { | |
906 | info1 |= (EHCI_TUNE_RL_HS << 28); | |
caa9ef67 DB |
907 | /* The USB spec says that high speed bulk endpoints |
908 | * always use 512 byte maxpacket. But some device | |
909 | * vendors decided to ignore that, and MSFT is happy | |
910 | * to help them do so. So now people expect to use | |
911 | * such nonconformant devices with Linux too; sigh. | |
912 | */ | |
8437ab99 | 913 | info1 |= maxp << 16; |
1da177e4 LT |
914 | info2 |= (EHCI_TUNE_MULT_HS << 30); |
915 | } else { /* PIPE_INTERRUPT */ | |
8437ab99 | 916 | info1 |= maxp << 16; |
e3b89080 | 917 | info2 |= mult << 30; |
1da177e4 LT |
918 | } |
919 | break; | |
920 | default: | |
82491c2a GKH |
921 | ehci_dbg(ehci, "bogus dev %p speed %d\n", urb->dev, |
922 | urb->dev->speed); | |
1da177e4 | 923 | done: |
c83e1a9f | 924 | qh_destroy(ehci, qh); |
1da177e4 LT |
925 | return NULL; |
926 | } | |
927 | ||
928 | /* NOTE: if (PIPE_INTERRUPT) { scheduler sets s-mask } */ | |
929 | ||
c1fdb68e | 930 | /* init as live, toggle clear */ |
1da177e4 | 931 | qh->qh_state = QH_STATE_IDLE; |
3807e26d AD |
932 | hw = qh->hw; |
933 | hw->hw_info1 = cpu_to_hc32(ehci, info1); | |
934 | hw->hw_info2 = cpu_to_hc32(ehci, info2); | |
e04f5f7e | 935 | qh->is_out = !is_input; |
a455212d | 936 | usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1); |
1da177e4 LT |
937 | return qh; |
938 | } | |
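The high-speed interrupt interval handling in qh_make() is worth a worked example. Usbcore normally hands the driver urb->interval already expanded to 2^(bInterval-1) microframes; the code above then rounds 2..7 microframe intervals down to 1 and caps anything longer than the periodic schedule (1024 frames by default, i.e. 8192 microframes). The standalone sketch below just replays that arithmetic; the default schedule size is an assumption stated here, not taken from this file.

```c
#include <stdio.h>

int main(void)
{
	unsigned periodic_size = 1024;		/* frames in the periodic schedule */
	unsigned bIntervals[] = { 1, 3, 4, 16 };
	unsigned i;

	for (i = 0; i < 4; i++) {
		unsigned interval = 1u << (bIntervals[i] - 1);	/* in microframes */

		if (interval > 1 && interval < 8)
			interval = 1;			/* simplify sub-frame polling */
		else if (interval > periodic_size << 3)
			interval = periodic_size << 3;	/* cap at schedule length */

		printf("bInterval %2u -> %u uframe(s), period %u frame(s)\n",
		       bIntervals[i], interval, interval >> 3);
	}
	return 0;
}
```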
939 | ||
940 | /*-------------------------------------------------------------------------*/ | |
941 | ||
31446610 AS |
942 | static void enable_async(struct ehci_hcd *ehci) |
943 | { | |
944 | if (ehci->async_count++) | |
945 | return; | |
946 | ||
947 | /* Stop waiting to turn off the async schedule */ | |
948 | ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_DISABLE_ASYNC); | |
949 | ||
950 | /* Don't start the schedule until ASS is 0 */ | |
951 | ehci_poll_ASS(ehci); | |
18aafe64 | 952 | turn_on_io_watchdog(ehci); |
31446610 AS |
953 | } |
954 | ||
955 | static void disable_async(struct ehci_hcd *ehci) | |
956 | { | |
957 | if (--ehci->async_count) | |
958 | return; | |
959 | ||
6e018751 AS |
960 | /* The async schedule and unlink lists are supposed to be empty */ |
961 | WARN_ON(ehci->async->qh_next.qh || !list_empty(&ehci->async_unlink) || | |
214ac7a0 | 962 | !list_empty(&ehci->async_idle)); |
31446610 AS |
963 | |
964 | /* Don't turn off the schedule until ASS is 1 */ | |
965 | ehci_poll_ASS(ehci); | |
966 | } | |
967 | ||
1da177e4 LT |
968 | /* move qh (and its qtds) onto async queue; maybe enable queue. */ |
969 | ||
970 | static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh) | |
971 | { | |
6dbd682b | 972 | __hc32 dma = QH_NEXT(ehci, qh->qh_dma); |
1da177e4 LT |
973 | struct ehci_qh *head; |
974 | ||
914b7012 AS |
975 | /* Don't link a QH if there's a Clear-TT-Buffer pending */ |
976 | if (unlikely(qh->clearing_tt)) | |
977 | return; | |
978 | ||
3a44494e AS |
979 | WARN_ON(qh->qh_state != QH_STATE_IDLE); |
980 | ||
a455212d | 981 | /* clear halt and/or toggle; and maybe recover from silicon quirk */ |
3a44494e | 982 | qh_refresh(ehci, qh); |
1da177e4 LT |
983 | |
984 | /* splice right after start */ | |
31446610 | 985 | head = ehci->async; |
1da177e4 | 986 | qh->qh_next = head->qh_next; |
3807e26d | 987 | qh->hw->hw_next = head->hw->hw_next; |
1da177e4 LT |
988 | wmb (); |
989 | ||
990 | head->qh_next.qh = qh; | |
3807e26d | 991 | head->hw->hw_next = dma; |
1da177e4 LT |
992 | |
993 | qh->qh_state = QH_STATE_LINKED; | |
7bc782d7 | 994 | qh->xacterrs = 0; |
fcc5184e | 995 | qh->unlink_reason = 0; |
1da177e4 | 996 | /* qtd completions reported later by interrupt */ |
31446610 AS |
997 | |
998 | enable_async(ehci); | |
1da177e4 LT |
999 | } |
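The "splice right after start" step in qh_link_async() follows the usual publish-then-link discipline: the new QH is fully written (including its horizontal hw_next), a write barrier is issued, and only then is it made reachable from the head, so the controller walking the ring by DMA never sees a half-built entry. A minimal standalone sketch of that ordering, with a stand-in for the kernel's wmb():

```c
struct node {
	struct node *next;
	int payload;
};

#define pub_wmb() __sync_synchronize()	/* stand-in for wmb(); sketch only */

static void splice_after_head(struct node *head, struct node *new_node)
{
	new_node->next = head->next;	/* new entry points at the old successor */
	pub_wmb();			/* its contents must be visible first */
	head->next = new_node;		/* only now make it reachable */
}
```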
1000 | ||
1001 | /*-------------------------------------------------------------------------*/ | |
1002 | ||
1da177e4 LT |
1003 | /* |
1004 | * For control/bulk/interrupt, return QH with these TDs appended. | |
1005 | * Allocates and initializes the QH if necessary. | |
1006 | * Returns null if it can't allocate a QH it needs to. | |
1007 | * If the QH has TDs (urbs) already, that's great. | |
1008 | */ | |
1009 | static struct ehci_qh *qh_append_tds ( | |
1010 | struct ehci_hcd *ehci, | |
1011 | struct urb *urb, | |
1012 | struct list_head *qtd_list, | |
1013 | int epnum, | |
1014 | void **ptr | |
1015 | ) | |
1016 | { | |
1017 | struct ehci_qh *qh = NULL; | |
fd05e720 | 1018 | __hc32 qh_addr_mask = cpu_to_hc32(ehci, 0x7f); |
1da177e4 LT |
1019 | |
1020 | qh = (struct ehci_qh *) *ptr; | |
1021 | if (unlikely (qh == NULL)) { | |
1022 | /* can't sleep here, we have ehci->lock... */ | |
1023 | qh = qh_make (ehci, urb, GFP_ATOMIC); | |
1024 | *ptr = qh; | |
1025 | } | |
1026 | if (likely (qh != NULL)) { | |
1027 | struct ehci_qtd *qtd; | |
1028 | ||
1029 | if (unlikely (list_empty (qtd_list))) | |
1030 | qtd = NULL; | |
1031 | else | |
1032 | qtd = list_entry (qtd_list->next, struct ehci_qtd, | |
1033 | qtd_list); | |
1034 | ||
1035 | /* control qh may need patching ... */ | |
1036 | if (unlikely (epnum == 0)) { | |
1037 | ||
1038 | /* usb_reset_device() briefly reverts to address 0 */ | |
1039 | if (usb_pipedevice (urb->pipe) == 0) | |
3807e26d | 1040 | qh->hw->hw_info1 &= ~qh_addr_mask; |
1da177e4 LT |
1041 | } |
1042 | ||
1043 | /* just one way to queue requests: swap with the dummy qtd. | |
1044 | * only hc or qh_refresh() ever modify the overlay. | |
1045 | */ | |
1046 | if (likely (qtd != NULL)) { | |
1047 | struct ehci_qtd *dummy; | |
1048 | dma_addr_t dma; | |
6dbd682b | 1049 | __hc32 token; |
1da177e4 LT |
1050 | |
1051 | /* to avoid racing the HC, use the dummy td instead of | |
1052 | * the first td of our list (becomes new dummy). both | |
1053 | * tds stay deactivated until we're done, when the | |
1054 | * HC is allowed to fetch the old dummy (4.10.2). | |
1055 | */ | |
1056 | token = qtd->hw_token; | |
6dbd682b | 1057 | qtd->hw_token = HALT_BIT(ehci); |
41f05ded | 1058 | |
1da177e4 LT |
1059 | dummy = qh->dummy; |
1060 | ||
1061 | dma = dummy->qtd_dma; | |
1062 | *dummy = *qtd; | |
1063 | dummy->qtd_dma = dma; | |
1064 | ||
1065 | list_del (&qtd->qtd_list); | |
1066 | list_add (&dummy->qtd_list, qtd_list); | |
7d283aee | 1067 | list_splice_tail(qtd_list, &qh->qtd_list); |
1da177e4 | 1068 | |
6dbd682b | 1069 | ehci_qtd_init(ehci, qtd, qtd->qtd_dma); |
1da177e4 LT |
1070 | qh->dummy = qtd; |
1071 | ||
1072 | /* hc must see the new dummy at list end */ | |
1073 | dma = qtd->qtd_dma; | |
1074 | qtd = list_entry (qh->qtd_list.prev, | |
1075 | struct ehci_qtd, qtd_list); | |
6dbd682b | 1076 | qtd->hw_next = QTD_NEXT(ehci, dma); |
1da177e4 LT |
1077 | |
1078 | /* let the hc process these next qtds */ | |
1079 | wmb (); | |
1080 | dummy->hw_token = token; | |
1081 | ||
c83e1a9f | 1082 | urb->hcpriv = qh; |
1da177e4 LT |
1083 | } |
1084 | } | |
1085 | return qh; | |
1086 | } | |
1087 | ||
1088 | /*-------------------------------------------------------------------------*/ | |
1089 | ||
1090 | static int | |
1091 | submit_async ( | |
1092 | struct ehci_hcd *ehci, | |
1da177e4 LT |
1093 | struct urb *urb, |
1094 | struct list_head *qtd_list, | |
55016f10 | 1095 | gfp_t mem_flags |
1da177e4 | 1096 | ) { |
1da177e4 LT |
1097 | int epnum; |
1098 | unsigned long flags; | |
1099 | struct ehci_qh *qh = NULL; | |
e9df41c5 | 1100 | int rc; |
1da177e4 | 1101 | |
e9df41c5 | 1102 | epnum = urb->ep->desc.bEndpointAddress; |
1da177e4 LT |
1103 | |
1104 | #ifdef EHCI_URB_TRACE | |
eb34a908 DD |
1105 | { |
1106 | struct ehci_qtd *qtd; | |
1107 | qtd = list_entry(qtd_list->next, struct ehci_qtd, qtd_list); | |
1108 | ehci_dbg(ehci, | |
1109 | "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n", | |
1110 | __func__, urb->dev->devpath, urb, | |
1111 | epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out", | |
1112 | urb->transfer_buffer_length, | |
1113 | qtd, urb->ep->hcpriv); | |
1114 | } | |
1da177e4 LT |
1115 | #endif |
1116 | ||
1117 | spin_lock_irqsave (&ehci->lock, flags); | |
541c7d43 | 1118 | if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) { |
8de98402 BH |
1119 | rc = -ESHUTDOWN; |
1120 | goto done; | |
1121 | } | |
e9df41c5 AS |
1122 | rc = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb); |
1123 | if (unlikely(rc)) | |
1124 | goto done; | |
8de98402 | 1125 | |
e9df41c5 | 1126 | qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv); |
8de98402 | 1127 | if (unlikely(qh == NULL)) { |
e9df41c5 | 1128 | usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb); |
8de98402 BH |
1129 | rc = -ENOMEM; |
1130 | goto done; | |
1131 | } | |
1da177e4 LT |
1132 | |
1133 | /* Control/bulk operations through TTs don't need scheduling, | |
1134 | * the HC and TT handle it when the TT has a buffer ready. | |
1135 | */ | |
8de98402 | 1136 | if (likely (qh->qh_state == QH_STATE_IDLE)) |
7a0f0d95 | 1137 | qh_link_async(ehci, qh); |
8de98402 | 1138 | done: |
1da177e4 | 1139 | spin_unlock_irqrestore (&ehci->lock, flags); |
8de98402 | 1140 | if (unlikely (qh == NULL)) |
1da177e4 | 1141 | qtd_list_free (ehci, urb, qtd_list); |
8de98402 | 1142 | return rc; |
1da177e4 LT |
1143 | } |
1144 | ||
1145 | /*-------------------------------------------------------------------------*/ | |
726a85ca | 1146 | #ifdef CONFIG_USB_HCD_TEST_MODE |
9841f37a MG |
1147 | /* |
1148 | * This function creates the qtds and submits them for the | |
1149 | * SINGLE_STEP_SET_FEATURE Test. | |
1150 | * This is done in two parts: first the SETUP request for GetDescriptor is |
1151 | * sent; 15 seconds later, the IN stage starts to request data from the device. |
1152 | * |
1153 | * is_setup : input argument that decides which of the two stages is |
1154 | * performed; TRUE - SETUP and FALSE - IN+STATUS |
1155 | * Returns 0 on success |
1156 | */ | |
1157 | static int submit_single_step_set_feature( | |
1158 | struct usb_hcd *hcd, | |
1159 | struct urb *urb, | |
1160 | int is_setup | |
1161 | ) { | |
1162 | struct ehci_hcd *ehci = hcd_to_ehci(hcd); | |
1163 | struct list_head qtd_list; | |
1164 | struct list_head *head; | |
1165 | ||
1166 | struct ehci_qtd *qtd, *qtd_prev; | |
1167 | dma_addr_t buf; | |
1168 | int len, maxpacket; | |
1169 | u32 token; | |
1170 | ||
1171 | INIT_LIST_HEAD(&qtd_list); | |
1172 | head = &qtd_list; | |
1173 | ||
1174 | /* URBs map to sequences of QTDs: one logical transaction */ | |
1175 | qtd = ehci_qtd_alloc(ehci, GFP_KERNEL); | |
1176 | if (unlikely(!qtd)) | |
1177 | return -1; | |
1178 | list_add_tail(&qtd->qtd_list, head); | |
1179 | qtd->urb = urb; | |
1180 | ||
1181 | token = QTD_STS_ACTIVE; | |
1182 | token |= (EHCI_TUNE_CERR << 10); | |
1183 | ||
1184 | len = urb->transfer_buffer_length; | |
1185 | /* | |
1186 | * Check if the request is to perform just the SETUP stage (getDesc) | |
1187 | * as in SINGLE_STEP_SET_FEATURE test, DATA stage (IN) happens | |
1188 | * 15 secs after the setup | |
1189 | */ | |
1190 | if (is_setup) { | |
1191 | /* SETUP pid */ | |
1192 | qtd_fill(ehci, qtd, urb->setup_dma, | |
1193 | sizeof(struct usb_ctrlrequest), | |
1194 | token | (2 /* "setup" */ << 8), 8); | |
1195 | ||
1196 | submit_async(ehci, urb, &qtd_list, GFP_ATOMIC); | |
1197 | return 0; /*Return now; we shall come back after 15 seconds*/ | |
1198 | } | |
1199 | ||
1200 | /* | |
1201 | * IN: data transfer stage: buffer setup : start the IN txn phase for | |
1202 | * the GetDescriptor SETUP which was sent 15 seconds earlier |
1203 | */ | |
1204 | token ^= QTD_TOGGLE; /*We need to start IN with DATA-1 Pid-sequence*/ | |
1205 | buf = urb->transfer_dma; | |
1206 | ||
1207 | token |= (1 /* "in" */ << 8); /*This is IN stage*/ | |
1208 | ||
8437ab99 | 1209 | maxpacket = usb_maxpacket(urb->dev, urb->pipe, 0); |
9841f37a MG |
1210 | |
1211 | qtd_fill(ehci, qtd, buf, len, token, maxpacket); | |
1212 | ||
1213 | /* | |
1214 | * Our IN phase shall always be a short read; so keep the queue running | |
1215 | * and let it advance to the next qtd, which is the zero-length OUT status |
1216 | */ | |
1217 | qtd->hw_alt_next = EHCI_LIST_END(ehci); | |
1218 | ||
1219 | /* STATUS stage for GetDesc control request */ | |
1220 | token ^= 0x0100; /* "in" <--> "out" */ | |
1221 | token |= QTD_TOGGLE; /* force DATA1 */ | |
1222 | ||
1223 | qtd_prev = qtd; | |
1224 | qtd = ehci_qtd_alloc(ehci, GFP_ATOMIC); | |
1225 | if (unlikely(!qtd)) | |
1226 | goto cleanup; | |
1227 | qtd->urb = urb; | |
1228 | qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma); | |
1229 | list_add_tail(&qtd->qtd_list, head); | |
1230 | ||
1231 | /* don't fill any data in such packets */ |
1232 | qtd_fill(ehci, qtd, 0, 0, token, 0); | |
1233 | ||
1234 | /* by default, enable interrupt on urb completion */ | |
1235 | if (likely(!(urb->transfer_flags & URB_NO_INTERRUPT))) | |
1236 | qtd->hw_token |= cpu_to_hc32(ehci, QTD_IOC); | |
1237 | ||
1238 | submit_async(ehci, urb, &qtd_list, GFP_KERNEL); | |
1239 | ||
1240 | return 0; | |
1241 | ||
1242 | cleanup: | |
1243 | qtd_list_free(ehci, urb, head); | |
1244 | return -1; | |
1245 | } | |
726a85ca | 1246 | #endif /* CONFIG_USB_HCD_TEST_MODE */ |
9841f37a MG |
1247 | |
1248 | /*-------------------------------------------------------------------------*/ | |
1da177e4 | 1249 | |
3c273a05 | 1250 | static void single_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh) |
1da177e4 | 1251 | { |
3c273a05 | 1252 | struct ehci_qh *prev; |
1da177e4 | 1253 | |
3c273a05 | 1254 | /* Add to the end of the list of QHs waiting for the next IAAD */ |
6402c796 | 1255 | qh->qh_state = QH_STATE_UNLINK_WAIT; |
6e018751 | 1256 | list_add_tail(&qh->unlink_node, &ehci->async_unlink); |
3c273a05 AS |
1257 | |
1258 | /* Unlink it from the schedule */ | |
1259 | prev = ehci->async; | |
1260 | while (prev->qh_next.qh != qh) | |
1261 | prev = prev->qh_next.qh; | |
1262 | ||
1263 | prev->hw->hw_next = qh->hw->hw_next; | |
1264 | prev->qh_next = qh->qh_next; | |
1265 | if (ehci->qh_scan_next == qh) | |
1266 | ehci->qh_scan_next = qh->qh_next.qh; | |
1267 | } | |
1da177e4 | 1268 | |
214ac7a0 | 1269 | static void start_iaa_cycle(struct ehci_hcd *ehci) |
3c273a05 | 1270 | { |
3c273a05 AS |
1271 | /* If the controller isn't running, we don't have to wait for it */ |
1272 | if (unlikely(ehci->rh_state < EHCI_RH_RUNNING)) { | |
214ac7a0 | 1273 | end_unlink_async(ehci); |
31446610 | 1274 | |
f96fba0d AS |
1275 | /* Otherwise start a new IAA cycle if one isn't already running */ |
1276 | } else if (ehci->rh_state == EHCI_RH_RUNNING && | |
1277 | !ehci->iaa_in_progress) { | |
6e0c3339 | 1278 | |
3c273a05 AS |
1279 | /* Make sure the unlinks are all visible to the hardware */ |
1280 | wmb(); | |
1da177e4 | 1281 | |
3c273a05 AS |
1282 | ehci_writel(ehci, ehci->command | CMD_IAAD, |
1283 | &ehci->regs->command); | |
1284 | ehci_readl(ehci, &ehci->regs->command); | |
f96fba0d | 1285 | ehci->iaa_in_progress = true; |
3c273a05 | 1286 | ehci_enable_event(ehci, EHCI_HRTIMER_IAA_WATCHDOG, true); |
1da177e4 | 1287 | } |
3c273a05 AS |
1288 | } |
1289 | ||
f96fba0d | 1290 | static void end_iaa_cycle(struct ehci_hcd *ehci) |
3c273a05 | 1291 | { |
2f7ac6c1 GJ |
1292 | if (ehci->has_synopsys_hc_bug) |
1293 | ehci_writel(ehci, (u32) ehci->async->qh_dma, | |
1294 | &ehci->regs->async_next); | |
3c273a05 | 1295 | |
214ac7a0 AS |
1296 | /* The current IAA cycle has ended */ |
1297 | ehci->iaa_in_progress = false; | |
1298 | ||
f96fba0d AS |
1299 | end_unlink_async(ehci); |
1300 | } | |
1301 | ||
1302 | /* See if the async qh for the qtds being unlinked are now gone from the HC */ | |
1303 | ||
1304 | static void end_unlink_async(struct ehci_hcd *ehci) | |
1305 | { | |
1306 | struct ehci_qh *qh; | |
1307 | bool early_exit; | |
1308 | ||
214ac7a0 AS |
1309 | if (list_empty(&ehci->async_unlink)) |
1310 | return; | |
1311 | qh = list_first_entry(&ehci->async_unlink, struct ehci_qh, | |
1312 | unlink_node); /* QH whose IAA cycle just ended */ | |
1313 | ||
1314 | /* | |
1315 | * If async_unlinking is set then this routine is already running, | |
1316 | * either on the stack or on another CPU. | |
1317 | */ | |
1318 | early_exit = ehci->async_unlinking; | |
1319 | ||
1320 | /* If the controller isn't running, process all the waiting QHs */ | |
1321 | if (ehci->rh_state < EHCI_RH_RUNNING) | |
1322 | list_splice_tail_init(&ehci->async_unlink, &ehci->async_idle); | |
1323 | ||
1324 | /* | |
1325 | * Intel (?) bug: The HC can write back the overlay region even | |
1326 | * after the IAA interrupt occurs. In self-defense, always go | |
1327 | * through two IAA cycles for each QH. | |
1328 | */ | |
87d61912 AS |
1329 | else if (qh->qh_state == QH_STATE_UNLINK) { |
1330 | /* | |
1331 | * Second IAA cycle has finished. Process only the first | |
1332 | * waiting QH (NVIDIA (?) bug). | |
1333 | */ | |
1334 | list_move_tail(&qh->unlink_node, &ehci->async_idle); | |
1335 | } | |
1336 | ||
1337 | /* | |
1338 | * AMD/ATI (?) bug: The HC can continue to use an active QH long | |
1339 | * after the IAA interrupt occurs. To prevent problems, QHs that | |
1340 | * may still be active will wait until 2 ms have passed with no | |
1341 | * change to the hw_current and hw_token fields (this delay occurs | |
1342 | * between the two IAA cycles). | |
1343 | * | |
1344 | * The EHCI spec (4.8.2) says that active QHs must not be removed | |
1345 | * from the async schedule and recommends waiting until the QH | |
1346 | * goes inactive. This is ridiculous because the QH will _never_ | |
1347 | * become inactive if the endpoint NAKs indefinitely. | |
1348 | */ | |
1349 | ||
1350 | /* Some reasons for unlinking guarantee the QH can't be active */ | |
1351 | else if (qh->unlink_reason & (QH_UNLINK_HALTED | | |
1352 | QH_UNLINK_SHORT_READ | QH_UNLINK_DUMMY_OVERLAY)) | |
1353 | goto DelayDone; | |
1354 | ||
1355 | /* The QH can't be active if the queue was and still is empty... */ | |
1356 | else if ((qh->unlink_reason & QH_UNLINK_QUEUE_EMPTY) && | |
1357 | list_empty(&qh->qtd_list)) | |
1358 | goto DelayDone; | |
1359 | ||
1360 | /* ... or if the QH has halted */ | |
1361 | else if (qh->hw->hw_token & cpu_to_hc32(ehci, QTD_STS_HALT)) | |
1362 | goto DelayDone; | |
1363 | ||
1364 | /* Otherwise we have to wait until the QH stops changing */ | |
1365 | else { | |
1366 | __hc32 qh_current, qh_token; | |
1367 | ||
1368 | qh_current = qh->hw->hw_current; | |
1369 | qh_token = qh->hw->hw_token; | |
1370 | if (qh_current != ehci->old_current || | |
1371 | qh_token != ehci->old_token) { | |
1372 | ehci->old_current = qh_current; | |
1373 | ehci->old_token = qh_token; | |
1374 | ehci_enable_event(ehci, | |
1375 | EHCI_HRTIMER_ACTIVE_UNLINK, true); | |
1376 | return; | |
1377 | } | |
1378 | DelayDone: | |
214ac7a0 AS |
1379 | qh->qh_state = QH_STATE_UNLINK; |
1380 | early_exit = true; | |
1381 | } | |
87d61912 | 1382 | ehci->old_current = ~0; /* Prepare for next QH */ |
214ac7a0 AS |
1383 | |
1384 | /* Start a new IAA cycle if any QHs are waiting for it */ | |
1385 | if (!list_empty(&ehci->async_unlink)) | |
1386 | start_iaa_cycle(ehci); | |
1387 | ||
1388 | /* | |
1389 | * Don't allow nesting or concurrent calls, | |
1390 | * or wait for the second IAA cycle for the next QH. | |
1391 | */ | |
1392 | if (early_exit) | |
1393 | return; | |
1394 | ||
3c273a05 | 1395 | /* Process the idle QHs */ |
3c273a05 | 1396 | ehci->async_unlinking = true; |
214ac7a0 AS |
1397 | while (!list_empty(&ehci->async_idle)) { |
1398 | qh = list_first_entry(&ehci->async_idle, struct ehci_qh, | |
6e018751 AS |
1399 | unlink_node); |
1400 | list_del(&qh->unlink_node); | |
3c273a05 AS |
1401 | |
1402 | qh->qh_state = QH_STATE_IDLE; | |
1403 | qh->qh_next.qh = NULL; | |
1404 | ||
79bcf7b0 AS |
1405 | if (!list_empty(&qh->qtd_list)) |
1406 | qh_completions(ehci, qh); | |
3c273a05 AS |
1407 | if (!list_empty(&qh->qtd_list) && |
1408 | ehci->rh_state == EHCI_RH_RUNNING) | |
1409 | qh_link_async(ehci, qh); | |
1410 | disable_async(ehci); | |
1411 | } | |
1412 | ehci->async_unlinking = false; | |
1da177e4 LT |
1413 | } |
1414 | ||
6e0c3339 AS |
1415 | static void start_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh); |
1416 | ||
32830f20 AS |
1417 | static void unlink_empty_async(struct ehci_hcd *ehci) |
1418 | { | |
6e0c3339 AS |
1419 | struct ehci_qh *qh; |
1420 | struct ehci_qh *qh_to_unlink = NULL; | |
6e0c3339 | 1421 | int count = 0; |
32830f20 | 1422 | |
6e0c3339 AS |
1423 | /* Find the last async QH which has been empty for a timer cycle */ |
1424 | for (qh = ehci->async->qh_next.qh; qh; qh = qh->qh_next.qh) { | |
32830f20 AS |
1425 | if (list_empty(&qh->qtd_list) && |
1426 | qh->qh_state == QH_STATE_LINKED) { | |
6e0c3339 | 1427 | ++count; |
afc2c9a2 | 1428 | if (qh->unlink_cycle != ehci->async_unlink_cycle) |
6e0c3339 | 1429 | qh_to_unlink = qh; |
32830f20 AS |
1430 | } |
1431 | } | |
1432 | ||
6e0c3339 | 1433 | /* If nothing else is being unlinked, unlink the last empty QH */ |
214ac7a0 | 1434 | if (list_empty(&ehci->async_unlink) && qh_to_unlink) { |
fcc5184e | 1435 | qh_to_unlink->unlink_reason |= QH_UNLINK_QUEUE_EMPTY; |
6e0c3339 AS |
1436 | start_unlink_async(ehci, qh_to_unlink); |
1437 | --count; | |
1438 | } | |
32830f20 | 1439 | |
6e0c3339 AS |
1440 | /* Other QHs will be handled later */ |
1441 | if (count > 0) { | |
32830f20 AS |
1442 | ehci_enable_event(ehci, EHCI_HRTIMER_ASYNC_UNLINKS, true); |
1443 | ++ehci->async_unlink_cycle; | |
1444 | } | |
1445 | } | |
1446 | ||
8df0d77d AS |
1447 | #ifdef CONFIG_PM |
1448 | ||
2a40f324 | 1449 | /* The root hub is suspended; unlink all the async QHs */ |
8df0d77d | 1450 | static void unlink_empty_async_suspended(struct ehci_hcd *ehci) |
2a40f324 AS |
1451 | { |
1452 | struct ehci_qh *qh; | |
1453 | ||
1454 | while (ehci->async->qh_next.qh) { | |
1455 | qh = ehci->async->qh_next.qh; | |
1456 | WARN_ON(!list_empty(&qh->qtd_list)); | |
1457 | single_unlink_async(ehci, qh); | |
1458 | } | |
2a40f324 AS |
1459 | } |
1460 | ||
8df0d77d AS |
1461 | #endif |
1462 | ||
1da177e4 LT |
1463 | /* makes sure the async qh will become idle */ |
1464 | /* caller must own ehci->lock */ | |
1465 | ||
3c273a05 | 1466 | static void start_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh) |
1da177e4 | 1467 | { |
7bc782d7 AS |
1468 | /* If the QH isn't linked then there's nothing we can do. */ |
1469 | if (qh->qh_state != QH_STATE_LINKED) | |
1da177e4 | 1470 | return; |
1da177e4 | 1471 | |
3c273a05 | 1472 | single_unlink_async(ehci, qh); |
214ac7a0 | 1473 | start_iaa_cycle(ehci); |
1da177e4 LT |
1474 | } |
1475 | ||
1476 | /*-------------------------------------------------------------------------*/ | |
1477 | ||
7d12e780 | 1478 | static void scan_async (struct ehci_hcd *ehci) |
1da177e4 LT |
1479 | { |
1480 | struct ehci_qh *qh; | |
32830f20 | 1481 | bool check_unlinks_later = false; |
1da177e4 | 1482 | |
004c1968 AS |
1483 | ehci->qh_scan_next = ehci->async->qh_next.qh; |
1484 | while (ehci->qh_scan_next) { | |
1485 | qh = ehci->qh_scan_next; | |
1486 | ehci->qh_scan_next = qh->qh_next.qh; | |
79bcf7b0 | 1487 | |
004c1968 AS |
1488 | /* clean any finished work for this qh */ |
1489 | if (!list_empty(&qh->qtd_list)) { | |
1490 | int temp; | |
1491 | ||
1492 | /* | |
1493 | * Unlinks could happen here; completion reporting | |
1494 | * drops the lock. That's why ehci->qh_scan_next | |
1495 | * always holds the next qh to scan; if the next qh | |
1496 | * gets unlinked then ehci->qh_scan_next is adjusted | |
3c273a05 | 1497 | * in single_unlink_async(). |
1da177e4 | 1498 | */ |
004c1968 | 1499 | temp = qh_completions(ehci, qh); |
79bcf7b0 | 1500 | if (unlikely(temp)) { |
3c273a05 | 1501 | start_unlink_async(ehci, qh); |
32830f20 AS |
1502 | } else if (list_empty(&qh->qtd_list) |
1503 | && qh->qh_state == QH_STATE_LINKED) { | |
1504 | qh->unlink_cycle = ehci->async_unlink_cycle; | |
1505 | check_unlinks_later = true; | |
79bcf7b0 | 1506 | } |
004c1968 | 1507 | } |
32830f20 | 1508 | } |
1da177e4 | 1509 | |
32830f20 AS |
1510 | /* |
1511 | * Unlink empty entries, reducing DMA usage as well | |
1512 | * as HCD schedule-scanning costs. Delay for any qh | |
1513 | * we just scanned, there's a not-unusual case that it | |
1514 | * doesn't stay idle for long. | |
1515 | */ | |
1516 | if (check_unlinks_later && ehci->rh_state == EHCI_RH_RUNNING && | |
1517 | !(ehci->enabled_hrtimer_events & | |
1518 | BIT(EHCI_HRTIMER_ASYNC_UNLINKS))) { | |
1519 | ehci_enable_event(ehci, EHCI_HRTIMER_ASYNC_UNLINKS, true); | |
1520 | ++ehci->async_unlink_cycle; | |
1da177e4 | 1521 | } |
1da177e4 | 1522 | } |