/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * Ring initialization rules:
 * 1. Each segment is initialized to zero, except for link TRBs.
 * 2. Ring cycle state = 0.  This represents Producer Cycle State (PCS) or
 *    Consumer Cycle State (CCS), depending on ring function.
 * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
 *
 * Ring behavior rules:
 * 1. A ring is empty if enqueue == dequeue.  This means there will always be
 *    at least one free TRB in the ring.  This is useful if you want to turn
 *    that into a link TRB and expand the ring.
 * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
 *    link TRB, then load the pointer with the address in the link TRB.  If the
 *    link TRB had its toggle bit set, you may need to update the ring cycle
 *    state (see cycle bit rules).  You may have to do this multiple times
 *    until you reach a non-link TRB.
 * 3. A ring is full if enqueue++ (for the definition of increment above)
 *    equals the dequeue pointer.
 *
 * Cycle bit rules:
 * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 * 2. When a producer increments an enqueue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 *
 * Producer rules:
 * 1. Check if ring is full before you enqueue.
 * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
 *    Update enqueue pointer between each write (which may update the ring
 *    cycle state).
 * 3. Notify consumer.  If SW is producer, it rings the doorbell for command
 *    and endpoint rings.  If the HC is the producer for the event ring,
 *    it generates an interrupt according to interrupt moderation rules.
 *
 * Consumer rules:
 * 1. Check if TRB belongs to you.  If the cycle bit == your ring cycle state,
 *    the TRB is owned by the consumer (a minimal illustration of this test
 *    follows the includes below).
 * 2. Update dequeue pointer (which may update the ring cycle state) and
 *    continue processing TRBs until you reach a TRB which is not owned by you.
 * 3. Notify the producer.  SW is the consumer for the event ring, and it
 *    updates the event ring dequeue pointer.  HC is the consumer for the
 *    command and endpoint rings; it generates events on the event ring for
 *    these.
 */

#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-mtk.h"

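/*
 * Minimal illustration of consumer rule 1 above.  This helper is an
 * editorial sketch, not something the driver itself calls: a TRB belongs
 * to the consumer only while its cycle bit matches the ring's cycle state.
 */
static inline bool trb_owned_by_consumer(struct xhci_ring *ring,
					 union xhci_trb *trb)
{
	return (le32_to_cpu(trb->generic.field[3]) & TRB_CYCLE) ==
		ring->cycle_state;
}
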
/*
 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
 * address of the TRB.
 */
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
		union xhci_trb *trb)
{
	unsigned long segment_offset;

	if (!seg || !trb || trb < seg->trbs)
		return 0;
	/* offset in TRBs */
	segment_offset = trb - seg->trbs;
	if (segment_offset >= TRBS_PER_SEGMENT)
		return 0;
	return seg->dma + (segment_offset * sizeof(*trb));
}

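/*
 * Worked example for xhci_trb_virt_to_dma() above (illustrative numbers
 * only): with a segment mapped at seg->dma == 0x10000000, passing
 * trb == &seg->trbs[4] returns 0x10000040, since each 16-byte TRB
 * advances the DMA address by 0x10.
 */
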
static bool trb_is_noop(union xhci_trb *trb)
{
	return TRB_TYPE_NOOP_LE32(trb->generic.field[3]);
}

static bool trb_is_link(union xhci_trb *trb)
{
	return TRB_TYPE_LINK_LE32(trb->link.control);
}

static bool last_trb_on_seg(struct xhci_segment *seg, union xhci_trb *trb)
{
	return trb == &seg->trbs[TRBS_PER_SEGMENT - 1];
}

static bool last_trb_on_ring(struct xhci_ring *ring,
			struct xhci_segment *seg, union xhci_trb *trb)
{
	return last_trb_on_seg(seg, trb) && (seg->next == ring->first_seg);
}

static bool link_trb_toggles_cycle(union xhci_trb *trb)
{
	return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
}

/* Updates trb to point to the next TRB in the ring, and updates seg if the
 * next TRB is in a new segment.  This does not skip over link TRBs, and it
 * does not affect the ring dequeue or enqueue pointers.
 */
static void next_trb(struct xhci_hcd *xhci,
		struct xhci_ring *ring,
		struct xhci_segment **seg,
		union xhci_trb **trb)
{
	if (trb_is_link(*trb)) {
		*seg = (*seg)->next;
		*trb = ((*seg)->trbs);
	} else {
		(*trb)++;
	}
}

/*
 * See Cycle bit rules.  SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 */
static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	ring->deq_updates++;

	/* event ring doesn't have link trbs, check for last trb */
	if (ring->type == TYPE_EVENT) {
		if (!last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
			ring->dequeue++;
			return;
		}
		if (last_trb_on_ring(ring, ring->deq_seg, ring->dequeue))
			ring->cycle_state ^= 1;
		ring->deq_seg = ring->deq_seg->next;
		ring->dequeue = ring->deq_seg->trbs;
		return;
	}

	/* All other rings have link trbs */
	if (!trb_is_link(ring->dequeue)) {
		ring->dequeue++;
		ring->num_trbs_free++;
	}
	while (trb_is_link(ring->dequeue)) {
		ring->deq_seg = ring->deq_seg->next;
		ring->dequeue = ring->deq_seg->trbs;
	}
	return;
}

/*
 * See Cycle bit rules.  SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 *
 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
 * chain bit is set), then set the chain bit in all the following link TRBs.
 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
 * have their chain bit cleared (so that each Link TRB is a separate TD).
 *
 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
 * set, but other sections talk about dealing with the chain bit set.  This was
 * fixed in the 0.96 specification errata, but we have to assume that all 0.95
 * xHCI hardware can't handle the chain bit being cleared on a link TRB.
 *
 * @more_trbs_coming:	Will you enqueue more TRBs before calling
 *			prepare_transfer()?
 */
static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
			bool more_trbs_coming)
{
	u32 chain;
	union xhci_trb *next;

	chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
	/* If this is not event ring, there is one less usable TRB */
	if (!trb_is_link(ring->enqueue))
		ring->num_trbs_free--;
	next = ++(ring->enqueue);

	ring->enq_updates++;
	/* Advance the enqueue pointer further if that was a link TRB */
	while (trb_is_link(next)) {
		/*
		 * If the caller doesn't plan on enqueueing more TDs before
		 * ringing the doorbell, then we don't want to give the link TRB
		 * to the hardware just yet.  We'll give the link TRB back in
		 * prepare_ring() just before we enqueue the TD at the top of
		 * the ring.
		 */
		if (!chain && !more_trbs_coming)
			break;

		/* If we're not dealing with 0.95 hardware or isoc rings on
		 * AMD 0.96 host, carry over the chain bit of the previous TRB
		 * (which may mean the chain bit is cleared).
		 */
		if (!(ring->type == TYPE_ISOC &&
		      (xhci->quirks & XHCI_AMD_0x96_HOST)) &&
		    !xhci_link_trb_quirk(xhci)) {
			next->link.control &= cpu_to_le32(~TRB_CHAIN);
			next->link.control |= cpu_to_le32(chain);
		}
		/* Give this link TRB to the hardware */
		wmb();
		next->link.control ^= cpu_to_le32(TRB_CYCLE);

		/* Toggle the cycle bit after the last ring segment. */
		if (link_trb_toggles_cycle(next))
			ring->cycle_state ^= 1;

		ring->enq_seg = ring->enq_seg->next;
		ring->enqueue = ring->enq_seg->trbs;
		next = ring->enqueue;
	}
}

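/*
 * For context: the enqueue side of this file drives inc_enq() through
 * queue_trb(), which writes a TRB's four fields with the current cycle
 * state and then calls inc_enq(xhci, ring, more_trbs_coming) so the
 * enqueue pointer can hop over any link TRBs it lands on.
 */
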
/*
 * Check to see if there's room to enqueue num_trbs on the ring and make sure
 * enqueue pointer will not advance into dequeue segment.  See rules above.
 */
static inline int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
		unsigned int num_trbs)
{
	int num_trbs_in_deq_seg;

	if (ring->num_trbs_free < num_trbs)
		return 0;

	if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {
		num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;
		if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg)
			return 0;
	}

	return 1;
}

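/*
 * Worked example of the check above (illustrative numbers): with the
 * dequeue pointer 10 TRBs into its segment (num_trbs_in_deq_seg == 10)
 * and num_trbs_free == 70, a request for 64 TRBs passes the first test
 * but fails the second one (70 < 64 + 10): granting it could advance the
 * enqueue pointer into the dequeue segment, which would get in the way
 * of ring expansion.
 */
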
/* Ring the host controller doorbell after placing a command on the ring */
void xhci_ring_cmd_db(struct xhci_hcd *xhci)
{
	if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING))
		return;

	xhci_dbg(xhci, "// Ding dong!\n");
	writel(DB_VALUE_HOST, &xhci->dba->doorbell[0]);
	/* Flush PCI posted writes */
	readl(&xhci->dba->doorbell[0]);
}

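/*
 * Abort the command ring by setting the Command Abort (CA) bit in the
 * CRCR register, then poll for the Command Ring Running (CRR) bit to
 * clear; see the spec reference in the comment below.
 */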
static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
{
	u64 temp_64;
	int ret;

	xhci_dbg(xhci, "Abort command ring\n");

	temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;

	/*
	 * Writing the CMD_RING_ABORT bit should cause a cmd completion event,
	 * however on some host hw the CMD_RING_RUNNING bit is correctly cleared
	 * but the completion event is never sent.  Use the cmd timeout timer to
	 * handle those cases.  Use twice the time to cover the bit polling retry.
	 */
	mod_timer(&xhci->cmd_timer, jiffies + (2 * XHCI_CMD_DEFAULT_TIMEOUT));
	xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
			&xhci->op_regs->cmd_ring);

	/* Section 4.6.1.2 of xHCI 1.0 spec says software should
	 * time the completion of all xHCI commands, including
	 * the Command Abort operation.  If software doesn't see
	 * CRR negated in a timely manner (e.g. longer than 5
	 * seconds), then it should assume that there are
	 * larger problems with the xHC and assert HCRST.
	 */
	ret = xhci_handshake(&xhci->op_regs->cmd_ring,
			CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
	if (ret < 0) {
		/* we are about to kill xhci, give it one more chance */
		xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
			      &xhci->op_regs->cmd_ring);
		udelay(1000);
		ret = xhci_handshake(&xhci->op_regs->cmd_ring,
				     CMD_RING_RUNNING, 0, 3 * 1000 * 1000);
		if (ret == 0)
			return 0;

		xhci_err(xhci, "Stopping the command ring failed, maybe the host is dead\n");
		del_timer(&xhci->cmd_timer);
		xhci->xhc_state |= XHCI_STATE_DYING;
		xhci_halt(xhci);
		return -ESHUTDOWN;
	}

	return 0;
}

void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index,
		unsigned int stream_id)
{
	__le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
	unsigned int ep_state = ep->ep_state;

	/* Don't ring the doorbell for this endpoint if there are pending
	 * cancellations because we don't want to interrupt processing.
	 * We don't want to restart any stream rings if there's a set dequeue
	 * pointer command pending because the device can choose to start any
	 * stream once the endpoint is on the HW schedule.
	 */
	if ((ep_state & EP_HALT_PENDING) || (ep_state & SET_DEQ_PENDING) ||
	    (ep_state & EP_HALTED))
		return;
	writel(DB_VALUE(ep_index, stream_id), db_addr);
	/* The CPU has better things to do at this point than wait for a
	 * write-posting flush.  It'll get there soon enough.
	 */
}

/* Ring the doorbell for any rings with pending URBs */
static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index)
{
	unsigned int stream_id;
	struct xhci_virt_ep *ep;

	ep = &xhci->devs[slot_id]->eps[ep_index];

	/* A ring has pending URBs if its TD list is not empty */
	if (!(ep->ep_state & EP_HAS_STREAMS)) {
		if (ep->ring && !(list_empty(&ep->ring->td_list)))
			xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
		return;
	}

	for (stream_id = 1; stream_id < ep->stream_info->num_streams;
			stream_id++) {
		struct xhci_stream_info *stream_info = ep->stream_info;
		if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
			xhci_ring_ep_doorbell(xhci, slot_id, ep_index,
						stream_id);
	}
}

/* Get the right ring for the given slot_id, ep_index and stream_id.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id)
{
	struct xhci_virt_ep *ep;

	ep = &xhci->devs[slot_id]->eps[ep_index];
	/* Common case: no streams */
	if (!(ep->ep_state & EP_HAS_STREAMS))
		return ep->ring;

	if (stream_id == 0) {
		xhci_warn(xhci,
				"WARN: Slot ID %u, ep index %u has streams, "
				"but URB has no stream ID.\n",
				slot_id, ep_index);
		return NULL;
	}

	if (stream_id < ep->stream_info->num_streams)
		return ep->stream_info->stream_rings[stream_id];

	xhci_warn(xhci,
			"WARN: Slot ID %u, ep index %u has "
			"stream IDs 1 to %u allocated, "
			"but stream ID %u is requested.\n",
			slot_id, ep_index,
			ep->stream_info->num_streams - 1,
			stream_id);
	return NULL;
}

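/*
 * For reference, the URB-based lookup used by the cancellation paths
 * below is a thin wrapper around this triad lookup; roughly (the real
 * helper, xhci_urb_to_transfer_ring(), lives in xhci.c):
 *
 *	return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id,
 *			xhci_get_endpoint_index(&urb->ep->desc),
 *			urb->stream_id);
 */
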
/*
 * Move the xHC's endpoint ring dequeue pointer past cur_td.
 * Record the new state of the xHC's endpoint ring dequeue segment,
 * dequeue pointer, and new consumer cycle state in state.
 * Update our internal representation of the ring's dequeue pointer.
 *
 * We do this in three jumps:
 *  - First we update our new ring state to be the same as when the xHC stopped.
 *  - Then we traverse the ring to find the segment that contains
 *    the last TRB in the TD.  We toggle the xHC's new cycle state when we pass
 *    any link TRBs with the toggle cycle bit set.
 *  - Finally we move the dequeue state one TRB further, toggling the cycle bit
 *    if we've moved it past a link TRB with the toggle cycle bit set.
 *
 * Some of the uses of xhci_generic_trb are grotty, but if they're done
 * with correct __le32 accesses they should work fine.  Only users of this are
 * in here.
 */
void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id, struct xhci_td *cur_td,
		struct xhci_dequeue_state *state)
{
	struct xhci_virt_device *dev = xhci->devs[slot_id];
	struct xhci_virt_ep *ep = &dev->eps[ep_index];
	struct xhci_ring *ep_ring;
	struct xhci_segment *new_seg;
	union xhci_trb *new_deq;
	dma_addr_t addr;
	u64 hw_dequeue;
	bool cycle_found = false;
	bool td_last_trb_found = false;

	ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
			ep_index, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN can't find new dequeue state "
				"for invalid stream ID %u.\n",
				stream_id);
		return;
	}

	/* Dig out the cycle state saved by the xHC during the stop ep cmd */
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"Finding endpoint context");
	/* 4.6.9 the css flag is written to the stream context for streams */
	if (ep->ep_state & EP_HAS_STREAMS) {
		struct xhci_stream_ctx *ctx =
			&ep->stream_info->stream_ctx_array[stream_id];
		hw_dequeue = le64_to_cpu(ctx->stream_ring);
	} else {
		struct xhci_ep_ctx *ep_ctx
			= xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
		hw_dequeue = le64_to_cpu(ep_ctx->deq);
	}

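	/*
	 * Layout note: in both cases hw_dequeue is the 64-bit TR Dequeue
	 * Pointer field, where bit 0 holds the consumer cycle state (DCS)
	 * and bits 63:4 hold the 16-byte aligned TRB address; hence the
	 * 0x1 and ~0xf masks used below.
	 */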
	new_seg = ep_ring->deq_seg;
	new_deq = ep_ring->dequeue;
	state->new_cycle_state = hw_dequeue & 0x1;

	/*
	 * We want to find the pointer, segment and cycle state of the new trb
	 * (the one after current TD's last_trb).  We know the cycle state at
	 * hw_dequeue, so walk the ring until both hw_dequeue and last_trb are
	 * found.
	 */
	do {
		if (!cycle_found && xhci_trb_virt_to_dma(new_seg, new_deq)
		    == (dma_addr_t)(hw_dequeue & ~0xf)) {
			cycle_found = true;
			if (td_last_trb_found)
				break;
		}
		if (new_deq == cur_td->last_trb)
			td_last_trb_found = true;

		if (cycle_found && trb_is_link(new_deq) &&
		    link_trb_toggles_cycle(new_deq))
			state->new_cycle_state ^= 0x1;

		next_trb(xhci, ep_ring, &new_seg, &new_deq);

		/* Search wrapped around, bail out */
		if (new_deq == ep->ring->dequeue) {
			xhci_err(xhci, "Error: Failed finding new dequeue state\n");
			state->new_deq_seg = NULL;
			state->new_deq_ptr = NULL;
			return;
		}

	} while (!cycle_found || !td_last_trb_found);

	state->new_deq_seg = new_seg;
	state->new_deq_ptr = new_deq;

	/* Don't update the ring cycle state for the producer (us). */
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"Cycle state = 0x%x", state->new_cycle_state);

	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"New dequeue segment = %p (virtual)",
			state->new_deq_seg);
	addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"New dequeue pointer = 0x%llx (DMA)",
			(unsigned long long) addr);
}

/* flip_cycle means flip the cycle bit of all but the first and last TRB.
 * (The last TRB actually points to the ring enqueue pointer, which is not part
 * of this TD.)  This is used to remove partially enqueued isoc TDs from a ring.
 */
static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
		struct xhci_td *cur_td, bool flip_cycle)
{
	struct xhci_segment *cur_seg;
	union xhci_trb *cur_trb;

	for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
			true;
			next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
		if (trb_is_link(cur_trb)) {
			/* Unchain any chained Link TRBs, but
			 * leave the pointers intact.
			 */
			cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN);
			/* Flip the cycle bit (link TRBs can't be the first
			 * or last TRB).
			 */
			if (flip_cycle)
				cur_trb->generic.field[3] ^=
					cpu_to_le32(TRB_CYCLE);
			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
					"Cancel (unchain) link TRB");
			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
					"Address = %p (0x%llx dma); "
					"in seg %p (0x%llx dma)",
					cur_trb,
					(unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
					cur_seg,
					(unsigned long long)cur_seg->dma);
		} else {
			cur_trb->generic.field[0] = 0;
			cur_trb->generic.field[1] = 0;
			cur_trb->generic.field[2] = 0;
			/* Preserve only the cycle bit of this TRB */
			cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
			/* Flip the cycle bit except on the first or last TRB */
			if (flip_cycle && cur_trb != cur_td->first_trb &&
			    cur_trb != cur_td->last_trb)
				cur_trb->generic.field[3] ^=
					cpu_to_le32(TRB_CYCLE);
			cur_trb->generic.field[3] |= cpu_to_le32(
				TRB_TYPE(TRB_TR_NOOP));
			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
					"TRB to noop at offset 0x%llx",
					(unsigned long long)
					xhci_trb_virt_to_dma(cur_seg, cur_trb));
		}
		if (cur_trb == cur_td->last_trb)
			break;
	}
}

static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
		struct xhci_virt_ep *ep)
{
	ep->ep_state &= ~EP_HALT_PENDING;
	/* Can't del_timer_sync in interrupt, so we attempt to cancel.  If the
	 * timer is running on another CPU, we don't decrement stop_cmds_pending
	 * (since we didn't successfully stop the watchdog timer).
	 */
	if (del_timer(&ep->stop_cmd_timer))
		ep->stop_cmds_pending--;
}

/* Must be called with xhci->lock held in interrupt context */
static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
		struct xhci_td *cur_td, int status)
{
	struct usb_hcd *hcd;
	struct urb *urb;
	struct urb_priv *urb_priv;

	urb = cur_td->urb;
	urb_priv = urb->hcpriv;
	urb_priv->td_cnt++;
	hcd = bus_to_hcd(urb->dev->bus);

	/* Only giveback urb when this is the last td in urb */
	if (urb_priv->td_cnt == urb_priv->length) {
		if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
			xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
			if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
				if (xhci->quirks & XHCI_AMD_PLL_FIX)
					usb_amd_quirk_pll_enable();
			}
		}
		usb_hcd_unlink_urb_from_ep(hcd, urb);

		spin_unlock(&xhci->lock);
		usb_hcd_giveback_urb(hcd, urb, status);
		xhci_urb_free_priv(urb_priv);
		spin_lock(&xhci->lock);
	}
}

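/*
 * Bounce buffers exist to satisfy the xHCI TD fragment rules: when a
 * scatter-gather entry would split a TD on a boundary the controller
 * can't accept, the data is staged in a per-ring-segment bounce buffer
 * instead, and (for IN transfers) copied back to the sg list below.
 */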
void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci, struct xhci_ring *ring,
				 struct xhci_td *td)
{
	struct device *dev = xhci_to_hcd(xhci)->self.controller;
	struct xhci_segment *seg = td->bounce_seg;
	struct urb *urb = td->urb;

	if (!seg || !urb)
		return;

	if (usb_urb_dir_out(urb)) {
		dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
				 DMA_TO_DEVICE);
		return;
	}

	/* for IN transfers we need to copy the data from bounce to sg */
	sg_pcopy_from_buffer(urb->sg, urb->num_mapped_sgs, seg->bounce_buf,
			     seg->bounce_len, seg->bounce_offs);
	dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
			 DMA_FROM_DEVICE);
	seg->bounce_len = 0;
	seg->bounce_offs = 0;
}

/*
 * When we get a command completion for a Stop Endpoint Command, we need to
 * unlink any cancelled TDs from the ring.  There are two ways to do that:
 *
 *  1. If the HW was in the middle of processing the TD that needs to be
 *     cancelled, then we must move the ring's dequeue pointer past the last TRB
 *     in the TD with a Set Dequeue Pointer Command.
 *  2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
 *     bit cleared) so that the HW will skip over them.
 */
static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
		union xhci_trb *trb, struct xhci_event_cmd *event)
{
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;
	struct list_head *entry;
	struct xhci_td *cur_td = NULL;
	struct xhci_td *last_unlinked_td;

	struct xhci_dequeue_state deq_state;

	if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
		if (!xhci->devs[slot_id])
			xhci_warn(xhci, "Stop endpoint command "
				"completion for disabled slot %u\n",
				slot_id);
		return;
	}

	memset(&deq_state, 0, sizeof(deq_state));
	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	ep = &xhci->devs[slot_id]->eps[ep_index];

	if (list_empty(&ep->cancelled_td_list)) {
		xhci_stop_watchdog_timer_in_irq(xhci, ep);
		ep->stopped_td = NULL;
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
		return;
	}

	/* Fix up the ep ring first, so HW stops executing cancelled TDs.
	 * We have the xHCI lock, so nothing can modify this list until we drop
	 * it.  We're also in the event handler, so we can't get re-interrupted
	 * if another Stop Endpoint command completes.
	 */
	list_for_each(entry, &ep->cancelled_td_list) {
		cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Removing canceled TD starting at 0x%llx (dma).",
				(unsigned long long)xhci_trb_virt_to_dma(
					cur_td->start_seg, cur_td->first_trb));
		ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
		if (!ep_ring) {
			/* This shouldn't happen unless a driver is mucking
			 * with the stream ID after submission.  This will
			 * leave the TD on the hardware ring, and the hardware
			 * will try to execute it, and may access a buffer
			 * that has already been freed.  In the best case, the
			 * hardware will execute it, and the event handler will
			 * ignore the completion event for that TD, since it was
			 * removed from the td_list for that endpoint.  In
			 * short, don't muck with the stream ID after
			 * submission.
			 */
			xhci_warn(xhci, "WARN Cancelled URB %p "
					"has invalid stream ID %u.\n",
					cur_td->urb,
					cur_td->urb->stream_id);
			goto remove_finished_td;
		}
		/*
		 * If we stopped on the TD we need to cancel, then we have to
		 * move the xHC endpoint ring dequeue pointer past this TD.
		 */
		if (cur_td == ep->stopped_td)
			xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
					cur_td->urb->stream_id,
					cur_td, &deq_state);
		else
			td_to_noop(xhci, ep_ring, cur_td, false);
remove_finished_td:
		/*
		 * The event handler won't see a completion for this TD anymore,
		 * so remove it from the endpoint ring's TD list.  Keep it in
		 * the cancelled TD list for URB completion later.
		 */
		list_del_init(&cur_td->td_list);
	}
	last_unlinked_td = cur_td;
	xhci_stop_watchdog_timer_in_irq(xhci, ep);

	/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
	if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
		xhci_queue_new_dequeue_state(xhci, slot_id, ep_index,
				ep->stopped_td->urb->stream_id, &deq_state);
		xhci_ring_cmd_db(xhci);
	} else {
		/* Otherwise ring the doorbell(s) to restart queued transfers */
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
	}

	ep->stopped_td = NULL;

	/*
	 * Drop the lock and complete the URBs in the cancelled TD list.
	 * New TDs to be cancelled might be added to the end of the list before
	 * we can complete all the URBs for the TDs we already unlinked.
	 * So stop when we've completed the URB for the last TD we unlinked.
	 */
	do {
		cur_td = list_entry(ep->cancelled_td_list.next,
				struct xhci_td, cancelled_td_list);
		list_del_init(&cur_td->cancelled_td_list);

		/* Clean up the cancelled URB */
		/* Doesn't matter what we pass for status, since the core will
		 * just overwrite it (because the URB has been unlinked).
		 */
		ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
		if (ep_ring && cur_td->bounce_seg)
			xhci_unmap_td_bounce_buffer(xhci, ep_ring, cur_td);
		xhci_giveback_urb_in_irq(xhci, cur_td, 0);

		/* Stop processing the cancelled list if the watchdog timer is
		 * running.
		 */
		if (xhci->xhc_state & XHCI_STATE_DYING)
			return;
	} while (cur_td != last_unlinked_td);

	/* Return to the event handler with xhci->lock re-acquired */
}

static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	struct xhci_td *cur_td;

	while (!list_empty(&ring->td_list)) {
		cur_td = list_first_entry(&ring->td_list,
				struct xhci_td, td_list);
		list_del_init(&cur_td->td_list);
		if (!list_empty(&cur_td->cancelled_td_list))
			list_del_init(&cur_td->cancelled_td_list);

		if (cur_td->bounce_seg)
			xhci_unmap_td_bounce_buffer(xhci, ring, cur_td);
		xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
	}
}

static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
		int slot_id, int ep_index)
{
	struct xhci_td *cur_td;
	struct xhci_virt_ep *ep;
	struct xhci_ring *ring;

	ep = &xhci->devs[slot_id]->eps[ep_index];
	if ((ep->ep_state & EP_HAS_STREAMS) ||
			(ep->ep_state & EP_GETTING_NO_STREAMS)) {
		int stream_id;

		for (stream_id = 0; stream_id < ep->stream_info->num_streams;
				stream_id++) {
			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
					"Killing URBs for slot ID %u, ep index %u, stream %u",
					slot_id, ep_index, stream_id + 1);
			xhci_kill_ring_urbs(xhci,
					ep->stream_info->stream_rings[stream_id]);
		}
	} else {
		ring = ep->ring;
		if (!ring)
			return;
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Killing URBs for slot ID %u, ep index %u",
				slot_id, ep_index);
		xhci_kill_ring_urbs(xhci, ring);
	}
	while (!list_empty(&ep->cancelled_td_list)) {
		cur_td = list_first_entry(&ep->cancelled_td_list,
				struct xhci_td, cancelled_td_list);
		list_del_init(&cur_td->cancelled_td_list);
		xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
	}
}

/* Watchdog timer function for when a stop endpoint command fails to complete.
 * In this case, we assume the host controller is broken or dying or dead.  The
 * host may still be completing some other events, so we have to be careful to
 * let the event ring handler and the URB dequeueing/enqueueing functions know
 * through xhci->state.
 *
 * The timer may also fire if the host takes a very long time to respond to the
 * command, and the stop endpoint command completion handler cannot delete the
 * timer before the timer function is called.  Another endpoint cancellation may
 * sneak in before the timer function can grab the lock, and that may queue
 * another stop endpoint command and add the timer back.  So we cannot use a
 * simple flag to say whether there is a pending stop endpoint command for a
 * particular endpoint.
 *
 * Instead we use a combination of that flag and a counter for the number of
 * pending stop endpoint commands.  If the timer is the tail end of the last
 * stop endpoint command, and the endpoint's command is still pending, we assume
 * the host is dying.
 */
void xhci_stop_endpoint_command_watchdog(unsigned long arg)
{
	struct xhci_hcd *xhci;
	struct xhci_virt_ep *ep;
	int ret, i, j;
	unsigned long flags;

	ep = (struct xhci_virt_ep *) arg;
	xhci = ep->xhci;

	spin_lock_irqsave(&xhci->lock, flags);

	ep->stop_cmds_pending--;
	if (xhci->xhc_state & XHCI_STATE_REMOVING) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}
	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Stop EP timer ran, but another timer marked "
				"xHCI as DYING, exiting.");
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}
	if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Stop EP timer ran, but no command pending, "
				"exiting.");
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}

	xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
	xhci_warn(xhci, "Assuming host is dying, halting host.\n");
	/* Oops, HC is dead or dying or at least not responding to the stop
	 * endpoint command.
	 */
	xhci->xhc_state |= XHCI_STATE_DYING;
	/* Disable interrupts from the host controller and start halting it */
	xhci_quiesce(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	ret = xhci_halt(xhci);

	spin_lock_irqsave(&xhci->lock, flags);
	if (ret < 0) {
		/* This is bad; the host is not responding to commands and it's
		 * not allowing itself to be halted.  At least interrupts are
		 * disabled.  If we call usb_hc_died(), it will attempt to
		 * disconnect all device drivers under this host.  Those
		 * disconnect() methods will wait for all URBs to be unlinked,
		 * so we must complete them.
		 */
		xhci_warn(xhci, "Non-responsive xHCI host is not halting.\n");
		xhci_warn(xhci, "Completing active URBs anyway.\n");
		/* We could turn all TDs on the rings to no-ops.  This won't
		 * help if the host has cached part of the ring, and is slow if
		 * we want to preserve the cycle bit.  Skip it and hope the host
		 * doesn't touch the memory.
		 */
	}
	for (i = 0; i < MAX_HC_SLOTS; i++) {
		if (!xhci->devs[i])
			continue;
		for (j = 0; j < 31; j++)
			xhci_kill_endpoint_urbs(xhci, i, j);
	}
	spin_unlock_irqrestore(&xhci->lock, flags);
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"Calling usb_hc_died()");
	usb_hc_died(xhci_to_hcd(xhci));
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"xHCI host controller is dead.");
}

static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
		struct xhci_virt_device *dev,
		struct xhci_ring *ep_ring,
		unsigned int ep_index)
{
	union xhci_trb *dequeue_temp;
	int num_trbs_free_temp;
	bool revert = false;

	num_trbs_free_temp = ep_ring->num_trbs_free;
	dequeue_temp = ep_ring->dequeue;

	/* If we get two back-to-back stalls, and the first stalled transfer
	 * ends just before a link TRB, the dequeue pointer will be left on
	 * the link TRB by the code in the while loop.  So we have to update
	 * the dequeue pointer one segment further, or we'll jump off
	 * the segment into la-la-land.
	 */
	if (trb_is_link(ep_ring->dequeue)) {
		ep_ring->deq_seg = ep_ring->deq_seg->next;
		ep_ring->dequeue = ep_ring->deq_seg->trbs;
	}

	while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
		/* We have more usable TRBs */
		ep_ring->num_trbs_free++;
		ep_ring->dequeue++;
		if (trb_is_link(ep_ring->dequeue)) {
			if (ep_ring->dequeue ==
					dev->eps[ep_index].queued_deq_ptr)
				break;
			ep_ring->deq_seg = ep_ring->deq_seg->next;
			ep_ring->dequeue = ep_ring->deq_seg->trbs;
		}
		if (ep_ring->dequeue == dequeue_temp) {
			revert = true;
			break;
		}
	}

	if (revert) {
		xhci_dbg(xhci, "Unable to find new dequeue pointer\n");
		ep_ring->num_trbs_free = num_trbs_free_temp;
	}
}

/*
 * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
 * we need to clear the set deq pending flag in the endpoint ring state, so that
 * the TD queueing code can ring the doorbell again.  We also need to ring the
 * endpoint doorbell to restart the ring, but only if there aren't more
 * cancellations pending.
 */
static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
		union xhci_trb *trb, u32 cmd_comp_code)
{
	unsigned int ep_index;
	unsigned int stream_id;
	struct xhci_ring *ep_ring;
	struct xhci_virt_device *dev;
	struct xhci_virt_ep *ep;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;

	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
	dev = xhci->devs[slot_id];
	ep = &dev->eps[ep_index];

	ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n",
				stream_id);
		/* XXX: Harmless??? */
		goto cleanup;
	}

	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
	slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);

	if (cmd_comp_code != COMP_SUCCESS) {
		unsigned int ep_state;
		unsigned int slot_state;

		switch (cmd_comp_code) {
		case COMP_TRB_ERR:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because of stream ID configuration\n");
			break;
		case COMP_CTX_STATE:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due to incorrect slot or ep state.\n");
			ep_state = le32_to_cpu(ep_ctx->ep_info);
			ep_state &= EP_STATE_MASK;
			slot_state = le32_to_cpu(slot_ctx->dev_state);
			slot_state = GET_SLOT_STATE(slot_state);
			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
					"Slot state = %u, EP state = %u",
					slot_state, ep_state);
			break;
		case COMP_EBADSLT:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because slot %u was not enabled.\n",
					slot_id);
			break;
		default:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown completion code of %u.\n",
					cmd_comp_code);
			break;
		}
		/* OK what do we do now?  The endpoint state is hosed, and we
		 * should never get to this point if the synchronization between
		 * queueing and endpoint state are correct.  This might happen
		 * if the device gets disconnected after we've finished
		 * cancelling URBs, which might not be an error...
		 */
	} else {
		u64 deq;
		/* 4.6.10 deq ptr is written to the stream ctx for streams */
		if (ep->ep_state & EP_HAS_STREAMS) {
			struct xhci_stream_ctx *ctx =
				&ep->stream_info->stream_ctx_array[stream_id];
			deq = le64_to_cpu(ctx->stream_ring) & SCTX_DEQ_MASK;
		} else {
			deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
		}
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"Successful Set TR Deq Ptr cmd, deq = @%08llx", deq);
		if (xhci_trb_virt_to_dma(ep->queued_deq_seg,
					 ep->queued_deq_ptr) == deq) {
			/* Update the ring's dequeue segment and dequeue pointer
			 * to reflect the new position.
			 */
			update_ring_for_set_deq_completion(xhci, dev,
				ep_ring, ep_index);
		} else {
			xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n");
			xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
					ep->queued_deq_seg, ep->queued_deq_ptr);
		}
	}

cleanup:
	dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
	dev->eps[ep_index].queued_deq_seg = NULL;
	dev->eps[ep_index].queued_deq_ptr = NULL;
	/* Restart any rings with pending URBs */
	ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}

static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
		union xhci_trb *trb, u32 cmd_comp_code)
{
	unsigned int ep_index;

	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	/* This command will only fail if the endpoint wasn't halted,
	 * but we don't care.
	 */
	xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
		"Ignoring reset ep completion code of %u", cmd_comp_code);

	/* HW with the reset endpoint quirk needs to have a configure endpoint
	 * command complete before the endpoint can be used.  Queue that here
	 * because the HW can't handle two commands being queued in a row.
	 */
	if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
		struct xhci_command *command;
		command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
		if (!command) {
			xhci_warn(xhci, "WARN Cannot submit cfg ep: ENOMEM\n");
			return;
		}
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Queueing configure endpoint command");
		xhci_queue_configure_endpoint(xhci, command,
				xhci->devs[slot_id]->in_ctx->dma, slot_id,
				false);
		xhci_ring_cmd_db(xhci);
	} else {
		/* Clear our internal halted state */
		xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
	}
}

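/* On success the allocated slot ID is parked in xhci->slot_id, where the
 * waiting xhci_alloc_dev() picks it up; a value of 0 signals that no slot
 * was available.
 */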
static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id,
		u32 cmd_comp_code)
{
	if (cmd_comp_code == COMP_SUCCESS)
		xhci->slot_id = slot_id;
	else
		xhci->slot_id = 0;
}

static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *virt_dev;

	virt_dev = xhci->devs[slot_id];
	if (!virt_dev)
		return;
	if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
		/* Delete default control endpoint resources */
		xhci_free_device_endpoint_resources(xhci, virt_dev, true);
	xhci_free_virt_device(xhci, slot_id);
}

static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
		struct xhci_event_cmd *event, u32 cmd_comp_code)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_input_control_ctx *ctrl_ctx;
	unsigned int ep_index;
	unsigned int ep_state;
	u32 add_flags, drop_flags;

	/*
	 * Configure endpoint commands can come from the USB core
	 * configuration or alt setting changes, or because the HW
	 * needed an extra configure endpoint command after a reset
	 * endpoint command or streams were being configured.
	 * If the command was for a halted endpoint, the xHCI driver
	 * is not waiting on the configure endpoint command.
	 */
	virt_dev = xhci->devs[slot_id];
	ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "Could not get input context, bad type.\n");
		return;
	}

	add_flags = le32_to_cpu(ctrl_ctx->add_flags);
	drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
	/* Input ctx add_flags are the endpoint index plus one */
	ep_index = xhci_last_valid_endpoint(add_flags) - 1;

	/* A usb_set_interface() call directly after clearing a halted
	 * condition may race on this quirky hardware.  Not worth
	 * worrying about, since this is prototype hardware.  Not sure
	 * if this will work for streams, but streams support was
	 * untested on this prototype.
	 */
	if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
			ep_index != (unsigned int) -1 &&
			add_flags - SLOT_FLAG == drop_flags) {
		ep_state = virt_dev->eps[ep_index].ep_state;
		if (!(ep_state & EP_HALTED))
			return;
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Completed config ep cmd - "
				"last ep index = %d, state = %d",
				ep_index, ep_state);
		/* Clear internal halted state and restart ring(s) */
		virt_dev->eps[ep_index].ep_state &= ~EP_HALTED;
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
		return;
	}
	return;
}

static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id,
		struct xhci_event_cmd *event)
{
	xhci_dbg(xhci, "Completed reset device command.\n");
	if (!xhci->devs[slot_id])
		xhci_warn(xhci, "Reset device command completion "
				"for disabled slot %u\n", slot_id);
}

static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event)
{
	if (!(xhci->quirks & XHCI_NEC_HOST)) {
		xhci->error_bitmask |= 1 << 6;
		return;
	}
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"NEC firmware version %2x.%02x",
			NEC_FW_MAJOR(le32_to_cpu(event->status)),
			NEC_FW_MINOR(le32_to_cpu(event->status)));
}

static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 status)
{
	list_del(&cmd->cmd_list);

	if (cmd->completion) {
		cmd->status = status;
		complete(cmd->completion);
	} else {
		kfree(cmd);
	}
}

void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
{
	struct xhci_command *cur_cmd, *tmp_cmd;
	list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list)
		xhci_complete_del_and_free_cmd(cur_cmd, COMP_CMD_ABORT);
}

/*
 * Turn all commands on command ring with status set to "aborted" to no-op trbs.
 * If there are other commands waiting then restart the ring and kick the timer.
 * This must be called with command ring stopped and xhci->lock held.
 */
static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
					 struct xhci_command *cur_cmd)
{
	struct xhci_command *i_cmd, *tmp_cmd;
	u32 cycle_state;

	/* Turn all aborted commands in list to no-ops, then restart */
	list_for_each_entry_safe(i_cmd, tmp_cmd, &xhci->cmd_list,
				 cmd_list) {

		if (i_cmd->status != COMP_CMD_ABORT)
			continue;

		i_cmd->status = COMP_CMD_STOP;

		xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
			 i_cmd->command_trb);
		/* get cycle state from the original cmd trb */
		cycle_state = le32_to_cpu(
			i_cmd->command_trb->generic.field[3]) & TRB_CYCLE;
		/* modify the command trb to no-op command */
		i_cmd->command_trb->generic.field[0] = 0;
		i_cmd->command_trb->generic.field[1] = 0;
		i_cmd->command_trb->generic.field[2] = 0;
		i_cmd->command_trb->generic.field[3] = cpu_to_le32(
			TRB_TYPE(TRB_CMD_NOOP) | cycle_state);

		/*
		 * caller waiting for completion is called when command
		 * completion event is received for these no-op commands
		 */
	}

	xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

	/* ring command ring doorbell to restart the command ring */
	if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
	    !(xhci->xhc_state & XHCI_STATE_DYING)) {
		xhci->current_cmd = cur_cmd;
		mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
		xhci_ring_cmd_db(xhci);
	}
	return;
}

void xhci_handle_command_timeout(unsigned long data)
{
	struct xhci_hcd *xhci;
	int ret;
	unsigned long flags;
	u64 hw_ring_state;
	bool second_timeout = false;

	xhci = (struct xhci_hcd *) data;

	/* mark this command to be cancelled */
	spin_lock_irqsave(&xhci->lock, flags);
	if (xhci->current_cmd) {
		if (xhci->current_cmd->status == COMP_CMD_ABORT)
			second_timeout = true;
		xhci->current_cmd->status = COMP_CMD_ABORT;
	}

	/* Make sure command ring is running before aborting it */
	hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
	    (hw_ring_state & CMD_RING_RUNNING)) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "Command timeout\n");
		ret = xhci_abort_cmd_ring(xhci);
		if (unlikely(ret == -ESHUTDOWN)) {
			xhci_err(xhci, "Abort command ring failed\n");
			xhci_cleanup_command_queue(xhci);
			usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
			xhci_dbg(xhci, "xHCI host controller is dead.\n");
		}
		return;
	}

	/* command ring failed to restart, or host removed.  Bail out */
	if (second_timeout || xhci->xhc_state & XHCI_STATE_REMOVING) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "command timed out twice, ring start fail?\n");
		xhci_cleanup_command_queue(xhci);
		return;
	}

	/* command timeout on stopped ring, ring can't be aborted */
	xhci_dbg(xhci, "Command timeout on stopped ring\n");
	xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);
	spin_unlock_irqrestore(&xhci->lock, flags);
	return;
}

static void handle_cmd_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event)
{
	int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	u64 cmd_dma;
	dma_addr_t cmd_dequeue_dma;
	u32 cmd_comp_code;
	union xhci_trb *cmd_trb;
	struct xhci_command *cmd;
	u32 cmd_type;

	cmd_dma = le64_to_cpu(event->cmd_trb);
	cmd_trb = xhci->cmd_ring->dequeue;
	cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
			cmd_trb);
	/* Is the command ring deq ptr out of sync with the deq seg ptr? */
	if (cmd_dequeue_dma == 0) {
		xhci->error_bitmask |= 1 << 4;
		return;
	}
	/* Does the DMA address match our internal dequeue pointer address? */
	if (cmd_dma != (u64) cmd_dequeue_dma) {
		xhci->error_bitmask |= 1 << 5;
		return;
	}

	cmd = list_entry(xhci->cmd_list.next, struct xhci_command, cmd_list);

	del_timer(&xhci->cmd_timer);

	trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event);

	cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status));

	/* If CMD ring stopped we own the trbs between enqueue and dequeue */
	if (cmd_comp_code == COMP_CMD_STOP) {
		xhci_handle_stopped_cmd_ring(xhci, cmd);
		return;
	}

	if (cmd->command_trb != xhci->cmd_ring->dequeue) {
		xhci_err(xhci,
			 "Command completion event does not match command\n");
		return;
	}

	/*
	 * Host aborted the command ring, check if the current command was
	 * supposed to be aborted, otherwise continue normally.
	 * The command ring is stopped now, but the xHC will issue a Command
	 * Ring Stopped event which will cause us to restart it.
	 */
	if (cmd_comp_code == COMP_CMD_ABORT) {
		xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
		if (cmd->status == COMP_CMD_ABORT)
			goto event_handled;
	}

	cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3]));
	switch (cmd_type) {
	case TRB_ENABLE_SLOT:
		xhci_handle_cmd_enable_slot(xhci, slot_id, cmd_comp_code);
		break;
	case TRB_DISABLE_SLOT:
		xhci_handle_cmd_disable_slot(xhci, slot_id);
		break;
	case TRB_CONFIG_EP:
		if (!cmd->completion)
			xhci_handle_cmd_config_ep(xhci, slot_id, event,
						  cmd_comp_code);
		break;
	case TRB_EVAL_CONTEXT:
		break;
	case TRB_ADDR_DEV:
		break;
	case TRB_STOP_RING:
		WARN_ON(slot_id != TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3])));
		xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb, event);
		break;
	case TRB_SET_DEQ:
		WARN_ON(slot_id != TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3])));
		xhci_handle_cmd_set_deq(xhci, slot_id, cmd_trb, cmd_comp_code);
		break;
	case TRB_CMD_NOOP:
		/* Is this an aborted command turned to NO-OP? */
		if (cmd->status == COMP_CMD_STOP)
			cmd_comp_code = COMP_CMD_STOP;
		break;
	case TRB_RESET_EP:
		WARN_ON(slot_id != TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3])));
		xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code);
		break;
	case TRB_RESET_DEV:
		/* SLOT_ID field in reset device cmd completion event TRB is 0.
		 * Use the SLOT_ID from the command TRB instead (xhci 4.6.11)
		 */
		slot_id = TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3]));
		xhci_handle_cmd_reset_dev(xhci, slot_id, event);
		break;
	case TRB_NEC_GET_FW:
		xhci_handle_cmd_nec_get_fw(xhci, event);
		break;
	default:
		/* Skip over unknown commands on the event ring */
		xhci->error_bitmask |= 1 << 6;
		break;
	}

	/* restart timer if this wasn't the last command */
	if (cmd->cmd_list.next != &xhci->cmd_list) {
		xhci->current_cmd = list_entry(cmd->cmd_list.next,
					       struct xhci_command, cmd_list);
		mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
	}

event_handled:
	xhci_complete_del_and_free_cmd(cmd, cmd_comp_code);

	inc_deq(xhci, xhci->cmd_ring);
}

static void handle_vendor_event(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	u32 trb_type;

	trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->generic.field[3]));
	xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
	if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
		handle_cmd_completion(xhci, &event->event_cmd);
}

f6ff0ac8
SS
1452/* @port_id: the one-based port ID from the hardware (indexed from array of all
1453 * port registers -- USB 3.0 and USB 2.0).
1454 *
1455 * Returns a zero-based port number, which is suitable for indexing into each of
1456 * the split roothubs' port arrays and bus state arrays.
d0cd5d48 1457 * Add one to it in order to call xhci_find_slot_id_by_port.
f6ff0ac8
SS
1458 */
1459static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd,
1460 struct xhci_hcd *xhci, u32 port_id)
1461{
1462 unsigned int i;
1463 unsigned int num_similar_speed_ports = 0;
1464
1465 /* port_id from the hardware is 1-based, but port_array[], usb3_ports[],
1466 * and usb2_ports are 0-based indexes. Count the number of similar
1467 * speed ports, up to 1 port before this port.
1468 */
1469 for (i = 0; i < (port_id - 1); i++) {
1470 u8 port_speed = xhci->port_array[i];
1471
1472 /*
1473 * Skip ports that don't have known speeds, or have duplicate
1474 * Extended Capabilities port speed entries.
1475 */
22e04870 1476 if (port_speed == 0 || port_speed == DUPLICATE_ENTRY)
f6ff0ac8
SS
1477 continue;
1478
1479 /*
1480 * USB 3.0 ports are always under a USB 3.0 hub. USB 2.0 and
1481 * 1.1 ports are under the USB 2.0 hub. If the port speed
1482 * matches the device speed, it's a similar speed port.
1483 */
b50107bb 1484 if ((port_speed == 0x03) == (hcd->speed >= HCD_USB3))
f6ff0ac8
SS
1485 num_similar_speed_ports++;
1486 }
1487 return num_similar_speed_ports;
1488}
1489
623bef9e
SS
1490static void handle_device_notification(struct xhci_hcd *xhci,
1491 union xhci_trb *event)
1492{
1493 u32 slot_id;
4ee823b8 1494 struct usb_device *udev;
623bef9e 1495
7e76ad43 1496 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->generic.field[3]));
4ee823b8 1497 if (!xhci->devs[slot_id]) {
623bef9e
SS
1498 xhci_warn(xhci, "Device Notification event for "
1499 "unused slot %u\n", slot_id);
4ee823b8
SS
1500 return;
1501 }
1502
1503 xhci_dbg(xhci, "Device Wake Notification event for slot ID %u\n",
1504 slot_id);
1505 udev = xhci->devs[slot_id]->udev;
1506 if (udev && udev->parent)
1507 usb_wakeup_notification(udev->parent, udev->portnum);
623bef9e
SS
1508}
1509
0f2a7930
SS
1510static void handle_port_status(struct xhci_hcd *xhci,
1511 union xhci_trb *event)
1512{
f6ff0ac8 1513 struct usb_hcd *hcd;
0f2a7930 1514 u32 port_id;
56192531 1515 u32 temp, temp1;
518e848e 1516 int max_ports;
56192531 1517 int slot_id;
5308a91b 1518 unsigned int faked_port_index;
f6ff0ac8 1519 u8 major_revision;
20b67cf5 1520 struct xhci_bus_state *bus_state;
28ccd296 1521 __le32 __iomem **port_array;
386139d7 1522 bool bogus_port_status = false;
0f2a7930
SS
1523
1524 /* Port status change events always have a successful completion code */
28ccd296 1525 if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS) {
0f2a7930
SS
1526 xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
1527 xhci->error_bitmask |= 1 << 8;
1528 }
28ccd296 1529 port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
0f2a7930
SS
1530 xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);
1531
518e848e
SS
1532 max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
1533 if ((port_id <= 0) || (port_id > max_ports)) {
56192531 1534 xhci_warn(xhci, "Invalid port id %d\n", port_id);
09ce0c0c
PC
1535 inc_deq(xhci, xhci->event_ring);
1536 return;
56192531
AX
1537 }
1538
f6ff0ac8
SS
1539 /* Figure out which usb_hcd this port is attached to:
1540 * is it a USB 3.0 port or a USB 2.0/1.1 port?
1541 */
1542 major_revision = xhci->port_array[port_id - 1];
09ce0c0c
PC
1543
1544 /* Find the right roothub. */
1545 hcd = xhci_to_hcd(xhci);
b50107bb 1546 if ((major_revision == 0x03) != (hcd->speed >= HCD_USB3))
09ce0c0c
PC
1547 hcd = xhci->shared_hcd;
1548
f6ff0ac8
SS
1549 if (major_revision == 0) {
1550 xhci_warn(xhci, "Event for port %u not in "
1551 "Extended Capabilities, ignoring.\n",
1552 port_id);
386139d7 1553 bogus_port_status = true;
f6ff0ac8 1554 goto cleanup;
5308a91b 1555 }
22e04870 1556 if (major_revision == DUPLICATE_ENTRY) {
f6ff0ac8
SS
1557 		xhci_warn(xhci, "Event for port %u duplicated in "
1558 "Extended Capabilities, ignoring.\n",
1559 port_id);
386139d7 1560 bogus_port_status = true;
f6ff0ac8
SS
1561 goto cleanup;
1562 }
1563
1564 /*
1565 * Hardware port IDs reported by a Port Status Change Event include USB
1566 * 3.0 and USB 2.0 ports. We want to check if the port has reported a
1567 * resume event, but we first need to translate the hardware port ID
1568 * into the index into the ports on the correct split roothub, and the
1569 * correct bus_state structure.
1570 */
f6ff0ac8 1571 bus_state = &xhci->bus_state[hcd_index(hcd)];
b50107bb 1572 if (hcd->speed >= HCD_USB3)
f6ff0ac8
SS
1573 port_array = xhci->usb3_ports;
1574 else
1575 port_array = xhci->usb2_ports;
1576 /* Find the faked port hub number */
1577 faked_port_index = find_faked_portnum_from_hw_portnum(hcd, xhci,
1578 port_id);
5308a91b 1579
b0ba9720 1580 temp = readl(port_array[faked_port_index]);
7111ebc9 1581 if (hcd->state == HC_STATE_SUSPENDED) {
56192531
AX
1582 xhci_dbg(xhci, "resume root hub\n");
1583 usb_hcd_resume_root_hub(hcd);
1584 }
1585
b50107bb 1586 if (hcd->speed >= HCD_USB3 && (temp & PORT_PLS_MASK) == XDEV_INACTIVE)
fac4271d
ZJC
1587 bus_state->port_remote_wakeup &= ~(1 << faked_port_index);
1588
56192531
AX
1589 if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
1590 xhci_dbg(xhci, "port resume event for port %d\n", port_id);
1591
b0ba9720 1592 temp1 = readl(&xhci->op_regs->command);
56192531
AX
1593 if (!(temp1 & CMD_RUN)) {
1594 xhci_warn(xhci, "xHC is not running.\n");
1595 goto cleanup;
1596 }
1597
2338b9e4 1598 if (DEV_SUPERSPEED_ANY(temp)) {
d93814cf 1599 xhci_dbg(xhci, "remote wake SS port %d\n", port_id);
4ee823b8
SS
1600 /* Set a flag to say the port signaled remote wakeup,
1601 			 * so we can tell the difference between the end of a
1602 			 * device-initiated remote wakeup and a host-initiated resume.
1603 */
1604 bus_state->port_remote_wakeup |= 1 << faked_port_index;
d93814cf
SS
1605 xhci_test_and_clear_bit(xhci, port_array,
1606 faked_port_index, PORT_PLC);
c9682dff
AX
1607 xhci_set_link_state(xhci, port_array, faked_port_index,
1608 XDEV_U0);
d93814cf
SS
1609 /* Need to wait until the next link state change
1610 * indicates the device is actually in U0.
1611 */
1612 bogus_port_status = true;
1613 goto cleanup;
f69115fd
MN
1614 } else if (!test_bit(faked_port_index,
1615 &bus_state->resuming_ports)) {
56192531 1616 xhci_dbg(xhci, "resume HS port %d\n", port_id);
f6ff0ac8 1617 bus_state->resume_done[faked_port_index] = jiffies +
b9e45188 1618 msecs_to_jiffies(USB_RESUME_TIMEOUT);
f370b996 1619 set_bit(faked_port_index, &bus_state->resuming_ports);
56192531 1620 mod_timer(&hcd->rh_timer,
f6ff0ac8 1621 bus_state->resume_done[faked_port_index]);
56192531
AX
1622 /* Do the rest in GetPortStatus */
1623 }
1624 }
d93814cf
SS
1625
1626 if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_U0 &&
2338b9e4 1627 DEV_SUPERSPEED_ANY(temp)) {
d93814cf 1628 xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
4ee823b8
SS
1629 /* We've just brought the device into U0 through either the
1630 * Resume state after a device remote wakeup, or through the
1631 * U3Exit state after a host-initiated resume. If it's a device
1632 * initiated remote wake, don't pass up the link state change,
1633 * so the roothub behavior is consistent with external
1634 * USB 3.0 hub behavior.
1635 */
d93814cf
SS
1636 slot_id = xhci_find_slot_id_by_port(hcd, xhci,
1637 faked_port_index + 1);
1638 if (slot_id && xhci->devs[slot_id])
1639 xhci_ring_device(xhci, slot_id);
ba7b5c22 1640 if (bus_state->port_remote_wakeup & (1 << faked_port_index)) {
4ee823b8
SS
1641 bus_state->port_remote_wakeup &=
1642 ~(1 << faked_port_index);
1643 xhci_test_and_clear_bit(xhci, port_array,
1644 faked_port_index, PORT_PLC);
1645 usb_wakeup_notification(hcd->self.root_hub,
1646 faked_port_index + 1);
1647 bogus_port_status = true;
1648 goto cleanup;
1649 }
d93814cf 1650 }
56192531 1651
8b3d4570
SS
1652 /*
1653 * Check to see if xhci-hub.c is waiting on RExit to U0 transition (or
1654 	 * RExit to a disconnect state). If so, let the driver know it's
1655 * out of the RExit state.
1656 */
2338b9e4 1657 if (!DEV_SUPERSPEED_ANY(temp) &&
8b3d4570
SS
1658 test_and_clear_bit(faked_port_index,
1659 &bus_state->rexit_ports)) {
1660 complete(&bus_state->rexit_done[faked_port_index]);
1661 bogus_port_status = true;
1662 goto cleanup;
1663 }
1664
b50107bb 1665 if (hcd->speed < HCD_USB3)
6fd45621
AX
1666 xhci_test_and_clear_bit(xhci, port_array, faked_port_index,
1667 PORT_PLC);
1668
56192531 1669cleanup:
0f2a7930 1670 /* Update event ring dequeue pointer before dropping the lock */
3b72fca0 1671 inc_deq(xhci, xhci->event_ring);
0f2a7930 1672
386139d7
SS
1673 /* Don't make the USB core poll the roothub if we got a bad port status
1674 * change event. Besides, at that point we can't tell which roothub
1675 * (USB 2.0 or USB 3.0) to kick.
1676 */
1677 if (bogus_port_status)
1678 return;
1679
c52804a4
SS
1680 /*
1681 * xHCI port-status-change events occur when the "or" of all the
1682 * status-change bits in the portsc register changes from 0 to 1.
1683 * New status changes won't cause an event if any other change
1684 * bits are still set. When an event occurs, switch over to
1685 * polling to avoid losing status changes.
1686 */
1687 xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
1688 set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
0f2a7930
SS
1689 spin_unlock(&xhci->lock);
1690 /* Pass this up to the core */
f6ff0ac8 1691 usb_hcd_poll_rh_status(hcd);
0f2a7930
SS
1692 spin_lock(&xhci->lock);
1693}
1694
d0e96f5a
SS
1695/*
1696 * This TD is defined by the TRBs starting at start_trb in start_seg and ending
1697 * at end_trb, which may be in another segment. If the suspect DMA address is a
1698 * TRB in this TD, this function returns that TRB's segment. Otherwise it
1699  * returns NULL.
1700 */
cffb9be8
HG
1701struct xhci_segment *trb_in_td(struct xhci_hcd *xhci,
1702 struct xhci_segment *start_seg,
d0e96f5a
SS
1703 union xhci_trb *start_trb,
1704 union xhci_trb *end_trb,
cffb9be8
HG
1705 dma_addr_t suspect_dma,
1706 bool debug)
d0e96f5a
SS
1707{
1708 dma_addr_t start_dma;
1709 dma_addr_t end_seg_dma;
1710 dma_addr_t end_trb_dma;
1711 struct xhci_segment *cur_seg;
1712
23e3be11 1713 start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
d0e96f5a
SS
1714 cur_seg = start_seg;
1715
1716 do {
2fa88daa 1717 if (start_dma == 0)
326b4810 1718 return NULL;
ae636747 1719 /* We may get an event for a Link TRB in the middle of a TD */
23e3be11 1720 end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
2fa88daa 1721 &cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
d0e96f5a 1722 /* If the end TRB isn't in this segment, this is set to 0 */
23e3be11 1723 end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);
d0e96f5a 1724
cffb9be8
HG
1725 if (debug)
1726 xhci_warn(xhci,
1727 "Looking for event-dma %016llx trb-start %016llx trb-end %016llx seg-start %016llx seg-end %016llx\n",
1728 (unsigned long long)suspect_dma,
1729 (unsigned long long)start_dma,
1730 (unsigned long long)end_trb_dma,
1731 (unsigned long long)cur_seg->dma,
1732 (unsigned long long)end_seg_dma);
1733
d0e96f5a
SS
1734 if (end_trb_dma > 0) {
1735 /* The end TRB is in this segment, so suspect should be here */
1736 if (start_dma <= end_trb_dma) {
1737 if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
1738 return cur_seg;
1739 } else {
1740 /* Case for one segment with
1741 * a TD wrapped around to the top
1742 */
1743 if ((suspect_dma >= start_dma &&
1744 suspect_dma <= end_seg_dma) ||
1745 (suspect_dma >= cur_seg->dma &&
1746 suspect_dma <= end_trb_dma))
1747 return cur_seg;
1748 }
326b4810 1749 return NULL;
d0e96f5a
SS
1750 } else {
1751 /* Might still be somewhere in this segment */
1752 if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
1753 return cur_seg;
1754 }
1755 cur_seg = cur_seg->next;
23e3be11 1756 start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
2fa88daa 1757 } while (cur_seg != start_seg);
d0e96f5a 1758
326b4810 1759 return NULL;
d0e96f5a
SS
1760}
1761
bcef3fd5
SS
1762static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
1763 unsigned int slot_id, unsigned int ep_index,
e9df17eb 1764 unsigned int stream_id,
bcef3fd5
SS
1765 struct xhci_td *td, union xhci_trb *event_trb)
1766{
1767 struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
ddba5cd0
MN
1768 struct xhci_command *command;
1769 command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
1770 if (!command)
1771 return;
1772
d0167ad2 1773 ep->ep_state |= EP_HALTED;
e9df17eb 1774 ep->stopped_stream = stream_id;
1624ae1c 1775
ddba5cd0 1776 xhci_queue_reset_ep(xhci, command, slot_id, ep_index);
d97b4f8d 1777 xhci_cleanup_stalled_ring(xhci, ep_index, td);
1624ae1c 1778
5e5cf6fc 1779 ep->stopped_stream = 0;
1624ae1c 1780
bcef3fd5
SS
1781 xhci_ring_cmd_db(xhci);
1782}
1783
1784/* Check if an error has halted the endpoint ring. The class driver will
1785  * clean up the halt for a non-default control endpoint if we indicate a stall.
1786  * However, babble and other errors also halt the endpoint ring, and the class
1787 * driver won't clear the halt in that case, so we need to issue a Set Transfer
1788 * Ring Dequeue Pointer command manually.
1789 */
1790static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
1791 struct xhci_ep_ctx *ep_ctx,
1792 unsigned int trb_comp_code)
1793{
1794 /* TRB completion codes that may require a manual halt cleanup */
1795 if (trb_comp_code == COMP_TX_ERR ||
1796 trb_comp_code == COMP_BABBLE ||
1797 trb_comp_code == COMP_SPLIT_ERR)
d4fc8bf5 1798 /* The 0.95 spec says a babbling control endpoint
bcef3fd5
SS
1799 * is not halted. The 0.96 spec says it is. Some HW
1800 * claims to be 0.95 compliant, but it halts the control
1801 * endpoint anyway. Check if a babble halted the
1802 * endpoint.
1803 */
f5960b69
ME
1804 if ((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
1805 cpu_to_le32(EP_STATE_HALTED))
bcef3fd5
SS
1806 return 1;
1807
1808 return 0;
1809}
1810
b45b5069
SS
1811int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
1812{
1813 if (trb_comp_code >= 224 && trb_comp_code <= 255) {
1814 /* Vendor defined "informational" completion code,
1815 * treat as not-an-error.
1816 */
1817 xhci_dbg(xhci, "Vendor defined info completion code %u\n",
1818 trb_comp_code);
1819 xhci_dbg(xhci, "Treating code as success.\n");
1820 return 1;
1821 }
1822 return 0;
1823}
1824
4422da61
AX
1825/*
1826  * Finish the TD processing and remove the TD from its td_list;
1827  * return 1 if the URB can be given back.
1828 */
1829static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
1830 union xhci_trb *event_trb, struct xhci_transfer_event *event,
1831 struct xhci_virt_ep *ep, int *status, bool skip)
1832{
1833 struct xhci_virt_device *xdev;
1834 struct xhci_ring *ep_ring;
1835 unsigned int slot_id;
1836 int ep_index;
1837 struct urb *urb = NULL;
1838 struct xhci_ep_ctx *ep_ctx;
1839 int ret = 0;
8e51adcc 1840 struct urb_priv *urb_priv;
4422da61
AX
1841 u32 trb_comp_code;
1842
28ccd296 1843 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
4422da61 1844 xdev = xhci->devs[slot_id];
28ccd296
ME
1845 ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
1846 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
4422da61 1847 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
28ccd296 1848 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
4422da61
AX
1849
1850 if (skip)
1851 goto td_cleanup;
1852
40a3b775
LB
1853 if (trb_comp_code == COMP_STOP_INVAL ||
1854 trb_comp_code == COMP_STOP ||
1855 trb_comp_code == COMP_STOP_SHORT) {
4422da61
AX
1856 /* The Endpoint Stop Command completion will take care of any
1857 * stopped TDs. A stopped TD may be restarted, so don't update
1858 * the ring dequeue pointer or take this TD off any lists yet.
1859 */
1860 ep->stopped_td = td;
4422da61 1861 return 0;
69defe04
MN
1862 }
1863 if (trb_comp_code == COMP_STALL ||
1864 xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
1865 trb_comp_code)) {
1866 /* Issue a reset endpoint command to clear the host side
1867 * halt, followed by a set dequeue command to move the
1868 * dequeue pointer past the TD.
1869 * The class driver clears the device side halt later.
1870 */
1871 xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index,
1872 ep_ring->stream_id, td, event_trb);
4422da61 1873 } else {
69defe04
MN
1874 /* Update ring dequeue pointer */
1875 while (ep_ring->dequeue != td->last_trb)
3b72fca0 1876 inc_deq(xhci, ep_ring);
69defe04
MN
1877 inc_deq(xhci, ep_ring);
1878 }
4422da61
AX
1879
1880td_cleanup:
69defe04
MN
1881 /* Clean up the endpoint's TD list */
1882 urb = td->urb;
1883 urb_priv = urb->hcpriv;
1884
f9c589e1
MN
1885 /* if a bounce buffer was used to align this td then unmap it */
1886 if (td->bounce_seg)
1887 xhci_unmap_td_bounce_buffer(xhci, ep_ring, td);
1888
69defe04
MN
1889 /* Do one last check of the actual transfer length.
1890 * If the host controller said we transferred more data than the buffer
1891 * length, urb->actual_length will be a very big number (since it's
1892 * unsigned). Play it safe and say we didn't transfer anything.
1893 */
1894 if (urb->actual_length > urb->transfer_buffer_length) {
1895 xhci_warn(xhci, "URB transfer length is wrong, xHC issue? req. len = %u, act. len = %u\n",
1896 urb->transfer_buffer_length,
1897 urb->actual_length);
1898 urb->actual_length = 0;
69defe04
MN
1899 *status = 0;
1900 }
1901 list_del_init(&td->td_list);
1902 /* Was this TD slated to be cancelled but completed anyway? */
1903 if (!list_empty(&td->cancelled_td_list))
1904 list_del_init(&td->cancelled_td_list);
1905
1906 urb_priv->td_cnt++;
1907 /* Giveback the urb when all the tds are completed */
1908 if (urb_priv->td_cnt == urb_priv->length) {
1909 ret = 1;
1910 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
1911 xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
1912 if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
1913 if (xhci->quirks & XHCI_AMD_PLL_FIX)
1914 usb_amd_quirk_pll_enable();
c41136b0
AX
1915 }
1916 }
4422da61
AX
1917 }
1918
1919 return ret;
1920}
1921
8af56be1
AX
1922/*
1923 * Process control tds, update urb status and actual_length.
1924 */
1925static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
1926 union xhci_trb *event_trb, struct xhci_transfer_event *event,
1927 struct xhci_virt_ep *ep, int *status)
1928{
1929 struct xhci_virt_device *xdev;
1930 struct xhci_ring *ep_ring;
1931 unsigned int slot_id;
1932 int ep_index;
1933 struct xhci_ep_ctx *ep_ctx;
1934 u32 trb_comp_code;
1935
28ccd296 1936 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
8af56be1 1937 xdev = xhci->devs[slot_id];
28ccd296
ME
1938 ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
1939 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
8af56be1 1940 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
28ccd296 1941 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
8af56be1 1942
8af56be1
AX
1943 switch (trb_comp_code) {
1944 case COMP_SUCCESS:
1945 if (event_trb == ep_ring->dequeue) {
1946 xhci_warn(xhci, "WARN: Success on ctrl setup TRB "
1947 "without IOC set??\n");
1948 *status = -ESHUTDOWN;
1949 } else if (event_trb != td->last_trb) {
1950 xhci_warn(xhci, "WARN: Success on ctrl data TRB "
1951 "without IOC set??\n");
1952 *status = -ESHUTDOWN;
1953 } else {
8af56be1
AX
1954 *status = 0;
1955 }
1956 break;
1957 case COMP_SHORT_TX:
8af56be1
AX
1958 *status = 0;
1959 break;
40a3b775
LB
1960 case COMP_STOP_SHORT:
1961 if (event_trb == ep_ring->dequeue || event_trb == td->last_trb)
1962 xhci_warn(xhci, "WARN: Stopped Short Packet on ctrl setup or status TRB\n");
1963 else
1964 td->urb->actual_length =
1965 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
1966
1967 return finish_td(xhci, td, event_trb, event, ep, status, false);
3abeca99 1968 case COMP_STOP:
40a3b775
LB
1969 /* Did we stop at data stage? */
1970 if (event_trb != ep_ring->dequeue && event_trb != td->last_trb)
1971 td->urb->actual_length =
1972 td->urb->transfer_buffer_length -
1973 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
1974 /* fall through */
1975 case COMP_STOP_INVAL:
3abeca99 1976 return finish_td(xhci, td, event_trb, event, ep, status, false);
8af56be1
AX
1977 default:
1978 if (!xhci_requires_manual_halt_cleanup(xhci,
1979 ep_ctx, trb_comp_code))
1980 break;
1981 xhci_dbg(xhci, "TRB error code %u, "
1982 "halted endpoint index = %u\n",
1983 trb_comp_code, ep_index);
1984 /* else fall through */
1985 case COMP_STALL:
1986 /* Did we transfer part of the data (middle) phase? */
1987 if (event_trb != ep_ring->dequeue &&
1988 event_trb != td->last_trb)
1989 td->urb->actual_length =
1c11a172
VG
1990 td->urb->transfer_buffer_length -
1991 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
22ae47e6 1992 else if (!td->urb_length_set)
8af56be1
AX
1993 td->urb->actual_length = 0;
1994
8e71a322 1995 return finish_td(xhci, td, event_trb, event, ep, status, false);
8af56be1
AX
1996 }
1997 /*
1998 * Did we transfer any data, despite the errors that might have
1999 * happened? I.e. did we get past the setup stage?
2000 */
2001 if (event_trb != ep_ring->dequeue) {
2002 /* The event was for the status stage */
2003 if (event_trb == td->last_trb) {
52ab8685 2004 if (!td->urb_length_set) {
8af56be1
AX
2005 td->urb->actual_length =
2006 td->urb->transfer_buffer_length;
2007 }
2008 } else {
45ba2154
AM
2009 /*
2010 * Maybe the event was for the data stage? If so, update
2011 			 * the actual_length of the URB right away and flag it as
2012 * set, so that it is not overwritten in the event for
2013 * the last TRB.
2014 */
2015 td->urb_length_set = true;
3abeca99
SS
2016 td->urb->actual_length =
2017 td->urb->transfer_buffer_length -
1c11a172 2018 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
3abeca99
SS
2019 xhci_dbg(xhci, "Waiting for status "
2020 "stage event\n");
2021 return 0;
8af56be1
AX
2022 }
2023 }
2024
2025 return finish_td(xhci, td, event_trb, event, ep, status, false);
2026}
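/* A sketch of the length bookkeeping above, with made-up numbers: for a
 * control IN transfer requesting 64 bytes, if the data stage event
 * reports 14 bytes untransferred, actual_length is set to 64 - 14 = 50
 * and urb_length_set is flagged, so the later status stage event does
 * not overwrite it with the full 64 bytes.
 */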
2027
04e51901
AX
2028/*
2029 * Process isochronous tds, update urb packet status and actual_length.
2030 */
2031static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
2032 union xhci_trb *event_trb, struct xhci_transfer_event *event,
2033 struct xhci_virt_ep *ep, int *status)
2034{
2035 struct xhci_ring *ep_ring;
2036 struct urb_priv *urb_priv;
2037 int idx;
2038 int len = 0;
04e51901
AX
2039 union xhci_trb *cur_trb;
2040 struct xhci_segment *cur_seg;
926008c9 2041 struct usb_iso_packet_descriptor *frame;
04e51901 2042 u32 trb_comp_code;
926008c9 2043 bool skip_td = false;
04e51901 2044
28ccd296
ME
2045 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
2046 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
04e51901
AX
2047 urb_priv = td->urb->hcpriv;
2048 idx = urb_priv->td_cnt;
926008c9 2049 frame = &td->urb->iso_frame_desc[idx];
04e51901 2050
926008c9
DT
2051 /* handle completion code */
2052 switch (trb_comp_code) {
2053 case COMP_SUCCESS:
1c11a172 2054 if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) {
1530bbc6
SS
2055 frame->status = 0;
2056 break;
2057 }
2058 if ((xhci->quirks & XHCI_TRUST_TX_LENGTH))
2059 trb_comp_code = COMP_SHORT_TX;
40a3b775
LB
2060 /* fallthrough */
2061 case COMP_STOP_SHORT:
926008c9
DT
2062 case COMP_SHORT_TX:
2063 frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
2064 -EREMOTEIO : 0;
2065 break;
2066 case COMP_BW_OVER:
2067 frame->status = -ECOMM;
2068 skip_td = true;
2069 break;
2070 case COMP_BUFF_OVER:
2071 case COMP_BABBLE:
2072 frame->status = -EOVERFLOW;
2073 skip_td = true;
2074 break;
f6ba6fe2 2075 case COMP_DEV_ERR:
926008c9 2076 case COMP_STALL:
d104d015
MN
2077 frame->status = -EPROTO;
2078 skip_td = true;
2079 break;
9c745995 2080 case COMP_TX_ERR:
926008c9 2081 frame->status = -EPROTO;
d104d015
MN
2082 if (event_trb != td->last_trb)
2083 return 0;
926008c9
DT
2084 skip_td = true;
2085 break;
2086 case COMP_STOP:
2087 case COMP_STOP_INVAL:
2088 break;
2089 default:
2090 frame->status = -1;
2091 break;
04e51901
AX
2092 }
2093
926008c9
DT
2094 if (trb_comp_code == COMP_SUCCESS || skip_td) {
2095 frame->actual_length = frame->length;
2096 td->urb->actual_length += frame->length;
40a3b775
LB
2097 } else if (trb_comp_code == COMP_STOP_SHORT) {
2098 frame->actual_length =
2099 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2100 td->urb->actual_length += frame->actual_length;
04e51901
AX
2101 } else {
2102 for (cur_trb = ep_ring->dequeue,
2103 cur_seg = ep_ring->deq_seg; cur_trb != event_trb;
2104 next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
0ce57499 2105 if (!trb_is_noop(cur_trb) && !trb_is_link(cur_trb))
28ccd296 2106 len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
04e51901 2107 }
28ccd296 2108 len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
1c11a172 2109 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
04e51901
AX
2110
2111 if (trb_comp_code != COMP_STOP_INVAL) {
926008c9 2112 frame->actual_length = len;
04e51901
AX
2113 td->urb->actual_length += len;
2114 }
2115 }
2116
04e51901
AX
2117 return finish_td(xhci, td, event_trb, event, ep, status, false);
2118}
2119
926008c9
DT
2120static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
2121 struct xhci_transfer_event *event,
2122 struct xhci_virt_ep *ep, int *status)
2123{
2124 struct xhci_ring *ep_ring;
2125 struct urb_priv *urb_priv;
2126 struct usb_iso_packet_descriptor *frame;
2127 int idx;
2128
f6975314 2129 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
926008c9
DT
2130 urb_priv = td->urb->hcpriv;
2131 idx = urb_priv->td_cnt;
2132 frame = &td->urb->iso_frame_desc[idx];
2133
b3df3f9c 2134 /* The transfer is partly done. */
926008c9
DT
2135 frame->status = -EXDEV;
2136
2137 /* calc actual length */
2138 frame->actual_length = 0;
2139
2140 /* Update ring dequeue pointer */
2141 while (ep_ring->dequeue != td->last_trb)
3b72fca0
AX
2142 inc_deq(xhci, ep_ring);
2143 inc_deq(xhci, ep_ring);
926008c9
DT
2144
2145 return finish_td(xhci, td, NULL, event, ep, status, true);
2146}
2147
22405ed2
AX
2148/*
2149 * Process bulk and interrupt tds, update urb status and actual_length.
2150 */
2151static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
2152 union xhci_trb *event_trb, struct xhci_transfer_event *event,
2153 struct xhci_virt_ep *ep, int *status)
2154{
2155 struct xhci_ring *ep_ring;
2156 union xhci_trb *cur_trb;
2157 struct xhci_segment *cur_seg;
2158 u32 trb_comp_code;
2159
28ccd296
ME
2160 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
2161 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
22405ed2
AX
2162
2163 switch (trb_comp_code) {
2164 case COMP_SUCCESS:
2165 /* Double check that the HW transferred everything. */
1530bbc6 2166 if (event_trb != td->last_trb ||
1c11a172 2167 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
52ab8685 2168 xhci_warn(xhci, "WARN Successful completion on short TX\n");
1530bbc6
SS
2169 if ((xhci->quirks & XHCI_TRUST_TX_LENGTH))
2170 trb_comp_code = COMP_SHORT_TX;
22405ed2 2171 }
52ab8685 2172 *status = 0;
22405ed2 2173 break;
40a3b775 2174 case COMP_STOP_SHORT:
22405ed2 2175 case COMP_SHORT_TX:
52ab8685 2176 *status = 0;
22405ed2
AX
2177 break;
2178 default:
2179 /* Others already handled above */
2180 break;
2181 }
f444ff27
SS
2182 if (trb_comp_code == COMP_SHORT_TX)
2183 xhci_dbg(xhci, "ep %#x - asked for %d bytes, "
2184 "%d bytes untransferred\n",
2185 td->urb->ep->desc.bEndpointAddress,
2186 td->urb->transfer_buffer_length,
1c11a172 2187 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
40a3b775
LB
2188 /* Stopped - short packet completion */
2189 if (trb_comp_code == COMP_STOP_SHORT) {
2190 td->urb->actual_length =
2191 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2192
2193 if (td->urb->transfer_buffer_length <
2194 td->urb->actual_length) {
2195 xhci_warn(xhci, "HC gave bad length of %d bytes txed\n",
2196 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
2197 td->urb->actual_length = 0;
2198 /* status will be set by usb core for canceled urbs */
2199 }
22405ed2 2200 /* Fast path - was this the last TRB in the TD for this URB? */
40a3b775 2201 } else if (event_trb == td->last_trb) {
1c11a172 2202 if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
22405ed2
AX
2203 td->urb->actual_length =
2204 td->urb->transfer_buffer_length -
1c11a172 2205 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
22405ed2
AX
2206 if (td->urb->transfer_buffer_length <
2207 td->urb->actual_length) {
52ab8685 2208 xhci_warn(xhci, "HC gave bad length of %d bytes left\n",
1c11a172 2209 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
22405ed2 2210 td->urb->actual_length = 0;
52ab8685 2211 *status = 0;
22405ed2
AX
2212 }
2213 /* Don't overwrite a previously set error code */
52ab8685 2214 if (*status == -EINPROGRESS)
22405ed2
AX
2215 *status = 0;
2216 }
2217 } else {
2218 /* Slow path - walk the list, starting from the dequeue
2219 * pointer, to get the actual length transferred.
2220 */
2221 td->urb->actual_length = 0;
2222 for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
2223 cur_trb != event_trb;
2224 next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
0ce57499 2225 if (!trb_is_noop(cur_trb) && !trb_is_link(cur_trb))
22405ed2 2226 td->urb->actual_length +=
28ccd296 2227 TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
22405ed2
AX
2228 }
2229 /* If the ring didn't stop on a Link or No-op TRB, add
2230 * in the actual bytes transferred from the Normal TRB
2231 */
2232 if (trb_comp_code != COMP_STOP_INVAL)
2233 td->urb->actual_length +=
28ccd296 2234 TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
1c11a172 2235 EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
22405ed2
AX
2236 }
2237
2238 return finish_td(xhci, td, event_trb, event, ep, status, false);
2239}
2240
d0e96f5a
SS
2241/*
2242 * If this function returns an error condition, it means it got a Transfer
2243 * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
2244 * At this point, the host controller is probably hosed and should be reset.
2245 */
2246static int handle_tx_event(struct xhci_hcd *xhci,
2247 struct xhci_transfer_event *event)
ed384bd3
FB
2248 __releases(&xhci->lock)
2249 __acquires(&xhci->lock)
d0e96f5a
SS
2250{
2251 struct xhci_virt_device *xdev;
63a0d9ab 2252 struct xhci_virt_ep *ep;
d0e96f5a 2253 struct xhci_ring *ep_ring;
82d1009f 2254 unsigned int slot_id;
d0e96f5a 2255 int ep_index;
326b4810 2256 struct xhci_td *td = NULL;
d0e96f5a
SS
2257 dma_addr_t event_dma;
2258 struct xhci_segment *event_seg;
2259 union xhci_trb *event_trb;
326b4810 2260 struct urb *urb = NULL;
d0e96f5a 2261 int status = -EINPROGRESS;
8e51adcc 2262 struct urb_priv *urb_priv;
d115b048 2263 struct xhci_ep_ctx *ep_ctx;
c2d7b49f 2264 struct list_head *tmp;
66d1eebc 2265 u32 trb_comp_code;
4422da61 2266 int ret = 0;
c2d7b49f 2267 int td_num = 0;
3b4739b8 2268 bool handling_skipped_tds = false;
d0e96f5a 2269
28ccd296 2270 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
82d1009f 2271 xdev = xhci->devs[slot_id];
d0e96f5a
SS
2272 if (!xdev) {
2273 xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
9258c0b2 2274 xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
e910b440
SS
2275 (unsigned long long) xhci_trb_virt_to_dma(
2276 xhci->event_ring->deq_seg,
9258c0b2
SS
2277 xhci->event_ring->dequeue),
2278 lower_32_bits(le64_to_cpu(event->buffer)),
2279 upper_32_bits(le64_to_cpu(event->buffer)),
2280 le32_to_cpu(event->transfer_len),
2281 le32_to_cpu(event->flags));
2282 xhci_dbg(xhci, "Event ring:\n");
2283 xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
d0e96f5a
SS
2284 return -ENODEV;
2285 }
2286
2287 /* Endpoint ID is 1 based, our index is zero based */
28ccd296 2288 ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
63a0d9ab 2289 ep = &xdev->eps[ep_index];
28ccd296 2290 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
d115b048 2291 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
986a92d4 2292 if (!ep_ring ||
28ccd296
ME
2293 (le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
2294 EP_STATE_DISABLED) {
e9df17eb
SS
2295 xhci_err(xhci, "ERROR Transfer event for disabled endpoint "
2296 "or incorrect stream ring\n");
9258c0b2 2297 xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
e910b440
SS
2298 (unsigned long long) xhci_trb_virt_to_dma(
2299 xhci->event_ring->deq_seg,
9258c0b2
SS
2300 xhci->event_ring->dequeue),
2301 lower_32_bits(le64_to_cpu(event->buffer)),
2302 upper_32_bits(le64_to_cpu(event->buffer)),
2303 le32_to_cpu(event->transfer_len),
2304 le32_to_cpu(event->flags));
2305 xhci_dbg(xhci, "Event ring:\n");
2306 xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
d0e96f5a
SS
2307 return -ENODEV;
2308 }
2309
c2d7b49f
AX
2310 /* Count current td numbers if ep->skip is set */
2311 if (ep->skip) {
2312 list_for_each(tmp, &ep_ring->td_list)
2313 td_num++;
2314 }
2315
28ccd296
ME
2316 event_dma = le64_to_cpu(event->buffer);
2317 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
986a92d4 2318 /* Look for common error cases */
66d1eebc 2319 switch (trb_comp_code) {
b10de142
SS
2320 /* Skip codes that require special handling depending on
2321 * transfer type
2322 */
2323 case COMP_SUCCESS:
1c11a172 2324 if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0)
1530bbc6
SS
2325 break;
2326 if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
2327 trb_comp_code = COMP_SHORT_TX;
2328 else
8202ce2e
SS
2329 xhci_warn_ratelimited(xhci,
2330 "WARN Successful completion on short TX: needs XHCI_TRUST_TX_LENGTH quirk?\n");
b10de142
SS
2331 case COMP_SHORT_TX:
2332 break;
ae636747
SS
2333 case COMP_STOP:
2334 xhci_dbg(xhci, "Stopped on Transfer TRB\n");
2335 break;
2336 case COMP_STOP_INVAL:
2337 xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
2338 break;
40a3b775
LB
2339 case COMP_STOP_SHORT:
2340 xhci_dbg(xhci, "Stopped with short packet transfer detected\n");
2341 break;
b10de142 2342 case COMP_STALL:
2a9227a5 2343 xhci_dbg(xhci, "Stalled endpoint\n");
63a0d9ab 2344 ep->ep_state |= EP_HALTED;
b10de142
SS
2345 status = -EPIPE;
2346 break;
2347 case COMP_TRB_ERR:
2348 xhci_warn(xhci, "WARN: TRB error on endpoint\n");
2349 status = -EILSEQ;
2350 break;
ec74e403 2351 case COMP_SPLIT_ERR:
b10de142 2352 case COMP_TX_ERR:
2a9227a5 2353 xhci_dbg(xhci, "Transfer error on endpoint\n");
b10de142
SS
2354 status = -EPROTO;
2355 break;
4a73143c 2356 case COMP_BABBLE:
2a9227a5 2357 xhci_dbg(xhci, "Babble error on endpoint\n");
4a73143c
SS
2358 status = -EOVERFLOW;
2359 break;
b10de142
SS
2360 case COMP_DB_ERR:
2361 xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
2362 status = -ENOSR;
2363 break;
986a92d4
AX
2364 case COMP_BW_OVER:
2365 xhci_warn(xhci, "WARN: bandwidth overrun event on endpoint\n");
2366 break;
2367 case COMP_BUFF_OVER:
2368 xhci_warn(xhci, "WARN: buffer overrun event on endpoint\n");
2369 break;
2370 case COMP_UNDERRUN:
2371 /*
2372 * When the Isoch ring is empty, the xHC will generate
2373 * a Ring Overrun Event for IN Isoch endpoint or Ring
2374 * Underrun Event for OUT Isoch endpoint.
2375 */
2376 xhci_dbg(xhci, "underrun event on endpoint\n");
2377 if (!list_empty(&ep_ring->td_list))
2378 xhci_dbg(xhci, "Underrun Event for slot %d ep %d "
2379 "still with TDs queued?\n",
28ccd296
ME
2380 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2381 ep_index);
986a92d4
AX
2382 goto cleanup;
2383 case COMP_OVERRUN:
2384 xhci_dbg(xhci, "overrun event on endpoint\n");
2385 if (!list_empty(&ep_ring->td_list))
2386 xhci_dbg(xhci, "Overrun Event for slot %d ep %d "
2387 "still with TDs queued?\n",
28ccd296
ME
2388 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2389 ep_index);
986a92d4 2390 goto cleanup;
f6ba6fe2
AH
2391 case COMP_DEV_ERR:
2392 		xhci_warn(xhci, "WARN: detected an incompatible device\n");
2393 status = -EPROTO;
2394 break;
d18240db
AX
2395 case COMP_MISSED_INT:
2396 /*
2397 * When encounter missed service error, one or more isoc tds
2398 * may be missed by xHC.
2399 * Set skip flag of the ep_ring; Complete the missed tds as
2400 * short transfer when process the ep_ring next time.
2401 */
2402 ep->skip = true;
2403 xhci_dbg(xhci, "Miss service interval error, set skip flag\n");
2404 goto cleanup;
3b4739b8
MN
2405 case COMP_PING_ERR:
2406 ep->skip = true;
2407 xhci_dbg(xhci, "No Ping response error, Skip one Isoc TD\n");
2408 goto cleanup;
b10de142 2409 default:
b45b5069 2410 if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
5ad6a529
SS
2411 status = 0;
2412 break;
2413 }
86cd740a
MN
2414 xhci_warn(xhci, "ERROR Unknown event condition %u, HC probably busted\n",
2415 trb_comp_code);
986a92d4
AX
2416 goto cleanup;
2417 }
2418
d18240db
AX
2419 do {
2420 /* This TRB should be in the TD at the head of this ring's
2421 * TD list.
2422 */
2423 if (list_empty(&ep_ring->td_list)) {
a83d6755
SS
2424 /*
2425 * A stopped endpoint may generate an extra completion
2426 * event if the device was suspended. Don't print
2427 * warnings.
2428 */
2429 if (!(trb_comp_code == COMP_STOP ||
2430 trb_comp_code == COMP_STOP_INVAL)) {
2431 xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
2432 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2433 ep_index);
2434 xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
2435 (le32_to_cpu(event->flags) &
2436 TRB_TYPE_BITMASK)>>10);
2437 xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
2438 }
d18240db
AX
2439 if (ep->skip) {
2440 ep->skip = false;
2441 xhci_dbg(xhci, "td_list is empty while skip "
2442 "flag set. Clear skip flag.\n");
2443 }
2444 ret = 0;
2445 goto cleanup;
2446 }
986a92d4 2447
c2d7b49f
AX
2448 		/* We've skipped all the TDs on the ep ring when ep->skip is set */
2449 if (ep->skip && td_num == 0) {
2450 ep->skip = false;
2451 xhci_dbg(xhci, "All tds on the ep_ring skipped. "
2452 "Clear skip flag.\n");
2453 ret = 0;
2454 goto cleanup;
2455 }
2456
d18240db 2457 td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
c2d7b49f
AX
2458 if (ep->skip)
2459 td_num--;
926008c9 2460
d18240db 2461 /* Is this a TRB in the currently executing TD? */
cffb9be8
HG
2462 event_seg = trb_in_td(xhci, ep_ring->deq_seg, ep_ring->dequeue,
2463 td->last_trb, event_dma, false);
e1cf486d
AH
2464
2465 /*
2466 		 * Skip the Force Stopped Event. The event_trb (event_dma) of an FSE
2467 		 * is not in the current TD pointed to by ep_ring->dequeue, because
2468 		 * the hardware dequeue pointer is still at the previous TRB
2469 		 * of the current TD. The previous TRB may be a Link TRB or the
2470 		 * last TRB of the previous TD. The command completion handler
2471 		 * will take care of the rest.
2472 */
9a548863
HG
2473 if (!event_seg && (trb_comp_code == COMP_STOP ||
2474 trb_comp_code == COMP_STOP_INVAL)) {
e1cf486d
AH
2475 ret = 0;
2476 goto cleanup;
2477 }
2478
926008c9
DT
2479 if (!event_seg) {
2480 if (!ep->skip ||
2481 !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
ad808333
SS
2482 /* Some host controllers give a spurious
2483 * successful event after a short transfer.
2484 * Ignore it.
2485 */
ddba5cd0 2486 if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
ad808333
SS
2487 ep_ring->last_td_was_short) {
2488 ep_ring->last_td_was_short = false;
2489 ret = 0;
2490 goto cleanup;
2491 }
926008c9
DT
2492 /* HC is busted, give up! */
2493 xhci_err(xhci,
2494 "ERROR Transfer event TRB DMA ptr not "
cffb9be8
HG
2495 "part of current TD ep_index %d "
2496 "comp_code %u\n", ep_index,
2497 trb_comp_code);
2498 trb_in_td(xhci, ep_ring->deq_seg,
2499 ep_ring->dequeue, td->last_trb,
2500 event_dma, true);
926008c9
DT
2501 return -ESHUTDOWN;
2502 }
2503
2504 ret = skip_isoc_td(xhci, td, event, ep, &status);
2505 goto cleanup;
2506 }
ad808333
SS
2507 if (trb_comp_code == COMP_SHORT_TX)
2508 ep_ring->last_td_was_short = true;
2509 else
2510 ep_ring->last_td_was_short = false;
926008c9
DT
2511
2512 if (ep->skip) {
d18240db
AX
2513 xhci_dbg(xhci, "Found td. Clear skip flag.\n");
2514 ep->skip = false;
2515 }
678539cf 2516
926008c9
DT
2517 event_trb = &event_seg->trbs[(event_dma - event_seg->dma) /
2518 sizeof(*event_trb)];
2519 /*
2520 * No-op TRB should not trigger interrupts.
2521 * If event_trb is a no-op TRB, it means the
2522 * corresponding TD has been cancelled. Just ignore
2523 * the TD.
2524 */
0ce57499
MN
2525 if (trb_is_noop(event_trb)) {
2526 xhci_dbg(xhci, "event_trb is a no-op TRB. Skip it\n");
926008c9 2527 goto cleanup;
d18240db 2528 }
4422da61 2529
d18240db
AX
2530 /* Now update the urb's actual_length and give back to
2531 * the core
82d1009f 2532 */
d18240db
AX
2533 if (usb_endpoint_xfer_control(&td->urb->ep->desc))
2534 ret = process_ctrl_td(xhci, td, event_trb, event, ep,
2535 &status);
04e51901
AX
2536 else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
2537 ret = process_isoc_td(xhci, td, event_trb, event, ep,
2538 &status);
d18240db
AX
2539 else
2540 ret = process_bulk_intr_td(xhci, td, event_trb, event,
2541 ep, &status);
2542
2543cleanup:
3b4739b8
MN
2544
2545
2546 handling_skipped_tds = ep->skip &&
2547 trb_comp_code != COMP_MISSED_INT &&
2548 trb_comp_code != COMP_PING_ERR;
2549
d18240db 2550 /*
3b4739b8
MN
2551 * Do not update event ring dequeue pointer if we're in a loop
2552 * processing missed tds.
d18240db 2553 */
3b4739b8 2554 if (!handling_skipped_tds)
3b72fca0 2555 inc_deq(xhci, xhci->event_ring);
d18240db
AX
2556
2557 if (ret) {
2558 urb = td->urb;
8e51adcc 2559 urb_priv = urb->hcpriv;
8e71a322 2560
4daf9df5 2561 xhci_urb_free_priv(urb_priv);
d18240db 2562
214f76f7 2563 usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
f444ff27
SS
2564 if ((urb->actual_length != urb->transfer_buffer_length &&
2565 (urb->transfer_flags &
2566 URB_SHORT_NOT_OK)) ||
fd984d24
SS
2567 (status != 0 &&
2568 !usb_endpoint_xfer_isoc(&urb->ep->desc)))
f444ff27 2569 xhci_dbg(xhci, "Giveback URB %p, len = %d, "
1949f9e2 2570 "expected = %d, status = %d\n",
f444ff27
SS
2571 urb, urb->actual_length,
2572 urb->transfer_buffer_length,
2573 status);
d18240db 2574 spin_unlock(&xhci->lock);
b3df3f9c
SS
2575 /* EHCI, UHCI, and OHCI always unconditionally set the
2576 * urb->status of an isochronous endpoint to 0.
2577 */
2578 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
2579 status = 0;
214f76f7 2580 usb_hcd_giveback_urb(bus_to_hcd(urb->dev->bus), urb, status);
d18240db
AX
2581 spin_lock(&xhci->lock);
2582 }
2583
2584 /*
2585 	 * If ep->skip is set, it means there are missed TDs on the
2586 	 * endpoint ring that need to be taken care of.
2587 	 * Process them as short transfers until we reach the TD pointed to
2588 	 * by the event.
2589 */
3b4739b8 2590 } while (handling_skipped_tds);
d18240db 2591
d0e96f5a
SS
2592 return 0;
2593}
2594
0f2a7930
SS
2595/*
2596 * This function handles all OS-owned events on the event ring. It may drop
2597 * xhci->lock between event processing (e.g. to pass up port status changes).
9dee9a21
ME
2598 * Returns >0 for "possibly more events to process" (caller should call again),
2599 * otherwise 0 if done. In future, <0 returns should indicate error code.
0f2a7930 2600 */
9dee9a21 2601static int xhci_handle_event(struct xhci_hcd *xhci)
7f84eef0
SS
2602{
2603 union xhci_trb *event;
0f2a7930 2604 int update_ptrs = 1;
d0e96f5a 2605 int ret;
7f84eef0
SS
2606
2607 if (!xhci->event_ring || !xhci->event_ring->dequeue) {
2608 xhci->error_bitmask |= 1 << 1;
9dee9a21 2609 return 0;
7f84eef0
SS
2610 }
2611
2612 event = xhci->event_ring->dequeue;
2613 /* Does the HC or OS own the TRB? */
28ccd296
ME
2614 if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
2615 xhci->event_ring->cycle_state) {
7f84eef0 2616 xhci->error_bitmask |= 1 << 2;
9dee9a21 2617 return 0;
7f84eef0
SS
2618 }
2619
92a3da41
ME
2620 /*
2621 * Barrier between reading the TRB_CYCLE (valid) flag above and any
2622 * speculative reads of the event's flags/data below.
2623 */
2624 rmb();
0f2a7930 2625 /* FIXME: Handle more event types. */
28ccd296 2626 switch ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK)) {
7f84eef0
SS
2627 case TRB_TYPE(TRB_COMPLETION):
2628 handle_cmd_completion(xhci, &event->event_cmd);
2629 break;
0f2a7930
SS
2630 case TRB_TYPE(TRB_PORT_STATUS):
2631 handle_port_status(xhci, event);
2632 update_ptrs = 0;
2633 break;
d0e96f5a
SS
2634 case TRB_TYPE(TRB_TRANSFER):
2635 ret = handle_tx_event(xhci, &event->trans_event);
2636 if (ret < 0)
2637 xhci->error_bitmask |= 1 << 9;
2638 else
2639 update_ptrs = 0;
2640 break;
623bef9e
SS
2641 case TRB_TYPE(TRB_DEV_NOTE):
2642 handle_device_notification(xhci, event);
2643 break;
7f84eef0 2644 default:
28ccd296
ME
2645 if ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) >=
2646 TRB_TYPE(48))
0238634d
SS
2647 handle_vendor_event(xhci, event);
2648 else
2649 xhci->error_bitmask |= 1 << 3;
7f84eef0 2650 }
6f5165cf
SS
2651 /* Any of the above functions may drop and re-acquire the lock, so check
2652 * to make sure a watchdog timer didn't mark the host as non-responsive.
2653 */
2654 if (xhci->xhc_state & XHCI_STATE_DYING) {
2655 xhci_dbg(xhci, "xHCI host dying, returning from "
2656 "event handler.\n");
9dee9a21 2657 return 0;
6f5165cf 2658 }
7f84eef0 2659
c06d68b8
SS
2660 if (update_ptrs)
2661 /* Update SW event ring dequeue pointer */
3b72fca0 2662 inc_deq(xhci, xhci->event_ring);
c06d68b8 2663
9dee9a21
ME
2664 /* Are there more items on the event ring? Caller will call us again to
2665 * check.
2666 */
2667 return 1;
7f84eef0 2668}
9032cd52
SS
2669
2670/*
2671 * xHCI spec says we can get an interrupt, and if the HC has an error condition,
2672 * we might get bad data out of the event ring. Section 4.10.2.7 has a list of
2673 * indicators of an event TRB error, but we check the status *first* to be safe.
2674 */
2675irqreturn_t xhci_irq(struct usb_hcd *hcd)
2676{
2677 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
c21599a3 2678 u32 status;
bda53145 2679 u64 temp_64;
c06d68b8
SS
2680 union xhci_trb *event_ring_deq;
2681 dma_addr_t deq;
9032cd52
SS
2682
2683 spin_lock(&xhci->lock);
9032cd52 2684 /* Check if the xHC generated the interrupt, or the irq is shared */
b0ba9720 2685 status = readl(&xhci->op_regs->status);
c21599a3 2686 if (status == 0xffffffff)
9032cd52
SS
2687 goto hw_died;
2688
c21599a3 2689 if (!(status & STS_EINT)) {
9032cd52 2690 spin_unlock(&xhci->lock);
9032cd52
SS
2691 return IRQ_NONE;
2692 }
27e0dd4d 2693 if (status & STS_FATAL) {
9032cd52
SS
2694 xhci_warn(xhci, "WARNING: Host System Error\n");
2695 xhci_halt(xhci);
2696hw_died:
9032cd52 2697 spin_unlock(&xhci->lock);
948fa135 2698 return IRQ_HANDLED;
9032cd52
SS
2699 }
2700
bda53145
SS
2701 /*
2702 * Clear the op reg interrupt status first,
2703 * so we can receive interrupts from other MSI-X interrupters.
2704 * Write 1 to clear the interrupt status.
2705 */
27e0dd4d 2706 status |= STS_EINT;
204b7793 2707 writel(status, &xhci->op_regs->status);
bda53145
SS
2708 /* FIXME when MSI-X is supported and there are multiple vectors */
2709 /* Clear the MSI-X event interrupt status */
2710
cd70469d 2711 if (hcd->irq) {
c21599a3
SS
2712 u32 irq_pending;
2713 /* Acknowledge the PCI interrupt */
b0ba9720 2714 irq_pending = readl(&xhci->ir_set->irq_pending);
4e833c0b 2715 irq_pending |= IMAN_IP;
204b7793 2716 writel(irq_pending, &xhci->ir_set->irq_pending);
c21599a3 2717 }
bda53145 2718
27a41a83
GKB
2719 if (xhci->xhc_state & XHCI_STATE_DYING ||
2720 xhci->xhc_state & XHCI_STATE_HALTED) {
bda53145
SS
2721 xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
2722 "Shouldn't IRQs be disabled?\n");
c06d68b8
SS
2723 /* Clear the event handler busy flag (RW1C);
2724 * the event ring should be empty.
bda53145 2725 */
f7b2e403 2726 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
477632df
SS
2727 xhci_write_64(xhci, temp_64 | ERST_EHB,
2728 &xhci->ir_set->erst_dequeue);
c06d68b8
SS
2729 spin_unlock(&xhci->lock);
2730
2731 return IRQ_HANDLED;
2732 }
2733
2734 event_ring_deq = xhci->event_ring->dequeue;
2735 /* FIXME this should be a delayed service routine
2736 * that clears the EHB.
2737 */
9dee9a21 2738 while (xhci_handle_event(xhci) > 0) {}
bda53145 2739
f7b2e403 2740 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
c06d68b8
SS
2741 /* If necessary, update the HW's version of the event ring deq ptr. */
2742 if (event_ring_deq != xhci->event_ring->dequeue) {
2743 deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
2744 xhci->event_ring->dequeue);
2745 if (deq == 0)
2746 xhci_warn(xhci, "WARN something wrong with SW event "
2747 "ring dequeue ptr.\n");
2748 /* Update HC event ring dequeue pointer */
2749 temp_64 &= ERST_PTR_MASK;
2750 temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
2751 }
2752
2753 /* Clear the event handler busy flag (RW1C); event ring is empty. */
2754 temp_64 |= ERST_EHB;
477632df 2755 xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
c06d68b8 2756
9032cd52
SS
2757 spin_unlock(&xhci->lock);
2758
2759 return IRQ_HANDLED;
2760}
2761
851ec164 2762irqreturn_t xhci_msi_irq(int irq, void *hcd)
9032cd52 2763{
968b822c 2764 return xhci_irq(hcd);
9032cd52 2765}
7f84eef0 2766
d0e96f5a
SS
2767/**** Endpoint Ring Operations ****/
2768
7f84eef0
SS
2769/*
2770 * Generic function for queueing a TRB on a ring.
2771 * The caller must have checked to make sure there's room on the ring.
6cc30d85
SS
2772 *
2773 * @more_trbs_coming: Will you enqueue more TRBs before calling
2774 * prepare_transfer()?
7f84eef0
SS
2775 */
2776static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
3b72fca0 2777 bool more_trbs_coming,
7f84eef0
SS
2778 u32 field1, u32 field2, u32 field3, u32 field4)
2779{
2780 struct xhci_generic_trb *trb;
2781
2782 trb = &ring->enqueue->generic;
28ccd296
ME
2783 trb->field[0] = cpu_to_le32(field1);
2784 trb->field[1] = cpu_to_le32(field2);
2785 trb->field[2] = cpu_to_le32(field3);
2786 trb->field[3] = cpu_to_le32(field4);
3b72fca0 2787 inc_enq(xhci, ring, more_trbs_coming);
7f84eef0
SS
2788}
2789
d0e96f5a
SS
2790/*
2791 * Does various checks on the endpoint ring, and makes it ready to queue num_trbs.
2792 * FIXME allocate segments if the ring is full.
2793 */
2794static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
3b72fca0 2795 u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
d0e96f5a 2796{
8dfec614
AX
2797 unsigned int num_trbs_needed;
2798
d0e96f5a 2799 /* Make sure the endpoint has been added to xHC schedule */
d0e96f5a
SS
2800 switch (ep_state) {
2801 case EP_STATE_DISABLED:
2802 /*
2803 * USB core changed config/interfaces without notifying us,
2804 * or hardware is reporting the wrong state.
2805 */
2806 xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
2807 return -ENOENT;
d0e96f5a 2808 case EP_STATE_ERROR:
c92bcfa7 2809 xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
d0e96f5a
SS
2810 /* FIXME event handling code for error needs to clear it */
2811 /* XXX not sure if this should be -ENOENT or not */
2812 return -EINVAL;
c92bcfa7
SS
2813 case EP_STATE_HALTED:
2814 xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
d0e96f5a
SS
2815 case EP_STATE_STOPPED:
2816 case EP_STATE_RUNNING:
2817 break;
2818 default:
2819 xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
2820 /*
2821 * FIXME issue Configure Endpoint command to try to get the HC
2822 * back into a known state.
2823 */
2824 return -EINVAL;
2825 }
8dfec614
AX
2826
2827 while (1) {
3d4b81ed
SS
2828 if (room_on_ring(xhci, ep_ring, num_trbs))
2829 break;
8dfec614
AX
2830
2831 if (ep_ring == xhci->cmd_ring) {
2832 xhci_err(xhci, "Do not support expand command ring\n");
2833 return -ENOMEM;
2834 }
2835
68ffb011
XR
2836 xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
2837 "ERROR no room on ep ring, try ring expansion");
8dfec614
AX
2838 num_trbs_needed = num_trbs - ep_ring->num_trbs_free;
2839 if (xhci_ring_expansion(xhci, ep_ring, num_trbs_needed,
2840 mem_flags)) {
2841 xhci_err(xhci, "Ring expansion failed\n");
2842 return -ENOMEM;
2843 }
261fa12b 2844 }
6c12db90 2845
d0c77d84
MN
2846 while (trb_is_link(ep_ring->enqueue)) {
2847 /* If we're not dealing with 0.95 hardware or isoc rings
2848 * on AMD 0.96 host, clear the chain bit.
2849 */
2850 if (!xhci_link_trb_quirk(xhci) &&
2851 !(ep_ring->type == TYPE_ISOC &&
2852 (xhci->quirks & XHCI_AMD_0x96_HOST)))
2853 ep_ring->enqueue->link.control &=
2854 cpu_to_le32(~TRB_CHAIN);
2855 else
2856 ep_ring->enqueue->link.control |=
2857 cpu_to_le32(TRB_CHAIN);
6c12db90 2858
d0c77d84
MN
2859 wmb();
2860 ep_ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE);
6c12db90 2861
d0c77d84
MN
2862 /* Toggle the cycle bit after the last ring segment. */
2863 if (link_trb_toggles_cycle(ep_ring->enqueue))
2864 ep_ring->cycle_state ^= 1;
6c12db90 2865
d0c77d84
MN
2866 ep_ring->enq_seg = ep_ring->enq_seg->next;
2867 ep_ring->enqueue = ep_ring->enq_seg->trbs;
6c12db90 2868 }
d0e96f5a
SS
2869 return 0;
2870}
2871
23e3be11 2872static int prepare_transfer(struct xhci_hcd *xhci,
d0e96f5a
SS
2873 struct xhci_virt_device *xdev,
2874 unsigned int ep_index,
e9df17eb 2875 unsigned int stream_id,
d0e96f5a
SS
2876 unsigned int num_trbs,
2877 struct urb *urb,
8e51adcc 2878 unsigned int td_index,
d0e96f5a
SS
2879 gfp_t mem_flags)
2880{
2881 int ret;
8e51adcc
AX
2882 struct urb_priv *urb_priv;
2883 struct xhci_td *td;
e9df17eb 2884 struct xhci_ring *ep_ring;
d115b048 2885 struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
e9df17eb
SS
2886
2887 ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id);
2888 if (!ep_ring) {
2889 xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
2890 stream_id);
2891 return -EINVAL;
2892 }
2893
2894 ret = prepare_ring(xhci, ep_ring,
28ccd296 2895 le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
3b72fca0 2896 num_trbs, mem_flags);
d0e96f5a
SS
2897 if (ret)
2898 return ret;
d0e96f5a 2899
8e51adcc
AX
2900 urb_priv = urb->hcpriv;
2901 td = urb_priv->td[td_index];
2902
2903 INIT_LIST_HEAD(&td->td_list);
2904 INIT_LIST_HEAD(&td->cancelled_td_list);
2905
2906 if (td_index == 0) {
214f76f7 2907 ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
d13565c1 2908 if (unlikely(ret))
8e51adcc 2909 return ret;
d0e96f5a
SS
2910 }
2911
8e51adcc 2912 td->urb = urb;
d0e96f5a 2913 /* Add this TD to the tail of the endpoint ring's TD list */
8e51adcc
AX
2914 list_add_tail(&td->td_list, &ep_ring->td_list);
2915 td->start_seg = ep_ring->enq_seg;
2916 td->first_trb = ep_ring->enqueue;
2917
2918 urb_priv->td[td_index] = td;
d0e96f5a
SS
2919
2920 return 0;
2921}
2922
d2510342
AI
2923static unsigned int count_trbs(u64 addr, u64 len)
2924{
2925 unsigned int num_trbs;
2926
2927 num_trbs = DIV_ROUND_UP(len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
2928 TRB_MAX_BUFF_SIZE);
2929 if (num_trbs == 0)
2930 num_trbs++;
2931
2932 return num_trbs;
2933}
2934
2935static inline unsigned int count_trbs_needed(struct urb *urb)
2936{
2937 return count_trbs(urb->transfer_dma, urb->transfer_buffer_length);
2938}
2939
2940static unsigned int count_sg_trbs_needed(struct urb *urb)
8a96c052 2941{
8a96c052 2942 struct scatterlist *sg;
d2510342 2943 unsigned int i, len, full_len, num_trbs = 0;
8a96c052 2944
d2510342 2945 full_len = urb->transfer_buffer_length;
8a96c052 2946
d2510342
AI
2947 for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
2948 len = sg_dma_len(sg);
2949 num_trbs += count_trbs(sg_dma_address(sg), len);
2950 len = min_t(unsigned int, len, full_len);
2951 full_len -= len;
2952 if (full_len == 0)
8a96c052
SS
2953 break;
2954 }
d2510342 2955
8a96c052
SS
2956 return num_trbs;
2957}
2958
d2510342
AI
2959static unsigned int count_isoc_trbs_needed(struct urb *urb, int i)
2960{
2961 u64 addr, len;
2962
2963 addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
2964 len = urb->iso_frame_desc[i].length;
2965
2966 return count_trbs(addr, len);
2967}
2968
2969static void check_trb_math(struct urb *urb, int running_total)
8a96c052 2970{
d2510342 2971 if (unlikely(running_total != urb->transfer_buffer_length))
a2490187 2972 dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
8a96c052
SS
2973 "queued %#x (%d), asked for %#x (%d)\n",
2974 __func__,
2975 urb->ep->desc.bEndpointAddress,
2976 running_total, running_total,
2977 urb->transfer_buffer_length,
2978 urb->transfer_buffer_length);
2979}
2980
23e3be11 2981static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
e9df17eb 2982 unsigned int ep_index, unsigned int stream_id, int start_cycle,
e1eab2e0 2983 struct xhci_generic_trb *start_trb)
8a96c052 2984{
8a96c052
SS
2985 /*
2986 * Pass all the TRBs to the hardware at once and make sure this write
2987 * isn't reordered.
2988 */
2989 wmb();
50f7b52a 2990 if (start_cycle)
28ccd296 2991 start_trb->field[3] |= cpu_to_le32(start_cycle);
50f7b52a 2992 else
28ccd296 2993 start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
be88fe4f 2994 xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
8a96c052
SS
2995}
2996
78140156
AI
2997static void check_interval(struct xhci_hcd *xhci, struct urb *urb,
2998 struct xhci_ep_ctx *ep_ctx)
624defa1 2999{
624defa1
SS
3000 int xhci_interval;
3001 int ep_interval;
3002
28ccd296 3003 xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
624defa1 3004 ep_interval = urb->interval;
78140156 3005
624defa1
SS
3006 /* Convert to microframes */
3007 if (urb->dev->speed == USB_SPEED_LOW ||
3008 urb->dev->speed == USB_SPEED_FULL)
3009 ep_interval *= 8;
78140156 3010
624defa1
SS
3011 /* FIXME change this to a warning and a suggestion to use the new API
3012 * to set the polling interval (once the API is added).
3013 */
3014 if (xhci_interval != ep_interval) {
0730d52a
DK
3015 dev_dbg_ratelimited(&urb->dev->dev,
3016 "Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n",
3017 ep_interval, ep_interval == 1 ? "" : "s",
3018 xhci_interval, xhci_interval == 1 ? "" : "s");
624defa1
SS
3019 urb->interval = xhci_interval;
3020 /* Convert back to frames for LS/FS devices */
3021 if (urb->dev->speed == USB_SPEED_LOW ||
3022 urb->dev->speed == USB_SPEED_FULL)
3023 urb->interval /= 8;
3024 }
78140156
AI
3025}
3026
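The unit bookkeeping here is easy to trip over: urb->interval is in frames for low/full-speed devices, while the endpoint context interval is in microframes (1 frame = 8 microframes), so the code converts up to compare and back down to store. A standalone sketch with made-up values:

	#include <stdio.h>

	int main(void)
	{
		int urb_interval_frames = 4;	/* hypothetical FS interrupt URB */
		int ep_interval_uframes = urb_interval_frames * 8;	/* 32 */
		int xhci_interval = 16;	/* what the endpoint context reports */

		/* Mismatch: adopt the xHC's value, converted back to frames */
		if (xhci_interval != ep_interval_uframes)
			urb_interval_frames = xhci_interval / 8;	/* 2 */

		printf("URB interval is now %d frames\n", urb_interval_frames);
		return 0;
	}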
3027/*
3028 * xHCI uses normal TRBs for both bulk and interrupt. When the interrupt
3029 * endpoint is to be serviced, the xHC will consume (at most) one TD. A TD
3030 * (comprised of sg list entries) can take several service intervals to
3031 * transmit.
3032 */
3033int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3034 struct urb *urb, int slot_id, unsigned int ep_index)
3035{
3036 struct xhci_ep_ctx *ep_ctx;
3037
3038 ep_ctx = xhci_get_ep_ctx(xhci, xhci->devs[slot_id]->out_ctx, ep_index);
3039 check_interval(xhci, urb, ep_ctx);
3040
3fc8206d 3041 return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index);
624defa1
SS
3042}
3043
4da6e6f2 3044/*
4525c0a1
SS
3045 * For xHCI 1.0 host controllers, TD size is the number of max packet sized
3046 * packets remaining in the TD (*not* including this TRB).
4da6e6f2
SS
3047 *
3048 * Total TD packet count = total_packet_count =
4525c0a1 3049 * DIV_ROUND_UP(TD size in bytes, wMaxPacketSize)
4da6e6f2
SS
3050 *
3051 * Packets transferred up to and including this TRB = packets_transferred =
3052 * rounddown(total bytes transferred including this TRB / wMaxPacketSize)
3053 *
3054 * TD size = total_packet_count - packets_transferred
3055 *
c840d6ce
MN
3056 * For xHCI 0.96 and older, TD size field should be the remaining bytes
3057 * including this TRB, right shifted by 10
3058 *
3059 * For all hosts it must fit in bits 21:17, so it can't be bigger than 31.
3060 * This is taken care of in the TRB_TD_SIZE() macro
3061 *
4525c0a1 3062 * The last TRB in a TD must have the TD size set to zero.
4da6e6f2 3063 */
c840d6ce
MN
3064static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
3065 int trb_buff_len, unsigned int td_total_len,
124c3937 3066 struct urb *urb, bool more_trbs_coming)
4da6e6f2 3067{
c840d6ce
MN
3068 u32 maxp, total_packet_count;
3069
0cbd4b34
CY
3070 /* MTK xHCI is mostly 0.97 but contains some features from 1.0 */
3071 if (xhci->hci_version < 0x100 && !(xhci->quirks & XHCI_MTK_HOST))
c840d6ce
MN
3072 return ((td_total_len - transferred) >> 10);
3073
48df4a6f 3074 /* One TRB with a zero-length data packet. */
124c3937 3075 if (!more_trbs_coming || (transferred == 0 && trb_buff_len == 0) ||
c840d6ce 3076 trb_buff_len == td_total_len)
48df4a6f
SS
3077 return 0;
3078
0cbd4b34
CY
3079 /* for MTK xHCI, TD size doesn't include this TRB */
3080 if (xhci->quirks & XHCI_MTK_HOST)
3081 trb_buff_len = 0;
3082
3083 maxp = GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
3084 total_packet_count = DIV_ROUND_UP(td_total_len, maxp);
3085
c840d6ce
MN
3086 /* Queueing functions don't count the current TRB into transferred */
3087 return (total_packet_count - ((transferred + trb_buff_len) / maxp));
4da6e6f2
SS
3088}
3089
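A worked example of the xHCI 1.0 TD-size math above, as a standalone sketch with made-up numbers (512-byte packets, a 3000-byte TD, first TRB covering 1024 bytes):

	#include <stdio.h>

	/* Mirrors the kernel's DIV_ROUND_UP() macro. */
	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned int td_total_len = 3000, maxp = 512;
		unsigned int transferred = 0, trb_buff_len = 1024;

		unsigned int total_packet_count =
			DIV_ROUND_UP(td_total_len, maxp);		/* 6 */
		unsigned int td_size = total_packet_count -
			(transferred + trb_buff_len) / maxp;		/* 6 - 2 = 4 */

		printf("TD size for this TRB: %u\n", td_size);
		return 0;
	}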
f9c589e1 3090
474ed23a 3091static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
f9c589e1 3092 u32 *trb_buff_len, struct xhci_segment *seg)
474ed23a 3093{
f9c589e1 3094 struct device *dev = xhci_to_hcd(xhci)->self.controller;
474ed23a
MN
3095 unsigned int unalign;
3096 unsigned int max_pkt;
f9c589e1 3097 u32 new_buff_len;
474ed23a
MN
3098
3099 max_pkt = GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
3100 unalign = (enqd_len + *trb_buff_len) % max_pkt;
3101
3102 /* we got lucky, last normal TRB data on segment is packet aligned */
3103 if (unalign == 0)
3104 return 0;
3105
f9c589e1
MN
3106 xhci_dbg(xhci, "Unaligned %d bytes, buff len %d\n",
3107 unalign, *trb_buff_len);
3108
474ed23a
MN
3109 /* is the last normal TRB alignable by splitting it? */
3110 if (*trb_buff_len > unalign) {
3111 *trb_buff_len -= unalign;
f9c589e1 3112 xhci_dbg(xhci, "split align, new buff len %d\n", *trb_buff_len);
474ed23a
MN
3113 return 0;
3114 }
f9c589e1
MN
3115
3116 /*
3117 * We want enqd_len + trb_buff_len to sum up to a number which is
3118 * divisible by the endpoint's wMaxPacketSize. IOW:
3119 * (size of currently enqueued TRBs + remainder) % wMaxPacketSize == 0.
3120 */
3121 new_buff_len = max_pkt - (enqd_len % max_pkt);
3122
3123 if (new_buff_len > (urb->transfer_buffer_length - enqd_len))
3124 new_buff_len = (urb->transfer_buffer_length - enqd_len);
3125
3126 /* create a max_pkt sized bounce buffer pointed to by the last TRB */
3127 if (usb_urb_dir_out(urb)) {
3128 sg_pcopy_to_buffer(urb->sg, urb->num_mapped_sgs,
3129 seg->bounce_buf, new_buff_len, enqd_len);
3130 seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
3131 max_pkt, DMA_TO_DEVICE);
3132 } else {
3133 seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
3134 max_pkt, DMA_FROM_DEVICE);
3135 }
3136
3137 if (dma_mapping_error(dev, seg->bounce_dma)) {
3138 /* try without aligning. Some host controllers survive */
3139 xhci_warn(xhci, "Failed mapping bounce buffer, not aligning\n");
3140 return 0;
3141 }
3142 *trb_buff_len = new_buff_len;
3143 seg->bounce_len = new_buff_len;
3144 seg->bounce_offs = enqd_len;
3145
3146 xhci_dbg(xhci, "Bounce align, new buff len %d\n", *trb_buff_len);
3147
474ed23a
MN
3148 return 1;
3149}
3150
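Two worked cases for the alignment logic above, as a standalone sketch with made-up numbers: when enough bytes remain in the segment the TRB is simply shortened, and when too few remain the tail goes through the bounce buffer so the segment still ends on a packet boundary.

	#include <stdio.h>

	int main(void)
	{
		unsigned int max_pkt = 512;

		/* Case 1: 1000 bytes queued, 300 left in segment -> split */
		unsigned int enqd_len = 1000, trb_buff_len = 300;
		unsigned int unalign = (enqd_len + trb_buff_len) % max_pkt; /* 276 */
		if (trb_buff_len > unalign)
			trb_buff_len -= unalign;  /* 24: 1000 + 24 = 2 packets */
		printf("split: new trb_buff_len %u\n", trb_buff_len);

		/* Case 2: 500 queued, only 10 left -> can't split, bounce */
		enqd_len = 500;
		trb_buff_len = 10;
		unalign = (enqd_len + trb_buff_len) % max_pkt;	/* 510 */
		if (trb_buff_len <= unalign) {
			unsigned int new_buff_len =
				max_pkt - (enqd_len % max_pkt);	/* 12 */
			printf("bounce: copy %u bytes via bounce buffer\n",
			       new_buff_len);
		}
		return 0;
	}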
d2510342
AI
3151/* This is very similar to what ehci-q.c qtd_fill() does */
3152int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
8a96c052
SS
3153 struct urb *urb, int slot_id, unsigned int ep_index)
3154{
5a5a0b1a 3155 struct xhci_ring *ring;
8e51adcc 3156 struct urb_priv *urb_priv;
8a96c052 3157 struct xhci_td *td;
d2510342
AI
3158 struct xhci_generic_trb *start_trb;
3159 struct scatterlist *sg = NULL;
5a83f04a
MN
3160 bool more_trbs_coming = true;
3161 bool need_zero_pkt = false;
86065c27
MN
3162 bool first_trb = true;
3163 unsigned int num_trbs;
d2510342 3164 unsigned int start_cycle, num_sgs = 0;
86065c27 3165 unsigned int enqd_len, block_len, trb_buff_len, full_len;
f9c589e1 3166 int sent_len, ret;
d2510342 3167 u32 field, length_field, remainder;
f9c589e1 3168 u64 addr, send_addr;
8a96c052 3169
5a5a0b1a
MN
3170 ring = xhci_urb_to_transfer_ring(xhci, urb);
3171 if (!ring)
e9df17eb
SS
3172 return -EINVAL;
3173
86065c27 3174 full_len = urb->transfer_buffer_length;
d2510342
AI
3175 /* If we have scatter/gather list, we use it. */
3176 if (urb->num_sgs) {
3177 num_sgs = urb->num_mapped_sgs;
3178 sg = urb->sg;
86065c27
MN
3179 addr = (u64) sg_dma_address(sg);
3180 block_len = sg_dma_len(sg);
d2510342 3181 num_trbs = count_sg_trbs_needed(urb);
86065c27 3182 } else {
d2510342 3183 num_trbs = count_trbs_needed(urb);
86065c27
MN
3184 addr = (u64) urb->transfer_dma;
3185 block_len = full_len;
3186 }
4758dcd1 3187 ret = prepare_transfer(xhci, xhci->devs[slot_id],
e9df17eb 3188 ep_index, urb->stream_id,
3b72fca0 3189 num_trbs, urb, 0, mem_flags);
d2510342 3190 if (unlikely(ret < 0))
4758dcd1 3191 return ret;
8e51adcc
AX
3192
3193 urb_priv = urb->hcpriv;
4758dcd1
RA
3194
3195 /* Deal with URB_ZERO_PACKET - need one more td/trb */
5a83f04a
MN
3196 if (urb->transfer_flags & URB_ZERO_PACKET && urb_priv->length > 1)
3197 need_zero_pkt = true;
4758dcd1 3198
8e51adcc
AX
3199 td = urb_priv->td[0];
3200
8a96c052
SS
3201 /*
3202 * Don't give the first TRB to the hardware (by toggling the cycle bit)
3203 * until we've finished creating all the other TRBs. The ring's cycle
3204 * state may change as we enqueue the other TRBs, so save it too.
3205 */
5a5a0b1a
MN
3206 start_trb = &ring->enqueue->generic;
3207 start_cycle = ring->cycle_state;
f9c589e1 3208 send_addr = addr;
8a96c052 3209
d2510342 3210 /* Queue the TRBs, even if they are zero-length */
0d2daade
AB
3211 for (enqd_len = 0; first_trb || enqd_len < full_len;
3212 enqd_len += trb_buff_len) {
d2510342 3213 field = TRB_TYPE(TRB_NORMAL);
af8b9e63 3214
86065c27
MN
3215 /* TRB buffer should not cross 64KB boundaries */
3216 trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
3217 trb_buff_len = min_t(unsigned int, trb_buff_len, block_len);
8a96c052 3218
86065c27
MN
3219 if (enqd_len + trb_buff_len > full_len)
3220 trb_buff_len = full_len - enqd_len;
b10de142
SS
3221
3222 /* Don't change the cycle bit of the first TRB until later */
86065c27
MN
3223 if (first_trb) {
3224 first_trb = false;
50f7b52a 3225 if (start_cycle == 0)
d2510342 3226 field |= TRB_CYCLE;
50f7b52a 3227 } else
5a5a0b1a 3228 field |= ring->cycle_state;
b10de142
SS
3229
3230 /* Chain all the TRBs together; clear the chain bit in the last
3231 * TRB to indicate it's the last TRB in the chain.
3232 */
86065c27 3233 if (enqd_len + trb_buff_len < full_len) {
b10de142 3234 field |= TRB_CHAIN;
2d98ef40 3235 if (trb_is_link(ring->enqueue + 1)) {
474ed23a 3236 if (xhci_align_td(xhci, urb, enqd_len,
f9c589e1
MN
3237 &trb_buff_len,
3238 ring->enq_seg)) {
3239 send_addr = ring->enq_seg->bounce_dma;
3240 /* assuming TD won't span 2 segs */
3241 td->bounce_seg = ring->enq_seg;
3242 }
474ed23a 3243 }
f9c589e1
MN
3244 }
3245 if (enqd_len + trb_buff_len >= full_len) {
3246 field &= ~TRB_CHAIN;
4758dcd1 3247 field |= TRB_IOC;
124c3937 3248 more_trbs_coming = false;
5a83f04a 3249 td->last_trb = ring->enqueue;
b10de142 3250 }
af8b9e63
SS
3251
3252 /* Only set interrupt on short packet for IN endpoints */
3253 if (usb_urb_dir_in(urb))
3254 field |= TRB_ISP;
3255
4da6e6f2 3256 /* Set the TRB length, TD size, and interrupter fields. */
86065c27
MN
3257 remainder = xhci_td_remainder(xhci, enqd_len, trb_buff_len,
3258 full_len, urb, more_trbs_coming);
3259
f9dc68fe 3260 length_field = TRB_LEN(trb_buff_len) |
c840d6ce 3261 TRB_TD_SIZE(remainder) |
f9dc68fe 3262 TRB_INTR_TARGET(0);
4da6e6f2 3263
124c3937 3264 queue_trb(xhci, ring, more_trbs_coming | need_zero_pkt,
f9c589e1
MN
3265 lower_32_bits(send_addr),
3266 upper_32_bits(send_addr),
f9dc68fe 3267 length_field,
d2510342 3268 field);
b10de142 3269
b10de142 3270 addr += trb_buff_len;
f9c589e1 3271 sent_len = trb_buff_len;
d2510342 3272
f9c589e1 3273 while (sg && sent_len >= block_len) {
86065c27
MN
3274 /* New sg entry */
3275 --num_sgs;
f9c589e1 3276 sent_len -= block_len;
86065c27 3277 if (num_sgs != 0) {
d2510342 3278 sg = sg_next(sg);
86065c27
MN
3279 block_len = sg_dma_len(sg);
3280 addr = (u64) sg_dma_address(sg);
f9c589e1 3281 addr += sent_len;
d2510342
AI
3282 }
3283 }
f9c589e1
MN
3284 block_len -= sent_len;
3285 send_addr = addr;
d2510342 3286 }
b10de142 3287
5a83f04a
MN
3288 if (need_zero_pkt) {
3289 ret = prepare_transfer(xhci, xhci->devs[slot_id],
3290 ep_index, urb->stream_id,
3291 1, urb, 1, mem_flags);
3292 urb_priv->td[1]->last_trb = ring->enqueue;
3293 field = TRB_TYPE(TRB_NORMAL) | ring->cycle_state | TRB_IOC;
3294 queue_trb(xhci, ring, 0, 0, 0, TRB_INTR_TARGET(0), field);
3295 }
3296
86065c27 3297 check_trb_math(urb, enqd_len);
e9df17eb 3298 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
e1eab2e0 3299 start_cycle, start_trb);
b10de142
SS
3300 return 0;
3301}
3302
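The sent_len/block_len bookkeeping at the bottom of the queuing loop handles the case where xhci_align_td() shortened the TRB: the next iteration may have to resume partway through an sg entry. A simplified userspace model of that cursor walk, with hypothetical entry lengths:

	#include <stdio.h>

	int main(void)
	{
		unsigned int sg_len[3] = { 700, 300, 1000 };	/* made up */
		unsigned int idx = 0, block_len = sg_len[0], offset = 0;
		unsigned int sent_len = 900;	/* bytes the TRB actually covered */

		/* Mirror of the while (sg && sent_len >= block_len) walk */
		while (idx < 3 && sent_len >= block_len) {
			sent_len -= block_len;
			if (++idx < 3) {
				block_len = sg_len[idx];
				offset = sent_len; /* resume inside new entry */
			}
		}
		block_len -= sent_len;
		printf("next TRB: sg entry %u, offset %u, %u bytes left\n",
		       idx, offset, block_len);	/* entry 1, offset 200, 100 */
		return 0;
	}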
d0e96f5a 3303/* Caller must have locked xhci->lock */
23e3be11 3304int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
d0e96f5a
SS
3305 struct urb *urb, int slot_id, unsigned int ep_index)
3306{
3307 struct xhci_ring *ep_ring;
3308 int num_trbs;
3309 int ret;
3310 struct usb_ctrlrequest *setup;
3311 struct xhci_generic_trb *start_trb;
3312 int start_cycle;
c840d6ce 3313 u32 field, length_field, remainder;
8e51adcc 3314 struct urb_priv *urb_priv;
d0e96f5a
SS
3315 struct xhci_td *td;
3316
e9df17eb
SS
3317 ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
3318 if (!ep_ring)
3319 return -EINVAL;
d0e96f5a
SS
3320
3321 /*
3322 * Need to copy setup packet into setup TRB, so we can't use the setup
3323 * DMA address.
3324 */
3325 if (!urb->setup_packet)
3326 return -EINVAL;
3327
d0e96f5a
SS
3328 /* 1 TRB for setup, 1 for status */
3329 num_trbs = 2;
3330 /*
3331 * Don't need to check if we need additional event data and normal TRBs,
3332 * since data in control transfers will never get bigger than 16MB
3333 * XXX: can we get a buffer that crosses 64KB boundaries?
3334 */
3335 if (urb->transfer_buffer_length > 0)
3336 num_trbs++;
e9df17eb
SS
3337 ret = prepare_transfer(xhci, xhci->devs[slot_id],
3338 ep_index, urb->stream_id,
3b72fca0 3339 num_trbs, urb, 0, mem_flags);
d0e96f5a
SS
3340 if (ret < 0)
3341 return ret;
3342
8e51adcc
AX
3343 urb_priv = urb->hcpriv;
3344 td = urb_priv->td[0];
3345
d0e96f5a
SS
3346 /*
3347 * Don't give the first TRB to the hardware (by toggling the cycle bit)
3348 * until we've finished creating all the other TRBs. The ring's cycle
3349 * state may change as we enqueue the other TRBs, so save it too.
3350 */
3351 start_trb = &ep_ring->enqueue->generic;
3352 start_cycle = ep_ring->cycle_state;
3353
3354 /* Queue setup TRB - see section 6.4.1.2.1 */
3355 /* FIXME better way to translate setup_packet into two u32 fields? */
3356 setup = (struct usb_ctrlrequest *) urb->setup_packet;
50f7b52a
AX
3357 field = 0;
3358 field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
3359 if (start_cycle == 0)
3360 field |= 0x1;
b83cdc8f 3361
dca77945 3362 /* xHCI 1.0/1.1 6.4.1.2.1: Transfer Type field */
0cbd4b34 3363 if ((xhci->hci_version >= 0x100) || (xhci->quirks & XHCI_MTK_HOST)) {
b83cdc8f
AX
3364 if (urb->transfer_buffer_length > 0) {
3365 if (setup->bRequestType & USB_DIR_IN)
3366 field |= TRB_TX_TYPE(TRB_DATA_IN);
3367 else
3368 field |= TRB_TX_TYPE(TRB_DATA_OUT);
3369 }
3370 }
3371
3b72fca0 3372 queue_trb(xhci, ep_ring, true,
28ccd296
ME
3373 setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
3374 le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
3375 TRB_LEN(8) | TRB_INTR_TARGET(0),
3376 /* Immediate data in pointer */
3377 field);
d0e96f5a
SS
3378
3379 /* If there's data, queue data TRBs */
af8b9e63
SS
3380 /* Only set interrupt on short packet for IN endpoints */
3381 if (usb_urb_dir_in(urb))
3382 field = TRB_ISP | TRB_TYPE(TRB_DATA);
3383 else
3384 field = TRB_TYPE(TRB_DATA);
3385
c840d6ce
MN
3386 remainder = xhci_td_remainder(xhci, 0,
3387 urb->transfer_buffer_length,
3388 urb->transfer_buffer_length,
3389 urb, 1);
3390
f9dc68fe 3391 length_field = TRB_LEN(urb->transfer_buffer_length) |
c840d6ce 3392 TRB_TD_SIZE(remainder) |
f9dc68fe 3393 TRB_INTR_TARGET(0);
c840d6ce 3394
d0e96f5a
SS
3395 if (urb->transfer_buffer_length > 0) {
3396 if (setup->bRequestType & USB_DIR_IN)
3397 field |= TRB_DIR_IN;
3b72fca0 3398 queue_trb(xhci, ep_ring, true,
d0e96f5a
SS
3399 lower_32_bits(urb->transfer_dma),
3400 upper_32_bits(urb->transfer_dma),
f9dc68fe 3401 length_field,
af8b9e63 3402 field | ep_ring->cycle_state);
d0e96f5a
SS
3403 }
3404
3405 /* Save the DMA address of the last TRB in the TD */
3406 td->last_trb = ep_ring->enqueue;
3407
3408 /* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
3409 /* If the device sent data, the status stage is an OUT transfer */
3410 if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
3411 field = 0;
3412 else
3413 field = TRB_DIR_IN;
3b72fca0 3414 queue_trb(xhci, ep_ring, false,
d0e96f5a
SS
3415 0,
3416 0,
3417 TRB_INTR_TARGET(0),
3418 /* Event on completion */
3419 field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);
3420
e9df17eb 3421 giveback_first_trb(xhci, slot_id, ep_index, 0,
e1eab2e0 3422 start_cycle, start_trb);
d0e96f5a
SS
3423 return 0;
3424}
3425
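Because the setup stage uses immediate data (TRB_IDT), the 8-byte usb_ctrlrequest is packed straight into the first two TRB parameter words, as the queue_trb() call above shows. A standalone sketch of that packing for a standard GET_DESCRIPTOR(DEVICE) request (byte-order conversions omitted; field values per the USB 2.0 spec):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* GET_DESCRIPTOR(DEVICE), wLength = 18 */
		uint8_t  bRequestType = 0x80, bRequest = 0x06;
		uint16_t wValue = 0x0100, wIndex = 0, wLength = 18;

		uint32_t word0 = bRequestType | bRequest << 8 |
				 (uint32_t)wValue << 16;
		uint32_t word1 = wIndex | (uint32_t)wLength << 16;

		/* prints: TRB words: 0x01000680 0x00120000 */
		printf("TRB words: 0x%08x 0x%08x\n", word0, word1);
		return 0;
	}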
5cd43e33
SS
3426/*
3427 * The transfer burst count field of the isochronous TRB defines the number of
3428 * bursts that are required to move all packets in this TD. Only SuperSpeed
3429 * devices can burst up to bMaxBurst number of packets per service interval.
3430 * This field is zero based, meaning a value of zero in the field means one
3431 * burst. Basically, for everything but SuperSpeed devices, this field will be
3432 * zero. Only xHCI 1.0 host controllers support this field.
3433 */
3434static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
5cd43e33
SS
3435 struct urb *urb, unsigned int total_packet_count)
3436{
3437 unsigned int max_burst;
3438
09c352ed 3439 if (xhci->hci_version < 0x100 || urb->dev->speed < USB_SPEED_SUPER)
5cd43e33
SS
3440 return 0;
3441
3442 max_burst = urb->ep->ss_ep_comp.bMaxBurst;
3213b151 3443 return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
5cd43e33
SS
3444}
3445
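Worked example with made-up numbers: a SuperSpeed isoc endpoint with bMaxBurst = 2 moves up to 3 packets per burst, so a 7-packet TD needs DIV_ROUND_UP(7, 2 + 1) = 3 bursts, and the zero-based value queued in the TRB is 3 - 1 = 2.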
b61d378f
SS
3446/*
3447 * Returns the number of packets in the last "burst" of packets. This field is
3448 * valid for all speeds of devices. USB 2.0 devices can only do one "burst", so
3449 * the last burst packet count is equal to the total number of packets in the
3450 * TD. SuperSpeed endpoints can have up to 3 bursts. All but the last burst
3451 * must contain (bMaxBurst + 1) number of packets, but the last burst can
3452 * contain 1 to (bMaxBurst + 1) packets.
3453 */
3454static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
b61d378f
SS
3455 struct urb *urb, unsigned int total_packet_count)
3456{
3457 unsigned int max_burst;
3458 unsigned int residue;
3459
3460 if (xhci->hci_version < 0x100)
3461 return 0;
3462
09c352ed 3463 if (urb->dev->speed >= USB_SPEED_SUPER) {
b61d378f
SS
3464 /* bMaxBurst is zero based: 0 means 1 packet per burst */
3465 max_burst = urb->ep->ss_ep_comp.bMaxBurst;
3466 residue = total_packet_count % (max_burst + 1);
3467 /* If residue is zero, the last burst contains (max_burst + 1)
3468 * number of packets, but the TLBPC field is zero-based.
3469 */
3470 if (residue == 0)
3471 return max_burst;
3472 return residue - 1;
b61d378f 3473 }
09c352ed
MN
3474 if (total_packet_count == 0)
3475 return 0;
3476 return total_packet_count - 1;
b61d378f
SS
3477}
3478
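Continuing the made-up example above: 7 packets at 3 per burst leave a residue of 7 % 3 = 1, so the last burst carries a single packet and the zero-based TLBPC is 0; had the residue been 0, the last burst would be full and TLBPC would equal bMaxBurst = 2.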
79b8094f
LB
3479/*
3480 * Calculates the Frame ID field of the isochronous TRB, which identifies
3481 * the target frame that the Interval associated with this Isochronous
3482 * Transfer Descriptor will start on. Refer to 4.11.2.5 in 1.1 spec.
3483 *
3484 * Returns actual frame id on success, negative value on error.
3485 */
3486static int xhci_get_isoc_frame_id(struct xhci_hcd *xhci,
3487 struct urb *urb, int index)
3488{
3489 int start_frame, ist, ret = 0;
3490 int start_frame_id, end_frame_id, current_frame_id;
3491
3492 if (urb->dev->speed == USB_SPEED_LOW ||
3493 urb->dev->speed == USB_SPEED_FULL)
3494 start_frame = urb->start_frame + index * urb->interval;
3495 else
3496 start_frame = (urb->start_frame + index * urb->interval) >> 3;
3497
3498 /* Isochronous Scheduling Threshold (IST, bits 0~3 in HCSPARAMS2):
3499 *
3500 * If bit [3] of IST is cleared to '0', software can add a TRB no
3501 * later than IST[2:0] Microframes before that TRB is scheduled to
3502 * be executed.
3503 * If bit [3] of IST is set to '1', software can add a TRB no later
3504 * than IST[2:0] Frames before that TRB is scheduled to be executed.
3505 */
3506 ist = HCS_IST(xhci->hcs_params2) & 0x7;
3507 if (HCS_IST(xhci->hcs_params2) & (1 << 3))
3508 ist <<= 3;
3509
3510 /* Software shall not schedule an Isoch TD with a Frame ID value that
3511 * is less than the Start Frame ID or greater than the End Frame ID,
3512 * where:
3513 *
3514 * End Frame ID = (Current MFINDEX register value + 895 ms.) MOD 2048
3515 * Start Frame ID = (Current MFINDEX register value + IST + 1) MOD 2048
3516 *
3517 * Both the End Frame ID and Start Frame ID values are calculated
3518 * in microframes. When software determines the valid Frame ID value,
3519 * the End Frame ID value should be rounded down to the nearest Frame
3520 * boundary, and the Start Frame ID value should be rounded up to the
3521 * nearest Frame boundary.
3522 */
3523 current_frame_id = readl(&xhci->run_regs->microframe_index);
3524 start_frame_id = roundup(current_frame_id + ist + 1, 8);
3525 end_frame_id = rounddown(current_frame_id + 895 * 8, 8);
3526
3527 start_frame &= 0x7ff;
3528 start_frame_id = (start_frame_id >> 3) & 0x7ff;
3529 end_frame_id = (end_frame_id >> 3) & 0x7ff;
3530
3531 xhci_dbg(xhci, "%s: index %d, reg 0x%x start_frame_id 0x%x, end_frame_id 0x%x, start_frame 0x%x\n",
3532 __func__, index, readl(&xhci->run_regs->microframe_index),
3533 start_frame_id, end_frame_id, start_frame);
3534
3535 if (start_frame_id < end_frame_id) {
3536 if (start_frame > end_frame_id ||
3537 start_frame < start_frame_id)
3538 ret = -EINVAL;
3539 } else if (start_frame_id > end_frame_id) {
3540 if ((start_frame > end_frame_id &&
3541 start_frame < start_frame_id))
3542 ret = -EINVAL;
3543 } else {
3544 ret = -EINVAL;
3545 }
3546
3547 if (index == 0) {
3548 if (ret == -EINVAL || start_frame == start_frame_id) {
3549 start_frame = start_frame_id + 1;
3550 if (urb->dev->speed == USB_SPEED_LOW ||
3551 urb->dev->speed == USB_SPEED_FULL)
3552 urb->start_frame = start_frame;
3553 else
3554 urb->start_frame = start_frame << 3;
3555 ret = 0;
3556 }
3557 }
3558
3559 if (ret) {
3560 xhci_warn(xhci, "Frame ID %d (reg %d, index %d) beyond range (%d, %d)\n",
3561 start_frame, current_frame_id, index,
3562 start_frame_id, end_frame_id);
3563 xhci_warn(xhci, "Ignore frame ID field, use SIA bit instead\n");
3564 return ret;
3565 }
3566
3567 return start_frame;
3568}
3569
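Worked example of the window check above, with made-up register values: MFINDEX = 1792 microframes and IST = 8 give Start Frame ID = roundup(1792 + 8 + 1, 8) >> 3 = 226 and End Frame ID = rounddown(1792 + 895 * 8, 8) >> 3 = 1119, so a TD whose computed start_frame falls outside 226..1119 gets -EINVAL here and the caller falls back to queueing with the SIA bit set.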
04e51901
AX
3570/* This is for isoc transfer */
3571static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3572 struct urb *urb, int slot_id, unsigned int ep_index)
3573{
3574 struct xhci_ring *ep_ring;
3575 struct urb_priv *urb_priv;
3576 struct xhci_td *td;
3577 int num_tds, trbs_per_td;
3578 struct xhci_generic_trb *start_trb;
3579 bool first_trb;
3580 int start_cycle;
3581 u32 field, length_field;
3582 int running_total, trb_buff_len, td_len, td_remain_len, ret;
3583 u64 start_addr, addr;
3584 int i, j;
47cbf692 3585 bool more_trbs_coming;
79b8094f 3586 struct xhci_virt_ep *xep;
09c352ed 3587 int frame_id;
04e51901 3588
79b8094f 3589 xep = &xhci->devs[slot_id]->eps[ep_index];
04e51901
AX
3590 ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
3591
3592 num_tds = urb->number_of_packets;
3593 if (num_tds < 1) {
3594 xhci_dbg(xhci, "Isoc URB with zero packets?\n");
3595 return -EINVAL;
3596 }
04e51901
AX
3597 start_addr = (u64) urb->transfer_dma;
3598 start_trb = &ep_ring->enqueue->generic;
3599 start_cycle = ep_ring->cycle_state;
3600
522989a2 3601 urb_priv = urb->hcpriv;
09c352ed 3602 /* Queue the TRBs for each TD, even if they are zero-length */
04e51901 3603 for (i = 0; i < num_tds; i++) {
09c352ed
MN
3604 unsigned int total_pkt_count, max_pkt;
3605 unsigned int burst_count, last_burst_pkt_count;
3606 u32 sia_frame_id;
04e51901 3607
4da6e6f2 3608 first_trb = true;
04e51901
AX
3609 running_total = 0;
3610 addr = start_addr + urb->iso_frame_desc[i].offset;
3611 td_len = urb->iso_frame_desc[i].length;
3612 td_remain_len = td_len;
09c352ed
MN
3613 max_pkt = GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
3614 total_pkt_count = DIV_ROUND_UP(td_len, max_pkt);
3615
48df4a6f 3616 /* A zero-length transfer still involves at least one packet. */
09c352ed
MN
3617 if (total_pkt_count == 0)
3618 total_pkt_count++;
3619 burst_count = xhci_get_burst_count(xhci, urb, total_pkt_count);
3620 last_burst_pkt_count = xhci_get_last_burst_packet_count(xhci,
3621 urb, total_pkt_count);
04e51901 3622
d2510342 3623 trbs_per_td = count_isoc_trbs_needed(urb, i);
04e51901
AX
3624
3625 ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
3b72fca0 3626 urb->stream_id, trbs_per_td, urb, i, mem_flags);
522989a2
SS
3627 if (ret < 0) {
3628 if (i == 0)
3629 return ret;
3630 goto cleanup;
3631 }
04e51901 3632 td = urb_priv->td[i];
09c352ed
MN
3633
3634 /* use SIA by default; overwrite it if a valid frame ID is computed */
3635 sia_frame_id = TRB_SIA;
3636 if (!(urb->transfer_flags & URB_ISO_ASAP) &&
3637 HCC_CFC(xhci->hcc_params)) {
3638 frame_id = xhci_get_isoc_frame_id(xhci, urb, i);
3639 if (frame_id >= 0)
3640 sia_frame_id = TRB_FRAME_ID(frame_id);
3641 }
3642 /*
3643 * Set isoc specific data for the first TRB in a TD.
3644 * Prevent HW from getting the TRBs by keeping the cycle state
3645 * inverted in the first TD's isoc TRB.
3646 */
2f6d3b65 3647 field = TRB_TYPE(TRB_ISOC) |
09c352ed
MN
3648 TRB_TLBPC(last_burst_pkt_count) |
3649 sia_frame_id |
3650 (i ? ep_ring->cycle_state : !start_cycle);
3651
2f6d3b65
MN
3652 /* xhci 1.1 with ETE uses TD_Size field for TBC, old is Rsvdz */
3653 if (!xep->use_extended_tbc)
3654 field |= TRB_TBC(burst_count);
3655
09c352ed 3656 /* fill the rest of the TRB fields, and remaining normal TRBs */
04e51901
AX
3657 for (j = 0; j < trbs_per_td; j++) {
3658 u32 remainder = 0;
09c352ed
MN
3659
3660 /* only first TRB is isoc, overwrite otherwise */
3661 if (!first_trb)
3662 field = TRB_TYPE(TRB_NORMAL) |
3663 ep_ring->cycle_state;
04e51901 3664
af8b9e63
SS
3665 /* Only set interrupt on short packet for IN EPs */
3666 if (usb_urb_dir_in(urb))
3667 field |= TRB_ISP;
3668
09c352ed 3669 /* Set the chain bit for all except the last TRB */
04e51901 3670 if (j < trbs_per_td - 1) {
47cbf692 3671 more_trbs_coming = true;
09c352ed 3672 field |= TRB_CHAIN;
04e51901 3673 } else {
09c352ed 3674 more_trbs_coming = false;
04e51901
AX
3675 td->last_trb = ep_ring->enqueue;
3676 field |= TRB_IOC;
09c352ed
MN
3677 /* set BEI, except for the last TD */
3678 if (xhci->hci_version >= 0x100 &&
3679 !(xhci->quirks & XHCI_AVOID_BEI) &&
3680 i < num_tds - 1)
3681 field |= TRB_BEI;
04e51901 3682 }
04e51901 3683 /* Calculate TRB length */
d2510342 3684 trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
04e51901
AX
3685 if (trb_buff_len > td_remain_len)
3686 trb_buff_len = td_remain_len;
3687
4da6e6f2 3688 /* Set the TRB length, TD size, & interrupter fields. */
c840d6ce
MN
3689 remainder = xhci_td_remainder(xhci, running_total,
3690 trb_buff_len, td_len,
124c3937 3691 urb, more_trbs_coming);
c840d6ce 3692
04e51901 3693 length_field = TRB_LEN(trb_buff_len) |
04e51901 3694 TRB_INTR_TARGET(0);
4da6e6f2 3695
2f6d3b65
MN
3696 /* xhci 1.1 with ETE uses TD Size field for TBC */
3697 if (first_trb && xep->use_extended_tbc)
3698 length_field |= TRB_TD_SIZE_TBC(burst_count);
3699 else
3700 length_field |= TRB_TD_SIZE(remainder);
3701 first_trb = false;
3702
3b72fca0 3703 queue_trb(xhci, ep_ring, more_trbs_coming,
04e51901
AX
3704 lower_32_bits(addr),
3705 upper_32_bits(addr),
3706 length_field,
af8b9e63 3707 field);
04e51901
AX
3708 running_total += trb_buff_len;
3709
3710 addr += trb_buff_len;
3711 td_remain_len -= trb_buff_len;
3712 }
3713
3714 /* Check TD length */
3715 if (running_total != td_len) {
3716 xhci_err(xhci, "ISOC TD length mismatch\n");
cf840551
AX
3717 ret = -EINVAL;
3718 goto cleanup;
04e51901
AX
3719 }
3720 }
3721
79b8094f
LB
3722 /* store the next frame id */
3723 if (HCC_CFC(xhci->hcc_params))
3724 xep->next_frame_id = urb->start_frame + num_tds * urb->interval;
3725
c41136b0
AX
3726 if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
3727 if (xhci->quirks & XHCI_AMD_PLL_FIX)
3728 usb_amd_quirk_pll_disable();
3729 }
3730 xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;
3731
e1eab2e0
AX
3732 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
3733 start_cycle, start_trb);
04e51901 3734 return 0;
522989a2
SS
3735cleanup:
3736 /* Clean up a partially enqueued isoc transfer. */
3737
3738 for (i--; i >= 0; i--)
585df1d9 3739 list_del_init(&urb_priv->td[i]->td_list);
522989a2
SS
3740
3741 /* Use the first TD as a temporary variable to turn the TDs we've queued
3742 * into No-ops with a software-owned cycle bit. That way the hardware
3743 * won't accidentally start executing bogus TDs when we partially
3744 * overwrite them. td->first_trb and td->start_seg are already set.
3745 */
3746 urb_priv->td[0]->last_trb = ep_ring->enqueue;
3747 /* Every TRB except the first & last will have its cycle bit flipped. */
3748 td_to_noop(xhci, ep_ring, urb_priv->td[0], true);
3749
3750 /* Reset the ring enqueue back to the first TRB and its cycle bit. */
3751 ep_ring->enqueue = urb_priv->td[0]->first_trb;
3752 ep_ring->enq_seg = urb_priv->td[0]->start_seg;
3753 ep_ring->cycle_state = start_cycle;
b008df60 3754 ep_ring->num_trbs_free = ep_ring->num_trbs_free_temp;
522989a2
SS
3755 usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
3756 return ret;
04e51901
AX
3757}
3758
3759/*
3760 * Check transfer ring to guarantee there is enough room for the urb.
3761 * Update ISO URB start_frame and interval.
79b8094f
LB
3762 * Update interval as xhci_queue_intr_tx does. Use xhci frame_index to
3763 * update urb->start_frame if URB_ISO_ASAP is set in transfer_flags or
3764 * Contiguous Frame ID is not supported by HC.
04e51901
AX
3765 */
3766int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
3767 struct urb *urb, int slot_id, unsigned int ep_index)
3768{
3769 struct xhci_virt_device *xdev;
3770 struct xhci_ring *ep_ring;
3771 struct xhci_ep_ctx *ep_ctx;
3772 int start_frame;
04e51901
AX
3773 int num_tds, num_trbs, i;
3774 int ret;
79b8094f
LB
3775 struct xhci_virt_ep *xep;
3776 int ist;
04e51901
AX
3777
3778 xdev = xhci->devs[slot_id];
79b8094f 3779 xep = &xhci->devs[slot_id]->eps[ep_index];
04e51901
AX
3780 ep_ring = xdev->eps[ep_index].ring;
3781 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
3782
3783 num_trbs = 0;
3784 num_tds = urb->number_of_packets;
3785 for (i = 0; i < num_tds; i++)
d2510342 3786 num_trbs += count_isoc_trbs_needed(urb, i);
04e51901
AX
3787
3788 /* Check the ring to guarantee there is enough room for the whole urb.
3789 * Do not insert any TD of the URB into the ring if the check fails.
3790 */
28ccd296 3791 ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
3b72fca0 3792 num_trbs, mem_flags);
04e51901
AX
3793 if (ret)
3794 return ret;
3795
79b8094f
LB
3796 /*
3797 * Check interval value. This should be done before we start to
3798 * calculate the start frame value.
3799 */
78140156 3800 check_interval(xhci, urb, ep_ctx);
79b8094f
LB
3801
3802 /* Calculate the start frame and put it in urb->start_frame. */
42df7215
LB
3803 if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) {
3804 if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
3805 EP_STATE_RUNNING) {
3806 urb->start_frame = xep->next_frame_id;
3807 goto skip_start_over;
3808 }
79b8094f
LB
3809 }
3810
3811 start_frame = readl(&xhci->run_regs->microframe_index);
3812 start_frame &= 0x3fff;
3813 /*
3814 * Round up to the next frame and account for the time before the TRB
3815 * really gets scheduled by hardware.
3816 */
3817 ist = HCS_IST(xhci->hcs_params2) & 0x7;
3818 if (HCS_IST(xhci->hcs_params2) & (1 << 3))
3819 ist <<= 3;
3820 start_frame += ist + XHCI_CFC_DELAY;
3821 start_frame = roundup(start_frame, 8);
3822
3823 /*
3824 * Round up to the next ESIT (Endpoint Service Interval Time) if ESIT
3825 * is greater than 8 microframes.
3826 */
3827 if (urb->dev->speed == USB_SPEED_LOW ||
3828 urb->dev->speed == USB_SPEED_FULL) {
3829 start_frame = roundup(start_frame, urb->interval << 3);
3830 urb->start_frame = start_frame >> 3;
3831 } else {
3832 start_frame = roundup(start_frame, urb->interval);
3833 urb->start_frame = start_frame;
3834 }
3835
3836skip_start_over:
b008df60
AX
3837 ep_ring->num_trbs_free_temp = ep_ring->num_trbs_free;
3838
3fc8206d 3839 return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index);
04e51901
AX
3840}
3841
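To make the rounding above concrete, here is a standalone sketch with made-up values (XHCI_CFC_DELAY is a driver constant; the value 7 below is invented purely for illustration):

	#include <stdio.h>

	#define ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))

	int main(void)
	{
		unsigned int start_frame = 1000;	/* MFINDEX, microframes */
		unsigned int ist = 8, cfc_delay = 7;	/* made-up slack */
		unsigned int interval = 4;	/* HS isoc: every 4 microframes */

		start_frame += ist + cfc_delay;			/* 1015 */
		start_frame = ROUNDUP(start_frame, 8);		/* 1016: frame */
		start_frame = ROUNDUP(start_frame, interval);	/* 1016: ESIT */
		printf("urb->start_frame = %u microframes\n", start_frame);
		return 0;
	}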
d0e96f5a
SS
3842/**** Command Ring Operations ****/
3843
913a8a34
SS
3844/* Generic function for queueing a command TRB on the command ring.
3845 * Check to make sure there's room on the command ring for one command TRB.
3846 * Also check that there's room reserved for commands that must not fail.
3847 * If this is a command that must not fail, meaning command_must_succeed = TRUE,
3848 * then only check for the number of reserved spots.
3849 * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
3850 * because the command event handler may want to resubmit a failed command.
3851 */
ddba5cd0
MN
3852static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
3853 u32 field1, u32 field2,
3854 u32 field3, u32 field4, bool command_must_succeed)
7f84eef0 3855{
913a8a34 3856 int reserved_trbs = xhci->cmd_ring_reserved_trbs;
d1dc908a 3857 int ret;
ad6b1d91 3858
98d74f9c
MN
3859 if ((xhci->xhc_state & XHCI_STATE_DYING) ||
3860 (xhci->xhc_state & XHCI_STATE_HALTED)) {
ad6b1d91 3861 xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n");
c9aa1a2d 3862 return -ESHUTDOWN;
ad6b1d91 3863 }
d1dc908a 3864
913a8a34
SS
3865 if (!command_must_succeed)
3866 reserved_trbs++;
3867
d1dc908a 3868 ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
3b72fca0 3869 reserved_trbs, GFP_ATOMIC);
d1dc908a
SS
3870 if (ret < 0) {
3871 xhci_err(xhci, "ERR: No room for command on command ring\n");
913a8a34
SS
3872 if (command_must_succeed)
3873 xhci_err(xhci, "ERR: Reserved TRB counting for "
3874 "unfailable commands failed.\n");
d1dc908a 3875 return ret;
7f84eef0 3876 }
c9aa1a2d
MN
3877
3878 cmd->command_trb = xhci->cmd_ring->enqueue;
3879 list_add_tail(&cmd->cmd_list, &xhci->cmd_list);
ddba5cd0 3880
c311e391
MN
3881 /* if there are no other commands queued we start the timeout timer */
3882 if (xhci->cmd_list.next == &cmd->cmd_list &&
3883 !timer_pending(&xhci->cmd_timer)) {
3884 xhci->current_cmd = cmd;
3885 mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
3886 }
3887
3b72fca0
AX
3888 queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
3889 field4 | xhci->cmd_ring->cycle_state);
7f84eef0
SS
3890 return 0;
3891}
3892
3ffbba95 3893/* Queue a slot enable or disable request on the command ring */
ddba5cd0
MN
3894int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd,
3895 u32 trb_type, u32 slot_id)
3ffbba95 3896{
ddba5cd0 3897 return queue_command(xhci, cmd, 0, 0, 0,
913a8a34 3898 TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
3ffbba95
SS
3899}
3900
3901/* Queue an address device command TRB */
ddba5cd0
MN
3902int xhci_queue_address_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
3903 dma_addr_t in_ctx_ptr, u32 slot_id, enum xhci_setup_dev setup)
3ffbba95 3904{
ddba5cd0 3905 return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
8e595a5d 3906 upper_32_bits(in_ctx_ptr), 0,
48fc7dbd
DW
3907 TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id)
3908 | (setup == SETUP_CONTEXT_ONLY ? TRB_BSR : 0), false);
2a8f82c4
SS
3909}
3910
ddba5cd0 3911int xhci_queue_vendor_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
0238634d
SS
3912 u32 field1, u32 field2, u32 field3, u32 field4)
3913{
ddba5cd0 3914 return queue_command(xhci, cmd, field1, field2, field3, field4, false);
0238634d
SS
3915}
3916
2a8f82c4 3917/* Queue a reset device command TRB */
ddba5cd0
MN
3918int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
3919 u32 slot_id)
2a8f82c4 3920{
ddba5cd0 3921 return queue_command(xhci, cmd, 0, 0, 0,
2a8f82c4 3922 TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
913a8a34 3923 false);
3ffbba95 3924}
f94e0186
SS
3925
3926/* Queue a configure endpoint command TRB */
ddba5cd0
MN
3927int xhci_queue_configure_endpoint(struct xhci_hcd *xhci,
3928 struct xhci_command *cmd, dma_addr_t in_ctx_ptr,
913a8a34 3929 u32 slot_id, bool command_must_succeed)
f94e0186 3930{
ddba5cd0 3931 return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
8e595a5d 3932 upper_32_bits(in_ctx_ptr), 0,
913a8a34
SS
3933 TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
3934 command_must_succeed);
f94e0186 3935}
ae636747 3936
f2217e8e 3937/* Queue an evaluate context command TRB */
ddba5cd0
MN
3938int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd,
3939 dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed)
f2217e8e 3940{
ddba5cd0 3941 return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
f2217e8e 3942 upper_32_bits(in_ctx_ptr), 0,
913a8a34 3943 TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
4b266541 3944 command_must_succeed);
f2217e8e
SS
3945}
3946
be88fe4f
AX
3947/*
3948 * Suspend is set to indicate "Stop Endpoint Command" is being issued to stop
3949 * activity on an endpoint that is about to be suspended.
3950 */
ddba5cd0
MN
3951int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd,
3952 int slot_id, unsigned int ep_index, int suspend)
ae636747
SS
3953{
3954 u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
3955 u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
3956 u32 type = TRB_TYPE(TRB_STOP_RING);
be88fe4f 3957 u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);
ae636747 3958
ddba5cd0 3959 return queue_command(xhci, cmd, 0, 0, 0,
be88fe4f 3960 trb_slot_id | trb_ep_index | type | trb_suspend, false);
ae636747
SS
3961}
3962
d3a43e66
HG
3963/* Set Transfer Ring Dequeue Pointer command */
3964void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
3965 unsigned int slot_id, unsigned int ep_index,
3966 unsigned int stream_id,
3967 struct xhci_dequeue_state *deq_state)
ae636747
SS
3968{
3969 dma_addr_t addr;
3970 u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
3971 u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
e9df17eb 3972 u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id);
95241dbd 3973 u32 trb_sct = 0;
ae636747 3974 u32 type = TRB_TYPE(TRB_SET_DEQ);
bf161e85 3975 struct xhci_virt_ep *ep;
1e3452e3
HG
3976 struct xhci_command *cmd;
3977 int ret;
ae636747 3978
d3a43e66
HG
3979 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
3980 "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), new deq ptr = %p (0x%llx dma), new cycle = %u",
3981 deq_state->new_deq_seg,
3982 (unsigned long long)deq_state->new_deq_seg->dma,
3983 deq_state->new_deq_ptr,
3984 (unsigned long long)xhci_trb_virt_to_dma(
3985 deq_state->new_deq_seg, deq_state->new_deq_ptr),
3986 deq_state->new_cycle_state);
3987
3988 addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
3989 deq_state->new_deq_ptr);
c92bcfa7 3990 if (addr == 0) {
ae636747 3991 xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
700e2052 3992 xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
d3a43e66
HG
3993 deq_state->new_deq_seg, deq_state->new_deq_ptr);
3994 return;
c92bcfa7 3995 }
bf161e85
SS
3996 ep = &xhci->devs[slot_id]->eps[ep_index];
3997 if ((ep->ep_state & SET_DEQ_PENDING)) {
3998 xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
3999 xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n");
d3a43e66 4000 return;
bf161e85 4001 }
1e3452e3
HG
4002
4003 /* This function gets called from contexts where it cannot sleep */
4004 cmd = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
4005 if (!cmd) {
4006 xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr: ENOMEM\n");
d3a43e66 4007 return;
1e3452e3
HG
4008 }
4009
d3a43e66
HG
4010 ep->queued_deq_seg = deq_state->new_deq_seg;
4011 ep->queued_deq_ptr = deq_state->new_deq_ptr;
95241dbd
HG
4012 if (stream_id)
4013 trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
1e3452e3 4014 ret = queue_command(xhci, cmd,
d3a43e66
HG
4015 lower_32_bits(addr) | trb_sct | deq_state->new_cycle_state,
4016 upper_32_bits(addr), trb_stream_id,
4017 trb_slot_id | trb_ep_index | type, false);
1e3452e3
HG
4018 if (ret < 0) {
4019 xhci_free_command(xhci, cmd);
d3a43e66 4020 return;
1e3452e3
HG
4021 }
4022
d3a43e66
HG
4023 /* Stop the TD queueing code from ringing the doorbell until
4024 * this command completes. The HC won't set the dequeue pointer
4025 * if the ring is running, and ringing the doorbell starts the
4026 * ring running.
4027 */
4028 ep->ep_state |= SET_DEQ_PENDING;
ae636747 4029}
a1587d97 4030
ddba5cd0
MN
4031int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
4032 int slot_id, unsigned int ep_index)
a1587d97
SS
4033{
4034 u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
4035 u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
4036 u32 type = TRB_TYPE(TRB_RESET_EP);
4037
ddba5cd0
MN
4038 return queue_command(xhci, cmd, 0, 0, 0,
4039 trb_slot_id | trb_ep_index | type, false);
a1587d97 4040}