/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * Ring initialization rules:
 * 1. Each segment is initialized to zero, except for link TRBs.
 * 2. Ring cycle state = 0.  This represents Producer Cycle State (PCS) or
 *    Consumer Cycle State (CCS), depending on ring function.
 * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
 *
 * Ring behavior rules:
 * 1. A ring is empty if enqueue == dequeue.  This means there will always be
 *    at least one free TRB in the ring.  This is useful if you want to turn
 *    that into a link TRB and expand the ring.
 * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
 *    link TRB, then load the pointer with the address in the link TRB.  If the
 *    link TRB had its toggle bit set, you may need to update the ring cycle
 *    state (see cycle bit rules).  You may have to do this multiple times
 *    until you reach a non-link TRB.
 * 3. A ring is full if enqueue++ (for the definition of increment above)
 *    equals the dequeue pointer.
 *
 * Cycle bit rules:
 * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 * 2. When a producer increments an enqueue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 *
 * Producer rules:
 * 1. Check if ring is full before you enqueue.
 * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
 *    Update enqueue pointer between each write (which may update the ring
 *    cycle state).
 * 3. Notify consumer.  If SW is producer, it rings the doorbell for command
 *    and endpoint rings.  If HC is the producer for the event ring, it
 *    generates an interrupt according to interrupt moderation rules.
 *
 * Consumer rules:
 * 1. Check if TRB belongs to you.  If the cycle bit == your ring cycle state,
 *    the TRB is owned by the consumer.
 * 2. Update dequeue pointer (which may update the ring cycle state) and
 *    continue processing TRBs until you reach a TRB which is not owned by you.
 * 3. Notify the producer.  SW is the consumer for the event ring, and it
 *    updates event ring dequeue pointer.  HC is the consumer for the command
 *    and endpoint rings; it generates events on the event ring for these.
 */
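
/*
 * Illustrative sketch of consumer rule 1 above (an example, not driver
 * code; it mirrors the ownership check done by the event handler and
 * assumes ring->dequeue points at the TRB being inspected):
 *
 *	static bool consumer_owns_trb(struct xhci_ring *ring)
 *	{
 *		u32 cycle = le32_to_cpu(ring->dequeue->event_cmd.flags) &
 *			    TRB_CYCLE;
 *
 *		return cycle == ring->cycle_state;
 *	}
 */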

#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-mtk.h"

/*
 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
 * address of the TRB.
 */
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
		union xhci_trb *trb)
{
	unsigned long segment_offset;

	if (!seg || !trb || trb < seg->trbs)
		return 0;
	/* offset in TRBs */
	segment_offset = trb - seg->trbs;
	if (segment_offset >= TRBS_PER_SEGMENT)
		return 0;
	return seg->dma + (segment_offset * sizeof(*trb));
}
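
/*
 * Worked example for the conversion above: a TRB is 16 bytes (four 32-bit
 * fields), so with seg->dma == 0x1000, the TRB at seg->trbs[2] maps to
 * 0x1000 + 2 * 16 = 0x1020; a pointer outside the segment yields 0.
 */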

static bool trb_is_noop(union xhci_trb *trb)
{
	return TRB_TYPE_NOOP_LE32(trb->generic.field[3]);
}

static bool trb_is_link(union xhci_trb *trb)
{
	return TRB_TYPE_LINK_LE32(trb->link.control);
}

static bool last_trb_on_seg(struct xhci_segment *seg, union xhci_trb *trb)
{
	return trb == &seg->trbs[TRBS_PER_SEGMENT - 1];
}

static bool last_trb_on_ring(struct xhci_ring *ring,
		struct xhci_segment *seg, union xhci_trb *trb)
{
	return last_trb_on_seg(seg, trb) && (seg->next == ring->first_seg);
}

static bool link_trb_toggles_cycle(union xhci_trb *trb)
{
	return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
}

static bool last_td_in_urb(struct xhci_td *td)
{
	struct urb_priv *urb_priv = td->urb->hcpriv;

	return urb_priv->td_cnt == urb_priv->length;
}

static void inc_td_cnt(struct urb *urb)
{
	struct urb_priv *urb_priv = urb->hcpriv;

	urb_priv->td_cnt++;
}

/* Updates trb to point to the next TRB in the ring, and updates seg if the
 * next TRB is in a new segment.  This does not skip over link TRBs, and it
 * does not affect the ring dequeue or enqueue pointers.
 */
static void next_trb(struct xhci_hcd *xhci,
		struct xhci_ring *ring,
		struct xhci_segment **seg,
		union xhci_trb **trb)
{
	if (trb_is_link(*trb)) {
		*seg = (*seg)->next;
		*trb = ((*seg)->trbs);
	} else {
		(*trb)++;
	}
}

/*
 * See Cycle bit rules.  SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 */
static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	ring->deq_updates++;

	/* event ring doesn't have link trbs, check for last trb */
	if (ring->type == TYPE_EVENT) {
		if (!last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
			ring->dequeue++;
			return;
		}
		if (last_trb_on_ring(ring, ring->deq_seg, ring->dequeue))
			ring->cycle_state ^= 1;
		ring->deq_seg = ring->deq_seg->next;
		ring->dequeue = ring->deq_seg->trbs;
		return;
	}

	/* All other rings have link trbs */
	if (!trb_is_link(ring->dequeue)) {
		ring->dequeue++;
		ring->num_trbs_free++;
	}
	while (trb_is_link(ring->dequeue)) {
		ring->deq_seg = ring->deq_seg->next;
		ring->dequeue = ring->deq_seg->trbs;
	}
	return;
}

/*
 * See Cycle bit rules.  SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 *
 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
 * chain bit is set), then set the chain bit in all the following link TRBs.
 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
 * have their chain bit cleared (so that each Link TRB is a separate TD).
 *
 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
 * set, but other sections talk about dealing with the chain bit set.  This was
 * fixed in the 0.96 specification errata, but we have to assume that all 0.95
 * xHCI hardware can't handle the chain bit being cleared on a link TRB.
 *
 * @more_trbs_coming:	Will you enqueue more TRBs before calling
 *			prepare_transfer()?
 */
static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
		bool more_trbs_coming)
{
	u32 chain;
	union xhci_trb *next;

	chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
	/* If this is not the event ring, there is one less usable TRB */
	if (!trb_is_link(ring->enqueue))
		ring->num_trbs_free--;
	next = ++(ring->enqueue);

	ring->enq_updates++;
	/* Update the enqueue pointer further if that was a link TRB */
	while (trb_is_link(next)) {

		/*
		 * If the caller doesn't plan on enqueueing more TDs before
		 * ringing the doorbell, then we don't want to give the link TRB
		 * to the hardware just yet.  We'll give the link TRB back in
		 * prepare_ring() just before we enqueue the TD at the top of
		 * the ring.
		 */
		if (!chain && !more_trbs_coming)
			break;

		/* If we're not dealing with 0.95 hardware or isoc rings on
		 * AMD 0.96 host, carry over the chain bit of the previous TRB
		 * (which may mean the chain bit is cleared).
		 */
		if (!(ring->type == TYPE_ISOC &&
		      (xhci->quirks & XHCI_AMD_0x96_HOST)) &&
		    !xhci_link_trb_quirk(xhci)) {
			next->link.control &= cpu_to_le32(~TRB_CHAIN);
			next->link.control |= cpu_to_le32(chain);
		}
		/* Give this link TRB to the hardware */
		wmb();
		next->link.control ^= cpu_to_le32(TRB_CYCLE);

		/* Toggle the cycle bit after the last ring segment. */
		if (link_trb_toggles_cycle(next))
			ring->cycle_state ^= 1;

		ring->enq_seg = ring->enq_seg->next;
		ring->enqueue = ring->enq_seg->trbs;
		next = ring->enqueue;
	}
}
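
/*
 * Concrete walk-through of the loop above (illustrative values): suppose
 * ring->cycle_state is 1 and enqueue has just advanced onto the link TRB
 * of the last segment, which has LINK_TOGGLE set.  While mid-TD (chain
 * set), the link TRB inherits TRB_CHAIN, its cycle bit is flipped so the
 * hardware now owns it, ring->cycle_state toggles to 0, and the enqueue
 * pointer moves to the first TRB of the first segment.
 */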

/*
 * Check to see if there's room to enqueue num_trbs on the ring and make sure
 * enqueue pointer will not advance into dequeue segment.  See rules above.
 */
static inline int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
		unsigned int num_trbs)
{
	int num_trbs_in_deq_seg;

	if (ring->num_trbs_free < num_trbs)
		return 0;

	if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {
		num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;
		if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg)
			return 0;
	}

	return 1;
}
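
/*
 * Worked example of the check above: on a transfer ring whose dequeue
 * pointer sits 2 TRBs into its segment, queueing 3 TRBs requires
 * num_trbs_free >= 3 + 2, so the enqueue pointer can never advance into
 * the dequeue segment.
 */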

/* Ring the host controller doorbell after placing a command on the ring */
void xhci_ring_cmd_db(struct xhci_hcd *xhci)
{
	if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING))
		return;

	xhci_dbg(xhci, "// Ding dong!\n");
	writel(DB_VALUE_HOST, &xhci->dba->doorbell[0]);
	/* Flush PCI posted writes */
	readl(&xhci->dba->doorbell[0]);
}

static bool xhci_mod_cmd_timer(struct xhci_hcd *xhci, unsigned long delay)
{
	return mod_delayed_work(system_wq, &xhci->cmd_timer, delay);
}

static struct xhci_command *xhci_next_queued_cmd(struct xhci_hcd *xhci)
{
	return list_first_entry_or_null(&xhci->cmd_list, struct xhci_command,
					cmd_list);
}

/*
 * Turn all commands on command ring with status set to "aborted" to no-op trbs.
 * If there are other commands waiting then restart the ring and kick the timer.
 * This must be called with command ring stopped and xhci->lock held.
 */
static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
		struct xhci_command *cur_cmd)
{
	struct xhci_command *i_cmd;
	u32 cycle_state;

	/* Turn all aborted commands in list to no-ops, then restart */
	list_for_each_entry(i_cmd, &xhci->cmd_list, cmd_list) {

		if (i_cmd->status != COMP_COMMAND_ABORTED)
			continue;

		i_cmd->status = COMP_STOPPED;

		xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
			 i_cmd->command_trb);
		/* get cycle state from the original cmd trb */
		cycle_state = le32_to_cpu(
			i_cmd->command_trb->generic.field[3]) & TRB_CYCLE;
		/* modify the command trb to no-op command */
		i_cmd->command_trb->generic.field[0] = 0;
		i_cmd->command_trb->generic.field[1] = 0;
		i_cmd->command_trb->generic.field[2] = 0;
		i_cmd->command_trb->generic.field[3] = cpu_to_le32(
			TRB_TYPE(TRB_CMD_NOOP) | cycle_state);

		/*
		 * caller waiting for completion is called when command
		 * completion event is received for these no-op commands
		 */
	}

	xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

	/* ring command ring doorbell to restart the command ring */
	if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
	    !(xhci->xhc_state & XHCI_STATE_DYING)) {
		xhci->current_cmd = cur_cmd;
		xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
		xhci_ring_cmd_db(xhci);
	}
}

/* Must be called with xhci->lock held, releases and acquires lock back */
static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags)
{
	u64 temp_64;
	int ret;

	xhci_dbg(xhci, "Abort command ring\n");

	reinit_completion(&xhci->cmd_ring_stop_completion);

	temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
			&xhci->op_regs->cmd_ring);

	/* Section 4.6.1.2 of xHCI 1.0 spec says software should
	 * time the completion of all xHCI commands, including
	 * the Command Abort operation.  If software doesn't see
	 * CRR negated in a timely manner (e.g. longer than 5
	 * seconds), then it should assume that there are
	 * larger problems with the xHC and assert HCRST.
	 */
	ret = xhci_handshake(&xhci->op_regs->cmd_ring,
			CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
	if (ret < 0) {
		xhci_err(xhci,
			 "Stop command ring failed, maybe the host is dead\n");
		xhci->xhc_state |= XHCI_STATE_DYING;
		xhci_halt(xhci);
		return -ESHUTDOWN;
	}
	/*
	 * Writing the CMD_RING_ABORT bit should cause a cmd completion event,
	 * however on some host hw the CMD_RING_RUNNING bit is correctly cleared
	 * but the completion event is never sent.  Wait 2 secs (arbitrary
	 * number) to handle those cases after negation of CMD_RING_RUNNING.
	 */
	spin_unlock_irqrestore(&xhci->lock, flags);
	ret = wait_for_completion_timeout(&xhci->cmd_ring_stop_completion,
					  msecs_to_jiffies(2000));
	spin_lock_irqsave(&xhci->lock, flags);
	if (!ret) {
		xhci_dbg(xhci, "No stop event for abort, ring start fail?\n");
		xhci_cleanup_command_queue(xhci);
	} else {
		xhci_handle_stopped_cmd_ring(xhci, xhci_next_queued_cmd(xhci));
	}
	return 0;
}

void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index,
		unsigned int stream_id)
{
	__le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
	unsigned int ep_state = ep->ep_state;

	/* Don't ring the doorbell for this endpoint if there are pending
	 * cancellations because we don't want to interrupt processing.
	 * We don't want to restart any stream rings if there's a set dequeue
	 * pointer command pending because the device can choose to start any
	 * stream once the endpoint is on the HW schedule.
	 */
	if ((ep_state & EP_STOP_CMD_PENDING) || (ep_state & SET_DEQ_PENDING) ||
	    (ep_state & EP_HALTED))
		return;
	writel(DB_VALUE(ep_index, stream_id), db_addr);
	/* The CPU has better things to do at this point than wait for a
	 * write-posting flush.  It'll get there soon enough.
	 */
}

/* Ring the doorbell for any rings with pending URBs */
static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index)
{
	unsigned int stream_id;
	struct xhci_virt_ep *ep;

	ep = &xhci->devs[slot_id]->eps[ep_index];

	/* A ring has pending URBs if its TD list is not empty */
	if (!(ep->ep_state & EP_HAS_STREAMS)) {
		if (ep->ring && !(list_empty(&ep->ring->td_list)))
			xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
		return;
	}

	for (stream_id = 1; stream_id < ep->stream_info->num_streams;
			stream_id++) {
		struct xhci_stream_info *stream_info = ep->stream_info;
		if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
			xhci_ring_ep_doorbell(xhci, slot_id, ep_index,
					      stream_id);
	}
}

/* Get the right ring for the given slot_id, ep_index and stream_id.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id)
{
	struct xhci_virt_ep *ep;

	ep = &xhci->devs[slot_id]->eps[ep_index];
	/* Common case: no streams */
	if (!(ep->ep_state & EP_HAS_STREAMS))
		return ep->ring;

	if (stream_id == 0) {
		xhci_warn(xhci,
				"WARN: Slot ID %u, ep index %u has streams, "
				"but URB has no stream ID.\n",
				slot_id, ep_index);
		return NULL;
	}

	if (stream_id < ep->stream_info->num_streams)
		return ep->stream_info->stream_rings[stream_id];

	xhci_warn(xhci,
			"WARN: Slot ID %u, ep index %u has "
			"stream IDs 1 to %u allocated, "
			"but stream ID %u is requested.\n",
			slot_id, ep_index,
			ep->stream_info->num_streams - 1,
			stream_id);
	return NULL;
}

/*
 * Move the xHC's endpoint ring dequeue pointer past cur_td.
 * Record the new state of the xHC's endpoint ring dequeue segment,
 * dequeue pointer, and new consumer cycle state in state.
 * Update our internal representation of the ring's dequeue pointer.
 *
 * We do this in three jumps:
 *  - First we update our new ring state to be the same as when the xHC stopped.
 *  - Then we traverse the ring to find the segment that contains
 *    the last TRB in the TD.  We toggle the xHC's new cycle state when we pass
 *    any link TRBs with the toggle cycle bit set.
 *  - Finally we move the dequeue state one TRB further, toggling the cycle bit
 *    if we've moved it past a link TRB with the toggle cycle bit set.
 *
 * Some of the uses of xhci_generic_trb are grotty, but if they're done
 * with correct __le32 accesses they should work fine.  Only users of this are
 * in here.
 */
void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id, struct xhci_td *cur_td,
		struct xhci_dequeue_state *state)
{
	struct xhci_virt_device *dev = xhci->devs[slot_id];
	struct xhci_virt_ep *ep = &dev->eps[ep_index];
	struct xhci_ring *ep_ring;
	struct xhci_segment *new_seg;
	union xhci_trb *new_deq;
	dma_addr_t addr;
	u64 hw_dequeue;
	bool cycle_found = false;
	bool td_last_trb_found = false;

	ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
			ep_index, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN can't find new dequeue state "
				"for invalid stream ID %u.\n",
				stream_id);
		return;
	}

	/* Dig out the cycle state saved by the xHC during the stop ep cmd */
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"Finding endpoint context");
	/* 4.6.9 the css flag is written to the stream context for streams */
	if (ep->ep_state & EP_HAS_STREAMS) {
		struct xhci_stream_ctx *ctx =
			&ep->stream_info->stream_ctx_array[stream_id];
		hw_dequeue = le64_to_cpu(ctx->stream_ring);
	} else {
		struct xhci_ep_ctx *ep_ctx
			= xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
		hw_dequeue = le64_to_cpu(ep_ctx->deq);
	}

	new_seg = ep_ring->deq_seg;
	new_deq = ep_ring->dequeue;
	state->new_cycle_state = hw_dequeue & 0x1;

	/*
	 * We want to find the pointer, segment and cycle state of the new trb
	 * (the one after current TD's last_trb).  We know the cycle state at
	 * hw_dequeue, so walk the ring until both hw_dequeue and last_trb are
	 * found.
	 */
	do {
		if (!cycle_found && xhci_trb_virt_to_dma(new_seg, new_deq)
		    == (dma_addr_t)(hw_dequeue & ~0xf)) {
			cycle_found = true;
			if (td_last_trb_found)
				break;
		}
		if (new_deq == cur_td->last_trb)
			td_last_trb_found = true;

		if (cycle_found && trb_is_link(new_deq) &&
		    link_trb_toggles_cycle(new_deq))
			state->new_cycle_state ^= 0x1;

		next_trb(xhci, ep_ring, &new_seg, &new_deq);

		/* Search wrapped around, bail out */
		if (new_deq == ep->ring->dequeue) {
			xhci_err(xhci, "Error: Failed finding new dequeue state\n");
			state->new_deq_seg = NULL;
			state->new_deq_ptr = NULL;
			return;
		}

	} while (!cycle_found || !td_last_trb_found);

	state->new_deq_seg = new_seg;
	state->new_deq_ptr = new_deq;

	/* Don't update the ring cycle state for the producer (us). */
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"Cycle state = 0x%x", state->new_cycle_state);

	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"New dequeue segment = %p (virtual)",
			state->new_deq_seg);
	addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"New dequeue pointer = 0x%llx (DMA)",
			(unsigned long long) addr);
}

/* flip_cycle means flip the cycle bit of all but the first and last TRB.
 * (The last TRB actually points to the ring enqueue pointer, which is not part
 * of this TD.)  This is used to remove partially enqueued isoc TDs from a ring.
 */
static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
		struct xhci_td *td, bool flip_cycle)
{
	struct xhci_segment *seg = td->start_seg;
	union xhci_trb *trb = td->first_trb;

	while (1) {
		if (trb_is_link(trb)) {
			/* unchain chained link TRBs */
			trb->link.control &= cpu_to_le32(~TRB_CHAIN);
		} else {
			trb->generic.field[0] = 0;
			trb->generic.field[1] = 0;
			trb->generic.field[2] = 0;
			/* Preserve only the cycle bit of this TRB */
			trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
			trb->generic.field[3] |= cpu_to_le32(
				TRB_TYPE(TRB_TR_NOOP));
		}
		/* flip cycle if asked to */
		if (flip_cycle && trb != td->first_trb && trb != td->last_trb)
			trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE);

		if (trb == td->last_trb)
			break;

		next_trb(xhci, ep_ring, &seg, &trb);
	}
}

static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
		struct xhci_virt_ep *ep)
{
	ep->ep_state &= ~EP_STOP_CMD_PENDING;
	/* Can't del_timer_sync in interrupt */
	del_timer(&ep->stop_cmd_timer);
}

/*
 * Must be called with xhci->lock held in interrupt context,
 * releases and re-acquires xhci->lock
 */
static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
		struct xhci_td *cur_td, int status)
{
	struct urb *urb = cur_td->urb;
	struct urb_priv *urb_priv = urb->hcpriv;
	struct usb_hcd *hcd = bus_to_hcd(urb->dev->bus);

	if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
		xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
		if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
			if (xhci->quirks & XHCI_AMD_PLL_FIX)
				usb_amd_quirk_pll_enable();
		}
	}
	xhci_urb_free_priv(urb_priv);
	usb_hcd_unlink_urb_from_ep(hcd, urb);
	spin_unlock(&xhci->lock);
	usb_hcd_giveback_urb(hcd, urb, status);
	spin_lock(&xhci->lock);
}

static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci,
		struct xhci_ring *ring, struct xhci_td *td)
{
	struct device *dev = xhci_to_hcd(xhci)->self.controller;
	struct xhci_segment *seg = td->bounce_seg;
	struct urb *urb = td->urb;

	if (!seg || !urb)
		return;

	if (usb_urb_dir_out(urb)) {
		dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
				 DMA_TO_DEVICE);
		return;
	}

	/* for in transfers we need to copy the data from bounce to sg */
	sg_pcopy_from_buffer(urb->sg, urb->num_mapped_sgs, seg->bounce_buf,
			     seg->bounce_len, seg->bounce_offs);
	dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
			 DMA_FROM_DEVICE);
	seg->bounce_len = 0;
	seg->bounce_offs = 0;
}

/*
 * When we get a command completion for a Stop Endpoint Command, we need to
 * unlink any cancelled TDs from the ring.  There are two ways to do that:
 *
 *  1. If the HW was in the middle of processing the TD that needs to be
 *     cancelled, then we must move the ring's dequeue pointer past the last
 *     TRB in the TD with a Set Dequeue Pointer Command.
 *  2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the
 *     chain bit cleared) so that the HW will skip over them.
 */
static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
		union xhci_trb *trb, struct xhci_event_cmd *event)
{
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;
	struct xhci_td *cur_td = NULL;
	struct xhci_td *last_unlinked_td;

	struct xhci_dequeue_state deq_state;

	if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
		if (!xhci->devs[slot_id])
			xhci_warn(xhci, "Stop endpoint command "
				"completion for disabled slot %u\n",
				slot_id);
		return;
	}

	memset(&deq_state, 0, sizeof(deq_state));
	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	ep = &xhci->devs[slot_id]->eps[ep_index];
	last_unlinked_td = list_last_entry(&ep->cancelled_td_list,
			struct xhci_td, cancelled_td_list);

	if (list_empty(&ep->cancelled_td_list)) {
		xhci_stop_watchdog_timer_in_irq(xhci, ep);
		ep->stopped_td = NULL;
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
		return;
	}

	/* Fix up the ep ring first, so HW stops executing cancelled TDs.
	 * We have the xHCI lock, so nothing can modify this list until we drop
	 * it.  We're also in the event handler, so we can't get re-interrupted
	 * if another Stop Endpoint command completes.
	 */
	list_for_each_entry(cur_td, &ep->cancelled_td_list, cancelled_td_list) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Removing canceled TD starting at 0x%llx (dma).",
				(unsigned long long)xhci_trb_virt_to_dma(
					cur_td->start_seg, cur_td->first_trb));
		ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
		if (!ep_ring) {
			/* This shouldn't happen unless a driver is mucking
			 * with the stream ID after submission.  This will
			 * leave the TD on the hardware ring, and the hardware
			 * will try to execute it, and may access a buffer
			 * that has already been freed.  In the best case, the
			 * hardware will execute it, and the event handler will
			 * ignore the completion event for that TD, since it was
			 * removed from the td_list for that endpoint.  In
			 * short, don't muck with the stream ID after
			 * submission.
			 */
			xhci_warn(xhci, "WARN Cancelled URB %p "
					"has invalid stream ID %u.\n",
					cur_td->urb,
					cur_td->urb->stream_id);
			goto remove_finished_td;
		}
		/*
		 * If we stopped on the TD we need to cancel, then we have to
		 * move the xHC endpoint ring dequeue pointer past this TD.
		 */
		if (cur_td == ep->stopped_td)
			xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
					cur_td->urb->stream_id,
					cur_td, &deq_state);
		else
			td_to_noop(xhci, ep_ring, cur_td, false);
remove_finished_td:
		/*
		 * The event handler won't see a completion for this TD anymore,
		 * so remove it from the endpoint ring's TD list.  Keep it in
		 * the cancelled TD list for URB completion later.
		 */
		list_del_init(&cur_td->td_list);
	}

	xhci_stop_watchdog_timer_in_irq(xhci, ep);

	/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
	if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
		xhci_queue_new_dequeue_state(xhci, slot_id, ep_index,
				ep->stopped_td->urb->stream_id, &deq_state);
		xhci_ring_cmd_db(xhci);
	} else {
		/* Otherwise ring the doorbell(s) to restart queued transfers */
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
	}

	ep->stopped_td = NULL;

	/*
	 * Drop the lock and complete the URBs in the cancelled TD list.
	 * New TDs to be cancelled might be added to the end of the list before
	 * we can complete all the URBs for the TDs we already unlinked.
	 * So stop when we've completed the URB for the last TD we unlinked.
	 */
	do {
		cur_td = list_first_entry(&ep->cancelled_td_list,
				struct xhci_td, cancelled_td_list);
		list_del_init(&cur_td->cancelled_td_list);

		/* Clean up the cancelled URB */
		/* Doesn't matter what we pass for status, since the core will
		 * just overwrite it (because the URB has been unlinked).
		 */
		ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
		if (ep_ring && cur_td->bounce_seg)
			xhci_unmap_td_bounce_buffer(xhci, ep_ring, cur_td);
		inc_td_cnt(cur_td->urb);
		if (last_td_in_urb(cur_td))
			xhci_giveback_urb_in_irq(xhci, cur_td, 0);

		/* Stop processing the cancelled list if the watchdog timer is
		 * running.
		 */
		if (xhci->xhc_state & XHCI_STATE_DYING)
			return;
	} while (cur_td != last_unlinked_td);

	/* Return to the event handler with xhci->lock re-acquired */
}

static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	struct xhci_td *cur_td;

	while (!list_empty(&ring->td_list)) {
		cur_td = list_first_entry(&ring->td_list,
				struct xhci_td, td_list);
		list_del_init(&cur_td->td_list);
		if (!list_empty(&cur_td->cancelled_td_list))
			list_del_init(&cur_td->cancelled_td_list);

		if (cur_td->bounce_seg)
			xhci_unmap_td_bounce_buffer(xhci, ring, cur_td);

		inc_td_cnt(cur_td->urb);
		if (last_td_in_urb(cur_td))
			xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
	}
}

static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
		int slot_id, int ep_index)
{
	struct xhci_td *cur_td;
	struct xhci_virt_ep *ep;
	struct xhci_ring *ring;

	ep = &xhci->devs[slot_id]->eps[ep_index];
	if ((ep->ep_state & EP_HAS_STREAMS) ||
			(ep->ep_state & EP_GETTING_NO_STREAMS)) {
		int stream_id;

		for (stream_id = 0; stream_id < ep->stream_info->num_streams;
				stream_id++) {
			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
					"Killing URBs for slot ID %u, ep index %u, stream %u",
					slot_id, ep_index, stream_id + 1);
			xhci_kill_ring_urbs(xhci,
					ep->stream_info->stream_rings[stream_id]);
		}
	} else {
		ring = ep->ring;
		if (!ring)
			return;
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Killing URBs for slot ID %u, ep index %u",
				slot_id, ep_index);
		xhci_kill_ring_urbs(xhci, ring);
	}
	while (!list_empty(&ep->cancelled_td_list)) {
		cur_td = list_first_entry(&ep->cancelled_td_list,
				struct xhci_td, cancelled_td_list);
		list_del_init(&cur_td->cancelled_td_list);

		inc_td_cnt(cur_td->urb);
		if (last_td_in_urb(cur_td))
			xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
	}
}

/* Watchdog timer function for when a stop endpoint command fails to complete.
 * In this case, we assume the host controller is broken or dying or dead.  The
 * host may still be completing some other events, so we have to be careful to
 * let the event ring handler and the URB dequeueing/enqueueing functions know
 * through xhci->state.
 *
 * The timer may also fire if the host takes a very long time to respond to the
 * command, and the stop endpoint command completion handler cannot delete the
 * timer before the timer function is called.  Another endpoint cancellation may
 * sneak in before the timer function can grab the lock, and that may queue
 * another stop endpoint command and add the timer back.  So we cannot use a
 * simple flag to say whether there is a pending stop endpoint command for a
 * particular endpoint.
 *
 * Instead we use a combination of that flag and checking if a new timer is
 * pending.
 */
void xhci_stop_endpoint_command_watchdog(unsigned long arg)
{
	struct xhci_hcd *xhci;
	struct xhci_virt_ep *ep;
	int ret, i, j;
	unsigned long flags;

	ep = (struct xhci_virt_ep *) arg;
	xhci = ep->xhci;

	spin_lock_irqsave(&xhci->lock, flags);

	/* bail out if cmd completed but raced with stop ep watchdog timer */
	if (!(ep->ep_state & EP_STOP_CMD_PENDING) ||
	    timer_pending(&ep->stop_cmd_timer)) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "Stop EP timer raced with cmd completion, exit");
		return;
	}

	xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
	xhci_warn(xhci, "Assuming host is dying, halting host.\n");
	/* Oops, HC is dead or dying or at least not responding to the stop
	 * endpoint command.
	 */

	xhci->xhc_state |= XHCI_STATE_DYING;
	ep->ep_state &= ~EP_STOP_CMD_PENDING;

	/* Disable interrupts from the host controller and start halting it */
	xhci_quiesce(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	ret = xhci_halt(xhci);

	spin_lock_irqsave(&xhci->lock, flags);
	if (ret < 0) {
		/* This is bad; the host is not responding to commands and it's
		 * not allowing itself to be halted.  At least interrupts are
		 * disabled.  If we call usb_hc_died(), it will attempt to
		 * disconnect all device drivers under this host.  Those
		 * disconnect() methods will wait for all URBs to be unlinked,
		 * so we must complete them.
		 */
		xhci_warn(xhci, "Non-responsive xHCI host is not halting.\n");
		xhci_warn(xhci, "Completing active URBs anyway.\n");
		/* We could turn all TDs on the rings to no-ops.  This won't
		 * help if the host has cached part of the ring, and is slow if
		 * we want to preserve the cycle bit.  Skip it and hope the host
		 * doesn't touch the memory.
		 */
	}
	for (i = 0; i < MAX_HC_SLOTS; i++) {
		if (!xhci->devs[i])
			continue;
		for (j = 0; j < 31; j++)
			xhci_kill_endpoint_urbs(xhci, i, j);
	}
	spin_unlock_irqrestore(&xhci->lock, flags);
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"Calling usb_hc_died()");
	usb_hc_died(xhci_to_hcd(xhci));
	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"xHCI host controller is dead.");
}

static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
		struct xhci_virt_device *dev,
		struct xhci_ring *ep_ring,
		unsigned int ep_index)
{
	union xhci_trb *dequeue_temp;
	int num_trbs_free_temp;
	bool revert = false;

	num_trbs_free_temp = ep_ring->num_trbs_free;
	dequeue_temp = ep_ring->dequeue;

	/* If we get two back-to-back stalls, and the first stalled transfer
	 * ends just before a link TRB, the dequeue pointer will be left on
	 * the link TRB by the code in the while loop.  So we have to update
	 * the dequeue pointer one segment further, or we'll jump off
	 * the segment into la-la-land.
	 */
	if (trb_is_link(ep_ring->dequeue)) {
		ep_ring->deq_seg = ep_ring->deq_seg->next;
		ep_ring->dequeue = ep_ring->deq_seg->trbs;
	}

	while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
		/* We have more usable TRBs */
		ep_ring->num_trbs_free++;
		ep_ring->dequeue++;
		if (trb_is_link(ep_ring->dequeue)) {
			if (ep_ring->dequeue ==
					dev->eps[ep_index].queued_deq_ptr)
				break;
			ep_ring->deq_seg = ep_ring->deq_seg->next;
			ep_ring->dequeue = ep_ring->deq_seg->trbs;
		}
		if (ep_ring->dequeue == dequeue_temp) {
			revert = true;
			break;
		}
	}

	if (revert) {
		xhci_dbg(xhci, "Unable to find new dequeue pointer\n");
		ep_ring->num_trbs_free = num_trbs_free_temp;
	}
}

/*
 * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
 * we need to clear the set deq pending flag in the endpoint ring state, so that
 * the TD queueing code can ring the doorbell again.  We also need to ring the
 * endpoint doorbell to restart the ring, but only if there aren't more
 * cancellations pending.
 */
static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
		union xhci_trb *trb, u32 cmd_comp_code)
{
	unsigned int ep_index;
	unsigned int stream_id;
	struct xhci_ring *ep_ring;
	struct xhci_virt_device *dev;
	struct xhci_virt_ep *ep;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;

	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
	dev = xhci->devs[slot_id];
	ep = &dev->eps[ep_index];

	ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n",
				stream_id);
		/* XXX: Harmless??? */
		goto cleanup;
	}

	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
	slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);

	if (cmd_comp_code != COMP_SUCCESS) {
		unsigned int ep_state;
		unsigned int slot_state;

		switch (cmd_comp_code) {
		case COMP_TRB_ERROR:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because of stream ID configuration\n");
			break;
		case COMP_CONTEXT_STATE_ERROR:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due to incorrect slot or ep state.\n");
			ep_state = GET_EP_CTX_STATE(ep_ctx);
			slot_state = le32_to_cpu(slot_ctx->dev_state);
			slot_state = GET_SLOT_STATE(slot_state);
			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
					"Slot state = %u, EP state = %u",
					slot_state, ep_state);
			break;
		case COMP_SLOT_NOT_ENABLED_ERROR:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because slot %u was not enabled.\n",
					slot_id);
			break;
		default:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown completion code of %u.\n",
					cmd_comp_code);
			break;
		}
		/* OK what do we do now?  The endpoint state is hosed, and we
		 * should never get to this point if the synchronization between
		 * queueing, and endpoint state are correct.  This might happen
		 * if the device gets disconnected after we've finished
		 * cancelling URBs, which might not be an error...
		 */
	} else {
		u64 deq;
		/* 4.6.10 deq ptr is written to the stream ctx for streams */
		if (ep->ep_state & EP_HAS_STREAMS) {
			struct xhci_stream_ctx *ctx =
				&ep->stream_info->stream_ctx_array[stream_id];
			deq = le64_to_cpu(ctx->stream_ring) & SCTX_DEQ_MASK;
		} else {
			deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
		}
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			"Successful Set TR Deq Ptr cmd, deq = @%08llx", deq);
		if (xhci_trb_virt_to_dma(ep->queued_deq_seg,
					 ep->queued_deq_ptr) == deq) {
			/* Update the ring's dequeue segment and dequeue pointer
			 * to reflect the new position.
			 */
			update_ring_for_set_deq_completion(xhci, dev,
					ep_ring, ep_index);
		} else {
			xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n");
			xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
					ep->queued_deq_seg, ep->queued_deq_ptr);
		}
	}

cleanup:
	dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
	dev->eps[ep_index].queued_deq_seg = NULL;
	dev->eps[ep_index].queued_deq_ptr = NULL;
	/* Restart any rings with pending URBs */
	ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}

static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
		union xhci_trb *trb, u32 cmd_comp_code)
{
	unsigned int ep_index;

	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	/* This command will only fail if the endpoint wasn't halted,
	 * but we don't care.
	 */
	xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
		"Ignoring reset ep completion code of %u", cmd_comp_code);

	/* HW with the reset endpoint quirk needs to have a configure endpoint
	 * command complete before the endpoint can be used.  Queue that here
	 * because the HW can't handle two commands being queued in a row.
	 */
	if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
		struct xhci_command *command;
		command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
		if (!command) {
			xhci_warn(xhci, "WARN Cannot submit cfg ep: ENOMEM\n");
			return;
		}
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Queueing configure endpoint command");
		xhci_queue_configure_endpoint(xhci, command,
				xhci->devs[slot_id]->in_ctx->dma, slot_id,
				false);
		xhci_ring_cmd_db(xhci);
	} else {
		/* Clear our internal halted state */
		xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
	}
}

static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id,
		struct xhci_command *command, u32 cmd_comp_code)
{
	if (cmd_comp_code == COMP_SUCCESS)
		command->slot_id = slot_id;
	else
		command->slot_id = 0;
}

static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *virt_dev;

	virt_dev = xhci->devs[slot_id];
	if (!virt_dev)
		return;
	if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
		/* Delete default control endpoint resources */
		xhci_free_device_endpoint_resources(xhci, virt_dev, true);
	xhci_free_virt_device(xhci, slot_id);
}

static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
		struct xhci_event_cmd *event, u32 cmd_comp_code)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_input_control_ctx *ctrl_ctx;
	unsigned int ep_index;
	unsigned int ep_state;
	u32 add_flags, drop_flags;

	/*
	 * Configure endpoint commands can come from the USB core
	 * configuration or alt setting changes, or because the HW
	 * needed an extra configure endpoint command after a reset
	 * endpoint command or streams were being configured.
	 * If the command was for a halted endpoint, the xHCI driver
	 * is not waiting on the configure endpoint command.
	 */
	virt_dev = xhci->devs[slot_id];
	ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "Could not get input context, bad type.\n");
		return;
	}

	add_flags = le32_to_cpu(ctrl_ctx->add_flags);
	drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
	/* Input ctx add_flags are the endpoint index plus one */
	ep_index = xhci_last_valid_endpoint(add_flags) - 1;

	/* A usb_set_interface() call directly after clearing a halted
	 * condition may race on this quirky hardware.  Not worth
	 * worrying about, since this is prototype hardware.  Not sure
	 * if this will work for streams, but streams support was
	 * untested on this prototype.
	 */
	if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
			ep_index != (unsigned int) -1 &&
			add_flags - SLOT_FLAG == drop_flags) {
		ep_state = virt_dev->eps[ep_index].ep_state;
		if (!(ep_state & EP_HALTED))
			return;
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Completed config ep cmd - "
				"last ep index = %d, state = %d",
				ep_index, ep_state);
		/* Clear internal halted state and restart ring(s) */
		virt_dev->eps[ep_index].ep_state &= ~EP_HALTED;
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
		return;
	}
	return;
}

static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id,
		struct xhci_event_cmd *event)
{
	xhci_dbg(xhci, "Completed reset device command.\n");
	if (!xhci->devs[slot_id])
		xhci_warn(xhci, "Reset device command completion "
				"for disabled slot %u\n", slot_id);
}

static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event)
{
	if (!(xhci->quirks & XHCI_NEC_HOST)) {
		xhci_warn(xhci, "WARN NEC_GET_FW command on non-NEC host\n");
		return;
	}
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"NEC firmware version %2x.%02x",
			NEC_FW_MAJOR(le32_to_cpu(event->status)),
			NEC_FW_MINOR(le32_to_cpu(event->status)));
}

static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 status)
{
	list_del(&cmd->cmd_list);

	if (cmd->completion) {
		cmd->status = status;
		complete(cmd->completion);
	} else {
		kfree(cmd);
	}
}

void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
{
	struct xhci_command *cur_cmd, *tmp_cmd;
	list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list)
		xhci_complete_del_and_free_cmd(cur_cmd, COMP_COMMAND_ABORTED);
}

void xhci_handle_command_timeout(struct work_struct *work)
{
	struct xhci_hcd *xhci;
	int ret;
	unsigned long flags;
	u64 hw_ring_state;

	xhci = container_of(to_delayed_work(work), struct xhci_hcd, cmd_timer);

	spin_lock_irqsave(&xhci->lock, flags);

	/*
	 * If timeout work is pending, or current_cmd is NULL, it means we
	 * raced with command completion.  Command is handled so just return.
	 */
	if (!xhci->current_cmd || delayed_work_pending(&xhci->cmd_timer)) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}
	/* mark this command to be cancelled */
	xhci->current_cmd->status = COMP_COMMAND_ABORTED;

	/* Make sure command ring is running before aborting it */
	hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
	    (hw_ring_state & CMD_RING_RUNNING)) {
		/* Prevent new doorbell, and start command abort */
		xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
		xhci_dbg(xhci, "Command timeout\n");
		ret = xhci_abort_cmd_ring(xhci, flags);
		if (unlikely(ret == -ESHUTDOWN)) {
			xhci_err(xhci, "Abort command ring failed\n");
			xhci_cleanup_command_queue(xhci);
			spin_unlock_irqrestore(&xhci->lock, flags);
			usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
			xhci_dbg(xhci, "xHCI host controller is dead.\n");

			return;
		}

		goto time_out_completed;
	}

	/* host removed. Bail out */
	if (xhci->xhc_state & XHCI_STATE_REMOVING) {
		xhci_dbg(xhci, "host removed, ring start fail?\n");
		xhci_cleanup_command_queue(xhci);

		goto time_out_completed;
	}

	/* command timeout on stopped ring, ring can't be aborted */
	xhci_dbg(xhci, "Command timeout on stopped ring\n");
	xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);

time_out_completed:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return;
}

static void handle_cmd_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event)
{
	int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	u64 cmd_dma;
	dma_addr_t cmd_dequeue_dma;
	u32 cmd_comp_code;
	union xhci_trb *cmd_trb;
	struct xhci_command *cmd;
	u32 cmd_type;

	cmd_dma = le64_to_cpu(event->cmd_trb);
	cmd_trb = xhci->cmd_ring->dequeue;
	cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
			cmd_trb);
	/*
	 * Check whether the completion event is for our internal kept
	 * command.
	 */
	if (!cmd_dequeue_dma || cmd_dma != (u64)cmd_dequeue_dma) {
		xhci_warn(xhci,
			  "ERROR mismatched command completion event\n");
		return;
	}

	cmd = list_first_entry(&xhci->cmd_list, struct xhci_command, cmd_list);

	cancel_delayed_work(&xhci->cmd_timer);

	trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event);

	cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status));

	/* If CMD ring stopped we own the trbs between enqueue and dequeue */
	if (cmd_comp_code == COMP_STOPPED) {
		complete_all(&xhci->cmd_ring_stop_completion);
		return;
	}

	if (cmd->command_trb != xhci->cmd_ring->dequeue) {
		xhci_err(xhci,
			 "Command completion event does not match command\n");
		return;
	}

	/*
	 * Host aborted the command ring, check if the current command was
	 * supposed to be aborted, otherwise continue normally.
	 * The command ring is stopped now, but the xHC will issue a Command
	 * Ring Stopped event which will cause us to restart it.
	 */
	if (cmd_comp_code == COMP_COMMAND_ABORTED) {
		xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
		if (cmd->status == COMP_COMMAND_ABORTED) {
			if (xhci->current_cmd == cmd)
				xhci->current_cmd = NULL;
			goto event_handled;
		}
	}

	cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3]));
	switch (cmd_type) {
	case TRB_ENABLE_SLOT:
		xhci_handle_cmd_enable_slot(xhci, slot_id, cmd, cmd_comp_code);
		break;
	case TRB_DISABLE_SLOT:
		xhci_handle_cmd_disable_slot(xhci, slot_id);
		break;
	case TRB_CONFIG_EP:
		if (!cmd->completion)
			xhci_handle_cmd_config_ep(xhci, slot_id, event,
						  cmd_comp_code);
		break;
	case TRB_EVAL_CONTEXT:
		break;
	case TRB_ADDR_DEV:
		break;
	case TRB_STOP_RING:
		WARN_ON(slot_id != TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3])));
		xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb, event);
		break;
	case TRB_SET_DEQ:
		WARN_ON(slot_id != TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3])));
		xhci_handle_cmd_set_deq(xhci, slot_id, cmd_trb, cmd_comp_code);
		break;
	case TRB_CMD_NOOP:
		/* Is this an aborted command turned to NO-OP? */
		if (cmd->status == COMP_STOPPED)
			cmd_comp_code = COMP_STOPPED;
		break;
	case TRB_RESET_EP:
		WARN_ON(slot_id != TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3])));
		xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code);
		break;
	case TRB_RESET_DEV:
		/* SLOT_ID field in reset device cmd completion event TRB is 0.
		 * Use the SLOT_ID from the command TRB instead (xhci 4.6.11)
		 */
		slot_id = TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3]));
		xhci_handle_cmd_reset_dev(xhci, slot_id, event);
		break;
	case TRB_NEC_GET_FW:
		xhci_handle_cmd_nec_get_fw(xhci, event);
		break;
	default:
		/* Skip over unknown commands on the event ring */
		xhci_info(xhci, "INFO unknown command type %d\n", cmd_type);
		break;
	}

	/* restart timer if this wasn't the last command */
	if (!list_is_singular(&xhci->cmd_list)) {
		xhci->current_cmd = list_first_entry(&cmd->cmd_list,
				struct xhci_command, cmd_list);
		xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
	} else if (xhci->current_cmd == cmd) {
		xhci->current_cmd = NULL;
	}

event_handled:
	xhci_complete_del_and_free_cmd(cmd, cmd_comp_code);

	inc_deq(xhci, xhci->cmd_ring);
}

static void handle_vendor_event(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	u32 trb_type;

	trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->generic.field[3]));
	xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
	if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
		handle_cmd_completion(xhci, &event->event_cmd);
}

/* @port_id: the one-based port ID from the hardware (indexed from array of all
 * port registers -- USB 3.0 and USB 2.0).
 *
 * Returns a zero-based port number, which is suitable for indexing into each of
 * the split roothubs' port arrays and bus state arrays.
 * Add one to it in order to call xhci_find_slot_id_by_port.
 */
static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd,
		struct xhci_hcd *xhci, u32 port_id)
{
	unsigned int i;
	unsigned int num_similar_speed_ports = 0;

	/* port_id from the hardware is 1-based, but port_array[], usb3_ports[],
	 * and usb2_ports are 0-based indexes.  Count the number of similar
	 * speed ports, up to 1 port before this port.
	 */
	for (i = 0; i < (port_id - 1); i++) {
		u8 port_speed = xhci->port_array[i];

		/*
		 * Skip ports that don't have known speeds, or have duplicate
		 * Extended Capabilities port speed entries.
		 */
		if (port_speed == 0 || port_speed == DUPLICATE_ENTRY)
			continue;

		/*
		 * USB 3.0 ports are always under a USB 3.0 hub.  USB 2.0 and
		 * 1.1 ports are under the USB 2.0 hub.  If the port speed
		 * matches the device speed, it's a similar speed port.
		 */
		if ((port_speed == 0x03) == (hcd->speed >= HCD_USB3))
			num_similar_speed_ports++;
	}
	return num_similar_speed_ports;
}
1490
623bef9e
SS
1491static void handle_device_notification(struct xhci_hcd *xhci,
1492 union xhci_trb *event)
1493{
1494 u32 slot_id;
4ee823b8 1495 struct usb_device *udev;
623bef9e 1496
7e76ad43 1497 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->generic.field[3]));
4ee823b8 1498 if (!xhci->devs[slot_id]) {
623bef9e
SS
1499 xhci_warn(xhci, "Device Notification event for "
1500 "unused slot %u\n", slot_id);
4ee823b8
SS
1501 return;
1502 }
1503
1504 xhci_dbg(xhci, "Device Wake Notification event for slot ID %u\n",
1505 slot_id);
1506 udev = xhci->devs[slot_id]->udev;
1507 if (udev && udev->parent)
1508 usb_wakeup_notification(udev->parent, udev->portnum);
623bef9e
SS
1509}
1510
0f2a7930
SS
1511static void handle_port_status(struct xhci_hcd *xhci,
1512 union xhci_trb *event)
1513{
f6ff0ac8 1514 struct usb_hcd *hcd;
0f2a7930 1515 u32 port_id;
56192531 1516 u32 temp, temp1;
518e848e 1517 int max_ports;
56192531 1518 int slot_id;
5308a91b 1519 unsigned int faked_port_index;
f6ff0ac8 1520 u8 major_revision;
20b67cf5 1521 struct xhci_bus_state *bus_state;
28ccd296 1522 __le32 __iomem **port_array;
386139d7 1523 bool bogus_port_status = false;
0f2a7930
SS
1524
1525 /* Port status change events always have a successful completion code */
f4c8f03c
LB
1526 if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS)
1527 xhci_warn(xhci,
1528 "WARN: xHC returned failed port status event\n");
1529
28ccd296 1530 port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
0f2a7930
SS
1531 xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);
1532
518e848e
SS
1533 max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
1534 if ((port_id <= 0) || (port_id > max_ports)) {
56192531 1535 xhci_warn(xhci, "Invalid port id %d\n", port_id);
09ce0c0c
PC
1536 inc_deq(xhci, xhci->event_ring);
1537 return;
56192531
AX
1538 }
1539
f6ff0ac8
SS
1540 /* Figure out which usb_hcd this port is attached to:
1541 * is it a USB 3.0 port or a USB 2.0/1.1 port?
1542 */
1543 major_revision = xhci->port_array[port_id - 1];
09ce0c0c
PC
1544
1545 /* Find the right roothub. */
1546 hcd = xhci_to_hcd(xhci);
b50107bb 1547 if ((major_revision == 0x03) != (hcd->speed >= HCD_USB3))
09ce0c0c
PC
1548 hcd = xhci->shared_hcd;
1549
f6ff0ac8
SS
1550 if (major_revision == 0) {
1551 xhci_warn(xhci, "Event for port %u not in "
1552 "Extended Capabilities, ignoring.\n",
1553 port_id);
386139d7 1554 bogus_port_status = true;
f6ff0ac8 1555 goto cleanup;
5308a91b 1556 }
22e04870 1557 if (major_revision == DUPLICATE_ENTRY) {
f6ff0ac8
SS
 1558 xhci_warn(xhci, "Event for port %u duplicated in "
1559 "Extended Capabilities, ignoring.\n",
1560 port_id);
386139d7 1561 bogus_port_status = true;
f6ff0ac8
SS
1562 goto cleanup;
1563 }
1564
1565 /*
1566 * Hardware port IDs reported by a Port Status Change Event include USB
1567 * 3.0 and USB 2.0 ports. We want to check if the port has reported a
1568 * resume event, but we first need to translate the hardware port ID
1569 * into the index into the ports on the correct split roothub, and the
1570 * correct bus_state structure.
1571 */
f6ff0ac8 1572 bus_state = &xhci->bus_state[hcd_index(hcd)];
b50107bb 1573 if (hcd->speed >= HCD_USB3)
f6ff0ac8
SS
1574 port_array = xhci->usb3_ports;
1575 else
1576 port_array = xhci->usb2_ports;
1577 /* Find the faked port hub number */
1578 faked_port_index = find_faked_portnum_from_hw_portnum(hcd, xhci,
1579 port_id);
5308a91b 1580
b0ba9720 1581 temp = readl(port_array[faked_port_index]);
7111ebc9 1582 if (hcd->state == HC_STATE_SUSPENDED) {
56192531
AX
1583 xhci_dbg(xhci, "resume root hub\n");
1584 usb_hcd_resume_root_hub(hcd);
1585 }
1586
b50107bb 1587 if (hcd->speed >= HCD_USB3 && (temp & PORT_PLS_MASK) == XDEV_INACTIVE)
fac4271d
ZJC
1588 bus_state->port_remote_wakeup &= ~(1 << faked_port_index);
1589
56192531
AX
1590 if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
1591 xhci_dbg(xhci, "port resume event for port %d\n", port_id);
1592
b0ba9720 1593 temp1 = readl(&xhci->op_regs->command);
56192531
AX
1594 if (!(temp1 & CMD_RUN)) {
1595 xhci_warn(xhci, "xHC is not running.\n");
1596 goto cleanup;
1597 }
1598
2338b9e4 1599 if (DEV_SUPERSPEED_ANY(temp)) {
d93814cf 1600 xhci_dbg(xhci, "remote wake SS port %d\n", port_id);
4ee823b8
SS
1601 /* Set a flag to say the port signaled remote wakeup,
 1602 * so we can tell the difference between the end of a
 1603 * device-initiated and a host-initiated resume.
1604 */
1605 bus_state->port_remote_wakeup |= 1 << faked_port_index;
d93814cf
SS
1606 xhci_test_and_clear_bit(xhci, port_array,
1607 faked_port_index, PORT_PLC);
c9682dff
AX
1608 xhci_set_link_state(xhci, port_array, faked_port_index,
1609 XDEV_U0);
d93814cf
SS
1610 /* Need to wait until the next link state change
1611 * indicates the device is actually in U0.
1612 */
1613 bogus_port_status = true;
1614 goto cleanup;
f69115fd
MN
1615 } else if (!test_bit(faked_port_index,
1616 &bus_state->resuming_ports)) {
56192531 1617 xhci_dbg(xhci, "resume HS port %d\n", port_id);
f6ff0ac8 1618 bus_state->resume_done[faked_port_index] = jiffies +
b9e45188 1619 msecs_to_jiffies(USB_RESUME_TIMEOUT);
f370b996 1620 set_bit(faked_port_index, &bus_state->resuming_ports);
56192531 1621 mod_timer(&hcd->rh_timer,
f6ff0ac8 1622 bus_state->resume_done[faked_port_index]);
56192531
AX
1623 /* Do the rest in GetPortStatus */
1624 }
1625 }
d93814cf
SS
1626
1627 if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_U0 &&
2338b9e4 1628 DEV_SUPERSPEED_ANY(temp)) {
d93814cf 1629 xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
4ee823b8
SS
1630 /* We've just brought the device into U0 through either the
1631 * Resume state after a device remote wakeup, or through the
1632 * U3Exit state after a host-initiated resume. If it's a device
1633 * initiated remote wake, don't pass up the link state change,
1634 * so the roothub behavior is consistent with external
1635 * USB 3.0 hub behavior.
1636 */
d93814cf
SS
1637 slot_id = xhci_find_slot_id_by_port(hcd, xhci,
1638 faked_port_index + 1);
1639 if (slot_id && xhci->devs[slot_id])
1640 xhci_ring_device(xhci, slot_id);
ba7b5c22 1641 if (bus_state->port_remote_wakeup & (1 << faked_port_index)) {
4ee823b8
SS
1642 bus_state->port_remote_wakeup &=
1643 ~(1 << faked_port_index);
1644 xhci_test_and_clear_bit(xhci, port_array,
1645 faked_port_index, PORT_PLC);
1646 usb_wakeup_notification(hcd->self.root_hub,
1647 faked_port_index + 1);
1648 bogus_port_status = true;
1649 goto cleanup;
1650 }
d93814cf 1651 }
56192531 1652
8b3d4570
SS
1653 /*
1654 * Check to see if xhci-hub.c is waiting on RExit to U0 transition (or
 1655 * RExit to a disconnect state). If so, let the driver know it's
1656 * out of the RExit state.
1657 */
2338b9e4 1658 if (!DEV_SUPERSPEED_ANY(temp) &&
8b3d4570
SS
1659 test_and_clear_bit(faked_port_index,
1660 &bus_state->rexit_ports)) {
1661 complete(&bus_state->rexit_done[faked_port_index]);
1662 bogus_port_status = true;
1663 goto cleanup;
1664 }
1665
b50107bb 1666 if (hcd->speed < HCD_USB3)
6fd45621
AX
1667 xhci_test_and_clear_bit(xhci, port_array, faked_port_index,
1668 PORT_PLC);
1669
56192531 1670cleanup:
0f2a7930 1671 /* Update event ring dequeue pointer before dropping the lock */
3b72fca0 1672 inc_deq(xhci, xhci->event_ring);
0f2a7930 1673
386139d7
SS
1674 /* Don't make the USB core poll the roothub if we got a bad port status
1675 * change event. Besides, at that point we can't tell which roothub
1676 * (USB 2.0 or USB 3.0) to kick.
1677 */
1678 if (bogus_port_status)
1679 return;
1680
c52804a4
SS
1681 /*
1682 * xHCI port-status-change events occur when the "or" of all the
1683 * status-change bits in the portsc register changes from 0 to 1.
1684 * New status changes won't cause an event if any other change
1685 * bits are still set. When an event occurs, switch over to
1686 * polling to avoid losing status changes.
1687 */
1688 xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
1689 set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
0f2a7930
SS
1690 spin_unlock(&xhci->lock);
1691 /* Pass this up to the core */
f6ff0ac8 1692 usb_hcd_poll_rh_status(hcd);
0f2a7930
SS
1693 spin_lock(&xhci->lock);
1694}
1695
d0e96f5a
SS
1696/*
1697 * This TD is defined by the TRBs starting at start_trb in start_seg and ending
1698 * at end_trb, which may be in another segment. If the suspect DMA address is a
1699 * TRB in this TD, this function returns that TRB's segment. Otherwise it
 1700 * returns NULL.
1701 */
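/*
 * Illustrative wrap-around case: if a one-segment ring holds a TD whose
 * start TRB sits above its end TRB (the TD wraps past the link TRB back
 * to the top of the segment), suspect_dma is a hit when it lies either
 * between start_dma and the end of the segment, or between the start of
 * the segment and end_trb_dma; see the wrapped-TD branch below.
 */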
cffb9be8
HG
1702struct xhci_segment *trb_in_td(struct xhci_hcd *xhci,
1703 struct xhci_segment *start_seg,
d0e96f5a
SS
1704 union xhci_trb *start_trb,
1705 union xhci_trb *end_trb,
cffb9be8
HG
1706 dma_addr_t suspect_dma,
1707 bool debug)
d0e96f5a
SS
1708{
1709 dma_addr_t start_dma;
1710 dma_addr_t end_seg_dma;
1711 dma_addr_t end_trb_dma;
1712 struct xhci_segment *cur_seg;
1713
23e3be11 1714 start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
d0e96f5a
SS
1715 cur_seg = start_seg;
1716
1717 do {
2fa88daa 1718 if (start_dma == 0)
326b4810 1719 return NULL;
ae636747 1720 /* We may get an event for a Link TRB in the middle of a TD */
23e3be11 1721 end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
2fa88daa 1722 &cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
d0e96f5a 1723 /* If the end TRB isn't in this segment, this is set to 0 */
23e3be11 1724 end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);
d0e96f5a 1725
cffb9be8
HG
1726 if (debug)
1727 xhci_warn(xhci,
1728 "Looking for event-dma %016llx trb-start %016llx trb-end %016llx seg-start %016llx seg-end %016llx\n",
1729 (unsigned long long)suspect_dma,
1730 (unsigned long long)start_dma,
1731 (unsigned long long)end_trb_dma,
1732 (unsigned long long)cur_seg->dma,
1733 (unsigned long long)end_seg_dma);
1734
d0e96f5a
SS
1735 if (end_trb_dma > 0) {
1736 /* The end TRB is in this segment, so suspect should be here */
1737 if (start_dma <= end_trb_dma) {
1738 if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
1739 return cur_seg;
1740 } else {
1741 /* Case for one segment with
1742 * a TD wrapped around to the top
1743 */
1744 if ((suspect_dma >= start_dma &&
1745 suspect_dma <= end_seg_dma) ||
1746 (suspect_dma >= cur_seg->dma &&
1747 suspect_dma <= end_trb_dma))
1748 return cur_seg;
1749 }
326b4810 1750 return NULL;
d0e96f5a
SS
1751 } else {
1752 /* Might still be somewhere in this segment */
1753 if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
1754 return cur_seg;
1755 }
1756 cur_seg = cur_seg->next;
23e3be11 1757 start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
2fa88daa 1758 } while (cur_seg != start_seg);
d0e96f5a 1759
326b4810 1760 return NULL;
d0e96f5a
SS
1761}
1762
bcef3fd5
SS
1763static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
1764 unsigned int slot_id, unsigned int ep_index,
e9df17eb 1765 unsigned int stream_id,
f97c08ae 1766 struct xhci_td *td, union xhci_trb *ep_trb)
bcef3fd5
SS
1767{
1768 struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
ddba5cd0
MN
1769 struct xhci_command *command;
1770 command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
1771 if (!command)
1772 return;
1773
d0167ad2 1774 ep->ep_state |= EP_HALTED;
e9df17eb 1775 ep->stopped_stream = stream_id;
1624ae1c 1776
ddba5cd0 1777 xhci_queue_reset_ep(xhci, command, slot_id, ep_index);
d97b4f8d 1778 xhci_cleanup_stalled_ring(xhci, ep_index, td);
1624ae1c 1779
5e5cf6fc 1780 ep->stopped_stream = 0;
1624ae1c 1781
bcef3fd5
SS
1782 xhci_ring_cmd_db(xhci);
1783}
1784
1785/* Check if an error has halted the endpoint ring. The class driver will
 1786 * clean up the halt for a non-default control endpoint if we indicate a stall.
 1787 * However, babble and other errors also halt the endpoint ring, and the class
1788 * driver won't clear the halt in that case, so we need to issue a Set Transfer
1789 * Ring Dequeue Pointer command manually.
1790 */
1791static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
1792 struct xhci_ep_ctx *ep_ctx,
1793 unsigned int trb_comp_code)
1794{
1795 /* TRB completion codes that may require a manual halt cleanup */
0b7c105a
FB
1796 if (trb_comp_code == COMP_USB_TRANSACTION_ERROR ||
1797 trb_comp_code == COMP_BABBLE_DETECTED_ERROR ||
1798 trb_comp_code == COMP_SPLIT_TRANSACTION_ERROR)
d4fc8bf5 1799 /* The 0.95 spec says a babbling control endpoint
bcef3fd5
SS
1800 * is not halted. The 0.96 spec says it is. Some HW
1801 * claims to be 0.95 compliant, but it halts the control
1802 * endpoint anyway. Check if a babble halted the
1803 * endpoint.
1804 */
5071e6b2 1805 if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_HALTED)
bcef3fd5
SS
1806 return 1;
1807
1808 return 0;
1809}
1810
b45b5069
SS
1811int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
1812{
1813 if (trb_comp_code >= 224 && trb_comp_code <= 255) {
1814 /* Vendor defined "informational" completion code,
1815 * treat as not-an-error.
1816 */
1817 xhci_dbg(xhci, "Vendor defined info completion code %u\n",
1818 trb_comp_code);
1819 xhci_dbg(xhci, "Treating code as success.\n");
1820 return 1;
1821 }
1822 return 0;
1823}
1824
4422da61 1825static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
f97c08ae 1826 union xhci_trb *ep_trb, struct xhci_transfer_event *event,
4422da61
AX
1827 struct xhci_virt_ep *ep, int *status, bool skip)
1828{
1829 struct xhci_virt_device *xdev;
4422da61 1830 struct xhci_ep_ctx *ep_ctx;
be0f50c2 1831 struct xhci_ring *ep_ring;
8e51adcc 1832 struct urb_priv *urb_priv;
be0f50c2
FB
1833 struct urb *urb = NULL;
1834 unsigned int slot_id;
4422da61 1835 u32 trb_comp_code;
be0f50c2 1836 int ep_index;
4422da61 1837
28ccd296 1838 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
4422da61 1839 xdev = xhci->devs[slot_id];
28ccd296
ME
1840 ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
1841 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
4422da61 1842 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
28ccd296 1843 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
4422da61
AX
1844
1845 if (skip)
1846 goto td_cleanup;
1847
0b7c105a
FB
1848 if (trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
1849 trb_comp_code == COMP_STOPPED ||
1850 trb_comp_code == COMP_STOPPED_SHORT_PACKET) {
4422da61
AX
1851 /* The Endpoint Stop Command completion will take care of any
1852 * stopped TDs. A stopped TD may be restarted, so don't update
1853 * the ring dequeue pointer or take this TD off any lists yet.
1854 */
1855 ep->stopped_td = td;
4422da61 1856 return 0;
69defe04 1857 }
0b7c105a 1858 if (trb_comp_code == COMP_STALL_ERROR ||
69defe04
MN
1859 xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
1860 trb_comp_code)) {
1861 /* Issue a reset endpoint command to clear the host side
1862 * halt, followed by a set dequeue command to move the
1863 * dequeue pointer past the TD.
1864 * The class driver clears the device side halt later.
1865 */
1866 xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index,
f97c08ae 1867 ep_ring->stream_id, td, ep_trb);
4422da61 1868 } else {
69defe04
MN
1869 /* Update ring dequeue pointer */
1870 while (ep_ring->dequeue != td->last_trb)
3b72fca0 1871 inc_deq(xhci, ep_ring);
69defe04
MN
1872 inc_deq(xhci, ep_ring);
1873 }
4422da61
AX
1874
1875td_cleanup:
69defe04
MN
1876 /* Clean up the endpoint's TD list */
1877 urb = td->urb;
1878 urb_priv = urb->hcpriv;
1879
f9c589e1
MN
1880 /* if a bounce buffer was used to align this td then unmap it */
1881 if (td->bounce_seg)
1882 xhci_unmap_td_bounce_buffer(xhci, ep_ring, td);
1883
69defe04
MN
1884 /* Do one last check of the actual transfer length.
1885 * If the host controller said we transferred more data than the buffer
1886 * length, urb->actual_length will be a very big number (since it's
1887 * unsigned). Play it safe and say we didn't transfer anything.
1888 */
1889 if (urb->actual_length > urb->transfer_buffer_length) {
2a72126d
MN
1890 xhci_warn(xhci, "URB req %u and actual %u transfer length mismatch\n",
1891 urb->transfer_buffer_length, urb->actual_length);
69defe04 1892 urb->actual_length = 0;
2a72126d 1893 *status = 0;
69defe04
MN
1894 }
1895 list_del_init(&td->td_list);
1896 /* Was this TD slated to be cancelled but completed anyway? */
1897 if (!list_empty(&td->cancelled_td_list))
1898 list_del_init(&td->cancelled_td_list);
1899
2a72126d 1900 inc_td_cnt(urb);
69defe04 1901 /* Giveback the urb when all the tds are completed */
2a72126d
MN
1902 if (last_td_in_urb(td)) {
1903 if ((urb->actual_length != urb->transfer_buffer_length &&
1904 (urb->transfer_flags & URB_SHORT_NOT_OK)) ||
1905 (*status != 0 && !usb_endpoint_xfer_isoc(&urb->ep->desc)))
1906 xhci_dbg(xhci, "Giveback URB %p, len = %d, expected = %d, status = %d\n",
1907 urb, urb->actual_length,
1908 urb->transfer_buffer_length, *status);
1909
1910 /* set isoc urb status to 0 just as EHCI, UHCI, and OHCI */
1911 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
1912 *status = 0;
1913 xhci_giveback_urb_in_irq(xhci, td, *status);
4422da61 1914 }
0c03d89d 1915 return 0;
4422da61
AX
1916}
1917
30a65b45
MN
1918/* sum trb lengths from ring dequeue up to stop_trb, _excluding_ stop_trb */
1919static int sum_trb_lengths(struct xhci_hcd *xhci, struct xhci_ring *ring,
1920 union xhci_trb *stop_trb)
1921{
1922 u32 sum;
1923 union xhci_trb *trb = ring->dequeue;
1924 struct xhci_segment *seg = ring->deq_seg;
1925
1926 for (sum = 0; trb != stop_trb; next_trb(xhci, ring, &seg, &trb)) {
1927 if (!trb_is_noop(trb) && !trb_is_link(trb))
1928 sum += TRB_LEN(le32_to_cpu(trb->generic.field[2]));
1929 }
1930 return sum;
1931}
1932
8af56be1
AX
1933/*
1934 * Process control tds, update urb status and actual_length.
1935 */
1936static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
f97c08ae 1937 union xhci_trb *ep_trb, struct xhci_transfer_event *event,
8af56be1
AX
1938 struct xhci_virt_ep *ep, int *status)
1939{
1940 struct xhci_virt_device *xdev;
1941 struct xhci_ring *ep_ring;
1942 unsigned int slot_id;
1943 int ep_index;
1944 struct xhci_ep_ctx *ep_ctx;
1945 u32 trb_comp_code;
0b6c324c 1946 u32 remaining, requested;
29fc1aa4 1947 u32 trb_type;
8af56be1 1948
29fc1aa4 1949 trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(ep_trb->generic.field[3]));
28ccd296 1950 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
8af56be1 1951 xdev = xhci->devs[slot_id];
28ccd296
ME
1952 ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
1953 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
8af56be1 1954 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
28ccd296 1955 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
0b6c324c
MN
1956 requested = td->urb->transfer_buffer_length;
1957 remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
1958
8af56be1
AX
1959 switch (trb_comp_code) {
1960 case COMP_SUCCESS:
29fc1aa4 1961 if (trb_type != TRB_STATUS) {
0b6c324c 1962 xhci_warn(xhci, "WARN: Success on ctrl %s TRB without IOC set?\n",
29fc1aa4 1963 (trb_type == TRB_DATA) ? "data" : "setup");
8af56be1 1964 *status = -ESHUTDOWN;
0b6c324c 1965 break;
8af56be1 1966 }
0b6c324c 1967 *status = 0;
8af56be1 1968 break;
0b7c105a 1969 case COMP_SHORT_PACKET:
0b6c324c 1970 *status = 0;
8af56be1 1971 break;
0b7c105a 1972 case COMP_STOPPED_SHORT_PACKET:
29fc1aa4 1973 if (trb_type == TRB_DATA || trb_type == TRB_NORMAL)
0b6c324c 1974 td->urb->actual_length = remaining;
40a3b775 1975 else
0b6c324c
MN
1976 xhci_warn(xhci, "WARN: Stopped Short Packet on ctrl setup or status TRB\n");
1977 goto finish_td;
0b7c105a 1978 case COMP_STOPPED:
29fc1aa4
FB
1979 switch (trb_type) {
1980 case TRB_SETUP:
1981 td->urb->actual_length = 0;
1982 goto finish_td;
1983 case TRB_DATA:
1984 case TRB_NORMAL:
0b6c324c 1985 td->urb->actual_length = requested - remaining;
29fc1aa4
FB
1986 goto finish_td;
1987 default:
1988 xhci_warn(xhci, "WARN: unexpected TRB Type %d\n",
1989 trb_type);
1990 goto finish_td;
1991 }
0b7c105a 1992 case COMP_STOPPED_LENGTH_INVALID:
0b6c324c 1993 goto finish_td;
8af56be1
AX
1994 default:
1995 if (!xhci_requires_manual_halt_cleanup(xhci,
0b6c324c 1996 ep_ctx, trb_comp_code))
8af56be1 1997 break;
0b6c324c
MN
1998 xhci_dbg(xhci, "TRB error %u, halted endpoint index = %u\n",
1999 trb_comp_code, ep_index);
8af56be1 2000 /* else fall through */
0b7c105a 2001 case COMP_STALL_ERROR:
8af56be1 2002 /* Did we transfer part of the data (middle) phase? */
29fc1aa4 2003 if (trb_type == TRB_DATA || trb_type == TRB_NORMAL)
0b6c324c 2004 td->urb->actual_length = requested - remaining;
22ae47e6 2005 else if (!td->urb_length_set)
8af56be1 2006 td->urb->actual_length = 0;
0b6c324c 2007 goto finish_td;
8af56be1 2008 }
0b6c324c
MN
2009
2010 /* stopped at setup stage, no data transferred */
29fc1aa4 2011 if (trb_type == TRB_SETUP)
0b6c324c
MN
2012 goto finish_td;
2013
8af56be1 2014 /*
0b6c324c
MN
 2015 * if we are on the data stage, update the actual_length of the URB and flag it
2016 * as set, so it won't be overwritten in the event for the last TRB.
8af56be1 2017 */
29fc1aa4
FB
2018 if (trb_type == TRB_DATA ||
2019 trb_type == TRB_NORMAL) {
0b6c324c
MN
2020 td->urb_length_set = true;
2021 td->urb->actual_length = requested - remaining;
2022 xhci_dbg(xhci, "Waiting for status stage event\n");
2023 return 0;
8af56be1
AX
2024 }
2025
0b6c324c
MN
2026 /* at status stage */
2027 if (!td->urb_length_set)
2028 td->urb->actual_length = requested;
2029
2030finish_td:
f97c08ae 2031 return finish_td(xhci, td, ep_trb, event, ep, status, false);
8af56be1
AX
2032}
2033
04e51901
AX
2034/*
2035 * Process isochronous tds, update urb packet status and actual_length.
2036 */
2037static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
f97c08ae 2038 union xhci_trb *ep_trb, struct xhci_transfer_event *event,
04e51901
AX
2039 struct xhci_virt_ep *ep, int *status)
2040{
2041 struct xhci_ring *ep_ring;
2042 struct urb_priv *urb_priv;
2043 int idx;
926008c9 2044 struct usb_iso_packet_descriptor *frame;
04e51901 2045 u32 trb_comp_code;
36da3a1d
MN
2046 bool sum_trbs_for_length = false;
2047 u32 remaining, requested, ep_trb_len;
2048 int short_framestatus;
04e51901 2049
28ccd296
ME
2050 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
2051 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
04e51901
AX
2052 urb_priv = td->urb->hcpriv;
2053 idx = urb_priv->td_cnt;
926008c9 2054 frame = &td->urb->iso_frame_desc[idx];
36da3a1d
MN
2055 requested = frame->length;
2056 remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2057 ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
2058 short_framestatus = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
2059 -EREMOTEIO : 0;
04e51901 2060
926008c9
DT
2061 /* handle completion code */
2062 switch (trb_comp_code) {
2063 case COMP_SUCCESS:
36da3a1d
MN
2064 if (remaining) {
2065 frame->status = short_framestatus;
2066 if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
2067 sum_trbs_for_length = true;
1530bbc6
SS
2068 break;
2069 }
36da3a1d
MN
2070 frame->status = 0;
2071 break;
0b7c105a 2072 case COMP_SHORT_PACKET:
36da3a1d
MN
2073 frame->status = short_framestatus;
2074 sum_trbs_for_length = true;
926008c9 2075 break;
0b7c105a 2076 case COMP_BANDWIDTH_OVERRUN_ERROR:
926008c9 2077 frame->status = -ECOMM;
926008c9 2078 break;
0b7c105a
FB
2079 case COMP_ISOCH_BUFFER_OVERRUN:
2080 case COMP_BABBLE_DETECTED_ERROR:
926008c9 2081 frame->status = -EOVERFLOW;
926008c9 2082 break;
0b7c105a
FB
2083 case COMP_INCOMPATIBLE_DEVICE_ERROR:
2084 case COMP_STALL_ERROR:
d104d015 2085 frame->status = -EPROTO;
d104d015 2086 break;
0b7c105a 2087 case COMP_USB_TRANSACTION_ERROR:
926008c9 2088 frame->status = -EPROTO;
f97c08ae 2089 if (ep_trb != td->last_trb)
d104d015 2090 return 0;
926008c9 2091 break;
0b7c105a 2092 case COMP_STOPPED:
36da3a1d
MN
2093 sum_trbs_for_length = true;
2094 break;
0b7c105a 2095 case COMP_STOPPED_SHORT_PACKET:
36da3a1d
MN
 2096 /* the field that normally holds the residue now holds the bytes transferred */
2097 frame->status = short_framestatus;
2098 requested = remaining;
2099 break;
0b7c105a 2100 case COMP_STOPPED_LENGTH_INVALID:
36da3a1d
MN
2101 requested = 0;
2102 remaining = 0;
926008c9
DT
2103 break;
2104 default:
36da3a1d 2105 sum_trbs_for_length = true;
926008c9
DT
2106 frame->status = -1;
2107 break;
04e51901
AX
2108 }
2109
36da3a1d
MN
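 /* actual length = data in the TRBs before the event TRB, plus the
  * completed part of the event TRB itself (ep_trb_len - remaining)
  */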
2110 if (sum_trbs_for_length)
2111 frame->actual_length = sum_trb_lengths(xhci, ep_ring, ep_trb) +
2112 ep_trb_len - remaining;
2113 else
2114 frame->actual_length = requested;
04e51901 2115
36da3a1d 2116 td->urb->actual_length += frame->actual_length;
04e51901 2117
f97c08ae 2118 return finish_td(xhci, td, ep_trb, event, ep, status, false);
04e51901
AX
2119}
2120
926008c9
DT
2121static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
2122 struct xhci_transfer_event *event,
2123 struct xhci_virt_ep *ep, int *status)
2124{
2125 struct xhci_ring *ep_ring;
2126 struct urb_priv *urb_priv;
2127 struct usb_iso_packet_descriptor *frame;
2128 int idx;
2129
f6975314 2130 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
926008c9
DT
2131 urb_priv = td->urb->hcpriv;
2132 idx = urb_priv->td_cnt;
2133 frame = &td->urb->iso_frame_desc[idx];
2134
b3df3f9c 2135 /* The transfer is partly done. */
926008c9
DT
2136 frame->status = -EXDEV;
2137
2138 /* calc actual length */
2139 frame->actual_length = 0;
2140
2141 /* Update ring dequeue pointer */
2142 while (ep_ring->dequeue != td->last_trb)
3b72fca0
AX
2143 inc_deq(xhci, ep_ring);
2144 inc_deq(xhci, ep_ring);
926008c9
DT
2145
2146 return finish_td(xhci, td, NULL, event, ep, status, true);
2147}
2148
22405ed2
AX
2149/*
2150 * Process bulk and interrupt tds, update urb status and actual_length.
2151 */
2152static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
f97c08ae 2153 union xhci_trb *ep_trb, struct xhci_transfer_event *event,
22405ed2
AX
2154 struct xhci_virt_ep *ep, int *status)
2155{
2156 struct xhci_ring *ep_ring;
22405ed2 2157 u32 trb_comp_code;
f97c08ae 2158 u32 remaining, requested, ep_trb_len;
22405ed2 2159
28ccd296
ME
2160 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
2161 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
30a65b45 2162 remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
f97c08ae 2163 ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
30a65b45 2164 requested = td->urb->transfer_buffer_length;
22405ed2
AX
2165
2166 switch (trb_comp_code) {
2167 case COMP_SUCCESS:
30a65b45 2168 /* handle success with untransferred data as short packet */
f97c08ae 2169 if (ep_trb != td->last_trb || remaining) {
52ab8685 2170 xhci_warn(xhci, "WARN Successful completion on short TX\n");
30a65b45
MN
2171 xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n",
2172 td->urb->ep->desc.bEndpointAddress,
2173 requested, remaining);
22405ed2 2174 }
52ab8685 2175 *status = 0;
22405ed2 2176 break;
0b7c105a 2177 case COMP_SHORT_PACKET:
30a65b45
MN
2178 xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n",
2179 td->urb->ep->desc.bEndpointAddress,
2180 requested, remaining);
52ab8685 2181 *status = 0;
22405ed2 2182 break;
0b7c105a 2183 case COMP_STOPPED_SHORT_PACKET:
30a65b45
MN
2184 td->urb->actual_length = remaining;
2185 goto finish_td;
0b7c105a 2186 case COMP_STOPPED_LENGTH_INVALID:
30a65b45 2187 /* stopped on ep trb with invalid length, exclude it */
f97c08ae 2188 ep_trb_len = 0;
30a65b45
MN
2189 remaining = 0;
2190 break;
22405ed2 2191 default:
30a65b45 2192 /* do nothing */
22405ed2
AX
2193 break;
2194 }
40a3b775 2195
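 /* An event on the last TRB means the whole TD completed; otherwise sum
  * the TRBs before the event TRB plus the completed part of it.
  */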
f97c08ae 2196 if (ep_trb == td->last_trb)
30a65b45
MN
2197 td->urb->actual_length = requested - remaining;
2198 else
2199 td->urb->actual_length =
f97c08ae
MN
2200 sum_trb_lengths(xhci, ep_ring, ep_trb) +
2201 ep_trb_len - remaining;
30a65b45
MN
2202finish_td:
2203 if (remaining > requested) {
2204 xhci_warn(xhci, "bad transfer trb length %d in event trb\n",
2205 remaining);
22405ed2 2206 td->urb->actual_length = 0;
22405ed2 2207 }
f97c08ae 2208 return finish_td(xhci, td, ep_trb, event, ep, status, false);
22405ed2
AX
2209}
2210
d0e96f5a
SS
2211/*
2212 * If this function returns an error condition, it means it got a Transfer
2213 * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
2214 * At this point, the host controller is probably hosed and should be reset.
2215 */
2216static int handle_tx_event(struct xhci_hcd *xhci,
2217 struct xhci_transfer_event *event)
ed384bd3
FB
2218 __releases(&xhci->lock)
2219 __acquires(&xhci->lock)
d0e96f5a
SS
2220{
2221 struct xhci_virt_device *xdev;
63a0d9ab 2222 struct xhci_virt_ep *ep;
d0e96f5a 2223 struct xhci_ring *ep_ring;
82d1009f 2224 unsigned int slot_id;
d0e96f5a 2225 int ep_index;
326b4810 2226 struct xhci_td *td = NULL;
f97c08ae
MN
2227 dma_addr_t ep_trb_dma;
2228 struct xhci_segment *ep_seg;
2229 union xhci_trb *ep_trb;
d0e96f5a 2230 int status = -EINPROGRESS;
d115b048 2231 struct xhci_ep_ctx *ep_ctx;
c2d7b49f 2232 struct list_head *tmp;
66d1eebc 2233 u32 trb_comp_code;
c2d7b49f 2234 int td_num = 0;
3b4739b8 2235 bool handling_skipped_tds = false;
d0e96f5a 2236
28ccd296 2237 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
82d1009f 2238 xdev = xhci->devs[slot_id];
d0e96f5a
SS
2239 if (!xdev) {
2240 xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
9258c0b2 2241 xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
e910b440
SS
2242 (unsigned long long) xhci_trb_virt_to_dma(
2243 xhci->event_ring->deq_seg,
9258c0b2
SS
2244 xhci->event_ring->dequeue),
2245 lower_32_bits(le64_to_cpu(event->buffer)),
2246 upper_32_bits(le64_to_cpu(event->buffer)),
2247 le32_to_cpu(event->transfer_len),
2248 le32_to_cpu(event->flags));
2249 xhci_dbg(xhci, "Event ring:\n");
2250 xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
d0e96f5a
SS
2251 return -ENODEV;
2252 }
2253
2254 /* Endpoint ID is 1 based, our index is zero based */
28ccd296 2255 ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
63a0d9ab 2256 ep = &xdev->eps[ep_index];
28ccd296 2257 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
d115b048 2258 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
5071e6b2 2259 if (!ep_ring || GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) {
e9df17eb
SS
2260 xhci_err(xhci, "ERROR Transfer event for disabled endpoint "
2261 "or incorrect stream ring\n");
9258c0b2 2262 xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
e910b440
SS
2263 (unsigned long long) xhci_trb_virt_to_dma(
2264 xhci->event_ring->deq_seg,
9258c0b2
SS
2265 xhci->event_ring->dequeue),
2266 lower_32_bits(le64_to_cpu(event->buffer)),
2267 upper_32_bits(le64_to_cpu(event->buffer)),
2268 le32_to_cpu(event->transfer_len),
2269 le32_to_cpu(event->flags));
2270 xhci_dbg(xhci, "Event ring:\n");
2271 xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
d0e96f5a
SS
2272 return -ENODEV;
2273 }
2274
c2d7b49f
AX
 2275 /* Count the number of TDs currently queued if ep->skip is set */
2276 if (ep->skip) {
2277 list_for_each(tmp, &ep_ring->td_list)
2278 td_num++;
2279 }
2280
f97c08ae 2281 ep_trb_dma = le64_to_cpu(event->buffer);
28ccd296 2282 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
986a92d4 2283 /* Look for common error cases */
66d1eebc 2284 switch (trb_comp_code) {
b10de142
SS
2285 /* Skip codes that require special handling depending on
2286 * transfer type
2287 */
2288 case COMP_SUCCESS:
1c11a172 2289 if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0)
1530bbc6
SS
2290 break;
2291 if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
0b7c105a 2292 trb_comp_code = COMP_SHORT_PACKET;
1530bbc6 2293 else
8202ce2e
SS
2294 xhci_warn_ratelimited(xhci,
2295 "WARN Successful completion on short TX: needs XHCI_TRUST_TX_LENGTH quirk?\n");
0b7c105a 2296 case COMP_SHORT_PACKET:
b10de142 2297 break;
0b7c105a 2298 case COMP_STOPPED:
ae636747
SS
2299 xhci_dbg(xhci, "Stopped on Transfer TRB\n");
2300 break;
0b7c105a 2301 case COMP_STOPPED_LENGTH_INVALID:
ae636747
SS
2302 xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
2303 break;
0b7c105a 2304 case COMP_STOPPED_SHORT_PACKET:
40a3b775
LB
2305 xhci_dbg(xhci, "Stopped with short packet transfer detected\n");
2306 break;
0b7c105a 2307 case COMP_STALL_ERROR:
2a9227a5 2308 xhci_dbg(xhci, "Stalled endpoint\n");
63a0d9ab 2309 ep->ep_state |= EP_HALTED;
b10de142
SS
2310 status = -EPIPE;
2311 break;
0b7c105a 2312 case COMP_TRB_ERROR:
b10de142
SS
2313 xhci_warn(xhci, "WARN: TRB error on endpoint\n");
2314 status = -EILSEQ;
2315 break;
0b7c105a
FB
2316 case COMP_SPLIT_TRANSACTION_ERROR:
2317 case COMP_USB_TRANSACTION_ERROR:
2a9227a5 2318 xhci_dbg(xhci, "Transfer error on endpoint\n");
b10de142
SS
2319 status = -EPROTO;
2320 break;
0b7c105a 2321 case COMP_BABBLE_DETECTED_ERROR:
2a9227a5 2322 xhci_dbg(xhci, "Babble error on endpoint\n");
4a73143c
SS
2323 status = -EOVERFLOW;
2324 break;
0b7c105a 2325 case COMP_DATA_BUFFER_ERROR:
b10de142
SS
2326 xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
2327 status = -ENOSR;
2328 break;
0b7c105a 2329 case COMP_BANDWIDTH_OVERRUN_ERROR:
986a92d4
AX
2330 xhci_warn(xhci, "WARN: bandwidth overrun event on endpoint\n");
2331 break;
0b7c105a 2332 case COMP_ISOCH_BUFFER_OVERRUN:
986a92d4
AX
2333 xhci_warn(xhci, "WARN: buffer overrun event on endpoint\n");
2334 break;
0b7c105a 2335 case COMP_RING_UNDERRUN:
986a92d4
AX
2336 /*
2337 * When the Isoch ring is empty, the xHC will generate
2338 * a Ring Overrun Event for IN Isoch endpoint or Ring
2339 * Underrun Event for OUT Isoch endpoint.
2340 */
2341 xhci_dbg(xhci, "underrun event on endpoint\n");
2342 if (!list_empty(&ep_ring->td_list))
2343 xhci_dbg(xhci, "Underrun Event for slot %d ep %d "
2344 "still with TDs queued?\n",
28ccd296
ME
2345 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2346 ep_index);
986a92d4 2347 goto cleanup;
0b7c105a 2348 case COMP_RING_OVERRUN:
986a92d4
AX
2349 xhci_dbg(xhci, "overrun event on endpoint\n");
2350 if (!list_empty(&ep_ring->td_list))
2351 xhci_dbg(xhci, "Overrun Event for slot %d ep %d "
2352 "still with TDs queued?\n",
28ccd296
ME
2353 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2354 ep_index);
986a92d4 2355 goto cleanup;
0b7c105a 2356 case COMP_INCOMPATIBLE_DEVICE_ERROR:
f6ba6fe2
AH
 2357 xhci_warn(xhci, "WARN: detected an incompatible device\n");
2358 status = -EPROTO;
2359 break;
0b7c105a 2360 case COMP_MISSED_SERVICE_ERROR:
d18240db
AX
2361 /*
 2362 * When we encounter a missed service error, one or more isoc TDs
 2363 * may have been missed by the xHC.
 2364 * Set the skip flag of the ep_ring; complete the missed TDs as
 2365 * short transfers the next time the ep_ring is processed.
2366 */
2367 ep->skip = true;
2368 xhci_dbg(xhci, "Miss service interval error, set skip flag\n");
2369 goto cleanup;
0b7c105a 2370 case COMP_NO_PING_RESPONSE_ERROR:
3b4739b8
MN
2371 ep->skip = true;
2372 xhci_dbg(xhci, "No Ping response error, Skip one Isoc TD\n");
2373 goto cleanup;
b10de142 2374 default:
b45b5069 2375 if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
5ad6a529
SS
2376 status = 0;
2377 break;
2378 }
86cd740a
MN
2379 xhci_warn(xhci, "ERROR Unknown event condition %u, HC probably busted\n",
2380 trb_comp_code);
986a92d4
AX
2381 goto cleanup;
2382 }
2383
d18240db
AX
2384 do {
2385 /* This TRB should be in the TD at the head of this ring's
2386 * TD list.
2387 */
2388 if (list_empty(&ep_ring->td_list)) {
a83d6755
SS
2389 /*
2390 * A stopped endpoint may generate an extra completion
2391 * event if the device was suspended. Don't print
2392 * warnings.
2393 */
0b7c105a
FB
2394 if (!(trb_comp_code == COMP_STOPPED ||
2395 trb_comp_code == COMP_STOPPED_LENGTH_INVALID)) {
a83d6755
SS
2396 xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
2397 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2398 ep_index);
2399 xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
2400 (le32_to_cpu(event->flags) &
2401 TRB_TYPE_BITMASK)>>10);
2402 xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
2403 }
d18240db
AX
2404 if (ep->skip) {
2405 ep->skip = false;
2406 xhci_dbg(xhci, "td_list is empty while skip "
2407 "flag set. Clear skip flag.\n");
2408 }
d18240db
AX
2409 goto cleanup;
2410 }
986a92d4 2411
c2d7b49f
AX
 2412 /* We've skipped all the TDs on the ep ring when ep->skip is set */
2413 if (ep->skip && td_num == 0) {
2414 ep->skip = false;
2415 xhci_dbg(xhci, "All tds on the ep_ring skipped. "
2416 "Clear skip flag.\n");
c2d7b49f
AX
2417 goto cleanup;
2418 }
2419
04861f83
FB
2420 td = list_first_entry(&ep_ring->td_list, struct xhci_td,
2421 td_list);
c2d7b49f
AX
2422 if (ep->skip)
2423 td_num--;
926008c9 2424
d18240db 2425 /* Is this a TRB in the currently executing TD? */
f97c08ae
MN
2426 ep_seg = trb_in_td(xhci, ep_ring->deq_seg, ep_ring->dequeue,
2427 td->last_trb, ep_trb_dma, false);
e1cf486d
AH
2428
2429 /*
2430 * Skip the Force Stopped Event. The event_trb(event_dma) of FSE
 2431 * is not in the current TD pointed to by ep_ring->dequeue because
 2432 * the hardware dequeue pointer is still at the previous TRB
 2433 * of the current TD. The previous TRB may be a Link TRB or the
 2434 * last TRB of the previous TD. The command completion handler
 2435 * will take care of the rest.
2436 */
0b7c105a
FB
2437 if (!ep_seg && (trb_comp_code == COMP_STOPPED ||
2438 trb_comp_code == COMP_STOPPED_LENGTH_INVALID)) {
e1cf486d
AH
2439 goto cleanup;
2440 }
2441
f97c08ae 2442 if (!ep_seg) {
926008c9
DT
2443 if (!ep->skip ||
2444 !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
ad808333
SS
2445 /* Some host controllers give a spurious
2446 * successful event after a short transfer.
2447 * Ignore it.
2448 */
ddba5cd0 2449 if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
ad808333
SS
2450 ep_ring->last_td_was_short) {
2451 ep_ring->last_td_was_short = false;
ad808333
SS
2452 goto cleanup;
2453 }
926008c9
DT
2454 /* HC is busted, give up! */
2455 xhci_err(xhci,
2456 "ERROR Transfer event TRB DMA ptr not "
cffb9be8
HG
2457 "part of current TD ep_index %d "
2458 "comp_code %u\n", ep_index,
2459 trb_comp_code);
2460 trb_in_td(xhci, ep_ring->deq_seg,
2461 ep_ring->dequeue, td->last_trb,
f97c08ae 2462 ep_trb_dma, true);
926008c9
DT
2463 return -ESHUTDOWN;
2464 }
2465
0c03d89d 2466 skip_isoc_td(xhci, td, event, ep, &status);
926008c9
DT
2467 goto cleanup;
2468 }
0b7c105a 2469 if (trb_comp_code == COMP_SHORT_PACKET)
ad808333
SS
2470 ep_ring->last_td_was_short = true;
2471 else
2472 ep_ring->last_td_was_short = false;
926008c9
DT
2473
2474 if (ep->skip) {
d18240db
AX
2475 xhci_dbg(xhci, "Found td. Clear skip flag.\n");
2476 ep->skip = false;
2477 }
678539cf 2478
f97c08ae
MN
2479 ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma) /
2480 sizeof(*ep_trb)];
926008c9
DT
2481 /*
2482 * No-op TRB should not trigger interrupts.
f97c08ae 2483 * If ep_trb is a no-op TRB, it means the
926008c9
DT
2484 * corresponding TD has been cancelled. Just ignore
2485 * the TD.
2486 */
f97c08ae
MN
2487 if (trb_is_noop(ep_trb)) {
2488 xhci_dbg(xhci, "ep_trb is a no-op TRB. Skip it\n");
926008c9 2489 goto cleanup;
d18240db 2490 }
4422da61 2491
0c03d89d 2492 /* update the urb's actual_length and give back to the core */
d18240db 2493 if (usb_endpoint_xfer_control(&td->urb->ep->desc))
0c03d89d 2494 process_ctrl_td(xhci, td, ep_trb, event, ep, &status);
04e51901 2495 else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
0c03d89d 2496 process_isoc_td(xhci, td, ep_trb, event, ep, &status);
d18240db 2497 else
0c03d89d
MN
2498 process_bulk_intr_td(xhci, td, ep_trb, event, ep,
2499 &status);
d18240db 2500cleanup:
3b4739b8 2501 handling_skipped_tds = ep->skip &&
0b7c105a
FB
2502 trb_comp_code != COMP_MISSED_SERVICE_ERROR &&
2503 trb_comp_code != COMP_NO_PING_RESPONSE_ERROR;
3b4739b8 2504
d18240db 2505 /*
3b4739b8
MN
2506 * Do not update event ring dequeue pointer if we're in a loop
 2507 * processing missed TDs.
d18240db 2508 */
3b4739b8 2509 if (!handling_skipped_tds)
3b72fca0 2510 inc_deq(xhci, xhci->event_ring);
d18240db 2511
d18240db
AX
2512 /*
 2513 * If ep->skip is set, it means there are missed TDs on the
 2514 * endpoint ring that need to be taken care of.
 2515 * Process them as short transfers until we reach the TD pointed
 2516 * to by the event.
2517 */
3b4739b8 2518 } while (handling_skipped_tds);
d18240db 2519
d0e96f5a
SS
2520 return 0;
2521}
2522
0f2a7930
SS
2523/*
2524 * This function handles all OS-owned events on the event ring. It may drop
2525 * xhci->lock between event processing (e.g. to pass up port status changes).
9dee9a21
ME
2526 * Returns >0 for "possibly more events to process" (caller should call again),
2527 * otherwise 0 if done. In future, <0 returns should indicate error code.
0f2a7930 2528 */
9dee9a21 2529static int xhci_handle_event(struct xhci_hcd *xhci)
7f84eef0
SS
2530{
2531 union xhci_trb *event;
0f2a7930 2532 int update_ptrs = 1;
d0e96f5a 2533 int ret;
7f84eef0 2534
f4c8f03c 2535 /* Event ring hasn't been allocated yet. */
7f84eef0 2536 if (!xhci->event_ring || !xhci->event_ring->dequeue) {
f4c8f03c
LB
2537 xhci_err(xhci, "ERROR event ring not ready\n");
2538 return -ENOMEM;
7f84eef0
SS
2539 }
2540
2541 event = xhci->event_ring->dequeue;
2542 /* Does the HC or OS own the TRB? */
28ccd296 2543 if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
f4c8f03c 2544 xhci->event_ring->cycle_state)
9dee9a21 2545 return 0;
7f84eef0 2546
92a3da41
ME
2547 /*
2548 * Barrier between reading the TRB_CYCLE (valid) flag above and any
2549 * speculative reads of the event's flags/data below.
2550 */
2551 rmb();
0f2a7930 2552 /* FIXME: Handle more event types. */
f4c8f03c 2553 switch (le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) {
7f84eef0
SS
2554 case TRB_TYPE(TRB_COMPLETION):
2555 handle_cmd_completion(xhci, &event->event_cmd);
2556 break;
0f2a7930
SS
2557 case TRB_TYPE(TRB_PORT_STATUS):
2558 handle_port_status(xhci, event);
2559 update_ptrs = 0;
2560 break;
d0e96f5a
SS
2561 case TRB_TYPE(TRB_TRANSFER):
2562 ret = handle_tx_event(xhci, &event->trans_event);
f4c8f03c 2563 if (ret >= 0)
d0e96f5a
SS
2564 update_ptrs = 0;
2565 break;
623bef9e
SS
2566 case TRB_TYPE(TRB_DEV_NOTE):
2567 handle_device_notification(xhci, event);
2568 break;
7f84eef0 2569 default:
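 /* TRB types 48 and above are vendor-defined per the xHCI spec */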
28ccd296
ME
2570 if ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) >=
2571 TRB_TYPE(48))
0238634d
SS
2572 handle_vendor_event(xhci, event);
2573 else
f4c8f03c
LB
2574 xhci_warn(xhci, "ERROR unknown event type %d\n",
2575 TRB_FIELD_TO_TYPE(
2576 le32_to_cpu(event->event_cmd.flags)));
7f84eef0 2577 }
6f5165cf
SS
2578 /* Any of the above functions may drop and re-acquire the lock, so check
2579 * to make sure a watchdog timer didn't mark the host as non-responsive.
2580 */
2581 if (xhci->xhc_state & XHCI_STATE_DYING) {
2582 xhci_dbg(xhci, "xHCI host dying, returning from "
2583 "event handler.\n");
9dee9a21 2584 return 0;
6f5165cf 2585 }
7f84eef0 2586
c06d68b8
SS
2587 if (update_ptrs)
2588 /* Update SW event ring dequeue pointer */
3b72fca0 2589 inc_deq(xhci, xhci->event_ring);
c06d68b8 2590
9dee9a21
ME
2591 /* Are there more items on the event ring? Caller will call us again to
2592 * check.
2593 */
2594 return 1;
7f84eef0 2595}
9032cd52
SS
2596
2597/*
2598 * xHCI spec says we can get an interrupt, and if the HC has an error condition,
2599 * we might get bad data out of the event ring. Section 4.10.2.7 has a list of
2600 * indicators of an event TRB error, but we check the status *first* to be safe.
2601 */
2602irqreturn_t xhci_irq(struct usb_hcd *hcd)
2603{
2604 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
c06d68b8 2605 union xhci_trb *event_ring_deq;
76a35293 2606 irqreturn_t ret = IRQ_NONE;
c06d68b8 2607 dma_addr_t deq;
76a35293
FB
2608 u64 temp_64;
2609 u32 status;
9032cd52
SS
2610
2611 spin_lock(&xhci->lock);
9032cd52 2612 /* Check if the xHC generated the interrupt, or the irq is shared */
b0ba9720 2613 status = readl(&xhci->op_regs->status);
76a35293
FB
2614 if (status == 0xffffffff) {
2615 ret = IRQ_HANDLED;
2616 goto out;
9032cd52 2617 }
76a35293
FB
2618
2619 if (!(status & STS_EINT))
2620 goto out;
2621
27e0dd4d 2622 if (status & STS_FATAL) {
9032cd52
SS
2623 xhci_warn(xhci, "WARNING: Host System Error\n");
2624 xhci_halt(xhci);
76a35293
FB
2625 ret = IRQ_HANDLED;
2626 goto out;
9032cd52
SS
2627 }
2628
bda53145
SS
2629 /*
2630 * Clear the op reg interrupt status first,
2631 * so we can receive interrupts from other MSI-X interrupters.
2632 * Write 1 to clear the interrupt status.
2633 */
27e0dd4d 2634 status |= STS_EINT;
204b7793 2635 writel(status, &xhci->op_regs->status);
bda53145
SS
2636 /* FIXME when MSI-X is supported and there are multiple vectors */
2637 /* Clear the MSI-X event interrupt status */
2638
cd70469d 2639 if (hcd->irq) {
c21599a3
SS
2640 u32 irq_pending;
2641 /* Acknowledge the PCI interrupt */
b0ba9720 2642 irq_pending = readl(&xhci->ir_set->irq_pending);
4e833c0b 2643 irq_pending |= IMAN_IP;
204b7793 2644 writel(irq_pending, &xhci->ir_set->irq_pending);
c21599a3 2645 }
bda53145 2646
27a41a83
GKB
2647 if (xhci->xhc_state & XHCI_STATE_DYING ||
2648 xhci->xhc_state & XHCI_STATE_HALTED) {
bda53145
SS
2649 xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
2650 "Shouldn't IRQs be disabled?\n");
c06d68b8
SS
2651 /* Clear the event handler busy flag (RW1C);
2652 * the event ring should be empty.
bda53145 2653 */
f7b2e403 2654 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
477632df
SS
2655 xhci_write_64(xhci, temp_64 | ERST_EHB,
2656 &xhci->ir_set->erst_dequeue);
76a35293
FB
2657 ret = IRQ_HANDLED;
2658 goto out;
c06d68b8
SS
2659 }
2660
2661 event_ring_deq = xhci->event_ring->dequeue;
2662 /* FIXME this should be a delayed service routine
2663 * that clears the EHB.
2664 */
9dee9a21 2665 while (xhci_handle_event(xhci) > 0) {}
bda53145 2666
f7b2e403 2667 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
c06d68b8
SS
2668 /* If necessary, update the HW's version of the event ring deq ptr. */
2669 if (event_ring_deq != xhci->event_ring->dequeue) {
2670 deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
2671 xhci->event_ring->dequeue);
2672 if (deq == 0)
2673 xhci_warn(xhci, "WARN something wrong with SW event "
2674 "ring dequeue ptr.\n");
2675 /* Update HC event ring dequeue pointer */
2676 temp_64 &= ERST_PTR_MASK;
2677 temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
2678 }
2679
2680 /* Clear the event handler busy flag (RW1C); event ring is empty. */
2681 temp_64 |= ERST_EHB;
477632df 2682 xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
76a35293 2683 ret = IRQ_HANDLED;
c06d68b8 2684
76a35293 2685out:
9032cd52
SS
2686 spin_unlock(&xhci->lock);
2687
76a35293 2688 return ret;
9032cd52
SS
2689}
2690
851ec164 2691irqreturn_t xhci_msi_irq(int irq, void *hcd)
9032cd52 2692{
968b822c 2693 return xhci_irq(hcd);
9032cd52 2694}
7f84eef0 2695
d0e96f5a
SS
2696/**** Endpoint Ring Operations ****/
2697
7f84eef0
SS
2698/*
2699 * Generic function for queueing a TRB on a ring.
2700 * The caller must have checked to make sure there's room on the ring.
6cc30d85
SS
2701 *
2702 * @more_trbs_coming: Will you enqueue more TRBs before calling
2703 * prepare_transfer()?
7f84eef0
SS
2704 */
2705static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
3b72fca0 2706 bool more_trbs_coming,
7f84eef0
SS
2707 u32 field1, u32 field2, u32 field3, u32 field4)
2708{
2709 struct xhci_generic_trb *trb;
2710
2711 trb = &ring->enqueue->generic;
28ccd296
ME
2712 trb->field[0] = cpu_to_le32(field1);
2713 trb->field[1] = cpu_to_le32(field2);
2714 trb->field[2] = cpu_to_le32(field3);
2715 trb->field[3] = cpu_to_le32(field4);
3b72fca0 2716 inc_enq(xhci, ring, more_trbs_coming);
7f84eef0
SS
2717}
2718
d0e96f5a
SS
2719/*
2720 * Does various checks on the endpoint ring, and makes it ready to queue num_trbs.
2721 * FIXME allocate segments if the ring is full.
2722 */
2723static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
3b72fca0 2724 u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
d0e96f5a 2725{
8dfec614
AX
2726 unsigned int num_trbs_needed;
2727
d0e96f5a 2728 /* Make sure the endpoint has been added to xHC schedule */
d0e96f5a
SS
2729 switch (ep_state) {
2730 case EP_STATE_DISABLED:
2731 /*
2732 * USB core changed config/interfaces without notifying us,
2733 * or hardware is reporting the wrong state.
2734 */
2735 xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
2736 return -ENOENT;
d0e96f5a 2737 case EP_STATE_ERROR:
c92bcfa7 2738 xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
d0e96f5a
SS
2739 /* FIXME event handling code for error needs to clear it */
2740 /* XXX not sure if this should be -ENOENT or not */
2741 return -EINVAL;
c92bcfa7
SS
2742 case EP_STATE_HALTED:
2743 xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
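 /* fall through: queue the URB anyway */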
d0e96f5a
SS
2744 case EP_STATE_STOPPED:
2745 case EP_STATE_RUNNING:
2746 break;
2747 default:
2748 xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
2749 /*
2750 * FIXME issue Configure Endpoint command to try to get the HC
2751 * back into a known state.
2752 */
2753 return -EINVAL;
2754 }
8dfec614
AX
2755
2756 while (1) {
3d4b81ed
SS
2757 if (room_on_ring(xhci, ep_ring, num_trbs))
2758 break;
8dfec614
AX
2759
2760 if (ep_ring == xhci->cmd_ring) {
 2761 xhci_err(xhci, "Command ring expansion is not supported\n");
2762 return -ENOMEM;
2763 }
2764
68ffb011
XR
2765 xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
2766 "ERROR no room on ep ring, try ring expansion");
8dfec614
AX
2767 num_trbs_needed = num_trbs - ep_ring->num_trbs_free;
2768 if (xhci_ring_expansion(xhci, ep_ring, num_trbs_needed,
2769 mem_flags)) {
2770 xhci_err(xhci, "Ring expansion failed\n");
2771 return -ENOMEM;
2772 }
261fa12b 2773 }
6c12db90 2774
d0c77d84
MN
2775 while (trb_is_link(ep_ring->enqueue)) {
2776 /* If we're not dealing with 0.95 hardware or isoc rings
2777 * on AMD 0.96 host, clear the chain bit.
2778 */
2779 if (!xhci_link_trb_quirk(xhci) &&
2780 !(ep_ring->type == TYPE_ISOC &&
2781 (xhci->quirks & XHCI_AMD_0x96_HOST)))
2782 ep_ring->enqueue->link.control &=
2783 cpu_to_le32(~TRB_CHAIN);
2784 else
2785 ep_ring->enqueue->link.control |=
2786 cpu_to_le32(TRB_CHAIN);
6c12db90 2787
d0c77d84
MN
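 /* Commit the chain bit update above before the cycle bit flip below
  * hands this link TRB over to the hardware.
  */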
2788 wmb();
2789 ep_ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE);
6c12db90 2790
d0c77d84
MN
2791 /* Toggle the cycle bit after the last ring segment. */
2792 if (link_trb_toggles_cycle(ep_ring->enqueue))
2793 ep_ring->cycle_state ^= 1;
6c12db90 2794
d0c77d84
MN
2795 ep_ring->enq_seg = ep_ring->enq_seg->next;
2796 ep_ring->enqueue = ep_ring->enq_seg->trbs;
6c12db90 2797 }
d0e96f5a
SS
2798 return 0;
2799}
2800
23e3be11 2801static int prepare_transfer(struct xhci_hcd *xhci,
d0e96f5a
SS
2802 struct xhci_virt_device *xdev,
2803 unsigned int ep_index,
e9df17eb 2804 unsigned int stream_id,
d0e96f5a
SS
2805 unsigned int num_trbs,
2806 struct urb *urb,
8e51adcc 2807 unsigned int td_index,
d0e96f5a
SS
2808 gfp_t mem_flags)
2809{
2810 int ret;
8e51adcc
AX
2811 struct urb_priv *urb_priv;
2812 struct xhci_td *td;
e9df17eb 2813 struct xhci_ring *ep_ring;
d115b048 2814 struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
e9df17eb
SS
2815
2816 ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id);
2817 if (!ep_ring) {
2818 xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
2819 stream_id);
2820 return -EINVAL;
2821 }
2822
5071e6b2 2823 ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx),
3b72fca0 2824 num_trbs, mem_flags);
d0e96f5a
SS
2825 if (ret)
2826 return ret;
d0e96f5a 2827
8e51adcc
AX
2828 urb_priv = urb->hcpriv;
2829 td = urb_priv->td[td_index];
2830
2831 INIT_LIST_HEAD(&td->td_list);
2832 INIT_LIST_HEAD(&td->cancelled_td_list);
2833
2834 if (td_index == 0) {
214f76f7 2835 ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
d13565c1 2836 if (unlikely(ret))
8e51adcc 2837 return ret;
d0e96f5a
SS
2838 }
2839
8e51adcc 2840 td->urb = urb;
d0e96f5a 2841 /* Add this TD to the tail of the endpoint ring's TD list */
8e51adcc
AX
2842 list_add_tail(&td->td_list, &ep_ring->td_list);
2843 td->start_seg = ep_ring->enq_seg;
2844 td->first_trb = ep_ring->enqueue;
2845
d0e96f5a
SS
2846 return 0;
2847}
2848
d2510342
AI
2849static unsigned int count_trbs(u64 addr, u64 len)
2850{
2851 unsigned int num_trbs;
2852
2853 num_trbs = DIV_ROUND_UP(len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
2854 TRB_MAX_BUFF_SIZE);
2855 if (num_trbs == 0)
2856 num_trbs++;
2857
2858 return num_trbs;
2859}
2860
2861static inline unsigned int count_trbs_needed(struct urb *urb)
2862{
2863 return count_trbs(urb->transfer_dma, urb->transfer_buffer_length);
2864}
2865
2866static unsigned int count_sg_trbs_needed(struct urb *urb)
8a96c052 2867{
8a96c052 2868 struct scatterlist *sg;
d2510342 2869 unsigned int i, len, full_len, num_trbs = 0;
8a96c052 2870
d2510342 2871 full_len = urb->transfer_buffer_length;
8a96c052 2872
d2510342
AI
2873 for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
2874 len = sg_dma_len(sg);
2875 num_trbs += count_trbs(sg_dma_address(sg), len);
2876 len = min_t(unsigned int, len, full_len);
2877 full_len -= len;
2878 if (full_len == 0)
8a96c052
SS
2879 break;
2880 }
d2510342 2881
8a96c052
SS
2882 return num_trbs;
2883}
2884
d2510342
AI
2885static unsigned int count_isoc_trbs_needed(struct urb *urb, int i)
2886{
2887 u64 addr, len;
2888
2889 addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
2890 len = urb->iso_frame_desc[i].length;
2891
2892 return count_trbs(addr, len);
2893}
2894
2895static void check_trb_math(struct urb *urb, int running_total)
8a96c052 2896{
d2510342 2897 if (unlikely(running_total != urb->transfer_buffer_length))
a2490187 2898 dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
8a96c052
SS
2899 "queued %#x (%d), asked for %#x (%d)\n",
2900 __func__,
2901 urb->ep->desc.bEndpointAddress,
2902 running_total, running_total,
2903 urb->transfer_buffer_length,
2904 urb->transfer_buffer_length);
2905}
2906
23e3be11 2907static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
e9df17eb 2908 unsigned int ep_index, unsigned int stream_id, int start_cycle,
e1eab2e0 2909 struct xhci_generic_trb *start_trb)
8a96c052 2910{
8a96c052
SS
2911 /*
2912 * Pass all the TRBs to the hardware at once and make sure this write
2913 * isn't reordered.
2914 */
2915 wmb();
50f7b52a 2916 if (start_cycle)
28ccd296 2917 start_trb->field[3] |= cpu_to_le32(start_cycle);
50f7b52a 2918 else
28ccd296 2919 start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
be88fe4f 2920 xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
8a96c052
SS
2921}
2922
78140156
AI
2923static void check_interval(struct xhci_hcd *xhci, struct urb *urb,
2924 struct xhci_ep_ctx *ep_ctx)
624defa1 2925{
624defa1
SS
2926 int xhci_interval;
2927 int ep_interval;
2928
28ccd296 2929 xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
624defa1 2930 ep_interval = urb->interval;
78140156 2931
624defa1
SS
2932 /* Convert to microframes */
2933 if (urb->dev->speed == USB_SPEED_LOW ||
2934 urb->dev->speed == USB_SPEED_FULL)
2935 ep_interval *= 8;
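 /* e.g. a full-speed interval of 4 frames becomes 32 microframes */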
78140156 2936
624defa1
SS
2937 /* FIXME change this to a warning and a suggestion to use the new API
2938 * to set the polling interval (once the API is added).
2939 */
2940 if (xhci_interval != ep_interval) {
0730d52a
DK
2941 dev_dbg_ratelimited(&urb->dev->dev,
2942 "Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n",
2943 ep_interval, ep_interval == 1 ? "" : "s",
2944 xhci_interval, xhci_interval == 1 ? "" : "s");
624defa1
SS
2945 urb->interval = xhci_interval;
2946 /* Convert back to frames for LS/FS devices */
2947 if (urb->dev->speed == USB_SPEED_LOW ||
2948 urb->dev->speed == USB_SPEED_FULL)
2949 urb->interval /= 8;
2950 }
78140156
AI
2951}
2952
2953/*
2954 * xHCI uses normal TRBs for both bulk and interrupt. When the interrupt
2955 * endpoint is to be serviced, the xHC will consume (at most) one TD. A TD
2956 * (comprised of sg list entries) can take several service intervals to
2957 * transmit.
2958 */
2959int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2960 struct urb *urb, int slot_id, unsigned int ep_index)
2961{
2962 struct xhci_ep_ctx *ep_ctx;
2963
2964 ep_ctx = xhci_get_ep_ctx(xhci, xhci->devs[slot_id]->out_ctx, ep_index);
2965 check_interval(xhci, urb, ep_ctx);
2966
3fc8206d 2967 return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index);
624defa1
SS
2968}
2969
4da6e6f2 2970/*
4525c0a1
SS
2971 * For xHCI 1.0 host controllers, TD size is the number of max packet sized
2972 * packets remaining in the TD (*not* including this TRB).
4da6e6f2
SS
2973 *
2974 * Total TD packet count = total_packet_count =
4525c0a1 2975 * DIV_ROUND_UP(TD size in bytes, wMaxPacketSize)
4da6e6f2
SS
2976 *
2977 * Packets transferred up to and including this TRB = packets_transferred =
2978 * rounddown(total bytes transferred including this TRB / wMaxPacketSize)
2979 *
2980 * TD size = total_packet_count - packets_transferred
2981 *
c840d6ce
MN
2982 * For xHCI 0.96 and older, TD size field should be the remaining bytes
2983 * including this TRB, right shifted by 10
2984 *
2985 * For all hosts it must fit in bits 21:17, so it can't be bigger than 31.
2986 * This is taken care of in the TRB_TD_SIZE() macro
2987 *
4525c0a1 2988 * The last TRB in a TD must have the TD size set to zero.
4da6e6f2 2989 */
c840d6ce
MN
2990static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
2991 int trb_buff_len, unsigned int td_total_len,
124c3937 2992 struct urb *urb, bool more_trbs_coming)
4da6e6f2 2993{
c840d6ce
MN
2994 u32 maxp, total_packet_count;
2995
0cbd4b34
CY
2996 /* MTK xHCI is mostly 0.97 but contains some features from 1.0 */
2997 if (xhci->hci_version < 0x100 && !(xhci->quirks & XHCI_MTK_HOST))
c840d6ce
MN
2998 return ((td_total_len - transferred) >> 10);
2999
48df4a6f 3000 /* Last TRB of the TD, a zero-length packet, or a TD that fits in one TRB */
124c3937 3001 if (!more_trbs_coming || (transferred == 0 && trb_buff_len == 0) ||
c840d6ce 3002 trb_buff_len == td_total_len)
48df4a6f
SS
3003 return 0;
3004
0cbd4b34
CY
3005 /* for MTK xHCI, TD size doesn't include this TRB */
3006 if (xhci->quirks & XHCI_MTK_HOST)
3007 trb_buff_len = 0;
3008
734d3ddd 3009 maxp = usb_endpoint_maxp(&urb->ep->desc);
0cbd4b34
CY
3010 total_packet_count = DIV_ROUND_UP(td_total_len, maxp);
3011
c840d6ce
MN
3012 /* Queueing functions don't count the current TRB into transferred */
3013 return (total_packet_count - ((transferred + trb_buff_len) / maxp));
4da6e6f2
SS
3014}
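
/*
 * Worked example (illustrative sketch, not driver code): a 3000 byte bulk
 * TD on an endpoint with wMaxPacketSize = 512 is DIV_ROUND_UP(3000, 512)
 * == 6 packets.  If the first TRB carries 1024 bytes, two packets are out
 * once it completes ((0 + 1024) / 512), so its TD size field is 6 - 2 = 4.
 * This is the xHCI 1.0 branch of xhci_td_remainder() above.
 */
static inline u32 example_td_size_v1_0(u32 transferred, u32 trb_buff_len,
				       u32 td_total_len, u32 maxp)
{
	u32 total_packets = DIV_ROUND_UP(td_total_len, maxp);

	return total_packets - (transferred + trb_buff_len) / maxp;
}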
3015
f9c589e1 3016
474ed23a 3017static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
f9c589e1 3018 u32 *trb_buff_len, struct xhci_segment *seg)
474ed23a 3019{
f9c589e1 3020 struct device *dev = xhci_to_hcd(xhci)->self.controller;
474ed23a
MN
3021 unsigned int unalign;
3022 unsigned int max_pkt;
f9c589e1 3023 u32 new_buff_len;
474ed23a 3024
734d3ddd 3025 max_pkt = usb_endpoint_maxp(&urb->ep->desc);
474ed23a
MN
3026 unalign = (enqd_len + *trb_buff_len) % max_pkt;
3027
3028 /* we got lucky, last normal TRB data on segment is packet aligned */
3029 if (unalign == 0)
3030 return 0;
3031
f9c589e1
MN
3032 xhci_dbg(xhci, "Unaligned %d bytes, buff len %d\n",
3033 unalign, *trb_buff_len);
3034
474ed23a
MN
3035 /* is the last normal TRB alignable by splitting it? */
3036 if (*trb_buff_len > unalign) {
3037 *trb_buff_len -= unalign;
f9c589e1 3038 xhci_dbg(xhci, "split align, new buff len %d\n", *trb_buff_len);
474ed23a
MN
3039 return 0;
3040 }
f9c589e1
MN
3041
3042 /*
3043 * We want enqd_len + trb_buff_len to sum up to a number which is
3044 * divisible by the endpoint's wMaxPacketSize. IOW:
3045 * (size of currently enqueued TRBs + remainder) % wMaxPacketSize == 0.
3046 */
3047 new_buff_len = max_pkt - (enqd_len % max_pkt);
3048
3049 if (new_buff_len > (urb->transfer_buffer_length - enqd_len))
3050 new_buff_len = (urb->transfer_buffer_length - enqd_len);
3051
3052 /* create a bounce buffer of at most max_pkt bytes, pointed to by the last TRB */
3053 if (usb_urb_dir_out(urb)) {
3054 sg_pcopy_to_buffer(urb->sg, urb->num_mapped_sgs,
3055 seg->bounce_buf, new_buff_len, enqd_len);
3056 seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
3057 max_pkt, DMA_TO_DEVICE);
3058 } else {
3059 seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
3060 max_pkt, DMA_FROM_DEVICE);
3061 }
3062
3063 if (dma_mapping_error(dev, seg->bounce_dma)) {
3064 /* try without aligning. Some host controllers survive */
3065 xhci_warn(xhci, "Failed mapping bounce buffer, not aligning\n");
3066 return 0;
3067 }
3068 *trb_buff_len = new_buff_len;
3069 seg->bounce_len = new_buff_len;
3070 seg->bounce_offs = enqd_len;
3071
3072 xhci_dbg(xhci, "Bounce align, new buff len %d\n", *trb_buff_len);
3073
474ed23a
MN
3074 return 1;
3075}
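
/*
 * Worked example (illustrative sketch, not driver code): with
 * wMaxPacketSize = 512, enqd_len = 7000 and trb_buff_len = 600,
 * (7000 + 600) % 512 == 432, so trimming the TRB to 600 - 432 == 168 bytes
 * leaves 7000 + 168 == 7168 bytes enqueued, a multiple of 512, and the
 * remaining 432 bytes move to the next TRB (or the bounce buffer).
 */
static inline u32 example_split_align(u32 enqd_len, u32 trb_buff_len,
				      u32 max_pkt)
{
	u32 unalign = (enqd_len + trb_buff_len) % max_pkt;

	if (unalign && trb_buff_len > unalign)
		return trb_buff_len - unalign;	/* split case */
	return trb_buff_len;			/* aligned or bounce case */
}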
3076
d2510342
AI
3077/* This is very similar to what ehci-q.c qtd_fill() does */
3078int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
8a96c052
SS
3079 struct urb *urb, int slot_id, unsigned int ep_index)
3080{
5a5a0b1a 3081 struct xhci_ring *ring;
8e51adcc 3082 struct urb_priv *urb_priv;
8a96c052 3083 struct xhci_td *td;
d2510342
AI
3084 struct xhci_generic_trb *start_trb;
3085 struct scatterlist *sg = NULL;
5a83f04a
MN
3086 bool more_trbs_coming = true;
3087 bool need_zero_pkt = false;
86065c27
MN
3088 bool first_trb = true;
3089 unsigned int num_trbs;
d2510342 3090 unsigned int start_cycle, num_sgs = 0;
86065c27 3091 unsigned int enqd_len, block_len, trb_buff_len, full_len;
f9c589e1 3092 int sent_len, ret;
d2510342 3093 u32 field, length_field, remainder;
f9c589e1 3094 u64 addr, send_addr;
8a96c052 3095
5a5a0b1a
MN
3096 ring = xhci_urb_to_transfer_ring(xhci, urb);
3097 if (!ring)
e9df17eb
SS
3098 return -EINVAL;
3099
86065c27 3100 full_len = urb->transfer_buffer_length;
d2510342
AI
3101 /* If we have a scatter/gather list, use it. */
3102 if (urb->num_sgs) {
3103 num_sgs = urb->num_mapped_sgs;
3104 sg = urb->sg;
86065c27
MN
3105 addr = (u64) sg_dma_address(sg);
3106 block_len = sg_dma_len(sg);
d2510342 3107 num_trbs = count_sg_trbs_needed(urb);
86065c27 3108 } else {
d2510342 3109 num_trbs = count_trbs_needed(urb);
86065c27
MN
3110 addr = (u64) urb->transfer_dma;
3111 block_len = full_len;
3112 }
4758dcd1 3113 ret = prepare_transfer(xhci, xhci->devs[slot_id],
e9df17eb 3114 ep_index, urb->stream_id,
3b72fca0 3115 num_trbs, urb, 0, mem_flags);
d2510342 3116 if (unlikely(ret < 0))
4758dcd1 3117 return ret;
8e51adcc
AX
3118
3119 urb_priv = urb->hcpriv;
4758dcd1
RA
3120
3121 /* Deal with URB_ZERO_PACKET - need one more td/trb */
5a83f04a
MN
3122 if (urb->transfer_flags & URB_ZERO_PACKET && urb_priv->length > 1)
3123 need_zero_pkt = true;
4758dcd1 3124
8e51adcc
AX
3125 td = urb_priv->td[0];
3126
8a96c052
SS
3127 /*
3128 * Don't give the first TRB to the hardware (by toggling the cycle bit)
3129 * until we've finished creating all the other TRBs. The ring's cycle
3130 * state may change as we enqueue the other TRBs, so save it too.
3131 */
5a5a0b1a
MN
3132 start_trb = &ring->enqueue->generic;
3133 start_cycle = ring->cycle_state;
f9c589e1 3134 send_addr = addr;
8a96c052 3135
d2510342 3136 /* Queue the TRBs, even if they are zero-length */
0d2daade
AB
3137 for (enqd_len = 0; first_trb || enqd_len < full_len;
3138 enqd_len += trb_buff_len) {
d2510342 3139 field = TRB_TYPE(TRB_NORMAL);
af8b9e63 3140
86065c27
MN
3141 /* TRB buffer should not cross 64KB boundaries */
3142 trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
3143 trb_buff_len = min_t(unsigned int, trb_buff_len, block_len);
8a96c052 3144
86065c27
MN
3145 if (enqd_len + trb_buff_len > full_len)
3146 trb_buff_len = full_len - enqd_len;
b10de142
SS
3147
3148 /* Don't change the cycle bit of the first TRB until later */
86065c27
MN
3149 if (first_trb) {
3150 first_trb = false;
50f7b52a 3151 if (start_cycle == 0)
d2510342 3152 field |= TRB_CYCLE;
50f7b52a 3153 } else
5a5a0b1a 3154 field |= ring->cycle_state;
b10de142
SS
3155
3156 /* Chain all the TRBs together; clear the chain bit in the last
3157 * TRB to indicate it's the last TRB in the chain.
3158 */
86065c27 3159 if (enqd_len + trb_buff_len < full_len) {
b10de142 3160 field |= TRB_CHAIN;
2d98ef40 3161 if (trb_is_link(ring->enqueue + 1)) {
474ed23a 3162 if (xhci_align_td(xhci, urb, enqd_len,
f9c589e1
MN
3163 &trb_buff_len,
3164 ring->enq_seg)) {
3165 send_addr = ring->enq_seg->bounce_dma;
3166 /* assuming TD won't span 2 segs */
3167 td->bounce_seg = ring->enq_seg;
3168 }
474ed23a 3169 }
f9c589e1
MN
3170 }
3171 if (enqd_len + trb_buff_len >= full_len) {
3172 field &= ~TRB_CHAIN;
4758dcd1 3173 field |= TRB_IOC;
124c3937 3174 more_trbs_coming = false;
5a83f04a 3175 td->last_trb = ring->enqueue;
b10de142 3176 }
af8b9e63
SS
3177
3178 /* Only set interrupt on short packet for IN endpoints */
3179 if (usb_urb_dir_in(urb))
3180 field |= TRB_ISP;
3181
4da6e6f2 3182 /* Set the TRB length, TD size, and interrupter fields. */
86065c27
MN
3183 remainder = xhci_td_remainder(xhci, enqd_len, trb_buff_len,
3184 full_len, urb, more_trbs_coming);
3185
f9dc68fe 3186 length_field = TRB_LEN(trb_buff_len) |
c840d6ce 3187 TRB_TD_SIZE(remainder) |
f9dc68fe 3188 TRB_INTR_TARGET(0);
4da6e6f2 3189
124c3937 3190 queue_trb(xhci, ring, more_trbs_coming | need_zero_pkt,
f9c589e1
MN
3191 lower_32_bits(send_addr),
3192 upper_32_bits(send_addr),
f9dc68fe 3193 length_field,
d2510342 3194 field);
b10de142 3195
b10de142 3196 addr += trb_buff_len;
f9c589e1 3197 sent_len = trb_buff_len;
d2510342 3198
f9c589e1 3199 while (sg && sent_len >= block_len) {
86065c27
MN
3200 /* New sg entry */
3201 --num_sgs;
f9c589e1 3202 sent_len -= block_len;
86065c27 3203 if (num_sgs != 0) {
d2510342 3204 sg = sg_next(sg);
86065c27
MN
3205 block_len = sg_dma_len(sg);
3206 addr = (u64) sg_dma_address(sg);
f9c589e1 3207 addr += sent_len;
d2510342
AI
3208 }
3209 }
f9c589e1
MN
3210 block_len -= sent_len;
3211 send_addr = addr;
d2510342 3212 }
b10de142 3213
5a83f04a
MN
3214 if (need_zero_pkt) {
3215 ret = prepare_transfer(xhci, xhci->devs[slot_id],
3216 ep_index, urb->stream_id,
3217 1, urb, 1, mem_flags);
3218 urb_priv->td[1]->last_trb = ring->enqueue;
3219 field = TRB_TYPE(TRB_NORMAL) | ring->cycle_state | TRB_IOC;
3220 queue_trb(xhci, ring, 0, 0, 0, TRB_INTR_TARGET(0), field);
3221 }
3222
86065c27 3223 check_trb_math(urb, enqd_len);
e9df17eb 3224 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
e1eab2e0 3225 start_cycle, start_trb);
b10de142
SS
3226 return 0;
3227}
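
/*
 * Illustrative sketch (not driver code) of the 64KB rule used in the loop
 * above, assuming TRB_BUFF_LEN_UP_TO_BOUNDARY() implements this
 * arithmetic: a buffer starting at DMA address 0x1f000 can carry at most
 * 0x10000 - 0xf000 == 0x1000 bytes in one TRB before crossing a 64KB
 * boundary.
 */
static inline u32 example_len_to_64k_boundary(u64 addr)
{
	return 0x10000 - (addr & 0xffff);
}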
3228
d0e96f5a 3229/* Caller must have locked xhci->lock */
23e3be11 3230int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
d0e96f5a
SS
3231 struct urb *urb, int slot_id, unsigned int ep_index)
3232{
3233 struct xhci_ring *ep_ring;
3234 int num_trbs;
3235 int ret;
3236 struct usb_ctrlrequest *setup;
3237 struct xhci_generic_trb *start_trb;
3238 int start_cycle;
fb79a6da 3239 u32 field;
8e51adcc 3240 struct urb_priv *urb_priv;
d0e96f5a
SS
3241 struct xhci_td *td;
3242
e9df17eb
SS
3243 ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
3244 if (!ep_ring)
3245 return -EINVAL;
d0e96f5a
SS
3246
3247 /*
3248 * Need to copy setup packet into setup TRB, so we can't use the setup
3249 * DMA address.
3250 */
3251 if (!urb->setup_packet)
3252 return -EINVAL;
3253
d0e96f5a
SS
3254 /* 1 TRB for setup, 1 for status */
3255 num_trbs = 2;
3256 /*
3257 * Don't need to check if we need additional event data and normal TRBs,
3258 * since data in control transfers will never get bigger than 16MB
3259 * XXX: can we get a buffer that crosses 64KB boundaries?
3260 */
3261 if (urb->transfer_buffer_length > 0)
3262 num_trbs++;
e9df17eb
SS
3263 ret = prepare_transfer(xhci, xhci->devs[slot_id],
3264 ep_index, urb->stream_id,
3b72fca0 3265 num_trbs, urb, 0, mem_flags);
d0e96f5a
SS
3266 if (ret < 0)
3267 return ret;
3268
8e51adcc
AX
3269 urb_priv = urb->hcpriv;
3270 td = urb_priv->td[0];
3271
d0e96f5a
SS
3272 /*
3273 * Don't give the first TRB to the hardware (by toggling the cycle bit)
3274 * until we've finished creating all the other TRBs. The ring's cycle
3275 * state may change as we enqueue the other TRBs, so save it too.
3276 */
3277 start_trb = &ep_ring->enqueue->generic;
3278 start_cycle = ep_ring->cycle_state;
3279
3280 /* Queue setup TRB - see section 6.4.1.2.1 */
3281 /* FIXME better way to translate setup_packet into two u32 fields? */
3282 setup = (struct usb_ctrlrequest *) urb->setup_packet;
50f7b52a
AX
3283 field = 0;
3284 field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
3285 if (start_cycle == 0)
3286 field |= 0x1;
b83cdc8f 3287
dca77945 3288 /* xHCI 1.0/1.1 6.4.1.2.1: Transfer Type field */
0cbd4b34 3289 if ((xhci->hci_version >= 0x100) || (xhci->quirks & XHCI_MTK_HOST)) {
b83cdc8f
AX
3290 if (urb->transfer_buffer_length > 0) {
3291 if (setup->bRequestType & USB_DIR_IN)
3292 field |= TRB_TX_TYPE(TRB_DATA_IN);
3293 else
3294 field |= TRB_TX_TYPE(TRB_DATA_OUT);
3295 }
3296 }
3297
3b72fca0 3298 queue_trb(xhci, ep_ring, true,
28ccd296
ME
3299 setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
3300 le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
3301 TRB_LEN(8) | TRB_INTR_TARGET(0),
3302 /* Immediate data in pointer */
3303 field);
d0e96f5a
SS
3304
3305 /* If there's data, queue data TRBs */
af8b9e63
SS
3306 /* Only set interrupt on short packet for IN endpoints */
3307 if (usb_urb_dir_in(urb))
3308 field = TRB_ISP | TRB_TYPE(TRB_DATA);
3309 else
3310 field = TRB_TYPE(TRB_DATA);
3311
d0e96f5a 3312 if (urb->transfer_buffer_length > 0) {
fb79a6da
LB
3313 u32 length_field, remainder;
3314
3315 remainder = xhci_td_remainder(xhci, 0,
3316 urb->transfer_buffer_length,
3317 urb->transfer_buffer_length,
3318 urb, 1);
3319 length_field = TRB_LEN(urb->transfer_buffer_length) |
3320 TRB_TD_SIZE(remainder) |
3321 TRB_INTR_TARGET(0);
d0e96f5a
SS
3322 if (setup->bRequestType & USB_DIR_IN)
3323 field |= TRB_DIR_IN;
3b72fca0 3324 queue_trb(xhci, ep_ring, true,
d0e96f5a
SS
3325 lower_32_bits(urb->transfer_dma),
3326 upper_32_bits(urb->transfer_dma),
f9dc68fe 3327 length_field,
af8b9e63 3328 field | ep_ring->cycle_state);
d0e96f5a
SS
3329 }
3330
3331 /* Save the DMA address of the last TRB in the TD */
3332 td->last_trb = ep_ring->enqueue;
3333
3334 /* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
3335 /* If the device sent data, the status stage is an OUT transfer */
3336 if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
3337 field = 0;
3338 else
3339 field = TRB_DIR_IN;
3b72fca0 3340 queue_trb(xhci, ep_ring, false,
d0e96f5a
SS
3341 0,
3342 0,
3343 TRB_INTR_TARGET(0),
3344 /* Event on completion */
3345 field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);
3346
e9df17eb 3347 giveback_first_trb(xhci, slot_id, ep_index, 0,
e1eab2e0 3348 start_cycle, start_trb);
d0e96f5a
SS
3349 return 0;
3350}
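
/*
 * Illustrative example (not driver code): TRB accounting for the control
 * transfer above.  A GET_DESCRIPTOR with an 18 byte data stage queues
 * three TRBs (setup, data IN, status OUT); a SET_CONFIGURATION with no
 * data stage queues only setup + status.
 */
static inline int example_ctrl_trb_count(unsigned int transfer_buffer_length)
{
	return 2 + (transfer_buffer_length > 0 ? 1 : 0);
}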
3351
5cd43e33
SS
3352/*
3353 * The transfer burst count field of the isochronous TRB defines the number of
3354 * bursts that are required to move all packets in this TD. Only SuperSpeed
3355 * devices can burst up to bMaxBurst number of packets per service interval.
3356 * This field is zero based, meaning a value of zero in the field means one
3357 * burst. Basically, for everything but SuperSpeed devices, this field will be
3358 * zero. Only xHCI 1.0 host controllers support this field.
3359 */
3360static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
5cd43e33
SS
3361 struct urb *urb, unsigned int total_packet_count)
3362{
3363 unsigned int max_burst;
3364
09c352ed 3365 if (xhci->hci_version < 0x100 || urb->dev->speed < USB_SPEED_SUPER)
5cd43e33
SS
3366 return 0;
3367
3368 max_burst = urb->ep->ss_ep_comp.bMaxBurst;
3213b151 3369 return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
5cd43e33
SS
3370}
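
/*
 * Worked example (illustrative sketch, not driver code): a SuperSpeed isoc
 * endpoint with bMaxBurst = 2 moves up to 3 packets per burst, so a 7
 * packet TD needs DIV_ROUND_UP(7, 3) == 3 bursts, written zero-based as 2.
 */
static inline unsigned int example_burst_count(unsigned int total_packets,
					       unsigned int bMaxBurst)
{
	return DIV_ROUND_UP(total_packets, bMaxBurst + 1) - 1;
}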
3371
b61d378f
SS
3372/*
3373 * Returns the number of packets in the last "burst" of packets. This field is
3374 * valid for all speeds of devices. USB 2.0 devices can only do one "burst", so
3375 * the last burst packet count is equal to the total number of packets in the
3376 * TD. SuperSpeed endpoints can have up to 3 bursts. All but the last burst
3377 * must contain (bMaxBurst + 1) number of packets, but the last burst can
3378 * contain 1 to (bMaxBurst + 1) packets.
3379 */
3380static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
b61d378f
SS
3381 struct urb *urb, unsigned int total_packet_count)
3382{
3383 unsigned int max_burst;
3384 unsigned int residue;
3385
3386 if (xhci->hci_version < 0x100)
3387 return 0;
3388
09c352ed 3389 if (urb->dev->speed >= USB_SPEED_SUPER) {
b61d378f
SS
3390 /* bMaxBurst is zero based: 0 means 1 packet per burst */
3391 max_burst = urb->ep->ss_ep_comp.bMaxBurst;
3392 residue = total_packet_count % (max_burst + 1);
3393 /* If residue is zero, the last burst contains (max_burst + 1)
3394 * number of packets, but the TLBPC field is zero-based.
3395 */
3396 if (residue == 0)
3397 return max_burst;
3398 return residue - 1;
b61d378f 3399 }
09c352ed
MN
3400 if (total_packet_count == 0)
3401 return 0;
3402 return total_packet_count - 1;
b61d378f
SS
3403}
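
/*
 * Worked example (illustrative sketch, not driver code): for the 7 packet,
 * bMaxBurst = 2 TD above, 7 % 3 == 1, so the last burst holds one packet
 * and TLBPC is written zero-based as 0; had the residue been 0, the last
 * burst would hold a full 3 packets and TLBPC would be bMaxBurst == 2.
 */
static inline unsigned int example_last_burst_pkts(unsigned int total_packets,
						   unsigned int bMaxBurst)
{
	unsigned int residue = total_packets % (bMaxBurst + 1);

	return residue ? residue - 1 : bMaxBurst;
}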
3404
79b8094f
LB
3405/*
3406 * Calculates the Frame ID field of the isochronous TRB, which identifies
3407 * the target frame that the Interval associated with this Isochronous
3408 * Transfer Descriptor will start on. Refer to 4.11.2.5 in the 1.1 spec.
3409 *
3410 * Returns actual frame id on success, negative value on error.
3411 */
3412static int xhci_get_isoc_frame_id(struct xhci_hcd *xhci,
3413 struct urb *urb, int index)
3414{
3415 int start_frame, ist, ret = 0;
3416 int start_frame_id, end_frame_id, current_frame_id;
3417
3418 if (urb->dev->speed == USB_SPEED_LOW ||
3419 urb->dev->speed == USB_SPEED_FULL)
3420 start_frame = urb->start_frame + index * urb->interval;
3421 else
3422 start_frame = (urb->start_frame + index * urb->interval) >> 3;
3423
3424 /* Isochronous Scheduling Threshold (IST, bits 0~3 in HCSPARAMS2):
3425 *
3426 * If bit [3] of IST is cleared to '0', software can add a TRB no
3427 * later than IST[2:0] Microframes before that TRB is scheduled to
3428 * be executed.
3429 * If bit [3] of IST is set to '1', software can add a TRB no later
3430 * than IST[2:0] Frames before that TRB is scheduled to be executed.
3431 */
3432 ist = HCS_IST(xhci->hcs_params2) & 0x7;
3433 if (HCS_IST(xhci->hcs_params2) & (1 << 3))
3434 ist <<= 3;
3435
3436 /* Software shall not schedule an Isoch TD with a Frame ID value that
3437 * is less than the Start Frame ID or greater than the End Frame ID,
3438 * where:
3439 *
3440 * End Frame ID = (Current MFINDEX register value + 895 ms.) MOD 2048
3441 * Start Frame ID = (Current MFINDEX register value + IST + 1) MOD 2048
3442 *
3443 * Both the End Frame ID and Start Frame ID values are calculated
3444 * in microframes. When software determines the valid Frame ID value,
3445 * the End Frame ID value should be rounded down to the nearest Frame
3446 * boundary, and the Start Frame ID value should be rounded up to the
3447 * nearest Frame boundary.
3448 */
3449 current_frame_id = readl(&xhci->run_regs->microframe_index);
3450 start_frame_id = roundup(current_frame_id + ist + 1, 8);
3451 end_frame_id = rounddown(current_frame_id + 895 * 8, 8);
3452
3453 start_frame &= 0x7ff;
3454 start_frame_id = (start_frame_id >> 3) & 0x7ff;
3455 end_frame_id = (end_frame_id >> 3) & 0x7ff;
3456
3457 xhci_dbg(xhci, "%s: index %d, reg 0x%x start_frame_id 0x%x, end_frame_id 0x%x, start_frame 0x%x\n",
3458 __func__, index, readl(&xhci->run_regs->microframe_index),
3459 start_frame_id, end_frame_id, start_frame);
3460
3461 if (start_frame_id < end_frame_id) {
3462 if (start_frame > end_frame_id ||
3463 start_frame < start_frame_id)
3464 ret = -EINVAL;
3465 } else if (start_frame_id > end_frame_id) {
3466 if ((start_frame > end_frame_id &&
3467 start_frame < start_frame_id))
3468 ret = -EINVAL;
3469 } else {
3470 ret = -EINVAL;
3471 }
3472
3473 if (index == 0) {
3474 if (ret == -EINVAL || start_frame == start_frame_id) {
3475 start_frame = start_frame_id + 1;
3476 if (urb->dev->speed == USB_SPEED_LOW ||
3477 urb->dev->speed == USB_SPEED_FULL)
3478 urb->start_frame = start_frame;
3479 else
3480 urb->start_frame = start_frame << 3;
3481 ret = 0;
3482 }
3483 }
3484
3485 if (ret) {
3486 xhci_warn(xhci, "Frame ID %d (reg %d, index %d) beyond range (%d, %d)\n",
3487 start_frame, current_frame_id, index,
3488 start_frame_id, end_frame_id);
3489 xhci_warn(xhci, "Ignore frame ID field, use SIA bit instead\n");
3490 return ret;
3491 }
3492
3493 return start_frame;
3494}
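
/*
 * Illustrative sketch (not driver code) of the IST decode used above: if
 * HCSPARAMS2 reports IST = 0b1010, bit 3 is set, so the threshold is
 * IST[2:0] == 2 frames, i.e. 2 << 3 == 16 microframes.
 */
static inline int example_ist_to_uframes(u32 ist_field)
{
	int ist = ist_field & 0x7;

	if (ist_field & (1 << 3))
		ist <<= 3;	/* value is in frames: convert to microframes */
	return ist;
}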
3495
04e51901
AX
3496/* This is for isoc transfer */
3497static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3498 struct urb *urb, int slot_id, unsigned int ep_index)
3499{
3500 struct xhci_ring *ep_ring;
3501 struct urb_priv *urb_priv;
3502 struct xhci_td *td;
3503 int num_tds, trbs_per_td;
3504 struct xhci_generic_trb *start_trb;
3505 bool first_trb;
3506 int start_cycle;
3507 u32 field, length_field;
3508 int running_total, trb_buff_len, td_len, td_remain_len, ret;
3509 u64 start_addr, addr;
3510 int i, j;
47cbf692 3511 bool more_trbs_coming;
79b8094f 3512 struct xhci_virt_ep *xep;
09c352ed 3513 int frame_id;
04e51901 3514
79b8094f 3515 xep = &xhci->devs[slot_id]->eps[ep_index];
04e51901
AX
3516 ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
3517
3518 num_tds = urb->number_of_packets;
3519 if (num_tds < 1) {
3520 xhci_dbg(xhci, "Isoc URB with zero packets?\n");
3521 return -EINVAL;
3522 }
04e51901
AX
3523 start_addr = (u64) urb->transfer_dma;
3524 start_trb = &ep_ring->enqueue->generic;
3525 start_cycle = ep_ring->cycle_state;
3526
522989a2 3527 urb_priv = urb->hcpriv;
09c352ed 3528 /* Queue the TRBs for each TD, even if they are zero-length */
04e51901 3529 for (i = 0; i < num_tds; i++) {
09c352ed
MN
3530 unsigned int total_pkt_count, max_pkt;
3531 unsigned int burst_count, last_burst_pkt_count;
3532 u32 sia_frame_id;
04e51901 3533
4da6e6f2 3534 first_trb = true;
04e51901
AX
3535 running_total = 0;
3536 addr = start_addr + urb->iso_frame_desc[i].offset;
3537 td_len = urb->iso_frame_desc[i].length;
3538 td_remain_len = td_len;
734d3ddd 3539 max_pkt = usb_endpoint_maxp(&urb->ep->desc);
09c352ed
MN
3540 total_pkt_count = DIV_ROUND_UP(td_len, max_pkt);
3541
48df4a6f 3542 /* A zero-length transfer still involves at least one packet. */
09c352ed
MN
3543 if (total_pkt_count == 0)
3544 total_pkt_count++;
3545 burst_count = xhci_get_burst_count(xhci, urb, total_pkt_count);
3546 last_burst_pkt_count = xhci_get_last_burst_packet_count(xhci,
3547 urb, total_pkt_count);
04e51901 3548
d2510342 3549 trbs_per_td = count_isoc_trbs_needed(urb, i);
04e51901
AX
3550
3551 ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
3b72fca0 3552 urb->stream_id, trbs_per_td, urb, i, mem_flags);
522989a2
SS
3553 if (ret < 0) {
3554 if (i == 0)
3555 return ret;
3556 goto cleanup;
3557 }
04e51901 3558 td = urb_priv->td[i];
09c352ed
MN
3559
3560 /* use SIA by default; if a frame ID is used, overwrite it */
3561 sia_frame_id = TRB_SIA;
3562 if (!(urb->transfer_flags & URB_ISO_ASAP) &&
3563 HCC_CFC(xhci->hcc_params)) {
3564 frame_id = xhci_get_isoc_frame_id(xhci, urb, i);
3565 if (frame_id >= 0)
3566 sia_frame_id = TRB_FRAME_ID(frame_id);
3567 }
3568 /*
3569 * Set isoc specific data for the first TRB in a TD.
3570 * Prevent HW from getting the TRBs by keeping the cycle state
3571 * inverted in the first TD's isoc TRB.
3572 */
2f6d3b65 3573 field = TRB_TYPE(TRB_ISOC) |
09c352ed
MN
3574 TRB_TLBPC(last_burst_pkt_count) |
3575 sia_frame_id |
3576 (i ? ep_ring->cycle_state : !start_cycle);
3577
2f6d3b65
MN
3578 /* xhci 1.1 with ETE uses TD_Size field for TBC, old is Rsvdz */
3579 if (!xep->use_extended_tbc)
3580 field |= TRB_TBC(burst_count);
3581
09c352ed 3582 /* fill the rest of the TRB fields, and remaining normal TRBs */
04e51901
AX
3583 for (j = 0; j < trbs_per_td; j++) {
3584 u32 remainder = 0;
09c352ed
MN
3585
3586 /* only first TRB is isoc, overwrite otherwise */
3587 if (!first_trb)
3588 field = TRB_TYPE(TRB_NORMAL) |
3589 ep_ring->cycle_state;
04e51901 3590
af8b9e63
SS
3591 /* Only set interrupt on short packet for IN EPs */
3592 if (usb_urb_dir_in(urb))
3593 field |= TRB_ISP;
3594
09c352ed 3595 /* Set the chain bit for all except the last TRB */
04e51901 3596 if (j < trbs_per_td - 1) {
47cbf692 3597 more_trbs_coming = true;
09c352ed 3598 field |= TRB_CHAIN;
04e51901 3599 } else {
09c352ed 3600 more_trbs_coming = false;
04e51901
AX
3601 td->last_trb = ep_ring->enqueue;
3602 field |= TRB_IOC;
09c352ed
MN
3603 /* set BEI, except for the last TD */
3604 if (xhci->hci_version >= 0x100 &&
3605 !(xhci->quirks & XHCI_AVOID_BEI) &&
3606 i < num_tds - 1)
3607 field |= TRB_BEI;
04e51901 3608 }
04e51901 3609 /* Calculate TRB length */
d2510342 3610 trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
04e51901
AX
3611 if (trb_buff_len > td_remain_len)
3612 trb_buff_len = td_remain_len;
3613
4da6e6f2 3614 /* Set the TRB length, TD size, & interrupter fields. */
c840d6ce
MN
3615 remainder = xhci_td_remainder(xhci, running_total,
3616 trb_buff_len, td_len,
124c3937 3617 urb, more_trbs_coming);
c840d6ce 3618
04e51901 3619 length_field = TRB_LEN(trb_buff_len) |
04e51901 3620 TRB_INTR_TARGET(0);
4da6e6f2 3621
2f6d3b65
MN
3622 /* xhci 1.1 with ETE uses TD Size field for TBC */
3623 if (first_trb && xep->use_extended_tbc)
3624 length_field |= TRB_TD_SIZE_TBC(burst_count);
3625 else
3626 length_field |= TRB_TD_SIZE(remainder);
3627 first_trb = false;
3628
3b72fca0 3629 queue_trb(xhci, ep_ring, more_trbs_coming,
04e51901
AX
3630 lower_32_bits(addr),
3631 upper_32_bits(addr),
3632 length_field,
af8b9e63 3633 field);
04e51901
AX
3634 running_total += trb_buff_len;
3635
3636 addr += trb_buff_len;
3637 td_remain_len -= trb_buff_len;
3638 }
3639
3640 /* Check TD length */
3641 if (running_total != td_len) {
3642 xhci_err(xhci, "ISOC TD length unmatch\n");
cf840551
AX
3643 ret = -EINVAL;
3644 goto cleanup;
04e51901
AX
3645 }
3646 }
3647
79b8094f
LB
3648 /* store the next frame id */
3649 if (HCC_CFC(xhci->hcc_params))
3650 xep->next_frame_id = urb->start_frame + num_tds * urb->interval;
3651
c41136b0
AX
3652 if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
3653 if (xhci->quirks & XHCI_AMD_PLL_FIX)
3654 usb_amd_quirk_pll_disable();
3655 }
3656 xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;
3657
e1eab2e0
AX
3658 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
3659 start_cycle, start_trb);
04e51901 3660 return 0;
522989a2
SS
3661cleanup:
3662 /* Clean up a partially enqueued isoc transfer. */
3663
3664 for (i--; i >= 0; i--)
585df1d9 3665 list_del_init(&urb_priv->td[i]->td_list);
522989a2
SS
3666
3667 /* Use the first TD as a temporary variable to turn the TDs we've queued
3668 * into No-ops with a software-owned cycle bit. That way the hardware
3669 * won't accidentally start executing bogus TDs when we partially
3670 * overwrite them. td->first_trb and td->start_seg are already set.
3671 */
3672 urb_priv->td[0]->last_trb = ep_ring->enqueue;
3673 /* Every TRB except the first & last will have its cycle bit flipped. */
3674 td_to_noop(xhci, ep_ring, urb_priv->td[0], true);
3675
3676 /* Reset the ring enqueue back to the first TRB and its cycle bit. */
3677 ep_ring->enqueue = urb_priv->td[0]->first_trb;
3678 ep_ring->enq_seg = urb_priv->td[0]->start_seg;
3679 ep_ring->cycle_state = start_cycle;
b008df60 3680 ep_ring->num_trbs_free = ep_ring->num_trbs_free_temp;
522989a2
SS
3681 usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
3682 return ret;
04e51901
AX
3683}
3684
3685/*
3686 * Check transfer ring to guarantee there is enough room for the urb.
3687 * Update ISO URB start_frame and interval.
79b8094f
LB
3688 * Update interval as xhci_queue_intr_tx does. Use xhci frame_index to
3689 * update urb->start_frame if URB_ISO_ASAP is set in transfer_flags or
3690 * Contiguous Frame ID is not supported by the HC.
04e51901
AX
3691 */
3692int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
3693 struct urb *urb, int slot_id, unsigned int ep_index)
3694{
3695 struct xhci_virt_device *xdev;
3696 struct xhci_ring *ep_ring;
3697 struct xhci_ep_ctx *ep_ctx;
3698 int start_frame;
04e51901
AX
3699 int num_tds, num_trbs, i;
3700 int ret;
79b8094f
LB
3701 struct xhci_virt_ep *xep;
3702 int ist;
04e51901
AX
3703
3704 xdev = xhci->devs[slot_id];
79b8094f 3705 xep = &xhci->devs[slot_id]->eps[ep_index];
04e51901
AX
3706 ep_ring = xdev->eps[ep_index].ring;
3707 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
3708
3709 num_trbs = 0;
3710 num_tds = urb->number_of_packets;
3711 for (i = 0; i < num_tds; i++)
d2510342 3712 num_trbs += count_isoc_trbs_needed(urb, i);
04e51901
AX
3713
3714 /* Check the ring to guarantee there is enough room for the whole urb.
3715 * Do not insert any TD of the URB into the ring if the check fails.
3716 */
5071e6b2 3717 ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx),
3b72fca0 3718 num_trbs, mem_flags);
04e51901
AX
3719 if (ret)
3720 return ret;
3721
79b8094f
LB
3722 /*
3723 * Check interval value. This should be done before we start to
3724 * calculate the start frame value.
3725 */
78140156 3726 check_interval(xhci, urb, ep_ctx);
79b8094f
LB
3727
3728 /* Calculate the start frame and put it in urb->start_frame. */
42df7215 3729 if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) {
5071e6b2 3730 if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_RUNNING) {
42df7215
LB
3731 urb->start_frame = xep->next_frame_id;
3732 goto skip_start_over;
3733 }
79b8094f
LB
3734 }
3735
3736 start_frame = readl(&xhci->run_regs->microframe_index);
3737 start_frame &= 0x3fff;
3738 /*
3739 * Round up to the next frame and account for the time before the TRB
3740 * really gets scheduled by the hardware.
3741 */
3742 ist = HCS_IST(xhci->hcs_params2) & 0x7;
3743 if (HCS_IST(xhci->hcs_params2) & (1 << 3))
3744 ist <<= 3;
3745 start_frame += ist + XHCI_CFC_DELAY;
3746 start_frame = roundup(start_frame, 8);
3747
3748 /*
3749 * Round up to the next ESIT (Endpoint Service Interval Time) if ESIT
3750 * is greater than 8 microframes.
3751 */
3752 if (urb->dev->speed == USB_SPEED_LOW ||
3753 urb->dev->speed == USB_SPEED_FULL) {
3754 start_frame = roundup(start_frame, urb->interval << 3);
3755 urb->start_frame = start_frame >> 3;
3756 } else {
3757 start_frame = roundup(start_frame, urb->interval);
3758 urb->start_frame = start_frame;
3759 }
3760
3761skip_start_over:
b008df60
AX
3762 ep_ring->num_trbs_free_temp = ep_ring->num_trbs_free;
3763
3fc8206d 3764 return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index);
04e51901
AX
3765}
3766
d0e96f5a
SS
3767/**** Command Ring Operations ****/
3768
913a8a34
SS
3769/* Generic function for queueing a command TRB on the command ring.
3770 * Check to make sure there's room on the command ring for one command TRB.
3771 * Also check that there's room reserved for commands that must not fail.
3772 * If this is a command that must not fail, meaning command_must_succeed = TRUE,
3773 * then only check for the number of reserved spots.
3774 * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
3775 * because the command event handler may want to resubmit a failed command.
3776 */
ddba5cd0
MN
3777static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
3778 u32 field1, u32 field2,
3779 u32 field3, u32 field4, bool command_must_succeed)
7f84eef0 3780{
913a8a34 3781 int reserved_trbs = xhci->cmd_ring_reserved_trbs;
d1dc908a 3782 int ret;
ad6b1d91 3783
98d74f9c
MN
3784 if ((xhci->xhc_state & XHCI_STATE_DYING) ||
3785 (xhci->xhc_state & XHCI_STATE_HALTED)) {
ad6b1d91 3786 xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n");
c9aa1a2d 3787 return -ESHUTDOWN;
ad6b1d91 3788 }
d1dc908a 3789
913a8a34
SS
3790 if (!command_must_succeed)
3791 reserved_trbs++;
3792
d1dc908a 3793 ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
3b72fca0 3794 reserved_trbs, GFP_ATOMIC);
d1dc908a
SS
3795 if (ret < 0) {
3796 xhci_err(xhci, "ERR: No room for command on command ring\n");
913a8a34
SS
3797 if (command_must_succeed)
3798 xhci_err(xhci, "ERR: Reserved TRB counting for "
3799 "unfailable commands failed.\n");
d1dc908a 3800 return ret;
7f84eef0 3801 }
c9aa1a2d
MN
3802
3803 cmd->command_trb = xhci->cmd_ring->enqueue;
ddba5cd0 3804
c311e391 3805 /* if there are no other commands queued we start the timeout timer */
daa47f21 3806 if (list_empty(&xhci->cmd_list)) {
c311e391 3807 xhci->current_cmd = cmd;
cb4d5ce5 3808 xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
c311e391
MN
3809 }
3810
daa47f21
LB
3811 list_add_tail(&cmd->cmd_list, &xhci->cmd_list);
3812
3b72fca0
AX
3813 queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
3814 field4 | xhci->cmd_ring->cycle_state);
7f84eef0
SS
3815 return 0;
3816}
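
/*
 * Illustrative example (not driver code): with cmd_ring_reserved_trbs == 2,
 * an ordinary command must find room for 3 TRBs (its own slot plus the
 * reserve), while a must-succeed command may dip into the 2 reserved slots.
 */
static inline int example_room_needed(int reserved_trbs, bool must_succeed)
{
	return must_succeed ? reserved_trbs : reserved_trbs + 1;
}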
3817
3ffbba95 3818/* Queue a slot enable or disable request on the command ring */
ddba5cd0
MN
3819int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd,
3820 u32 trb_type, u32 slot_id)
3ffbba95 3821{
ddba5cd0 3822 return queue_command(xhci, cmd, 0, 0, 0,
913a8a34 3823 TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
3ffbba95
SS
3824}
3825
3826/* Queue an address device command TRB */
ddba5cd0
MN
3827int xhci_queue_address_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
3828 dma_addr_t in_ctx_ptr, u32 slot_id, enum xhci_setup_dev setup)
3ffbba95 3829{
ddba5cd0 3830 return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
8e595a5d 3831 upper_32_bits(in_ctx_ptr), 0,
48fc7dbd
DW
3832 TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id)
3833 | (setup == SETUP_CONTEXT_ONLY ? TRB_BSR : 0), false);
2a8f82c4
SS
3834}
3835
ddba5cd0 3836int xhci_queue_vendor_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
0238634d
SS
3837 u32 field1, u32 field2, u32 field3, u32 field4)
3838{
ddba5cd0 3839 return queue_command(xhci, cmd, field1, field2, field3, field4, false);
0238634d
SS
3840}
3841
2a8f82c4 3842/* Queue a reset device command TRB */
ddba5cd0
MN
3843int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
3844 u32 slot_id)
2a8f82c4 3845{
ddba5cd0 3846 return queue_command(xhci, cmd, 0, 0, 0,
2a8f82c4 3847 TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
913a8a34 3848 false);
3ffbba95 3849}
f94e0186
SS
3850
3851/* Queue a configure endpoint command TRB */
ddba5cd0
MN
3852int xhci_queue_configure_endpoint(struct xhci_hcd *xhci,
3853 struct xhci_command *cmd, dma_addr_t in_ctx_ptr,
913a8a34 3854 u32 slot_id, bool command_must_succeed)
f94e0186 3855{
ddba5cd0 3856 return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
8e595a5d 3857 upper_32_bits(in_ctx_ptr), 0,
913a8a34
SS
3858 TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
3859 command_must_succeed);
f94e0186 3860}
ae636747 3861
f2217e8e 3862/* Queue an evaluate context command TRB */
ddba5cd0
MN
3863int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd,
3864 dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed)
f2217e8e 3865{
ddba5cd0 3866 return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
f2217e8e 3867 upper_32_bits(in_ctx_ptr), 0,
913a8a34 3868 TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
4b266541 3869 command_must_succeed);
f2217e8e
SS
3870}
3871
be88fe4f
AX
3872/*
3873 * Suspend is set to indicate "Stop Endpoint Command" is being issued to stop
3874 * activity on an endpoint that is about to be suspended.
3875 */
ddba5cd0
MN
3876int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd,
3877 int slot_id, unsigned int ep_index, int suspend)
ae636747
SS
3878{
3879 u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
3880 u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
3881 u32 type = TRB_TYPE(TRB_STOP_RING);
be88fe4f 3882 u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);
ae636747 3883
ddba5cd0 3884 return queue_command(xhci, cmd, 0, 0, 0,
be88fe4f 3885 trb_slot_id | trb_ep_index | type | trb_suspend, false);
ae636747
SS
3886}
3887
d3a43e66
HG
3888/* Set Transfer Ring Dequeue Pointer command */
3889void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
3890 unsigned int slot_id, unsigned int ep_index,
3891 unsigned int stream_id,
3892 struct xhci_dequeue_state *deq_state)
ae636747
SS
3893{
3894 dma_addr_t addr;
3895 u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
3896 u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
e9df17eb 3897 u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id);
95241dbd 3898 u32 trb_sct = 0;
ae636747 3899 u32 type = TRB_TYPE(TRB_SET_DEQ);
bf161e85 3900 struct xhci_virt_ep *ep;
1e3452e3
HG
3901 struct xhci_command *cmd;
3902 int ret;
ae636747 3903
d3a43e66
HG
3904 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
3905 "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), new deq ptr = %p (0x%llx dma), new cycle = %u",
3906 deq_state->new_deq_seg,
3907 (unsigned long long)deq_state->new_deq_seg->dma,
3908 deq_state->new_deq_ptr,
3909 (unsigned long long)xhci_trb_virt_to_dma(
3910 deq_state->new_deq_seg, deq_state->new_deq_ptr),
3911 deq_state->new_cycle_state);
3912
3913 addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
3914 deq_state->new_deq_ptr);
c92bcfa7 3915 if (addr == 0) {
ae636747 3916 xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
700e2052 3917 xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
d3a43e66
HG
3918 deq_state->new_deq_seg, deq_state->new_deq_ptr);
3919 return;
c92bcfa7 3920 }
bf161e85
SS
3921 ep = &xhci->devs[slot_id]->eps[ep_index];
3922 if ((ep->ep_state & SET_DEQ_PENDING)) {
3923 xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
3924 xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n");
d3a43e66 3925 return;
bf161e85 3926 }
1e3452e3
HG
3927
3928 /* This function gets called from contexts where it cannot sleep */
3929 cmd = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
3930 if (!cmd) {
3931 xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr: ENOMEM\n");
d3a43e66 3932 return;
1e3452e3
HG
3933 }
3934
d3a43e66
HG
3935 ep->queued_deq_seg = deq_state->new_deq_seg;
3936 ep->queued_deq_ptr = deq_state->new_deq_ptr;
95241dbd
HG
3937 if (stream_id)
3938 trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
1e3452e3 3939 ret = queue_command(xhci, cmd,
d3a43e66
HG
3940 lower_32_bits(addr) | trb_sct | deq_state->new_cycle_state,
3941 upper_32_bits(addr), trb_stream_id,
3942 trb_slot_id | trb_ep_index | type, false);
1e3452e3
HG
3943 if (ret < 0) {
3944 xhci_free_command(xhci, cmd);
d3a43e66 3945 return;
1e3452e3
HG
3946 }
3947
d3a43e66
HG
3948 /* Stop the TD queueing code from ringing the doorbell until
3949 * this command completes. The HC won't set the dequeue pointer
3950 * if the ring is running, and ringing the doorbell starts the
3951 * ring running.
3952 */
3953 ep->ep_state |= SET_DEQ_PENDING;
ae636747 3954}
a1587d97 3955
ddba5cd0
MN
3956int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
3957 int slot_id, unsigned int ep_index)
a1587d97
SS
3958{
3959 u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
3960 u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
3961 u32 type = TRB_TYPE(TRB_RESET_EP);
3962
ddba5cd0
MN
3963 return queue_command(xhci, cmd, 0, 0, 0,
3964 trb_slot_id | trb_ep_index | type, false);
a1587d97 3965}