/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

#include "xhci.h"
#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");
/* TODO: copied from ehci-hcd.c - can this be refactored? */
/*
 * handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done).  There are two failure modes: the "usec" timeout has
 * passed (major hardware flakeout), or the register reads as all-ones
 * (hardware removed).
 */
static int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
                u32 mask, u32 done, int usec)
{
        u32 result;

        do {
                result = xhci_readl(xhci, ptr);
                if (result == ~(u32)0)          /* card removed */
                        return -ENODEV;
                result &= mask;
                if (result == done)
                        return 0;
                udelay(1);
                usec--;
        } while (usec > 0);
        return -ETIMEDOUT;
}
/*
 * Disable interrupts and begin the xHCI halting process.
 */
void xhci_quiesce(struct xhci_hcd *xhci)
{
        u32 halted;
        u32 cmd;
        u32 mask;

        mask = ~(XHCI_IRQS);
        halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT;
        if (!halted)
                mask &= ~CMD_RUN;

        cmd = xhci_readl(xhci, &xhci->op_regs->command);
        cmd &= mask;
        xhci_writel(xhci, cmd, &xhci->op_regs->command);
}
/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete any current and actively pipelined transactions, and
 * should halt within 16 microframes of the run/stop bit being cleared.
 * Read HC Halted bit in the status register to see when the HC is finished.
 * XXX: shouldn't we set HC_STATE_HALT here somewhere?
 */
int xhci_halt(struct xhci_hcd *xhci)
{
        xhci_dbg(xhci, "// Halt the HC\n");
        xhci_quiesce(xhci);

        return handshake(xhci, &xhci->op_regs->status,
                        STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
}
/*
 * Reset a halted HC, and set the internal HC state to HC_STATE_HALT.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci)
{
        u32 command;
        u32 state;

        state = xhci_readl(xhci, &xhci->op_regs->status);
        if ((state & STS_HALT) == 0) {
                xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
                return 0;
        }

        xhci_dbg(xhci, "// Reset the HC\n");
        command = xhci_readl(xhci, &xhci->op_regs->command);
        command |= CMD_RESET;
        xhci_writel(xhci, command, &xhci->op_regs->command);
        /* XXX: Why does EHCI set this here?  Shouldn't other code do this? */
        xhci_to_hcd(xhci)->state = HC_STATE_HALT;

        return handshake(xhci, &xhci->op_regs->command,
                        CMD_RESET, 0, 250 * 1000);
}
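/*
 * Note on the handshake above: the controller clears CMD_RESET on its own
 * once the reset completes, so we poll the command register until the bit
 * reads back as zero, waiting up to 250 * 1000 microseconds (250 ms).
 */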
/* Set up MSI-X table for entry 0 (may claim other entries later) */
static int xhci_setup_msix(struct xhci_hcd *xhci)
{
        int ret;
        struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

        xhci->msix_count = 0;
        /* XXX: did I do this right?  ixgbe does kcalloc for more than one */
        xhci->msix_entries = kmalloc(sizeof(struct msix_entry), GFP_KERNEL);
        if (!xhci->msix_entries) {
                xhci_err(xhci, "Failed to allocate MSI-X entries\n");
                return -ENOMEM;
        }
        xhci->msix_entries[0].entry = 0;

        ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
        if (ret) {
                xhci_err(xhci, "Failed to enable MSI-X\n");
                goto disable_msix;
        }

        /*
         * Pass the xhci pointer value as the request_irq "cookie".
         * If more irqs are added, this will need to be unique for each one.
         */
        ret = request_irq(xhci->msix_entries[0].vector, &xhci_irq, 0,
                        "xHCI", xhci_to_hcd(xhci));
        if (ret) {
                xhci_err(xhci, "Failed to allocate MSI-X interrupt\n");
                goto disable_msix;
        }
        xhci_dbg(xhci, "Finished setting up MSI-X\n");
        return 0;

disable_msix:
        pci_disable_msix(pdev);
        kfree(xhci->msix_entries);
        xhci->msix_entries = NULL;
        return ret;
}
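/*
 * Illustrative sketch only (an assumption, not driver code): if more than
 * one MSI-X vector is ever claimed, the allocation in xhci_setup_msix()
 * would presumably switch to the kcalloc pattern its XXX comment borrows
 * from ixgbe:
 *
 *      xhci->msix_entries = kcalloc(xhci->msix_count,
 *                      sizeof(struct msix_entry), GFP_KERNEL);
 *      for (i = 0; i < xhci->msix_count; i++)
 *              xhci->msix_entries[i].entry = i;
 */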
/* XXX: code duplication; can xhci_setup_msix call this? */
/* Free any IRQs and disable MSI-X */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
        struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

        if (!xhci->msix_entries)
                return;

        /* Pass the same cookie that was handed to request_irq() */
        free_irq(xhci->msix_entries[0].vector, xhci_to_hcd(xhci));
        pci_disable_msix(pdev);
        kfree(xhci->msix_entries);
        xhci->msix_entries = NULL;
        xhci_dbg(xhci, "Finished cleaning up MSI-X\n");
}
/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device context array, create
 * device contexts (?), set up a command ring segment (or two?), create event
 * ring (one for now).
 */
int xhci_init(struct usb_hcd *hcd)
{
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        int retval = 0;

        xhci_dbg(xhci, "xhci_init\n");
        spin_lock_init(&xhci->lock);
        if (link_quirk) {
                xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits.\n");
                xhci->quirks |= XHCI_LINK_TRB_QUIRK;
        } else {
                xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n");
        }
        retval = xhci_mem_init(xhci, GFP_KERNEL);
        xhci_dbg(xhci, "Finished xhci_init\n");

        return retval;
}
/*
 * Called in interrupt context when there might be work
 * queued on the event ring
 *
 * xhci->lock must be held by caller.
 */
static void xhci_work(struct xhci_hcd *xhci)
{
        u32 temp;
        u64 temp_64;

        /*
         * Clear the op reg interrupt status first,
         * so we can receive interrupts from other MSI-X interrupters.
         * Write 1 to clear the interrupt status.
         */
        temp = xhci_readl(xhci, &xhci->op_regs->status);
        temp |= STS_EINT;
        xhci_writel(xhci, temp, &xhci->op_regs->status);
        /* FIXME when MSI-X is supported and there are multiple vectors */
        /* Clear the MSI-X event interrupt status */

        /* Acknowledge the interrupt */
        temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
        temp |= 0x3;
        xhci_writel(xhci, temp, &xhci->ir_set->irq_pending);
        /* Flush posted writes */
        xhci_readl(xhci, &xhci->ir_set->irq_pending);

        if (xhci->xhc_state & XHCI_STATE_DYING)
                xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
                                "Shouldn't IRQs be disabled?\n");
        else
                /* FIXME this should be a delayed service routine
                 * that clears the EHB.
                 */
                xhci_handle_event(xhci);

        /* Clear the event handler busy flag (RW1C); the event ring should be empty. */
        temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
        xhci_write_64(xhci, temp_64 | ERST_EHB, &xhci->ir_set->erst_dequeue);
        /* Flush posted writes -- FIXME is this necessary? */
        xhci_readl(xhci, &xhci->ir_set->irq_pending);
}
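/*
 * Note on the acknowledgment sequence in xhci_work() above: STS_EINT in the
 * status register and the interrupt pending bit in irq_pending are both
 * "write 1 to clear" (RW1C), so writing back the value just read (with the
 * relevant bits set) is what actually clears the pending interrupt.
 */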
/*-------------------------------------------------------------------------*/
/*
 * xHCI spec says we can get an interrupt, and if the HC has an error condition,
 * we might get bad data out of the event ring.  Section 4.10.2.7 has a list of
 * indicators of an event TRB error, but we check the status *first* to be safe.
 */
irqreturn_t xhci_irq(struct usb_hcd *hcd)
{
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        u32 temp, temp2;
        union xhci_trb *trb;

        spin_lock(&xhci->lock);
        trb = xhci->event_ring->dequeue;
        /* Check if the xHC generated the interrupt, or the irq is shared */
        temp = xhci_readl(xhci, &xhci->op_regs->status);
        temp2 = xhci_readl(xhci, &xhci->ir_set->irq_pending);
        if (temp == 0xffffffff && temp2 == 0xffffffff)
                goto hw_died;

        if (!(temp & STS_EINT) && !ER_IRQ_PENDING(temp2)) {
                spin_unlock(&xhci->lock);
                return IRQ_NONE;
        }
        xhci_dbg(xhci, "op reg status = %08x\n", temp);
        xhci_dbg(xhci, "ir set irq_pending = %08x\n", temp2);
        xhci_dbg(xhci, "Event ring dequeue ptr:\n");
        xhci_dbg(xhci, "@%llx %08x %08x %08x %08x\n",
                        (unsigned long long)xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, trb),
                        lower_32_bits(trb->link.segment_ptr),
                        upper_32_bits(trb->link.segment_ptr),
                        (unsigned int) trb->link.intr_target,
                        (unsigned int) trb->link.control);

        if (temp & STS_FATAL) {
                xhci_warn(xhci, "WARNING: Host System Error\n");
                xhci_halt(xhci);
hw_died:
                xhci_to_hcd(xhci)->state = HC_STATE_HALT;
                spin_unlock(&xhci->lock);
                return IRQ_HANDLED;
        }

        xhci_work(xhci);
        spin_unlock(&xhci->lock);

        return IRQ_HANDLED;
}
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
void xhci_event_ring_work(unsigned long arg)
{
        unsigned long flags;
        int temp;
        u64 temp_64;
        struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
        int i, j;

        xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies);

        spin_lock_irqsave(&xhci->lock, flags);
        temp = xhci_readl(xhci, &xhci->op_regs->status);
        xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
        if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) {
                xhci_dbg(xhci, "HW died, polling stopped.\n");
                spin_unlock_irqrestore(&xhci->lock, flags);
                return;
        }

        temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
        xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
        xhci_dbg(xhci, "No-op commands handled = %d\n", xhci->noops_handled);
        xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask);
        xhci->error_bitmask = 0;
        xhci_dbg(xhci, "Event ring:\n");
        xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
        xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
        temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
        temp_64 &= ~ERST_PTR_MASK;
        xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
        xhci_dbg(xhci, "Command ring:\n");
        xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
        xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
        xhci_dbg_cmd_ptrs(xhci);
        for (i = 0; i < MAX_HC_SLOTS; ++i) {
                if (!xhci->devs[i])
                        continue;
                for (j = 0; j < 31; ++j) {
                        struct xhci_ring *ring = xhci->devs[i]->eps[j].ring;
                        if (!ring)
                                continue;
                        xhci_dbg(xhci, "Dev %d endpoint ring %d:\n", i, j);
                        xhci_debug_segment(xhci, ring->deq_seg);
                }
        }

        if (xhci->noops_submitted != NUM_TEST_NOOPS)
                if (xhci_setup_one_noop(xhci))
                        xhci_ring_cmd_db(xhci);
        spin_unlock_irqrestore(&xhci->lock, flags);

        if (!xhci->zombie)
                mod_timer(&xhci->event_ring_timer, jiffies + POLL_TIMEOUT * HZ);
        else
                xhci_dbg(xhci, "Quit polling the event ring.\n");
}
#endif
/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Setup MSI-X vectors and enable interrupts.
 */
int xhci_run(struct usb_hcd *hcd)
{
        u32 temp;
        u64 temp_64;
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        void (*doorbell)(struct xhci_hcd *) = NULL;

        hcd->uses_new_polling = 1;

        xhci_dbg(xhci, "xhci_run\n");
#if 0   /* FIXME: MSI not setup yet */
        /* Do this at the very last minute */
        ret = xhci_setup_msix(xhci);
        if (ret)
                return ret;
#endif
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
        init_timer(&xhci->event_ring_timer);
        xhci->event_ring_timer.data = (unsigned long) xhci;
        xhci->event_ring_timer.function = xhci_event_ring_work;
        /* Poll the event ring */
        xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ;
        xhci->zombie = 0;
        xhci_dbg(xhci, "Setting event ring polling timer\n");
        add_timer(&xhci->event_ring_timer);
#endif

        xhci_dbg(xhci, "Command ring memory map follows:\n");
        xhci_debug_ring(xhci, xhci->cmd_ring);
        xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
        xhci_dbg_cmd_ptrs(xhci);

        xhci_dbg(xhci, "ERST memory map follows:\n");
        xhci_dbg_erst(xhci, &xhci->erst);
        xhci_dbg(xhci, "Event ring:\n");
        xhci_debug_ring(xhci, xhci->event_ring);
        xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
        temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
        temp_64 &= ~ERST_PTR_MASK;
        xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);

        xhci_dbg(xhci, "// Set the interrupt modulation register\n");
        temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
        temp &= ~ER_IRQ_INTERVAL_MASK;
        temp |= (u32) 160;
        xhci_writel(xhci, temp, &xhci->ir_set->irq_control);

        /* Set the HCD state before we enable the irqs */
        hcd->state = HC_STATE_RUNNING;
        temp = xhci_readl(xhci, &xhci->op_regs->command);
        temp |= (CMD_EIE);
        xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n",
                        temp);
        xhci_writel(xhci, temp, &xhci->op_regs->command);

        temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
        xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
                        xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
        xhci_writel(xhci, ER_IRQ_ENABLE(temp),
                        &xhci->ir_set->irq_pending);
        xhci_print_ir_set(xhci, xhci->ir_set, 0);

        if (NUM_TEST_NOOPS > 0)
                doorbell = xhci_setup_one_noop(xhci);

        temp = xhci_readl(xhci, &xhci->op_regs->command);
        temp |= (CMD_RUN);
        xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
                        temp);
        xhci_writel(xhci, temp, &xhci->op_regs->command);
        /* Flush PCI posted writes */
        temp = xhci_readl(xhci, &xhci->op_regs->command);
        xhci_dbg(xhci, "// @%p = 0x%x\n", &xhci->op_regs->command, temp);
        if (doorbell)
                (*doorbell)(xhci);

        xhci_dbg(xhci, "Finished xhci_run\n");
        return 0;
}
/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
 */
void xhci_stop(struct usb_hcd *hcd)
{
        u32 temp;
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);

        spin_lock_irq(&xhci->lock);
        xhci_halt(xhci);
        xhci_reset(xhci);
        spin_unlock_irq(&xhci->lock);

#if 0   /* No MSI yet */
        xhci_cleanup_msix(xhci);
#endif
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
        /* Tell the event ring poll function not to reschedule */
        xhci->zombie = 1;
        del_timer_sync(&xhci->event_ring_timer);
#endif

        xhci_dbg(xhci, "// Disabling event ring interrupts\n");
        temp = xhci_readl(xhci, &xhci->op_regs->status);
        xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
        temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
        xhci_writel(xhci, ER_IRQ_DISABLE(temp),
                        &xhci->ir_set->irq_pending);
        xhci_print_ir_set(xhci, xhci->ir_set, 0);

        xhci_dbg(xhci, "cleaning up memory\n");
        xhci_mem_cleanup(xhci);
        xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
                        xhci_readl(xhci, &xhci->op_regs->status));
}
/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting.  We assume that the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 */
void xhci_shutdown(struct usb_hcd *hcd)
{
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);

        spin_lock_irq(&xhci->lock);
        xhci_halt(xhci);
        spin_unlock_irq(&xhci->lock);

#if 0   /* No MSI yet */
        xhci_cleanup_msix(xhci);
#endif

        xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
                        xhci_readl(xhci, &xhci->op_regs->status));
}
/*-------------------------------------------------------------------------*/
/*
 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
 * HCDs.  Find the index for an endpoint given its descriptor.  Use the return
 * value to right shift 1 for the bitmask.
 *
 * Index = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 *
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
        unsigned int index;

        if (usb_endpoint_xfer_control(desc))
                index = (unsigned int) (usb_endpoint_num(desc)*2);
        else
                index = (unsigned int) (usb_endpoint_num(desc)*2) +
                        (usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
        return index;
}
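/*
 * Worked examples of the index formula above (illustrative only):
 *   ep 0x81 (ep 1 IN):  (1 * 2) + 1 - 1 = 2
 *   ep 0x02 (ep 2 OUT): (2 * 2) + 0 - 1 = 3
 *   ep 0 (control):     (0 * 2)         = 0, the default control endpoint
 * With the flag helpers below, index 2 becomes bitmask 1 << 3 = 0b1000.
 */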
/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
{
        return 1 << (xhci_get_endpoint_index(desc) + 1);
}
/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
{
        return 1 << (ep_index + 1);
}
/* Compute the last valid endpoint context index.  Basically, this is the
 * endpoint index plus one.  For slot contexts with more than one valid endpoint,
 * we find the most significant bit set in the added contexts flags.
 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
 */
unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
        return fls(added_ctxs) - 1;
}
/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
 */
int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
                struct usb_host_endpoint *ep, int check_ep, const char *func) {
        if (!hcd || (check_ep && !ep) || !udev) {
                printk(KERN_DEBUG "xHCI %s called with invalid args\n",
                                func);
                return -EINVAL;
        }
        if (!udev->parent) {
                printk(KERN_DEBUG "xHCI %s called for root hub\n",
                                func);
                return 0;
        }
        if (!udev->slot_id) {
                printk(KERN_DEBUG "xHCI %s called with unaddressed device\n",
                                func);
                return -EINVAL;
        }
        return 1;
}
static int xhci_configure_endpoint(struct xhci_hcd *xhci,
                struct usb_device *udev, struct xhci_command *command,
                bool ctx_change, bool must_succeed);
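/*
 * Usage sketch for xhci_check_args() above: callers bail out on anything
 * but a strictly positive return, e.g.
 *
 *      ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
 *      if (ret <= 0)
 *              return ret;
 */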
/*
 * Full speed devices may have a max packet size greater than 8 bytes, but the
 * USB core doesn't know that until it reads the first 8 bytes of the
 * descriptor.  If the usb_device's max packet size changes after that point,
 * we need to issue an evaluate context command and wait on it.
 */
static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
                unsigned int ep_index, struct urb *urb)
{
        struct xhci_container_ctx *in_ctx;
        struct xhci_container_ctx *out_ctx;
        struct xhci_input_control_ctx *ctrl_ctx;
        struct xhci_ep_ctx *ep_ctx;
        int max_packet_size;
        int hw_max_packet_size;
        int ret = 0;

        out_ctx = xhci->devs[slot_id]->out_ctx;
        ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
        hw_max_packet_size = MAX_PACKET_DECODED(ep_ctx->ep_info2);
        max_packet_size = urb->dev->ep0.desc.wMaxPacketSize;
        if (hw_max_packet_size != max_packet_size) {
                xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n");
                xhci_dbg(xhci, "Max packet size in usb_device = %d\n",
                                max_packet_size);
                xhci_dbg(xhci, "Max packet size in xHCI HW = %d\n",
                                hw_max_packet_size);
                xhci_dbg(xhci, "Issuing evaluate context command.\n");

                /* Set up the modified control endpoint 0 */
                xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
                                xhci->devs[slot_id]->out_ctx, ep_index);
                in_ctx = xhci->devs[slot_id]->in_ctx;
                ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
                ep_ctx->ep_info2 &= ~MAX_PACKET_MASK;
                ep_ctx->ep_info2 |= MAX_PACKET(max_packet_size);

                /* Set up the input context flags for the command */
                /* FIXME: This won't work if a non-default control endpoint
                 * changes max packet sizes.
                 */
                ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
                ctrl_ctx->add_flags = EP0_FLAG;
                ctrl_ctx->drop_flags = 0;

                xhci_dbg(xhci, "Slot %d input context\n", slot_id);
                xhci_dbg_ctx(xhci, in_ctx, ep_index);
                xhci_dbg(xhci, "Slot %d output context\n", slot_id);
                xhci_dbg_ctx(xhci, out_ctx, ep_index);

                ret = xhci_configure_endpoint(xhci, urb->dev, NULL,
                                true, false);

                /* Clean up the input context for later use by bandwidth
                 * functions.
                 */
                ctrl_ctx->add_flags = SLOT_FLAG;
        }
        return ret;
}
/*
 * non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 */
int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        unsigned long flags;
        int ret = 0;
        unsigned int slot_id, ep_index;

        if (!urb || xhci_check_args(hcd, urb->dev, urb->ep, true, __func__) <= 0)
                return -EINVAL;

        slot_id = urb->dev->slot_id;
        ep_index = xhci_get_endpoint_index(&urb->ep->desc);

        if (!xhci->devs || !xhci->devs[slot_id]) {
                if (!in_interrupt())
                        dev_warn(&urb->dev->dev, "WARN: urb submitted for dev with no Slot ID\n");
                ret = -EINVAL;
                goto exit;
        }
        if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
                if (!in_interrupt())
                        xhci_dbg(xhci, "urb submitted during PCI suspend\n");
                ret = -ESHUTDOWN;
                goto exit;
        }
        if (usb_endpoint_xfer_control(&urb->ep->desc)) {
                /* Check to see if the max packet size for the default control
                 * endpoint changed during FS device enumeration
                 */
                if (urb->dev->speed == USB_SPEED_FULL) {
                        ret = xhci_check_maxpacket(xhci, slot_id,
                                        ep_index, urb);
                        if (ret < 0)
                                return ret;
                }

                /* We have a spinlock and interrupts disabled, so we must pass
                 * atomic context to this function, which may allocate memory.
                 */
                spin_lock_irqsave(&xhci->lock, flags);
                if (xhci->xhc_state & XHCI_STATE_DYING)
                        goto dying;
                ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
                                slot_id, ep_index);
                spin_unlock_irqrestore(&xhci->lock, flags);
        } else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
                spin_lock_irqsave(&xhci->lock, flags);
                if (xhci->xhc_state & XHCI_STATE_DYING)
                        goto dying;
                ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
                                slot_id, ep_index);
                spin_unlock_irqrestore(&xhci->lock, flags);
        } else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
                spin_lock_irqsave(&xhci->lock, flags);
                if (xhci->xhc_state & XHCI_STATE_DYING)
                        goto dying;
                ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
                                slot_id, ep_index);
                spin_unlock_irqrestore(&xhci->lock, flags);
        } else {
                ret = -EINVAL;
        }
exit:
        return ret;
dying:
        xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
                        "non-responsive xHCI host.\n",
                        urb->ep->desc.bEndpointAddress, urb);
        ret = -ESHUTDOWN;
        spin_unlock_irqrestore(&xhci->lock, flags);
        goto exit;
}
/*
 * Remove the URB's TD from the endpoint ring.  This may cause the HC to stop
 * USB transfers, potentially stopping in the middle of a TRB buffer.  The HC
 * should pick up where it left off in the TD, unless a Set Transfer Ring
 * Dequeue Pointer is issued.
 *
 * The TRBs that make up the buffers for the canceled URB will be "removed" from
 * the ring.  Since the ring is a contiguous structure, they can't be physically
 * removed.  Instead, there are two options:
 *
 *  1) If the HC is in the middle of processing the URB to be canceled, we
 *     simply move the ring's dequeue pointer past those TRBs using the Set
 *     Transfer Ring Dequeue Pointer command.  This will be the common case,
 *     when drivers timeout on the last submitted URB and attempt to cancel.
 *
 *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
 *     series of 1-TRB transfer no-op TDs.  (No-ops shouldn't be chained.)  The
 *     HC will need to invalidate any TRBs it has cached after the stop
 *     endpoint command, as noted in the xHCI 0.95 errata.
 *
 *  3) The TD may have completed by the time the Stop Endpoint Command
 *     completes, so software needs to handle that case too.
 *
 * This function should protect against the TD enqueueing code ringing the
 * doorbell while this code is waiting for a Stop Endpoint command to complete.
 * It also needs to account for multiple cancellations happening at the same
 * time for the same endpoint.
 *
 * Note that this function can be called in any context, or so says
 * usb_hcd_unlink_urb()
 */
int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
        unsigned long flags;
        int ret;
        u32 temp;
        struct xhci_hcd *xhci;
        struct xhci_td *td;
        unsigned int ep_index;
        struct xhci_ring *ep_ring;
        struct xhci_virt_ep *ep;

        xhci = hcd_to_xhci(hcd);
        spin_lock_irqsave(&xhci->lock, flags);
        /* Make sure the URB hasn't completed or been unlinked already */
        ret = usb_hcd_check_unlink_urb(hcd, urb, status);
        if (ret || !urb->hcpriv)
                goto done;
        temp = xhci_readl(xhci, &xhci->op_regs->status);
        if (temp == 0xffffffff) {
                xhci_dbg(xhci, "HW died, freeing TD.\n");
                td = (struct xhci_td *) urb->hcpriv;

                usb_hcd_unlink_urb_from_ep(hcd, urb);
                spin_unlock_irqrestore(&xhci->lock, flags);
                usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, -ESHUTDOWN);
                kfree(td);
                return ret;
        }
        if (xhci->xhc_state & XHCI_STATE_DYING) {
                xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on "
                                "non-responsive xHCI host.\n",
                                urb->ep->desc.bEndpointAddress, urb);
                /* Let the stop endpoint command watchdog timer (which set this
                 * state) finish cleaning up the endpoint TD lists.  We must
                 * have caught it in the middle of dropping a lock and giving
                 * back an URB.
                 */
                goto done;
        }

        xhci_dbg(xhci, "Cancel URB %p\n", urb);
        xhci_dbg(xhci, "Event ring:\n");
        xhci_debug_ring(xhci, xhci->event_ring);
        ep_index = xhci_get_endpoint_index(&urb->ep->desc);
        ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
        ep_ring = ep->ring;
        xhci_dbg(xhci, "Endpoint ring:\n");
        xhci_debug_ring(xhci, ep_ring);
        td = (struct xhci_td *) urb->hcpriv;

        list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
        /* Queue a stop endpoint command, but only if this is
         * the first cancellation to be handled.
         */
        if (!(ep->ep_state & EP_HALT_PENDING)) {
                ep->ep_state |= EP_HALT_PENDING;
                ep->stop_cmds_pending++;
                ep->stop_cmd_timer.expires = jiffies +
                        XHCI_STOP_EP_CMD_TIMEOUT * HZ;
                add_timer(&ep->stop_cmd_timer);
                xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index);
                xhci_ring_cmd_db(xhci);
        }
done:
        spin_unlock_irqrestore(&xhci->lock, flags);
        return ret;
}
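/*
 * Timer arithmetic note for the watchdog armed in xhci_urb_dequeue() above:
 * XHCI_STOP_EP_CMD_TIMEOUT is a value in seconds, so multiplying by HZ and
 * adding to jiffies yields the tick at which the stop endpoint command
 * watchdog fires if the command never completes.
 */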
/* Drop an endpoint from a new bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint that is being
 * disabled, so there's no need for mutual exclusion to protect
 * the xhci->devs[slot_id] structure.
 */
int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
                struct usb_host_endpoint *ep)
{
        struct xhci_hcd *xhci;
        struct xhci_container_ctx *in_ctx, *out_ctx;
        struct xhci_input_control_ctx *ctrl_ctx;
        struct xhci_slot_ctx *slot_ctx;
        unsigned int last_ctx;
        unsigned int ep_index;
        struct xhci_ep_ctx *ep_ctx;
        u32 drop_flag;
        u32 new_add_flags, new_drop_flags, new_slot_info;
        int ret;

        ret = xhci_check_args(hcd, udev, ep, 1, __func__);
        if (ret <= 0)
                return ret;
        xhci = hcd_to_xhci(hcd);
        xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);

        drop_flag = xhci_get_endpoint_flag(&ep->desc);
        if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
                xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
                                __func__, drop_flag);
                return 0;
        }

        if (!xhci->devs || !xhci->devs[udev->slot_id]) {
                xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
                                __func__);
                return -EINVAL;
        }

        in_ctx = xhci->devs[udev->slot_id]->in_ctx;
        out_ctx = xhci->devs[udev->slot_id]->out_ctx;
        ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
        ep_index = xhci_get_endpoint_index(&ep->desc);
        ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
        /* If the HC already knows the endpoint is disabled,
         * or the HCD has noted it is disabled, ignore this request
         */
        if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED ||
                        ctrl_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) {
                xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
                                __func__, ep);
                return 0;
        }

        ctrl_ctx->drop_flags |= drop_flag;
        new_drop_flags = ctrl_ctx->drop_flags;

        ctrl_ctx->add_flags &= ~drop_flag;
        new_add_flags = ctrl_ctx->add_flags;

        last_ctx = xhci_last_valid_endpoint(ctrl_ctx->add_flags);
        slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
        /* Update the last valid endpoint context, if we deleted the last one */
        if ((slot_ctx->dev_info & LAST_CTX_MASK) > LAST_CTX(last_ctx)) {
                slot_ctx->dev_info &= ~LAST_CTX_MASK;
                slot_ctx->dev_info |= LAST_CTX(last_ctx);
        }
        new_slot_info = slot_ctx->dev_info;

        xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);

        xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
                        (unsigned int) ep->desc.bEndpointAddress,
                        udev->slot_id,
                        (unsigned int) new_drop_flags,
                        (unsigned int) new_add_flags,
                        (unsigned int) new_slot_info);
        return 0;
}
/* Add an endpoint to a new possible bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint until the
 * configuration or alt setting is installed in the device, so there's no need
 * for mutual exclusion to protect the xhci->devs[slot_id] structure.
 */
int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
                struct usb_host_endpoint *ep)
{
        struct xhci_hcd *xhci;
        struct xhci_container_ctx *in_ctx, *out_ctx;
        unsigned int ep_index;
        struct xhci_ep_ctx *ep_ctx;
        struct xhci_slot_ctx *slot_ctx;
        struct xhci_input_control_ctx *ctrl_ctx;
        u32 added_ctxs;
        unsigned int last_ctx;
        u32 new_add_flags, new_drop_flags, new_slot_info;
        int ret = 0;

        ret = xhci_check_args(hcd, udev, ep, 1, __func__);
        if (ret <= 0) {
                /* So we won't queue a reset ep command for a root hub */
                ep->hcpriv = NULL;
                return ret;
        }
        xhci = hcd_to_xhci(hcd);

        added_ctxs = xhci_get_endpoint_flag(&ep->desc);
        last_ctx = xhci_last_valid_endpoint(added_ctxs);
        if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
                /* FIXME when we have to issue an evaluate endpoint command to
                 * deal with ep0 max packet size changing once we get the
                 * descriptors
                 */
                xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
                                __func__, added_ctxs);
                return 0;
        }

        if (!xhci->devs || !xhci->devs[udev->slot_id]) {
                xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
                                __func__);
                return -EINVAL;
        }

        in_ctx = xhci->devs[udev->slot_id]->in_ctx;
        out_ctx = xhci->devs[udev->slot_id]->out_ctx;
        ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
        ep_index = xhci_get_endpoint_index(&ep->desc);
        ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
        /* If the HCD has already noted the endpoint is enabled,
         * ignore this request.
         */
        if (ctrl_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) {
                xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
                                __func__, ep);
                return 0;
        }

        /*
         * Configuration and alternate setting changes must be done in
         * process context, not interrupt context (or so documentation
         * for usb_set_interface() and usb_set_configuration() claim).
         */
        if (xhci_endpoint_init(xhci, xhci->devs[udev->slot_id],
                                udev, ep, GFP_NOIO) < 0) {
                dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
                                __func__, ep->desc.bEndpointAddress);
                return -ENOMEM;
        }

        ctrl_ctx->add_flags |= added_ctxs;
        new_add_flags = ctrl_ctx->add_flags;

        /* If xhci_endpoint_disable() was called for this endpoint, but the
         * xHC hasn't been notified yet through the check_bandwidth() call,
         * this re-adds a new state for the endpoint from the new endpoint
         * descriptors.  We must drop and re-add this endpoint, so we leave the
         * drop flags alone.
         */
        new_drop_flags = ctrl_ctx->drop_flags;

        slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
        /* Update the last valid endpoint context, if we just added one past */
        if ((slot_ctx->dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) {
                slot_ctx->dev_info &= ~LAST_CTX_MASK;
                slot_ctx->dev_info |= LAST_CTX(last_ctx);
        }
        new_slot_info = slot_ctx->dev_info;

        /* Store the usb_device pointer for later use */
        ep->hcpriv = udev;

        xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
                        (unsigned int) ep->desc.bEndpointAddress,
                        udev->slot_id,
                        (unsigned int) new_drop_flags,
                        (unsigned int) new_add_flags,
                        (unsigned int) new_slot_info);
        return 0;
}
static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
{
        struct xhci_input_control_ctx *ctrl_ctx;
        struct xhci_ep_ctx *ep_ctx;
        struct xhci_slot_ctx *slot_ctx;
        int i;

        /* When a device's add flag and drop flag are zero, any subsequent
         * configure endpoint command will leave that endpoint's state
         * untouched.  Make sure we don't leave any old state in the input
         * endpoint contexts.
         */
        ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
        ctrl_ctx->drop_flags = 0;
        ctrl_ctx->add_flags = 0;
        slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
        slot_ctx->dev_info &= ~LAST_CTX_MASK;
        /* Endpoint 0 is always valid */
        slot_ctx->dev_info |= LAST_CTX(1);
        for (i = 1; i < 31; ++i) {
                ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
                ep_ctx->ep_info = 0;
                ep_ctx->ep_info2 = 0;
                ep_ctx->deq = 0;
                ep_ctx->tx_info = 0;
        }
}
static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
                struct usb_device *udev, int *cmd_status)
{
        int ret;

        switch (*cmd_status) {
        case COMP_ENOMEM:
                dev_warn(&udev->dev, "Not enough host controller resources "
                                "for new device state.\n");
                ret = -ENOMEM;
                /* FIXME: can we allocate more resources for the HC? */
                break;
        case COMP_BW_ERR:
                dev_warn(&udev->dev, "Not enough bandwidth "
                                "for new device state.\n");
                ret = -ENOSPC;
                /* FIXME: can we go back to the old state? */
                break;
        case COMP_TRB_ERR:
                /* the HCD set up something wrong */
                dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
                                "add flag = 1, "
                                "and endpoint is not disabled.\n");
                ret = -EINVAL;
                break;
        case COMP_SUCCESS:
                dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
                ret = 0;
                break;
        default:
                xhci_err(xhci, "ERROR: unexpected command completion "
                                "code 0x%x.\n", *cmd_status);
                ret = -EINVAL;
                break;
        }
        return ret;
}
static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
                struct usb_device *udev, int *cmd_status)
{
        int ret;
        struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];

        switch (*cmd_status) {
        case COMP_EINVAL:
                dev_warn(&udev->dev, "WARN: xHCI driver setup invalid evaluate "
                                "context command.\n");
                ret = -EINVAL;
                break;
        case COMP_EBADSLT:
                dev_warn(&udev->dev, "WARN: slot not enabled for "
                                "evaluate context command.\n");
                /* FALL THROUGH */
        case COMP_CTX_STATE:
                dev_warn(&udev->dev, "WARN: invalid context state for "
                                "evaluate context command.\n");
                xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1);
                ret = -EINVAL;
                break;
        case COMP_SUCCESS:
                dev_dbg(&udev->dev, "Successful evaluate context command\n");
                ret = 0;
                break;
        default:
                xhci_err(xhci, "ERROR: unexpected command completion "
                                "code 0x%x.\n", *cmd_status);
                ret = -EINVAL;
                break;
        }
        return ret;
}
/* Issue a configure endpoint command or evaluate context command
 * and wait for it to finish.
 */
static int xhci_configure_endpoint(struct xhci_hcd *xhci,
                struct usb_device *udev,
                struct xhci_command *command,
                bool ctx_change, bool must_succeed)
{
        int ret;
        int timeleft;
        unsigned long flags;
        struct xhci_container_ctx *in_ctx;
        struct completion *cmd_completion;
        int *cmd_status;
        struct xhci_virt_device *virt_dev;

        spin_lock_irqsave(&xhci->lock, flags);
        virt_dev = xhci->devs[udev->slot_id];
        if (command) {
                in_ctx = command->in_ctx;
                cmd_completion = command->completion;
                cmd_status = &command->status;
                command->command_trb = xhci->cmd_ring->enqueue;
                list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
        } else {
                in_ctx = virt_dev->in_ctx;
                cmd_completion = &virt_dev->cmd_completion;
                cmd_status = &virt_dev->cmd_status;
        }

        if (!ctx_change)
                ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
                                udev->slot_id, must_succeed);
        else
                ret = xhci_queue_evaluate_context(xhci, in_ctx->dma,
                                udev->slot_id);
        if (ret < 0) {
                if (command)
                        list_del(&command->cmd_list);
                spin_unlock_irqrestore(&xhci->lock, flags);
                xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
                return -ENOMEM;
        }
        xhci_ring_cmd_db(xhci);
        spin_unlock_irqrestore(&xhci->lock, flags);

        /* Wait for the configure endpoint command to complete */
        timeleft = wait_for_completion_interruptible_timeout(
                        cmd_completion,
                        USB_CTRL_SET_TIMEOUT);
        if (timeleft <= 0) {
                xhci_warn(xhci, "%s while waiting for %s command\n",
                                timeleft == 0 ? "Timeout" : "Signal",
                                ctx_change == 0 ?
                                        "configure endpoint" :
                                        "evaluate context");
                /* FIXME cancel the configure endpoint command */
                return -ETIME;
        }

        if (!ctx_change)
                return xhci_configure_endpoint_result(xhci, udev, cmd_status);
        return xhci_evaluate_context_result(xhci, udev, cmd_status);
}
/* Called after one or more calls to xhci_add_endpoint() or
 * xhci_drop_endpoint().  If this call fails, the USB core is expected
 * to call xhci_reset_bandwidth().
 *
 * Since we are in the middle of changing either configuration or
 * installing a new alt setting, the USB core won't allow URBs to be
 * enqueued for any endpoint on the old config or interface.  Nothing
 * else should be touching the xhci->devs[slot_id] structure, so we
 * don't need to take the xhci->lock for manipulating that.
 */
int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
        int i;
        int ret = 0;
        struct xhci_hcd *xhci;
        struct xhci_virt_device *virt_dev;
        struct xhci_input_control_ctx *ctrl_ctx;
        struct xhci_slot_ctx *slot_ctx;

        ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
        if (ret <= 0)
                return ret;
        xhci = hcd_to_xhci(hcd);

        if (!udev->slot_id || !xhci->devs || !xhci->devs[udev->slot_id]) {
                xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
                                __func__);
                return -EINVAL;
        }
        xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
        virt_dev = xhci->devs[udev->slot_id];

        /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
        ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
        ctrl_ctx->add_flags |= SLOT_FLAG;
        ctrl_ctx->add_flags &= ~EP0_FLAG;
        ctrl_ctx->drop_flags &= ~SLOT_FLAG;
        ctrl_ctx->drop_flags &= ~EP0_FLAG;
        xhci_dbg(xhci, "New Input Control Context:\n");
        slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
        xhci_dbg_ctx(xhci, virt_dev->in_ctx,
                        LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));

        ret = xhci_configure_endpoint(xhci, udev, NULL,
                        false, false);
        if (ret) {
                /* Callee should call reset_bandwidth() */
                return ret;
        }

        xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
        xhci_dbg_ctx(xhci, virt_dev->out_ctx,
                        LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));

        xhci_zero_in_ctx(xhci, virt_dev);
        /* Install new rings and free or cache any old rings */
        for (i = 1; i < 31; ++i) {
                if (!virt_dev->eps[i].new_ring)
                        continue;
                /* Only cache or free the old ring if it exists.
                 * It may not if this is the first add of an endpoint.
                 */
                if (virt_dev->eps[i].ring) {
                        xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
                }
                virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
                virt_dev->eps[i].new_ring = NULL;
        }

        return ret;
}
void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
        struct xhci_hcd *xhci;
        struct xhci_virt_device *virt_dev;
        int i, ret;

        ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
        if (ret <= 0)
                return;
        xhci = hcd_to_xhci(hcd);

        if (!xhci->devs || !xhci->devs[udev->slot_id]) {
                xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
                                __func__);
                return;
        }
        xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
        virt_dev = xhci->devs[udev->slot_id];
        /* Free any rings allocated for added endpoints */
        for (i = 0; i < 31; ++i) {
                if (virt_dev->eps[i].new_ring) {
                        xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
                        virt_dev->eps[i].new_ring = NULL;
                }
        }
        xhci_zero_in_ctx(xhci, virt_dev);
}
static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
                struct xhci_container_ctx *in_ctx,
                struct xhci_container_ctx *out_ctx,
                u32 add_flags, u32 drop_flags)
{
        struct xhci_input_control_ctx *ctrl_ctx;

        ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
        ctrl_ctx->add_flags = add_flags;
        ctrl_ctx->drop_flags = drop_flags;
        xhci_slot_copy(xhci, in_ctx, out_ctx);
        ctrl_ctx->add_flags |= SLOT_FLAG;

        xhci_dbg(xhci, "Input Context:\n");
        xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
}
void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
                unsigned int slot_id, unsigned int ep_index,
                struct xhci_dequeue_state *deq_state)
{
        struct xhci_container_ctx *in_ctx;
        struct xhci_ep_ctx *ep_ctx;
        u32 added_ctxs;
        dma_addr_t addr;

        xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
                        xhci->devs[slot_id]->out_ctx, ep_index);
        in_ctx = xhci->devs[slot_id]->in_ctx;
        ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
        addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
                        deq_state->new_deq_ptr);
        if (addr == 0) {
                xhci_warn(xhci, "WARN Cannot submit config ep after "
                                "reset ep command\n");
                xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
                                deq_state->new_deq_seg,
                                deq_state->new_deq_ptr);
                return;
        }
        ep_ctx->deq = addr | deq_state->new_cycle_state;

        added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
        xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
                        xhci->devs[slot_id]->out_ctx, added_ctxs, added_ctxs);
}
void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
                struct usb_device *udev, unsigned int ep_index)
{
        struct xhci_dequeue_state deq_state;
        struct xhci_virt_ep *ep;

        xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n");
        ep = &xhci->devs[udev->slot_id]->eps[ep_index];
        /* We need to move the HW's dequeue pointer past this TD,
         * or it will attempt to resend it on the next doorbell ring.
         */
        xhci_find_new_dequeue_state(xhci, udev->slot_id,
                        ep_index, ep->stopped_td,
                        &deq_state);

        /* HW with the reset endpoint quirk will use the saved dequeue state to
         * issue a configure endpoint command later.
         */
        if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
                xhci_dbg(xhci, "Queueing new dequeue state\n");
                xhci_queue_new_dequeue_state(xhci, udev->slot_id,
                                ep_index, &deq_state);
        } else {
                /* Better hope no one uses the input context between now and the
                 * reset endpoint completion!
                 */
                xhci_dbg(xhci, "Setting up input context for "
                                "configure endpoint command\n");
                xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
                                ep_index, &deq_state);
        }
}
/* Deal with stalled endpoints.  The core should have sent the control message
 * to clear the halt condition.  However, we need to make the xHCI hardware
 * reset its sequence number, since a device will expect a sequence number of
 * zero after the halt condition is cleared.
 * Context: in_interrupt
 */
void xhci_endpoint_reset(struct usb_hcd *hcd,
                struct usb_host_endpoint *ep)
{
        struct xhci_hcd *xhci;
        struct usb_device *udev;
        unsigned int ep_index;
        unsigned long flags;
        int ret;
        struct xhci_virt_ep *virt_ep;

        xhci = hcd_to_xhci(hcd);
        udev = (struct usb_device *) ep->hcpriv;
        /* Called with a root hub endpoint (or an endpoint that wasn't added
         * with xhci_add_endpoint()
         */
        if (!ep->hcpriv)
                return;
        ep_index = xhci_get_endpoint_index(&ep->desc);
        virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index];
        if (!virt_ep->stopped_td) {
                xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n",
                                ep->desc.bEndpointAddress);
                return;
        }
        if (usb_endpoint_xfer_control(&ep->desc)) {
                xhci_dbg(xhci, "Control endpoint stall already handled.\n");
                return;
        }

        xhci_dbg(xhci, "Queueing reset endpoint command\n");
        spin_lock_irqsave(&xhci->lock, flags);
        ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index);
        /*
         * Can't change the ring dequeue pointer until it's transitioned to the
         * stopped state, which is only upon a successful reset endpoint
         * command.  Better hope that last command worked!
         */
        if (!ret) {
                xhci_cleanup_stalled_ring(xhci, udev, ep_index);
                kfree(virt_ep->stopped_td);
                xhci_ring_cmd_db(xhci);
        }
        spin_unlock_irqrestore(&xhci->lock, flags);

        if (ret)
                xhci_warn(xhci, "FIXME allocate a new ring segment\n");
}
/*
 * This submits a Reset Device Command, which will set the device state to 0,
 * set the device address to 0, and disable all the endpoints except the default
 * control endpoint.  The USB core should come back and call
 * xhci_address_device(), and then re-set up the configuration.  If this is
 * called because of a usb_reset_and_verify_device(), then the old alternate
 * settings will be re-installed through the normal bandwidth allocation
 * functions.
 *
 * Wait for the Reset Device command to finish.  Remove all structures
 * associated with the endpoints that were disabled.  Clear the input device
 * structure?  Cache the rings?  Reset the control endpoint 0 max packet size?
 */
int xhci_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
{
        int ret, i;
        unsigned long flags;
        struct xhci_hcd *xhci;
        unsigned int slot_id;
        struct xhci_virt_device *virt_dev;
        struct xhci_command *reset_device_cmd;
        int timeleft;
        int last_freed_endpoint;

        ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
        if (ret <= 0)
                return ret;
        xhci = hcd_to_xhci(hcd);
        slot_id = udev->slot_id;
        virt_dev = xhci->devs[slot_id];
        if (!virt_dev) {
                xhci_dbg(xhci, "%s called with invalid slot ID %u\n",
                                __func__, slot_id);
                return -EINVAL;
        }

        xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
        /* Allocate the command structure that holds the struct completion.
         * Assume we're in process context, since the normal device reset
         * process has to wait for the device anyway.  Storage devices are
         * reset as part of error handling, so use GFP_NOIO instead of
         * GFP_KERNEL.
         */
        reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
        if (!reset_device_cmd) {
                xhci_dbg(xhci, "Couldn't allocate command structure.\n");
                return -ENOMEM;
        }

        /* Attempt to submit the Reset Device command to the command ring */
        spin_lock_irqsave(&xhci->lock, flags);
        reset_device_cmd->command_trb = xhci->cmd_ring->enqueue;
        list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list);
        ret = xhci_queue_reset_device(xhci, slot_id);
        if (ret) {
                xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
                list_del(&reset_device_cmd->cmd_list);
                spin_unlock_irqrestore(&xhci->lock, flags);
                goto command_cleanup;
        }
        xhci_ring_cmd_db(xhci);
        spin_unlock_irqrestore(&xhci->lock, flags);

        /* Wait for the Reset Device command to finish */
        timeleft = wait_for_completion_interruptible_timeout(
                        reset_device_cmd->completion,
                        USB_CTRL_SET_TIMEOUT);
        if (timeleft <= 0) {
                xhci_warn(xhci, "%s while waiting for reset device command\n",
                                timeleft == 0 ? "Timeout" : "Signal");
                spin_lock_irqsave(&xhci->lock, flags);
                /* The timeout might have raced with the event ring handler, so
                 * only delete from the list if the item isn't poisoned.
                 */
                if (reset_device_cmd->cmd_list.next != LIST_POISON1)
                        list_del(&reset_device_cmd->cmd_list);
                spin_unlock_irqrestore(&xhci->lock, flags);
                ret = -ETIME;
                goto command_cleanup;
        }

        /* The Reset Device command can't fail, according to the 0.95/0.96 spec,
         * unless we tried to reset a slot ID that wasn't enabled,
         * or the device wasn't in the addressed or configured state.
         */
        ret = reset_device_cmd->status;
        switch (ret) {
        case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */
        case COMP_CTX_STATE: /* 0.96 completion code for same thing */
                xhci_info(xhci, "Can't reset device (slot ID %u) in %s state\n",
                                slot_id,
                                xhci_get_slot_state(xhci, virt_dev->out_ctx));
                xhci_info(xhci, "Not freeing device rings.\n");
                /* Don't treat this as an error.  May change my mind later. */
                ret = 0;
                goto command_cleanup;
        case COMP_SUCCESS:
                xhci_dbg(xhci, "Successful reset device command.\n");
                break;
        default:
                if (xhci_is_vendor_info_code(xhci, ret))
                        break;
                xhci_warn(xhci, "Unknown completion code %u for "
                                "reset device command.\n", ret);
                ret = -EINVAL;
                goto command_cleanup;
        }

        /* Everything but endpoint 0 is disabled, so free or cache the rings. */
        last_freed_endpoint = 1;
        for (i = 1; i < 31; ++i) {
                if (!virt_dev->eps[i].ring)
                        continue;
                xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
                last_freed_endpoint = i;
        }
        xhci_dbg(xhci, "Output context after successful reset device cmd:\n");
        xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint);
        ret = 0;

command_cleanup:
        xhci_free_command(xhci, reset_device_cmd);
        return ret;
}
/*
 * At this point, the struct usb_device is about to go away, the device has
 * disconnected, and all traffic has been stopped and the endpoints have been
 * disabled.  Free any HC data structures associated with that device.
 */
void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
{
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        struct xhci_virt_device *virt_dev;
        unsigned long flags;
        u32 state;
        int i;

        if (udev->slot_id == 0)
                return;
        virt_dev = xhci->devs[udev->slot_id];
        if (!virt_dev)
                return;

        /* Stop any wayward timer functions (which may grab the lock) */
        for (i = 0; i < 31; ++i) {
                virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING;
                del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
        }

        spin_lock_irqsave(&xhci->lock, flags);
        /* Don't disable the slot if the host controller is dead. */
        state = xhci_readl(xhci, &xhci->op_regs->status);
        if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) {
                xhci_free_virt_device(xhci, udev->slot_id);
                spin_unlock_irqrestore(&xhci->lock, flags);
                return;
        }

        if (xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) {
                spin_unlock_irqrestore(&xhci->lock, flags);
                xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
                return;
        }
        xhci_ring_cmd_db(xhci);
        spin_unlock_irqrestore(&xhci->lock, flags);
        /*
         * Event command completion handler will free any data structures
         * associated with the slot.  XXX Can free sleep?
         */
}
/*
 * Returns 0 if the xHC ran out of device slots, the Enable Slot command
 * timed out, or allocating memory failed.  Returns 1 on success.
 */
int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
{
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        unsigned long flags;
        int timeleft;
        int ret;

        spin_lock_irqsave(&xhci->lock, flags);
        ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
        if (ret) {
                spin_unlock_irqrestore(&xhci->lock, flags);
                xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
                return 0;
        }
        xhci_ring_cmd_db(xhci);
        spin_unlock_irqrestore(&xhci->lock, flags);

        /* XXX: how much time for xHC slot assignment? */
        timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
                        USB_CTRL_SET_TIMEOUT);
        if (timeleft <= 0) {
                xhci_warn(xhci, "%s while waiting for a slot\n",
                                timeleft == 0 ? "Timeout" : "Signal");
                /* FIXME cancel the enable slot request */
                return 0;
        }

        if (!xhci->slot_id) {
                xhci_err(xhci, "Error while assigning device slot ID\n");
                return 0;
        }
        /* xhci_alloc_virt_device() does not touch rings; no need to lock */
        if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_KERNEL)) {
                /* Disable slot, if we can do it without mem alloc */
                xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
                spin_lock_irqsave(&xhci->lock, flags);
                if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
                        xhci_ring_cmd_db(xhci);
                spin_unlock_irqrestore(&xhci->lock, flags);
                return 0;
        }
        udev->slot_id = xhci->slot_id;
        /* Is this a LS or FS device under a HS hub? */
        /* Hub or peripheral? */

        return 1;
}
/*
 * Issue an Address Device command (which will issue a SetAddress request to
 * the device).
 * We should be protected by the usb_address0_mutex in khubd's hub_port_init, so
 * we should only issue and wait on one address command at the same time.
 *
 * We add one to the device address issued by the hardware because the USB core
 * uses address 1 for the root hubs (even though they're not really devices).
 */
int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
{
        unsigned long flags;
        int timeleft;
        struct xhci_virt_device *virt_dev;
        int ret = 0;
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        struct xhci_slot_ctx *slot_ctx;
        struct xhci_input_control_ctx *ctrl_ctx;
        u64 temp_64;

        if (!udev->slot_id) {
                xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id);
                return -EINVAL;
        }

        virt_dev = xhci->devs[udev->slot_id];

        /* If this is a Set Address to an unconfigured device, setup ep 0 */
        if (!udev->config)
                xhci_setup_addressable_virt_dev(xhci, udev);
        /* Otherwise, assume the core has the device configured how it wants */
        xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
        xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);

        spin_lock_irqsave(&xhci->lock, flags);
        ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
                        udev->slot_id);
        if (ret) {
                spin_unlock_irqrestore(&xhci->lock, flags);
                xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
                return ret;
        }
        xhci_ring_cmd_db(xhci);
        spin_unlock_irqrestore(&xhci->lock, flags);

        /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
        timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
                        USB_CTRL_SET_TIMEOUT);
        /* FIXME: From section 4.3.4: "Software shall be responsible for timing
         * the SetAddress() "recovery interval" required by USB and aborting the
         * command on a timeout.
         */
        if (timeleft <= 0) {
                xhci_warn(xhci, "%s while waiting for a slot\n",
                                timeleft == 0 ? "Timeout" : "Signal");
                /* FIXME cancel the address device command */
                return -ETIME;
        }

        switch (virt_dev->cmd_status) {
        case COMP_CTX_STATE:
        case COMP_EBADSLT:
                xhci_err(xhci, "Setup ERROR: address device command for slot %d.\n",
                                udev->slot_id);
                ret = -EINVAL;
                break;
        case COMP_TX_ERR:
                dev_warn(&udev->dev, "Device not responding to set address.\n");
                ret = -EPROTO;
                break;
        case COMP_SUCCESS:
                xhci_dbg(xhci, "Successful Address Device command\n");
                break;
        default:
                xhci_err(xhci, "ERROR: unexpected command completion "
                                "code 0x%x.\n", virt_dev->cmd_status);
                xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
                xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
                ret = -EINVAL;
                break;
        }
        if (ret)
                return ret;
        temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
        xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64);
        xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n",
                        udev->slot_id,
                        &xhci->dcbaa->dev_context_ptrs[udev->slot_id],
                        (unsigned long long)
                                xhci->dcbaa->dev_context_ptrs[udev->slot_id]);
        xhci_dbg(xhci, "Output Context DMA address = %#08llx\n",
                        (unsigned long long)virt_dev->out_ctx->dma);
        xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
        xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
        xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
        xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
        /*
         * USB core uses address 1 for the roothubs, so we add one to the
         * address given back to us by the HC.
         */
        slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
        udev->devnum = (slot_ctx->dev_state & DEV_ADDR_MASK) + 1;
        /* Zero the input context control for later use */
        ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
        ctrl_ctx->add_flags = 0;
        ctrl_ctx->drop_flags = 0;

        xhci_dbg(xhci, "Device address = %d\n", udev->devnum);
        /* XXX Meh, not sure if anyone else but choose_address uses this. */
        set_bit(udev->devnum, udev->bus->devmap.devicemap);

        return 0;
}
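/*
 * Example of the address fix-up above (illustrative only): if the xHC
 * assigned the device USB address 1, udev->devnum becomes 2, because the
 * USB core reserves devnum 1 for the root hub.
 */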
/* Once a hub descriptor is fetched for a device, we need to update the xHC's
 * internal data structures for the device.
 */
int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
                struct usb_tt *tt, gfp_t mem_flags)
{
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        struct xhci_virt_device *vdev;
        struct xhci_command *config_cmd;
        struct xhci_input_control_ctx *ctrl_ctx;
        struct xhci_slot_ctx *slot_ctx;
        unsigned long flags;
        unsigned think_time;
        int ret;

        /* Ignore root hubs */
        if (!hdev->parent)
                return 0;

        vdev = xhci->devs[hdev->slot_id];
        if (!vdev) {
                xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
                return -EINVAL;
        }
        config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
        if (!config_cmd) {
                xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
                return -ENOMEM;
        }

        spin_lock_irqsave(&xhci->lock, flags);
        xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
        ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx);
        ctrl_ctx->add_flags |= SLOT_FLAG;
        slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
        slot_ctx->dev_info |= DEV_HUB;
        if (tt->multi)
                slot_ctx->dev_info |= DEV_MTT;
        if (xhci->hci_version > 0x95) {
                xhci_dbg(xhci, "xHCI version %x needs hub "
                                "TT think time and number of ports\n",
                                (unsigned int) xhci->hci_version);
                slot_ctx->dev_info2 |= XHCI_MAX_PORTS(hdev->maxchild);
                /* Set TT think time - convert from ns to FS bit times.
                 * 0 = 8 FS bit times, 1 = 16 FS bit times,
                 * 2 = 24 FS bit times, 3 = 32 FS bit times.
                 */
                think_time = tt->think_time;
                if (think_time != 0)
                        think_time = (think_time / 666) - 1;
                slot_ctx->tt_info |= TT_THINK_TIME(think_time);
        } else {
                xhci_dbg(xhci, "xHCI version %x doesn't need hub "
                                "TT think time or number of ports\n",
                                (unsigned int) xhci->hci_version);
        }
        slot_ctx->dev_state = 0;
        spin_unlock_irqrestore(&xhci->lock, flags);

        xhci_dbg(xhci, "Set up %s for hub device.\n",
                        (xhci->hci_version > 0x95) ?
                        "configure endpoint" : "evaluate context");
        xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id);
        xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0);

        /* Issue and wait for the configure endpoint or
         * evaluate context command.
         */
        if (xhci->hci_version > 0x95)
                ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
                                false, false);
        else
                ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
                                true, false);

        xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id);
        xhci_dbg_ctx(xhci, vdev->out_ctx, 0);

        xhci_free_command(xhci, config_cmd);
        return ret;
}
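/*
 * Worked example of the TT think time conversion above (illustrative only):
 * the hub descriptor reports think time in ns, and 666 ns corresponds to
 * 8 FS bit times.  A hub reporting 666 ns maps to (666 / 666) - 1 = 0
 * (8 FS bit times); one reporting 2664 ns maps to (2664 / 666) - 1 = 3
 * (32 FS bit times), the largest encodable value.
 */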
int xhci_get_frame(struct usb_hcd *hcd)
{
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        /* EHCI mods by the periodic size.  Why? */
        return xhci_readl(xhci, &xhci->run_regs->microframe_index) >> 3;
}
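/*
 * Note on the shift above: MFINDEX counts 125-microsecond microframes, and
 * there are 8 microframes per 1-ms frame, so shifting right by 3 (dividing
 * by 8) converts the microframe index into a frame number.
 */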
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");
static int __init xhci_hcd_init(void)
{
        int retval;

        retval = xhci_register_pci();
        if (retval < 0) {
                printk(KERN_DEBUG "Problem registering PCI driver.\n");
                return retval;
        }

        /*
         * Check the compiler generated sizes of structures that must be laid
         * out in specific ways for hardware access.
         */
        BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
        BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
        BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
        /* xhci_device_control has eight fields, and also
         * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
         */
        BUILD_BUG_ON(sizeof(struct xhci_device_control) != (8+8*32)*32/8);
        BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
        BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
        BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
        BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8);
        BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
        /* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
        BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
        BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
        return 0;
}
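/*
 * Sanity arithmetic for the BUILD_BUG_ONs above (illustrative only): sizes
 * are expressed as <number of 32-bit words> * 32/8 bytes.  For example, the
 * doorbell array is 256 32-bit registers, i.e. 256*32/8 = 1024 bytes, and
 * the run registers are 8 words plus 128 interrupter register sets of 8
 * words each: (8 + 8*128)*32/8 = 4128 bytes.
 */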
module_init(xhci_hcd_init);
static void __exit xhci_hcd_cleanup(void)
{
        xhci_unregister_pci();
}
module_exit(xhci_hcd_cleanup);