/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>

#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-mtk.h"

#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"

#define PORT_WAKE_BITS	(PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)

/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");

static unsigned int quirks;
module_param(quirks, uint, S_IRUGO);
MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");
49 | ||
50 | /* TODO: copied from ehci-hcd.c - can this be refactored? */ | |
51 | /* | |
52 | * xhci_handshake - spin reading hc until handshake completes or fails | |
53 | * @ptr: address of hc register to be read | |
54 | * @mask: bits to look at in result of read | |
55 | * @done: value of those bits when handshake succeeds | |
56 | * @usec: timeout in microseconds | |
57 | * | |
58 | * Returns negative errno, or zero on success | |
59 | * | |
60 | * Success happens when the "mask" bits have the specified value (hardware | |
61 | * handshake done). There are two failure modes: "usec" have passed (major | |
62 | * hardware flakeout), or the register reads as all-ones (hardware removed). | |
63 | */ | |
int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec)
{
	u32 result;

	do {
		result = readl(ptr);
		if (result == ~(u32)0)	/* card removed */
			return -ENODEV;
		result &= mask;
		if (result == done)
			return 0;
		udelay(1);
		usec--;
	} while (usec > 0);
	return -ETIMEDOUT;
}
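
/*
 * Illustrative usage (not new driver logic): callers pass the register to
 * poll plus the mask and the value they are waiting for.  For instance,
 * xhci_halt() below waits for the HCHalted status bit to become set:
 *
 *	ret = xhci_handshake(&xhci->op_regs->status,
 *			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
 */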
80 | ||
81 | /* | |
82 | * Disable interrupts and begin the xHCI halting process. | |
83 | */ | |
84 | void xhci_quiesce(struct xhci_hcd *xhci) | |
85 | { | |
86 | u32 halted; | |
87 | u32 cmd; | |
88 | u32 mask; | |
89 | ||
90 | mask = ~(XHCI_IRQS); | |
91 | halted = readl(&xhci->op_regs->status) & STS_HALT; | |
92 | if (!halted) | |
93 | mask &= ~CMD_RUN; | |
94 | ||
95 | cmd = readl(&xhci->op_regs->command); | |
96 | cmd &= mask; | |
97 | writel(cmd, &xhci->op_regs->command); | |
98 | } | |
99 | ||
100 | /* | |
101 | * Force HC into halt state. | |
102 | * | |
103 | * Disable any IRQs and clear the run/stop bit. | |
104 | * HC will complete any current and actively pipelined transactions, and | |
105 | * should halt within 16 ms of the run/stop bit being cleared. | |
106 | * Read HC Halted bit in the status register to see when the HC is finished. | |
107 | */ | |
108 | int xhci_halt(struct xhci_hcd *xhci) | |
109 | { | |
110 | int ret; | |
111 | xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC"); | |
112 | xhci_quiesce(xhci); | |
113 | ||
114 | ret = xhci_handshake(&xhci->op_regs->status, | |
115 | STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC); | |
116 | if (ret) { | |
117 | xhci_warn(xhci, "Host halt failed, %d\n", ret); | |
118 | return ret; | |
119 | } | |
120 | xhci->xhc_state |= XHCI_STATE_HALTED; | |
121 | xhci->cmd_ring_state = CMD_RING_STATE_STOPPED; | |
122 | return ret; | |
123 | } | |
124 | ||
125 | /* | |
126 | * Set the run bit and wait for the host to be running. | |
127 | */ | |
128 | static int xhci_start(struct xhci_hcd *xhci) | |
129 | { | |
130 | u32 temp; | |
131 | int ret; | |
132 | ||
133 | temp = readl(&xhci->op_regs->command); | |
134 | temp |= (CMD_RUN); | |
135 | xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.", | |
136 | temp); | |
137 | writel(temp, &xhci->op_regs->command); | |
138 | ||
139 | /* | |
140 | * Wait for the HCHalted Status bit to be 0 to indicate the host is | |
141 | * running. | |
142 | */ | |
143 | ret = xhci_handshake(&xhci->op_regs->status, | |
144 | STS_HALT, 0, XHCI_MAX_HALT_USEC); | |
145 | if (ret == -ETIMEDOUT) | |
146 | xhci_err(xhci, "Host took too long to start, " | |
147 | "waited %u microseconds.\n", | |
148 | XHCI_MAX_HALT_USEC); | |
149 | if (!ret) | |
150 | /* clear state flags. Including dying, halted or removing */ | |
151 | xhci->xhc_state = 0; | |
152 | ||
153 | return ret; | |
154 | } | |
155 | ||
156 | /* | |
157 | * Reset a halted HC. | |
158 | * | |
159 | * This resets pipelines, timers, counters, state machines, etc. | |
160 | * Transactions will be terminated immediately, and operational registers | |
161 | * will be set to their defaults. | |
162 | */ | |
163 | int xhci_reset(struct xhci_hcd *xhci) | |
164 | { | |
165 | u32 command; | |
166 | u32 state; | |
167 | int ret, i; | |
168 | ||
169 | state = readl(&xhci->op_regs->status); | |
170 | if ((state & STS_HALT) == 0) { | |
171 | xhci_warn(xhci, "Host controller not halted, aborting reset.\n"); | |
172 | return 0; | |
173 | } | |
174 | ||
175 | xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC"); | |
176 | command = readl(&xhci->op_regs->command); | |
177 | command |= CMD_RESET; | |
178 | writel(command, &xhci->op_regs->command); | |
179 | ||
	/* Existing Intel xHCI controllers require a delay of 1 ms after
	 * setting the CMD_RESET bit, and before accessing any HC registers.
	 * This allows the HC to complete the reset operation and be ready
	 * for HC register access.  Without this delay, the subsequent HC
	 * register access may very rarely result in a system hang.
	 */
	if (xhci->quirks & XHCI_INTEL_HOST)
		udelay(1000);
189 | ||
190 | ret = xhci_handshake(&xhci->op_regs->command, | |
191 | CMD_RESET, 0, 10 * 1000 * 1000); | |
192 | if (ret) | |
193 | return ret; | |
194 | ||
195 | xhci_dbg_trace(xhci, trace_xhci_dbg_init, | |
196 | "Wait for controller to be ready for doorbell rings"); | |
197 | /* | |
198 | * xHCI cannot write to any doorbells or operational registers other | |
199 | * than status until the "Controller Not Ready" flag is cleared. | |
200 | */ | |
201 | ret = xhci_handshake(&xhci->op_regs->status, | |
202 | STS_CNR, 0, 10 * 1000 * 1000); | |
203 | ||
204 | for (i = 0; i < 2; ++i) { | |
205 | xhci->bus_state[i].port_c_suspend = 0; | |
206 | xhci->bus_state[i].suspended_ports = 0; | |
207 | xhci->bus_state[i].resuming_ports = 0; | |
208 | } | |
209 | ||
210 | return ret; | |
211 | } | |
212 | ||
213 | #ifdef CONFIG_PCI | |
214 | static int xhci_free_msi(struct xhci_hcd *xhci) | |
215 | { | |
216 | int i; | |
217 | ||
218 | if (!xhci->msix_entries) | |
219 | return -EINVAL; | |
220 | ||
221 | for (i = 0; i < xhci->msix_count; i++) | |
222 | if (xhci->msix_entries[i].vector) | |
223 | free_irq(xhci->msix_entries[i].vector, | |
224 | xhci_to_hcd(xhci)); | |
225 | return 0; | |
226 | } | |
227 | ||
228 | /* | |
229 | * Set up MSI | |
230 | */ | |
231 | static int xhci_setup_msi(struct xhci_hcd *xhci) | |
232 | { | |
233 | int ret; | |
234 | struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); | |
235 | ||
236 | ret = pci_enable_msi(pdev); | |
237 | if (ret) { | |
238 | xhci_dbg_trace(xhci, trace_xhci_dbg_init, | |
239 | "failed to allocate MSI entry"); | |
240 | return ret; | |
241 | } | |
242 | ||
243 | ret = request_irq(pdev->irq, xhci_msi_irq, | |
244 | 0, "xhci_hcd", xhci_to_hcd(xhci)); | |
245 | if (ret) { | |
246 | xhci_dbg_trace(xhci, trace_xhci_dbg_init, | |
247 | "disable MSI interrupt"); | |
248 | pci_disable_msi(pdev); | |
249 | } | |
250 | ||
251 | return ret; | |
252 | } | |
253 | ||
254 | /* | |
255 | * Free IRQs | |
256 | * free all IRQs request | |
257 | */ | |
static void xhci_free_irq(struct xhci_hcd *xhci)
{
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	int ret;

	/* return if using legacy interrupt */
	if (xhci_to_hcd(xhci)->irq > 0)
		return;

	ret = xhci_free_msi(xhci);
	if (!ret)
		return;
	if (pdev->irq > 0)
		free_irq(pdev->irq, xhci_to_hcd(xhci));

	return;
}
275 | ||
276 | /* | |
277 | * Set up MSI-X | |
278 | */ | |
279 | static int xhci_setup_msix(struct xhci_hcd *xhci) | |
280 | { | |
281 | int i, ret = 0; | |
282 | struct usb_hcd *hcd = xhci_to_hcd(xhci); | |
283 | struct pci_dev *pdev = to_pci_dev(hcd->self.controller); | |
284 | ||
	/*
	 * Calculate the number of MSI-X vectors to request:
	 * - HCS_MAX_INTRS: the maximum number of interrupters the host
	 *   supports, taken from the xHCI HCSPARAMS1 register.
	 * - num_online_cpus: one MSI-X vector per online CPU core, plus
	 *   one extra vector so an interrupt is always available.
	 */
	xhci->msix_count = min(num_online_cpus() + 1,
				HCS_MAX_INTRS(xhci->hcs_params1));

	xhci->msix_entries =
		kmalloc((sizeof(struct msix_entry))*xhci->msix_count,
				GFP_KERNEL);
	if (!xhci->msix_entries)
		return -ENOMEM;

	for (i = 0; i < xhci->msix_count; i++) {
		xhci->msix_entries[i].entry = i;
		xhci->msix_entries[i].vector = 0;
	}

	ret = pci_enable_msix_exact(pdev, xhci->msix_entries, xhci->msix_count);
	if (ret) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"Failed to enable MSI-X");
		goto free_entries;
	}

	for (i = 0; i < xhci->msix_count; i++) {
		ret = request_irq(xhci->msix_entries[i].vector,
				xhci_msi_irq,
				0, "xhci_hcd", xhci_to_hcd(xhci));
		if (ret)
			goto disable_msix;
	}

	hcd->msix_enabled = 1;
	return ret;

disable_msix:
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "disable MSI-X interrupt");
	xhci_free_irq(xhci);
	pci_disable_msix(pdev);
free_entries:
	kfree(xhci->msix_entries);
	xhci->msix_entries = NULL;
	return ret;
}
333 | ||
334 | /* Free any IRQs and disable MSI-X */ | |
335 | static void xhci_cleanup_msix(struct xhci_hcd *xhci) | |
336 | { | |
337 | struct usb_hcd *hcd = xhci_to_hcd(xhci); | |
338 | struct pci_dev *pdev = to_pci_dev(hcd->self.controller); | |
339 | ||
340 | if (xhci->quirks & XHCI_PLAT) | |
341 | return; | |
342 | ||
343 | xhci_free_irq(xhci); | |
344 | ||
345 | if (xhci->msix_entries) { | |
346 | pci_disable_msix(pdev); | |
347 | kfree(xhci->msix_entries); | |
348 | xhci->msix_entries = NULL; | |
349 | } else { | |
350 | pci_disable_msi(pdev); | |
351 | } | |
352 | ||
353 | hcd->msix_enabled = 0; | |
354 | return; | |
355 | } | |
356 | ||
357 | static void __maybe_unused xhci_msix_sync_irqs(struct xhci_hcd *xhci) | |
358 | { | |
359 | int i; | |
360 | ||
361 | if (xhci->msix_entries) { | |
362 | for (i = 0; i < xhci->msix_count; i++) | |
363 | synchronize_irq(xhci->msix_entries[i].vector); | |
364 | } | |
365 | } | |
366 | ||
367 | static int xhci_try_enable_msi(struct usb_hcd *hcd) | |
368 | { | |
369 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | |
370 | struct pci_dev *pdev; | |
371 | int ret; | |
372 | ||
373 | /* The xhci platform device has set up IRQs through usb_add_hcd. */ | |
374 | if (xhci->quirks & XHCI_PLAT) | |
375 | return 0; | |
376 | ||
377 | pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); | |
378 | /* | |
379 | * Some Fresco Logic host controllers advertise MSI, but fail to | |
380 | * generate interrupts. Don't even try to enable MSI. | |
381 | */ | |
382 | if (xhci->quirks & XHCI_BROKEN_MSI) | |
383 | goto legacy_irq; | |
384 | ||
385 | /* unregister the legacy interrupt */ | |
386 | if (hcd->irq) | |
387 | free_irq(hcd->irq, hcd); | |
388 | hcd->irq = 0; | |
389 | ||
390 | ret = xhci_setup_msix(xhci); | |
391 | if (ret) | |
392 | /* fall back to msi*/ | |
393 | ret = xhci_setup_msi(xhci); | |
394 | ||
395 | if (!ret) | |
396 | /* hcd->irq is 0, we have MSI */ | |
397 | return 0; | |
398 | ||
399 | if (!pdev->irq) { | |
400 | xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n"); | |
401 | return -EINVAL; | |
402 | } | |
403 | ||
404 | legacy_irq: | |
405 | if (!strlen(hcd->irq_descr)) | |
406 | snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d", | |
407 | hcd->driver->description, hcd->self.busnum); | |
408 | ||
409 | /* fall back to legacy interrupt*/ | |
410 | ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED, | |
411 | hcd->irq_descr, hcd); | |
412 | if (ret) { | |
413 | xhci_err(xhci, "request interrupt %d failed\n", | |
414 | pdev->irq); | |
415 | return ret; | |
416 | } | |
417 | hcd->irq = pdev->irq; | |
418 | return 0; | |
419 | } | |
420 | ||
421 | #else | |
422 | ||
423 | static inline int xhci_try_enable_msi(struct usb_hcd *hcd) | |
424 | { | |
425 | return 0; | |
426 | } | |
427 | ||
428 | static inline void xhci_cleanup_msix(struct xhci_hcd *xhci) | |
429 | { | |
430 | } | |
431 | ||
432 | static inline void xhci_msix_sync_irqs(struct xhci_hcd *xhci) | |
433 | { | |
434 | } | |
435 | ||
436 | #endif | |
437 | ||
static void compliance_mode_recovery(unsigned long arg)
{
	struct xhci_hcd *xhci;
	struct usb_hcd *hcd;
	u32 temp;
	int i;

	xhci = (struct xhci_hcd *)arg;

	for (i = 0; i < xhci->num_usb3_ports; i++) {
		temp = readl(xhci->usb3_ports[i]);
		if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
			/*
			 * Compliance Mode Detected. Letting USB Core
			 * handle the Warm Reset
			 */
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
					"Compliance mode detected->port %d",
					i + 1);
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
					"Attempting compliance mode recovery");
			hcd = xhci->shared_hcd;

			if (hcd->state == HC_STATE_SUSPENDED)
				usb_hcd_resume_root_hub(hcd);

			usb_hcd_poll_rh_status(hcd);
		}
	}

	if (xhci->port_status_u0 != ((1 << xhci->num_usb3_ports)-1))
		mod_timer(&xhci->comp_mode_recovery_timer,
			jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
}
472 | ||
473 | /* | |
474 | * Quirk to work around issue generated by the SN65LVPE502CP USB3.0 re-driver | |
475 | * that causes ports behind that hardware to enter compliance mode sometimes. | |
476 | * The quirk creates a timer that polls every 2 seconds the link state of | |
477 | * each host controller's port and recovers it by issuing a Warm reset | |
478 | * if Compliance mode is detected, otherwise the port will become "dead" (no | |
479 | * device connections or disconnections will be detected anymore). Becasue no | |
480 | * status event is generated when entering compliance mode (per xhci spec), | |
481 | * this quirk is needed on systems that have the failing hardware installed. | |
482 | */ | |
static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
{
	xhci->port_status_u0 = 0;
	setup_timer(&xhci->comp_mode_recovery_timer,
			compliance_mode_recovery, (unsigned long)xhci);
	xhci->comp_mode_recovery_timer.expires = jiffies +
			msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);

	add_timer(&xhci->comp_mode_recovery_timer);
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"Compliance mode recovery timer initialized");
}
495 | ||
496 | /* | |
497 | * This function identifies the systems that have installed the SN65LVPE502CP | |
498 | * USB3.0 re-driver and that need the Compliance Mode Quirk. | |
499 | * Systems: | |
500 | * Vendor: Hewlett-Packard -> System Models: Z420, Z620 and Z820 | |
501 | */ | |
static bool xhci_compliance_mode_recovery_timer_quirk_check(void)
{
	const char *dmi_product_name, *dmi_sys_vendor;

	dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
	dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
	if (!dmi_product_name || !dmi_sys_vendor)
		return false;

	if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))
		return false;

	if (strstr(dmi_product_name, "Z420") ||
			strstr(dmi_product_name, "Z620") ||
			strstr(dmi_product_name, "Z820") ||
			strstr(dmi_product_name, "Z1 Workstation"))
		return true;

	return false;
}
522 | ||
523 | static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci) | |
524 | { | |
525 | return (xhci->port_status_u0 == ((1 << xhci->num_usb3_ports)-1)); | |
526 | } | |
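
/*
 * Illustrative example: port_status_u0 keeps one bit per USB3 port.  With
 * num_usb3_ports == 4, the "all ports seen U0" mask is (1 << 4) - 1 = 0xf,
 * so the recovery timer is only kept running while at least one port bit
 * is still clear.
 */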
527 | ||
528 | ||
529 | /* | |
530 | * Initialize memory for HCD and xHC (one-time init). | |
531 | * | |
532 | * Program the PAGESIZE register, initialize the device context array, create | |
533 | * device contexts (?), set up a command ring segment (or two?), create event | |
534 | * ring (one for now). | |
535 | */ | |
int xhci_init(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int retval = 0;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init");
	spin_lock_init(&xhci->lock);
	if (xhci->hci_version == 0x95 && link_quirk) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"QUIRK: Not clearing Link TRB chain bits.");
		xhci->quirks |= XHCI_LINK_TRB_QUIRK;
	} else {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"xHCI doesn't need link TRB QUIRK");
	}
	retval = xhci_mem_init(xhci, GFP_KERNEL);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init");

	/* Initializing Compliance Mode Recovery Data If Needed */
	if (xhci_compliance_mode_recovery_timer_quirk_check()) {
		xhci->quirks |= XHCI_COMP_MODE_QUIRK;
		compliance_mode_recovery_timer_init(xhci);
	}

	return retval;
}
562 | ||
563 | /*-------------------------------------------------------------------------*/ | |
564 | ||
565 | ||
566 | static int xhci_run_finished(struct xhci_hcd *xhci) | |
567 | { | |
568 | if (xhci_start(xhci)) { | |
569 | xhci_halt(xhci); | |
570 | return -ENODEV; | |
571 | } | |
572 | xhci->shared_hcd->state = HC_STATE_RUNNING; | |
573 | xhci->cmd_ring_state = CMD_RING_STATE_RUNNING; | |
574 | ||
575 | if (xhci->quirks & XHCI_NEC_HOST) | |
576 | xhci_ring_cmd_db(xhci); | |
577 | ||
578 | xhci_dbg_trace(xhci, trace_xhci_dbg_init, | |
579 | "Finished xhci_run for USB3 roothub"); | |
580 | return 0; | |
581 | } | |
582 | ||
583 | /* | |
584 | * Start the HC after it was halted. | |
585 | * | |
586 | * This function is called by the USB core when the HC driver is added. | |
587 | * Its opposite is xhci_stop(). | |
588 | * | |
589 | * xhci_init() must be called once before this function can be called. | |
590 | * Reset the HC, enable device slot contexts, program DCBAAP, and | |
591 | * set command ring pointer and event ring pointer. | |
592 | * | |
593 | * Setup MSI-X vectors and enable interrupts. | |
594 | */ | |
int xhci_run(struct usb_hcd *hcd)
{
	u32 temp;
	u64 temp_64;
	int ret;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	/* Start the xHCI host controller running only after the USB 2.0 roothub
	 * is set up.
	 */

	hcd->uses_new_polling = 1;
	if (!usb_hcd_is_primary_hcd(hcd))
		return xhci_run_finished(xhci);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run");

	ret = xhci_try_enable_msi(hcd);
	if (ret)
		return ret;

	xhci_dbg(xhci, "Command ring memory map follows:\n");
	xhci_debug_ring(xhci, xhci->cmd_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);

	xhci_dbg(xhci, "ERST memory map follows:\n");
	xhci_dbg_erst(xhci, &xhci->erst);
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_ring(xhci, xhci->event_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"ERST deq = 64'h%0lx", (long unsigned int) temp_64);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Set the interrupt modulation register");
	temp = readl(&xhci->ir_set->irq_control);
	temp &= ~ER_IRQ_INTERVAL_MASK;
	/*
	 * the increment interval is 8 times as much as that defined
	 * in xHCI spec on MTK's controller
	 */
	temp |= (u32) ((xhci->quirks & XHCI_MTK_HOST) ? 20 : 160);
	writel(temp, &xhci->ir_set->irq_control);
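	/*
	 * Worked example (assuming the xHCI-spec IMOD granularity of 250 ns):
	 * 160 increments * 250 ns = 40 us between interrupts.  With MTK's 8x
	 * larger increment (2 us), 20 increments gives the same ~40 us
	 * moderation interval.
	 */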
641 | ||
642 | /* Set the HCD state before we enable the irqs */ | |
643 | temp = readl(&xhci->op_regs->command); | |
644 | temp |= (CMD_EIE); | |
645 | xhci_dbg_trace(xhci, trace_xhci_dbg_init, | |
646 | "// Enable interrupts, cmd = 0x%x.", temp); | |
647 | writel(temp, &xhci->op_regs->command); | |
648 | ||
649 | temp = readl(&xhci->ir_set->irq_pending); | |
650 | xhci_dbg_trace(xhci, trace_xhci_dbg_init, | |
651 | "// Enabling event ring interrupter %p by writing 0x%x to irq_pending", | |
652 | xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp)); | |
653 | writel(ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending); | |
654 | xhci_print_ir_set(xhci, 0); | |
655 | ||
656 | if (xhci->quirks & XHCI_NEC_HOST) { | |
657 | struct xhci_command *command; | |
658 | command = xhci_alloc_command(xhci, false, false, GFP_KERNEL); | |
659 | if (!command) | |
660 | return -ENOMEM; | |
661 | xhci_queue_vendor_command(xhci, command, 0, 0, 0, | |
662 | TRB_TYPE(TRB_NEC_GET_FW)); | |
663 | } | |
664 | xhci_dbg_trace(xhci, trace_xhci_dbg_init, | |
665 | "Finished xhci_run for USB2 roothub"); | |
666 | return 0; | |
667 | } | |
668 | EXPORT_SYMBOL_GPL(xhci_run); | |
669 | ||
670 | /* | |
671 | * Stop xHCI driver. | |
672 | * | |
673 | * This function is called by the USB core when the HC driver is removed. | |
674 | * Its opposite is xhci_run(). | |
675 | * | |
676 | * Disable device contexts, disable IRQs, and quiesce the HC. | |
677 | * Reset the HC, finish any completed transactions, and cleanup memory. | |
678 | */ | |
void xhci_stop(struct usb_hcd *hcd)
{
	u32 temp;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	mutex_lock(&xhci->mutex);

	if (!(xhci->xhc_state & XHCI_STATE_HALTED)) {
		spin_lock_irq(&xhci->lock);

		xhci->xhc_state |= XHCI_STATE_HALTED;
		xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
		xhci_halt(xhci);
		xhci_reset(xhci);

		spin_unlock_irq(&xhci->lock);
	}

	if (!usb_hcd_is_primary_hcd(hcd)) {
		mutex_unlock(&xhci->mutex);
		return;
	}

	xhci_cleanup_msix(xhci);

	/* Deleting Compliance Mode Recovery Timer */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
			(!(xhci_all_ports_seen_u0(xhci)))) {
		del_timer_sync(&xhci->comp_mode_recovery_timer);
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"%s: compliance mode recovery timer deleted",
				__func__);
	}

	if (xhci->quirks & XHCI_AMD_PLL_FIX)
		usb_amd_dev_put();

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Disabling event ring interrupts");
	temp = readl(&xhci->op_regs->status);
	writel(temp & ~STS_EINT, &xhci->op_regs->status);
	temp = readl(&xhci->ir_set->irq_pending);
	writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, 0);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
	xhci_mem_cleanup(xhci);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"xhci_stop completed - status = %x",
			readl(&xhci->op_regs->status));
	mutex_unlock(&xhci->mutex);
}
731 | ||
732 | /* | |
733 | * Shutdown HC (not bus-specific) | |
734 | * | |
735 | * This is called when the machine is rebooting or halting. We assume that the | |
736 | * machine will be powered off, and the HC's internal state will be reset. | |
737 | * Don't bother to free memory. | |
738 | * | |
739 | * This will only ever be called with the main usb_hcd (the USB3 roothub). | |
740 | */ | |
741 | void xhci_shutdown(struct usb_hcd *hcd) | |
742 | { | |
743 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | |
744 | ||
745 | if (xhci->quirks & XHCI_SPURIOUS_REBOOT) | |
746 | usb_disable_xhci_ports(to_pci_dev(hcd->self.controller)); | |
747 | ||
748 | spin_lock_irq(&xhci->lock); | |
749 | xhci_halt(xhci); | |
750 | /* Workaround for spurious wakeups at shutdown with HSW */ | |
751 | if (xhci->quirks & XHCI_SPURIOUS_WAKEUP) | |
752 | xhci_reset(xhci); | |
753 | spin_unlock_irq(&xhci->lock); | |
754 | ||
755 | xhci_cleanup_msix(xhci); | |
756 | ||
757 | xhci_dbg_trace(xhci, trace_xhci_dbg_init, | |
758 | "xhci_shutdown completed - status = %x", | |
759 | readl(&xhci->op_regs->status)); | |
760 | ||
761 | /* Yet another workaround for spurious wakeups at shutdown with HSW */ | |
762 | if (xhci->quirks & XHCI_SPURIOUS_WAKEUP) | |
763 | pci_set_power_state(to_pci_dev(hcd->self.controller), PCI_D3hot); | |
764 | } | |
765 | ||
766 | #ifdef CONFIG_PM | |
767 | static void xhci_save_registers(struct xhci_hcd *xhci) | |
768 | { | |
769 | xhci->s3.command = readl(&xhci->op_regs->command); | |
770 | xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification); | |
771 | xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); | |
772 | xhci->s3.config_reg = readl(&xhci->op_regs->config_reg); | |
773 | xhci->s3.erst_size = readl(&xhci->ir_set->erst_size); | |
774 | xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base); | |
775 | xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); | |
776 | xhci->s3.irq_pending = readl(&xhci->ir_set->irq_pending); | |
777 | xhci->s3.irq_control = readl(&xhci->ir_set->irq_control); | |
778 | } | |
779 | ||
780 | static void xhci_restore_registers(struct xhci_hcd *xhci) | |
781 | { | |
782 | writel(xhci->s3.command, &xhci->op_regs->command); | |
783 | writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification); | |
784 | xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr); | |
785 | writel(xhci->s3.config_reg, &xhci->op_regs->config_reg); | |
786 | writel(xhci->s3.erst_size, &xhci->ir_set->erst_size); | |
787 | xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base); | |
788 | xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue); | |
789 | writel(xhci->s3.irq_pending, &xhci->ir_set->irq_pending); | |
790 | writel(xhci->s3.irq_control, &xhci->ir_set->irq_control); | |
791 | } | |
792 | ||
793 | static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci) | |
794 | { | |
795 | u64 val_64; | |
796 | ||
797 | /* step 2: initialize command ring buffer */ | |
798 | val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); | |
799 | val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) | | |
800 | (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, | |
801 | xhci->cmd_ring->dequeue) & | |
802 | (u64) ~CMD_RING_RSVD_BITS) | | |
803 | xhci->cmd_ring->cycle_state; | |
804 | xhci_dbg_trace(xhci, trace_xhci_dbg_init, | |
805 | "// Setting command ring address to 0x%llx", | |
806 | (long unsigned long) val_64); | |
807 | xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring); | |
808 | } | |
809 | ||
810 | /* | |
811 | * The whole command ring must be cleared to zero when we suspend the host. | |
812 | * | |
813 | * The host doesn't save the command ring pointer in the suspend well, so we | |
814 | * need to re-program it on resume. Unfortunately, the pointer must be 64-byte | |
815 | * aligned, because of the reserved bits in the command ring dequeue pointer | |
816 | * register. Therefore, we can't just set the dequeue pointer back in the | |
817 | * middle of the ring (TRBs are 16-byte aligned). | |
818 | */ | |
static void xhci_clear_command_ring(struct xhci_hcd *xhci)
{
	struct xhci_ring *ring;
	struct xhci_segment *seg;

	ring = xhci->cmd_ring;
	seg = ring->deq_seg;
	do {
		memset(seg->trbs, 0,
				sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
		seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
			cpu_to_le32(~TRB_CYCLE);
		seg = seg->next;
	} while (seg != ring->deq_seg);

	/* Reset the software enqueue and dequeue pointers */
	ring->deq_seg = ring->first_seg;
	ring->dequeue = ring->first_seg->trbs;
	ring->enq_seg = ring->deq_seg;
	ring->enqueue = ring->dequeue;

	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
	/*
	 * Ring is now zeroed, so the HW should look for change of ownership
	 * when the cycle bit is set to 1.
	 */
	ring->cycle_state = 1;

	/*
	 * Reset the hardware dequeue pointer.
	 * Yes, this will need to be re-written after resume, but we're paranoid
	 * and want to make sure the hardware doesn't access bogus memory
	 * because, say, the BIOS or an SMI started the host without changing
	 * the command ring pointers.
	 */
	xhci_set_cmd_ring_deq(xhci);
}
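
/*
 * Worked example of the num_trbs_free arithmetic above (assuming
 * TRBS_PER_SEGMENT is 64): a one-segment command ring has
 * 1 * (64 - 1) - 1 = 62 usable TRBs.  The last TRB of each segment is the
 * link TRB; the additional -1 is presumably the usual ring-buffer
 * convention of keeping one slot free so enqueue == dequeue unambiguously
 * means an empty ring.
 */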
856 | ||
857 | static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci) | |
858 | { | |
859 | int port_index; | |
860 | __le32 __iomem **port_array; | |
861 | unsigned long flags; | |
862 | u32 t1, t2; | |
863 | ||
864 | spin_lock_irqsave(&xhci->lock, flags); | |
865 | ||
866 | /* disble usb3 ports Wake bits*/ | |
867 | port_index = xhci->num_usb3_ports; | |
868 | port_array = xhci->usb3_ports; | |
869 | while (port_index--) { | |
870 | t1 = readl(port_array[port_index]); | |
871 | t1 = xhci_port_state_to_neutral(t1); | |
872 | t2 = t1 & ~PORT_WAKE_BITS; | |
873 | if (t1 != t2) | |
874 | writel(t2, port_array[port_index]); | |
875 | } | |
876 | ||
877 | /* disble usb2 ports Wake bits*/ | |
878 | port_index = xhci->num_usb2_ports; | |
879 | port_array = xhci->usb2_ports; | |
880 | while (port_index--) { | |
881 | t1 = readl(port_array[port_index]); | |
882 | t1 = xhci_port_state_to_neutral(t1); | |
883 | t2 = t1 & ~PORT_WAKE_BITS; | |
884 | if (t1 != t2) | |
885 | writel(t2, port_array[port_index]); | |
886 | } | |
887 | ||
888 | spin_unlock_irqrestore(&xhci->lock, flags); | |
889 | } | |
890 | ||
891 | /* | |
892 | * Stop HC (not bus-specific) | |
893 | * | |
894 | * This is called when the machine transition into S3/S4 mode. | |
895 | * | |
896 | */ | |
int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
{
	int rc = 0;
	unsigned int delay = XHCI_MAX_HALT_USEC;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	u32 command;

	if (!hcd->state)
		return 0;

	if (hcd->state != HC_STATE_SUSPENDED ||
			xhci->shared_hcd->state != HC_STATE_SUSPENDED)
		return -EINVAL;

	/* Clear root port wake on bits if wakeup not allowed. */
	if (!do_wakeup)
		xhci_disable_port_wake_on_bits(xhci);

	/* Don't poll the roothubs on bus suspend. */
	xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
	clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	del_timer_sync(&hcd->rh_timer);
	clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
	del_timer_sync(&xhci->shared_hcd->rh_timer);

	spin_lock_irq(&xhci->lock);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
	/* step 1: stop endpoints */
	/* skipped, assuming that port suspend has already been done */

	/* step 2: clear Run/Stop bit */
	command = readl(&xhci->op_regs->command);
	command &= ~CMD_RUN;
	writel(command, &xhci->op_regs->command);

	/* Some chips from Fresco Logic need an extraordinary delay */
	delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;

	if (xhci_handshake(&xhci->op_regs->status,
			STS_HALT, STS_HALT, delay)) {
		xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	xhci_clear_command_ring(xhci);

	/* step 3: save registers */
	xhci_save_registers(xhci);

	/* step 4: set CSS flag */
	command = readl(&xhci->op_regs->command);
	command |= CMD_CSS;
	writel(command, &xhci->op_regs->command);
	if (xhci_handshake(&xhci->op_regs->status,
			STS_SAVE, 0, 10 * 1000)) {
		xhci_warn(xhci, "WARN: xHC save state timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	spin_unlock_irq(&xhci->lock);

	/*
	 * Deleting Compliance Mode Recovery Timer because the xHCI Host
	 * is about to be suspended.
	 */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
			(!(xhci_all_ports_seen_u0(xhci)))) {
		del_timer_sync(&xhci->comp_mode_recovery_timer);
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"%s: compliance mode recovery timer deleted",
				__func__);
	}

	/* step 5: remove core well power */
	/* synchronize irq when using MSI-X */
	xhci_msix_sync_irqs(xhci);

	return rc;
}
EXPORT_SYMBOL_GPL(xhci_suspend);

/*
 * start xHC (not bus-specific)
 *
 * This is called when the machine transitions out of S3/S4 mode.
 */
int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
{
	u32 command, temp = 0, status;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct usb_hcd *secondary_hcd;
	int retval = 0;
	bool comp_timer_running = false;

	if (!hcd->state)
		return 0;

	/* Wait a bit if either of the roothubs needs to settle from the
	 * transition into bus suspend.
	 */
	if (time_before(jiffies, xhci->bus_state[0].next_statechange) ||
			time_before(jiffies,
				xhci->bus_state[1].next_statechange))
		msleep(100);
1003 | ||
1004 | set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); | |
1005 | set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags); | |
1006 | ||
1007 | spin_lock_irq(&xhci->lock); | |
1008 | if (xhci->quirks & XHCI_RESET_ON_RESUME) | |
1009 | hibernated = true; | |
1010 | ||
1011 | if (!hibernated) { | |
1012 | /* step 1: restore register */ | |
1013 | xhci_restore_registers(xhci); | |
1014 | /* step 2: initialize command ring buffer */ | |
1015 | xhci_set_cmd_ring_deq(xhci); | |
1016 | /* step 3: restore state and start state*/ | |
1017 | /* step 3: set CRS flag */ | |
1018 | command = readl(&xhci->op_regs->command); | |
1019 | command |= CMD_CRS; | |
1020 | writel(command, &xhci->op_regs->command); | |
1021 | if (xhci_handshake(&xhci->op_regs->status, | |
1022 | STS_RESTORE, 0, 10 * 1000)) { | |
1023 | xhci_warn(xhci, "WARN: xHC restore state timeout\n"); | |
1024 | spin_unlock_irq(&xhci->lock); | |
1025 | return -ETIMEDOUT; | |
1026 | } | |
1027 | temp = readl(&xhci->op_regs->status); | |
1028 | } | |
1029 | ||
1030 | /* If restore operation fails, re-initialize the HC during resume */ | |
1031 | if ((temp & STS_SRE) || hibernated) { | |
1032 | ||
1033 | if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && | |
1034 | !(xhci_all_ports_seen_u0(xhci))) { | |
1035 | del_timer_sync(&xhci->comp_mode_recovery_timer); | |
1036 | xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, | |
1037 | "Compliance Mode Recovery Timer deleted!"); | |
1038 | } | |
1039 | ||
1040 | /* Let the USB core know _both_ roothubs lost power. */ | |
1041 | usb_root_hub_lost_power(xhci->main_hcd->self.root_hub); | |
1042 | usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub); | |
1043 | ||
1044 | xhci_dbg(xhci, "Stop HCD\n"); | |
1045 | xhci_halt(xhci); | |
1046 | xhci_reset(xhci); | |
1047 | spin_unlock_irq(&xhci->lock); | |
1048 | xhci_cleanup_msix(xhci); | |
1049 | ||
1050 | xhci_dbg(xhci, "// Disabling event ring interrupts\n"); | |
1051 | temp = readl(&xhci->op_regs->status); | |
1052 | writel(temp & ~STS_EINT, &xhci->op_regs->status); | |
1053 | temp = readl(&xhci->ir_set->irq_pending); | |
1054 | writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending); | |
1055 | xhci_print_ir_set(xhci, 0); | |
1056 | ||
1057 | xhci_dbg(xhci, "cleaning up memory\n"); | |
1058 | xhci_mem_cleanup(xhci); | |
1059 | xhci_dbg(xhci, "xhci_stop completed - status = %x\n", | |
1060 | readl(&xhci->op_regs->status)); | |
1061 | ||
1062 | /* USB core calls the PCI reinit and start functions twice: | |
1063 | * first with the primary HCD, and then with the secondary HCD. | |
1064 | * If we don't do the same, the host will never be started. | |
1065 | */ | |
1066 | if (!usb_hcd_is_primary_hcd(hcd)) | |
1067 | secondary_hcd = hcd; | |
1068 | else | |
1069 | secondary_hcd = xhci->shared_hcd; | |
1070 | ||
1071 | xhci_dbg(xhci, "Initialize the xhci_hcd\n"); | |
1072 | retval = xhci_init(hcd->primary_hcd); | |
1073 | if (retval) | |
1074 | return retval; | |
1075 | comp_timer_running = true; | |
1076 | ||
1077 | xhci_dbg(xhci, "Start the primary HCD\n"); | |
1078 | retval = xhci_run(hcd->primary_hcd); | |
1079 | if (!retval) { | |
1080 | xhci_dbg(xhci, "Start the secondary HCD\n"); | |
1081 | retval = xhci_run(secondary_hcd); | |
1082 | } | |
1083 | hcd->state = HC_STATE_SUSPENDED; | |
1084 | xhci->shared_hcd->state = HC_STATE_SUSPENDED; | |
1085 | goto done; | |
1086 | } | |
1087 | ||
1088 | /* step 4: set Run/Stop bit */ | |
1089 | command = readl(&xhci->op_regs->command); | |
1090 | command |= CMD_RUN; | |
1091 | writel(command, &xhci->op_regs->command); | |
1092 | xhci_handshake(&xhci->op_regs->status, STS_HALT, | |
1093 | 0, 250 * 1000); | |
1094 | ||
1095 | /* step 5: walk topology and initialize portsc, | |
1096 | * portpmsc and portli | |
1097 | */ | |
1098 | /* this is done in bus_resume */ | |
1099 | ||
1100 | /* step 6: restart each of the previously | |
1101 | * Running endpoints by ringing their doorbells | |
1102 | */ | |
1103 | ||
1104 | spin_unlock_irq(&xhci->lock); | |
1105 | ||
done:
	if (retval == 0) {
		/* Resume root hubs only when there are pending events. */
		status = readl(&xhci->op_regs->status);
		if (status & STS_EINT) {
			usb_hcd_resume_root_hub(xhci->shared_hcd);
			usb_hcd_resume_root_hub(hcd);
		}
	}
1115 | ||
1116 | /* | |
1117 | * If system is subject to the Quirk, Compliance Mode Timer needs to | |
1118 | * be re-initialized Always after a system resume. Ports are subject | |
1119 | * to suffer the Compliance Mode issue again. It doesn't matter if | |
1120 | * ports have entered previously to U0 before system's suspension. | |
1121 | */ | |
1122 | if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running) | |
1123 | compliance_mode_recovery_timer_init(xhci); | |
1124 | ||
1125 | /* Re-enable port polling. */ | |
1126 | xhci_dbg(xhci, "%s: starting port polling.\n", __func__); | |
1127 | set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); | |
1128 | usb_hcd_poll_rh_status(xhci->shared_hcd); | |
1129 | set_bit(HCD_FLAG_POLL_RH, &hcd->flags); | |
1130 | usb_hcd_poll_rh_status(hcd); | |
1131 | ||
1132 | return retval; | |
1133 | } | |
1134 | EXPORT_SYMBOL_GPL(xhci_resume); | |
1135 | #endif /* CONFIG_PM */ | |
1136 | ||
1137 | /*-------------------------------------------------------------------------*/ | |
1138 | ||
/**
 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
 * HCDs.  Find the index for an endpoint given its descriptor.  Use the return
 * value to right shift 1 for the bitmask.
 *
 * Index = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
	unsigned int index;
	if (usb_endpoint_xfer_control(desc))
		index = (unsigned int) (usb_endpoint_num(desc)*2);
	else
		index = (unsigned int) (usb_endpoint_num(desc)*2) +
			(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
	return index;
}
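
/*
 * Worked examples of the index formula above (illustrative):
 *   ep 1 IN  (bEndpointAddress 0x81): (1 * 2) + 1 - 1 = 2
 *   ep 2 OUT (bEndpointAddress 0x02): (2 * 2) + 0 - 1 = 3
 *   ep 0 (control):                   (0 * 2)         = 0
 */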
1159 | ||
1160 | /* The reverse operation to xhci_get_endpoint_index. Calculate the USB endpoint | |
1161 | * address from the XHCI endpoint index. | |
1162 | */ | |
1163 | unsigned int xhci_get_endpoint_address(unsigned int ep_index) | |
1164 | { | |
1165 | unsigned int number = DIV_ROUND_UP(ep_index, 2); | |
1166 | unsigned int direction = ep_index % 2 ? USB_DIR_OUT : USB_DIR_IN; | |
1167 | return direction | number; | |
1168 | } | |
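
/*
 * Illustrative examples, inverting the cases shown above:
 *   ep_index 2: DIV_ROUND_UP(2, 2) = 1, 2 % 2 == 0 -> USB_DIR_IN  -> 0x81
 *   ep_index 3: DIV_ROUND_UP(3, 2) = 2, 3 % 2 == 1 -> USB_DIR_OUT -> 0x02
 */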
1169 | ||
1170 | /* Find the flag for this endpoint (for use in the control context). Use the | |
1171 | * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is | |
1172 | * bit 1, etc. | |
1173 | */ | |
1174 | unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc) | |
1175 | { | |
1176 | return 1 << (xhci_get_endpoint_index(desc) + 1); | |
1177 | } | |
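
/*
 * Illustrative example: ep 1 IN has endpoint index 2 (see above), so its
 * add/drop context flag is 1 << (2 + 1) = 0b1000, matching the added_ctxs
 * example in the xhci_last_valid_endpoint() comment below.
 */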
1178 | ||
1179 | /* Find the flag for this endpoint (for use in the control context). Use the | |
1180 | * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is | |
1181 | * bit 1, etc. | |
1182 | */ | |
1183 | unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index) | |
1184 | { | |
1185 | return 1 << (ep_index + 1); | |
1186 | } | |
1187 | ||

/* Compute the last valid endpoint context index.  Basically, this is the
 * endpoint index plus one.  For slot contexts with more than one valid
 * endpoint, we find the most significant bit set in the added contexts flags.
 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
 */
unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
	return fls(added_ctxs) - 1;
}

/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
 */
static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
		const char *func) {
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;

	if (!hcd || (check_ep && !ep) || !udev) {
		pr_debug("xHCI %s called with invalid args\n", func);
		return -EINVAL;
	}
	if (!udev->parent) {
		pr_debug("xHCI %s called for root hub\n", func);
		return 0;
	}

	xhci = hcd_to_xhci(hcd);
	if (check_virt_dev) {
		if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
			xhci_dbg(xhci, "xHCI %s called with unaddressed device\n",
					func);
			return -EINVAL;
		}

		virt_dev = xhci->devs[udev->slot_id];
		if (virt_dev->udev != udev) {
			xhci_dbg(xhci, "xHCI %s called with udev that does not match virt_dev\n",
					func);
			return -EINVAL;
		}
	}

	if (xhci->xhc_state & XHCI_STATE_HALTED)
		return -ENODEV;

	return 1;
}
1238 | ||
1239 | static int xhci_configure_endpoint(struct xhci_hcd *xhci, | |
1240 | struct usb_device *udev, struct xhci_command *command, | |
1241 | bool ctx_change, bool must_succeed); | |
1242 | ||
1243 | /* | |
1244 | * Full speed devices may have a max packet size greater than 8 bytes, but the | |
1245 | * USB core doesn't know that until it reads the first 8 bytes of the | |
1246 | * descriptor. If the usb_device's max packet size changes after that point, | |
1247 | * we need to issue an evaluate context command and wait on it. | |
1248 | */ | |
static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
		unsigned int ep_index, struct urb *urb)
{
	struct xhci_container_ctx *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_command *command;
	int max_packet_size;
	int hw_max_packet_size;
	int ret = 0;

	out_ctx = xhci->devs[slot_id]->out_ctx;
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
	max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
	if (hw_max_packet_size != max_packet_size) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Max Packet Size for ep 0 changed.");
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Max packet size in usb_device = %d",
				max_packet_size);
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Max packet size in xHCI HW = %d",
				hw_max_packet_size);
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Issuing evaluate context command.");

		/* Set up the input context flags for the command */
		/* FIXME: This won't work if a non-default control endpoint
		 * changes max packet sizes.
		 */

		command = xhci_alloc_command(xhci, false, true, GFP_KERNEL);
		if (!command)
			return -ENOMEM;

		command->in_ctx = xhci->devs[slot_id]->in_ctx;
		ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
		if (!ctrl_ctx) {
			xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
					__func__);
			ret = -ENOMEM;
			goto command_cleanup;
		}
		/* Set up the modified control endpoint 0 */
		xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
				xhci->devs[slot_id]->out_ctx, ep_index);

		ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
		ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));

		ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
		ctrl_ctx->drop_flags = 0;

		xhci_dbg(xhci, "Slot %d input context\n", slot_id);
		xhci_dbg_ctx(xhci, command->in_ctx, ep_index);
		xhci_dbg(xhci, "Slot %d output context\n", slot_id);
		xhci_dbg_ctx(xhci, out_ctx, ep_index);

		ret = xhci_configure_endpoint(xhci, urb->dev, command,
				true, false);

		/* Clean up the input context for later use by bandwidth
		 * functions.
		 */
		ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
command_cleanup:
		kfree(command->completion);
		kfree(command);
	}
	return ret;
}
1322 | ||
1323 | /* | |
1324 | * non-error returns are a promise to giveback() the urb later | |
1325 | * we drop ownership so next owner (or urb unlink) can get it | |
1326 | */ | |
int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_td *buffer;
	unsigned long flags;
	int ret = 0;
	unsigned int slot_id, ep_index;
	struct urb_priv *urb_priv;
	int size, i;

	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
				true, true, __func__) <= 0)
		return -EINVAL;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);

	if (!HCD_HW_ACCESSIBLE(hcd)) {
		if (!in_interrupt())
			xhci_dbg(xhci, "urb submitted during PCI suspend\n");
		ret = -ESHUTDOWN;
		goto exit;
	}

	if (usb_endpoint_xfer_isoc(&urb->ep->desc))
		size = urb->number_of_packets;
	else if (usb_endpoint_is_bulk_out(&urb->ep->desc) &&
			urb->transfer_buffer_length > 0 &&
			urb->transfer_flags & URB_ZERO_PACKET &&
			!(urb->transfer_buffer_length % usb_endpoint_maxp(&urb->ep->desc)))
		size = 2;
	else
		size = 1;
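	/*
	 * Illustrative examples of the TD count chosen above: an isochronous
	 * URB with number_of_packets == 8 needs 8 TDs; a bulk OUT URB with
	 * URB_ZERO_PACKET whose length is an exact multiple of the endpoint's
	 * max packet size needs 2 TDs (the data plus a trailing zero-length
	 * packet); everything else needs a single TD.
	 */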
1360 | ||
1361 | urb_priv = kzalloc(sizeof(struct urb_priv) + | |
1362 | size * sizeof(struct xhci_td *), mem_flags); | |
1363 | if (!urb_priv) | |
1364 | return -ENOMEM; | |
1365 | ||
1366 | buffer = kzalloc(size * sizeof(struct xhci_td), mem_flags); | |
1367 | if (!buffer) { | |
1368 | kfree(urb_priv); | |
1369 | return -ENOMEM; | |
1370 | } | |
1371 | ||
1372 | for (i = 0; i < size; i++) { | |
1373 | urb_priv->td[i] = buffer; | |
1374 | buffer++; | |
1375 | } | |
1376 | ||
1377 | urb_priv->length = size; | |
1378 | urb_priv->td_cnt = 0; | |
1379 | urb->hcpriv = urb_priv; | |
1380 | ||
1381 | if (usb_endpoint_xfer_control(&urb->ep->desc)) { | |
1382 | /* Check to see if the max packet size for the default control | |
1383 | * endpoint changed during FS device enumeration | |
1384 | */ | |
1385 | if (urb->dev->speed == USB_SPEED_FULL) { | |
1386 | ret = xhci_check_maxpacket(xhci, slot_id, | |
1387 | ep_index, urb); | |
1388 | if (ret < 0) { | |
1389 | xhci_urb_free_priv(urb_priv); | |
1390 | urb->hcpriv = NULL; | |
1391 | return ret; | |
1392 | } | |
1393 | } | |
1394 | ||
1395 | /* We have a spinlock and interrupts disabled, so we must pass | |
1396 | * atomic context to this function, which may allocate memory. | |
1397 | */ | |
1398 | spin_lock_irqsave(&xhci->lock, flags); | |
1399 | if (xhci->xhc_state & XHCI_STATE_DYING) | |
1400 | goto dying; | |
1401 | ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb, | |
1402 | slot_id, ep_index); | |
1403 | if (ret) | |
1404 | goto free_priv; | |
1405 | spin_unlock_irqrestore(&xhci->lock, flags); | |
1406 | } else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) { | |
1407 | spin_lock_irqsave(&xhci->lock, flags); | |
1408 | if (xhci->xhc_state & XHCI_STATE_DYING) | |
1409 | goto dying; | |
1410 | if (xhci->devs[slot_id]->eps[ep_index].ep_state & | |
1411 | EP_GETTING_STREAMS) { | |
1412 | xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep " | |
1413 | "is transitioning to using streams.\n"); | |
1414 | ret = -EINVAL; | |
1415 | } else if (xhci->devs[slot_id]->eps[ep_index].ep_state & | |
1416 | EP_GETTING_NO_STREAMS) { | |
1417 | xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep " | |
1418 | "is transitioning to " | |
1419 | "not having streams.\n"); | |
1420 | ret = -EINVAL; | |
1421 | } else { | |
1422 | ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, | |
1423 | slot_id, ep_index); | |
1424 | } | |
1425 | if (ret) | |
1426 | goto free_priv; | |
1427 | spin_unlock_irqrestore(&xhci->lock, flags); | |
1428 | } else if (usb_endpoint_xfer_int(&urb->ep->desc)) { | |
1429 | spin_lock_irqsave(&xhci->lock, flags); | |
1430 | if (xhci->xhc_state & XHCI_STATE_DYING) | |
1431 | goto dying; | |
1432 | ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb, | |
1433 | slot_id, ep_index); | |
1434 | if (ret) | |
1435 | goto free_priv; | |
1436 | spin_unlock_irqrestore(&xhci->lock, flags); | |
1437 | } else { | |
1438 | spin_lock_irqsave(&xhci->lock, flags); | |
1439 | if (xhci->xhc_state & XHCI_STATE_DYING) | |
1440 | goto dying; | |
1441 | ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb, | |
1442 | slot_id, ep_index); | |
1443 | if (ret) | |
1444 | goto free_priv; | |
1445 | spin_unlock_irqrestore(&xhci->lock, flags); | |
1446 | } | |
1447 | exit: | |
1448 | return ret; | |
1449 | dying: | |
1450 | xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for " | |
1451 | "non-responsive xHCI host.\n", | |
1452 | urb->ep->desc.bEndpointAddress, urb); | |
1453 | ret = -ESHUTDOWN; | |
1454 | free_priv: | |
1455 | xhci_urb_free_priv(urb_priv); | |
1456 | urb->hcpriv = NULL; | |
1457 | spin_unlock_irqrestore(&xhci->lock, flags); | |
1458 | return ret; | |
1459 | } | |
1460 | ||
1461 | /* | |
1462 | * Remove the URB's TD from the endpoint ring. This may cause the HC to stop | |
1463 | * USB transfers, potentially stopping in the middle of a TRB buffer. The HC | |
1464 | * should pick up where it left off in the TD, unless a Set Transfer Ring | |
1465 | * Dequeue Pointer is issued. | |
1466 | * | |
1467 | * The TRBs that make up the buffers for the canceled URB will be "removed" from | |
1468 | * the ring. Since the ring is a contiguous structure, they can't be physically | |
1469 | * removed. Instead, there are two options: | |
1470 | * | |
1471 | * 1) If the HC is in the middle of processing the URB to be canceled, we | |
1472 | * simply move the ring's dequeue pointer past those TRBs using the Set | |
1473 | * Transfer Ring Dequeue Pointer command. This will be the common case, | |
1474 | * when drivers timeout on the last submitted URB and attempt to cancel. | |
1475 | * | |
1476 | * 2) If the HC is in the middle of a different TD, we turn the TRBs into a | |
1477 | * series of 1-TRB transfer no-op TDs. (No-ops shouldn't be chained.) The | |
1478 | * HC will need to invalidate the any TRBs it has cached after the stop | |
1479 | * endpoint command, as noted in the xHCI 0.95 errata. | |
1480 | * | |
1481 | * 3) The TD may have completed by the time the Stop Endpoint Command | |
1482 | * completes, so software needs to handle that case too. | |
1483 | * | |
1484 | * This function should protect against the TD enqueueing code ringing the | |
1485 | * doorbell while this code is waiting for a Stop Endpoint command to complete. | |
1486 | * It also needs to account for multiple cancellations on happening at the same | |
1487 | * time for the same endpoint. | |
1488 | * | |
1489 | * Note that this function can be called in any context, or so says | |
1490 | * usb_hcd_unlink_urb() | |
1491 | */ | |
1492 | int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) | |
1493 | { | |
1494 | unsigned long flags; | |
1495 | int ret, i; | |
1496 | u32 temp; | |
1497 | struct xhci_hcd *xhci; | |
1498 | struct urb_priv *urb_priv; | |
1499 | struct xhci_td *td; | |
1500 | unsigned int ep_index; | |
1501 | struct xhci_ring *ep_ring; | |
1502 | struct xhci_virt_ep *ep; | |
1503 | struct xhci_command *command; | |
1504 | ||
1505 | xhci = hcd_to_xhci(hcd); | |
1506 | spin_lock_irqsave(&xhci->lock, flags); | |
1507 | /* Make sure the URB hasn't completed or been unlinked already */ | |
1508 | ret = usb_hcd_check_unlink_urb(hcd, urb, status); | |
1509 | if (ret || !urb->hcpriv) | |
1510 | goto done; | |
1511 | temp = readl(&xhci->op_regs->status); | |
1512 | if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) { | |
1513 | xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, | |
1514 | "HW died, freeing TD."); | |
1515 | urb_priv = urb->hcpriv; | |
1516 | for (i = urb_priv->td_cnt; | |
1517 | i < urb_priv->length && xhci->devs[urb->dev->slot_id]; | |
1518 | i++) { | |
1519 | td = urb_priv->td[i]; | |
1520 | if (!list_empty(&td->td_list)) | |
1521 | list_del_init(&td->td_list); | |
1522 | if (!list_empty(&td->cancelled_td_list)) | |
1523 | list_del_init(&td->cancelled_td_list); | |
1524 | } | |
1525 | ||
1526 | usb_hcd_unlink_urb_from_ep(hcd, urb); | |
1527 | spin_unlock_irqrestore(&xhci->lock, flags); | |
1528 | usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN); | |
1529 | xhci_urb_free_priv(urb_priv); | |
1530 | return ret; | |
1531 | } | |
1532 | if ((xhci->xhc_state & XHCI_STATE_DYING) || | |
1533 | (xhci->xhc_state & XHCI_STATE_HALTED)) { | |
1534 | xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, | |
1535 | "Ep 0x%x: URB %p to be canceled on " | |
1536 | "non-responsive xHCI host.", | |
1537 | urb->ep->desc.bEndpointAddress, urb); | |
1538 | /* Let the stop endpoint command watchdog timer (which set this | |
1539 | * state) finish cleaning up the endpoint TD lists. We must | |
1540 | * have caught it in the middle of dropping a lock and giving | |
1541 | * back a URB. | |
1542 | */ | |
1543 | goto done; | |
1544 | } | |
1545 | ||
1546 | ep_index = xhci_get_endpoint_index(&urb->ep->desc); | |
1547 | ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index]; | |
1548 | ep_ring = xhci_urb_to_transfer_ring(xhci, urb); | |
1549 | if (!ep_ring) { | |
1550 | ret = -EINVAL; | |
1551 | goto done; | |
1552 | } | |
1553 | ||
1554 | urb_priv = urb->hcpriv; | |
1555 | i = urb_priv->td_cnt; | |
1556 | if (i < urb_priv->length) | |
1557 | xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, | |
1558 | "Cancel URB %p, dev %s, ep 0x%x, " | |
1559 | "starting at offset 0x%llx", | |
1560 | urb, urb->dev->devpath, | |
1561 | urb->ep->desc.bEndpointAddress, | |
1562 | (unsigned long long) xhci_trb_virt_to_dma( | |
1563 | urb_priv->td[i]->start_seg, | |
1564 | urb_priv->td[i]->first_trb)); | |
1565 | ||
1566 | for (; i < urb_priv->length; i++) { | |
1567 | td = urb_priv->td[i]; | |
1568 | list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list); | |
1569 | } | |
1570 | ||
1571 | /* Queue a stop endpoint command, but only if this is | |
1572 | * the first cancellation to be handled. | |
1573 | */ | |
1574 | if (!(ep->ep_state & EP_HALT_PENDING)) { | |
1575 | command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC); | |
1576 | if (!command) { | |
1577 | ret = -ENOMEM; | |
1578 | goto done; | |
1579 | } | |
1580 | ep->ep_state |= EP_HALT_PENDING; | |
1581 | ep->stop_cmds_pending++; | |
1582 | ep->stop_cmd_timer.expires = jiffies + | |
1583 | XHCI_STOP_EP_CMD_TIMEOUT * HZ; | |
1584 | add_timer(&ep->stop_cmd_timer); | |
1585 | xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id, | |
1586 | ep_index, 0); | |
1587 | xhci_ring_cmd_db(xhci); | |
1588 | } | |
1589 | done: | |
1590 | spin_unlock_irqrestore(&xhci->lock, flags); | |
1591 | return ret; | |
1592 | } | |
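/*
 * Illustrative sketch (not part of this file): class drivers never call
 * xhci_urb_dequeue() directly; they unlink through the USB core, which
 * reaches this handler via hcd->driver->urb_dequeue. The buffer, length,
 * completion handler and context names below are hypothetical, and error
 * handling is elided.
 *
 *	struct urb *urb = usb_alloc_urb(0, GFP_KERNEL);
 *	usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, 1),
 *			  buf, len, my_complete, my_ctx);
 *	usb_submit_urb(urb, GFP_KERNEL);
 *	...
 *	usb_unlink_urb(urb);	 - asynchronous cancel, any context
 *	usb_kill_urb(urb);	 - synchronous alternative (sleeps)
 */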
1593 | ||
1594 | /* Drop an endpoint from a new bandwidth configuration for this device. | |
1595 | * Only one call to this function is allowed per endpoint before | |
1596 | * check_bandwidth() or reset_bandwidth() must be called. | |
1597 | * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will | |
1598 | * add the endpoint to the schedule with possibly new parameters denoted by a | |
1599 | * different endpoint descriptor in usb_host_endpoint. | |
1600 | * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is | |
1601 | * not allowed. | |
1602 | * | |
1603 | * The USB core will not allow URBs to be queued to an endpoint that is being | |
1604 | * disabled, so there's no need for mutual exclusion to protect | |
1605 | * the xhci->devs[slot_id] structure. | |
1606 | */ | |
1607 | int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, | |
1608 | struct usb_host_endpoint *ep) | |
1609 | { | |
1610 | struct xhci_hcd *xhci; | |
1611 | struct xhci_container_ctx *in_ctx, *out_ctx; | |
1612 | struct xhci_input_control_ctx *ctrl_ctx; | |
1613 | unsigned int ep_index; | |
1614 | struct xhci_ep_ctx *ep_ctx; | |
1615 | u32 drop_flag; | |
1616 | u32 new_add_flags, new_drop_flags; | |
1617 | int ret; | |
1618 | ||
1619 | ret = xhci_check_args(hcd, udev, ep, 1, true, __func__); | |
1620 | if (ret <= 0) | |
1621 | return ret; | |
1622 | xhci = hcd_to_xhci(hcd); | |
1623 | if (xhci->xhc_state & XHCI_STATE_DYING) | |
1624 | return -ENODEV; | |
1625 | ||
1626 | xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); | |
1627 | drop_flag = xhci_get_endpoint_flag(&ep->desc); | |
1628 | if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) { | |
1629 | xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n", | |
1630 | __func__, drop_flag); | |
1631 | return 0; | |
1632 | } | |
1633 | ||
1634 | in_ctx = xhci->devs[udev->slot_id]->in_ctx; | |
1635 | out_ctx = xhci->devs[udev->slot_id]->out_ctx; | |
1636 | ctrl_ctx = xhci_get_input_control_ctx(in_ctx); | |
1637 | if (!ctrl_ctx) { | |
1638 | xhci_warn(xhci, "%s: Could not get input context, bad type.\n", | |
1639 | __func__); | |
1640 | return 0; | |
1641 | } | |
1642 | ||
1643 | ep_index = xhci_get_endpoint_index(&ep->desc); | |
1644 | ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); | |
1645 | /* If the HC already knows the endpoint is disabled, | |
1646 | * or the HCD has noted it is disabled, ignore this request | |
1647 | */ | |
1648 | if (((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) == | |
1649 | cpu_to_le32(EP_STATE_DISABLED)) || | |
1650 | le32_to_cpu(ctrl_ctx->drop_flags) & | |
1651 | xhci_get_endpoint_flag(&ep->desc)) { | |
1652 | /* Do not warn when called after a usb_device_reset */ | |
1653 | if (xhci->devs[udev->slot_id]->eps[ep_index].ring != NULL) | |
1654 | xhci_warn(xhci, "xHCI %s called with disabled ep %p\n", | |
1655 | __func__, ep); | |
1656 | return 0; | |
1657 | } | |
1658 | ||
1659 | ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag); | |
1660 | new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags); | |
1661 | ||
1662 | ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag); | |
1663 | new_add_flags = le32_to_cpu(ctrl_ctx->add_flags); | |
1664 | ||
1665 | xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep); | |
1666 | ||
1667 | if (xhci->quirks & XHCI_MTK_HOST) | |
1668 | xhci_mtk_drop_ep_quirk(hcd, udev, ep); | |
1669 | ||
1670 | xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n", | |
1671 | (unsigned int) ep->desc.bEndpointAddress, | |
1672 | udev->slot_id, | |
1673 | (unsigned int) new_drop_flags, | |
1674 | (unsigned int) new_add_flags); | |
1675 | return 0; | |
1676 | } | |
1677 | ||
1678 | /* Add an endpoint to a new possible bandwidth configuration for this device. | |
1679 | * Only one call to this function is allowed per endpoint before | |
1680 | * check_bandwidth() or reset_bandwidth() must be called. | |
1681 | * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will | |
1682 | * add the endpoint to the schedule with possibly new parameters denoted by a | |
1683 | * different endpoint descriptor in usb_host_endpoint. | |
1684 | * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is | |
1685 | * not allowed. | |
1686 | * | |
1687 | * The USB core will not allow URBs to be queued to an endpoint until the | |
1688 | * configuration or alt setting is installed in the device, so there's no need | |
1689 | * for mutual exclusion to protect the xhci->devs[slot_id] structure. | |
1690 | */ | |
1691 | int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, | |
1692 | struct usb_host_endpoint *ep) | |
1693 | { | |
1694 | struct xhci_hcd *xhci; | |
1695 | struct xhci_container_ctx *in_ctx; | |
1696 | unsigned int ep_index; | |
1697 | struct xhci_input_control_ctx *ctrl_ctx; | |
1698 | u32 added_ctxs; | |
1699 | u32 new_add_flags, new_drop_flags; | |
1700 | struct xhci_virt_device *virt_dev; | |
1701 | int ret = 0; | |
1702 | ||
1703 | ret = xhci_check_args(hcd, udev, ep, 1, true, __func__); | |
1704 | if (ret <= 0) { | |
1705 | /* So we won't queue a reset ep command for a root hub */ | |
1706 | ep->hcpriv = NULL; | |
1707 | return ret; | |
1708 | } | |
1709 | xhci = hcd_to_xhci(hcd); | |
1710 | if (xhci->xhc_state & XHCI_STATE_DYING) | |
1711 | return -ENODEV; | |
1712 | ||
1713 | added_ctxs = xhci_get_endpoint_flag(&ep->desc); | |
1714 | if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) { | |
1715 | /* FIXME when we have to issue an evaluate endpoint command to | |
1716 | * deal with ep0 max packet size changing once we get the | |
1717 | * descriptors | |
1718 | */ | |
1719 | xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n", | |
1720 | __func__, added_ctxs); | |
1721 | return 0; | |
1722 | } | |
1723 | ||
1724 | virt_dev = xhci->devs[udev->slot_id]; | |
1725 | in_ctx = virt_dev->in_ctx; | |
1726 | ctrl_ctx = xhci_get_input_control_ctx(in_ctx); | |
1727 | if (!ctrl_ctx) { | |
1728 | xhci_warn(xhci, "%s: Could not get input context, bad type.\n", | |
1729 | __func__); | |
1730 | return 0; | |
1731 | } | |
1732 | ||
1733 | ep_index = xhci_get_endpoint_index(&ep->desc); | |
1734 | /* If this endpoint is already in use, and the upper layers are trying | |
1735 | * to add it again without dropping it, reject the addition. | |
1736 | */ | |
1737 | if (virt_dev->eps[ep_index].ring && | |
1738 | !(le32_to_cpu(ctrl_ctx->drop_flags) & added_ctxs)) { | |
1739 | xhci_warn(xhci, "Trying to add endpoint 0x%x " | |
1740 | "without dropping it.\n", | |
1741 | (unsigned int) ep->desc.bEndpointAddress); | |
1742 | return -EINVAL; | |
1743 | } | |
1744 | ||
1745 | /* If the HCD has already noted the endpoint is enabled, | |
1746 | * ignore this request. | |
1747 | */ | |
1748 | if (le32_to_cpu(ctrl_ctx->add_flags) & added_ctxs) { | |
1749 | xhci_warn(xhci, "xHCI %s called with enabled ep %p\n", | |
1750 | __func__, ep); | |
1751 | return 0; | |
1752 | } | |
1753 | ||
1754 | /* | |
1755 | * Configuration and alternate setting changes must be done in | |
1756 | * process context, not interrupt context (or so the documentation | |
1757 | * for usb_set_interface() and usb_set_configuration() claims). | |
1758 | */ | |
1759 | if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) { | |
1760 | dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n", | |
1761 | __func__, ep->desc.bEndpointAddress); | |
1762 | return -ENOMEM; | |
1763 | } | |
1764 | ||
1765 | if (xhci->quirks & XHCI_MTK_HOST) { | |
1766 | ret = xhci_mtk_add_ep_quirk(hcd, udev, ep); | |
1767 | if (ret < 0) { | |
1768 | xhci_free_or_cache_endpoint_ring(xhci, | |
1769 | virt_dev, ep_index); | |
1770 | return ret; | |
1771 | } | |
1772 | } | |
1773 | ||
1774 | ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs); | |
1775 | new_add_flags = le32_to_cpu(ctrl_ctx->add_flags); | |
1776 | ||
1777 | /* If xhci_endpoint_disable() was called for this endpoint, but the | |
1778 | * xHC hasn't been notified yet through the check_bandwidth() call, | |
1779 | * this re-adds a new state for the endpoint from the new endpoint | |
1780 | * descriptors. We must drop and re-add this endpoint, so we leave the | |
1781 | * drop flags alone. | |
1782 | */ | |
1783 | new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags); | |
1784 | ||
1785 | /* Store the usb_device pointer for later use */ | |
1786 | ep->hcpriv = udev; | |
1787 | ||
1788 | xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n", | |
1789 | (unsigned int) ep->desc.bEndpointAddress, | |
1790 | udev->slot_id, | |
1791 | (unsigned int) new_drop_flags, | |
1792 | (unsigned int) new_add_flags); | |
1793 | return 0; | |
1794 | } | |
1795 | ||
1796 | static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev) | |
1797 | { | |
1798 | struct xhci_input_control_ctx *ctrl_ctx; | |
1799 | struct xhci_ep_ctx *ep_ctx; | |
1800 | struct xhci_slot_ctx *slot_ctx; | |
1801 | int i; | |
1802 | ||
1803 | ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx); | |
1804 | if (!ctrl_ctx) { | |
1805 | xhci_warn(xhci, "%s: Could not get input context, bad type.\n", | |
1806 | __func__); | |
1807 | return; | |
1808 | } | |
1809 | ||
1810 | /* When an endpoint's add and drop flags are both zero, any subsequent | |
1811 | * configure endpoint command will leave that endpoint's state | |
1812 | * untouched. Make sure we don't leave any old state in the input | |
1813 | * endpoint contexts. | |
1814 | */ | |
1815 | ctrl_ctx->drop_flags = 0; | |
1816 | ctrl_ctx->add_flags = 0; | |
1817 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); | |
1818 | slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK); | |
1819 | /* Endpoint 0 is always valid */ | |
1820 | slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1)); | |
1821 | for (i = 1; i < 31; ++i) { | |
1822 | ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i); | |
1823 | ep_ctx->ep_info = 0; | |
1824 | ep_ctx->ep_info2 = 0; | |
1825 | ep_ctx->deq = 0; | |
1826 | ep_ctx->tx_info = 0; | |
1827 | } | |
1828 | } | |
1829 | ||
1830 | static int xhci_configure_endpoint_result(struct xhci_hcd *xhci, | |
1831 | struct usb_device *udev, u32 *cmd_status) | |
1832 | { | |
1833 | int ret; | |
1834 | ||
1835 | switch (*cmd_status) { | |
1836 | case COMP_CMD_ABORT: | |
1837 | case COMP_CMD_STOP: | |
1838 | xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n"); | |
1839 | ret = -ETIME; | |
1840 | break; | |
1841 | case COMP_ENOMEM: | |
1842 | dev_warn(&udev->dev, | |
1843 | "Not enough host controller resources for new device state.\n"); | |
1844 | ret = -ENOMEM; | |
1845 | /* FIXME: can we allocate more resources for the HC? */ | |
1846 | break; | |
1847 | case COMP_BW_ERR: | |
1848 | case COMP_2ND_BW_ERR: | |
1849 | dev_warn(&udev->dev, | |
1850 | "Not enough bandwidth for new device state.\n"); | |
1851 | ret = -ENOSPC; | |
1852 | /* FIXME: can we go back to the old state? */ | |
1853 | break; | |
1854 | case COMP_TRB_ERR: | |
1855 | /* the HCD set up something wrong */ | |
1856 | dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, " | |
1857 | "add flag = 1, " | |
1858 | "and endpoint is not disabled.\n"); | |
1859 | ret = -EINVAL; | |
1860 | break; | |
1861 | case COMP_DEV_ERR: | |
1862 | dev_warn(&udev->dev, | |
1863 | "ERROR: Incompatible device for endpoint configure command.\n"); | |
1864 | ret = -ENODEV; | |
1865 | break; | |
1866 | case COMP_SUCCESS: | |
1867 | xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, | |
1868 | "Successful Endpoint Configure command"); | |
1869 | ret = 0; | |
1870 | break; | |
1871 | default: | |
1872 | xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n", | |
1873 | *cmd_status); | |
1874 | ret = -EINVAL; | |
1875 | break; | |
1876 | } | |
1877 | return ret; | |
1878 | } | |
1879 | ||
1880 | static int xhci_evaluate_context_result(struct xhci_hcd *xhci, | |
1881 | struct usb_device *udev, u32 *cmd_status) | |
1882 | { | |
1883 | int ret; | |
1884 | struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id]; | |
1885 | ||
1886 | switch (*cmd_status) { | |
1887 | case COMP_CMD_ABORT: | |
1888 | case COMP_CMD_STOP: | |
1889 | xhci_warn(xhci, "Timeout while waiting for evaluate context command\n"); | |
1890 | ret = -ETIME; | |
1891 | break; | |
1892 | case COMP_EINVAL: | |
1893 | dev_warn(&udev->dev, | |
1894 | "WARN: xHCI driver setup invalid evaluate context command.\n"); | |
1895 | ret = -EINVAL; | |
1896 | break; | |
1897 | case COMP_EBADSLT: | |
1898 | dev_warn(&udev->dev, | |
1899 | "WARN: slot not enabled for evaluate context command.\n"); | |
1900 | ret = -EINVAL; | |
1901 | break; | |
1902 | case COMP_CTX_STATE: | |
1903 | dev_warn(&udev->dev, | |
1904 | "WARN: invalid context state for evaluate context command.\n"); | |
1905 | xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1); | |
1906 | ret = -EINVAL; | |
1907 | break; | |
1908 | case COMP_DEV_ERR: | |
1909 | dev_warn(&udev->dev, | |
1910 | "ERROR: Incompatible device for evaluate context command.\n"); | |
1911 | ret = -ENODEV; | |
1912 | break; | |
1913 | case COMP_MEL_ERR: | |
1914 | /* Max Exit Latency too large error */ | |
1915 | dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n"); | |
1916 | ret = -EINVAL; | |
1917 | break; | |
1918 | case COMP_SUCCESS: | |
1919 | xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, | |
1920 | "Successful evaluate context command"); | |
1921 | ret = 0; | |
1922 | break; | |
1923 | default: | |
1924 | xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n", | |
1925 | *cmd_status); | |
1926 | ret = -EINVAL; | |
1927 | break; | |
1928 | } | |
1929 | return ret; | |
1930 | } | |
1931 | ||
1932 | static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci, | |
1933 | struct xhci_input_control_ctx *ctrl_ctx) | |
1934 | { | |
1935 | u32 valid_add_flags; | |
1936 | u32 valid_drop_flags; | |
1937 | ||
1938 | /* Ignore the slot flag (bit 0), and the default control endpoint flag | |
1939 | * (bit 1). The default control endpoint is added during the Address | |
1940 | * Device command and is never removed until the slot is disabled. | |
1941 | */ | |
1942 | valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2; | |
1943 | valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2; | |
1944 | ||
1945 | /* Use hweight32 to count the number of ones in the add flags, or | |
1946 | * number of endpoints added. Don't count endpoints that are changed | |
1947 | * (both added and dropped). | |
1948 | */ | |
1949 | return hweight32(valid_add_flags) - | |
1950 | hweight32(valid_add_flags & valid_drop_flags); | |
1951 | } | |
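/*
 * Worked example (illustrative values): after shifting out the slot and
 * ep0 flags, suppose add_flags = 0b0110 and drop_flags = 0b0100. One
 * endpoint is both added and dropped (i.e. changed), one is purely new:
 *
 *	hweight32(0b0110) - hweight32(0b0110 & 0b0100) = 2 - 1 = 1
 *
 * so exactly one extra endpoint context needs to be reserved.
 */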
1952 | ||
1953 | static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci, | |
1954 | struct xhci_input_control_ctx *ctrl_ctx) | |
1955 | { | |
1956 | u32 valid_add_flags; | |
1957 | u32 valid_drop_flags; | |
1958 | ||
1959 | valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2; | |
1960 | valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2; | |
1961 | ||
1962 | return hweight32(valid_drop_flags) - | |
1963 | hweight32(valid_add_flags & valid_drop_flags); | |
1964 | } | |
1965 | ||
1966 | /* | |
1967 | * We need to reserve the new number of endpoints before the configure endpoint | |
1968 | * command completes. We can't subtract the dropped endpoints from the number | |
1969 | * of active endpoints until the command completes because we can oversubscribe | |
1970 | * the host in this case: | |
1971 | * | |
1972 | * - the first configure endpoint command drops more endpoints than it adds | |
1973 | * - a second configure endpoint command that adds more endpoints is queued | |
1974 | * - the first configure endpoint command fails, so the config is unchanged | |
1975 | * - the second command may succeed, even though there aren't enough resources | |
1976 | * | |
1977 | * Must be called with xhci->lock held. | |
1978 | */ | |
1979 | static int xhci_reserve_host_resources(struct xhci_hcd *xhci, | |
1980 | struct xhci_input_control_ctx *ctrl_ctx) | |
1981 | { | |
1982 | u32 added_eps; | |
1983 | ||
1984 | added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx); | |
1985 | if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) { | |
1986 | xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, | |
1987 | "Not enough ep ctxs: " | |
1988 | "%u active, need to add %u, limit is %u.", | |
1989 | xhci->num_active_eps, added_eps, | |
1990 | xhci->limit_active_eps); | |
1991 | return -ENOMEM; | |
1992 | } | |
1993 | xhci->num_active_eps += added_eps; | |
1994 | xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, | |
1995 | "Adding %u ep ctxs, %u now active.", added_eps, | |
1996 | xhci->num_active_eps); | |
1997 | return 0; | |
1998 | } | |
1999 | ||
2000 | /* | |
2001 | * The xHC failed the configure endpoint command for some other reason, so we | |
2002 | * need to revert the resources that the failed configuration would have used. | |
2003 | * | |
2004 | * Must be called with xhci->lock held. | |
2005 | */ | |
2006 | static void xhci_free_host_resources(struct xhci_hcd *xhci, | |
2007 | struct xhci_input_control_ctx *ctrl_ctx) | |
2008 | { | |
2009 | u32 num_failed_eps; | |
2010 | ||
2011 | num_failed_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx); | |
2012 | xhci->num_active_eps -= num_failed_eps; | |
2013 | xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, | |
2014 | "Removing %u failed ep ctxs, %u now active.", | |
2015 | num_failed_eps, | |
2016 | xhci->num_active_eps); | |
2017 | } | |
2018 | ||
2019 | /* | |
2020 | * Now that the command has completed, clean up the active endpoint count by | |
2021 | * subtracting out the endpoints that were dropped (but not changed). | |
2022 | * | |
2023 | * Must be called with xhci->lock held. | |
2024 | */ | |
2025 | static void xhci_finish_resource_reservation(struct xhci_hcd *xhci, | |
2026 | struct xhci_input_control_ctx *ctrl_ctx) | |
2027 | { | |
2028 | u32 num_dropped_eps; | |
2029 | ||
2030 | num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, ctrl_ctx); | |
2031 | xhci->num_active_eps -= num_dropped_eps; | |
2032 | if (num_dropped_eps) | |
2033 | xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, | |
2034 | "Removing %u dropped ep ctxs, %u now active.", | |
2035 | num_dropped_eps, | |
2036 | xhci->num_active_eps); | |
2037 | } | |
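/*
 * Simplified sketch of how the three accounting helpers above fit together
 * (the real, fully locked call sites are in xhci_configure_endpoint()
 * below; command_failed stands in for the completion status check):
 *
 *	spin_lock_irqsave(&xhci->lock, flags);
 *	ret = xhci_reserve_host_resources(xhci, ctrl_ctx);
 *	...queue the configure endpoint command, drop the lock, wait...
 *	spin_lock_irqsave(&xhci->lock, flags);
 *	if (command_failed)
 *		xhci_free_host_resources(xhci, ctrl_ctx);
 *	else
 *		xhci_finish_resource_reservation(xhci, ctrl_ctx);
 *	spin_unlock_irqrestore(&xhci->lock, flags);
 */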
2038 | ||
2039 | static unsigned int xhci_get_block_size(struct usb_device *udev) | |
2040 | { | |
2041 | switch (udev->speed) { | |
2042 | case USB_SPEED_LOW: | |
2043 | case USB_SPEED_FULL: | |
2044 | return FS_BLOCK; | |
2045 | case USB_SPEED_HIGH: | |
2046 | return HS_BLOCK; | |
2047 | case USB_SPEED_SUPER: | |
2048 | case USB_SPEED_SUPER_PLUS: | |
2049 | return SS_BLOCK; | |
2050 | case USB_SPEED_UNKNOWN: | |
2051 | case USB_SPEED_WIRELESS: | |
2052 | default: | |
2053 | /* Should never happen */ | |
2054 | return 1; | |
2055 | } | |
2056 | } | |
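/*
 * Illustrative example: on a full-speed device, a 64-byte max packet size
 * is charged as DIV_ROUND_UP(64, FS_BLOCK) bandwidth blocks. The concrete
 * per-speed block sizes are defined in xhci.h.
 */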
2057 | ||
2058 | static unsigned int | |
2059 | xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw) | |
2060 | { | |
2061 | if (interval_bw->overhead[LS_OVERHEAD_TYPE]) | |
2062 | return LS_OVERHEAD; | |
2063 | if (interval_bw->overhead[FS_OVERHEAD_TYPE]) | |
2064 | return FS_OVERHEAD; | |
2065 | return HS_OVERHEAD; | |
2066 | } | |
2067 | ||
2068 | /* If we are changing a LS/FS device under a HS hub, | |
2069 | * make sure (if we are activating a new TT) that the HS bus has enough | |
2070 | * bandwidth for this new TT. | |
2071 | */ | |
2072 | static int xhci_check_tt_bw_table(struct xhci_hcd *xhci, | |
2073 | struct xhci_virt_device *virt_dev, | |
2074 | int old_active_eps) | |
2075 | { | |
2076 | struct xhci_interval_bw_table *bw_table; | |
2077 | struct xhci_tt_bw_info *tt_info; | |
2078 | ||
2079 | /* Find the bandwidth table for the root port this TT is attached to. */ | |
2080 | bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table; | |
2081 | tt_info = virt_dev->tt_info; | |
2082 | /* If this TT already had active endpoints, the bandwidth for this TT | |
2083 | * has already been added. Removing all periodic endpoints (and thus | |
2084 | * making the TT inactive) will only decrease the bandwidth used. | |
2085 | */ | |
2086 | if (old_active_eps) | |
2087 | return 0; | |
2088 | if (old_active_eps == 0 && tt_info->active_eps != 0) { | |
2089 | if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT) | |
2090 | return -ENOMEM; | |
2091 | return 0; | |
2092 | } | |
2093 | /* Not sure why we would have no new active endpoints... | |
2094 | * | |
2095 | * Maybe because of an Evaluate Context change for a hub update or a | |
2096 | * control endpoint 0 max packet size change? | |
2097 | * FIXME: skip the bandwidth calculation in that case. | |
2098 | */ | |
2099 | return 0; | |
2100 | } | |
2101 | ||
2102 | static int xhci_check_ss_bw(struct xhci_hcd *xhci, | |
2103 | struct xhci_virt_device *virt_dev) | |
2104 | { | |
2105 | unsigned int bw_reserved; | |
2106 | ||
2107 | bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100); | |
2108 | if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved)) | |
2109 | return -ENOMEM; | |
2110 | ||
2111 | bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100); | |
2112 | if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved)) | |
2113 | return -ENOMEM; | |
2114 | ||
2115 | return 0; | |
2116 | } | |
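/*
 * Worked example (illustrative numbers only, not the xhci.h values): if
 * SS_BW_RESERVED were 10 (percent) and SS_BW_LIMIT_IN were 25000 blocks,
 * then
 *
 *	bw_reserved = DIV_ROUND_UP(10 * 25000, 100) = 2500
 *
 * and a new configuration would be rejected once the IN direction exceeded
 * 25000 - 2500 = 22500 blocks. Note the IN and OUT budgets are checked
 * independently.
 */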
2117 | ||
2118 | /* | |
2119 | * This algorithm is a very conservative estimate of the worst-case scheduling | |
2120 | * scenario for any one interval. The hardware dynamically schedules the | |
2121 | * packets, so we can't tell which microframe could be the limiting factor in | |
2122 | * the bandwidth scheduling. This only takes into account periodic endpoints. | |
2123 | * | |
2124 | * Obviously, we can't solve an NP-complete problem to find the minimum worst | |
2125 | * case scenario. Instead, we come up with an estimate that is no less than | |
2126 | * the worst case bandwidth used for any one microframe, but may be an | |
2127 | * over-estimate. | |
2128 | * | |
2129 | * We walk the requirements for each endpoint by interval, starting with the | |
2130 | * smallest interval, and place packets in the schedule where there is only one | |
2131 | * possible way to schedule packets for that interval. In order to simplify | |
2132 | * this algorithm, we record the largest max packet size for each interval, and | |
2133 | * assume all packets will be that size. | |
2134 | * | |
2135 | * For interval 0, we obviously must schedule all packets for each interval. | |
2136 | * The bandwidth for interval 0 is just the amount of data to be transmitted | |
2137 | * (the sum of all max ESIT payload sizes, plus any overhead per packet times | |
2138 | * the number of packets). | |
2139 | * | |
2140 | * For interval 1, we have two possible microframes to schedule those packets | |
2141 | * in. For this algorithm, if we can schedule the same number of packets for | |
2142 | * each possible scheduling opportunity (each microframe), we will do so. The | |
2143 | * remaining number of packets will be saved to be transmitted in the gaps in | |
2144 | * the next interval's scheduling sequence. | |
2145 | * | |
2146 | * As we move those remaining packets to be scheduled with interval 2 packets, | |
2147 | * we have to double the number of remaining packets to transmit. This is | |
2148 | * because the intervals are actually powers of 2, and we would be transmitting | |
2149 | * the previous interval's packets twice in this interval. We also have to be | |
2150 | * sure that when we look at the largest max packet size for this interval, we | |
2151 | * also look at the largest max packet size for the remaining packets and take | |
2152 | * the greater of the two. | |
2153 | * | |
2154 | * The algorithm continues to evenly distribute packets in each scheduling | |
2155 | * opportunity, and push the remaining packets out, until we get to the last | |
2156 | * interval. Then those packets and their associated overhead are just added | |
2157 | * to the bandwidth used. | |
2158 | */ | |
2159 | static int xhci_check_bw_table(struct xhci_hcd *xhci, | |
2160 | struct xhci_virt_device *virt_dev, | |
2161 | int old_active_eps) | |
2162 | { | |
2163 | unsigned int bw_reserved; | |
2164 | unsigned int max_bandwidth; | |
2165 | unsigned int bw_used; | |
2166 | unsigned int block_size; | |
2167 | struct xhci_interval_bw_table *bw_table; | |
2168 | unsigned int packet_size = 0; | |
2169 | unsigned int overhead = 0; | |
2170 | unsigned int packets_transmitted = 0; | |
2171 | unsigned int packets_remaining = 0; | |
2172 | unsigned int i; | |
2173 | ||
2174 | if (virt_dev->udev->speed >= USB_SPEED_SUPER) | |
2175 | return xhci_check_ss_bw(xhci, virt_dev); | |
2176 | ||
2177 | if (virt_dev->udev->speed == USB_SPEED_HIGH) { | |
2178 | max_bandwidth = HS_BW_LIMIT; | |
2179 | /* Convert percent of bus BW reserved to blocks reserved */ | |
2180 | bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100); | |
2181 | } else { | |
2182 | max_bandwidth = FS_BW_LIMIT; | |
2183 | bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100); | |
2184 | } | |
2185 | ||
2186 | bw_table = virt_dev->bw_table; | |
2187 | /* We need to translate the max packet size and max ESIT payloads into | |
2188 | * the units the hardware uses. | |
2189 | */ | |
2190 | block_size = xhci_get_block_size(virt_dev->udev); | |
2191 | ||
2192 | /* If we are manipulating a LS/FS device under a HS hub, double check | |
2193 | * that the HS bus has enough bandwidth if we are activating a new TT. | |
2194 | */ | |
2195 | if (virt_dev->tt_info) { | |
2196 | xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, | |
2197 | "Recalculating BW for rootport %u", | |
2198 | virt_dev->real_port); | |
2199 | if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) { | |
2200 | xhci_warn(xhci, "Not enough bandwidth on HS bus for " | |
2201 | "newly activated TT.\n"); | |
2202 | return -ENOMEM; | |
2203 | } | |
2204 | xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, | |
2205 | "Recalculating BW for TT slot %u port %u", | |
2206 | virt_dev->tt_info->slot_id, | |
2207 | virt_dev->tt_info->ttport); | |
2208 | } else { | |
2209 | xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, | |
2210 | "Recalculating BW for rootport %u", | |
2211 | virt_dev->real_port); | |
2212 | } | |
2213 | ||
2214 | /* Add in how much bandwidth will be used for interval zero, or the | |
2215 | * rounded max ESIT payload + number of packets * largest overhead. | |
2216 | */ | |
2217 | bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) + | |
2218 | bw_table->interval_bw[0].num_packets * | |
2219 | xhci_get_largest_overhead(&bw_table->interval_bw[0]); | |
2220 | ||
2221 | for (i = 1; i < XHCI_MAX_INTERVAL; i++) { | |
2222 | unsigned int bw_added; | |
2223 | unsigned int largest_mps; | |
2224 | unsigned int interval_overhead; | |
2225 | ||
2226 | /* | |
2227 | * How many packets could we transmit in this interval? | |
2228 | * If packets didn't fit in the previous interval, we will need | |
2229 | * to transmit that many packets twice within this interval. | |
2230 | */ | |
2231 | packets_remaining = 2 * packets_remaining + | |
2232 | bw_table->interval_bw[i].num_packets; | |
2233 | ||
2234 | /* Find the largest max packet size of this or the previous | |
2235 | * interval. | |
2236 | */ | |
2237 | if (list_empty(&bw_table->interval_bw[i].endpoints)) | |
2238 | largest_mps = 0; | |
2239 | else { | |
2240 | struct xhci_virt_ep *virt_ep; | |
2241 | struct list_head *ep_entry; | |
2242 | ||
2243 | ep_entry = bw_table->interval_bw[i].endpoints.next; | |
2244 | virt_ep = list_entry(ep_entry, | |
2245 | struct xhci_virt_ep, bw_endpoint_list); | |
2246 | /* Convert to blocks, rounding up */ | |
2247 | largest_mps = DIV_ROUND_UP( | |
2248 | virt_ep->bw_info.max_packet_size, | |
2249 | block_size); | |
2250 | } | |
2251 | if (largest_mps > packet_size) | |
2252 | packet_size = largest_mps; | |
2253 | ||
2254 | /* Use the larger overhead of this or the previous interval. */ | |
2255 | interval_overhead = xhci_get_largest_overhead( | |
2256 | &bw_table->interval_bw[i]); | |
2257 | if (interval_overhead > overhead) | |
2258 | overhead = interval_overhead; | |
2259 | ||
2260 | /* How many packets can we evenly distribute across | |
2261 | * (1 << (i + 1)) possible scheduling opportunities? | |
2262 | */ | |
2263 | packets_transmitted = packets_remaining >> (i + 1); | |
2264 | ||
2265 | /* Add in the bandwidth used for those scheduled packets */ | |
2266 | bw_added = packets_transmitted * (overhead + packet_size); | |
2267 | ||
2268 | /* How many packets do we have remaining to transmit? */ | |
2269 | packets_remaining = packets_remaining % (1 << (i + 1)); | |
2270 | ||
2271 | /* What largest max packet size should those packets have? */ | |
2272 | /* If we've transmitted all packets, don't carry over the | |
2273 | * largest packet size. | |
2274 | */ | |
2275 | if (packets_remaining == 0) { | |
2276 | packet_size = 0; | |
2277 | overhead = 0; | |
2278 | } else if (packets_transmitted > 0) { | |
2279 | /* Otherwise if we do have remaining packets, and we've | |
2280 | * scheduled some packets in this interval, take the | |
2281 | * largest max packet size from endpoints with this | |
2282 | * interval. | |
2283 | */ | |
2284 | packet_size = largest_mps; | |
2285 | overhead = interval_overhead; | |
2286 | } | |
2287 | /* Otherwise carry over packet_size and overhead from the last | |
2288 | * time we had a remainder. | |
2289 | */ | |
2290 | bw_used += bw_added; | |
2291 | if (bw_used > max_bandwidth) { | |
2292 | xhci_warn(xhci, "Not enough bandwidth. " | |
2293 | "Proposed: %u, Max: %u\n", | |
2294 | bw_used, max_bandwidth); | |
2295 | return -ENOMEM; | |
2296 | } | |
2297 | } | |
2298 | /* | |
2299 | * Ok, we know we have some packets left over after even-handedly | |
2300 | * scheduling interval 15. We don't know which microframes they will | |
2301 | * fit into, so we over-schedule and say they will be scheduled every | |
2302 | * microframe. | |
2303 | */ | |
2304 | if (packets_remaining > 0) | |
2305 | bw_used += overhead + packet_size; | |
2306 | ||
2307 | if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) { | |
2308 | unsigned int port_index = virt_dev->real_port - 1; | |
2309 | ||
2310 | /* OK, we're manipulating a HS device attached to a | |
2311 | * root port bandwidth domain. Include the number of active TTs | |
2312 | * in the bandwidth used. | |
2313 | */ | |
2314 | bw_used += TT_HS_OVERHEAD * | |
2315 | xhci->rh_bw[port_index].num_active_tts; | |
2316 | } | |
2317 | ||
2318 | xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, | |
2319 | "Final bandwidth: %u, Limit: %u, Reserved: %u, " | |
2320 | "Available: %u " "percent", | |
2321 | bw_used, max_bandwidth, bw_reserved, | |
2322 | (max_bandwidth - bw_used - bw_reserved) * 100 / | |
2323 | max_bandwidth); | |
2324 | ||
2325 | bw_used += bw_reserved; | |
2326 | if (bw_used > max_bandwidth) { | |
2327 | xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n", | |
2328 | bw_used, max_bandwidth); | |
2329 | return -ENOMEM; | |
2330 | } | |
2331 | ||
2332 | bw_table->bw_used = bw_used; | |
2333 | return 0; | |
2334 | } | |
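/*
 * Worked example of one pass through the loop above (illustrative): say
 * interval i = 1 has 3 packets queued and nothing was carried in. There
 * are 1 << (i + 1) = 4 scheduling opportunities, so
 *
 *	packets_transmitted = 3 >> 2 = 0
 *	packets_remaining   = 3 % 4 = 3
 *
 * and no bandwidth is charged yet. Those 3 packets are carried into the
 * i = 2 pass, where they double to 6 (an interval-1 packet must be sent
 * twice per interval-2 period) and are pooled with interval 2's own
 * packets, always charged at the largest max packet size and overhead
 * seen so far.
 */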
2335 | ||
2336 | static bool xhci_is_async_ep(unsigned int ep_type) | |
2337 | { | |
2338 | return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP && | |
2339 | ep_type != ISOC_IN_EP && | |
2340 | ep_type != INT_IN_EP); | |
2341 | } | |
2342 | ||
2343 | static bool xhci_is_sync_in_ep(unsigned int ep_type) | |
2344 | { | |
2345 | return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP); | |
2346 | } | |
2347 | ||
2348 | static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw) | |
2349 | { | |
2350 | unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK); | |
2351 | ||
2352 | if (ep_bw->ep_interval == 0) | |
2353 | return SS_OVERHEAD_BURST + | |
2354 | (ep_bw->mult * ep_bw->num_packets * | |
2355 | (SS_OVERHEAD + mps)); | |
2356 | return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets * | |
2357 | (SS_OVERHEAD + mps + SS_OVERHEAD_BURST), | |
2358 | 1 << ep_bw->ep_interval); | |
2359 | ||
2360 | } | |
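/*
 * Worked example (illustrative): a periodic SS IN endpoint with mult = 1,
 * num_packets = 2, max_packet_size = 1024 and ep_interval = 3 consumes
 *
 *	DIV_ROUND_UP(1 * 2 * (SS_OVERHEAD + mps + SS_OVERHEAD_BURST), 1 << 3)
 *
 * where mps = DIV_ROUND_UP(1024, SS_BLOCK) -- i.e. the per-service cost
 * spread over the 8 microframes between services.
 */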
2361 | ||
2362 | void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci, | |
2363 | struct xhci_bw_info *ep_bw, | |
2364 | struct xhci_interval_bw_table *bw_table, | |
2365 | struct usb_device *udev, | |
2366 | struct xhci_virt_ep *virt_ep, | |
2367 | struct xhci_tt_bw_info *tt_info) | |
2368 | { | |
2369 | struct xhci_interval_bw *interval_bw; | |
2370 | int normalized_interval; | |
2371 | ||
2372 | if (xhci_is_async_ep(ep_bw->type)) | |
2373 | return; | |
2374 | ||
2375 | if (udev->speed >= USB_SPEED_SUPER) { | |
2376 | if (xhci_is_sync_in_ep(ep_bw->type)) | |
2377 | xhci->devs[udev->slot_id]->bw_table->ss_bw_in -= | |
2378 | xhci_get_ss_bw_consumed(ep_bw); | |
2379 | else | |
2380 | xhci->devs[udev->slot_id]->bw_table->ss_bw_out -= | |
2381 | xhci_get_ss_bw_consumed(ep_bw); | |
2382 | return; | |
2383 | } | |
2384 | ||
2385 | /* SuperSpeed endpoints never get added to intervals in the table, so | |
2386 | * this check is only valid for HS/FS/LS devices. | |
2387 | */ | |
2388 | if (list_empty(&virt_ep->bw_endpoint_list)) | |
2389 | return; | |
2390 | /* For LS/FS devices, we need to translate the interval expressed in | |
2391 | * microframes to frames. | |
2392 | */ | |
2393 | if (udev->speed == USB_SPEED_HIGH) | |
2394 | normalized_interval = ep_bw->ep_interval; | |
2395 | else | |
2396 | normalized_interval = ep_bw->ep_interval - 3; | |
2397 | ||
2398 | if (normalized_interval == 0) | |
2399 | bw_table->interval0_esit_payload -= ep_bw->max_esit_payload; | |
2400 | interval_bw = &bw_table->interval_bw[normalized_interval]; | |
2401 | interval_bw->num_packets -= ep_bw->num_packets; | |
2402 | switch (udev->speed) { | |
2403 | case USB_SPEED_LOW: | |
2404 | interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1; | |
2405 | break; | |
2406 | case USB_SPEED_FULL: | |
2407 | interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1; | |
2408 | break; | |
2409 | case USB_SPEED_HIGH: | |
2410 | interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1; | |
2411 | break; | |
2412 | case USB_SPEED_SUPER: | |
2413 | case USB_SPEED_SUPER_PLUS: | |
2414 | case USB_SPEED_UNKNOWN: | |
2415 | case USB_SPEED_WIRELESS: | |
2416 | /* Should never happen because only LS/FS/HS endpoints will get | |
2417 | * added to the endpoint list. | |
2418 | */ | |
2419 | return; | |
2420 | } | |
2421 | if (tt_info) | |
2422 | tt_info->active_eps -= 1; | |
2423 | list_del_init(&virt_ep->bw_endpoint_list); | |
2424 | } | |
2425 | ||
2426 | static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci, | |
2427 | struct xhci_bw_info *ep_bw, | |
2428 | struct xhci_interval_bw_table *bw_table, | |
2429 | struct usb_device *udev, | |
2430 | struct xhci_virt_ep *virt_ep, | |
2431 | struct xhci_tt_bw_info *tt_info) | |
2432 | { | |
2433 | struct xhci_interval_bw *interval_bw; | |
2434 | struct xhci_virt_ep *smaller_ep; | |
2435 | int normalized_interval; | |
2436 | ||
2437 | if (xhci_is_async_ep(ep_bw->type)) | |
2438 | return; | |
2439 | ||
2440 | if (udev->speed >= USB_SPEED_SUPER) { | |
2441 | if (xhci_is_sync_in_ep(ep_bw->type)) | |
2442 | xhci->devs[udev->slot_id]->bw_table->ss_bw_in += | |
2443 | xhci_get_ss_bw_consumed(ep_bw); | |
2444 | else | |
2445 | xhci->devs[udev->slot_id]->bw_table->ss_bw_out += | |
2446 | xhci_get_ss_bw_consumed(ep_bw); | |
2447 | return; | |
2448 | } | |
2449 | ||
2450 | /* For LS/FS devices, we need to translate the interval expressed in | |
2451 | * microframes to frames. | |
2452 | */ | |
2453 | if (udev->speed == USB_SPEED_HIGH) | |
2454 | normalized_interval = ep_bw->ep_interval; | |
2455 | else | |
2456 | normalized_interval = ep_bw->ep_interval - 3; | |
2457 | ||
2458 | if (normalized_interval == 0) | |
2459 | bw_table->interval0_esit_payload += ep_bw->max_esit_payload; | |
2460 | interval_bw = &bw_table->interval_bw[normalized_interval]; | |
2461 | interval_bw->num_packets += ep_bw->num_packets; | |
2462 | switch (udev->speed) { | |
2463 | case USB_SPEED_LOW: | |
2464 | interval_bw->overhead[LS_OVERHEAD_TYPE] += 1; | |
2465 | break; | |
2466 | case USB_SPEED_FULL: | |
2467 | interval_bw->overhead[FS_OVERHEAD_TYPE] += 1; | |
2468 | break; | |
2469 | case USB_SPEED_HIGH: | |
2470 | interval_bw->overhead[HS_OVERHEAD_TYPE] += 1; | |
2471 | break; | |
2472 | case USB_SPEED_SUPER: | |
2473 | case USB_SPEED_SUPER_PLUS: | |
2474 | case USB_SPEED_UNKNOWN: | |
2475 | case USB_SPEED_WIRELESS: | |
2476 | /* Should never happen because only LS/FS/HS endpoints will get | |
2477 | * added to the endpoint list. | |
2478 | */ | |
2479 | return; | |
2480 | } | |
2481 | ||
2482 | if (tt_info) | |
2483 | tt_info->active_eps += 1; | |
2484 | /* Insert the endpoint into the list, largest max packet size first. */ | |
2485 | list_for_each_entry(smaller_ep, &interval_bw->endpoints, | |
2486 | bw_endpoint_list) { | |
2487 | if (ep_bw->max_packet_size >= | |
2488 | smaller_ep->bw_info.max_packet_size) { | |
2489 | /* Add the new ep before the smaller endpoint */ | |
2490 | list_add_tail(&virt_ep->bw_endpoint_list, | |
2491 | &smaller_ep->bw_endpoint_list); | |
2492 | return; | |
2493 | } | |
2494 | } | |
2495 | /* Add the new endpoint at the end of the list. */ | |
2496 | list_add_tail(&virt_ep->bw_endpoint_list, | |
2497 | &interval_bw->endpoints); | |
2498 | } | |
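/*
 * Note on the insertion above (illustrative): each interval's endpoint
 * list is kept sorted by max packet size, largest first. If the list
 * holds endpoints with mps 512 and 64 and an endpoint with mps 188 is
 * added, the resulting order is 512, 188, 64. This is what lets
 * xhci_check_bw_table() read endpoints.next as the largest max packet
 * size for the interval.
 */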
2499 | ||
2500 | void xhci_update_tt_active_eps(struct xhci_hcd *xhci, | |
2501 | struct xhci_virt_device *virt_dev, | |
2502 | int old_active_eps) | |
2503 | { | |
2504 | struct xhci_root_port_bw_info *rh_bw_info; | |
2505 | if (!virt_dev->tt_info) | |
2506 | return; | |
2507 | ||
2508 | rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1]; | |
2509 | if (old_active_eps == 0 && | |
2510 | virt_dev->tt_info->active_eps != 0) { | |
2511 | rh_bw_info->num_active_tts += 1; | |
2512 | rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD; | |
2513 | } else if (old_active_eps != 0 && | |
2514 | virt_dev->tt_info->active_eps == 0) { | |
2515 | rh_bw_info->num_active_tts -= 1; | |
2516 | rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD; | |
2517 | } | |
2518 | } | |
2519 | ||
2520 | static int xhci_reserve_bandwidth(struct xhci_hcd *xhci, | |
2521 | struct xhci_virt_device *virt_dev, | |
2522 | struct xhci_container_ctx *in_ctx) | |
2523 | { | |
2524 | struct xhci_bw_info ep_bw_info[31]; | |
2525 | int i; | |
2526 | struct xhci_input_control_ctx *ctrl_ctx; | |
2527 | int old_active_eps = 0; | |
2528 | ||
2529 | if (virt_dev->tt_info) | |
2530 | old_active_eps = virt_dev->tt_info->active_eps; | |
2531 | ||
2532 | ctrl_ctx = xhci_get_input_control_ctx(in_ctx); | |
2533 | if (!ctrl_ctx) { | |
2534 | xhci_warn(xhci, "%s: Could not get input context, bad type.\n", | |
2535 | __func__); | |
2536 | return -ENOMEM; | |
2537 | } | |
2538 | ||
2539 | for (i = 0; i < 31; i++) { | |
2540 | if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i)) | |
2541 | continue; | |
2542 | ||
2543 | /* Make a copy of the BW info in case we need to revert this */ | |
2544 | memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info, | |
2545 | sizeof(ep_bw_info[i])); | |
2546 | /* Drop the endpoint from the interval table if the endpoint is | |
2547 | * being dropped or changed. | |
2548 | */ | |
2549 | if (EP_IS_DROPPED(ctrl_ctx, i)) | |
2550 | xhci_drop_ep_from_interval_table(xhci, | |
2551 | &virt_dev->eps[i].bw_info, | |
2552 | virt_dev->bw_table, | |
2553 | virt_dev->udev, | |
2554 | &virt_dev->eps[i], | |
2555 | virt_dev->tt_info); | |
2556 | } | |
2557 | /* Overwrite the information stored in the endpoints' bw_info */ | |
2558 | xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev); | |
2559 | for (i = 0; i < 31; i++) { | |
2560 | /* Add any changed or added endpoints to the interval table */ | |
2561 | if (EP_IS_ADDED(ctrl_ctx, i)) | |
2562 | xhci_add_ep_to_interval_table(xhci, | |
2563 | &virt_dev->eps[i].bw_info, | |
2564 | virt_dev->bw_table, | |
2565 | virt_dev->udev, | |
2566 | &virt_dev->eps[i], | |
2567 | virt_dev->tt_info); | |
2568 | } | |
2569 | ||
2570 | if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) { | |
2571 | /* Ok, this fits in the bandwidth we have. | |
2572 | * Update the number of active TTs. | |
2573 | */ | |
2574 | xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps); | |
2575 | return 0; | |
2576 | } | |
2577 | ||
2578 | /* We don't have enough bandwidth for this, revert the stored info. */ | |
2579 | for (i = 0; i < 31; i++) { | |
2580 | if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i)) | |
2581 | continue; | |
2582 | ||
2583 | /* Drop the new copies of any added or changed endpoints from | |
2584 | * the interval table. | |
2585 | */ | |
2586 | if (EP_IS_ADDED(ctrl_ctx, i)) { | |
2587 | xhci_drop_ep_from_interval_table(xhci, | |
2588 | &virt_dev->eps[i].bw_info, | |
2589 | virt_dev->bw_table, | |
2590 | virt_dev->udev, | |
2591 | &virt_dev->eps[i], | |
2592 | virt_dev->tt_info); | |
2593 | } | |
2594 | /* Revert the endpoint back to its old information */ | |
2595 | memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i], | |
2596 | sizeof(ep_bw_info[i])); | |
2597 | /* Add any changed or dropped endpoints back into the table */ | |
2598 | if (EP_IS_DROPPED(ctrl_ctx, i)) | |
2599 | xhci_add_ep_to_interval_table(xhci, | |
2600 | &virt_dev->eps[i].bw_info, | |
2601 | virt_dev->bw_table, | |
2602 | virt_dev->udev, | |
2603 | &virt_dev->eps[i], | |
2604 | virt_dev->tt_info); | |
2605 | } | |
2606 | return -ENOMEM; | |
2607 | } | |
2608 | ||
2609 | ||
2610 | /* Issue a configure endpoint command or evaluate context command | |
2611 | * and wait for it to finish. | |
2612 | */ | |
2613 | static int xhci_configure_endpoint(struct xhci_hcd *xhci, | |
2614 | struct usb_device *udev, | |
2615 | struct xhci_command *command, | |
2616 | bool ctx_change, bool must_succeed) | |
2617 | { | |
2618 | int ret; | |
2619 | unsigned long flags; | |
2620 | struct xhci_input_control_ctx *ctrl_ctx; | |
2621 | struct xhci_virt_device *virt_dev; | |
2622 | ||
2623 | if (!command) | |
2624 | return -EINVAL; | |
2625 | ||
2626 | spin_lock_irqsave(&xhci->lock, flags); | |
2627 | virt_dev = xhci->devs[udev->slot_id]; | |
2628 | ||
2629 | ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx); | |
2630 | if (!ctrl_ctx) { | |
2631 | spin_unlock_irqrestore(&xhci->lock, flags); | |
2632 | xhci_warn(xhci, "%s: Could not get input context, bad type.\n", | |
2633 | __func__); | |
2634 | return -ENOMEM; | |
2635 | } | |
2636 | ||
2637 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) && | |
2638 | xhci_reserve_host_resources(xhci, ctrl_ctx)) { | |
2639 | spin_unlock_irqrestore(&xhci->lock, flags); | |
2640 | xhci_warn(xhci, "Not enough host resources, " | |
2641 | "active endpoint contexts = %u\n", | |
2642 | xhci->num_active_eps); | |
2643 | return -ENOMEM; | |
2644 | } | |
2645 | if ((xhci->quirks & XHCI_SW_BW_CHECKING) && | |
2646 | xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) { | |
2647 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) | |
2648 | xhci_free_host_resources(xhci, ctrl_ctx); | |
2649 | spin_unlock_irqrestore(&xhci->lock, flags); | |
2650 | xhci_warn(xhci, "Not enough bandwidth\n"); | |
2651 | return -ENOMEM; | |
2652 | } | |
2653 | ||
2654 | if (!ctx_change) | |
2655 | ret = xhci_queue_configure_endpoint(xhci, command, | |
2656 | command->in_ctx->dma, | |
2657 | udev->slot_id, must_succeed); | |
2658 | else | |
2659 | ret = xhci_queue_evaluate_context(xhci, command, | |
2660 | command->in_ctx->dma, | |
2661 | udev->slot_id, must_succeed); | |
2662 | if (ret < 0) { | |
2663 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) | |
2664 | xhci_free_host_resources(xhci, ctrl_ctx); | |
2665 | spin_unlock_irqrestore(&xhci->lock, flags); | |
2666 | xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, | |
2667 | "FIXME allocate a new ring segment"); | |
2668 | return -ENOMEM; | |
2669 | } | |
2670 | xhci_ring_cmd_db(xhci); | |
2671 | spin_unlock_irqrestore(&xhci->lock, flags); | |
2672 | ||
2673 | /* Wait for the configure endpoint command to complete */ | |
2674 | wait_for_completion(command->completion); | |
2675 | ||
2676 | if (!ctx_change) | |
2677 | ret = xhci_configure_endpoint_result(xhci, udev, | |
2678 | &command->status); | |
2679 | else | |
2680 | ret = xhci_evaluate_context_result(xhci, udev, | |
2681 | &command->status); | |
2682 | ||
2683 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { | |
2684 | spin_lock_irqsave(&xhci->lock, flags); | |
2685 | /* If the command failed, remove the reserved resources. | |
2686 | * Otherwise, clean up the estimate to include dropped eps. | |
2687 | */ | |
2688 | if (ret) | |
2689 | xhci_free_host_resources(xhci, ctrl_ctx); | |
2690 | else | |
2691 | xhci_finish_resource_reservation(xhci, ctrl_ctx); | |
2692 | spin_unlock_irqrestore(&xhci->lock, flags); | |
2693 | } | |
2694 | return ret; | |
2695 | } | |
2696 | ||
2697 | static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci, | |
2698 | struct xhci_virt_device *vdev, int i) | |
2699 | { | |
2700 | struct xhci_virt_ep *ep = &vdev->eps[i]; | |
2701 | ||
2702 | if (ep->ep_state & EP_HAS_STREAMS) { | |
2703 | xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on set_interface, freeing streams.\n", | |
2704 | xhci_get_endpoint_address(i)); | |
2705 | xhci_free_stream_info(xhci, ep->stream_info); | |
2706 | ep->stream_info = NULL; | |
2707 | ep->ep_state &= ~EP_HAS_STREAMS; | |
2708 | } | |
2709 | } | |
2710 | ||
2711 | /* Called after one or more calls to xhci_add_endpoint() or | |
2712 | * xhci_drop_endpoint(). If this call fails, the USB core is expected | |
2713 | * to call xhci_reset_bandwidth(). | |
2714 | * | |
2715 | * Since we are in the middle of changing either configuration or | |
2716 | * installing a new alt setting, the USB core won't allow URBs to be | |
2717 | * enqueued for any endpoint on the old config or interface. Nothing | |
2718 | * else should be touching the xhci->devs[slot_id] structure, so we | |
2719 | * don't need to take the xhci->lock for manipulating that. | |
2720 | */ | |
2721 | int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) | |
2722 | { | |
2723 | int i; | |
2724 | int ret = 0; | |
2725 | struct xhci_hcd *xhci; | |
2726 | struct xhci_virt_device *virt_dev; | |
2727 | struct xhci_input_control_ctx *ctrl_ctx; | |
2728 | struct xhci_slot_ctx *slot_ctx; | |
2729 | struct xhci_command *command; | |
2730 | ||
2731 | ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); | |
2732 | if (ret <= 0) | |
2733 | return ret; | |
2734 | xhci = hcd_to_xhci(hcd); | |
2735 | if ((xhci->xhc_state & XHCI_STATE_DYING) || | |
2736 | (xhci->xhc_state & XHCI_STATE_REMOVING)) | |
2737 | return -ENODEV; | |
2738 | ||
2739 | xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); | |
2740 | virt_dev = xhci->devs[udev->slot_id]; | |
2741 | ||
2742 | command = xhci_alloc_command(xhci, false, true, GFP_KERNEL); | |
2743 | if (!command) | |
2744 | return -ENOMEM; | |
2745 | ||
2746 | command->in_ctx = virt_dev->in_ctx; | |
2747 | ||
2748 | /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */ | |
2749 | ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx); | |
2750 | if (!ctrl_ctx) { | |
2751 | xhci_warn(xhci, "%s: Could not get input context, bad type.\n", | |
2752 | __func__); | |
2753 | ret = -ENOMEM; | |
2754 | goto command_cleanup; | |
2755 | } | |
2756 | ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); | |
2757 | ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG); | |
2758 | ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG)); | |
2759 | ||
2760 | /* Don't issue the command if there are no endpoints to update. */ | |
2761 | if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) && | |
2762 | ctrl_ctx->drop_flags == 0) { | |
2763 | ret = 0; | |
2764 | goto command_cleanup; | |
2765 | } | |
2766 | /* Fix up Context Entries field. Minimum value is EP0 == BIT(1). */ | |
2767 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); | |
2768 | for (i = 31; i >= 1; i--) { | |
2769 | __le32 le32 = cpu_to_le32(BIT(i)); | |
2770 | ||
2771 | if ((virt_dev->eps[i-1].ring && !(ctrl_ctx->drop_flags & le32)) | |
2772 | || (ctrl_ctx->add_flags & le32) || i == 1) { | |
2773 | slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK); | |
2774 | slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(i)); | |
2775 | break; | |
2776 | } | |
2777 | } | |
2778 | xhci_dbg(xhci, "New Input Control Context:\n"); | |
2779 | xhci_dbg_ctx(xhci, virt_dev->in_ctx, | |
2780 | LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info))); | |
2781 | ||
2782 | ret = xhci_configure_endpoint(xhci, udev, command, | |
2783 | false, false); | |
2784 | if (ret) | |
2785 | /* Caller should call reset_bandwidth() */ | |
2786 | goto command_cleanup; | |
2787 | ||
2788 | xhci_dbg(xhci, "Output context after successful config ep cmd:\n"); | |
2789 | xhci_dbg_ctx(xhci, virt_dev->out_ctx, | |
2790 | LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info))); | |
2791 | ||
2792 | /* Free any rings that were dropped, but not changed. */ | |
2793 | for (i = 1; i < 31; ++i) { | |
2794 | if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) && | |
2795 | !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) { | |
2796 | xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); | |
2797 | xhci_check_bw_drop_ep_streams(xhci, virt_dev, i); | |
2798 | } | |
2799 | } | |
2800 | xhci_zero_in_ctx(xhci, virt_dev); | |
2801 | /* | |
2802 | * Install any rings for completely new endpoints or changed endpoints, | |
2803 | * and free or cache any old rings from changed endpoints. | |
2804 | */ | |
2805 | for (i = 1; i < 31; ++i) { | |
2806 | if (!virt_dev->eps[i].new_ring) | |
2807 | continue; | |
2808 | /* Only cache or free the old ring if it exists. | |
2809 | * It may not if this is the first add of an endpoint. | |
2810 | */ | |
2811 | if (virt_dev->eps[i].ring) { | |
2812 | xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); | |
2813 | } | |
2814 | xhci_check_bw_drop_ep_streams(xhci, virt_dev, i); | |
2815 | virt_dev->eps[i].ring = virt_dev->eps[i].new_ring; | |
2816 | virt_dev->eps[i].new_ring = NULL; | |
2817 | } | |
2818 | command_cleanup: | |
2819 | kfree(command->completion); | |
2820 | kfree(command); | |
2821 | ||
2822 | return ret; | |
2823 | } | |
2824 | ||
2825 | void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) | |
2826 | { | |
2827 | struct xhci_hcd *xhci; | |
2828 | struct xhci_virt_device *virt_dev; | |
2829 | int i, ret; | |
2830 | ||
2831 | ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); | |
2832 | if (ret <= 0) | |
2833 | return; | |
2834 | xhci = hcd_to_xhci(hcd); | |
2835 | ||
2836 | xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); | |
2837 | virt_dev = xhci->devs[udev->slot_id]; | |
2838 | /* Free any rings allocated for added endpoints */ | |
2839 | for (i = 0; i < 31; ++i) { | |
2840 | if (virt_dev->eps[i].new_ring) { | |
2841 | xhci_ring_free(xhci, virt_dev->eps[i].new_ring); | |
2842 | virt_dev->eps[i].new_ring = NULL; | |
2843 | } | |
2844 | } | |
2845 | xhci_zero_in_ctx(xhci, virt_dev); | |
2846 | } | |
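/*
 * Sketch of the contract the functions above implement (the caller is the
 * USB core, e.g. via usb_set_interface(); old_ep/new_ep are hypothetical):
 *
 *	xhci_drop_endpoint(hcd, udev, old_ep);
 *	xhci_add_endpoint(hcd, udev, new_ep);
 *	ret = xhci_check_bandwidth(hcd, udev);
 *	if (ret)
 *		xhci_reset_bandwidth(hcd, udev);   - discard staged changes
 */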
2847 | ||
2848 | static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci, | |
2849 | struct xhci_container_ctx *in_ctx, | |
2850 | struct xhci_container_ctx *out_ctx, | |
2851 | struct xhci_input_control_ctx *ctrl_ctx, | |
2852 | u32 add_flags, u32 drop_flags) | |
2853 | { | |
2854 | ctrl_ctx->add_flags = cpu_to_le32(add_flags); | |
2855 | ctrl_ctx->drop_flags = cpu_to_le32(drop_flags); | |
2856 | xhci_slot_copy(xhci, in_ctx, out_ctx); | |
2857 | ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); | |
2858 | ||
2859 | xhci_dbg(xhci, "Input Context:\n"); | |
2860 | xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags)); | |
2861 | } | |
2862 | ||
2863 | static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci, | |
2864 | unsigned int slot_id, unsigned int ep_index, | |
2865 | struct xhci_dequeue_state *deq_state) | |
2866 | { | |
2867 | struct xhci_input_control_ctx *ctrl_ctx; | |
2868 | struct xhci_container_ctx *in_ctx; | |
2869 | struct xhci_ep_ctx *ep_ctx; | |
2870 | u32 added_ctxs; | |
2871 | dma_addr_t addr; | |
2872 | ||
2873 | in_ctx = xhci->devs[slot_id]->in_ctx; | |
2874 | ctrl_ctx = xhci_get_input_control_ctx(in_ctx); | |
2875 | if (!ctrl_ctx) { | |
2876 | xhci_warn(xhci, "%s: Could not get input context, bad type.\n", | |
2877 | __func__); | |
2878 | return; | |
2879 | } | |
2880 | ||
2881 | xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx, | |
2882 | xhci->devs[slot_id]->out_ctx, ep_index); | |
2883 | ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index); | |
2884 | addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg, | |
2885 | deq_state->new_deq_ptr); | |
2886 | if (addr == 0) { | |
2887 | xhci_warn(xhci, "WARN Cannot submit config ep after " | |
2888 | "reset ep command\n"); | |
2889 | xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n", | |
2890 | deq_state->new_deq_seg, | |
2891 | deq_state->new_deq_ptr); | |
2892 | return; | |
2893 | } | |
2894 | ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state); | |
2895 | ||
2896 | added_ctxs = xhci_get_endpoint_flag_from_index(ep_index); | |
2897 | xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx, | |
2898 | xhci->devs[slot_id]->out_ctx, ctrl_ctx, | |
2899 | added_ctxs, added_ctxs); | |
2900 | } | |
2901 | ||
2902 | void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, | |
2903 | unsigned int ep_index, struct xhci_td *td) | |
2904 | { | |
2905 | struct xhci_dequeue_state deq_state; | |
2906 | struct xhci_virt_ep *ep; | |
2907 | struct usb_device *udev = td->urb->dev; | |
2908 | ||
2909 | xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, | |
2910 | "Cleaning up stalled endpoint ring"); | |
2911 | ep = &xhci->devs[udev->slot_id]->eps[ep_index]; | |
2912 | /* We need to move the HW's dequeue pointer past this TD, | |
2913 | * or it will attempt to resend it on the next doorbell ring. | |
2914 | */ | |
2915 | xhci_find_new_dequeue_state(xhci, udev->slot_id, | |
2916 | ep_index, ep->stopped_stream, td, &deq_state); | |
2917 | ||
2918 | if (!deq_state.new_deq_ptr || !deq_state.new_deq_seg) | |
2919 | return; | |
2920 | ||
2921 | /* HW with the reset endpoint quirk will use the saved dequeue state to | |
2922 | * issue a configure endpoint command later. | |
2923 | */ | |
2924 | if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) { | |
2925 | xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, | |
2926 | "Queueing new dequeue state"); | |
2927 | xhci_queue_new_dequeue_state(xhci, udev->slot_id, | |
2928 | ep_index, ep->stopped_stream, &deq_state); | |
2929 | } else { | |
2930 | /* Better hope no one uses the input context between now and the | |
2931 | * reset endpoint completion! | |
2932 | * XXX: No idea how this hardware will react when stream rings | |
2933 | * are enabled. | |
2934 | */ | |
2935 | xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, | |
2936 | "Setting up input context for " | |
2937 | "configure endpoint command"); | |
2938 | xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id, | |
2939 | ep_index, &deq_state); | |
2940 | } | |
2941 | } | |
2942 | ||
2943 | /* Called when clearing a halted device. The core should have sent the control | 
2944 | * message to clear the device halt condition. The host side of the halt should | |
2945 | * already be cleared with a reset endpoint command issued when the STALL tx | |
2946 | * event was received. | |
2947 | * | |
2948 | * Context: in_interrupt | |
2949 | */ | |
2950 | ||
2951 | void xhci_endpoint_reset(struct usb_hcd *hcd, | |
2952 | struct usb_host_endpoint *ep) | |
2953 | { | |
2954 | struct xhci_hcd *xhci; | |
2955 | ||
2956 | xhci = hcd_to_xhci(hcd); | |
2957 | ||
2958 | /* | |
2959 | * We might need to implement the config ep cmd in xhci spec 4.8.1 note: | 
2960 | * The Reset Endpoint Command may only be issued to endpoints in the | 
2961 | * Halted state. If software wishes to reset the Data Toggle or Sequence | 
2962 | * Number of an endpoint that isn't in the Halted state, then software | 
2963 | * may issue a Configure Endpoint Command with the Drop and Add bits set | 
2964 | * for the target endpoint that is in the Stopped state. | 
2965 | */ | |
2966 | ||
2967 | /* For now just print debug to follow the situation */ | |
2968 | xhci_dbg(xhci, "Endpoint 0x%x ep reset callback called\n", | |
2969 | ep->desc.bEndpointAddress); | |
2970 | } | |
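/*
 * A minimal sketch (not wired up anywhere) of the Configure Endpoint
 * workaround described in the note above: drop and re-add the same endpoint
 * in one command to reset its Data Toggle/Sequence Number while it is
 * Stopped.  It only uses helpers that already exist in this file, but the
 * flow itself is hypothetical and untested, hence the #if 0.
 */
#if 0
static int xhci_sketch_reset_seq_num(struct xhci_hcd *xhci,
		struct usb_device *udev, unsigned int ep_index)
{
	struct xhci_virt_device *vdev = xhci->devs[udev->slot_id];
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_command *cmd;
	u32 ep_flag = xhci_get_endpoint_flag_from_index(ep_index);
	int ret;

	cmd = xhci_alloc_command(xhci, true, true, GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;
	ctrl_ctx = xhci_get_input_control_ctx(cmd->in_ctx);
	if (!ctrl_ctx) {
		xhci_free_command(xhci, cmd);
		return -EINVAL;
	}
	/* Copy the current endpoint state, then drop and add it back. */
	xhci_endpoint_copy(xhci, cmd->in_ctx, vdev->out_ctx, ep_index);
	xhci_setup_input_ctx_for_config_ep(xhci, cmd->in_ctx, vdev->out_ctx,
			ctrl_ctx, ep_flag, ep_flag);
	ret = xhci_configure_endpoint(xhci, udev, cmd, false, false);
	xhci_free_command(xhci, cmd);
	return ret;
}
#endif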
2971 | ||
2972 | static int xhci_check_streams_endpoint(struct xhci_hcd *xhci, | |
2973 | struct usb_device *udev, struct usb_host_endpoint *ep, | |
2974 | unsigned int slot_id) | |
2975 | { | |
2976 | int ret; | |
2977 | unsigned int ep_index; | |
2978 | unsigned int ep_state; | |
2979 | ||
2980 | if (!ep) | |
2981 | return -EINVAL; | |
2982 | ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__); | |
2983 | if (ret <= 0) | |
2984 | return -EINVAL; | |
2985 | if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) { | |
2986 | xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion" | |
2987 | " descriptor for ep 0x%x does not support streams\n", | |
2988 | ep->desc.bEndpointAddress); | |
2989 | return -EINVAL; | |
2990 | } | |
2991 | ||
2992 | ep_index = xhci_get_endpoint_index(&ep->desc); | |
2993 | ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; | |
2994 | if (ep_state & EP_HAS_STREAMS || | |
2995 | ep_state & EP_GETTING_STREAMS) { | |
2996 | xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x " | |
2997 | "already has streams set up.\n", | |
2998 | ep->desc.bEndpointAddress); | |
2999 | xhci_warn(xhci, "Send email to xHCI maintainer and ask for " | |
3000 | "dynamic stream context array reallocation.\n"); | |
3001 | return -EINVAL; | |
3002 | } | |
3003 | if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) { | |
3004 | xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk " | |
3005 | "endpoint 0x%x; URBs are pending.\n", | |
3006 | ep->desc.bEndpointAddress); | |
3007 | return -EINVAL; | |
3008 | } | |
3009 | return 0; | |
3010 | } | |
3011 | ||
3012 | static void xhci_calculate_streams_entries(struct xhci_hcd *xhci, | |
3013 | unsigned int *num_streams, unsigned int *num_stream_ctxs) | |
3014 | { | |
3015 | unsigned int max_streams; | |
3016 | ||
3017 | /* The stream context array size must be a power of two */ | |
3018 | *num_stream_ctxs = roundup_pow_of_two(*num_streams); | |
3019 | /* | |
3020 | * Find out how many primary stream array entries the host controller | |
3021 | * supports. Later we may use secondary stream arrays (similar to 2nd | |
3022 | * level page entries), but that's an optional feature for xHCI host | |
3023 | * controllers. xHCs must support at least 4 stream IDs. | |
3024 | */ | |
3025 | max_streams = HCC_MAX_PSA(xhci->hcc_params); | |
3026 | if (*num_stream_ctxs > max_streams) { | |
3027 | xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n", | |
3028 | max_streams); | |
3029 | *num_stream_ctxs = max_streams; | |
3030 | *num_streams = max_streams; | |
3031 | } | |
3032 | } | |
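/*
 * Worked example (illustrative numbers): a request for 9 stream IDs
 * (including stream 0) is first rounded up to a 16-entry stream context
 * array.  If HCC_MAX_PSA() reports that this host caps the primary stream
 * array at 8 entries, both values are clamped, so the caller ends up with
 * 8 context entries and 8 usable stream IDs.
 */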
3033 | ||
3034 | /* Returns an error code if one of the endpoints already has streams. | 
3035 | * This does not change any data structures, it only checks and gathers | |
3036 | * information. | |
3037 | */ | |
3038 | static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci, | |
3039 | struct usb_device *udev, | |
3040 | struct usb_host_endpoint **eps, unsigned int num_eps, | |
3041 | unsigned int *num_streams, u32 *changed_ep_bitmask) | |
3042 | { | |
3043 | unsigned int max_streams; | |
3044 | unsigned int endpoint_flag; | |
3045 | int i; | |
3046 | int ret; | |
3047 | ||
3048 | for (i = 0; i < num_eps; i++) { | |
3049 | ret = xhci_check_streams_endpoint(xhci, udev, | |
3050 | eps[i], udev->slot_id); | |
3051 | if (ret < 0) | |
3052 | return ret; | |
3053 | ||
3054 | max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp); | |
3055 | if (max_streams < (*num_streams - 1)) { | |
3056 | xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n", | |
3057 | eps[i]->desc.bEndpointAddress, | |
3058 | max_streams); | |
3059 | *num_streams = max_streams+1; | |
3060 | } | |
3061 | ||
3062 | endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc); | |
3063 | if (*changed_ep_bitmask & endpoint_flag) | |
3064 | return -EINVAL; | |
3065 | *changed_ep_bitmask |= endpoint_flag; | |
3066 | } | |
3067 | return 0; | |
3068 | } | |
3069 | ||
3070 | static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci, | |
3071 | struct usb_device *udev, | |
3072 | struct usb_host_endpoint **eps, unsigned int num_eps) | |
3073 | { | |
3074 | u32 changed_ep_bitmask = 0; | |
3075 | unsigned int slot_id; | |
3076 | unsigned int ep_index; | |
3077 | unsigned int ep_state; | |
3078 | int i; | |
3079 | ||
3080 | slot_id = udev->slot_id; | |
3081 | if (!xhci->devs[slot_id]) | |
3082 | return 0; | |
3083 | ||
3084 | for (i = 0; i < num_eps; i++) { | |
3085 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); | |
3086 | ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; | |
3087 | /* Are streams already being freed for the endpoint? */ | |
3088 | if (ep_state & EP_GETTING_NO_STREAMS) { | |
3089 | xhci_warn(xhci, "WARN Can't disable streams for " | |
3090 | "endpoint 0x%x, " | |
3091 | "streams are being disabled already\n", | |
3092 | eps[i]->desc.bEndpointAddress); | |
3093 | return 0; | |
3094 | } | |
3095 | /* Are there actually any streams to free? */ | |
3096 | if (!(ep_state & EP_HAS_STREAMS) && | |
3097 | !(ep_state & EP_GETTING_STREAMS)) { | |
3098 | xhci_warn(xhci, "WARN Can't disable streams for " | |
3099 | "endpoint 0x%x, " | |
3100 | "streams are already disabled!\n", | |
3101 | eps[i]->desc.bEndpointAddress); | |
3102 | xhci_warn(xhci, "WARN xhci_free_streams() called " | |
3103 | "with non-streams endpoint\n"); | |
3104 | return 0; | |
3105 | } | |
3106 | changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc); | |
3107 | } | |
3108 | return changed_ep_bitmask; | |
3109 | } | |
3110 | ||
3111 | /* | |
3112 | * The USB device drivers use this function (through the HCD interface in USB | |
3113 | * core) to prepare a set of bulk endpoints to use streams. Streams are used to | |
3114 | * coordinate mass storage command queueing across multiple endpoints (basically | |
3115 | * a stream ID == a task ID). | |
3116 | * | |
3117 | * Setting up streams involves allocating the same size stream context array | |
3118 | * for each endpoint and issuing a configure endpoint command for all endpoints. | |
3119 | * | |
3120 | * Don't allow the call to succeed if one endpoint only supports one stream | |
3121 | * (which means it doesn't support streams at all). | |
3122 | * | |
3123 | * Drivers may get less stream IDs than they asked for, if the host controller | |
3124 | * hardware or endpoints claim they can't support the number of requested | |
3125 | * stream IDs. | |
3126 | */ | |
3127 | int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev, | |
3128 | struct usb_host_endpoint **eps, unsigned int num_eps, | |
3129 | unsigned int num_streams, gfp_t mem_flags) | |
3130 | { | |
3131 | int i, ret; | |
3132 | struct xhci_hcd *xhci; | |
3133 | struct xhci_virt_device *vdev; | |
3134 | struct xhci_command *config_cmd; | |
3135 | struct xhci_input_control_ctx *ctrl_ctx; | |
3136 | unsigned int ep_index; | |
3137 | unsigned int num_stream_ctxs; | |
3138 | unsigned int max_packet; | |
3139 | unsigned long flags; | |
3140 | u32 changed_ep_bitmask = 0; | |
3141 | ||
3142 | if (!eps) | |
3143 | return -EINVAL; | |
3144 | ||
3145 | /* Add one to the number of streams requested to account for | |
3146 | * stream 0 that is reserved for xHCI usage. | |
3147 | */ | |
3148 | num_streams += 1; | |
3149 | xhci = hcd_to_xhci(hcd); | |
3150 | xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n", | |
3151 | num_streams); | |
3152 | ||
3153 | /* A MaxPSASize value of 0 (which would encode 2 streams) means streams are not supported */ | 
3154 | if ((xhci->quirks & XHCI_BROKEN_STREAMS) || | |
3155 | HCC_MAX_PSA(xhci->hcc_params) < 4) { | |
3156 | xhci_dbg(xhci, "xHCI controller does not support streams.\n"); | |
3157 | return -ENOSYS; | |
3158 | } | |
3159 | ||
3160 | config_cmd = xhci_alloc_command(xhci, true, true, mem_flags); | |
3161 | if (!config_cmd) { | |
3162 | xhci_dbg(xhci, "Could not allocate xHCI command structure.\n"); | |
3163 | return -ENOMEM; | |
3164 | } | |
3165 | ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx); | |
3166 | if (!ctrl_ctx) { | |
3167 | xhci_warn(xhci, "%s: Could not get input context, bad type.\n", | |
3168 | __func__); | |
3169 | xhci_free_command(xhci, config_cmd); | |
3170 | return -ENOMEM; | |
3171 | } | |
3172 | ||
3173 | /* Check to make sure all endpoints are not already configured for | |
3174 | * streams. While we're at it, find the maximum number of streams that | |
3175 | * all the endpoints will support and check for duplicate endpoints. | |
3176 | */ | |
3177 | spin_lock_irqsave(&xhci->lock, flags); | |
3178 | ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps, | |
3179 | num_eps, &num_streams, &changed_ep_bitmask); | |
3180 | if (ret < 0) { | |
3181 | xhci_free_command(xhci, config_cmd); | |
3182 | spin_unlock_irqrestore(&xhci->lock, flags); | |
3183 | return ret; | |
3184 | } | |
3185 | if (num_streams <= 1) { | |
3186 | xhci_warn(xhci, "WARN: endpoints can't handle " | |
3187 | "more than one stream.\n"); | |
3188 | xhci_free_command(xhci, config_cmd); | |
3189 | spin_unlock_irqrestore(&xhci->lock, flags); | |
3190 | return -EINVAL; | |
3191 | } | |
3192 | vdev = xhci->devs[udev->slot_id]; | |
3193 | /* Mark each endpoint as being in transition, so | |
3194 | * xhci_urb_enqueue() will reject all URBs. | |
3195 | */ | |
3196 | for (i = 0; i < num_eps; i++) { | |
3197 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); | |
3198 | vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS; | |
3199 | } | |
3200 | spin_unlock_irqrestore(&xhci->lock, flags); | |
3201 | ||
3202 | /* Setup internal data structures and allocate HW data structures for | |
3203 | * streams (but don't install the HW structures in the input context | |
3204 | * until we're sure all memory allocation succeeded). | |
3205 | */ | |
3206 | xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs); | |
3207 | xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n", | |
3208 | num_stream_ctxs, num_streams); | |
3209 | ||
3210 | for (i = 0; i < num_eps; i++) { | |
3211 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); | |
3212 | max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&eps[i]->desc)); | |
3213 | vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci, | |
3214 | num_stream_ctxs, | |
3215 | num_streams, | |
3216 | max_packet, mem_flags); | |
3217 | if (!vdev->eps[ep_index].stream_info) | |
3218 | goto cleanup; | |
3219 | /* Set maxPstreams in endpoint context and update deq ptr to | |
3220 | * point to stream context array. FIXME | |
3221 | */ | |
3222 | } | |
3223 | ||
3224 | /* Set up the input context for a configure endpoint command. */ | |
3225 | for (i = 0; i < num_eps; i++) { | |
3226 | struct xhci_ep_ctx *ep_ctx; | |
3227 | ||
3228 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); | |
3229 | ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index); | |
3230 | ||
3231 | xhci_endpoint_copy(xhci, config_cmd->in_ctx, | |
3232 | vdev->out_ctx, ep_index); | |
3233 | xhci_setup_streams_ep_input_ctx(xhci, ep_ctx, | |
3234 | vdev->eps[ep_index].stream_info); | |
3235 | } | |
3236 | /* Tell the HW to drop its old copy of the endpoint context info | |
3237 | * and add the updated copy from the input context. | |
3238 | */ | |
3239 | xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx, | |
3240 | vdev->out_ctx, ctrl_ctx, | |
3241 | changed_ep_bitmask, changed_ep_bitmask); | |
3242 | ||
3243 | /* Issue and wait for the configure endpoint command */ | |
3244 | ret = xhci_configure_endpoint(xhci, udev, config_cmd, | |
3245 | false, false); | |
3246 | ||
3247 | /* xHC rejected the configure endpoint command for some reason, so we | |
3248 | * leave the old ring intact and free our internal streams data | |
3249 | * structure. | |
3250 | */ | |
3251 | if (ret < 0) | |
3252 | goto cleanup; | |
3253 | ||
3254 | spin_lock_irqsave(&xhci->lock, flags); | |
3255 | for (i = 0; i < num_eps; i++) { | |
3256 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); | |
3257 | vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS; | |
3258 | xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n", | |
3259 | udev->slot_id, ep_index); | |
3260 | vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS; | |
3261 | } | |
3262 | xhci_free_command(xhci, config_cmd); | |
3263 | spin_unlock_irqrestore(&xhci->lock, flags); | |
3264 | ||
3265 | /* Subtract 1 for stream 0, which drivers can't use */ | |
3266 | return num_streams - 1; | |
3267 | ||
3268 | cleanup: | |
3269 | /* If it didn't work, free the streams! */ | |
3270 | for (i = 0; i < num_eps; i++) { | |
3271 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); | |
3272 | xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info); | |
3273 | vdev->eps[ep_index].stream_info = NULL; | |
3274 | /* FIXME Unset maxPstreams in endpoint context and | |
3275 | * update deq ptr to point to normal stream ring. | 
3276 | */ | |
3277 | vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS; | |
3278 | vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS; | |
3279 | xhci_endpoint_zero(xhci, vdev, eps[i]); | |
3280 | } | |
3281 | xhci_free_command(xhci, config_cmd); | |
3282 | return -ENOMEM; | |
3283 | } | |
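/*
 * Hypothetical driver-side usage of the streams API, going through the USB
 * core wrapper rather than this HCD entry point directly.  The endpoint
 * variables and the request for 16 streams are made up for the example.
 */
#if 0
static int example_enable_streams(struct usb_interface *intf,
		struct usb_host_endpoint *bulk_in,
		struct usb_host_endpoint *bulk_out)
{
	struct usb_host_endpoint *eps[] = { bulk_in, bulk_out };
	int streams;

	streams = usb_alloc_streams(intf, eps, ARRAY_SIZE(eps), 16,
			GFP_KERNEL);
	if (streams <= 0)
		return streams;	/* no stream support; fall back */
	/* Tag each URB with a stream ID in [1, streams] before submitting. */
	return streams;
}
#endif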
3284 | ||
3285 | /* Transition the endpoint from using streams to being a "normal" endpoint | |
3286 | * without streams. | |
3287 | * | |
3288 | * Modify the endpoint context state, submit a configure endpoint command, | |
3289 | * and free all endpoint rings for streams if that completes successfully. | |
3290 | */ | |
3291 | int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev, | |
3292 | struct usb_host_endpoint **eps, unsigned int num_eps, | |
3293 | gfp_t mem_flags) | |
3294 | { | |
3295 | int i, ret; | |
3296 | struct xhci_hcd *xhci; | |
3297 | struct xhci_virt_device *vdev; | |
3298 | struct xhci_command *command; | |
3299 | struct xhci_input_control_ctx *ctrl_ctx; | |
3300 | unsigned int ep_index; | |
3301 | unsigned long flags; | |
3302 | u32 changed_ep_bitmask; | |
3303 | ||
3304 | xhci = hcd_to_xhci(hcd); | |
3305 | vdev = xhci->devs[udev->slot_id]; | |
3306 | ||
3307 | /* Set up a configure endpoint command to remove the streams rings */ | |
3308 | spin_lock_irqsave(&xhci->lock, flags); | |
3309 | changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci, | |
3310 | udev, eps, num_eps); | |
3311 | if (changed_ep_bitmask == 0) { | |
3312 | spin_unlock_irqrestore(&xhci->lock, flags); | |
3313 | return -EINVAL; | |
3314 | } | |
3315 | ||
3316 | /* Use the xhci_command structure from the first endpoint. We may have | |
3317 | * allocated too many, but the driver may call xhci_free_streams() for | |
3318 | * each endpoint it grouped into one call to xhci_alloc_streams(). | |
3319 | */ | |
3320 | ep_index = xhci_get_endpoint_index(&eps[0]->desc); | |
3321 | command = vdev->eps[ep_index].stream_info->free_streams_command; | |
3322 | ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx); | |
3323 | if (!ctrl_ctx) { | |
3324 | spin_unlock_irqrestore(&xhci->lock, flags); | |
3325 | xhci_warn(xhci, "%s: Could not get input context, bad type.\n", | |
3326 | __func__); | |
3327 | return -EINVAL; | |
3328 | } | |
3329 | ||
3330 | for (i = 0; i < num_eps; i++) { | |
3331 | struct xhci_ep_ctx *ep_ctx; | |
3332 | ||
3333 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); | |
3334 | ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index); | |
3335 | xhci->devs[udev->slot_id]->eps[ep_index].ep_state |= | |
3336 | EP_GETTING_NO_STREAMS; | |
3337 | ||
3338 | xhci_endpoint_copy(xhci, command->in_ctx, | |
3339 | vdev->out_ctx, ep_index); | |
3340 | xhci_setup_no_streams_ep_input_ctx(ep_ctx, | |
3341 | &vdev->eps[ep_index]); | |
3342 | } | |
3343 | xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx, | |
3344 | vdev->out_ctx, ctrl_ctx, | |
3345 | changed_ep_bitmask, changed_ep_bitmask); | |
3346 | spin_unlock_irqrestore(&xhci->lock, flags); | |
3347 | ||
3348 | /* Issue and wait for the configure endpoint command, | |
3349 | * which must succeed. | |
3350 | */ | |
3351 | ret = xhci_configure_endpoint(xhci, udev, command, | |
3352 | false, true); | |
3353 | ||
3354 | /* xHC rejected the configure endpoint command for some reason, so we | |
3355 | * leave the streams rings intact. | |
3356 | */ | |
3357 | if (ret < 0) | |
3358 | return ret; | |
3359 | ||
3360 | spin_lock_irqsave(&xhci->lock, flags); | |
3361 | for (i = 0; i < num_eps; i++) { | |
3362 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); | |
3363 | xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info); | |
3364 | vdev->eps[ep_index].stream_info = NULL; | |
3365 | /* FIXME Unset maxPstreams in endpoint context and | |
3366 | * update deq ptr to point to normal stream ring. | 
3367 | */ | |
3368 | vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS; | |
3369 | vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS; | |
3370 | } | |
3371 | spin_unlock_irqrestore(&xhci->lock, flags); | |
3372 | ||
3373 | return 0; | |
3374 | } | |
3375 | ||
3376 | /* | |
3377 | * Deletes endpoint resources for endpoints that were active before a Reset | |
3378 | * Device command, or a Disable Slot command. The Reset Device command leaves | |
3379 | * the control endpoint intact, whereas the Disable Slot command deletes it. | |
3380 | * | |
3381 | * Must be called with xhci->lock held. | |
3382 | */ | |
3383 | void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci, | |
3384 | struct xhci_virt_device *virt_dev, bool drop_control_ep) | |
3385 | { | |
3386 | int i; | |
3387 | unsigned int num_dropped_eps = 0; | |
3388 | unsigned int drop_flags = 0; | |
3389 | ||
3390 | for (i = (drop_control_ep ? 0 : 1); i < 31; i++) { | |
3391 | if (virt_dev->eps[i].ring) { | |
3392 | drop_flags |= 1 << i; | |
3393 | num_dropped_eps++; | |
3394 | } | |
3395 | } | |
3396 | xhci->num_active_eps -= num_dropped_eps; | |
3397 | if (num_dropped_eps) | |
3398 | xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, | |
3399 | "Dropped %u ep ctxs, flags = 0x%x, " | |
3400 | "%u now active.", | |
3401 | num_dropped_eps, drop_flags, | |
3402 | xhci->num_active_eps); | |
3403 | } | |
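/*
 * Example (made-up state): if only eps[2] and eps[4] still have rings, the
 * loop above computes drop_flags = (1 << 2) | (1 << 4) = 0x14 and
 * num_dropped_eps = 2, and num_active_eps shrinks by two.  Note this
 * bitmask is only used for the debug message, not sent to the hardware.
 */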
3404 | ||
3405 | /* | |
3406 | * This submits a Reset Device Command, which will set the device state to 0, | |
3407 | * set the device address to 0, and disable all the endpoints except the default | |
3408 | * control endpoint. The USB core should come back and call | |
3409 | * xhci_address_device(), and then re-set up the configuration. If this is | |
3410 | * called because of a usb_reset_and_verify_device(), then the old alternate | |
3411 | * settings will be re-installed through the normal bandwidth allocation | |
3412 | * functions. | |
3413 | * | |
3414 | * Wait for the Reset Device command to finish. Remove all structures | |
3415 | * associated with the endpoints that were disabled. Clear the input device | |
3416 | * structure? Cache the rings? Reset the control endpoint 0 max packet size? | |
3417 | * | |
3418 | * If the virt_dev to be reset does not exist or does not match the udev, | |
3419 | * it means the device is lost, possibly due to an xHC restore error and | 
3420 | * re-initialization during S3/S4. In this case, call xhci_alloc_dev() to | |
3421 | * re-allocate the device. | |
3422 | */ | |
3423 | int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev) | |
3424 | { | |
3425 | int ret, i; | |
3426 | unsigned long flags; | |
3427 | struct xhci_hcd *xhci; | |
3428 | unsigned int slot_id; | |
3429 | struct xhci_virt_device *virt_dev; | |
3430 | struct xhci_command *reset_device_cmd; | |
3431 | int last_freed_endpoint; | |
3432 | struct xhci_slot_ctx *slot_ctx; | |
3433 | int old_active_eps = 0; | |
3434 | ||
3435 | ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__); | |
3436 | if (ret <= 0) | |
3437 | return ret; | |
3438 | xhci = hcd_to_xhci(hcd); | |
3439 | slot_id = udev->slot_id; | |
3440 | virt_dev = xhci->devs[slot_id]; | |
3441 | if (!virt_dev) { | |
3442 | xhci_dbg(xhci, "The device to be reset with slot ID %u does " | |
3443 | "not exist. Re-allocate the device\n", slot_id); | |
3444 | ret = xhci_alloc_dev(hcd, udev); | |
3445 | if (ret == 1) | |
3446 | return 0; | |
3447 | else | |
3448 | return -EINVAL; | |
3449 | } | |
3450 | ||
3451 | if (virt_dev->tt_info) | |
3452 | old_active_eps = virt_dev->tt_info->active_eps; | |
3453 | ||
3454 | if (virt_dev->udev != udev) { | |
3455 | /* If the virt_dev and the udev do not match, this virt_dev | 
3456 | * may belong to another udev. | |
3457 | * Re-allocate the device. | |
3458 | */ | |
3459 | xhci_dbg(xhci, "The device to be reset with slot ID %u does " | |
3460 | "not match the udev. Re-allocate the device\n", | |
3461 | slot_id); | |
3462 | ret = xhci_alloc_dev(hcd, udev); | |
3463 | if (ret == 1) | |
3464 | return 0; | |
3465 | else | |
3466 | return -EINVAL; | |
3467 | } | |
3468 | ||
3469 | /* If device is not setup, there is no point in resetting it */ | |
3470 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); | |
3471 | if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) == | |
3472 | SLOT_STATE_DISABLED) | |
3473 | return 0; | |
3474 | ||
3475 | xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id); | |
3476 | /* Allocate the command structure that holds the struct completion. | |
3477 | * Assume we're in process context, since the normal device reset | |
3478 | * process has to wait for the device anyway. Storage devices are | |
3479 | * reset as part of error handling, so use GFP_NOIO instead of | |
3480 | * GFP_KERNEL. | |
3481 | */ | |
3482 | reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO); | |
3483 | if (!reset_device_cmd) { | |
3484 | xhci_dbg(xhci, "Couldn't allocate command structure.\n"); | |
3485 | return -ENOMEM; | |
3486 | } | |
3487 | ||
3488 | /* Attempt to submit the Reset Device command to the command ring */ | |
3489 | spin_lock_irqsave(&xhci->lock, flags); | |
3490 | ||
3491 | ret = xhci_queue_reset_device(xhci, reset_device_cmd, slot_id); | |
3492 | if (ret) { | |
3493 | xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); | |
3494 | spin_unlock_irqrestore(&xhci->lock, flags); | |
3495 | goto command_cleanup; | |
3496 | } | |
3497 | xhci_ring_cmd_db(xhci); | |
3498 | spin_unlock_irqrestore(&xhci->lock, flags); | |
3499 | ||
3500 | /* Wait for the Reset Device command to finish */ | |
3501 | wait_for_completion(reset_device_cmd->completion); | |
3502 | ||
3503 | /* The Reset Device command can't fail, according to the 0.95/0.96 spec, | |
3504 | * unless we tried to reset a slot ID that wasn't enabled, | |
3505 | * or the device wasn't in the addressed or configured state. | |
3506 | */ | |
3507 | ret = reset_device_cmd->status; | |
3508 | switch (ret) { | |
3509 | case COMP_CMD_ABORT: | |
3510 | case COMP_CMD_STOP: | |
3511 | xhci_warn(xhci, "Timeout waiting for reset device command\n"); | |
3512 | ret = -ETIME; | |
3513 | goto command_cleanup; | |
3514 | case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */ | |
3515 | case COMP_CTX_STATE: /* 0.96 completion code for same thing */ | |
3516 | xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n", | |
3517 | slot_id, | |
3518 | xhci_get_slot_state(xhci, virt_dev->out_ctx)); | |
3519 | xhci_dbg(xhci, "Not freeing device rings.\n"); | |
3520 | /* Don't treat this as an error. May change my mind later. */ | |
3521 | ret = 0; | |
3522 | goto command_cleanup; | |
3523 | case COMP_SUCCESS: | |
3524 | xhci_dbg(xhci, "Successful reset device command.\n"); | |
3525 | break; | |
3526 | default: | |
3527 | if (xhci_is_vendor_info_code(xhci, ret)) | |
3528 | break; | |
3529 | xhci_warn(xhci, "Unknown completion code %u for " | |
3530 | "reset device command.\n", ret); | |
3531 | ret = -EINVAL; | |
3532 | goto command_cleanup; | |
3533 | } | |
3534 | ||
3535 | /* Free up host controller endpoint resources */ | |
3536 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { | |
3537 | spin_lock_irqsave(&xhci->lock, flags); | |
3538 | /* Don't delete the default control endpoint resources */ | |
3539 | xhci_free_device_endpoint_resources(xhci, virt_dev, false); | |
3540 | spin_unlock_irqrestore(&xhci->lock, flags); | |
3541 | } | |
3542 | ||
3543 | /* Everything but endpoint 0 is disabled, so free or cache the rings. */ | |
3544 | last_freed_endpoint = 1; | |
3545 | for (i = 1; i < 31; ++i) { | |
3546 | struct xhci_virt_ep *ep = &virt_dev->eps[i]; | |
3547 | ||
3548 | if (ep->ep_state & EP_HAS_STREAMS) { | |
3549 | xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on device reset, freeing streams.\n", | |
3550 | xhci_get_endpoint_address(i)); | |
3551 | xhci_free_stream_info(xhci, ep->stream_info); | |
3552 | ep->stream_info = NULL; | |
3553 | ep->ep_state &= ~EP_HAS_STREAMS; | |
3554 | } | |
3555 | ||
3556 | if (ep->ring) { | |
3557 | xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); | |
3558 | last_freed_endpoint = i; | |
3559 | } | |
3560 | if (!list_empty(&virt_dev->eps[i].bw_endpoint_list)) | |
3561 | xhci_drop_ep_from_interval_table(xhci, | |
3562 | &virt_dev->eps[i].bw_info, | |
3563 | virt_dev->bw_table, | |
3564 | udev, | |
3565 | &virt_dev->eps[i], | |
3566 | virt_dev->tt_info); | |
3567 | xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info); | |
3568 | } | |
3569 | /* If necessary, update the number of active TTs on this root port */ | |
3570 | xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps); | |
3571 | ||
3572 | xhci_dbg(xhci, "Output context after successful reset device cmd:\n"); | |
3573 | xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint); | |
3574 | ret = 0; | |
3575 | ||
3576 | command_cleanup: | |
3577 | xhci_free_command(xhci, reset_device_cmd); | |
3578 | return ret; | |
3579 | } | |
3580 | ||
3581 | /* | |
3582 | * At this point, the struct usb_device is about to go away, the device has | |
3583 | * disconnected, and all traffic has been stopped and the endpoints have been | |
3584 | * disabled. Free any HC data structures associated with that device. | |
3585 | */ | |
3586 | void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) | |
3587 | { | |
3588 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | |
3589 | struct xhci_virt_device *virt_dev; | |
3590 | unsigned long flags; | |
3591 | u32 state; | |
3592 | int i, ret; | |
3593 | struct xhci_command *command; | |
3594 | ||
3595 | command = xhci_alloc_command(xhci, false, false, GFP_KERNEL); | |
3596 | if (!command) | |
3597 | return; | |
3598 | ||
3599 | #ifndef CONFIG_USB_DEFAULT_PERSIST | |
3600 | /* | |
3601 | * We called pm_runtime_get_noresume when the device was attached. | |
3602 | * Decrement the counter here to allow controller to runtime suspend | |
3603 | * if no devices remain. | |
3604 | */ | |
3605 | if (xhci->quirks & XHCI_RESET_ON_RESUME) | |
3606 | pm_runtime_put_noidle(hcd->self.controller); | |
3607 | #endif | |
3608 | ||
3609 | ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); | |
3610 | /* If the host is halted due to driver unload, we still need to free the | |
3611 | * device. | |
3612 | */ | |
3613 | if (ret <= 0 && ret != -ENODEV) { | |
3614 | kfree(command); | |
3615 | return; | |
3616 | } | |
3617 | ||
3618 | virt_dev = xhci->devs[udev->slot_id]; | |
3619 | ||
3620 | /* Stop any wayward timer functions (which may grab the lock) */ | |
3621 | for (i = 0; i < 31; ++i) { | |
3622 | virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING; | |
3623 | del_timer_sync(&virt_dev->eps[i].stop_cmd_timer); | |
3624 | } | |
3625 | ||
3626 | spin_lock_irqsave(&xhci->lock, flags); | |
3627 | /* Don't disable the slot if the host controller is dead. */ | |
3628 | state = readl(&xhci->op_regs->status); | |
3629 | if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) || | |
3630 | (xhci->xhc_state & XHCI_STATE_HALTED)) { | |
3631 | xhci_free_virt_device(xhci, udev->slot_id); | |
3632 | spin_unlock_irqrestore(&xhci->lock, flags); | |
3633 | kfree(command); | |
3634 | return; | |
3635 | } | |
3636 | ||
3637 | if (xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT, | |
3638 | udev->slot_id)) { | |
3639 | spin_unlock_irqrestore(&xhci->lock, flags); | |
3640 | xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); | |
3641 | return; | |
3642 | } | |
3643 | xhci_ring_cmd_db(xhci); | |
3644 | spin_unlock_irqrestore(&xhci->lock, flags); | |
3645 | ||
3646 | /* | |
3647 | * Event command completion handler will free any data structures | |
3648 | * associated with the slot. XXX Can free sleep? | |
3649 | */ | |
3650 | } | |
3651 | ||
3652 | /* | |
3653 | * Checks if we have enough host controller resources for the default control | |
3654 | * endpoint. | |
3655 | * | |
3656 | * Must be called with xhci->lock held. | |
3657 | */ | |
3658 | static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci) | |
3659 | { | |
3660 | if (xhci->num_active_eps + 1 > xhci->limit_active_eps) { | |
3661 | xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, | |
3662 | "Not enough ep ctxs: " | |
3663 | "%u active, need to add 1, limit is %u.", | |
3664 | xhci->num_active_eps, xhci->limit_active_eps); | |
3665 | return -ENOMEM; | |
3666 | } | |
3667 | xhci->num_active_eps += 1; | |
3668 | xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, | |
3669 | "Adding 1 ep ctx, %u now active.", | |
3670 | xhci->num_active_eps); | |
3671 | return 0; | |
3672 | } | |
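/*
 * Illustration: on hosts with XHCI_EP_LIMIT_QUIRK (which set a
 * limit_active_eps value at probe time, e.g. 64), reserving the default
 * control endpoint for a new slot fails with -ENOMEM once num_active_eps
 * has already reached the limit; at limit - 1 it succeeds and the count
 * becomes the limit.
 */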
3673 | ||
3674 | ||
3675 | /* | |
3676 | * Returns 0 if the xHC ran out of device slots, the Enable Slot command | |
3677 | * timed out, or allocating memory failed. Returns 1 on success. | |
3678 | */ | |
3679 | int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) | |
3680 | { | |
3681 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | |
3682 | unsigned long flags; | |
3683 | int ret, slot_id; | |
3684 | struct xhci_command *command; | |
3685 | ||
3686 | command = xhci_alloc_command(xhci, false, false, GFP_KERNEL); | |
3687 | if (!command) | |
3688 | return 0; | |
3689 | ||
3690 | /* xhci->slot_id and xhci->addr_dev are not thread-safe */ | |
3691 | mutex_lock(&xhci->mutex); | |
3692 | spin_lock_irqsave(&xhci->lock, flags); | |
3693 | command->completion = &xhci->addr_dev; | |
3694 | ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0); | |
3695 | if (ret) { | |
3696 | spin_unlock_irqrestore(&xhci->lock, flags); | |
3697 | mutex_unlock(&xhci->mutex); | |
3698 | xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); | |
3699 | kfree(command); | |
3700 | return 0; | |
3701 | } | |
3702 | xhci_ring_cmd_db(xhci); | |
3703 | spin_unlock_irqrestore(&xhci->lock, flags); | |
3704 | ||
3705 | wait_for_completion(command->completion); | |
3706 | slot_id = xhci->slot_id; | |
3707 | mutex_unlock(&xhci->mutex); | |
3708 | ||
3709 | if (!slot_id || command->status != COMP_SUCCESS) { | |
3710 | xhci_err(xhci, "Error while assigning device slot ID\n"); | |
3711 | xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n", | |
3712 | HCS_MAX_SLOTS( | |
3713 | readl(&xhci->cap_regs->hcs_params1))); | |
3714 | kfree(command); | |
3715 | return 0; | |
3716 | } | |
3717 | ||
3718 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { | |
3719 | spin_lock_irqsave(&xhci->lock, flags); | |
3720 | ret = xhci_reserve_host_control_ep_resources(xhci); | |
3721 | if (ret) { | |
3722 | spin_unlock_irqrestore(&xhci->lock, flags); | |
3723 | xhci_warn(xhci, "Not enough host resources, " | |
3724 | "active endpoint contexts = %u\n", | |
3725 | xhci->num_active_eps); | |
3726 | goto disable_slot; | |
3727 | } | |
3728 | spin_unlock_irqrestore(&xhci->lock, flags); | |
3729 | } | |
3730 | /* Use GFP_NOIO, since this function can be called from | |
3731 | * xhci_discover_or_reset_device(), which may be called as part of | |
3732 | * mass storage driver error handling. | |
3733 | */ | |
3734 | if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_NOIO)) { | |
3735 | xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n"); | |
3736 | goto disable_slot; | |
3737 | } | |
3738 | udev->slot_id = slot_id; | |
3739 | ||
3740 | #ifndef CONFIG_USB_DEFAULT_PERSIST | |
3741 | /* | |
3742 | * If resetting upon resume, we can't put the controller into runtime | |
3743 | * suspend if there is a device attached. | |
3744 | */ | |
3745 | if (xhci->quirks & XHCI_RESET_ON_RESUME) | |
3746 | pm_runtime_get_noresume(hcd->self.controller); | |
3747 | #endif | |
3748 | ||
3749 | ||
3750 | kfree(command); | |
3751 | /* Is this an LS or FS device under an HS hub? */ | 
3752 | /* Hub or peripheral? */ | 
3753 | return 1; | |
3754 | ||
3755 | disable_slot: | |
3756 | /* Disable slot, if we can do it without mem alloc */ | |
3757 | spin_lock_irqsave(&xhci->lock, flags); | |
3758 | command->completion = NULL; | |
3759 | command->status = 0; | |
3760 | if (!xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT, | |
3761 | udev->slot_id)) | |
3762 | xhci_ring_cmd_db(xhci); | |
3763 | spin_unlock_irqrestore(&xhci->lock, flags); | |
3764 | return 0; | |
3765 | } | |
3766 | ||
3767 | /* | |
3768 | * Issue an Address Device command and optionally send a corresponding | |
3769 | * SetAddress request to the device. | |
3770 | */ | |
3771 | static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, | |
3772 | enum xhci_setup_dev setup) | |
3773 | { | |
3774 | const char *act = setup == SETUP_CONTEXT_ONLY ? "context" : "address"; | |
3775 | unsigned long flags; | |
3776 | struct xhci_virt_device *virt_dev; | |
3777 | int ret = 0; | |
3778 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | |
3779 | struct xhci_slot_ctx *slot_ctx; | |
3780 | struct xhci_input_control_ctx *ctrl_ctx; | |
3781 | u64 temp_64; | |
3782 | struct xhci_command *command = NULL; | |
3783 | ||
3784 | mutex_lock(&xhci->mutex); | |
3785 | ||
3786 | if (xhci->xhc_state) /* dying, removing or halted */ | |
3787 | goto out; | |
3788 | ||
3789 | if (!udev->slot_id) { | |
3790 | xhci_dbg_trace(xhci, trace_xhci_dbg_address, | |
3791 | "Bad Slot ID %d", udev->slot_id); | |
3792 | ret = -EINVAL; | |
3793 | goto out; | |
3794 | } | |
3795 | ||
3796 | virt_dev = xhci->devs[udev->slot_id]; | |
3797 | ||
3798 | if (WARN_ON(!virt_dev)) { | |
3799 | /* | |
3800 | * In a plug/unplug torture test with an NEC controller, | 
3801 | * a NULL-pointer dereference was once observed because virt_dev was NULL. | 
3802 | * Print useful debug rather than crash if it is observed again! | |
3803 | */ | |
3804 | xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n", | |
3805 | udev->slot_id); | |
3806 | ret = -EINVAL; | |
3807 | goto out; | |
3808 | } | |
3809 | ||
3810 | if (setup == SETUP_CONTEXT_ONLY) { | |
3811 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); | |
3812 | if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) == | |
3813 | SLOT_STATE_DEFAULT) { | |
3814 | xhci_dbg(xhci, "Slot already in default state\n"); | |
3815 | goto out; | |
3816 | } | |
3817 | } | |
3818 | ||
3819 | command = xhci_alloc_command(xhci, false, false, GFP_KERNEL); | |
3820 | if (!command) { | |
3821 | ret = -ENOMEM; | |
3822 | goto out; | |
3823 | } | |
3824 | ||
3825 | command->in_ctx = virt_dev->in_ctx; | |
3826 | command->completion = &xhci->addr_dev; | |
3827 | ||
3828 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); | |
3829 | ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx); | |
3830 | if (!ctrl_ctx) { | |
3831 | xhci_warn(xhci, "%s: Could not get input context, bad type.\n", | |
3832 | __func__); | |
3833 | ret = -EINVAL; | |
3834 | goto out; | |
3835 | } | |
3836 | /* | |
3837 | * If this is the first Set Address since device plug-in or | |
3838 | * virt_device reallocation after a resume with an xHCI power loss, | 
3839 | * then set up the slot context. | |
3840 | */ | |
3841 | if (!slot_ctx->dev_info) | |
3842 | xhci_setup_addressable_virt_dev(xhci, udev); | |
3843 | /* Otherwise, update the control endpoint ring enqueue pointer. */ | |
3844 | else | |
3845 | xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev); | |
3846 | ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG); | |
3847 | ctrl_ctx->drop_flags = 0; | |
3848 | ||
3849 | xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); | |
3850 | xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); | |
3851 | trace_xhci_address_ctx(xhci, virt_dev->in_ctx, | |
3852 | le32_to_cpu(slot_ctx->dev_info) >> 27); | |
3853 | ||
3854 | spin_lock_irqsave(&xhci->lock, flags); | |
3855 | ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma, | |
3856 | udev->slot_id, setup); | |
3857 | if (ret) { | |
3858 | spin_unlock_irqrestore(&xhci->lock, flags); | |
3859 | xhci_dbg_trace(xhci, trace_xhci_dbg_address, | |
3860 | "FIXME: allocate a command ring segment"); | |
3861 | goto out; | |
3862 | } | |
3863 | xhci_ring_cmd_db(xhci); | |
3864 | spin_unlock_irqrestore(&xhci->lock, flags); | |
3865 | ||
3866 | /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */ | |
3867 | wait_for_completion(command->completion); | |
3868 | ||
3869 | /* FIXME: From section 4.3.4: "Software shall be responsible for timing | |
3870 | * the SetAddress() "recovery interval" required by USB and aborting the | |
3871 | * command on a timeout." | 
3872 | */ | |
3873 | switch (command->status) { | |
3874 | case COMP_CMD_ABORT: | |
3875 | case COMP_CMD_STOP: | |
3876 | xhci_warn(xhci, "Timeout while waiting for setup device command\n"); | |
3877 | ret = -ETIME; | |
3878 | break; | |
3879 | case COMP_CTX_STATE: | |
3880 | case COMP_EBADSLT: | |
3881 | xhci_err(xhci, "Setup ERROR: setup %s command for slot %d.\n", | |
3882 | act, udev->slot_id); | |
3883 | ret = -EINVAL; | |
3884 | break; | |
3885 | case COMP_TX_ERR: | |
3886 | dev_warn(&udev->dev, "Device not responding to setup %s.\n", act); | |
3887 | ret = -EPROTO; | |
3888 | break; | |
3889 | case COMP_DEV_ERR: | |
3890 | dev_warn(&udev->dev, | |
3891 | "ERROR: Incompatible device for setup %s command\n", act); | |
3892 | ret = -ENODEV; | |
3893 | break; | |
3894 | case COMP_SUCCESS: | |
3895 | xhci_dbg_trace(xhci, trace_xhci_dbg_address, | |
3896 | "Successful setup %s command", act); | |
3897 | break; | |
3898 | default: | |
3899 | xhci_err(xhci, | |
3900 | "ERROR: unexpected setup %s command completion code 0x%x.\n", | |
3901 | act, command->status); | |
3902 | xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id); | |
3903 | xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2); | |
3904 | trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 1); | |
3905 | ret = -EINVAL; | |
3906 | break; | |
3907 | } | |
3908 | if (ret) | |
3909 | goto out; | |
3910 | temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); | |
3911 | xhci_dbg_trace(xhci, trace_xhci_dbg_address, | |
3912 | "Op regs DCBAA ptr = %#016llx", temp_64); | |
3913 | xhci_dbg_trace(xhci, trace_xhci_dbg_address, | |
3914 | "Slot ID %d dcbaa entry @%p = %#016llx", | |
3915 | udev->slot_id, | |
3916 | &xhci->dcbaa->dev_context_ptrs[udev->slot_id], | |
3917 | (unsigned long long) | |
3918 | le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id])); | |
3919 | xhci_dbg_trace(xhci, trace_xhci_dbg_address, | |
3920 | "Output Context DMA address = %#08llx", | |
3921 | (unsigned long long)virt_dev->out_ctx->dma); | |
3922 | xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); | |
3923 | xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); | |
3924 | trace_xhci_address_ctx(xhci, virt_dev->in_ctx, | |
3925 | le32_to_cpu(slot_ctx->dev_info) >> 27); | |
3926 | xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id); | |
3927 | xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2); | |
3928 | /* | |
3929 | * USB core uses address 1 for the roothubs, so we add one to the | |
3930 | * address given back to us by the HC. | |
3931 | */ | |
3932 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); | |
3933 | trace_xhci_address_ctx(xhci, virt_dev->out_ctx, | |
3934 | le32_to_cpu(slot_ctx->dev_info) >> 27); | |
3935 | /* Zero the input context control for later use */ | |
3936 | ctrl_ctx->add_flags = 0; | |
3937 | ctrl_ctx->drop_flags = 0; | |
3938 | ||
3939 | xhci_dbg_trace(xhci, trace_xhci_dbg_address, | |
3940 | "Internal device address = %d", | |
3941 | le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK); | |
3942 | out: | |
3943 | mutex_unlock(&xhci->mutex); | |
3944 | kfree(command); | |
3945 | return ret; | |
3946 | } | |
3947 | ||
3948 | int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) | |
3949 | { | |
3950 | return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS); | |
3951 | } | |
3952 | ||
3953 | int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev) | |
3954 | { | |
3955 | return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ONLY); | |
3956 | } | |
3957 | ||
3958 | /* | |
3959 | * Translate the port index into the real index in the HW port status | 
3960 | * registers. Calculate the offset between the port's PORTSC register | 
3961 | * and the port status base, then divide by the number of per-port | 
3962 | * registers to get the real index. The raw port number is 1-based. | 
3963 | */ | |
3964 | int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1) | |
3965 | { | |
3966 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | |
3967 | __le32 __iomem *base_addr = &xhci->op_regs->port_status_base; | |
3968 | __le32 __iomem *addr; | |
3969 | int raw_port; | |
3970 | ||
3971 | if (hcd->speed < HCD_USB3) | |
3972 | addr = xhci->usb2_ports[port1 - 1]; | |
3973 | else | |
3974 | addr = xhci->usb3_ports[port1 - 1]; | |
3975 | ||
3976 | raw_port = (addr - base_addr)/NUM_PORT_REGS + 1; | |
3977 | return raw_port; | |
3978 | } | |
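/*
 * Worked example: each port owns NUM_PORT_REGS (4) 32-bit registers
 * (PORTSC, PORTPMSC, PORTLI, PORTHLPMC).  If a port's PORTSC sits 8
 * registers past port_status_base, raw_port = 8 / 4 + 1 = 3.
 */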
3979 | ||
3980 | /* | |
3981 | * Issue an Evaluate Context command to change the Maximum Exit Latency in the | |
3982 | * slot context. If that succeeds, store the new MEL in the xhci_virt_device. | |
3983 | */ | |
3984 | static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci, | |
3985 | struct usb_device *udev, u16 max_exit_latency) | |
3986 | { | |
3987 | struct xhci_virt_device *virt_dev; | |
3988 | struct xhci_command *command; | |
3989 | struct xhci_input_control_ctx *ctrl_ctx; | |
3990 | struct xhci_slot_ctx *slot_ctx; | |
3991 | unsigned long flags; | |
3992 | int ret; | |
3993 | ||
3994 | spin_lock_irqsave(&xhci->lock, flags); | |
3995 | ||
3996 | virt_dev = xhci->devs[udev->slot_id]; | |
3997 | ||
3998 | /* | |
3999 | * virt_dev might not exist yet if the xHC resumed from hibernate (S4) and | 
4000 | * xHC was re-initialized. Exit latency will be set later after | |
4001 | * hub_port_finish_reset() is done and xhci->devs[] are re-allocated | |
4002 | */ | |
4003 | ||
4004 | if (!virt_dev || max_exit_latency == virt_dev->current_mel) { | |
4005 | spin_unlock_irqrestore(&xhci->lock, flags); | |
4006 | return 0; | |
4007 | } | |
4008 | ||
4009 | /* Attempt to issue an Evaluate Context command to change the MEL. */ | |
4010 | command = xhci->lpm_command; | |
4011 | ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx); | |
4012 | if (!ctrl_ctx) { | |
4013 | spin_unlock_irqrestore(&xhci->lock, flags); | |
4014 | xhci_warn(xhci, "%s: Could not get input context, bad type.\n", | |
4015 | __func__); | |
4016 | return -ENOMEM; | |
4017 | } | |
4018 | ||
4019 | xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx); | |
4020 | spin_unlock_irqrestore(&xhci->lock, flags); | |
4021 | ||
4022 | ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); | |
4023 | slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx); | |
4024 | slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT)); | |
4025 | slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency); | |
4026 | slot_ctx->dev_state = 0; | |
4027 | ||
4028 | xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, | |
4029 | "Set up evaluate context for LPM MEL change."); | |
4030 | xhci_dbg(xhci, "Slot %u Input Context:\n", udev->slot_id); | |
4031 | xhci_dbg_ctx(xhci, command->in_ctx, 0); | |
4032 | ||
4033 | /* Issue and wait for the evaluate context command. */ | |
4034 | ret = xhci_configure_endpoint(xhci, udev, command, | |
4035 | true, true); | |
4036 | xhci_dbg(xhci, "Slot %u Output Context:\n", udev->slot_id); | |
4037 | xhci_dbg_ctx(xhci, virt_dev->out_ctx, 0); | |
4038 | ||
4039 | if (!ret) { | |
4040 | spin_lock_irqsave(&xhci->lock, flags); | |
4041 | virt_dev->current_mel = max_exit_latency; | |
4042 | spin_unlock_irqrestore(&xhci->lock, flags); | |
4043 | } | |
4044 | return ret; | |
4045 | } | |
4046 | ||
4047 | #ifdef CONFIG_PM | |
4048 | ||
4049 | /* BESL to HIRD Encoding array for USB2 LPM */ | |
4050 | static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000, | |
4051 | 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000}; | |
4052 | ||
4053 | /* Calculate HIRD/BESL for USB2 PORTPMSC */ | 
4054 | static int xhci_calculate_hird_besl(struct xhci_hcd *xhci, | |
4055 | struct usb_device *udev) | |
4056 | { | |
4057 | int u2del, besl, besl_host; | |
4058 | int besl_device = 0; | |
4059 | u32 field; | |
4060 | ||
4061 | u2del = HCS_U2_LATENCY(xhci->hcs_params3); | |
4062 | field = le32_to_cpu(udev->bos->ext_cap->bmAttributes); | |
4063 | ||
4064 | if (field & USB_BESL_SUPPORT) { | |
4065 | for (besl_host = 0; besl_host < 16; besl_host++) { | |
4066 | if (xhci_besl_encoding[besl_host] >= u2del) | |
4067 | break; | |
4068 | } | |
4069 | /* Use baseline BESL value as default */ | |
4070 | if (field & USB_BESL_BASELINE_VALID) | |
4071 | besl_device = USB_GET_BESL_BASELINE(field); | |
4072 | else if (field & USB_BESL_DEEP_VALID) | |
4073 | besl_device = USB_GET_BESL_DEEP(field); | |
4074 | } else { | |
4075 | if (u2del <= 50) | |
4076 | besl_host = 0; | |
4077 | else | |
4078 | besl_host = (u2del - 51) / 75 + 1; | |
4079 | } | |
4080 | ||
4081 | besl = besl_host + besl_device; | |
4082 | if (besl > 15) | |
4083 | besl = 15; | |
4084 | ||
4085 | return besl; | |
4086 | } | |
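/*
 * Worked example: with u2del = 400 us and a device that advertises BESL
 * with a valid baseline of 2, the host scan stops at index 4
 * (xhci_besl_encoding[4] == 400), so besl = 4 + 2 = 6.  Without BESL
 * support, u2del = 400 would instead give besl_host = (400 - 51) / 75 + 1 = 5.
 */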
4087 | ||
4088 | /* Calculate BESLD, L1 timeout and HIRDM for USB2 PORTHLPMC */ | |
4089 | static int xhci_calculate_usb2_hw_lpm_params(struct usb_device *udev) | |
4090 | { | |
4091 | u32 field; | |
4092 | int l1; | |
4093 | int besld = 0; | |
4094 | int hirdm = 0; | |
4095 | ||
4096 | field = le32_to_cpu(udev->bos->ext_cap->bmAttributes); | |
4097 | ||
4098 | /* xHCI l1 is set in steps of 256us, xHCI 1.0 section 5.4.11.2 */ | |
4099 | l1 = udev->l1_params.timeout / 256; | |
4100 | ||
4101 | /* device has preferred BESLD */ | |
4102 | if (field & USB_BESL_DEEP_VALID) { | |
4103 | besld = USB_GET_BESL_DEEP(field); | |
4104 | hirdm = 1; | |
4105 | } | |
4106 | ||
4107 | return PORT_BESLD(besld) | PORT_L1_TIMEOUT(l1) | PORT_HIRDM(hirdm); | |
4108 | } | |
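/*
 * Worked example: an L1 timeout of 2048 us gives l1 = 2048 / 256 = 8.
 * If the device reports a valid deep BESL of 5, besld = 5 and hirdm = 1,
 * so the PORTHLPMC value is PORT_BESLD(5) | PORT_L1_TIMEOUT(8) |
 * PORT_HIRDM(1).
 */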
4109 | ||
4110 | int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, | |
4111 | struct usb_device *udev, int enable) | |
4112 | { | |
4113 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | |
4114 | __le32 __iomem **port_array; | |
4115 | __le32 __iomem *pm_addr, *hlpm_addr; | |
4116 | u32 pm_val, hlpm_val, field; | |
4117 | unsigned int port_num; | |
4118 | unsigned long flags; | |
4119 | int hird, exit_latency; | |
4120 | int ret; | |
4121 | ||
4122 | if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support || | |
4123 | !udev->lpm_capable) | |
4124 | return -EPERM; | |
4125 | ||
4126 | if (!udev->parent || udev->parent->parent || | |
4127 | udev->descriptor.bDeviceClass == USB_CLASS_HUB) | |
4128 | return -EPERM; | |
4129 | ||
4130 | if (udev->usb2_hw_lpm_capable != 1) | |
4131 | return -EPERM; | |
4132 | ||
4133 | spin_lock_irqsave(&xhci->lock, flags); | |
4134 | ||
4135 | port_array = xhci->usb2_ports; | |
4136 | port_num = udev->portnum - 1; | |
4137 | pm_addr = port_array[port_num] + PORTPMSC; | |
4138 | pm_val = readl(pm_addr); | |
4139 | hlpm_addr = port_array[port_num] + PORTHLPMC; | |
4140 | field = le32_to_cpu(udev->bos->ext_cap->bmAttributes); | |
4141 | ||
4142 | xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n", | |
4143 | enable ? "enable" : "disable", port_num + 1); | |
4144 | ||
4145 | if (enable) { | |
4146 | /* Host supports BESL timeout instead of HIRD */ | |
4147 | if (udev->usb2_hw_lpm_besl_capable) { | |
4148 | /* if the device doesn't have a preferred BESL value, use a | 
4149 | * default one that works with mixed HIRD and BESL | 
4150 | * systems. See the XHCI_DEFAULT_BESL definition in xhci.h | 
4151 | */ | |
4152 | if ((field & USB_BESL_SUPPORT) && | |
4153 | (field & USB_BESL_BASELINE_VALID)) | |
4154 | hird = USB_GET_BESL_BASELINE(field); | |
4155 | else | |
4156 | hird = udev->l1_params.besl; | |
4157 | ||
4158 | exit_latency = xhci_besl_encoding[hird]; | |
4159 | spin_unlock_irqrestore(&xhci->lock, flags); | |
4160 | ||
4161 | /* The USB 3.0 code dedicates one xhci->lpm_command->in_ctx | 
4162 | * input context for link power management evaluate | 
4163 | * context commands. It is protected by the hcd->bandwidth | 
4164 | * mutex and is shared by all devices. We need to set | 
4165 | * the max exit latency in USB 2 BESL LPM as well, so | 
4166 | * use the same mutex and xhci_change_max_exit_latency(). | 
4167 | */ | |
4168 | mutex_lock(hcd->bandwidth_mutex); | |
4169 | ret = xhci_change_max_exit_latency(xhci, udev, | |
4170 | exit_latency); | |
4171 | mutex_unlock(hcd->bandwidth_mutex); | |
4172 | ||
4173 | if (ret < 0) | |
4174 | return ret; | |
4175 | spin_lock_irqsave(&xhci->lock, flags); | |
4176 | ||
4177 | hlpm_val = xhci_calculate_usb2_hw_lpm_params(udev); | |
4178 | writel(hlpm_val, hlpm_addr); | |
4179 | /* flush write */ | |
4180 | readl(hlpm_addr); | |
4181 | } else { | |
4182 | hird = xhci_calculate_hird_besl(xhci, udev); | |
4183 | } | |
4184 | ||
4185 | pm_val &= ~PORT_HIRD_MASK; | |
4186 | pm_val |= PORT_HIRD(hird) | PORT_RWE | PORT_L1DS(udev->slot_id); | |
4187 | writel(pm_val, pm_addr); | |
4188 | pm_val = readl(pm_addr); | |
4189 | pm_val |= PORT_HLE; | |
4190 | writel(pm_val, pm_addr); | |
4191 | /* flush write */ | |
4192 | readl(pm_addr); | |
4193 | } else { | |
4194 | pm_val &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK | PORT_L1DS_MASK); | |
4195 | writel(pm_val, pm_addr); | |
4196 | /* flush write */ | |
4197 | readl(pm_addr); | |
4198 | if (udev->usb2_hw_lpm_besl_capable) { | |
4199 | spin_unlock_irqrestore(&xhci->lock, flags); | |
4200 | mutex_lock(hcd->bandwidth_mutex); | |
4201 | xhci_change_max_exit_latency(xhci, udev, 0); | |
4202 | mutex_unlock(hcd->bandwidth_mutex); | |
4203 | return 0; | |
4204 | } | |
4205 | } | |
4206 | ||
4207 | spin_unlock_irqrestore(&xhci->lock, flags); | |
4208 | return 0; | |
4209 | } | |
4210 | ||
4211 | /* Check if a USB2 port supports a given extended capability protocol. | 
4212 | * Only USB2 ports' extended protocol capability values are cached. | 
4213 | * Return 1 if the capability is supported. | 
4214 | */ | |
4215 | static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port, | |
4216 | unsigned capability) | |
4217 | { | |
4218 | u32 port_offset, port_count; | |
4219 | int i; | |
4220 | ||
4221 | for (i = 0; i < xhci->num_ext_caps; i++) { | |
4222 | if (xhci->ext_caps[i] & capability) { | |
4223 | /* port offsets start at 1 */ | 
4224 | port_offset = XHCI_EXT_PORT_OFF(xhci->ext_caps[i]) - 1; | |
4225 | port_count = XHCI_EXT_PORT_COUNT(xhci->ext_caps[i]); | |
4226 | if (port >= port_offset && | |
4227 | port < port_offset + port_count) | |
4228 | return 1; | |
4229 | } | |
4230 | } | |
4231 | return 0; | |
4232 | } | |
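/*
 * Example: an extended capability register describing ports 5-8 has
 * XHCI_EXT_PORT_OFF == 5 and XHCI_EXT_PORT_COUNT == 4; after the "- 1"
 * adjustment above it matches the zero-based port values 4 through 7.
 */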
4233 | ||
4234 | int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev) | |
4235 | { | |
4236 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | |
4237 | int portnum = udev->portnum - 1; | |
4238 | ||
4239 | if (hcd->speed >= HCD_USB3 || !xhci->sw_lpm_support || | |
4240 | !udev->lpm_capable) | |
4241 | return 0; | |
4242 | ||
4243 | /* we only support LPM for non-hub devices connected to the root hub so far */ | 
4244 | if (!udev->parent || udev->parent->parent || | |
4245 | udev->descriptor.bDeviceClass == USB_CLASS_HUB) | |
4246 | return 0; | |
4247 | ||
4248 | if (xhci->hw_lpm_support == 1 && | |
4249 | xhci_check_usb2_port_capability( | |
4250 | xhci, portnum, XHCI_HLC)) { | |
4251 | udev->usb2_hw_lpm_capable = 1; | |
4252 | udev->l1_params.timeout = XHCI_L1_TIMEOUT; | |
4253 | udev->l1_params.besl = XHCI_DEFAULT_BESL; | |
4254 | if (xhci_check_usb2_port_capability(xhci, portnum, | |
4255 | XHCI_BLC)) | |
4256 | udev->usb2_hw_lpm_besl_capable = 1; | |
4257 | } | |
4258 | ||
4259 | return 0; | |
4260 | } | |
4261 | ||
4262 | /*---------------------- USB 3.0 Link PM functions ------------------------*/ | |
4263 | ||
4264 | /* Service interval in nanoseconds = 2^(bInterval - 1) * 125 us * (1000 ns / 1 us) */ | 
4265 | static unsigned long long xhci_service_interval_to_ns( | |
4266 | struct usb_endpoint_descriptor *desc) | |
4267 | { | |
4268 | return (1ULL << (desc->bInterval - 1)) * 125 * 1000; | |
4269 | } | |
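/*
 * Example: bInterval = 4 gives 2^(4 - 1) * 125 us = 1000 us, i.e. the
 * function returns 1,000,000 ns.
 */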
4270 | ||
4271 | static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev, | |
4272 | enum usb3_link_state state) | |
4273 | { | |
4274 | unsigned long long sel; | |
4275 | unsigned long long pel; | |
4276 | unsigned int max_sel_pel; | |
4277 | char *state_name; | |
4278 | ||
4279 | switch (state) { | |
4280 | case USB3_LPM_U1: | |
4281 | /* Convert SEL and PEL stored in nanoseconds to microseconds */ | |
4282 | sel = DIV_ROUND_UP(udev->u1_params.sel, 1000); | |
4283 | pel = DIV_ROUND_UP(udev->u1_params.pel, 1000); | |
4284 | max_sel_pel = USB3_LPM_MAX_U1_SEL_PEL; | |
4285 | state_name = "U1"; | |
4286 | break; | |
4287 | case USB3_LPM_U2: | |
4288 | sel = DIV_ROUND_UP(udev->u2_params.sel, 1000); | |
4289 | pel = DIV_ROUND_UP(udev->u2_params.pel, 1000); | |
4290 | max_sel_pel = USB3_LPM_MAX_U2_SEL_PEL; | |
4291 | state_name = "U2"; | |
4292 | break; | |
4293 | default: | |
4294 | dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n", | |
4295 | __func__); | |
4296 | return USB3_LPM_DISABLED; | |
4297 | } | |
4298 | ||
4299 | if (sel <= max_sel_pel && pel <= max_sel_pel) | |
4300 | return USB3_LPM_DEVICE_INITIATED; | |
4301 | ||
4302 | if (sel > max_sel_pel) | |
4303 | dev_dbg(&udev->dev, "Device-initiated %s disabled " | 
4304 | "due to long SEL %llu us\n", | 
4305 | state_name, sel); | 
4306 | else | |
4307 | dev_dbg(&udev->dev, "Device-initiated %s disabled " | 
4308 | "due to long PEL %llu us\n", | 
4309 | state_name, pel); | 
4310 | return USB3_LPM_DISABLED; | |
4311 | } | |
4312 | ||
4313 | /* The U1 timeout should be the maximum of the following values: | |
4314 | * - For control endpoints, U1 system exit latency (SEL) * 3 | |
4315 | * - For bulk endpoints, U1 SEL * 5 | |
4316 | * - For interrupt endpoints: | |
4317 | * - Notification EPs, U1 SEL * 3 | |
4318 | * - Periodic EPs, max(105% of bInterval, U1 SEL * 2) | |
4319 | * - For isochronous endpoints, max(105% of bInterval, U1 SEL * 2) | |
4320 | */ | |
4321 | static unsigned long long xhci_calculate_intel_u1_timeout( | |
4322 | struct usb_device *udev, | |
4323 | struct usb_endpoint_descriptor *desc) | |
4324 | { | |
4325 | unsigned long long timeout_ns; | |
4326 | int ep_type; | |
4327 | int intr_type; | |
4328 | ||
4329 | ep_type = usb_endpoint_type(desc); | |
4330 | switch (ep_type) { | |
4331 | case USB_ENDPOINT_XFER_CONTROL: | |
4332 | timeout_ns = udev->u1_params.sel * 3; | |
4333 | break; | |
4334 | case USB_ENDPOINT_XFER_BULK: | |
4335 | timeout_ns = udev->u1_params.sel * 5; | |
4336 | break; | |
4337 | case USB_ENDPOINT_XFER_INT: | |
4338 | intr_type = usb_endpoint_interrupt_type(desc); | |
4339 | if (intr_type == USB_ENDPOINT_INTR_NOTIFICATION) { | |
4340 | timeout_ns = udev->u1_params.sel * 3; | |
4341 | break; | |
4342 | } | |
4343 | /* Otherwise the calculation is the same as isoc eps: fall through */ | |
4344 | case USB_ENDPOINT_XFER_ISOC: | |
4345 | timeout_ns = xhci_service_interval_to_ns(desc); | |
4346 | timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100); | |
4347 | if (timeout_ns < udev->u1_params.sel * 2) | |
4348 | timeout_ns = udev->u1_params.sel * 2; | |
4349 | break; | |
4350 | default: | |
4351 | return 0; | |
4352 | } | |
4353 | ||
4354 | return timeout_ns; | |
4355 | } | |
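/*
 * Worked example (illustrative, hypothetical values): a bulk endpoint on a
 * device reporting a U1 SEL of 3000 ns gets timeout_ns = 3000 * 5 = 15000.
 * xhci_calculate_u1_timeout() below then encodes this as
 * DIV_ROUND_UP_ULL(15000, 1000) = 15, i.e. a 15 us U1 timeout, well under
 * USB3_LPM_U1_MAX_TIMEOUT.
 */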
4356 | ||
4357 | /* Returns the hub-encoded U1 timeout value. */ | |
4358 | static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci, | |
4359 | struct usb_device *udev, | |
4360 | struct usb_endpoint_descriptor *desc) | |
4361 | { | |
4362 | unsigned long long timeout_ns; | |
4363 | ||
4364 | if (xhci->quirks & XHCI_INTEL_HOST) | |
4365 | timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc); | |
4366 | else | |
4367 | timeout_ns = udev->u1_params.sel; | |
4368 | ||
4369 | /* The U1 timeout is encoded in 1us intervals. | |
4370 | * Don't return a timeout of zero, because that's USB3_LPM_DISABLED. | |
4371 | */ | |
4372 | if (timeout_ns == USB3_LPM_DISABLED) | |
4373 | timeout_ns = 1; | |
4374 | else | |
4375 | timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 1000); | |
4376 | ||
4377 | /* If the necessary timeout value is bigger than what we can set in the | |
4378 | * USB 3.0 hub, we have to disable hub-initiated U1. | |
4379 | */ | |
4380 | if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT) | |
4381 | return timeout_ns; | |
4382 | dev_dbg(&udev->dev, "Hub-initiated U1 disabled " | |
4383 | "due to long timeout %llu ms\n", timeout_ns); | |
4384 | return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1); | |
4385 | } | |
4386 | ||
4387 | /* The U2 timeout should be the maximum of: | |
4388 | * - 10 ms (to avoid the bandwidth impact on the scheduler) | |
4389 | * - largest bInterval of any active periodic endpoint (to avoid going | |
4390 | * into lower power link states between intervals). | |
4391 | * - the U2 Exit Latency of the device | |
4392 | */ | |
4393 | static unsigned long long xhci_calculate_intel_u2_timeout( | |
4394 | struct usb_device *udev, | |
4395 | struct usb_endpoint_descriptor *desc) | |
4396 | { | |
4397 | unsigned long long timeout_ns; | |
4398 | unsigned long long u2_del_ns; | |
4399 | ||
4400 | timeout_ns = 10 * 1000 * 1000; | |
4401 | ||
4402 | if ((usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) && | |
4403 | (xhci_service_interval_to_ns(desc) > timeout_ns)) | |
4404 | timeout_ns = xhci_service_interval_to_ns(desc); | |
4405 | ||
4406 | u2_del_ns = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat) * 1000ULL; | |
4407 | if (u2_del_ns > timeout_ns) | |
4408 | timeout_ns = u2_del_ns; | |
4409 | ||
4410 | return timeout_ns; | |
4411 | } | |
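/*
 * Worked example (illustrative, hypothetical values): an interrupt endpoint
 * with bInterval = 10 has a service interval of 2^9 * 125 us = 64 ms, which
 * beats the 10 ms floor; with bU2DevExitLat = 2000 us (2 ms), the result is
 * max(10 ms, 64 ms, 2 ms) = 64000000 ns.
 */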
4412 | ||
4413 | /* Returns the hub-encoded U2 timeout value. */ | |
4414 | static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci, | |
4415 | struct usb_device *udev, | |
4416 | struct usb_endpoint_descriptor *desc) | |
4417 | { | |
4418 | unsigned long long timeout_ns; | |
4419 | ||
4420 | if (xhci->quirks & XHCI_INTEL_HOST) | |
4421 | timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc); | |
4422 | else | |
4423 | timeout_ns = udev->u2_params.sel; | |
4424 | ||
4425 | /* The U2 timeout is encoded in 256us intervals */ | |
4426 | timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000); | |
4427 | /* If the necessary timeout value is bigger than what we can set in the | |
4428 | * USB 3.0 hub, we have to disable hub-initiated U2. | |
4429 | */ | |
4430 | if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT) | |
4431 | return timeout_ns; | |
4432 | dev_dbg(&udev->dev, "Hub-initiated U2 disabled " | |
4433 | "due to long timeout %llu ms\n", timeout_ns); | |
4434 | return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2); | |
4435 | } | |
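/*
 * Continuing the example above (illustrative): 64000000 ns encodes as
 * DIV_ROUND_UP_ULL(64000000, 256 * 1000) = 250, i.e. 250 * 256 us = 64 ms,
 * which fits under USB3_LPM_U2_MAX_TIMEOUT and is returned unchanged.
 */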
4436 | ||
4437 | static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci, | |
4438 | struct usb_device *udev, | |
4439 | struct usb_endpoint_descriptor *desc, | |
4440 | enum usb3_link_state state, | |
4441 | u16 *timeout) | |
4442 | { | |
4443 | if (state == USB3_LPM_U1) | |
4444 | return xhci_calculate_u1_timeout(xhci, udev, desc); | |
4445 | else if (state == USB3_LPM_U2) | |
4446 | return xhci_calculate_u2_timeout(xhci, udev, desc); | |
4447 | ||
4448 | return USB3_LPM_DISABLED; | |
4449 | } | |
4450 | ||
4451 | static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci, | |
4452 | struct usb_device *udev, | |
4453 | struct usb_endpoint_descriptor *desc, | |
4454 | enum usb3_link_state state, | |
4455 | u16 *timeout) | |
4456 | { | |
4457 | u16 alt_timeout; | |
4458 | ||
4459 | alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev, | |
4460 | desc, state, timeout); | |
4461 | ||
4462 | /* If we found we can't enable hub-initiated LPM, or | |
4463 | * the U1 or U2 exit latency was too high to allow | |
4464 | * device-initiated LPM as well, just stop searching. | |
4465 | */ | |
4466 | if (alt_timeout == USB3_LPM_DISABLED || | |
4467 | alt_timeout == USB3_LPM_DEVICE_INITIATED) { | |
4468 | *timeout = alt_timeout; | |
4469 | return -E2BIG; | |
4470 | } | |
4471 | if (alt_timeout > *timeout) | |
4472 | *timeout = alt_timeout; | |
4473 | return 0; | |
4474 | } | |
4475 | ||
4476 | static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci, | |
4477 | struct usb_device *udev, | |
4478 | struct usb_host_interface *alt, | |
4479 | enum usb3_link_state state, | |
4480 | u16 *timeout) | |
4481 | { | |
4482 | int j; | |
4483 | ||
4484 | for (j = 0; j < alt->desc.bNumEndpoints; j++) { | |
4485 | if (xhci_update_timeout_for_endpoint(xhci, udev, | |
4486 | &alt->endpoint[j].desc, state, timeout)) | |
4487 | return -E2BIG; | |
4489 | } | |
4490 | return 0; | |
4491 | } | |
4492 | ||
4493 | static int xhci_check_intel_tier_policy(struct usb_device *udev, | |
4494 | enum usb3_link_state state) | |
4495 | { | |
4496 | struct usb_device *parent; | |
4497 | unsigned int num_hubs; | |
4498 | ||
4499 | if (state == USB3_LPM_U2) | |
4500 | return 0; | |
4501 | ||
4502 | /* Don't enable U1 if the device is on a 2nd tier hub or lower. */ | |
4503 | for (parent = udev->parent, num_hubs = 0; parent->parent; | |
4504 | parent = parent->parent) | |
4505 | num_hubs++; | |
4506 | ||
4507 | if (num_hubs < 2) | |
4508 | return 0; | |
4509 | ||
4510 | dev_dbg(&udev->dev, "Disabling U1 link state for device" | |
4511 | " below second-tier hub.\n"); | |
4512 | dev_dbg(&udev->dev, "Plug device into first-tier hub " | |
4513 | "to decrease power consumption.\n"); | |
4514 | return -E2BIG; | |
4515 | } | |
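/*
 * Example (illustrative): for root hub -> hub A -> hub B -> device, the
 * walk above counts two parent hops before reaching the root, so
 * num_hubs = 2 and U1 is refused; the same device plugged directly into
 * hub A (num_hubs = 1) keeps U1 eligibility.
 */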
4516 | ||
4517 | static int xhci_check_tier_policy(struct xhci_hcd *xhci, | |
4518 | struct usb_device *udev, | |
4519 | enum usb3_link_state state) | |
4520 | { | |
4521 | if (xhci->quirks & XHCI_INTEL_HOST) | |
4522 | return xhci_check_intel_tier_policy(udev, state); | |
4523 | else | |
4524 | return 0; | |
4525 | } | |
4526 | ||
4527 | /* Returns the U1 or U2 timeout that should be enabled. | |
4528 | * If the tier check or timeout setting functions return with a non-zero exit | |
4529 | * code, that means the timeout value has been finalized and we shouldn't look | |
4530 | * at any more endpoints. | |
4531 | */ | |
4532 | static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd, | |
4533 | struct usb_device *udev, enum usb3_link_state state) | |
4534 | { | |
4535 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | |
4536 | struct usb_host_config *config; | |
4537 | char *state_name; | |
4538 | int i; | |
4539 | u16 timeout = USB3_LPM_DISABLED; | |
4540 | ||
4541 | if (state == USB3_LPM_U1) | |
4542 | state_name = "U1"; | |
4543 | else if (state == USB3_LPM_U2) | |
4544 | state_name = "U2"; | |
4545 | else { | |
4546 | dev_warn(&udev->dev, "Can't enable unknown link state %i\n", | |
4547 | state); | |
4548 | return timeout; | |
4549 | } | |
4550 | ||
4551 | if (xhci_check_tier_policy(xhci, udev, state) < 0) | |
4552 | return timeout; | |
4553 | ||
4554 | /* Gather some information about the currently installed configuration | |
4555 | * and alternate interface settings. | |
4556 | */ | |
4557 | if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc, | |
4558 | state, &timeout)) | |
4559 | return timeout; | |
4560 | ||
4561 | config = udev->actconfig; | |
4562 | if (!config) | |
4563 | return timeout; | |
4564 | ||
4565 | for (i = 0; i < config->desc.bNumInterfaces; i++) { | |
4566 | struct usb_driver *driver; | |
4567 | struct usb_interface *intf = config->interface[i]; | |
4568 | ||
4569 | if (!intf) | |
4570 | continue; | |
4571 | ||
4572 | /* Check if any currently bound drivers want hub-initiated LPM | |
4573 | * disabled. | |
4574 | */ | |
4575 | if (intf->dev.driver) { | |
4576 | driver = to_usb_driver(intf->dev.driver); | |
4577 | if (driver && driver->disable_hub_initiated_lpm) { | |
4578 | dev_dbg(&udev->dev, "Hub-initiated %s disabled " | |
4579 | "at request of driver %s\n", | |
4580 | state_name, driver->name); | |
4581 | return xhci_get_timeout_no_hub_lpm(udev, state); | |
4582 | } | |
4583 | } | |
4584 | ||
4585 | /* Not sure how this could happen... */ | |
4586 | if (!intf->cur_altsetting) | |
4587 | continue; | |
4588 | ||
4589 | if (xhci_update_timeout_for_interface(xhci, udev, | |
4590 | intf->cur_altsetting, | |
4591 | state, &timeout)) | |
4592 | return timeout; | |
4593 | } | |
4594 | return timeout; | |
4595 | } | |
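/*
 * Example flow (illustrative): for a device whose active config exposes a
 * single bulk-only interface, the code above folds in ep0 and then every
 * endpoint of the bound altsetting, keeping the largest hub-encoded
 * timeout; a bound driver that sets disable_hub_initiated_lpm
 * short-circuits the search via xhci_get_timeout_no_hub_lpm() instead.
 */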
4596 | ||
4597 | static int calculate_max_exit_latency(struct usb_device *udev, | |
4598 | enum usb3_link_state state_changed, | |
4599 | u16 hub_encoded_timeout) | |
4600 | { | |
4601 | unsigned long long u1_mel_us = 0; | |
4602 | unsigned long long u2_mel_us = 0; | |
4603 | unsigned long long mel_us = 0; | |
4604 | bool disabling_u1; | |
4605 | bool disabling_u2; | |
4606 | bool enabling_u1; | |
4607 | bool enabling_u2; | |
4608 | ||
4609 | disabling_u1 = (state_changed == USB3_LPM_U1 && | |
4610 | hub_encoded_timeout == USB3_LPM_DISABLED); | |
4611 | disabling_u2 = (state_changed == USB3_LPM_U2 && | |
4612 | hub_encoded_timeout == USB3_LPM_DISABLED); | |
4613 | ||
4614 | enabling_u1 = (state_changed == USB3_LPM_U1 && | |
4615 | hub_encoded_timeout != USB3_LPM_DISABLED); | |
4616 | enabling_u2 = (state_changed == USB3_LPM_U2 && | |
4617 | hub_encoded_timeout != USB3_LPM_DISABLED); | |
4618 | ||
4619 | /* If U1 was already enabled and we're not disabling it, | |
4620 | * or we're going to enable U1, account for the U1 max exit latency. | |
4621 | */ | |
4622 | if ((udev->u1_params.timeout != USB3_LPM_DISABLED && !disabling_u1) || | |
4623 | enabling_u1) | |
4624 | u1_mel_us = DIV_ROUND_UP(udev->u1_params.mel, 1000); | |
4625 | if ((udev->u2_params.timeout != USB3_LPM_DISABLED && !disabling_u2) || | |
4626 | enabling_u2) | |
4627 | u2_mel_us = DIV_ROUND_UP(udev->u2_params.mel, 1000); | |
4628 | ||
4629 | if (u1_mel_us > u2_mel_us) | |
4630 | mel_us = u1_mel_us; | |
4631 | else | |
4632 | mel_us = u2_mel_us; | |
4633 | /* xHCI host controller max exit latency field is only 16 bits wide. */ | |
4634 | if (mel_us > MAX_EXIT) { | |
4635 | dev_warn(&udev->dev, "Link PM max exit latency of %lluus " | |
4636 | "is too big.\n", mel_us); | |
4637 | return -E2BIG; | |
4638 | } | |
4639 | return mel_us; | |
4640 | } | |
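/*
 * Worked example (illustrative, hypothetical values): with U1 already
 * enabled (u1_params.mel = 3000 ns) and U2 now being enabled
 * (u2_params.mel = 2000 ns), u1_mel_us = 3 and u2_mel_us = 2, so a max
 * exit latency of 3 us is reported to the xHC.
 */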
4641 | ||
4642 | /* Returns the USB3 hub-encoded value for the U1/U2 timeout. */ | |
4643 | int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd, | |
4644 | struct usb_device *udev, enum usb3_link_state state) | |
4645 | { | |
4646 | struct xhci_hcd *xhci; | |
4647 | u16 hub_encoded_timeout; | |
4648 | int mel; | |
4649 | int ret; | |
4650 | ||
4651 | xhci = hcd_to_xhci(hcd); | |
4652 | /* The LPM timeout values are pretty host-controller specific, so don't | |
4653 | * enable hub-initiated timeouts unless the vendor has provided | |
4654 | * information about their timeout algorithm. | |
4655 | */ | |
4656 | if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) || | |
4657 | !xhci->devs[udev->slot_id]) | |
4658 | return USB3_LPM_DISABLED; | |
4659 | ||
4660 | hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state); | |
4661 | mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout); | |
4662 | if (mel < 0) { | |
4663 | /* Max Exit Latency is too big, disable LPM. */ | |
4664 | hub_encoded_timeout = USB3_LPM_DISABLED; | |
4665 | mel = 0; | |
4666 | } | |
4667 | ||
4668 | ret = xhci_change_max_exit_latency(xhci, udev, mel); | |
4669 | if (ret) | |
4670 | return ret; | |
4671 | return hub_encoded_timeout; | |
4672 | } | |
4673 | ||
4674 | int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd, | |
4675 | struct usb_device *udev, enum usb3_link_state state) | |
4676 | { | |
4677 | struct xhci_hcd *xhci; | |
4678 | u16 mel; | |
4679 | ||
4680 | xhci = hcd_to_xhci(hcd); | |
4681 | if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) || | |
4682 | !xhci->devs[udev->slot_id]) | |
4683 | return 0; | |
4684 | ||
4685 | mel = calculate_max_exit_latency(udev, state, USB3_LPM_DISABLED); | |
4686 | return xhci_change_max_exit_latency(xhci, udev, mel); | |
4687 | } | |
4688 | #else /* CONFIG_PM */ | |
4689 | ||
4690 | int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, | |
4691 | struct usb_device *udev, int enable) | |
4692 | { | |
4693 | return 0; | |
4694 | } | |
4695 | ||
4696 | int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev) | |
4697 | { | |
4698 | return 0; | |
4699 | } | |
4700 | ||
4701 | int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd, | |
4702 | struct usb_device *udev, enum usb3_link_state state) | |
4703 | { | |
4704 | return USB3_LPM_DISABLED; | |
4705 | } | |
4706 | ||
4707 | int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd, | |
4708 | struct usb_device *udev, enum usb3_link_state state) | |
4709 | { | |
4710 | return 0; | |
4711 | } | |
4712 | #endif /* CONFIG_PM */ | |
4713 | ||
4714 | /*-------------------------------------------------------------------------*/ | |
4715 | ||
4716 | /* Once a hub descriptor is fetched for a device, we need to update the xHC's | |
4717 | * internal data structures for the device. | |
4718 | */ | |
4719 | int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev, | |
4720 | struct usb_tt *tt, gfp_t mem_flags) | |
4721 | { | |
4722 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | |
4723 | struct xhci_virt_device *vdev; | |
4724 | struct xhci_command *config_cmd; | |
4725 | struct xhci_input_control_ctx *ctrl_ctx; | |
4726 | struct xhci_slot_ctx *slot_ctx; | |
4727 | unsigned long flags; | |
4728 | unsigned think_time; | |
4729 | int ret; | |
4730 | ||
4731 | /* Ignore root hubs */ | |
4732 | if (!hdev->parent) | |
4733 | return 0; | |
4734 | ||
4735 | vdev = xhci->devs[hdev->slot_id]; | |
4736 | if (!vdev) { | |
4737 | xhci_warn(xhci, "Cannot update hub desc for unknown device.\n"); | |
4738 | return -EINVAL; | |
4739 | } | |
4740 | config_cmd = xhci_alloc_command(xhci, true, true, mem_flags); | |
4741 | if (!config_cmd) { | |
4742 | xhci_dbg(xhci, "Could not allocate xHCI command structure.\n"); | |
4743 | return -ENOMEM; | |
4744 | } | |
4745 | ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx); | |
4746 | if (!ctrl_ctx) { | |
4747 | xhci_warn(xhci, "%s: Could not get input context, bad type.\n", | |
4748 | __func__); | |
4749 | xhci_free_command(xhci, config_cmd); | |
4750 | return -ENOMEM; | |
4751 | } | |
4752 | ||
4753 | spin_lock_irqsave(&xhci->lock, flags); | |
4754 | if (hdev->speed == USB_SPEED_HIGH && | |
4755 | xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) { | |
4756 | xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n"); | |
4757 | xhci_free_command(xhci, config_cmd); | |
4758 | spin_unlock_irqrestore(&xhci->lock, flags); | |
4759 | return -ENOMEM; | |
4760 | } | |
4761 | ||
4762 | xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx); | |
4763 | ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); | |
4764 | slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx); | |
4765 | slot_ctx->dev_info |= cpu_to_le32(DEV_HUB); | |
4766 | /* | |
4767 | * Refer to xHCI section 6.2.2: MTT shall be 0 for a full-speed hub, | |
4768 | * but it may already have been set to 1 when the xHCI virtual device | |
4769 | * was set up, so clear it anyway. | |
4770 | */ | |
4771 | if (tt->multi) | |
4772 | slot_ctx->dev_info |= cpu_to_le32(DEV_MTT); | |
4773 | else if (hdev->speed == USB_SPEED_FULL) | |
4774 | slot_ctx->dev_info &= cpu_to_le32(~DEV_MTT); | |
4775 | ||
4776 | if (xhci->hci_version > 0x95) { | |
4777 | xhci_dbg(xhci, "xHCI version %x needs hub " | |
4778 | "TT think time and number of ports\n", | |
4779 | (unsigned int) xhci->hci_version); | |
4780 | slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild)); | |
4781 | /* Set TT think time - convert from ns to FS bit times. | |
4782 | * 0 = 8 FS bit times, 1 = 16 FS bit times, | |
4783 | * 2 = 24 FS bit times, 3 = 32 FS bit times. | |
4784 | * | |
4785 | * xHCI 1.0: this field shall be 0 if the device is not a | |
4786 | * High-speed hub. | |
4787 | */ | |
4788 | think_time = tt->think_time; | |
4789 | if (think_time != 0) | |
4790 | think_time = (think_time / 666) - 1; | |
4791 | if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH) | |
4792 | slot_ctx->tt_info |= | |
4793 | cpu_to_le32(TT_THINK_TIME(think_time)); | |
4794 | } else { | |
4795 | xhci_dbg(xhci, "xHCI version %x doesn't need hub " | |
4796 | "TT think time or number of ports\n", | |
4797 | (unsigned int) xhci->hci_version); | |
4798 | } | |
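	/*
	 * Worked example (illustrative): a hub advertising a TT think time
	 * of 16 FS bit times has tt->think_time = 1332 ns (2 * 666 ns), so
	 * (1332 / 666) - 1 = 1 is programmed into the TT_THINK_TIME field.
	 */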
4799 | slot_ctx->dev_state = 0; | |
4800 | spin_unlock_irqrestore(&xhci->lock, flags); | |
4801 | ||
4802 | xhci_dbg(xhci, "Set up %s for hub device.\n", | |
4803 | (xhci->hci_version > 0x95) ? | |
4804 | "configure endpoint" : "evaluate context"); | |
4805 | xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id); | |
4806 | xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0); | |
4807 | ||
4808 | /* Issue and wait for the configure endpoint or | |
4809 | * evaluate context command. | |
4810 | */ | |
4811 | if (xhci->hci_version > 0x95) | |
4812 | ret = xhci_configure_endpoint(xhci, hdev, config_cmd, | |
4813 | false, false); | |
4814 | else | |
4815 | ret = xhci_configure_endpoint(xhci, hdev, config_cmd, | |
4816 | true, false); | |
4817 | ||
4818 | xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id); | |
4819 | xhci_dbg_ctx(xhci, vdev->out_ctx, 0); | |
4820 | ||
4821 | xhci_free_command(xhci, config_cmd); | |
4822 | return ret; | |
4823 | } | |
4824 | ||
4825 | int xhci_get_frame(struct usb_hcd *hcd) | |
4826 | { | |
4827 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | |
4828 | /* EHCI mods by the periodic size. Why? */ | |
4829 | return readl(&xhci->run_regs->microframe_index) >> 3; | |
4830 | } | |
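/*
 * Example (illustrative): a frame is eight 125 us microframes, so an
 * MFINDEX readout of 80 yields frame number 80 >> 3 = 10.
 */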
4831 | ||
4832 | int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) | |
4833 | { | |
4834 | struct xhci_hcd *xhci; | |
4835 | struct device *dev = hcd->self.controller; | |
4836 | int retval; | |
4837 | ||
4838 | /* Accept arbitrarily long scatter-gather lists */ | |
4839 | hcd->self.sg_tablesize = ~0; | |
4840 | ||
4841 | /* support to build packet from discontinuous buffers */ | |
4842 | hcd->self.no_sg_constraint = 1; | |
4843 | ||
4844 | /* XHCI controllers don't stop the ep queue on short packets :| */ | |
4845 | hcd->self.no_stop_on_short = 1; | |
4846 | ||
4847 | xhci = hcd_to_xhci(hcd); | |
4848 | ||
4849 | if (usb_hcd_is_primary_hcd(hcd)) { | |
4850 | xhci->main_hcd = hcd; | |
4851 | /* Mark the first roothub as being USB 2.0. | |
4852 | * The xHCI driver will register the USB 3.0 roothub. | |
4853 | */ | |
4854 | hcd->speed = HCD_USB2; | |
4855 | hcd->self.root_hub->speed = USB_SPEED_HIGH; | |
4856 | /* | |
4857 | * USB 2.0 roothub under xHCI has an integrated TT, | |
4858 | * (rate matching hub) as opposed to having an OHCI/UHCI | |
4859 | * companion controller. | |
4860 | */ | |
4861 | hcd->has_tt = 1; | |
4862 | } else { | |
4863 | if (xhci->sbrn == 0x31) { | |
4864 | xhci_info(xhci, "Host supports USB 3.1 Enhanced SuperSpeed\n"); | |
4865 | hcd->speed = HCD_USB31; | |
4866 | hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS; | |
4867 | } | |
4868 | /* xHCI private pointer was set in xhci_pci_probe for the second | |
4869 | * registered roothub. | |
4870 | */ | |
4871 | return 0; | |
4872 | } | |
4873 | ||
4874 | mutex_init(&xhci->mutex); | |
4875 | xhci->cap_regs = hcd->regs; | |
4876 | xhci->op_regs = hcd->regs + | |
4877 | HC_LENGTH(readl(&xhci->cap_regs->hc_capbase)); | |
4878 | xhci->run_regs = hcd->regs + | |
4879 | (readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK); | |
4880 | /* Cache read-only capability registers */ | |
4881 | xhci->hcs_params1 = readl(&xhci->cap_regs->hcs_params1); | |
4882 | xhci->hcs_params2 = readl(&xhci->cap_regs->hcs_params2); | |
4883 | xhci->hcs_params3 = readl(&xhci->cap_regs->hcs_params3); | |
4884 | xhci->hcc_params = readl(&xhci->cap_regs->hc_capbase); | |
4885 | xhci->hci_version = HC_VERSION(xhci->hcc_params); | |
4886 | xhci->hcc_params = readl(&xhci->cap_regs->hcc_params); | |
4887 | if (xhci->hci_version > 0x100) | |
4888 | xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2); | |
4889 | xhci_print_registers(xhci); | |
4890 | ||
4891 | xhci->quirks |= quirks; | |
4892 | ||
4893 | get_quirks(dev, xhci); | |
4894 | ||
4895 | /* xHCI controllers that follow the xHCI 1.0 spec can give a spurious | |
4896 | * success event after a short transfer. This quirk makes the driver | |
4897 | * ignore such spurious events. | |
4898 | */ | |
4899 | if (xhci->hci_version > 0x96) | |
4900 | xhci->quirks |= XHCI_SPURIOUS_SUCCESS; | |
4901 | ||
4902 | /* Make sure the HC is halted. */ | |
4903 | retval = xhci_halt(xhci); | |
4904 | if (retval) | |
4905 | return retval; | |
4906 | ||
4907 | xhci_dbg(xhci, "Resetting HCD\n"); | |
4908 | /* Reset the internal HC memory state and registers. */ | |
4909 | retval = xhci_reset(xhci); | |
4910 | if (retval) | |
4911 | return retval; | |
4912 | xhci_dbg(xhci, "Reset complete\n"); | |
4913 | ||
4914 | /* | |
4915 | * On some xHCI controllers (e.g. R-Car SoCs), the AC64 bit (bit 0) | |
4916 | * of HCCPARAMS1 is set to 1. However, these xHCs don't actually | |
4917 | * support 64-bit address memory pointers. So this driver clears the | |
4918 | * AC64 bit of xhci->hcc_params so that dma_set_coherent_mask(dev, | |
4919 | * DMA_BIT_MASK(32)) is used below in xhci_gen_setup(). | |
4920 | */ | |
4921 | if (xhci->quirks & XHCI_NO_64BIT_SUPPORT) | |
4922 | xhci->hcc_params &= ~BIT(0); | |
4923 | ||
4924 | /* Set dma_mask and coherent_dma_mask to 64-bits, | |
4925 | * if xHC supports 64-bit addressing */ | |
4926 | if (HCC_64BIT_ADDR(xhci->hcc_params) && | |
4927 | !dma_set_mask(dev, DMA_BIT_MASK(64))) { | |
4928 | xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n"); | |
4929 | dma_set_coherent_mask(dev, DMA_BIT_MASK(64)); | |
4930 | } else { | |
4931 | /* | |
4932 | * This is to avoid error in cases where a 32-bit USB | |
4933 | * controller is used on a 64-bit capable system. | |
4934 | */ | |
4935 | retval = dma_set_mask(dev, DMA_BIT_MASK(32)); | |
4936 | if (retval) | |
4937 | return retval; | |
4938 | xhci_dbg(xhci, "Enabling 32-bit DMA addresses.\n"); | |
4939 | dma_set_coherent_mask(dev, DMA_BIT_MASK(32)); | |
4940 | } | |
4941 | ||
4942 | xhci_dbg(xhci, "Calling HCD init\n"); | |
4943 | /* Initialize HCD and host controller data structures. */ | |
4944 | retval = xhci_init(hcd); | |
4945 | if (retval) | |
4946 | return retval; | |
4947 | xhci_dbg(xhci, "Called HCD init\n"); | |
4948 | ||
4949 | xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%08x\n", | |
4950 | xhci->hcc_params, xhci->hci_version, xhci->quirks); | |
4951 | ||
4952 | return 0; | |
4953 | } | |
4954 | EXPORT_SYMBOL_GPL(xhci_gen_setup); | |
4955 | ||
4956 | static const struct hc_driver xhci_hc_driver = { | |
4957 | .description = "xhci-hcd", | |
4958 | .product_desc = "xHCI Host Controller", | |
4959 | .hcd_priv_size = sizeof(struct xhci_hcd), | |
4960 | ||
4961 | /* | |
4962 | * generic hardware linkage | |
4963 | */ | |
4964 | .irq = xhci_irq, | |
4965 | .flags = HCD_MEMORY | HCD_USB3 | HCD_SHARED, | |
4966 | ||
4967 | /* | |
4968 | * basic lifecycle operations | |
4969 | */ | |
4970 | .reset = NULL, /* set in xhci_init_driver() */ | |
4971 | .start = xhci_run, | |
4972 | .stop = xhci_stop, | |
4973 | .shutdown = xhci_shutdown, | |
4974 | ||
4975 | /* | |
4976 | * managing i/o requests and associated device resources | |
4977 | */ | |
4978 | .urb_enqueue = xhci_urb_enqueue, | |
4979 | .urb_dequeue = xhci_urb_dequeue, | |
4980 | .alloc_dev = xhci_alloc_dev, | |
4981 | .free_dev = xhci_free_dev, | |
4982 | .alloc_streams = xhci_alloc_streams, | |
4983 | .free_streams = xhci_free_streams, | |
4984 | .add_endpoint = xhci_add_endpoint, | |
4985 | .drop_endpoint = xhci_drop_endpoint, | |
4986 | .endpoint_reset = xhci_endpoint_reset, | |
4987 | .check_bandwidth = xhci_check_bandwidth, | |
4988 | .reset_bandwidth = xhci_reset_bandwidth, | |
4989 | .address_device = xhci_address_device, | |
4990 | .enable_device = xhci_enable_device, | |
4991 | .update_hub_device = xhci_update_hub_device, | |
4992 | .reset_device = xhci_discover_or_reset_device, | |
4993 | ||
4994 | /* | |
4995 | * scheduling support | |
4996 | */ | |
4997 | .get_frame_number = xhci_get_frame, | |
4998 | ||
4999 | /* | |
5000 | * root hub support | |
5001 | */ | |
5002 | .hub_control = xhci_hub_control, | |
5003 | .hub_status_data = xhci_hub_status_data, | |
5004 | .bus_suspend = xhci_bus_suspend, | |
5005 | .bus_resume = xhci_bus_resume, | |
5006 | ||
5007 | /* | |
5008 | * call back when device connected and addressed | |
5009 | */ | |
5010 | .update_device = xhci_update_device, | |
5011 | .set_usb2_hw_lpm = xhci_set_usb2_hardware_lpm, | |
5012 | .enable_usb3_lpm_timeout = xhci_enable_usb3_lpm_timeout, | |
5013 | .disable_usb3_lpm_timeout = xhci_disable_usb3_lpm_timeout, | |
5014 | .find_raw_port_number = xhci_find_raw_port_number, | |
5015 | }; | |
5016 | ||
5017 | void xhci_init_driver(struct hc_driver *drv, | |
5018 | const struct xhci_driver_overrides *over) | |
5019 | { | |
5020 | BUG_ON(!over); | |
5021 | ||
5022 | /* Copy the generic table to drv then apply the overrides */ | |
5023 | *drv = xhci_hc_driver; | |
5024 | ||
5026 | drv->hcd_priv_size += over->extra_priv_size; | |
5027 | if (over->reset) | |
5028 | drv->reset = over->reset; | |
5029 | if (over->start) | |
5030 | drv->start = over->start; | |
5032 | } | |
5033 | EXPORT_SYMBOL_GPL(xhci_init_driver); | |
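/*
 * Usage sketch (illustrative; the "my_" names are hypothetical, but this
 * mirrors how glue drivers such as xhci-plat.c consume the helper):
 *
 *	static struct hc_driver my_xhci_hc_driver;
 *
 *	static const struct xhci_driver_overrides my_overrides = {
 *		.extra_priv_size = sizeof(struct my_priv_data),
 *		.reset = my_xhci_setup,
 *	};
 *
 *	xhci_init_driver(&my_xhci_hc_driver, &my_overrides);
 */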
5034 | ||
5035 | MODULE_DESCRIPTION(DRIVER_DESC); | |
5036 | MODULE_AUTHOR(DRIVER_AUTHOR); | |
5037 | MODULE_LICENSE("GPL"); | |
5038 | ||
5039 | static int __init xhci_hcd_init(void) | |
5040 | { | |
5041 | /* | |
5042 | * Check the compiler generated sizes of structures that must be laid | |
5043 | * out in specific ways for hardware access. | |
5044 | */ | |
5045 | BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8); | |
5046 | BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8); | |
5047 | BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8); | |
5048 | /* xhci_device_control has eight fields, and also | |
5049 | * embeds one xhci_slot_ctx and 31 xhci_ep_ctx | |
5050 | */ | |
5051 | BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8); | |
5052 | BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8); | |
5053 | BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8); | |
5054 | BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 8*32/8); | |
5055 | BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8); | |
5056 | /* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */ | |
5057 | BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8); | |
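/*
 * Example (illustrative): struct xhci_slot_ctx consists of eight 32-bit
 * fields, so the check above asserts 8 * 32 / 8 = 32 bytes, matching the
 * layout the controller hardware expects.
 */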
5058 | ||
5059 | if (usb_disabled()) | |
5060 | return -ENODEV; | |
5061 | ||
5062 | return 0; | |
5063 | } | |
5064 | ||
5065 | /* | |
5066 | * If an init function is provided, an exit function must also be provided | |
5067 | * to allow module unload. | |
5068 | */ | |
5069 | static void __exit xhci_hcd_fini(void) { } | |
5070 | ||
5071 | module_init(xhci_hcd_init); | |
5072 | module_exit(xhci_hcd_fini); |