/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
25 #define XHCI_INIT_VALUE 0x0
27 /* Add verbose debugging later, just print everything for now */
29 void xhci_dbg_regs(struct xhci_hcd
*xhci
)
33 xhci_dbg(xhci
, "// xHCI capability registers at %p:\n",
35 temp
= readl(&xhci
->cap_regs
->hc_capbase
);
36 xhci_dbg(xhci
, "// @%p = 0x%x (CAPLENGTH AND HCIVERSION)\n",
37 &xhci
->cap_regs
->hc_capbase
, temp
);
38 xhci_dbg(xhci
, "// CAPLENGTH: 0x%x\n",
39 (unsigned int) HC_LENGTH(temp
));
40 xhci_dbg(xhci
, "// HCIVERSION: 0x%x\n",
41 (unsigned int) HC_VERSION(temp
));
43 xhci_dbg(xhci
, "// xHCI operational registers at %p:\n", xhci
->op_regs
);
45 temp
= readl(&xhci
->cap_regs
->run_regs_off
);
46 xhci_dbg(xhci
, "// @%p = 0x%x RTSOFF\n",
47 &xhci
->cap_regs
->run_regs_off
,
48 (unsigned int) temp
& RTSOFF_MASK
);
49 xhci_dbg(xhci
, "// xHCI runtime registers at %p:\n", xhci
->run_regs
);
51 temp
= readl(&xhci
->cap_regs
->db_off
);
52 xhci_dbg(xhci
, "// @%p = 0x%x DBOFF\n", &xhci
->cap_regs
->db_off
, temp
);
53 xhci_dbg(xhci
, "// Doorbell array at %p:\n", xhci
->dba
);
56 static void xhci_print_cap_regs(struct xhci_hcd
*xhci
)
61 xhci_dbg(xhci
, "xHCI capability registers at %p:\n", xhci
->cap_regs
);
63 temp
= readl(&xhci
->cap_regs
->hc_capbase
);
64 hci_version
= HC_VERSION(temp
);
65 xhci_dbg(xhci
, "CAPLENGTH AND HCIVERSION 0x%x:\n",
67 xhci_dbg(xhci
, "CAPLENGTH: 0x%x\n",
68 (unsigned int) HC_LENGTH(temp
));
69 xhci_dbg(xhci
, "HCIVERSION: 0x%x\n", hci_version
);
71 temp
= readl(&xhci
->cap_regs
->hcs_params1
);
72 xhci_dbg(xhci
, "HCSPARAMS 1: 0x%x\n",
74 xhci_dbg(xhci
, " Max device slots: %u\n",
75 (unsigned int) HCS_MAX_SLOTS(temp
));
76 xhci_dbg(xhci
, " Max interrupters: %u\n",
77 (unsigned int) HCS_MAX_INTRS(temp
));
78 xhci_dbg(xhci
, " Max ports: %u\n",
79 (unsigned int) HCS_MAX_PORTS(temp
));
81 temp
= readl(&xhci
->cap_regs
->hcs_params2
);
82 xhci_dbg(xhci
, "HCSPARAMS 2: 0x%x\n",
84 xhci_dbg(xhci
, " Isoc scheduling threshold: %u\n",
85 (unsigned int) HCS_IST(temp
));
86 xhci_dbg(xhci
, " Maximum allowed segments in event ring: %u\n",
87 (unsigned int) HCS_ERST_MAX(temp
));
89 temp
= readl(&xhci
->cap_regs
->hcs_params3
);
90 xhci_dbg(xhci
, "HCSPARAMS 3 0x%x:\n",
92 xhci_dbg(xhci
, " Worst case U1 device exit latency: %u\n",
93 (unsigned int) HCS_U1_LATENCY(temp
));
94 xhci_dbg(xhci
, " Worst case U2 device exit latency: %u\n",
95 (unsigned int) HCS_U2_LATENCY(temp
));
97 temp
= readl(&xhci
->cap_regs
->hcc_params
);
98 xhci_dbg(xhci
, "HCC PARAMS 0x%x:\n", (unsigned int) temp
);
99 xhci_dbg(xhci
, " HC generates %s bit addresses\n",
100 HCC_64BIT_ADDR(temp
) ? "64" : "32");
101 xhci_dbg(xhci
, " HC %s Contiguous Frame ID Capability\n",
102 HCC_CFC(temp
) ? "has" : "hasn't");
103 xhci_dbg(xhci
, " HC %s generate Stopped - Short Package event\n",
104 HCC_SPC(temp
) ? "can" : "can't");
106 xhci_dbg(xhci
, " FIXME: more HCCPARAMS debugging\n");
108 temp
= readl(&xhci
->cap_regs
->run_regs_off
);
109 xhci_dbg(xhci
, "RTSOFF 0x%x:\n", temp
& RTSOFF_MASK
);
111 /* xhci 1.1 controllers have the HCCPARAMS2 register */
112 if (hci_version
> 100) {
113 temp
= readl(&xhci
->cap_regs
->hcc_params2
);
114 xhci_dbg(xhci
, "HCC PARAMS2 0x%x:\n", (unsigned int) temp
);
115 xhci_dbg(xhci
, " HC %s Force save context capability",
116 HCC2_FSC(temp
) ? "supports" : "doesn't support");
117 xhci_dbg(xhci
, " HC %s Large ESIT Payload Capability",
118 HCC2_LEC(temp
) ? "supports" : "doesn't support");
119 xhci_dbg(xhci
, " HC %s Extended TBC capability",
120 HCC2_ETC(temp
) ? "supports" : "doesn't support");
124 static void xhci_print_command_reg(struct xhci_hcd
*xhci
)
128 temp
= readl(&xhci
->op_regs
->command
);
129 xhci_dbg(xhci
, "USBCMD 0x%x:\n", temp
);
130 xhci_dbg(xhci
, " HC is %s\n",
131 (temp
& CMD_RUN
) ? "running" : "being stopped");
132 xhci_dbg(xhci
, " HC has %sfinished hard reset\n",
133 (temp
& CMD_RESET
) ? "not " : "");
134 xhci_dbg(xhci
, " Event Interrupts %s\n",
135 (temp
& CMD_EIE
) ? "enabled " : "disabled");
136 xhci_dbg(xhci
, " Host System Error Interrupts %s\n",
137 (temp
& CMD_HSEIE
) ? "enabled " : "disabled");
138 xhci_dbg(xhci
, " HC has %sfinished light reset\n",
139 (temp
& CMD_LRESET
) ? "not " : "");
142 static void xhci_print_status(struct xhci_hcd
*xhci
)
146 temp
= readl(&xhci
->op_regs
->status
);
147 xhci_dbg(xhci
, "USBSTS 0x%x:\n", temp
);
148 xhci_dbg(xhci
, " Event ring is %sempty\n",
149 (temp
& STS_EINT
) ? "not " : "");
150 xhci_dbg(xhci
, " %sHost System Error\n",
151 (temp
& STS_FATAL
) ? "WARNING: " : "No ");
152 xhci_dbg(xhci
, " HC is %s\n",
153 (temp
& STS_HALT
) ? "halted" : "running");
156 static void xhci_print_op_regs(struct xhci_hcd
*xhci
)
158 xhci_dbg(xhci
, "xHCI operational registers at %p:\n", xhci
->op_regs
);
159 xhci_print_command_reg(xhci
);
160 xhci_print_status(xhci
);
163 static void xhci_print_ports(struct xhci_hcd
*xhci
)
165 __le32 __iomem
*addr
;
168 char *names
[NUM_PORT_REGS
] = {
175 ports
= HCS_MAX_PORTS(xhci
->hcs_params1
);
176 addr
= &xhci
->op_regs
->port_status_base
;
177 for (i
= 0; i
< ports
; i
++) {
178 for (j
= 0; j
< NUM_PORT_REGS
; j
++) {
179 xhci_dbg(xhci
, "%p port %s reg = 0x%x\n",
181 (unsigned int) readl(addr
));
187 void xhci_print_ir_set(struct xhci_hcd
*xhci
, int set_num
)
189 struct xhci_intr_reg __iomem
*ir_set
= &xhci
->run_regs
->ir_set
[set_num
];
194 addr
= &ir_set
->irq_pending
;
196 if (temp
== XHCI_INIT_VALUE
)
199 xhci_dbg(xhci
, " %p: ir_set[%i]\n", ir_set
, set_num
);
201 xhci_dbg(xhci
, " %p: ir_set.pending = 0x%x\n", addr
,
204 addr
= &ir_set
->irq_control
;
206 xhci_dbg(xhci
, " %p: ir_set.control = 0x%x\n", addr
,
209 addr
= &ir_set
->erst_size
;
211 xhci_dbg(xhci
, " %p: ir_set.erst_size = 0x%x\n", addr
,
214 addr
= &ir_set
->rsvd
;
216 if (temp
!= XHCI_INIT_VALUE
)
217 xhci_dbg(xhci
, " WARN: %p: ir_set.rsvd = 0x%x\n",
218 addr
, (unsigned int)temp
);
220 addr
= &ir_set
->erst_base
;
221 temp_64
= xhci_read_64(xhci
, addr
);
222 xhci_dbg(xhci
, " %p: ir_set.erst_base = @%08llx\n",
225 addr
= &ir_set
->erst_dequeue
;
226 temp_64
= xhci_read_64(xhci
, addr
);
227 xhci_dbg(xhci
, " %p: ir_set.erst_dequeue = @%08llx\n",
231 void xhci_print_run_regs(struct xhci_hcd
*xhci
)
236 xhci_dbg(xhci
, "xHCI runtime registers at %p:\n", xhci
->run_regs
);
237 temp
= readl(&xhci
->run_regs
->microframe_index
);
238 xhci_dbg(xhci
, " %p: Microframe index = 0x%x\n",
239 &xhci
->run_regs
->microframe_index
,
240 (unsigned int) temp
);
241 for (i
= 0; i
< 7; i
++) {
242 temp
= readl(&xhci
->run_regs
->rsvd
[i
]);
243 if (temp
!= XHCI_INIT_VALUE
)
244 xhci_dbg(xhci
, " WARN: %p: Rsvd[%i] = 0x%x\n",
245 &xhci
->run_regs
->rsvd
[i
],
246 i
, (unsigned int) temp
);
/* Print capability, operational, and port registers in one pass. */
void xhci_print_registers(struct xhci_hcd *xhci)
{
	xhci_print_cap_regs(xhci);
	xhci_print_op_regs(xhci);
	xhci_print_ports(xhci);
}
257 void xhci_print_trb_offsets(struct xhci_hcd
*xhci
, union xhci_trb
*trb
)
260 for (i
= 0; i
< 4; i
++)
261 xhci_dbg(xhci
, "Offset 0x%x = 0x%x\n",
262 i
*4, trb
->generic
.field
[i
]);
266 * Debug a transfer request block (TRB).
268 void xhci_debug_trb(struct xhci_hcd
*xhci
, union xhci_trb
*trb
)
271 u32 type
= le32_to_cpu(trb
->link
.control
) & TRB_TYPE_BITMASK
;
274 case TRB_TYPE(TRB_LINK
):
275 xhci_dbg(xhci
, "Link TRB:\n");
276 xhci_print_trb_offsets(xhci
, trb
);
278 address
= le64_to_cpu(trb
->link
.segment_ptr
);
279 xhci_dbg(xhci
, "Next ring segment DMA address = 0x%llx\n", address
);
281 xhci_dbg(xhci
, "Interrupter target = 0x%x\n",
282 GET_INTR_TARGET(le32_to_cpu(trb
->link
.intr_target
)));
283 xhci_dbg(xhci
, "Cycle bit = %u\n",
284 le32_to_cpu(trb
->link
.control
) & TRB_CYCLE
);
285 xhci_dbg(xhci
, "Toggle cycle bit = %u\n",
286 le32_to_cpu(trb
->link
.control
) & LINK_TOGGLE
);
287 xhci_dbg(xhci
, "No Snoop bit = %u\n",
288 le32_to_cpu(trb
->link
.control
) & TRB_NO_SNOOP
);
290 case TRB_TYPE(TRB_TRANSFER
):
291 address
= le64_to_cpu(trb
->trans_event
.buffer
);
293 * FIXME: look at flags to figure out if it's an address or if
294 * the data is directly in the buffer field.
296 xhci_dbg(xhci
, "DMA address or buffer contents= %llu\n", address
);
298 case TRB_TYPE(TRB_COMPLETION
):
299 address
= le64_to_cpu(trb
->event_cmd
.cmd_trb
);
300 xhci_dbg(xhci
, "Command TRB pointer = %llu\n", address
);
301 xhci_dbg(xhci
, "Completion status = %u\n",
302 GET_COMP_CODE(le32_to_cpu(trb
->event_cmd
.status
)));
303 xhci_dbg(xhci
, "Flags = 0x%x\n",
304 le32_to_cpu(trb
->event_cmd
.flags
));
307 xhci_dbg(xhci
, "Unknown TRB with TRB type ID %u\n",
308 (unsigned int) type
>>10);
309 xhci_print_trb_offsets(xhci
, trb
);
315 * Debug a segment with an xHCI ring.
317 * @return The Link TRB of the segment, or NULL if there is no Link TRB
318 * (which is a bug, since all segments must have a Link TRB).
320 * Prints out all TRBs in the segment, even those after the Link TRB.
322 * XXX: should we print out TRBs that the HC owns? As long as we don't
323 * write, that should be fine... We shouldn't expect that the memory pointed to
324 * by the TRB is valid at all. Do we care about ones the HC owns? Probably,
327 void xhci_debug_segment(struct xhci_hcd
*xhci
, struct xhci_segment
*seg
)
331 union xhci_trb
*trb
= seg
->trbs
;
333 for (i
= 0; i
< TRBS_PER_SEGMENT
; i
++) {
335 xhci_dbg(xhci
, "@%016llx %08x %08x %08x %08x\n", addr
,
336 lower_32_bits(le64_to_cpu(trb
->link
.segment_ptr
)),
337 upper_32_bits(le64_to_cpu(trb
->link
.segment_ptr
)),
338 le32_to_cpu(trb
->link
.intr_target
),
339 le32_to_cpu(trb
->link
.control
));
340 addr
+= sizeof(*trb
);
344 void xhci_dbg_ring_ptrs(struct xhci_hcd
*xhci
, struct xhci_ring
*ring
)
346 xhci_dbg(xhci
, "Ring deq = %p (virt), 0x%llx (dma)\n",
348 (unsigned long long)xhci_trb_virt_to_dma(ring
->deq_seg
,
350 xhci_dbg(xhci
, "Ring deq updated %u times\n",
352 xhci_dbg(xhci
, "Ring enq = %p (virt), 0x%llx (dma)\n",
354 (unsigned long long)xhci_trb_virt_to_dma(ring
->enq_seg
,
356 xhci_dbg(xhci
, "Ring enq updated %u times\n",
361 * Debugging for an xHCI ring, which is a queue broken into multiple segments.
363 * Print out each segment in the ring. Check that the DMA address in
364 * each link segment actually matches the segment's stored DMA address.
365 * Check that the link end bit is only set at the end of the ring.
366 * Check that the dequeue and enqueue pointers point to real data in this ring
367 * (not some other ring).
369 void xhci_debug_ring(struct xhci_hcd
*xhci
, struct xhci_ring
*ring
)
371 /* FIXME: Throw an error if any segment doesn't have a Link TRB */
372 struct xhci_segment
*seg
;
373 struct xhci_segment
*first_seg
= ring
->first_seg
;
374 xhci_debug_segment(xhci
, first_seg
);
376 if (!ring
->enq_updates
&& !ring
->deq_updates
) {
377 xhci_dbg(xhci
, " Ring has not been updated\n");
380 for (seg
= first_seg
->next
; seg
!= first_seg
; seg
= seg
->next
)
381 xhci_debug_segment(xhci
, seg
);
384 void xhci_dbg_ep_rings(struct xhci_hcd
*xhci
,
385 unsigned int slot_id
, unsigned int ep_index
,
386 struct xhci_virt_ep
*ep
)
389 struct xhci_ring
*ring
;
391 if (ep
->ep_state
& EP_HAS_STREAMS
) {
392 for (i
= 1; i
< ep
->stream_info
->num_streams
; i
++) {
393 ring
= ep
->stream_info
->stream_rings
[i
];
394 xhci_dbg(xhci
, "Dev %d endpoint %d stream ID %d:\n",
395 slot_id
, ep_index
, i
);
396 xhci_debug_segment(xhci
, ring
->deq_seg
);
402 xhci_dbg(xhci
, "Dev %d endpoint ring %d:\n",
404 xhci_debug_segment(xhci
, ring
->deq_seg
);
408 void xhci_dbg_erst(struct xhci_hcd
*xhci
, struct xhci_erst
*erst
)
410 u64 addr
= erst
->erst_dma_addr
;
412 struct xhci_erst_entry
*entry
;
414 for (i
= 0; i
< erst
->num_entries
; i
++) {
415 entry
= &erst
->entries
[i
];
416 xhci_dbg(xhci
, "@%016llx %08x %08x %08x %08x\n",
418 lower_32_bits(le64_to_cpu(entry
->seg_addr
)),
419 upper_32_bits(le64_to_cpu(entry
->seg_addr
)),
420 le32_to_cpu(entry
->seg_size
),
421 le32_to_cpu(entry
->rsvd
));
422 addr
+= sizeof(*entry
);
426 void xhci_dbg_cmd_ptrs(struct xhci_hcd
*xhci
)
430 val
= xhci_read_64(xhci
, &xhci
->op_regs
->cmd_ring
);
431 xhci_dbg(xhci
, "// xHC command ring deq ptr low bits + flags = @%08x\n",
433 xhci_dbg(xhci
, "// xHC command ring deq ptr high bits = @%08x\n",
437 /* Print the last 32 bytes for 64-byte contexts */
438 static void dbg_rsvd64(struct xhci_hcd
*xhci
, u64
*ctx
, dma_addr_t dma
)
441 for (i
= 0; i
< 4; i
++) {
442 xhci_dbg(xhci
, "@%p (virt) @%08llx "
443 "(dma) %#08llx - rsvd64[%d]\n",
444 &ctx
[4 + i
], (unsigned long long)dma
,
450 char *xhci_get_slot_state(struct xhci_hcd
*xhci
,
451 struct xhci_container_ctx
*ctx
)
453 struct xhci_slot_ctx
*slot_ctx
= xhci_get_slot_ctx(xhci
, ctx
);
455 switch (GET_SLOT_STATE(le32_to_cpu(slot_ctx
->dev_state
))) {
456 case SLOT_STATE_ENABLED
:
457 return "enabled/disabled";
458 case SLOT_STATE_DEFAULT
:
460 case SLOT_STATE_ADDRESSED
:
462 case SLOT_STATE_CONFIGURED
:
469 static void xhci_dbg_slot_ctx(struct xhci_hcd
*xhci
, struct xhci_container_ctx
*ctx
)
471 /* Fields are 32 bits wide, DMA addresses are in bytes */
472 int field_size
= 32 / 8;
475 struct xhci_slot_ctx
*slot_ctx
= xhci_get_slot_ctx(xhci
, ctx
);
476 dma_addr_t dma
= ctx
->dma
+
477 ((unsigned long)slot_ctx
- (unsigned long)ctx
->bytes
);
478 int csz
= HCC_64BYTE_CONTEXT(xhci
->hcc_params
);
480 xhci_dbg(xhci
, "Slot Context:\n");
481 xhci_dbg(xhci
, "@%p (virt) @%08llx (dma) %#08x - dev_info\n",
483 (unsigned long long)dma
, slot_ctx
->dev_info
);
485 xhci_dbg(xhci
, "@%p (virt) @%08llx (dma) %#08x - dev_info2\n",
486 &slot_ctx
->dev_info2
,
487 (unsigned long long)dma
, slot_ctx
->dev_info2
);
489 xhci_dbg(xhci
, "@%p (virt) @%08llx (dma) %#08x - tt_info\n",
491 (unsigned long long)dma
, slot_ctx
->tt_info
);
493 xhci_dbg(xhci
, "@%p (virt) @%08llx (dma) %#08x - dev_state\n",
494 &slot_ctx
->dev_state
,
495 (unsigned long long)dma
, slot_ctx
->dev_state
);
497 for (i
= 0; i
< 4; i
++) {
498 xhci_dbg(xhci
, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
499 &slot_ctx
->reserved
[i
], (unsigned long long)dma
,
500 slot_ctx
->reserved
[i
], i
);
505 dbg_rsvd64(xhci
, (u64
*)slot_ctx
, dma
);
508 static void xhci_dbg_ep_ctx(struct xhci_hcd
*xhci
,
509 struct xhci_container_ctx
*ctx
,
510 unsigned int last_ep
)
513 int last_ep_ctx
= 31;
514 /* Fields are 32 bits wide, DMA addresses are in bytes */
515 int field_size
= 32 / 8;
516 int csz
= HCC_64BYTE_CONTEXT(xhci
->hcc_params
);
519 last_ep_ctx
= last_ep
+ 1;
520 for (i
= 0; i
< last_ep_ctx
; i
++) {
521 unsigned int epaddr
= xhci_get_endpoint_address(i
);
522 struct xhci_ep_ctx
*ep_ctx
= xhci_get_ep_ctx(xhci
, ctx
, i
);
523 dma_addr_t dma
= ctx
->dma
+
524 ((unsigned long)ep_ctx
- (unsigned long)ctx
->bytes
);
526 xhci_dbg(xhci
, "%s Endpoint %02d Context (ep_index %02d):\n",
527 usb_endpoint_out(epaddr
) ? "OUT" : "IN",
528 epaddr
& USB_ENDPOINT_NUMBER_MASK
, i
);
529 xhci_dbg(xhci
, "@%p (virt) @%08llx (dma) %#08x - ep_info\n",
531 (unsigned long long)dma
, ep_ctx
->ep_info
);
533 xhci_dbg(xhci
, "@%p (virt) @%08llx (dma) %#08x - ep_info2\n",
535 (unsigned long long)dma
, ep_ctx
->ep_info2
);
537 xhci_dbg(xhci
, "@%p (virt) @%08llx (dma) %#08llx - deq\n",
539 (unsigned long long)dma
, ep_ctx
->deq
);
541 xhci_dbg(xhci
, "@%p (virt) @%08llx (dma) %#08x - tx_info\n",
543 (unsigned long long)dma
, ep_ctx
->tx_info
);
545 for (j
= 0; j
< 3; j
++) {
546 xhci_dbg(xhci
, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
547 &ep_ctx
->reserved
[j
],
548 (unsigned long long)dma
,
549 ep_ctx
->reserved
[j
], j
);
554 dbg_rsvd64(xhci
, (u64
*)ep_ctx
, dma
);
558 void xhci_dbg_ctx(struct xhci_hcd
*xhci
,
559 struct xhci_container_ctx
*ctx
,
560 unsigned int last_ep
)
563 /* Fields are 32 bits wide, DMA addresses are in bytes */
564 int field_size
= 32 / 8;
565 dma_addr_t dma
= ctx
->dma
;
566 int csz
= HCC_64BYTE_CONTEXT(xhci
->hcc_params
);
568 if (ctx
->type
== XHCI_CTX_TYPE_INPUT
) {
569 struct xhci_input_control_ctx
*ctrl_ctx
=
570 xhci_get_input_control_ctx(ctx
);
572 xhci_warn(xhci
, "Could not get input context, bad type.\n");
576 xhci_dbg(xhci
, "@%p (virt) @%08llx (dma) %#08x - drop flags\n",
577 &ctrl_ctx
->drop_flags
, (unsigned long long)dma
,
578 ctrl_ctx
->drop_flags
);
580 xhci_dbg(xhci
, "@%p (virt) @%08llx (dma) %#08x - add flags\n",
581 &ctrl_ctx
->add_flags
, (unsigned long long)dma
,
582 ctrl_ctx
->add_flags
);
584 for (i
= 0; i
< 6; i
++) {
585 xhci_dbg(xhci
, "@%p (virt) @%08llx (dma) %#08x - rsvd2[%d]\n",
586 &ctrl_ctx
->rsvd2
[i
], (unsigned long long)dma
,
587 ctrl_ctx
->rsvd2
[i
], i
);
592 dbg_rsvd64(xhci
, (u64
*)ctrl_ctx
, dma
);
595 xhci_dbg_slot_ctx(xhci
, ctx
);
596 xhci_dbg_ep_ctx(xhci
, ctx
, last_ep
);
599 void xhci_dbg_trace(struct xhci_hcd
*xhci
, void (*trace
)(struct va_format
*),
600 const char *fmt
, ...)
602 struct va_format vaf
;
608 xhci_dbg(xhci
, "%pV\n", &vaf
);
612 EXPORT_SYMBOL_GPL(xhci_dbg_trace
);